/*
 * Copyright (c) 2008-2011 Atheros Communications Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <linux/dma-mapping.h>
#include "ath9k.h"
#include "ar9003_mac.h"

#define BITS_PER_BYTE           8
#define OFDM_PLCP_BITS          22
#define HT_RC_2_STREAMS(_rc)    ((((_rc) & 0x78) >> 3) + 1)
#define L_STF                   8
#define L_LTF                   8
#define L_SIG                   4
#define HT_SIG                  8
#define HT_STF                  4
#define HT_LTF(_ns)             (4 * (_ns))
#define SYMBOL_TIME(_ns)        ((_ns) << 2) /* ns * 4 us */
#define SYMBOL_TIME_HALFGI(_ns) (((_ns) * 18 + 4) / 5)  /* ns * 3.6 us */
#define TIME_SYMBOLS(t)         ((t) >> 2)
#define TIME_SYMBOLS_HALFGI(t)  (((t) * 5 - 4) / 18)
#define NUM_SYMBOLS_PER_USEC(_usec) (_usec >> 2)
#define NUM_SYMBOLS_PER_USEC_HALFGI(_usec) (((_usec*5)-4)/18)
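
/*
 * Illustrative note (not in the original source): a full-GI OFDM
 * symbol lasts 4 us and a half-GI symbol 3.6 us, so for e.g. 10
 * symbols SYMBOL_TIME(10) = 10 << 2 = 40 us, while
 * SYMBOL_TIME_HALFGI(10) = (10 * 18 + 4) / 5 = 36 us; the "+ 4"
 * term rounds the 18/5 (= 3.6) scaling up to a whole microsecond.
 */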

static u16 bits_per_symbol[][2] = {
        /* 20MHz 40MHz */
        {    26,   54 }, /*  0: BPSK */
        {    52,  108 }, /*  1: QPSK 1/2 */
        {    78,  162 }, /*  2: QPSK 3/4 */
        {   104,  216 }, /*  3: 16-QAM 1/2 */
        {   156,  324 }, /*  4: 16-QAM 3/4 */
        {   208,  432 }, /*  5: 64-QAM 2/3 */
        {   234,  486 }, /*  6: 64-QAM 3/4 */
        {   260,  540 }, /*  7: 64-QAM 5/6 */
};

#define IS_HT_RATE(_rate)       ((_rate) & 0x80)
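
/*
 * Illustrative note (not in the original source): bits_per_symbol is
 * indexed by (mcs % 8) and channel width, then scaled by the stream
 * count. For MCS 12 on 20 MHz: HT_RC_2_STREAMS(12) = ((12 & 0x78) >> 3)
 * + 1 = 2 streams and bits_per_symbol[12 % 8][0] = 156, so one OFDM
 * symbol carries 156 * 2 = 312 data bits.
 */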

static void ath_tx_send_normal(struct ath_softc *sc, struct ath_txq *txq,
                               struct ath_atx_tid *tid, struct sk_buff *skb);
static void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb,
                            int tx_flags, struct ath_txq *txq);
static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf,
                                struct ath_txq *txq, struct list_head *bf_q,
                                struct ath_tx_status *ts, int txok);
static void ath_tx_txqaddbuf(struct ath_softc *sc, struct ath_txq *txq,
                             struct list_head *head, bool internal);
static void ath_tx_rc_status(struct ath_softc *sc, struct ath_buf *bf,
                             struct ath_tx_status *ts, int nframes, int nbad,
                             int txok);
static void ath_tx_update_baw(struct ath_softc *sc, struct ath_atx_tid *tid,
                              int seqno);
static struct ath_buf *ath_tx_setup_buffer(struct ath_softc *sc,
                                           struct ath_txq *txq,
                                           struct ath_atx_tid *tid,
                                           struct sk_buff *skb);

/*********************/
/* Aggregation logic */
/*********************/

void ath_txq_lock(struct ath_softc *sc, struct ath_txq *txq)
        __acquires(&txq->axq_lock)
{
        spin_lock_bh(&txq->axq_lock);
}

void ath_txq_unlock(struct ath_softc *sc, struct ath_txq *txq)
        __releases(&txq->axq_lock)
{
        spin_unlock_bh(&txq->axq_lock);
}

void ath_txq_unlock_complete(struct ath_softc *sc, struct ath_txq *txq)
        __releases(&txq->axq_lock)
{
        struct sk_buff_head q;
        struct sk_buff *skb;

        __skb_queue_head_init(&q);
        skb_queue_splice_init(&txq->complete_q, &q);
        spin_unlock_bh(&txq->axq_lock);

        while ((skb = __skb_dequeue(&q)))
                ieee80211_tx_status(sc->hw, skb);
}

static void ath_tx_queue_tid(struct ath_txq *txq, struct ath_atx_tid *tid)
{
        struct ath_atx_ac *ac = tid->ac;

        list_add_tail(&tid->list, &ac->tid_q);

        list_add_tail(&ac->list, &txq->axq_acq);
}

static void ath_tx_resume_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
{
        struct ath_txq *txq = tid->ac->txq;

        WARN_ON(!tid->paused);

        ath_txq_lock(sc, txq);
        tid->paused = false;

        if (skb_queue_empty(&tid->buf_q))
                goto unlock;

        ath_tx_queue_tid(txq, tid);
        ath_txq_schedule(sc, txq);
unlock:
        ath_txq_unlock_complete(sc, txq);
}

static struct ath_frame_info *get_frame_info(struct sk_buff *skb)
{
        struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
        BUILD_BUG_ON(sizeof(struct ath_frame_info) >
                     sizeof(tx_info->rate_driver_data));
        return (struct ath_frame_info *) &tx_info->rate_driver_data[0];
}

static void ath_send_bar(struct ath_atx_tid *tid, u16 seqno)
{
        ieee80211_send_bar(tid->an->vif, tid->an->sta->addr, tid->tidno,
                           seqno << IEEE80211_SEQ_SEQ_SHIFT);
}
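
/*
 * Illustrative note (not in the original source): the sequence number
 * occupies the upper 12 bits of the 802.11 Sequence Control field, so
 * it is shifted by IEEE80211_SEQ_SEQ_SHIFT (4) past the fragment
 * number; e.g. seqno 100 is encoded as 100 << 4 = 1600 (0x640).
 */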

static void ath_tx_flush_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
{
        struct ath_txq *txq = tid->ac->txq;
        struct sk_buff *skb;
        struct ath_buf *bf;
        struct list_head bf_head;
        struct ath_tx_status ts;
        struct ath_frame_info *fi;
        bool sendbar = false;

        INIT_LIST_HEAD(&bf_head);

        memset(&ts, 0, sizeof(ts));

        while ((skb = __skb_dequeue(&tid->buf_q))) {
                fi = get_frame_info(skb);
                bf = fi->bf;

                if (!bf) {
                        bf = ath_tx_setup_buffer(sc, txq, tid, skb);
                        if (!bf) {
                                ieee80211_free_txskb(sc->hw, skb);
                                continue;
                        }
                }

                if (fi->retries) {
                        list_add_tail(&bf->list, &bf_head);
                        ath_tx_update_baw(sc, tid, bf->bf_state.seqno);
                        ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0);
                        sendbar = true;
                } else {
                        ath_tx_send_normal(sc, txq, NULL, skb);
                }
        }

        if (tid->baw_head == tid->baw_tail) {
                tid->state &= ~AGGR_ADDBA_COMPLETE;
                tid->state &= ~AGGR_CLEANUP;
        }

        if (sendbar) {
                ath_txq_unlock(sc, txq);
                ath_send_bar(tid, tid->seq_start);
                ath_txq_lock(sc, txq);
        }
}

static void ath_tx_update_baw(struct ath_softc *sc, struct ath_atx_tid *tid,
                              int seqno)
{
        int index, cindex;

        index  = ATH_BA_INDEX(tid->seq_start, seqno);
        cindex = (tid->baw_head + index) & (ATH_TID_MAX_BUFS - 1);

        __clear_bit(cindex, tid->tx_buf);

        while (tid->baw_head != tid->baw_tail && !test_bit(tid->baw_head, tid->tx_buf)) {
                INCR(tid->seq_start, IEEE80211_SEQ_MAX);
                INCR(tid->baw_head, ATH_TID_MAX_BUFS);
                if (tid->bar_index >= 0)
                        tid->bar_index--;
        }
}

static void ath_tx_addto_baw(struct ath_softc *sc, struct ath_atx_tid *tid,
                             u16 seqno)
{
        int index, cindex;

        index  = ATH_BA_INDEX(tid->seq_start, seqno);
        cindex = (tid->baw_head + index) & (ATH_TID_MAX_BUFS - 1);
        __set_bit(cindex, tid->tx_buf);

        if (index >= ((tid->baw_tail - tid->baw_head) &
                      (ATH_TID_MAX_BUFS - 1))) {
                tid->baw_tail = cindex;
                INCR(tid->baw_tail, ATH_TID_MAX_BUFS);
        }
}
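
/*
 * Illustrative note (not in the original source): ATH_BA_INDEX gives a
 * frame's offset from the start of the block-ack window and the tx_buf
 * bitmap is addressed circularly: with seq_start = 100, baw_head = 5
 * and seqno = 103, index = 3 and
 * cindex = (5 + 3) & (ATH_TID_MAX_BUFS - 1) = 8.
 */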

/*
 * TODO: For frame(s) that are in the retry state, we will reuse the
 * sequence number(s) without setting the retry bit. The
 * alternative is to give up on these and BAR the receiver's window
 * forward.
 */
static void ath_tid_drain(struct ath_softc *sc, struct ath_txq *txq,
                          struct ath_atx_tid *tid)
{
        struct sk_buff *skb;
        struct ath_buf *bf;
        struct list_head bf_head;
        struct ath_tx_status ts;
        struct ath_frame_info *fi;

        memset(&ts, 0, sizeof(ts));
        INIT_LIST_HEAD(&bf_head);

        while ((skb = __skb_dequeue(&tid->buf_q))) {
                fi = get_frame_info(skb);
                bf = fi->bf;

                if (!bf) {
                        ath_tx_complete(sc, skb, ATH_TX_ERROR, txq);
                        continue;
                }

                list_add_tail(&bf->list, &bf_head);

                if (fi->retries)
                        ath_tx_update_baw(sc, tid, bf->bf_state.seqno);

                ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0);
        }

        tid->seq_next = tid->seq_start;
        tid->baw_tail = tid->baw_head;
        tid->bar_index = -1;
}

static void ath_tx_set_retry(struct ath_softc *sc, struct ath_txq *txq,
                             struct sk_buff *skb, int count)
{
        struct ath_frame_info *fi = get_frame_info(skb);
        struct ath_buf *bf = fi->bf;
        struct ieee80211_hdr *hdr;
        int prev = fi->retries;

        TX_STAT_INC(txq->axq_qnum, a_retries);
        fi->retries += count;

        if (prev > 0)
                return;

        hdr = (struct ieee80211_hdr *)skb->data;
        hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_RETRY);
        dma_sync_single_for_device(sc->dev, bf->bf_buf_addr,
                                   sizeof(*hdr), DMA_TO_DEVICE);
}

static struct ath_buf *ath_tx_get_buffer(struct ath_softc *sc)
{
        struct ath_buf *bf = NULL;

        spin_lock_bh(&sc->tx.txbuflock);

        if (unlikely(list_empty(&sc->tx.txbuf))) {
                spin_unlock_bh(&sc->tx.txbuflock);
                return NULL;
        }

        bf = list_first_entry(&sc->tx.txbuf, struct ath_buf, list);
        list_del(&bf->list);

        spin_unlock_bh(&sc->tx.txbuflock);

        return bf;
}

static void ath_tx_return_buffer(struct ath_softc *sc, struct ath_buf *bf)
{
        spin_lock_bh(&sc->tx.txbuflock);
        list_add_tail(&bf->list, &sc->tx.txbuf);
        spin_unlock_bh(&sc->tx.txbuflock);
}

static struct ath_buf* ath_clone_txbuf(struct ath_softc *sc, struct ath_buf *bf)
{
        struct ath_buf *tbf;

        tbf = ath_tx_get_buffer(sc);
        if (!tbf)
                return NULL;

        ATH_TXBUF_RESET(tbf);

        tbf->bf_mpdu = bf->bf_mpdu;
        tbf->bf_buf_addr = bf->bf_buf_addr;
        memcpy(tbf->bf_desc, bf->bf_desc, sc->sc_ah->caps.tx_desc_len);
        tbf->bf_state = bf->bf_state;

        return tbf;
}

static void ath_tx_count_frames(struct ath_softc *sc, struct ath_buf *bf,
                                struct ath_tx_status *ts, int txok,
                                int *nframes, int *nbad)
{
        struct ath_frame_info *fi;
        u16 seq_st = 0;
        u32 ba[WME_BA_BMP_SIZE >> 5];
        int ba_index;
        int isaggr = 0;

        *nbad = 0;
        *nframes = 0;

        isaggr = bf_isaggr(bf);
        if (isaggr) {
                seq_st = ts->ts_seqnum;
                memcpy(ba, &ts->ba_low, WME_BA_BMP_SIZE >> 3);
        }

        while (bf) {
                fi = get_frame_info(bf->bf_mpdu);
                ba_index = ATH_BA_INDEX(seq_st, bf->bf_state.seqno);

                (*nframes)++;
                if (!txok || (isaggr && !ATH_BA_ISSET(ba, ba_index)))
                        (*nbad)++;

                bf = bf->bf_next;
        }
}

static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
                                 struct ath_buf *bf, struct list_head *bf_q,
                                 struct ath_tx_status *ts, int txok, bool retry)
{
        struct ath_node *an = NULL;
        struct sk_buff *skb;
        struct ieee80211_sta *sta;
        struct ieee80211_hw *hw = sc->hw;
        struct ieee80211_hdr *hdr;
        struct ieee80211_tx_info *tx_info;
        struct ath_atx_tid *tid = NULL;
        struct ath_buf *bf_next, *bf_last = bf->bf_lastbf;
        struct list_head bf_head;
        struct sk_buff_head bf_pending;
        u16 seq_st = 0, acked_cnt = 0, txfail_cnt = 0, seq_first;
        u32 ba[WME_BA_BMP_SIZE >> 5];
        int isaggr, txfail, txpending, sendbar = 0, needreset = 0, nbad = 0;
        bool rc_update = true, isba;
        struct ieee80211_tx_rate rates[4];
        struct ath_frame_info *fi;
        int nframes;
        u8 tidno;
        bool flush = !!(ts->ts_status & ATH9K_TX_FLUSH);
        int i, retries;
        int bar_index = -1;

        skb = bf->bf_mpdu;
        hdr = (struct ieee80211_hdr *)skb->data;

        tx_info = IEEE80211_SKB_CB(skb);

        memcpy(rates, tx_info->control.rates, sizeof(rates));

        retries = ts->ts_longretry + 1;
        for (i = 0; i < ts->ts_rateindex; i++)
                retries += rates[i].count;

        rcu_read_lock();

        sta = ieee80211_find_sta_by_ifaddr(hw, hdr->addr1, hdr->addr2);
        if (!sta) {
                rcu_read_unlock();

                INIT_LIST_HEAD(&bf_head);
                while (bf) {
                        bf_next = bf->bf_next;

                        if (!bf->bf_stale || bf_next != NULL)
                                list_move_tail(&bf->list, &bf_head);

                        ath_tx_complete_buf(sc, bf, txq, &bf_head, ts, 0);

                        bf = bf_next;
                }
                return;
        }

        an = (struct ath_node *)sta->drv_priv;
        tidno = ieee80211_get_qos_ctl(hdr)[0] & IEEE80211_QOS_CTL_TID_MASK;
        tid = ATH_AN_2_TID(an, tidno);
        seq_first = tid->seq_start;
        isba = ts->ts_flags & ATH9K_TX_BA;

        /*
         * The hardware occasionally sends a tx status for the wrong TID.
         * In this case, the BA status cannot be considered valid and all
         * subframes need to be retransmitted.
         *
         * Only BlockAcks have a TID and therefore normal Acks cannot be
         * checked.
         */
        if (isba && tidno != ts->tid)
                txok = false;

        isaggr = bf_isaggr(bf);
        memset(ba, 0, WME_BA_BMP_SIZE >> 3);

        if (isaggr && txok) {
                if (ts->ts_flags & ATH9K_TX_BA) {
                        seq_st = ts->ts_seqnum;
                        memcpy(ba, &ts->ba_low, WME_BA_BMP_SIZE >> 3);
                } else {
                        /*
                         * AR5416 can become deaf/mute when a BA
                         * issue happens. The chip needs to be reset.
                         * But AP code may have synchronization issues
                         * when performing an internal reset in this
                         * routine. Only enable reset in STA mode for now.
                         */
                        if (sc->sc_ah->opmode == NL80211_IFTYPE_STATION)
                                needreset = 1;
                }
        }

        __skb_queue_head_init(&bf_pending);

        ath_tx_count_frames(sc, bf, ts, txok, &nframes, &nbad);
        while (bf) {
                u16 seqno = bf->bf_state.seqno;

                txfail = txpending = sendbar = 0;
                bf_next = bf->bf_next;

                skb = bf->bf_mpdu;
                tx_info = IEEE80211_SKB_CB(skb);
                fi = get_frame_info(skb);

                if (ATH_BA_ISSET(ba, ATH_BA_INDEX(seq_st, seqno))) {
                        /* transmit completion, subframe is
                         * acked by block ack */
                        acked_cnt++;
                } else if (!isaggr && txok) {
                        /* transmit completion */
                        acked_cnt++;
                } else if ((tid->state & AGGR_CLEANUP) || !retry) {
                        /*
                         * cleanup in progress, just fail
                         * the un-acked sub-frames
                         */
                        txfail = 1;
                } else if (flush) {
                        txpending = 1;
                } else if (fi->retries < ATH_MAX_SW_RETRIES) {
                        if (txok || !an->sleeping)
                                ath_tx_set_retry(sc, txq, bf->bf_mpdu,
                                                 retries);

                        txpending = 1;
                } else {
                        txfail = 1;
                        txfail_cnt++;
                        bar_index = max_t(int, bar_index,
                                ATH_BA_INDEX(seq_first, seqno));
                }

                /*
                 * Make sure the last desc is reclaimed if it
                 * is not a holding desc.
                 */
                INIT_LIST_HEAD(&bf_head);
                if ((sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) ||
                    bf_next != NULL || !bf_last->bf_stale)
                        list_move_tail(&bf->list, &bf_head);

                if (!txpending || (tid->state & AGGR_CLEANUP)) {
                        /*
                         * complete the acked-ones/xretried ones; update
                         * block-ack window
                         */
                        ath_tx_update_baw(sc, tid, seqno);

                        if (rc_update && (acked_cnt == 1 || txfail_cnt == 1)) {
                                memcpy(tx_info->control.rates, rates, sizeof(rates));
                                ath_tx_rc_status(sc, bf, ts, nframes, nbad, txok);
                                rc_update = false;
                        }

                        ath_tx_complete_buf(sc, bf, txq, &bf_head, ts,
                                            !txfail);
                } else {
                        /* retry the un-acked ones */
                        if (!(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) &&
                            bf->bf_next == NULL && bf_last->bf_stale) {
                                struct ath_buf *tbf;

                                tbf = ath_clone_txbuf(sc, bf_last);
                                /*
                                 * Update tx baw and complete the
                                 * frame with failed status if we
                                 * run out of tx buffers.
                                 */
                                if (!tbf) {
                                        ath_tx_update_baw(sc, tid, seqno);

                                        ath_tx_complete_buf(sc, bf, txq,
                                                            &bf_head, ts, 0);
                                        bar_index = max_t(int, bar_index,
                                                ATH_BA_INDEX(seq_first, seqno));
                                        break;
                                }

                                fi->bf = tbf;
                        }

                        /*
                         * Put this buffer to the temporary pending
                         * queue to retain ordering
                         */
                        __skb_queue_tail(&bf_pending, skb);
                }

                bf = bf_next;
        }

        /* prepend un-acked frames to the beginning of the pending frame queue */
        if (!skb_queue_empty(&bf_pending)) {
                if (an->sleeping)
                        ieee80211_sta_set_buffered(sta, tid->tidno, true);

                skb_queue_splice(&bf_pending, &tid->buf_q);
                if (!an->sleeping) {
                        ath_tx_queue_tid(txq, tid);

                        if (ts->ts_status & (ATH9K_TXERR_FILT | ATH9K_TXERR_XRETRY))
                                tid->ac->clear_ps_filter = true;
                }
        }

        if (bar_index >= 0) {
                u16 bar_seq = ATH_BA_INDEX2SEQ(seq_first, bar_index);

                if (BAW_WITHIN(tid->seq_start, tid->baw_size, bar_seq))
                        tid->bar_index = ATH_BA_INDEX(tid->seq_start, bar_seq);

                ath_txq_unlock(sc, txq);
                ath_send_bar(tid, ATH_BA_INDEX2SEQ(seq_first, bar_index + 1));
                ath_txq_lock(sc, txq);
        }

        if (tid->state & AGGR_CLEANUP)
                ath_tx_flush_tid(sc, tid);

        rcu_read_unlock();

        if (needreset)
                ath9k_queue_reset(sc, RESET_TYPE_TX_ERROR);
}

static bool ath_lookup_legacy(struct ath_buf *bf)
{
        struct sk_buff *skb;
        struct ieee80211_tx_info *tx_info;
        struct ieee80211_tx_rate *rates;
        int i;

        skb = bf->bf_mpdu;
        tx_info = IEEE80211_SKB_CB(skb);
        rates = tx_info->control.rates;

        for (i = 0; i < 4; i++) {
                if (!rates[i].count || rates[i].idx < 0)
                        break;

                if (!(rates[i].flags & IEEE80211_TX_RC_MCS))
                        return true;
        }

        return false;
}

static u32 ath_lookup_rate(struct ath_softc *sc, struct ath_buf *bf,
                           struct ath_atx_tid *tid)
{
        struct sk_buff *skb;
        struct ieee80211_tx_info *tx_info;
        struct ieee80211_tx_rate *rates;
        u32 max_4ms_framelen, frmlen;
        u16 aggr_limit, bt_aggr_limit, legacy = 0;
        int q = tid->ac->txq->mac80211_qnum;
        int i;

        skb = bf->bf_mpdu;
        tx_info = IEEE80211_SKB_CB(skb);
        rates = tx_info->control.rates;

        /*
         * Find the lowest frame length among the rate series that will have a
         * 4ms (or TXOP limited) transmit duration.
         */
        max_4ms_framelen = ATH_AMPDU_LIMIT_MAX;

        for (i = 0; i < 4; i++) {
                int modeidx;

                if (!rates[i].count)
                        continue;

                if (!(rates[i].flags & IEEE80211_TX_RC_MCS)) {
                        legacy = 1;
                        break;
                }

                if (rates[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
                        modeidx = MCS_HT40;
                else
                        modeidx = MCS_HT20;

                if (rates[i].flags & IEEE80211_TX_RC_SHORT_GI)
                        modeidx++;

                frmlen = sc->tx.max_aggr_framelen[q][modeidx][rates[i].idx];
                max_4ms_framelen = min(max_4ms_framelen, frmlen);
        }

        /*
         * Limit aggregate size by the minimum rate if the selected rate is
         * not a probe rate; if the selected rate is a probe rate, avoid
         * aggregating this packet.
         */
        if (tx_info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE || legacy)
                return 0;

        aggr_limit = min(max_4ms_framelen, (u32)ATH_AMPDU_LIMIT_MAX);

        /*
         * Override the default aggregation limit for BTCOEX.
         */
        bt_aggr_limit = ath9k_btcoex_aggr_limit(sc, max_4ms_framelen);
        if (bt_aggr_limit)
                aggr_limit = bt_aggr_limit;

        /*
         * h/w can accept aggregates up to 16-bit lengths (65535).
         * The IE, however, can hold up to 65536, which shows up here
         * as zero. Ignore 65536 since we are constrained by hw.
         */
        if (tid->an->maxampdu)
                aggr_limit = min(aggr_limit, tid->an->maxampdu);

        return aggr_limit;
}

/*
 * Returns the number of delimiters to be added to
 * meet the minimum required mpdudensity.
 */
static int ath_compute_num_delims(struct ath_softc *sc, struct ath_atx_tid *tid,
                                  struct ath_buf *bf, u16 frmlen,
                                  bool first_subfrm)
{
#define FIRST_DESC_NDELIMS 60
        struct sk_buff *skb = bf->bf_mpdu;
        struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
        u32 nsymbits, nsymbols;
        u16 minlen;
        u8 flags, rix;
        int width, streams, half_gi, ndelim, mindelim;
        struct ath_frame_info *fi = get_frame_info(bf->bf_mpdu);

        /* Select standard number of delimiters based on frame length alone */
        ndelim = ATH_AGGR_GET_NDELIM(frmlen);

        /*
         * If encryption is enabled, hardware requires some more padding
         * between subframes.
         * TODO - this could be improved to be dependent on the rate.
         *        The hardware can keep up at lower rates, but not higher rates
         */
        if ((fi->keyix != ATH9K_TXKEYIX_INVALID) &&
            !(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA))
                ndelim += ATH_AGGR_ENCRYPTDELIM;

        /*
         * Add delimiter when using RTS/CTS with aggregation
         * on non-enterprise AR9003 cards
         */
        if (first_subfrm && !AR_SREV_9580_10_OR_LATER(sc->sc_ah) &&
            (sc->sc_ah->ent_mode & AR_ENT_OTP_MIN_PKT_SIZE_DISABLE))
                ndelim = max(ndelim, FIRST_DESC_NDELIMS);

        /*
         * Convert desired mpdu density from microseconds to bytes based
         * on highest rate in rate series (i.e. first rate) to determine
         * required minimum length for subframe. Take into account
         * whether the high rate is 20 or 40 MHz and half or full GI.
         *
         * If there is no mpdu density restriction, no further calculation
         * is needed.
         */

        if (tid->an->mpdudensity == 0)
                return ndelim;

        rix = tx_info->control.rates[0].idx;
        flags = tx_info->control.rates[0].flags;
        width = (flags & IEEE80211_TX_RC_40_MHZ_WIDTH) ? 1 : 0;
        half_gi = (flags & IEEE80211_TX_RC_SHORT_GI) ? 1 : 0;

        if (half_gi)
                nsymbols = NUM_SYMBOLS_PER_USEC_HALFGI(tid->an->mpdudensity);
        else
                nsymbols = NUM_SYMBOLS_PER_USEC(tid->an->mpdudensity);

        if (nsymbols == 0)
                nsymbols = 1;

        streams = HT_RC_2_STREAMS(rix);
        nsymbits = bits_per_symbol[rix % 8][width] * streams;
        minlen = (nsymbols * nsymbits) / BITS_PER_BYTE;

        if (frmlen < minlen) {
                mindelim = (minlen - frmlen) / ATH_AGGR_DELIM_SZ;
                ndelim = max(mindelim, ndelim);
        }

        return ndelim;
}
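
/*
 * Illustrative example (not in the original source, and assuming a
 * 4-byte ATH_AGGR_DELIM_SZ): with an MPDU density of 8 us, full GI
 * and MCS 7 on 20 MHz (one stream), nsymbols = NUM_SYMBOLS_PER_USEC(8)
 * = 2 and nsymbits = 260, so minlen = (2 * 260) / 8 = 65 bytes; a
 * 37-byte subframe would then need (65 - 37) / 4 = 7 extra delimiters.
 */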

static enum ATH_AGGR_STATUS ath_tx_form_aggr(struct ath_softc *sc,
                                             struct ath_txq *txq,
                                             struct ath_atx_tid *tid,
                                             struct list_head *bf_q,
                                             int *aggr_len)
{
#define PADBYTES(_len) ((4 - ((_len) % 4)) % 4)
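        /*
         * Illustrative note (not in the original source): PADBYTES
         * rounds a subframe length up to the next 4-byte boundary,
         * e.g. PADBYTES(37) = (4 - (37 % 4)) % 4 = 3 and
         * PADBYTES(40) = 0.
         */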
        struct ath_buf *bf, *bf_first = NULL, *bf_prev = NULL;
        int rl = 0, nframes = 0, ndelim, prev_al = 0;
        u16 aggr_limit = 0, al = 0, bpad = 0,
            al_delta, h_baw = tid->baw_size / 2;
        enum ATH_AGGR_STATUS status = ATH_AGGR_DONE;
        struct ieee80211_tx_info *tx_info;
        struct ath_frame_info *fi;
        struct sk_buff *skb;
        u16 seqno;

        do {
                skb = skb_peek(&tid->buf_q);
                fi = get_frame_info(skb);
                bf = fi->bf;
                if (!fi->bf)
                        bf = ath_tx_setup_buffer(sc, txq, tid, skb);

                if (!bf) {
                        __skb_unlink(skb, &tid->buf_q);
                        ieee80211_free_txskb(sc->hw, skb);
                        continue;
                }

                bf->bf_state.bf_type = BUF_AMPDU | BUF_AGGR;
                seqno = bf->bf_state.seqno;

                /* do not step over block-ack window */
                if (!BAW_WITHIN(tid->seq_start, tid->baw_size, seqno)) {
                        status = ATH_AGGR_BAW_CLOSED;
                        break;
                }

                if (tid->bar_index > ATH_BA_INDEX(tid->seq_start, seqno)) {
                        struct ath_tx_status ts = {};
                        struct list_head bf_head;

                        INIT_LIST_HEAD(&bf_head);
                        list_add(&bf->list, &bf_head);
                        __skb_unlink(skb, &tid->buf_q);
                        ath_tx_update_baw(sc, tid, seqno);
                        ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0);
                        continue;
                }

                if (!bf_first)
                        bf_first = bf;

                if (!rl) {
                        aggr_limit = ath_lookup_rate(sc, bf, tid);
                        rl = 1;
                }

                /* do not exceed aggregation limit */
                al_delta = ATH_AGGR_DELIM_SZ + fi->framelen;

                if (nframes &&
                    ((aggr_limit < (al + bpad + al_delta + prev_al)) ||
                     ath_lookup_legacy(bf))) {
                        status = ATH_AGGR_LIMITED;
                        break;
                }

                tx_info = IEEE80211_SKB_CB(bf->bf_mpdu);
                if (nframes && (tx_info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE))
                        break;

                /* do not exceed subframe limit */
                if (nframes >= min((int)h_baw, ATH_AMPDU_SUBFRAME_DEFAULT)) {
                        status = ATH_AGGR_LIMITED;
                        break;
                }

                /* add padding for previous frame to aggregation length */
                al += bpad + al_delta;

                /*
                 * Get the delimiters needed to meet the MPDU
                 * density for this node.
                 */
                ndelim = ath_compute_num_delims(sc, tid, bf_first, fi->framelen,
                                                !nframes);
                bpad = PADBYTES(al_delta) + (ndelim << 2);

                nframes++;
                bf->bf_next = NULL;

                /* link buffers of this frame to the aggregate */
                if (!fi->retries)
                        ath_tx_addto_baw(sc, tid, seqno);
                bf->bf_state.ndelim = ndelim;

                __skb_unlink(skb, &tid->buf_q);
                list_add_tail(&bf->list, bf_q);
                if (bf_prev)
                        bf_prev->bf_next = bf;

                bf_prev = bf;

        } while (!skb_queue_empty(&tid->buf_q));

        *aggr_len = al;

        return status;
#undef PADBYTES
}

/*
 * rix - rate index
 * pktlen - total bytes (delims + data + fcs + pads + pad delims)
 * width  - 0 for 20 MHz, 1 for 40 MHz
 * half_gi - whether to use the 3.6 us (half-GI) or 4 us symbol time
 */
static u32 ath_pkt_duration(struct ath_softc *sc, u8 rix, int pktlen,
                            int width, int half_gi, bool shortPreamble)
{
        u32 nbits, nsymbits, duration, nsymbols;
        int streams;

        /* find number of symbols: PLCP + data */
        streams = HT_RC_2_STREAMS(rix);
        nbits = (pktlen << 3) + OFDM_PLCP_BITS;
        nsymbits = bits_per_symbol[rix % 8][width] * streams;
        nsymbols = (nbits + nsymbits - 1) / nsymbits;

        if (!half_gi)
                duration = SYMBOL_TIME(nsymbols);
        else
                duration = SYMBOL_TIME_HALFGI(nsymbols);

        /* add up the duration for the legacy/HT training and signal fields */
        duration += L_STF + L_LTF + L_SIG + HT_SIG + HT_STF + HT_LTF(streams);

        return duration;
}
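
/*
 * Illustrative example (not in the original source): a 1500-byte MPDU
 * at MCS 7 (one stream), 20 MHz, full GI gives
 * nbits = 1500 * 8 + 22 = 12022, nsymbits = 260 and
 * nsymbols = ceil(12022 / 260) = 47, so the data portion lasts
 * SYMBOL_TIME(47) = 188 us before the training/signal fields are added.
 */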

static int ath_max_framelen(int usec, int mcs, bool ht40, bool sgi)
{
        int streams = HT_RC_2_STREAMS(mcs);
        int symbols, bits;
        int bytes = 0;

        symbols = sgi ? TIME_SYMBOLS_HALFGI(usec) : TIME_SYMBOLS(usec);
        bits = symbols * bits_per_symbol[mcs % 8][ht40] * streams;
        bits -= OFDM_PLCP_BITS;
        bytes = bits / 8;
        bytes -= L_STF + L_LTF + L_SIG + HT_SIG + HT_STF + HT_LTF(streams);
        if (bytes > 65532)
                bytes = 65532;

        return bytes;
}

void ath_update_max_aggr_framelen(struct ath_softc *sc, int queue, int txop)
{
        u16 *cur_ht20, *cur_ht20_sgi, *cur_ht40, *cur_ht40_sgi;
        int mcs;

        /* 4ms is the default (and maximum) duration */
        if (!txop || txop > 4096)
                txop = 4096;

        cur_ht20 = sc->tx.max_aggr_framelen[queue][MCS_HT20];
        cur_ht20_sgi = sc->tx.max_aggr_framelen[queue][MCS_HT20_SGI];
        cur_ht40 = sc->tx.max_aggr_framelen[queue][MCS_HT40];
        cur_ht40_sgi = sc->tx.max_aggr_framelen[queue][MCS_HT40_SGI];
        for (mcs = 0; mcs < 32; mcs++) {
                cur_ht20[mcs] = ath_max_framelen(txop, mcs, false, false);
                cur_ht20_sgi[mcs] = ath_max_framelen(txop, mcs, false, true);
                cur_ht40[mcs] = ath_max_framelen(txop, mcs, true, false);
                cur_ht40_sgi[mcs] = ath_max_framelen(txop, mcs, true, true);
        }
}
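
/*
 * Illustrative example (not in the original source): for
 * txop = 4096 us at MCS 7, 20 MHz, full GI (one stream),
 * TIME_SYMBOLS(4096) = 1024 symbols carry 1024 * 260 = 266240 bits,
 * i.e. roughly 33 KB, so the 65532-byte clamp only takes effect for
 * the faster multi-stream MCS rates.
 */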

static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf,
                             struct ath_tx_info *info, int len)
{
        struct ath_hw *ah = sc->sc_ah;
        struct sk_buff *skb;
        struct ieee80211_tx_info *tx_info;
        struct ieee80211_tx_rate *rates;
        const struct ieee80211_rate *rate;
        struct ieee80211_hdr *hdr;
        struct ath_frame_info *fi = get_frame_info(bf->bf_mpdu);
        int i;
        u8 rix = 0;

        skb = bf->bf_mpdu;
        tx_info = IEEE80211_SKB_CB(skb);
        rates = tx_info->control.rates;
        hdr = (struct ieee80211_hdr *)skb->data;

        /* set dur_update_en for l-sig computation except for PS-Poll frames */
        info->dur_update = !ieee80211_is_pspoll(hdr->frame_control);
        info->rtscts_rate = fi->rtscts_rate;

        for (i = 0; i < 4; i++) {
                bool is_40, is_sgi, is_sp;
                int phy;

                if (!rates[i].count || (rates[i].idx < 0))
                        continue;

                rix = rates[i].idx;
                info->rates[i].Tries = rates[i].count;

                if (rates[i].flags & IEEE80211_TX_RC_USE_RTS_CTS) {
                        info->rates[i].RateFlags |= ATH9K_RATESERIES_RTS_CTS;
                        info->flags |= ATH9K_TXDESC_RTSENA;
                } else if (rates[i].flags & IEEE80211_TX_RC_USE_CTS_PROTECT) {
                        info->rates[i].RateFlags |= ATH9K_RATESERIES_RTS_CTS;
                        info->flags |= ATH9K_TXDESC_CTSENA;
                }

                if (rates[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
                        info->rates[i].RateFlags |= ATH9K_RATESERIES_2040;
                if (rates[i].flags & IEEE80211_TX_RC_SHORT_GI)
                        info->rates[i].RateFlags |= ATH9K_RATESERIES_HALFGI;

                is_sgi = !!(rates[i].flags & IEEE80211_TX_RC_SHORT_GI);
                is_40 = !!(rates[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH);
                is_sp = !!(rates[i].flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE);

                if (rates[i].flags & IEEE80211_TX_RC_MCS) {
                        /* MCS rates */
                        info->rates[i].Rate = rix | 0x80;
                        info->rates[i].ChSel = ath_txchainmask_reduction(sc,
                                        ah->txchainmask, info->rates[i].Rate);
                        info->rates[i].PktDuration = ath_pkt_duration(sc, rix, len,
                                        is_40, is_sgi, is_sp);
                        if (rix < 8 && (tx_info->flags & IEEE80211_TX_CTL_STBC))
                                info->rates[i].RateFlags |= ATH9K_RATESERIES_STBC;
                        continue;
                }

                /* legacy rates */
                rate = &sc->sbands[tx_info->band].bitrates[rates[i].idx];
                if ((tx_info->band == IEEE80211_BAND_2GHZ) &&
                    !(rate->flags & IEEE80211_RATE_ERP_G))
                        phy = WLAN_RC_PHY_CCK;
                else
                        phy = WLAN_RC_PHY_OFDM;

                info->rates[i].Rate = rate->hw_value;
                if (rate->hw_value_short) {
                        if (rates[i].flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE)
                                info->rates[i].Rate |= rate->hw_value_short;
                }

                if (bf->bf_state.bfs_paprd)
                        info->rates[i].ChSel = ah->txchainmask;
                else
                        info->rates[i].ChSel = ath_txchainmask_reduction(sc,
                                        ah->txchainmask, info->rates[i].Rate);

                info->rates[i].PktDuration = ath9k_hw_computetxtime(sc->sc_ah,
                        phy, rate->bitrate * 100, len, rix, is_sp);
        }

        /* For AR5416 - RTS cannot be followed by a frame larger than 8K */
        if (bf_isaggr(bf) && (len > sc->sc_ah->caps.rts_aggr_limit))
                info->flags &= ~ATH9K_TXDESC_RTSENA;

        /* ATH9K_TXDESC_RTSENA and ATH9K_TXDESC_CTSENA are mutually exclusive. */
        if (info->flags & ATH9K_TXDESC_RTSENA)
                info->flags &= ~ATH9K_TXDESC_CTSENA;
}

static enum ath9k_pkt_type get_hw_packet_type(struct sk_buff *skb)
{
        struct ieee80211_hdr *hdr;
        enum ath9k_pkt_type htype;
        __le16 fc;

        hdr = (struct ieee80211_hdr *)skb->data;
        fc = hdr->frame_control;

        if (ieee80211_is_beacon(fc))
                htype = ATH9K_PKT_TYPE_BEACON;
        else if (ieee80211_is_probe_resp(fc))
                htype = ATH9K_PKT_TYPE_PROBE_RESP;
        else if (ieee80211_is_atim(fc))
                htype = ATH9K_PKT_TYPE_ATIM;
        else if (ieee80211_is_pspoll(fc))
                htype = ATH9K_PKT_TYPE_PSPOLL;
        else
                htype = ATH9K_PKT_TYPE_NORMAL;

        return htype;
}

static void ath_tx_fill_desc(struct ath_softc *sc, struct ath_buf *bf,
                             struct ath_txq *txq, int len)
{
        struct ath_hw *ah = sc->sc_ah;
        struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(bf->bf_mpdu);
        struct ath_buf *bf_first = bf;
        struct ath_tx_info info;
        bool aggr = !!(bf->bf_state.bf_type & BUF_AGGR);

        memset(&info, 0, sizeof(info));
        info.is_first = true;
        info.is_last = true;
        info.txpower = MAX_RATE_POWER;
        info.qcu = txq->axq_qnum;

        info.flags = ATH9K_TXDESC_INTREQ;
        if (tx_info->flags & IEEE80211_TX_CTL_NO_ACK)
                info.flags |= ATH9K_TXDESC_NOACK;
        if (tx_info->flags & IEEE80211_TX_CTL_LDPC)
                info.flags |= ATH9K_TXDESC_LDPC;

        ath_buf_set_rate(sc, bf, &info, len);

        if (tx_info->flags & IEEE80211_TX_CTL_CLEAR_PS_FILT)
                info.flags |= ATH9K_TXDESC_CLRDMASK;

        if (bf->bf_state.bfs_paprd)
                info.flags |= (u32) bf->bf_state.bfs_paprd <<
                              ATH9K_TXDESC_PAPRD_S;

        while (bf) {
                struct sk_buff *skb = bf->bf_mpdu;
                struct ath_frame_info *fi = get_frame_info(skb);

                info.type = get_hw_packet_type(skb);
                if (bf->bf_next)
                        info.link = bf->bf_next->bf_daddr;
                else
                        info.link = 0;

                info.buf_addr[0] = bf->bf_buf_addr;
                info.buf_len[0] = skb->len;
                info.pkt_len = fi->framelen;
                info.keyix = fi->keyix;
                info.keytype = fi->keytype;

                if (aggr) {
                        if (bf == bf_first)
                                info.aggr = AGGR_BUF_FIRST;
                        else if (!bf->bf_next)
                                info.aggr = AGGR_BUF_LAST;
                        else
                                info.aggr = AGGR_BUF_MIDDLE;

                        info.ndelim = bf->bf_state.ndelim;
                        info.aggr_len = len;
                }

                ath9k_hw_set_txdesc(ah, bf->bf_desc, &info);
                bf = bf->bf_next;
        }
}

static void ath_tx_sched_aggr(struct ath_softc *sc, struct ath_txq *txq,
                              struct ath_atx_tid *tid)
{
        struct ath_buf *bf;
        enum ATH_AGGR_STATUS status;
        struct ieee80211_tx_info *tx_info;
        struct list_head bf_q;
        int aggr_len;

        do {
                if (skb_queue_empty(&tid->buf_q))
                        return;

                INIT_LIST_HEAD(&bf_q);

                status = ath_tx_form_aggr(sc, txq, tid, &bf_q, &aggr_len);

                /*
                 * no frames picked up to be aggregated;
                 * block-ack window is not open.
                 */
                if (list_empty(&bf_q))
                        break;

                bf = list_first_entry(&bf_q, struct ath_buf, list);
                bf->bf_lastbf = list_entry(bf_q.prev, struct ath_buf, list);
                tx_info = IEEE80211_SKB_CB(bf->bf_mpdu);

                if (tid->ac->clear_ps_filter) {
                        tid->ac->clear_ps_filter = false;
                        tx_info->flags |= IEEE80211_TX_CTL_CLEAR_PS_FILT;
                } else {
                        tx_info->flags &= ~IEEE80211_TX_CTL_CLEAR_PS_FILT;
                }

                /* if only one frame, send as non-aggregate */
                if (bf == bf->bf_lastbf) {
                        aggr_len = get_frame_info(bf->bf_mpdu)->framelen;
                        bf->bf_state.bf_type = BUF_AMPDU;
                } else {
                        TX_STAT_INC(txq->axq_qnum, a_aggr);
                }

                ath_tx_fill_desc(sc, bf, txq, aggr_len);
                ath_tx_txqaddbuf(sc, txq, &bf_q, false);
        } while (txq->axq_ampdu_depth < ATH_AGGR_MIN_QDEPTH &&
                 status != ATH_AGGR_BAW_CLOSED);
}

int ath_tx_aggr_start(struct ath_softc *sc, struct ieee80211_sta *sta,
                      u16 tid, u16 *ssn)
{
        struct ath_atx_tid *txtid;
        struct ath_node *an;
        u8 density;

        an = (struct ath_node *)sta->drv_priv;
        txtid = ATH_AN_2_TID(an, tid);

        if (txtid->state & (AGGR_CLEANUP | AGGR_ADDBA_COMPLETE))
                return -EAGAIN;

        /* update ampdu factor/density, they may have changed. This may happen
         * in HT IBSS when a beacon with HT-info is received after the station
         * has already been added.
         */
        if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_HT) {
                an->maxampdu = 1 << (IEEE80211_HT_MAX_AMPDU_FACTOR +
                                     sta->ht_cap.ampdu_factor);
                density = ath9k_parse_mpdudensity(sta->ht_cap.ampdu_density);
                an->mpdudensity = density;
        }

        txtid->state |= AGGR_ADDBA_PROGRESS;
        txtid->paused = true;
        *ssn = txtid->seq_start = txtid->seq_next;
        txtid->bar_index = -1;

        memset(txtid->tx_buf, 0, sizeof(txtid->tx_buf));
        txtid->baw_head = txtid->baw_tail = 0;

        return 0;
}

void ath_tx_aggr_stop(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid)
{
        struct ath_node *an = (struct ath_node *)sta->drv_priv;
        struct ath_atx_tid *txtid = ATH_AN_2_TID(an, tid);
        struct ath_txq *txq = txtid->ac->txq;

        if (txtid->state & AGGR_CLEANUP)
                return;

        if (!(txtid->state & AGGR_ADDBA_COMPLETE)) {
                txtid->state &= ~AGGR_ADDBA_PROGRESS;
                return;
        }

        ath_txq_lock(sc, txq);
        txtid->paused = true;

        /*
         * If frames are still being transmitted for this TID, they will be
         * cleaned up during tx completion. To prevent race conditions, this
         * TID can only be reused after all in-progress subframes have been
         * completed.
         */
        if (txtid->baw_head != txtid->baw_tail)
                txtid->state |= AGGR_CLEANUP;
        else
                txtid->state &= ~AGGR_ADDBA_COMPLETE;

        ath_tx_flush_tid(sc, txtid);
        ath_txq_unlock_complete(sc, txq);
}

void ath_tx_aggr_sleep(struct ieee80211_sta *sta, struct ath_softc *sc,
                       struct ath_node *an)
{
        struct ath_atx_tid *tid;
        struct ath_atx_ac *ac;
        struct ath_txq *txq;
        bool buffered;
        int tidno;

        for (tidno = 0, tid = &an->tid[tidno];
             tidno < WME_NUM_TID; tidno++, tid++) {

                if (!tid->sched)
                        continue;

                ac = tid->ac;
                txq = ac->txq;

                ath_txq_lock(sc, txq);

                buffered = !skb_queue_empty(&tid->buf_q);

                tid->sched = false;
                list_del(&tid->list);

                if (ac->sched) {
                        ac->sched = false;
                        list_del(&ac->list);
                }

                ath_txq_unlock(sc, txq);

                ieee80211_sta_set_buffered(sta, tidno, buffered);
        }
}

void ath_tx_aggr_wakeup(struct ath_softc *sc, struct ath_node *an)
{
        struct ath_atx_tid *tid;
        struct ath_atx_ac *ac;
        struct ath_txq *txq;
        int tidno;

        for (tidno = 0, tid = &an->tid[tidno];
             tidno < WME_NUM_TID; tidno++, tid++) {

                ac = tid->ac;
                txq = ac->txq;

                ath_txq_lock(sc, txq);
                ac->clear_ps_filter = true;

                if (!skb_queue_empty(&tid->buf_q) && !tid->paused) {
                        ath_tx_queue_tid(txq, tid);
                        ath_txq_schedule(sc, txq);
                }

                ath_txq_unlock_complete(sc, txq);
        }
}

void ath_tx_aggr_resume(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid)
{
        struct ath_atx_tid *txtid;
        struct ath_node *an;

        an = (struct ath_node *)sta->drv_priv;

        txtid = ATH_AN_2_TID(an, tid);
        txtid->baw_size = IEEE80211_MIN_AMPDU_BUF << sta->ht_cap.ampdu_factor;
        txtid->state |= AGGR_ADDBA_COMPLETE;
        txtid->state &= ~AGGR_ADDBA_PROGRESS;
        ath_tx_resume_tid(sc, txtid);
}

/********************/
/* Queue Management */
/********************/

static void ath_txq_drain_pending_buffers(struct ath_softc *sc,
                                          struct ath_txq *txq)
{
        struct ath_atx_ac *ac, *ac_tmp;
        struct ath_atx_tid *tid, *tid_tmp;

        list_for_each_entry_safe(ac, ac_tmp, &txq->axq_acq, list) {
                list_del(&ac->list);
                ac->sched = false;
                list_for_each_entry_safe(tid, tid_tmp, &ac->tid_q, list) {
                        list_del(&tid->list);
                        tid->sched = false;
                        ath_tid_drain(sc, txq, tid);
                }
        }
}

struct ath_txq *ath_txq_setup(struct ath_softc *sc, int qtype, int subtype)
{
        struct ath_hw *ah = sc->sc_ah;
        struct ath9k_tx_queue_info qi;
        static const int subtype_txq_to_hwq[] = {
                [WME_AC_BE] = ATH_TXQ_AC_BE,
                [WME_AC_BK] = ATH_TXQ_AC_BK,
                [WME_AC_VI] = ATH_TXQ_AC_VI,
                [WME_AC_VO] = ATH_TXQ_AC_VO,
        };
        int axq_qnum, i;

        memset(&qi, 0, sizeof(qi));
        qi.tqi_subtype = subtype_txq_to_hwq[subtype];
        qi.tqi_aifs = ATH9K_TXQ_USEDEFAULT;
        qi.tqi_cwmin = ATH9K_TXQ_USEDEFAULT;
        qi.tqi_cwmax = ATH9K_TXQ_USEDEFAULT;
        qi.tqi_physCompBuf = 0;

        /*
         * Enable interrupts only for EOL and DESC conditions.
         * We mark tx descriptors to receive a DESC interrupt
         * when a tx queue gets deep; otherwise waiting for the
         * EOL to reap descriptors. Note that this is done to
         * reduce interrupt load and this only defers reaping
         * descriptors, never transmitting frames. Aside from
         * reducing interrupts this also permits more concurrency.
         * The only potential downside is if the tx queue backs
         * up in which case the top half of the kernel may back up
         * due to a lack of tx descriptors.
         *
         * The UAPSD queue is an exception, since we take a desc-
         * based intr on the EOSP frames.
         */
        if (ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
                qi.tqi_qflags = TXQ_FLAG_TXINT_ENABLE;
        } else {
                if (qtype == ATH9K_TX_QUEUE_UAPSD)
                        qi.tqi_qflags = TXQ_FLAG_TXDESCINT_ENABLE;
                else
                        qi.tqi_qflags = TXQ_FLAG_TXEOLINT_ENABLE |
                                        TXQ_FLAG_TXDESCINT_ENABLE;
        }
        axq_qnum = ath9k_hw_setuptxqueue(ah, qtype, &qi);
        if (axq_qnum == -1) {
                /*
                 * NB: don't print a message, this happens
                 * normally on parts with too few tx queues
                 */
                return NULL;
        }
        if (!ATH_TXQ_SETUP(sc, axq_qnum)) {
                struct ath_txq *txq = &sc->tx.txq[axq_qnum];

                txq->axq_qnum = axq_qnum;
                txq->mac80211_qnum = -1;
                txq->axq_link = NULL;
                __skb_queue_head_init(&txq->complete_q);
                INIT_LIST_HEAD(&txq->axq_q);
                INIT_LIST_HEAD(&txq->axq_acq);
                spin_lock_init(&txq->axq_lock);
                txq->axq_depth = 0;
                txq->axq_ampdu_depth = 0;
                txq->axq_tx_inprogress = false;
                sc->tx.txqsetup |= 1 << axq_qnum;

                txq->txq_headidx = txq->txq_tailidx = 0;
                for (i = 0; i < ATH_TXFIFO_DEPTH; i++)
                        INIT_LIST_HEAD(&txq->txq_fifo[i]);
        }
        return &sc->tx.txq[axq_qnum];
}

int ath_txq_update(struct ath_softc *sc, int qnum,
                   struct ath9k_tx_queue_info *qinfo)
{
        struct ath_hw *ah = sc->sc_ah;
        int error = 0;
        struct ath9k_tx_queue_info qi;

        BUG_ON(sc->tx.txq[qnum].axq_qnum != qnum);

        ath9k_hw_get_txq_props(ah, qnum, &qi);
        qi.tqi_aifs = qinfo->tqi_aifs;
        qi.tqi_cwmin = qinfo->tqi_cwmin;
        qi.tqi_cwmax = qinfo->tqi_cwmax;
        qi.tqi_burstTime = qinfo->tqi_burstTime;
        qi.tqi_readyTime = qinfo->tqi_readyTime;

        if (!ath9k_hw_set_txq_props(ah, qnum, &qi)) {
                ath_err(ath9k_hw_common(sc->sc_ah),
                        "Unable to update hardware queue %u!\n", qnum);
                error = -EIO;
        } else {
                ath9k_hw_resettxqueue(ah, qnum);
        }

        return error;
}

int ath_cabq_update(struct ath_softc *sc)
{
        struct ath9k_tx_queue_info qi;
        struct ath_beacon_config *cur_conf = &sc->cur_beacon_conf;
        int qnum = sc->beacon.cabq->axq_qnum;

        ath9k_hw_get_txq_props(sc->sc_ah, qnum, &qi);
        /*
         * Ensure the readytime % is within the bounds.
         */
        if (sc->config.cabqReadytime < ATH9K_READY_TIME_LO_BOUND)
                sc->config.cabqReadytime = ATH9K_READY_TIME_LO_BOUND;
        else if (sc->config.cabqReadytime > ATH9K_READY_TIME_HI_BOUND)
                sc->config.cabqReadytime = ATH9K_READY_TIME_HI_BOUND;

        qi.tqi_readyTime = (cur_conf->beacon_interval *
                            sc->config.cabqReadytime) / 100;
        ath_txq_update(sc, qnum, &qi);

        return 0;
}
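
/*
 * Illustrative example (not in the original source): with a beacon
 * interval of 100 and cabqReadytime clamped to 10 (percent), the CAB
 * queue ready time becomes (100 * 10) / 100 = 10, i.e. ten percent of
 * the beacon interval.
 */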

static bool bf_is_ampdu_not_probing(struct ath_buf *bf)
{
        struct ieee80211_tx_info *info = IEEE80211_SKB_CB(bf->bf_mpdu);
        return bf_isampdu(bf) && !(info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE);
}

static void ath_drain_txq_list(struct ath_softc *sc, struct ath_txq *txq,
                               struct list_head *list, bool retry_tx)
{
        struct ath_buf *bf, *lastbf;
        struct list_head bf_head;
        struct ath_tx_status ts;

        memset(&ts, 0, sizeof(ts));
        ts.ts_status = ATH9K_TX_FLUSH;
        INIT_LIST_HEAD(&bf_head);

        while (!list_empty(list)) {
                bf = list_first_entry(list, struct ath_buf, list);

                if (bf->bf_stale) {
                        list_del(&bf->list);

                        ath_tx_return_buffer(sc, bf);
                        continue;
                }

                lastbf = bf->bf_lastbf;
                list_cut_position(&bf_head, list, &lastbf->list);

                txq->axq_depth--;
                if (bf_is_ampdu_not_probing(bf))
                        txq->axq_ampdu_depth--;

                if (bf_isampdu(bf))
                        ath_tx_complete_aggr(sc, txq, bf, &bf_head, &ts, 0,
                                             retry_tx);
                else
                        ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0);
        }
}

/*
 * Drain a given TX queue (could be Beacon or Data)
 *
 * This assumes output has been stopped and
 * we do not need to block ath_tx_tasklet.
 */
void ath_draintxq(struct ath_softc *sc, struct ath_txq *txq, bool retry_tx)
{
        ath_txq_lock(sc, txq);

        if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
                int idx = txq->txq_tailidx;

                while (!list_empty(&txq->txq_fifo[idx])) {
                        ath_drain_txq_list(sc, txq, &txq->txq_fifo[idx],
                                           retry_tx);

                        INCR(idx, ATH_TXFIFO_DEPTH);
                }
                txq->txq_tailidx = idx;
        }

        txq->axq_link = NULL;
        txq->axq_tx_inprogress = false;
        ath_drain_txq_list(sc, txq, &txq->axq_q, retry_tx);

        /* flush any pending frames if aggregation is enabled */
        if ((sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_HT) && !retry_tx)
                ath_txq_drain_pending_buffers(sc, txq);

        ath_txq_unlock_complete(sc, txq);
}

bool ath_drain_all_txq(struct ath_softc *sc, bool retry_tx)
{
        struct ath_hw *ah = sc->sc_ah;
        struct ath_common *common = ath9k_hw_common(sc->sc_ah);
        struct ath_txq *txq;
        int i;
        u32 npend = 0;

        if (test_bit(SC_OP_INVALID, &sc->sc_flags))
                return true;

        ath9k_hw_abort_tx_dma(ah);

        /* Check if any queue remains active */
        for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
                if (!ATH_TXQ_SETUP(sc, i))
                        continue;

                if (ath9k_hw_numtxpending(ah, sc->tx.txq[i].axq_qnum))
                        npend |= BIT(i);
        }

        if (npend)
                ath_err(common, "Failed to stop TX DMA, queues=0x%03x!\n", npend);

        for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
                if (!ATH_TXQ_SETUP(sc, i))
                        continue;

                /*
                 * The caller will resume queues with ieee80211_wake_queues.
                 * Mark the queue as not stopped to prevent ath_tx_complete
                 * from waking the queue too early.
                 */
                txq = &sc->tx.txq[i];
                txq->stopped = false;
                ath_draintxq(sc, txq, retry_tx);
        }

        return !npend;
}

void ath_tx_cleanupq(struct ath_softc *sc, struct ath_txq *txq)
{
        ath9k_hw_releasetxqueue(sc->sc_ah, txq->axq_qnum);
        sc->tx.txqsetup &= ~(1 << txq->axq_qnum);
}

/* For each axq_acq entry, for each tid, try to schedule packets
 * for transmit until ampdu_depth has reached min Q depth.
 */
void ath_txq_schedule(struct ath_softc *sc, struct ath_txq *txq)
{
        struct ath_atx_ac *ac, *ac_tmp, *last_ac;
        struct ath_atx_tid *tid, *last_tid;

        if (test_bit(SC_OP_HW_RESET, &sc->sc_flags) ||
            list_empty(&txq->axq_acq) ||
            txq->axq_ampdu_depth >= ATH_AGGR_MIN_QDEPTH)
                return;

        ac = list_first_entry(&txq->axq_acq, struct ath_atx_ac, list);
        last_ac = list_entry(txq->axq_acq.prev, struct ath_atx_ac, list);

        list_for_each_entry_safe(ac, ac_tmp, &txq->axq_acq, list) {
                last_tid = list_entry(ac->tid_q.prev, struct ath_atx_tid, list);
                list_del(&ac->list);
                ac->sched = false;

                while (!list_empty(&ac->tid_q)) {
                        tid = list_first_entry(&ac->tid_q, struct ath_atx_tid,
                                               list);
                        list_del(&tid->list);
                        tid->sched = false;

                        if (tid->paused)
                                continue;

                        ath_tx_sched_aggr(sc, txq, tid);

                        /*
                         * add tid to round-robin queue if more frames
                         * are pending for the tid
                         */
                        if (!skb_queue_empty(&tid->buf_q))
                                ath_tx_queue_tid(txq, tid);

                        if (tid == last_tid ||
                            txq->axq_ampdu_depth >= ATH_AGGR_MIN_QDEPTH)
                                break;
                }

                if (!list_empty(&ac->tid_q) && !ac->sched) {
                        ac->sched = true;
                        list_add_tail(&ac->list, &txq->axq_acq);
                }

                if (ac == last_ac ||
                    txq->axq_ampdu_depth >= ATH_AGGR_MIN_QDEPTH)
                        return;
        }
}

/***********/
/* TX, DMA */
/***********/

/*
 * Insert a chain of ath_buf (descriptors) on a txq and
 * assume the descriptors are already chained together by caller.
 */
static void ath_tx_txqaddbuf(struct ath_softc *sc, struct ath_txq *txq,
                             struct list_head *head, bool internal)
{
        struct ath_hw *ah = sc->sc_ah;
        struct ath_common *common = ath9k_hw_common(ah);
        struct ath_buf *bf, *bf_last;
        bool puttxbuf = false;
        bool edma;

        /*
         * Insert the frame on the outbound list and
         * pass it on to the hardware.
         */

        if (list_empty(head))
                return;

        edma = !!(ah->caps.hw_caps & ATH9K_HW_CAP_EDMA);
        bf = list_first_entry(head, struct ath_buf, list);
        bf_last = list_entry(head->prev, struct ath_buf, list);

        ath_dbg(common, QUEUE, "qnum: %d, txq depth: %d\n",
                txq->axq_qnum, txq->axq_depth);

        if (edma && list_empty(&txq->txq_fifo[txq->txq_headidx])) {
                list_splice_tail_init(head, &txq->txq_fifo[txq->txq_headidx]);
                INCR(txq->txq_headidx, ATH_TXFIFO_DEPTH);
                puttxbuf = true;
        } else {
                list_splice_tail_init(head, &txq->axq_q);

                if (txq->axq_link) {
                        ath9k_hw_set_desc_link(ah, txq->axq_link, bf->bf_daddr);
                        ath_dbg(common, XMIT, "link[%u] (%p)=%llx (%p)\n",
                                txq->axq_qnum, txq->axq_link,
                                ito64(bf->bf_daddr), bf->bf_desc);
                } else if (!edma)
                        puttxbuf = true;

                txq->axq_link = bf_last->bf_desc;
        }

        if (puttxbuf) {
                TX_STAT_INC(txq->axq_qnum, puttxbuf);
                ath9k_hw_puttxbuf(ah, txq->axq_qnum, bf->bf_daddr);
                ath_dbg(common, XMIT, "TXDP[%u] = %llx (%p)\n",
                        txq->axq_qnum, ito64(bf->bf_daddr), bf->bf_desc);
        }

        if (!edma) {
                TX_STAT_INC(txq->axq_qnum, txstart);
                ath9k_hw_txstart(ah, txq->axq_qnum);
        }

        if (!internal) {
                txq->axq_depth++;
                if (bf_is_ampdu_not_probing(bf))
                        txq->axq_ampdu_depth++;
        }
}

static void ath_tx_send_ampdu(struct ath_softc *sc, struct ath_atx_tid *tid,
                              struct sk_buff *skb, struct ath_tx_control *txctl)
{
        struct ath_frame_info *fi = get_frame_info(skb);
        struct list_head bf_head;
        struct ath_buf *bf;

        /*
         * Do not queue to h/w when any of the following conditions is true:
         * - there are pending frames in software queue
         * - the TID is currently paused for ADDBA/BAR request
         * - seqno is not within block-ack window
         * - h/w queue depth exceeds low water mark
         */
        if (!skb_queue_empty(&tid->buf_q) || tid->paused ||
            !BAW_WITHIN(tid->seq_start, tid->baw_size, tid->seq_next) ||
            txctl->txq->axq_ampdu_depth >= ATH_AGGR_MIN_QDEPTH) {
                /*
                 * Add this frame to software queue for scheduling later
                 * for aggregation.
                 */
                TX_STAT_INC(txctl->txq->axq_qnum, a_queued_sw);
                __skb_queue_tail(&tid->buf_q, skb);
                if (!txctl->an || !txctl->an->sleeping)
                        ath_tx_queue_tid(txctl->txq, tid);
                return;
        }

        bf = ath_tx_setup_buffer(sc, txctl->txq, tid, skb);
        if (!bf) {
                ieee80211_free_txskb(sc->hw, skb);
                return;
        }

        bf->bf_state.bf_type = BUF_AMPDU;
        INIT_LIST_HEAD(&bf_head);
        list_add(&bf->list, &bf_head);

        /* Add sub-frame to BAW */
        ath_tx_addto_baw(sc, tid, bf->bf_state.seqno);

        /* Queue to h/w without aggregation */
        TX_STAT_INC(txctl->txq->axq_qnum, a_queued_hw);
        bf->bf_lastbf = bf;
        ath_tx_fill_desc(sc, bf, txctl->txq, fi->framelen);
        ath_tx_txqaddbuf(sc, txctl->txq, &bf_head, false);
}

static void ath_tx_send_normal(struct ath_softc *sc, struct ath_txq *txq,
                               struct ath_atx_tid *tid, struct sk_buff *skb)
{
        struct ath_frame_info *fi = get_frame_info(skb);
        struct list_head bf_head;
        struct ath_buf *bf;

        bf = fi->bf;

        INIT_LIST_HEAD(&bf_head);
        list_add_tail(&bf->list, &bf_head);
        bf->bf_state.bf_type = 0;

        bf->bf_next = NULL;
        bf->bf_lastbf = bf;
        ath_tx_fill_desc(sc, bf, txq, fi->framelen);
        ath_tx_txqaddbuf(sc, txq, &bf_head, false);
        TX_STAT_INC(txq->axq_qnum, queued);
}

static void setup_frame_info(struct ieee80211_hw *hw,
                             struct ieee80211_sta *sta,
                             struct sk_buff *skb,
                             int framelen)
{
        struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
        struct ieee80211_key_conf *hw_key = tx_info->control.hw_key;
        struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
        const struct ieee80211_rate *rate;
        struct ath_frame_info *fi = get_frame_info(skb);
        struct ath_node *an = NULL;
        enum ath9k_key_type keytype;
        bool short_preamble = false;

        /*
         * We check if Short Preamble is needed for the CTS rate by
         * checking the BSS's global flag.
         * But for the rate series, IEEE80211_TX_RC_USE_SHORT_PREAMBLE is used.
         */
        if (tx_info->control.vif &&
            tx_info->control.vif->bss_conf.use_short_preamble)
                short_preamble = true;

        rate = ieee80211_get_rts_cts_rate(hw, tx_info);
        keytype = ath9k_cmn_get_hw_crypto_keytype(skb);

        if (sta)
                an = (struct ath_node *) sta->drv_priv;

        memset(fi, 0, sizeof(*fi));
        if (hw_key)
                fi->keyix = hw_key->hw_key_idx;
        else if (an && ieee80211_is_data(hdr->frame_control) && an->ps_key > 0)
                fi->keyix = an->ps_key;
        else
                fi->keyix = ATH9K_TXKEYIX_INVALID;
        fi->keytype = keytype;
        fi->framelen = framelen;
        fi->rtscts_rate = rate->hw_value;
        if (short_preamble)
                fi->rtscts_rate |= rate->hw_value_short;
}

u8 ath_txchainmask_reduction(struct ath_softc *sc, u8 chainmask, u32 rate)
{
        struct ath_hw *ah = sc->sc_ah;
        struct ath9k_channel *curchan = ah->curchan;

        if ((ah->caps.hw_caps & ATH9K_HW_CAP_APM) &&
            (curchan->channelFlags & CHANNEL_5GHZ) &&
            (chainmask == 0x7) && (rate < 0x90))
                return 0x3;
        else if (AR_SREV_9462(ah) && ath9k_hw_btcoex_is_enabled(ah) &&
                 IS_CCK_RATE(rate))
                return 0x2;
        else
                return chainmask;
}

/*
 * Assign a descriptor (and sequence number if necessary),
 * and map buffer for DMA. Frees skb on error.
 */
static struct ath_buf *ath_tx_setup_buffer(struct ath_softc *sc,
                                           struct ath_txq *txq,
                                           struct ath_atx_tid *tid,
                                           struct sk_buff *skb)
{
        struct ath_common *common = ath9k_hw_common(sc->sc_ah);
        struct ath_frame_info *fi = get_frame_info(skb);
        struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
        struct ath_buf *bf;
        int fragno;
        u16 seqno;

        bf = ath_tx_get_buffer(sc);
        if (!bf) {
                ath_dbg(common, XMIT, "TX buffers are full\n");
                return NULL;
        }

        ATH_TXBUF_RESET(bf);

        if (tid) {
                fragno = le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_FRAG;
                seqno = tid->seq_next;
                hdr->seq_ctrl = cpu_to_le16(tid->seq_next << IEEE80211_SEQ_SEQ_SHIFT);

                if (fragno)
                        hdr->seq_ctrl |= cpu_to_le16(fragno);

                if (!ieee80211_has_morefrags(hdr->frame_control))
                        INCR(tid->seq_next, IEEE80211_SEQ_MAX);

                bf->bf_state.seqno = seqno;
        }

        bf->bf_mpdu = skb;

        bf->bf_buf_addr = dma_map_single(sc->dev, skb->data,
                                         skb->len, DMA_TO_DEVICE);
        if (unlikely(dma_mapping_error(sc->dev, bf->bf_buf_addr))) {
                bf->bf_mpdu = NULL;
                bf->bf_buf_addr = 0;
                ath_err(ath9k_hw_common(sc->sc_ah),
                        "dma_mapping_error() on TX\n");
                ath_tx_return_buffer(sc, bf);
                return NULL;
        }

        fi->bf = bf;

        return bf;
}

/* FIXME: tx power */
static void ath_tx_start_dma(struct ath_softc *sc, struct sk_buff *skb,
                             struct ath_tx_control *txctl)
{
        struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
        struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
        struct ath_atx_tid *tid = NULL;
        struct ath_buf *bf;
        u8 tidno;

        if ((sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_HT) && txctl->an &&
            ieee80211_is_data_qos(hdr->frame_control)) {
                tidno = ieee80211_get_qos_ctl(hdr)[0] &
                        IEEE80211_QOS_CTL_TID_MASK;
                tid = ATH_AN_2_TID(txctl->an, tidno);

                WARN_ON(tid->ac->txq != txctl->txq);
        }

        if ((tx_info->flags & IEEE80211_TX_CTL_AMPDU) && tid) {
                /*
                 * Try aggregation if it's a unicast data frame
                 * and the destination is HT capable.
                 */
                ath_tx_send_ampdu(sc, tid, skb, txctl);
        } else {
                bf = ath_tx_setup_buffer(sc, txctl->txq, tid, skb);
                if (!bf) {
                        if (txctl->paprd)
                                dev_kfree_skb_any(skb);
                        else
                                ieee80211_free_txskb(sc->hw, skb);
                        return;
                }

                bf->bf_state.bfs_paprd = txctl->paprd;

                if (txctl->paprd)
                        bf->bf_state.bfs_paprd_timestamp = jiffies;

                ath_tx_send_normal(sc, txctl->txq, tid, skb);
        }
}

/* Upon failure caller should free skb */
int ath_tx_start(struct ieee80211_hw *hw, struct sk_buff *skb,
                 struct ath_tx_control *txctl)
{
        struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
        struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
        struct ieee80211_sta *sta = txctl->sta;
        struct ieee80211_vif *vif = info->control.vif;
        struct ath_softc *sc = hw->priv;
        struct ath_txq *txq = txctl->txq;
        int padpos, padsize;
        int frmlen = skb->len + FCS_LEN;
        int q;

        /* NOTE: sta can be NULL according to net/mac80211.h */
        if (sta)
                txctl->an = (struct ath_node *)sta->drv_priv;

        if (info->control.hw_key)
                frmlen += info->control.hw_key->icv_len;

        /*
         * As a temporary workaround, assign seq# here; this will likely need
         * to be cleaned up to work better with Beacon transmission and virtual
         * interfaces.
         */
        if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ) {
                if (info->flags & IEEE80211_TX_CTL_FIRST_FRAGMENT)
                        sc->tx.seq_no += 0x10;
                hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
                hdr->seq_ctrl |= cpu_to_le16(sc->tx.seq_no);
        }

        /* Add the padding after the header if this is not already done */
        padpos = ath9k_cmn_padpos(hdr->frame_control);
        padsize = padpos & 3;
        if (padsize && skb->len > padpos) {
                if (skb_headroom(skb) < padsize)
                        return -ENOMEM;

                skb_push(skb, padsize);
                memmove(skb->data, skb->data + padsize, padpos);
                hdr = (struct ieee80211_hdr *) skb->data;
        }
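
        /*
         * Illustrative example (not in the original source): for a QoS
         * data frame ath9k_cmn_padpos() would return 26 (24-byte header
         * plus 2-byte QoS control), so padsize = 26 & 3 = 2; two bytes
         * are pushed in front and the header is moved up so the payload
         * starts on a 4-byte boundary.
         */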

        if ((vif && vif->type != NL80211_IFTYPE_AP &&
             vif->type != NL80211_IFTYPE_AP_VLAN) ||
            !ieee80211_is_data(hdr->frame_control))
                info->flags |= IEEE80211_TX_CTL_CLEAR_PS_FILT;

        setup_frame_info(hw, sta, skb, frmlen);

        /*
         * At this point, the vif, hw_key and sta pointers in the tx control
         * info are no longer valid (overwritten by the ath_frame_info data).
         */

        q = skb_get_queue_mapping(skb);

        ath_txq_lock(sc, txq);
        if (txq == sc->tx.txq_map[q] &&
            ++txq->pending_frames > sc->tx.txq_max_pending[q] &&
            !txq->stopped) {
                ieee80211_stop_queue(sc->hw, q);
                txq->stopped = true;
        }

        ath_tx_start_dma(sc, skb, txctl);

        ath_txq_unlock(sc, txq);

        return 0;
}

/*****************/
/* TX Completion */
/*****************/

static void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb,
                            int tx_flags, struct ath_txq *txq)
{
        struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
        struct ath_common *common = ath9k_hw_common(sc->sc_ah);
        struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
        int q, padpos, padsize;
        unsigned long flags;

        ath_dbg(common, XMIT, "TX complete: skb: %p\n", skb);

        if (sc->sc_ah->caldata)
                sc->sc_ah->caldata->paprd_packet_sent = true;

        if (!(tx_flags & ATH_TX_ERROR))
                /* Frame was ACKed */
                tx_info->flags |= IEEE80211_TX_STAT_ACK;

        padpos = ath9k_cmn_padpos(hdr->frame_control);
        padsize = padpos & 3;
        if (padsize && skb->len > padpos + padsize) {
                /*
                 * Remove MAC header padding before giving the frame back to
                 * mac80211.
                 */
                memmove(skb->data + padsize, skb->data, padpos);
                skb_pull(skb, padsize);
        }

        spin_lock_irqsave(&sc->sc_pm_lock, flags);
        if ((sc->ps_flags & PS_WAIT_FOR_TX_ACK) && !txq->axq_depth) {
                sc->ps_flags &= ~PS_WAIT_FOR_TX_ACK;
                ath_dbg(common, PS,
                        "Going back to sleep after having received TX status (0x%lx)\n",
                        sc->ps_flags & (PS_WAIT_FOR_BEACON |
                                        PS_WAIT_FOR_CAB |
                                        PS_WAIT_FOR_PSPOLL_DATA |
                                        PS_WAIT_FOR_TX_ACK));
        }
        spin_unlock_irqrestore(&sc->sc_pm_lock, flags);

        q = skb_get_queue_mapping(skb);
        if (txq == sc->tx.txq_map[q]) {
                if (WARN_ON(--txq->pending_frames < 0))
                        txq->pending_frames = 0;

                if (txq->stopped &&
                    txq->pending_frames < sc->tx.txq_max_pending[q]) {
                        ieee80211_wake_queue(sc->hw, q);
                        txq->stopped = false;
                }
        }

        __skb_queue_tail(&txq->complete_q, skb);
}

static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf,
                                struct ath_txq *txq, struct list_head *bf_q,
                                struct ath_tx_status *ts, int txok)
{
        struct sk_buff *skb = bf->bf_mpdu;
        struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
        unsigned long flags;
        int tx_flags = 0;

        if (!txok)
                tx_flags |= ATH_TX_ERROR;

        if (ts->ts_status & ATH9K_TXERR_FILT)
                tx_info->flags |= IEEE80211_TX_STAT_TX_FILTERED;

        dma_unmap_single(sc->dev, bf->bf_buf_addr, skb->len, DMA_TO_DEVICE);
        bf->bf_buf_addr = 0;

        if (bf->bf_state.bfs_paprd) {
                if (time_after(jiffies,
                               bf->bf_state.bfs_paprd_timestamp +
                               msecs_to_jiffies(ATH_PAPRD_TIMEOUT)))
                        dev_kfree_skb_any(skb);
                else
                        complete(&sc->paprd_complete);
        } else {
                ath_debug_stat_tx(sc, bf, ts, txq, tx_flags);
                ath_tx_complete(sc, skb, tx_flags, txq);
        }
        /* At this point, skb (bf->bf_mpdu) is consumed... make sure we don't
         * accidentally reference it later.
         */
        bf->bf_mpdu = NULL;

        /*
         * Return the list of ath_buf of this mpdu to free queue
         */
        spin_lock_irqsave(&sc->tx.txbuflock, flags);
        list_splice_tail_init(bf_q, &sc->tx.txbuf);
        spin_unlock_irqrestore(&sc->tx.txbuflock, flags);
}

static void ath_tx_rc_status(struct ath_softc *sc, struct ath_buf *bf,
                             struct ath_tx_status *ts, int nframes, int nbad,
                             int txok)
{
        struct sk_buff *skb = bf->bf_mpdu;
        struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
        struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
        struct ieee80211_hw *hw = sc->hw;
        struct ath_hw *ah = sc->sc_ah;
        u8 i, tx_rateindex;

        if (txok)
                tx_info->status.ack_signal = ts->ts_rssi;

        tx_rateindex = ts->ts_rateindex;
        WARN_ON(tx_rateindex >= hw->max_rates);

        if (tx_info->flags & IEEE80211_TX_CTL_AMPDU) {
                tx_info->flags |= IEEE80211_TX_STAT_AMPDU;

                BUG_ON(nbad > nframes);
        }
        tx_info->status.ampdu_len = nframes;
        tx_info->status.ampdu_ack_len = nframes - nbad;

        if ((ts->ts_status & ATH9K_TXERR_FILT) == 0 &&
            (tx_info->flags & IEEE80211_TX_CTL_NO_ACK) == 0) {
                /*
                 * If an underrun error is seen, treat it as an excessive
                 * retry only if the max frame trigger level has been reached
                 * (2 KB for single stream, and 4 KB for dual stream).
                 * Adjust the long retry as if the frame was tried
                 * hw->max_rate_tries times to affect how rate control updates
                 * PER for the failed rate.
                 * In case of congestion on the bus penalizing this type of
                 * underruns should help hardware actually transmit new frames
                 * successfully by eventually preferring slower rates.
                 * This itself should also alleviate congestion on the bus.
                 */
                if (unlikely(ts->ts_flags & (ATH9K_TX_DATA_UNDERRUN |
                                             ATH9K_TX_DELIM_UNDERRUN)) &&
                    ieee80211_is_data(hdr->frame_control) &&
                    ah->tx_trig_level >= sc->sc_ah->config.max_txtrig_level)
                        tx_info->status.rates[tx_rateindex].count =
                                hw->max_rate_tries;
        }

        for (i = tx_rateindex + 1; i < hw->max_rates; i++) {
                tx_info->status.rates[i].count = 0;
                tx_info->status.rates[i].idx = -1;
        }

        tx_info->status.rates[tx_rateindex].count = ts->ts_longretry + 1;
}

static void ath_tx_process_buffer(struct ath_softc *sc, struct ath_txq *txq,
                                  struct ath_tx_status *ts, struct ath_buf *bf,
                                  struct list_head *bf_head)
{
        int txok;

        txq->axq_depth--;
        txok = !(ts->ts_status & ATH9K_TXERR_MASK);
        txq->axq_tx_inprogress = false;
        if (bf_is_ampdu_not_probing(bf))
                txq->axq_ampdu_depth--;

        if (!bf_isampdu(bf)) {
                ath_tx_rc_status(sc, bf, ts, 1, txok ? 0 : 1, txok);
                ath_tx_complete_buf(sc, bf, txq, bf_head, ts, txok);
        } else
                ath_tx_complete_aggr(sc, txq, bf, bf_head, ts, txok, true);

        if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_HT)
                ath_txq_schedule(sc, txq);
}

static void ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq)
{
        struct ath_hw *ah = sc->sc_ah;
        struct ath_common *common = ath9k_hw_common(ah);
        struct ath_buf *bf, *lastbf, *bf_held = NULL;
        struct list_head bf_head;
        struct ath_desc *ds;
        struct ath_tx_status ts;
        int status;

        ath_dbg(common, QUEUE, "tx queue %d (%x), link %p\n",
                txq->axq_qnum, ath9k_hw_gettxbuf(sc->sc_ah, txq->axq_qnum),
                txq->axq_link);

        ath_txq_lock(sc, txq);
        for (;;) {
                if (test_bit(SC_OP_HW_RESET, &sc->sc_flags))
                        break;

                if (list_empty(&txq->axq_q)) {
                        txq->axq_link = NULL;
                        if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_HT)
                                ath_txq_schedule(sc, txq);
                        break;
                }
                bf = list_first_entry(&txq->axq_q, struct ath_buf, list);

                /*
                 * There is a race condition that a BH gets scheduled
                 * after sw writes TxE and before hw re-loads the last
                 * descriptor to get the newly chained one.
                 * Software must keep the last DONE descriptor as a
                 * holding descriptor - software does so by marking
                 * it with the STALE flag.
                 */
                bf_held = NULL;
                if (bf->bf_stale) {
                        bf_held = bf;
                        if (list_is_last(&bf_held->list, &txq->axq_q))
                                break;

                        bf = list_entry(bf_held->list.next, struct ath_buf,
                                        list);
                }

                lastbf = bf->bf_lastbf;
                ds = lastbf->bf_desc;

                memset(&ts, 0, sizeof(ts));
                status = ath9k_hw_txprocdesc(ah, ds, &ts);
                if (status == -EINPROGRESS)
                        break;

                TX_STAT_INC(txq->axq_qnum, txprocdesc);

                /*
                 * Remove ath_buf's of the same transmit unit from txq,
                 * however leave the last descriptor back as the holding
                 * descriptor for hw.
                 */
                lastbf->bf_stale = true;
                INIT_LIST_HEAD(&bf_head);
                if (!list_is_singular(&lastbf->list))
                        list_cut_position(&bf_head,
                                &txq->axq_q, lastbf->list.prev);

                if (bf_held) {
                        list_del(&bf_held->list);
                        ath_tx_return_buffer(sc, bf_held);
                }

                ath_tx_process_buffer(sc, txq, &ts, bf, &bf_head);
        }
        ath_txq_unlock_complete(sc, txq);
}

void ath_tx_tasklet(struct ath_softc *sc)
{
        struct ath_hw *ah = sc->sc_ah;
        u32 qcumask = ((1 << ATH9K_NUM_TX_QUEUES) - 1) & ah->intr_txqs;
        int i;

        for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
                if (ATH_TXQ_SETUP(sc, i) && (qcumask & (1 << i)))
                        ath_tx_processq(sc, &sc->tx.txq[i]);
        }
}

void ath_tx_edma_tasklet(struct ath_softc *sc)
{
        struct ath_tx_status ts;
        struct ath_common *common = ath9k_hw_common(sc->sc_ah);
        struct ath_hw *ah = sc->sc_ah;
        struct ath_txq *txq;
        struct ath_buf *bf, *lastbf;
        struct list_head bf_head;
        int status;

        for (;;) {
                if (test_bit(SC_OP_HW_RESET, &sc->sc_flags))
                        break;

                status = ath9k_hw_txprocdesc(ah, NULL, (void *)&ts);
                if (status == -EINPROGRESS)
                        break;
                if (status == -EIO) {
                        ath_dbg(common, XMIT, "Error processing tx status\n");
                        break;
                }

                /* Process beacon completions separately */
                if (ts.qid == sc->beacon.beaconq) {
                        sc->beacon.tx_processed = true;
                        sc->beacon.tx_last = !(ts.ts_status & ATH9K_TXERR_MASK);
                        continue;
                }

                txq = &sc->tx.txq[ts.qid];

                ath_txq_lock(sc, txq);

                if (list_empty(&txq->txq_fifo[txq->txq_tailidx])) {
                        ath_txq_unlock(sc, txq);
                        return;
                }

                bf = list_first_entry(&txq->txq_fifo[txq->txq_tailidx],
                                      struct ath_buf, list);
                lastbf = bf->bf_lastbf;

                INIT_LIST_HEAD(&bf_head);
                list_cut_position(&bf_head, &txq->txq_fifo[txq->txq_tailidx],
                                  &lastbf->list);

                if (list_empty(&txq->txq_fifo[txq->txq_tailidx])) {
                        INCR(txq->txq_tailidx, ATH_TXFIFO_DEPTH);

                        if (!list_empty(&txq->axq_q)) {
                                struct list_head bf_q;

                                INIT_LIST_HEAD(&bf_q);
                                txq->axq_link = NULL;
                                list_splice_tail_init(&txq->axq_q, &bf_q);
                                ath_tx_txqaddbuf(sc, txq, &bf_q, true);
                        }
                }

                ath_tx_process_buffer(sc, txq, &ts, bf, &bf_head);
                ath_txq_unlock_complete(sc, txq);
        }
}

/*****************/
/* Init, Cleanup */
/*****************/

static int ath_txstatus_setup(struct ath_softc *sc, int size)
{
        struct ath_descdma *dd = &sc->txsdma;
        u8 txs_len = sc->sc_ah->caps.txs_len;

        dd->dd_desc_len = size * txs_len;
        dd->dd_desc = dma_alloc_coherent(sc->dev, dd->dd_desc_len,
                                         &dd->dd_desc_paddr, GFP_KERNEL);
        if (!dd->dd_desc)
                return -ENOMEM;

        return 0;
}

static int ath_tx_edma_init(struct ath_softc *sc)
{
        int err;

        err = ath_txstatus_setup(sc, ATH_TXSTATUS_RING_SIZE);
        if (!err)
                ath9k_hw_setup_statusring(sc->sc_ah, sc->txsdma.dd_desc,
                                          sc->txsdma.dd_desc_paddr,
                                          ATH_TXSTATUS_RING_SIZE);

        return err;
}

static void ath_tx_edma_cleanup(struct ath_softc *sc)
{
        struct ath_descdma *dd = &sc->txsdma;

        dma_free_coherent(sc->dev, dd->dd_desc_len, dd->dd_desc,
                          dd->dd_desc_paddr);
}

int ath_tx_init(struct ath_softc *sc, int nbufs)
{
        struct ath_common *common = ath9k_hw_common(sc->sc_ah);
        int error = 0;

        spin_lock_init(&sc->tx.txbuflock);

        error = ath_descdma_setup(sc, &sc->tx.txdma, &sc->tx.txbuf,
                                  "tx", nbufs, 1, 1);
        if (error != 0) {
                ath_err(common,
                        "Failed to allocate tx descriptors: %d\n", error);
                goto err;
        }

        error = ath_descdma_setup(sc, &sc->beacon.bdma, &sc->beacon.bbuf,
                                  "beacon", ATH_BCBUF, 1, 1);
        if (error != 0) {
                ath_err(common,
                        "Failed to allocate beacon descriptors: %d\n", error);
                goto err;
        }

        INIT_DELAYED_WORK(&sc->tx_complete_work, ath_tx_complete_poll_work);

        if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
                error = ath_tx_edma_init(sc);
                if (error)
                        goto err;
        }

err:
        if (error != 0)
                ath_tx_cleanup(sc);

        return error;
}

void ath_tx_cleanup(struct ath_softc *sc)
{
        if (sc->beacon.bdma.dd_desc_len != 0)
                ath_descdma_cleanup(sc, &sc->beacon.bdma, &sc->beacon.bbuf);

        if (sc->tx.txdma.dd_desc_len != 0)
                ath_descdma_cleanup(sc, &sc->tx.txdma, &sc->tx.txbuf);

        if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)
                ath_tx_edma_cleanup(sc);
}

void ath_tx_node_init(struct ath_softc *sc, struct ath_node *an)
{
        struct ath_atx_tid *tid;
        struct ath_atx_ac *ac;
        int tidno, acno;

        for (tidno = 0, tid = &an->tid[tidno];
             tidno < WME_NUM_TID;
             tidno++, tid++) {
                tid->an        = an;
                tid->tidno     = tidno;
                tid->seq_start = tid->seq_next = 0;
                tid->baw_size  = WME_MAX_BA;
                tid->baw_head  = tid->baw_tail = 0;
                tid->sched     = false;
                tid->paused    = false;
                tid->state &= ~AGGR_CLEANUP;
                __skb_queue_head_init(&tid->buf_q);
                acno = TID_TO_WME_AC(tidno);
                tid->ac = &an->ac[acno];
                tid->state &= ~AGGR_ADDBA_COMPLETE;
                tid->state &= ~AGGR_ADDBA_PROGRESS;
        }

        for (acno = 0, ac = &an->ac[acno];
             acno < WME_NUM_AC; acno++, ac++) {
                ac->sched = false;
                ac->txq = sc->tx.txq_map[acno];
                INIT_LIST_HEAD(&ac->tid_q);
        }
}

void ath_tx_node_cleanup(struct ath_softc *sc, struct ath_node *an)
{
        struct ath_atx_ac *ac;
        struct ath_atx_tid *tid;
        struct ath_txq *txq;
        int tidno;

        for (tidno = 0, tid = &an->tid[tidno];
             tidno < WME_NUM_TID; tidno++, tid++) {

                ac = tid->ac;
                txq = ac->txq;

                ath_txq_lock(sc, txq);

                if (tid->sched) {
                        list_del(&tid->list);
                        tid->sched = false;
                }

                if (ac->sched) {
                        list_del(&ac->list);
                        tid->ac->sched = false;
                }

                ath_tid_drain(sc, txq, tid);
                tid->state &= ~AGGR_ADDBA_COMPLETE;
                tid->state &= ~AGGR_CLEANUP;

                ath_txq_unlock(sc, txq);
        }
}