Chromium Code Reviews

Side by Side Diff: chromeos/compat-wireless/drivers/net/wireless/ath/ath9k/xmit.c

Issue 5326002: Update compat-wireless to 2.6.36-5-spn (Closed) Base URL: http://git.chromium.org/git/kernel.git@master
Patch Set: Fixes for !ACK handling, missing local changes, log message fixes Created 10 years ago
1 /* 1 /*
2 * Copyright (c) 2008-2009 Atheros Communications Inc. 2 * Copyright (c) 2008-2009 Atheros Communications Inc.
3 * 3 *
4 * Permission to use, copy, modify, and/or distribute this software for any 4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above 5 * purpose with or without fee is hereby granted, provided that the above
6 * copyright notice and this permission notice appear in all copies. 6 * copyright notice and this permission notice appear in all copies.
7 * 7 *
8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES 8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF 9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR 10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
(...skipping 106 matching lines...)
117 117
118 if (ac->sched) 118 if (ac->sched)
119 return; 119 return;
120 120
121 ac->sched = true; 121 ac->sched = true;
122 list_add_tail(&ac->list, &txq->axq_acq); 122 list_add_tail(&ac->list, &txq->axq_acq);
123 } 123 }
124 124
125 static void ath_tx_resume_tid(struct ath_softc *sc, struct ath_atx_tid *tid) 125 static void ath_tx_resume_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
126 { 126 {
127 » struct ath_txq *txq = &sc->tx.txq[tid->ac->qnum]; 127 » struct ath_txq *txq = tid->ac->txq;
128 128
129 WARN_ON(!tid->paused); 129 WARN_ON(!tid->paused);
130 130
131 spin_lock_bh(&txq->axq_lock); 131 spin_lock_bh(&txq->axq_lock);
132 tid->paused = false; 132 tid->paused = false;
133 133
134 if (list_empty(&tid->buf_q)) 134 if (list_empty(&tid->buf_q))
135 goto unlock; 135 goto unlock;
136 136
137 ath_tx_queue_tid(txq, tid); 137 ath_tx_queue_tid(txq, tid);
138 ath_txq_schedule(sc, txq); 138 ath_txq_schedule(sc, txq);
139 unlock: 139 unlock:
140 spin_unlock_bh(&txq->axq_lock); 140 spin_unlock_bh(&txq->axq_lock);
141 } 141 }
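Note: throughout this patch set the per-AC hardware queue index is replaced by a cached queue pointer. ath_tx_node_init() (near the end of this file) now stores sc->tx.txq_map[acno] in ac->txq, and every former index lookup collapses to a pointer read. A minimal before/after sketch, using only fields visible in this diff:

    /* Before: the AC held an index, callers re-derived the queue each time. */
    struct ath_txq *txq = &sc->tx.txq[tid->ac->qnum];

    /* After: the AC caches the pointer assigned once in ath_tx_node_init(). */
    struct ath_txq *txq = tid->ac->txq;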
142 142
143 static void ath_tx_flush_tid(struct ath_softc *sc, struct ath_atx_tid *tid) 143 static void ath_tx_flush_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
144 { 144 {
145 » struct ath_txq *txq = &sc->tx.txq[tid->ac->qnum]; 145 » struct ath_txq *txq = tid->ac->txq;
146 struct ath_buf *bf; 146 struct ath_buf *bf;
147 struct list_head bf_head; 147 struct list_head bf_head;
148 struct ath_tx_status ts; 148 struct ath_tx_status ts;
149 149
150 INIT_LIST_HEAD(&bf_head); 150 INIT_LIST_HEAD(&bf_head);
151 151
152 memset(&ts, 0, sizeof(ts)); 152 memset(&ts, 0, sizeof(ts));
153 spin_lock_bh(&txq->axq_lock); 153 spin_lock_bh(&txq->axq_lock);
154 154
155 while (!list_empty(&tid->buf_q)) { 155 while (!list_empty(&tid->buf_q)) {
(...skipping 12 matching lines...)
168 } 168 }
169 169
170 static void ath_tx_update_baw(struct ath_softc *sc, struct ath_atx_tid *tid, 170 static void ath_tx_update_baw(struct ath_softc *sc, struct ath_atx_tid *tid,
171 int seqno) 171 int seqno)
172 { 172 {
173 int index, cindex; 173 int index, cindex;
174 174
175 index = ATH_BA_INDEX(tid->seq_start, seqno); 175 index = ATH_BA_INDEX(tid->seq_start, seqno);
176 cindex = (tid->baw_head + index) & (ATH_TID_MAX_BUFS - 1); 176 cindex = (tid->baw_head + index) & (ATH_TID_MAX_BUFS - 1);
177 177
178 » __clear_bit(cindex, tid->tx_buf); 178 » tid->tx_buf[cindex] = NULL;
179 179
180 » while (tid->baw_head != tid->baw_tail && !test_bit(tid->baw_head, tid->tx_buf)) { 180 » while (tid->baw_head != tid->baw_tail && !tid->tx_buf[tid->baw_head]) {
181 INCR(tid->seq_start, IEEE80211_SEQ_MAX); 181 INCR(tid->seq_start, IEEE80211_SEQ_MAX);
182 INCR(tid->baw_head, ATH_TID_MAX_BUFS); 182 INCR(tid->baw_head, ATH_TID_MAX_BUFS);
183 } 183 }
184 } 184 }
185 185
186 static void ath_tx_addto_baw(struct ath_softc *sc, struct ath_atx_tid *tid, 186 static void ath_tx_addto_baw(struct ath_softc *sc, struct ath_atx_tid *tid,
187 struct ath_buf *bf) 187 struct ath_buf *bf)
188 { 188 {
189 int index, cindex; 189 int index, cindex;
190 190
191 if (bf_isretried(bf)) 191 if (bf_isretried(bf))
192 return; 192 return;
193 193
194 index = ATH_BA_INDEX(tid->seq_start, bf->bf_seqno); 194 index = ATH_BA_INDEX(tid->seq_start, bf->bf_seqno);
195 cindex = (tid->baw_head + index) & (ATH_TID_MAX_BUFS - 1); 195 cindex = (tid->baw_head + index) & (ATH_TID_MAX_BUFS - 1);
196 » __set_bit(cindex, tid->tx_buf); 196
197 » BUG_ON(tid->tx_buf[cindex] != NULL);
198 » tid->tx_buf[cindex] = bf;
197 199
198 if (index >= ((tid->baw_tail - tid->baw_head) & 200 if (index >= ((tid->baw_tail - tid->baw_head) &
199 (ATH_TID_MAX_BUFS - 1))) { 201 (ATH_TID_MAX_BUFS - 1))) {
200 tid->baw_tail = cindex; 202 tid->baw_tail = cindex;
201 INCR(tid->baw_tail, ATH_TID_MAX_BUFS); 203 INCR(tid->baw_tail, ATH_TID_MAX_BUFS);
202 } 204 }
203 } 205 }
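Note: the block-ack window (BAW) bookkeeping switches from a bitmap (__set_bit/__clear_bit/test_bit) to an array of ath_buf pointers, which lets ath_tx_addto_baw() catch a double-insert with BUG_ON. An annotated sketch of the sliding-window advance above, assuming ATH_TID_MAX_BUFS is a power of two (required by the & (ATH_TID_MAX_BUFS - 1) masking):

    int index, cindex;

    index  = ATH_BA_INDEX(tid->seq_start, seqno);              /* offset from window start */
    cindex = (tid->baw_head + index) & (ATH_TID_MAX_BUFS - 1); /* circular slot for seqno */

    tid->tx_buf[cindex] = NULL;                                /* frame no longer outstanding */

    /* Slide the window start past every contiguous completed slot. */
    while (tid->baw_head != tid->baw_tail && !tid->tx_buf[tid->baw_head]) {
            INCR(tid->seq_start, IEEE80211_SEQ_MAX);           /* 802.11 sequence window */
            INCR(tid->baw_head, ATH_TID_MAX_BUFS);             /* circular buffer head */
    }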
204 206
205 /* 207 /*
206 * TODO: For frame(s) that are in the retry state, we will reuse the 208 * TODO: For frame(s) that are in the retry state, we will reuse the
(...skipping 103 matching lines...)
310 struct ieee80211_hdr *hdr; 312 struct ieee80211_hdr *hdr;
311 struct ieee80211_tx_info *tx_info; 313 struct ieee80211_tx_info *tx_info;
312 struct ath_atx_tid *tid = NULL; 314 struct ath_atx_tid *tid = NULL;
313 struct ath_buf *bf_next, *bf_last = bf->bf_lastbf; 315 struct ath_buf *bf_next, *bf_last = bf->bf_lastbf;
314 struct list_head bf_head, bf_pending; 316 struct list_head bf_head, bf_pending;
315 u16 seq_st = 0, acked_cnt = 0, txfail_cnt = 0; 317 u16 seq_st = 0, acked_cnt = 0, txfail_cnt = 0;
316 u32 ba[WME_BA_BMP_SIZE >> 5]; 318 u32 ba[WME_BA_BMP_SIZE >> 5];
317 int isaggr, txfail, txpending, sendbar = 0, needreset = 0, nbad = 0; 319 int isaggr, txfail, txpending, sendbar = 0, needreset = 0, nbad = 0;
318 bool rc_update = true; 320 bool rc_update = true;
319 struct ieee80211_tx_rate rates[4]; 321 struct ieee80211_tx_rate rates[4];
322 int nframes;
320 323
321 skb = bf->bf_mpdu; 324 skb = bf->bf_mpdu;
322 hdr = (struct ieee80211_hdr *)skb->data; 325 hdr = (struct ieee80211_hdr *)skb->data;
323 326
324 tx_info = IEEE80211_SKB_CB(skb); 327 tx_info = IEEE80211_SKB_CB(skb);
325 hw = bf->aphy->hw; 328 hw = bf->aphy->hw;
326 329
327 memcpy(rates, tx_info->control.rates, sizeof(rates)); 330 memcpy(rates, tx_info->control.rates, sizeof(rates));
331 nframes = bf->bf_nframes;
328 332
329 rcu_read_lock(); 333 rcu_read_lock();
330 334
331 /* XXX: use ieee80211_find_sta! */ 335 /* XXX: use ieee80211_find_sta! */
332 sta = ieee80211_find_sta_by_hw(hw, hdr->addr1); 336 sta = ieee80211_find_sta_by_hw(hw, hdr->addr1);
333 if (!sta) { 337 if (!sta) {
334 rcu_read_unlock(); 338 rcu_read_unlock();
335 339
336 INIT_LIST_HEAD(&bf_head); 340 INIT_LIST_HEAD(&bf_head);
337 while (bf) { 341 while (bf) {
338 bf_next = bf->bf_next; 342 bf_next = bf->bf_next;
339 343
340 bf->bf_state.bf_type |= BUF_XRETRY; 344 bf->bf_state.bf_type |= BUF_XRETRY;
341 if ((sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) || 345 if ((sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) ||
342 !bf->bf_stale || bf_next != NULL) 346 !bf->bf_stale || bf_next != NULL)
343 list_move_tail(&bf->list, &bf_head); 347 list_move_tail(&bf->list, &bf_head);
344 348
345 » » » ath_tx_rc_status(bf, ts, 0, 0, false); 349 » » » ath_tx_rc_status(bf, ts, 1, 0, false);
346 ath_tx_complete_buf(sc, bf, txq, &bf_head, ts, 350 ath_tx_complete_buf(sc, bf, txq, &bf_head, ts,
347 0, 0); 351 0, 0);
348 352
349 bf = bf_next; 353 bf = bf_next;
350 } 354 }
351 return; 355 return;
352 } 356 }
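Note: this is one of the !ACK fixes from the patch-set title. Judging from the call sites, the third argument of ath_tx_rc_status() is the bad (unacked) frame count, so the station-gone path above now reports 1 instead of 0; the single-frame completion paths later in the file make the same fix with txok ? 0 : 1. Condensed pattern (signature inferred from the call sites, not quoted from a header):

    if (ts.ts_status & ATH9K_TXERR_XRETRY)
            bf->bf_state.bf_type |= BUF_XRETRY;
    /* Report one bad frame when there was no ACK, otherwise rate
     * control sees a clean transmission and never steps down. */
    ath_tx_rc_status(bf, &ts, txok ? 0 : 1, txok, true);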
353 357
354 an = (struct ath_node *)sta->drv_priv; 358 an = (struct ath_node *)sta->drv_priv;
355 tid = ATH_AN_2_TID(an, bf->bf_tidno); 359 tid = ATH_AN_2_TID(an, bf->bf_tidno);
(...skipping 84 matching lines...)
440 /* 444 /*
441 * complete the acked-ones/xretried ones; update 445 * complete the acked-ones/xretried ones; update
442 * block-ack window 446 * block-ack window
443 */ 447 */
444 spin_lock_bh(&txq->axq_lock); 448 spin_lock_bh(&txq->axq_lock);
445 ath_tx_update_baw(sc, tid, bf->bf_seqno); 449 ath_tx_update_baw(sc, tid, bf->bf_seqno);
446 spin_unlock_bh(&txq->axq_lock); 450 spin_unlock_bh(&txq->axq_lock);
447 451
448 if (rc_update && (acked_cnt == 1 || txfail_cnt == 1)) { 452 if (rc_update && (acked_cnt == 1 || txfail_cnt == 1)) {
449 memcpy(tx_info->control.rates, rates, sizeof(rates)); 453 memcpy(tx_info->control.rates, rates, sizeof(rates));
454 bf->bf_nframes = nframes;
450 ath_tx_rc_status(bf, ts, nbad, txok, true); 455 ath_tx_rc_status(bf, ts, nbad, txok, true);
451 rc_update = false; 456 rc_update = false;
452 } else { 457 } else {
453 ath_tx_rc_status(bf, ts, nbad, txok, false); 458 ath_tx_rc_status(bf, ts, nbad, txok, false);
454 } 459 }
455 460
456 ath_tx_complete_buf(sc, bf, txq, &bf_head, ts, 461 ath_tx_complete_buf(sc, bf, txq, &bf_head, ts,
457 !txfail, sendbar); 462 !txfail, sendbar);
458 } else { 463 } else {
459 /* retry the un-acked ones */ 464 /* retry the un-acked ones */
(...skipping 343 matching lines...)
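Note: in the aggregate-completion chunk above, the subframe count is snapshotted into the new nframes local right after the rates are copied, and written back to bf->bf_nframes just before the single rate-control update, presumably because per-subframe completion can change the buffer's frame count in between. Sketch of the pattern:

    memcpy(rates, tx_info->control.rates, sizeof(rates)); /* snapshot rates */
    nframes = bf->bf_nframes;                             /* snapshot aggregate size */
    /* ... per-subframe completion and retry handling ... */
    if (rc_update && (acked_cnt == 1 || txfail_cnt == 1)) {
            memcpy(tx_info->control.rates, rates, sizeof(rates));
            bf->bf_nframes = nframes;                     /* restore before reporting */
            ath_tx_rc_status(bf, ts, nbad, txok, true);
            rc_update = false;
    }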
803 txtid->paused = true; 808 txtid->paused = true;
804 *ssn = txtid->seq_start; 809 *ssn = txtid->seq_start;
805 810
806 return 0; 811 return 0;
807 } 812 }
808 813
809 void ath_tx_aggr_stop(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid) 814 void ath_tx_aggr_stop(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid)
810 { 815 {
811 struct ath_node *an = (struct ath_node *)sta->drv_priv; 816 struct ath_node *an = (struct ath_node *)sta->drv_priv;
812 struct ath_atx_tid *txtid = ATH_AN_2_TID(an, tid); 817 struct ath_atx_tid *txtid = ATH_AN_2_TID(an, tid);
813 » struct ath_txq *txq = &sc->tx.txq[txtid->ac->qnum]; 818 » struct ath_txq *txq = txtid->ac->txq;
814 819
815 if (txtid->state & AGGR_CLEANUP) 820 if (txtid->state & AGGR_CLEANUP)
816 return; 821 return;
817 822
818 if (!(txtid->state & AGGR_ADDBA_COMPLETE)) { 823 if (!(txtid->state & AGGR_ADDBA_COMPLETE)) {
819 txtid->state &= ~AGGR_ADDBA_PROGRESS; 824 txtid->state &= ~AGGR_ADDBA_PROGRESS;
820 return; 825 return;
821 } 826 }
822 827
823 spin_lock_bh(&txq->axq_lock); 828 spin_lock_bh(&txq->axq_lock);
(...skipping 24 matching lines...)
848 if (sc->sc_flags & SC_OP_TXAGGR) { 853 if (sc->sc_flags & SC_OP_TXAGGR) {
849 txtid = ATH_AN_2_TID(an, tid); 854 txtid = ATH_AN_2_TID(an, tid);
850 txtid->baw_size = 855 txtid->baw_size =
851 IEEE80211_MIN_AMPDU_BUF << sta->ht_cap.ampdu_factor; 856 IEEE80211_MIN_AMPDU_BUF << sta->ht_cap.ampdu_factor;
852 txtid->state |= AGGR_ADDBA_COMPLETE; 857 txtid->state |= AGGR_ADDBA_COMPLETE;
853 txtid->state &= ~AGGR_ADDBA_PROGRESS; 858 txtid->state &= ~AGGR_ADDBA_PROGRESS;
854 ath_tx_resume_tid(sc, txtid); 859 ath_tx_resume_tid(sc, txtid);
855 } 860 }
856 } 861 }
857 862
863 bool ath_tx_aggr_check(struct ath_softc *sc, struct ath_node *an, u8 tidno)
864 {
865 struct ath_atx_tid *txtid;
866
867 if (!(sc->sc_flags & SC_OP_TXAGGR))
868 return false;
869
870 txtid = ATH_AN_2_TID(an, tidno);
871
872 if (!(txtid->state & (AGGR_ADDBA_COMPLETE | AGGR_ADDBA_PROGRESS)))
873 return true;
874 return false;
875 }
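Note: ath_tx_aggr_check() is new here; it returns true only when TX aggregation is enabled and no ADDBA exchange is complete or in progress for the TID, i.e. when a new block-ack session is worth starting. A hypothetical caller (not part of this diff) might look like:

    /* Kick off ADDBA the first time a QoS-data frame for this TID
     * is transmitted without an existing BA session. */
    if (ath_tx_aggr_check(sc, an, tidno))
            ieee80211_start_tx_ba_session(sta, tidno);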
876
858 /********************/ 877 /********************/
859 /* Queue Management */ 878 /* Queue Management */
860 /********************/ 879 /********************/
861 880
862 static void ath_txq_drain_pending_buffers(struct ath_softc *sc, 881 static void ath_txq_drain_pending_buffers(struct ath_softc *sc,
863 struct ath_txq *txq) 882 struct ath_txq *txq)
864 { 883 {
865 struct ath_atx_ac *ac, *ac_tmp; 884 struct ath_atx_ac *ac, *ac_tmp;
866 struct ath_atx_tid *tid, *tid_tmp; 885 struct ath_atx_tid *tid, *tid_tmp;
867 886
868 list_for_each_entry_safe(ac, ac_tmp, &txq->axq_acq, list) { 887 list_for_each_entry_safe(ac, ac_tmp, &txq->axq_acq, list) {
869 list_del(&ac->list); 888 list_del(&ac->list);
870 ac->sched = false; 889 ac->sched = false;
871 list_for_each_entry_safe(tid, tid_tmp, &ac->tid_q, list) { 890 list_for_each_entry_safe(tid, tid_tmp, &ac->tid_q, list) {
872 list_del(&tid->list); 891 list_del(&tid->list);
873 tid->sched = false; 892 tid->sched = false;
874 ath_tid_drain(sc, txq, tid); 893 ath_tid_drain(sc, txq, tid);
875 } 894 }
876 } 895 }
877 } 896 }
878 897
879 struct ath_txq *ath_txq_setup(struct ath_softc *sc, int qtype, int subtype) 898 struct ath_txq *ath_txq_setup(struct ath_softc *sc, int qtype, int subtype)
880 { 899 {
881 struct ath_hw *ah = sc->sc_ah; 900 struct ath_hw *ah = sc->sc_ah;
882 struct ath_common *common = ath9k_hw_common(ah); 901 struct ath_common *common = ath9k_hw_common(ah);
883 struct ath9k_tx_queue_info qi; 902 struct ath9k_tx_queue_info qi;
903 static const int subtype_txq_to_hwq[] = {
904 [WME_AC_BE] = ATH_TXQ_AC_BE,
905 [WME_AC_BK] = ATH_TXQ_AC_BK,
906 [WME_AC_VI] = ATH_TXQ_AC_VI,
907 [WME_AC_VO] = ATH_TXQ_AC_VO,
908 };
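Note: the new subtype_txq_to_hwq[] table decouples mac80211's WME access-category numbering from the hardware queue subtype; instead of passing subtype straight through, ath_txq_setup() translates it a few lines below. Under the same assumptions (the table covers all WME_NUM_AC categories), one could guard the lookup; the BUILD_BUG_ON is illustrative, not part of the patch:

    BUILD_BUG_ON(ARRAY_SIZE(subtype_txq_to_hwq) != WME_NUM_AC); /* table covers every AC */
    qi.tqi_subtype = subtype_txq_to_hwq[subtype];               /* AC -> HW queue subtype */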
884 int qnum, i; 909 int qnum, i;
885 910
886 memset(&qi, 0, sizeof(qi)); 911 memset(&qi, 0, sizeof(qi));
887 » qi.tqi_subtype = subtype; 912 » qi.tqi_subtype = subtype_txq_to_hwq[subtype];
888 qi.tqi_aifs = ATH9K_TXQ_USEDEFAULT; 913 qi.tqi_aifs = ATH9K_TXQ_USEDEFAULT;
889 qi.tqi_cwmin = ATH9K_TXQ_USEDEFAULT; 914 qi.tqi_cwmin = ATH9K_TXQ_USEDEFAULT;
890 qi.tqi_cwmax = ATH9K_TXQ_USEDEFAULT; 915 qi.tqi_cwmax = ATH9K_TXQ_USEDEFAULT;
891 qi.tqi_physCompBuf = 0; 916 qi.tqi_physCompBuf = 0;
892 917
893 /* 918 /*
894 * Enable interrupts only for EOL and DESC conditions. 919 * Enable interrupts only for EOL and DESC conditions.
895 * We mark tx descriptors to receive a DESC interrupt 920 * We mark tx descriptors to receive a DESC interrupt
896 * when a tx queue gets deep; otherwise waiting for the 921 * when a tx queue gets deep; otherwise waiting for the
897 * EOL to reap descriptors. Note that this is done to 922 * EOL to reap descriptors. Note that this is done to
(...skipping 28 matching lines...)
926 if (qnum >= ARRAY_SIZE(sc->tx.txq)) { 951 if (qnum >= ARRAY_SIZE(sc->tx.txq)) {
927 ath_print(common, ATH_DBG_FATAL, 952 ath_print(common, ATH_DBG_FATAL,
928 "qnum %u out of range, max %u!\n", 953 "qnum %u out of range, max %u!\n",
929 qnum, (unsigned int)ARRAY_SIZE(sc->tx.txq)); 954 qnum, (unsigned int)ARRAY_SIZE(sc->tx.txq));
930 ath9k_hw_releasetxqueue(ah, qnum); 955 ath9k_hw_releasetxqueue(ah, qnum);
931 return NULL; 956 return NULL;
932 } 957 }
933 if (!ATH_TXQ_SETUP(sc, qnum)) { 958 if (!ATH_TXQ_SETUP(sc, qnum)) {
934 struct ath_txq *txq = &sc->tx.txq[qnum]; 959 struct ath_txq *txq = &sc->tx.txq[qnum];
935 960
936 txq->axq_class = subtype;
937 txq->axq_qnum = qnum; 961 txq->axq_qnum = qnum;
938 txq->axq_link = NULL; 962 txq->axq_link = NULL;
939 INIT_LIST_HEAD(&txq->axq_q); 963 INIT_LIST_HEAD(&txq->axq_q);
940 INIT_LIST_HEAD(&txq->axq_acq); 964 INIT_LIST_HEAD(&txq->axq_acq);
941 spin_lock_init(&txq->axq_lock); 965 spin_lock_init(&txq->axq_lock);
942 txq->axq_depth = 0; 966 txq->axq_depth = 0;
943 txq->axq_tx_inprogress = false; 967 txq->axq_tx_inprogress = false;
944 sc->tx.txqsetup |= 1<<qnum; 968 sc->tx.txqsetup |= 1<<qnum;
945 969
946 txq->txq_headidx = txq->txq_tailidx = 0; 970 txq->txq_headidx = txq->txq_tailidx = 0;
(...skipping 128 matching lines...)
1075 if (bf_isampdu(bf)) 1099 if (bf_isampdu(bf))
1076 ath_tx_complete_aggr(sc, txq, bf, &bf_head, &ts, 0); 1100 ath_tx_complete_aggr(sc, txq, bf, &bf_head, &ts, 0);
1077 else 1101 else
1078 ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0, 0); 1102 ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0, 0);
1079 } 1103 }
1080 1104
1081 spin_lock_bh(&txq->axq_lock); 1105 spin_lock_bh(&txq->axq_lock);
1082 txq->axq_tx_inprogress = false; 1106 txq->axq_tx_inprogress = false;
1083 spin_unlock_bh(&txq->axq_lock); 1107 spin_unlock_bh(&txq->axq_lock);
1084 1108
1085 /* flush any pending frames if aggregation is enabled */
1086 if (sc->sc_flags & SC_OP_TXAGGR) {
1087 if (!retry_tx) {
1088 spin_lock_bh(&txq->axq_lock);
1089 ath_txq_drain_pending_buffers(sc, txq);
1090 spin_unlock_bh(&txq->axq_lock);
1091 }
1092 }
1093
1094 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) { 1109 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
1095 spin_lock_bh(&txq->axq_lock); 1110 spin_lock_bh(&txq->axq_lock);
1096 while (!list_empty(&txq->txq_fifo_pending)) { 1111 while (!list_empty(&txq->txq_fifo_pending)) {
1097 bf = list_first_entry(&txq->txq_fifo_pending, 1112 bf = list_first_entry(&txq->txq_fifo_pending,
1098 struct ath_buf, list); 1113 struct ath_buf, list);
1099 list_cut_position(&bf_head, 1114 list_cut_position(&bf_head,
1100 &txq->txq_fifo_pending, 1115 &txq->txq_fifo_pending,
1101 &bf->bf_lastbf->list); 1116 &bf->bf_lastbf->list);
1102 spin_unlock_bh(&txq->axq_lock); 1117 spin_unlock_bh(&txq->axq_lock);
1103 1118
1104 if (bf_isampdu(bf)) 1119 if (bf_isampdu(bf))
1105 ath_tx_complete_aggr(sc, txq, bf, &bf_head, 1120 ath_tx_complete_aggr(sc, txq, bf, &bf_head,
1106 &ts, 0); 1121 &ts, 0);
1107 else 1122 else
1108 ath_tx_complete_buf(sc, bf, txq, &bf_head, 1123 ath_tx_complete_buf(sc, bf, txq, &bf_head,
1109 &ts, 0, 0); 1124 &ts, 0, 0);
1110 spin_lock_bh(&txq->axq_lock); 1125 spin_lock_bh(&txq->axq_lock);
1111 } 1126 }
1112 spin_unlock_bh(&txq->axq_lock); 1127 spin_unlock_bh(&txq->axq_lock);
1113 } 1128 }
1129
1130 /* flush any pending frames if aggregation is enabled */
1131 if (sc->sc_flags & SC_OP_TXAGGR) {
1132 if (!retry_tx) {
1133 spin_lock_bh(&txq->axq_lock);
1134 ath_txq_drain_pending_buffers(sc, txq);
1135 spin_unlock_bh(&txq->axq_lock);
1136 }
1137 }
1114 } 1138 }
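Note: the drain order changes here: the per-TID pending-buffer flush now runs after the EDMA txq_fifo_pending list is completed, not before. ath_tx_complete_aggr() can splice unacknowledged subframes back onto their TID queues, so draining the TID queues last presumably ensures those requeued frames are not left behind. The resulting order as a skeleton (helper name hypothetical):

    /* 1. Complete frames already handed to the hardware FIFO; this may
     *    requeue unacked A-MPDU subframes onto their TID queues. */
    if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)
            drain_txq_fifo_pending(sc, txq);        /* hypothetical helper */

    /* 2. Only now drain the per-TID buffers (under axq_lock in the real
     *    code), catching anything step 1 put back. */
    if ((sc->sc_flags & SC_OP_TXAGGR) && !retry_tx)
            ath_txq_drain_pending_buffers(sc, txq);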
1115 1139
1116 void ath_drain_all_txq(struct ath_softc *sc, bool retry_tx) 1140 void ath_drain_all_txq(struct ath_softc *sc, bool retry_tx)
1117 { 1141 {
1118 struct ath_hw *ah = sc->sc_ah; 1142 struct ath_hw *ah = sc->sc_ah;
1119 struct ath_common *common = ath9k_hw_common(sc->sc_ah); 1143 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
1120 struct ath_txq *txq; 1144 struct ath_txq *txq;
1121 int i, npend = 0; 1145 int i, npend = 0;
1122 1146
1123 if (sc->sc_flags & SC_OP_INVALID) 1147 if (sc->sc_flags & SC_OP_INVALID)
(...skipping 72 matching lines...)
1196 } while (!list_empty(&ac->tid_q)); 1220 } while (!list_empty(&ac->tid_q));
1197 1221
1198 if (!list_empty(&ac->tid_q)) { 1222 if (!list_empty(&ac->tid_q)) {
1199 if (!ac->sched) { 1223 if (!ac->sched) {
1200 ac->sched = true; 1224 ac->sched = true;
1201 list_add_tail(&ac->list, &txq->axq_acq); 1225 list_add_tail(&ac->list, &txq->axq_acq);
1202 } 1226 }
1203 } 1227 }
1204 } 1228 }
1205 1229
1206 int ath_tx_setup(struct ath_softc *sc, int haltype)
1207 {
1208 struct ath_txq *txq;
1209
1210 if (haltype >= ARRAY_SIZE(sc->tx.hwq_map)) {
1211 ath_print(ath9k_hw_common(sc->sc_ah), ATH_DBG_FATAL,
1212 "HAL AC %u out of range, max %zu!\n",
1213 haltype, ARRAY_SIZE(sc->tx.hwq_map));
1214 return 0;
1215 }
1216 txq = ath_txq_setup(sc, ATH9K_TX_QUEUE_DATA, haltype);
1217 if (txq != NULL) {
1218 sc->tx.hwq_map[haltype] = txq->axq_qnum;
1219 return 1;
1220 } else
1221 return 0;
1222 }
1223
1224 /***********/ 1230 /***********/
1225 /* TX, DMA */ 1231 /* TX, DMA */
1226 /***********/ 1232 /***********/
1227 1233
1228 /* 1234 /*
1229 * Insert a chain of ath_buf (descriptors) on a txq and 1235 * Insert a chain of ath_buf (descriptors) on a txq and
1230 * assume the descriptors are already chained together by caller. 1236 * assume the descriptors are already chained together by caller.
1231 */ 1237 */
1232 static void ath_tx_txqaddbuf(struct ath_softc *sc, struct ath_txq *txq, 1238 static void ath_tx_txqaddbuf(struct ath_softc *sc, struct ath_txq *txq,
1233 struct list_head *head) 1239 struct list_head *head)
(...skipping 484 matching lines...)
1718 if (bf_isht(bf) && (sc->sc_flags & SC_OP_TXAGGR) && 1724 if (bf_isht(bf) && (sc->sc_flags & SC_OP_TXAGGR) &&
1719 tx_info->control.sta) { 1725 tx_info->control.sta) {
1720 an = (struct ath_node *)tx_info->control.sta->drv_priv; 1726 an = (struct ath_node *)tx_info->control.sta->drv_priv;
1721 tid = ATH_AN_2_TID(an, bf->bf_tidno); 1727 tid = ATH_AN_2_TID(an, bf->bf_tidno);
1722 1728
1723 if (!ieee80211_is_data_qos(fc)) { 1729 if (!ieee80211_is_data_qos(fc)) {
1724 ath_tx_send_normal(sc, txctl->txq, &bf_head); 1730 ath_tx_send_normal(sc, txctl->txq, &bf_head);
1725 goto tx_done; 1731 goto tx_done;
1726 } 1732 }
1727 1733
1734 WARN_ON(tid->ac->txq != txctl->txq);
1728 if (tx_info->flags & IEEE80211_TX_CTL_AMPDU) { 1735 if (tx_info->flags & IEEE80211_TX_CTL_AMPDU) {
1729 /* 1736 /*
1730 * Try aggregation if it's a unicast data frame 1737 * Try aggregation if it's a unicast data frame
1731 * and the destination is HT capable. 1738 * and the destination is HT capable.
1732 */ 1739 */
1733 ath_tx_send_ampdu(sc, tid, &bf_head, txctl); 1740 ath_tx_send_ampdu(sc, tid, &bf_head, txctl);
1734 } else { 1741 } else {
1735 /* 1742 /*
1736 * Send this frame as regular when ADDBA 1743 * Send this frame as regular when ADDBA
1737 * exchange is neither complete nor pending. 1744 * exchange is neither complete nor pending.
(...skipping 19 matching lines...)
1757 struct ath_txq *txq = txctl->txq; 1764 struct ath_txq *txq = txctl->txq;
1758 struct ath_buf *bf; 1765 struct ath_buf *bf;
1759 int q, r; 1766 int q, r;
1760 1767
1761 bf = ath_tx_get_buffer(sc); 1768 bf = ath_tx_get_buffer(sc);
1762 if (!bf) { 1769 if (!bf) {
1763 ath_print(common, ATH_DBG_XMIT, "TX buffers are full\n"); 1770 ath_print(common, ATH_DBG_XMIT, "TX buffers are full\n");
1764 return -1; 1771 return -1;
1765 } 1772 }
1766 1773
1774 q = skb_get_queue_mapping(skb);
1767 r = ath_tx_setup_buffer(hw, bf, skb, txctl); 1775 r = ath_tx_setup_buffer(hw, bf, skb, txctl);
1768 if (unlikely(r)) { 1776 if (unlikely(r)) {
1769 ath_print(common, ATH_DBG_FATAL, "TX mem alloc failure\n"); 1777 ath_print(common, ATH_DBG_FATAL, "TX mem alloc failure\n");
1770 1778
1771 /* upon ath_tx_processq() this TX queue will be resumed, we 1779 /* upon ath_tx_processq() this TX queue will be resumed, we
1772 * guarantee this will happen by knowing beforehand that 1780 * guarantee this will happen by knowing beforehand that
1773 * we will at least have to run TX completion on one buffer 1781 * we will at least have to run TX completion on one buffer
1774 * on the queue */ 1782 * on the queue */
1775 spin_lock_bh(&txq->axq_lock); 1783 spin_lock_bh(&txq->axq_lock);
1776 » » if (!txq->stopped && txq->axq_depth > 1) { 1784 » » if (txq == sc->tx.txq_map[q] && !txq->stopped &&
1777 » » » ath_mac80211_stop_queue(sc, skb_get_queue_mapping(skb)); 1785 » » txq->axq_depth > 1) {
1786 » » » ath_mac80211_stop_queue(sc, q);
1778 txq->stopped = 1; 1787 txq->stopped = 1;
1779 } 1788 }
1780 spin_unlock_bh(&txq->axq_lock); 1789 spin_unlock_bh(&txq->axq_lock);
1781 1790
1782 ath_tx_return_buffer(sc, bf); 1791 ath_tx_return_buffer(sc, bf);
1783 1792
1784 return r; 1793 return r;
1785 } 1794 }
1786 1795
1787 q = skb_get_queue_mapping(skb);
1788 if (q >= 4)
1789 q = 0;
1790
1791 spin_lock_bh(&txq->axq_lock); 1796 spin_lock_bh(&txq->axq_lock);
1792 » if (++sc->tx.pending_frames[q] > ATH_MAX_QDEPTH && !txq->stopped) { 1797 » if (txq == sc->tx.txq_map[q] &&
1793 » » ath_mac80211_stop_queue(sc, skb_get_queue_mapping(skb)); 1798 » ++txq->pending_frames > ATH_MAX_QDEPTH && !txq->stopped) {
1799 » » ath_mac80211_stop_queue(sc, q);
1794 txq->stopped = 1; 1800 txq->stopped = 1;
1795 } 1801 }
1796 spin_unlock_bh(&txq->axq_lock); 1802 spin_unlock_bh(&txq->axq_lock);
1797 1803
1798 ath_tx_start_dma(sc, bf, txctl); 1804 ath_tx_start_dma(sc, bf, txctl);
1799 1805
1800 return 0; 1806 return 0;
1801 } 1807 }
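Note: flow control moves from the softc-wide sc->tx.pending_frames[q] array to a per-queue txq->pending_frames counter, and every stop/wake decision is gated on txq == sc->tx.txq_map[q], so only the data queue actually mapped to the mac80211 queue index q participates (internal queues such as the CAB queue presumably stay out of it). The stop side, condensed from the chunk above:

    q = skb_get_queue_mapping(skb);                 /* mac80211 queue index */

    spin_lock_bh(&txq->axq_lock);
    if (txq == sc->tx.txq_map[q] &&                 /* only the mapped data queue */
        ++txq->pending_frames > ATH_MAX_QDEPTH && !txq->stopped) {
            ath_mac80211_stop_queue(sc, q);         /* apply backpressure */
            txq->stopped = 1;
    }
    spin_unlock_bh(&txq->axq_lock);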
1802 1808
1803 void ath_tx_cabq(struct ieee80211_hw *hw, struct sk_buff *skb) 1809 void ath_tx_cabq(struct ieee80211_hw *hw, struct sk_buff *skb)
(...skipping 47 matching lines...)
1851 return; 1857 return;
1852 exit: 1858 exit:
1853 dev_kfree_skb_any(skb); 1859 dev_kfree_skb_any(skb);
1854 } 1860 }
1855 1861
1856 /*****************/ 1862 /*****************/
1857 /* TX Completion */ 1863 /* TX Completion */
1858 /*****************/ 1864 /*****************/
1859 1865
1860 static void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb, 1866 static void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb,
1861 » » » struct ath_wiphy *aphy, int tx_flags) 1867 » » » struct ath_wiphy *aphy, int tx_flags,
1868 » » » struct ath_txq *txq)
1862 { 1869 {
1863 struct ieee80211_hw *hw = sc->hw; 1870 struct ieee80211_hw *hw = sc->hw;
1864 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb); 1871 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
1865 struct ath_common *common = ath9k_hw_common(sc->sc_ah); 1872 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
1866 struct ieee80211_hdr * hdr = (struct ieee80211_hdr *)skb->data; 1873 struct ieee80211_hdr * hdr = (struct ieee80211_hdr *)skb->data;
1867 int q, padpos, padsize; 1874 int q, padpos, padsize;
1868 1875
1869 ath_print(common, ATH_DBG_XMIT, "TX complete: skb: %p\n", skb); 1876 ath_print(common, ATH_DBG_XMIT, "TX complete: skb: %p\n", skb);
1870 1877
1871 if (aphy) 1878 if (aphy)
(...skipping 26 matching lines...)
1898 sc->ps_flags & (PS_WAIT_FOR_BEACON | 1905 sc->ps_flags & (PS_WAIT_FOR_BEACON |
1899 PS_WAIT_FOR_CAB | 1906 PS_WAIT_FOR_CAB |
1900 PS_WAIT_FOR_PSPOLL_DATA | 1907 PS_WAIT_FOR_PSPOLL_DATA |
1901 PS_WAIT_FOR_TX_ACK)); 1908 PS_WAIT_FOR_TX_ACK));
1902 } 1909 }
1903 1910
1904 if (unlikely(tx_info->pad[0] & ATH_TX_INFO_FRAME_TYPE_INTERNAL)) 1911 if (unlikely(tx_info->pad[0] & ATH_TX_INFO_FRAME_TYPE_INTERNAL))
1905 ath9k_tx_status(hw, skb); 1912 ath9k_tx_status(hw, skb);
1906 else { 1913 else {
1907 q = skb_get_queue_mapping(skb); 1914 q = skb_get_queue_mapping(skb);
1908 » » if (q >= 4) 1915 » » if (txq == sc->tx.txq_map[q]) {
1909 » » » q = 0; 1916 » » » spin_lock_bh(&txq->axq_lock);
1910 1917 » » » if (WARN_ON(--txq->pending_frames < 0))
1911 » » if (--sc->tx.pending_frames[q] < 0) 1918 » » » » txq->pending_frames = 0;
1912 » » » sc->tx.pending_frames[q] = 0; 1919 » » » spin_unlock_bh(&txq->axq_lock);
1920 » » }
1913 1921
1914 ieee80211_tx_status(hw, skb); 1922 ieee80211_tx_status(hw, skb);
1915 } 1923 }
1916 } 1924 }
1917 1925
1918 static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf, 1926 static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf,
1919 struct ath_txq *txq, struct list_head *bf_q, 1927 struct ath_txq *txq, struct list_head *bf_q,
1920 struct ath_tx_status *ts, int txok, int sendbar) 1928 struct ath_tx_status *ts, int txok, int sendbar)
1921 { 1929 {
1922 struct sk_buff *skb = bf->bf_mpdu; 1930 struct sk_buff *skb = bf->bf_mpdu;
(...skipping 13 matching lines...)
1936 dma_unmap_single(sc->dev, bf->bf_dmacontext, skb->len, DMA_TO_DEVICE); 1944 dma_unmap_single(sc->dev, bf->bf_dmacontext, skb->len, DMA_TO_DEVICE);
1937 1945
1938 if (bf->bf_state.bfs_paprd) { 1946 if (bf->bf_state.bfs_paprd) {
1939 if (time_after(jiffies, 1947 if (time_after(jiffies,
1940 bf->bf_state.bfs_paprd_timestamp + 1948 bf->bf_state.bfs_paprd_timestamp +
1941 msecs_to_jiffies(ATH_PAPRD_TIMEOUT))) 1949 msecs_to_jiffies(ATH_PAPRD_TIMEOUT)))
1942 dev_kfree_skb_any(skb); 1950 dev_kfree_skb_any(skb);
1943 else 1951 else
1944 complete(&sc->paprd_complete); 1952 complete(&sc->paprd_complete);
1945 } else { 1953 } else {
1946 » » ath_tx_complete(sc, skb, bf->aphy, tx_flags); 1954 » » ath_debug_stat_tx(sc, bf, ts);
1947 » » ath_debug_stat_tx(sc, txq, bf, ts); 1955 » » ath_tx_complete(sc, skb, bf->aphy, tx_flags, txq);
1956
1948 } 1957 }
1949 1958
1950 /* 1959 /*
1951 * Return the list of ath_buf of this mpdu to free queue 1960 * Return the list of ath_buf of this mpdu to free queue
1952 */ 1961 */
1953 spin_lock_irqsave(&sc->tx.txbuflock, flags); 1962 spin_lock_irqsave(&sc->tx.txbuflock, flags);
1954 list_splice_tail_init(bf_q, &sc->tx.txbuf); 1963 list_splice_tail_init(bf_q, &sc->tx.txbuf);
1955 spin_unlock_irqrestore(&sc->tx.txbuflock, flags); 1964 spin_unlock_irqrestore(&sc->tx.txbuflock, flags);
1956 } 1965 }
1957 1966
(...skipping 36 matching lines...)
1994 u8 i, tx_rateindex; 2003 u8 i, tx_rateindex;
1995 2004
1996 if (txok) 2005 if (txok)
1997 tx_info->status.ack_signal = ts->ts_rssi; 2006 tx_info->status.ack_signal = ts->ts_rssi;
1998 2007
1999 tx_rateindex = ts->ts_rateindex; 2008 tx_rateindex = ts->ts_rateindex;
2000 WARN_ON(tx_rateindex >= hw->max_rates); 2009 WARN_ON(tx_rateindex >= hw->max_rates);
2001 2010
2002 if (ts->ts_status & ATH9K_TXERR_FILT) 2011 if (ts->ts_status & ATH9K_TXERR_FILT)
2003 tx_info->flags |= IEEE80211_TX_STAT_TX_FILTERED; 2012 tx_info->flags |= IEEE80211_TX_STAT_TX_FILTERED;
2004 » if ((tx_info->flags & IEEE80211_TX_CTL_AMPDU) && update_rc) 2013 » if ((tx_info->flags & IEEE80211_TX_CTL_AMPDU) && update_rc) {
2005 tx_info->flags |= IEEE80211_TX_STAT_AMPDU; 2014 tx_info->flags |= IEEE80211_TX_STAT_AMPDU;
2006 2015
2016 BUG_ON(nbad > bf->bf_nframes);
2017
2018 tx_info->status.ampdu_len = bf->bf_nframes;
2019 tx_info->status.ampdu_ack_len = bf->bf_nframes - nbad;
2020 }
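Note: the A-MPDU status fields move up into the IEEE80211_TX_CTL_AMPDU branch, so they are filled in for every rate-controlled aggregate rather than only for data frames that pass the later filter/NOACK checks, and the new BUG_ON catches nbad exceeding the subframe count. The two values mac80211's rate control consumes:

    tx_info->status.ampdu_len     = bf->bf_nframes;        /* subframes transmitted */
    tx_info->status.ampdu_ack_len = bf->bf_nframes - nbad; /* subframes ACKed */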
2021
2007 if ((ts->ts_status & ATH9K_TXERR_FILT) == 0 && 2022 if ((ts->ts_status & ATH9K_TXERR_FILT) == 0 &&
2008 (bf->bf_flags & ATH9K_TXDESC_NOACK) == 0 && update_rc) { 2023 (bf->bf_flags & ATH9K_TXDESC_NOACK) == 0 && update_rc) {
2009 if (ieee80211_is_data(hdr->frame_control)) { 2024 if (ieee80211_is_data(hdr->frame_control)) {
2010 if (ts->ts_flags & 2025 if (ts->ts_flags &
2011 (ATH9K_TX_DATA_UNDERRUN | ATH9K_TX_DELIM_UNDERRUN)) 2026 (ATH9K_TX_DATA_UNDERRUN | ATH9K_TX_DELIM_UNDERRUN))
2012 tx_info->pad[0] |= ATH_TX_INFO_UNDERRUN; 2027 tx_info->pad[0] |= ATH_TX_INFO_UNDERRUN;
2013 if ((ts->ts_status & ATH9K_TXERR_XRETRY) || 2028 if ((ts->ts_status & ATH9K_TXERR_XRETRY) ||
2014 (ts->ts_status & ATH9K_TXERR_FIFO)) 2029 (ts->ts_status & ATH9K_TXERR_FIFO))
2015 tx_info->pad[0] |= ATH_TX_INFO_XRETRY; 2030 tx_info->pad[0] |= ATH_TX_INFO_XRETRY;
2016 tx_info->status.ampdu_len = bf->bf_nframes;
2017 tx_info->status.ampdu_ack_len = bf->bf_nframes - nbad;
2018 } 2031 }
2019 } 2032 }
2020 2033
2021 for (i = tx_rateindex + 1; i < hw->max_rates; i++) { 2034 for (i = tx_rateindex + 1; i < hw->max_rates; i++) {
2022 tx_info->status.rates[i].count = 0; 2035 tx_info->status.rates[i].count = 0;
2023 tx_info->status.rates[i].idx = -1; 2036 tx_info->status.rates[i].idx = -1;
2024 } 2037 }
2025 2038
2026 tx_info->status.rates[tx_rateindex].count = ts->ts_longretry + 1; 2039 tx_info->status.rates[tx_rateindex].count = ts->ts_longretry + 1;
2027 } 2040 }
2028 2041
2029 static void ath_wake_mac80211_queue(struct ath_softc *sc, struct ath_txq *txq) 2042 static void ath_wake_mac80211_queue(struct ath_softc *sc, int qnum)
2030 { 2043 {
2031 » int qnum; 2044 » struct ath_txq *txq;
2032 2045
2033 » qnum = ath_get_mac80211_qnum(txq->axq_class, sc); 2046 » txq = sc->tx.txq_map[qnum];
2034 » if (qnum == -1)
2035 » » return;
2036
2037 spin_lock_bh(&txq->axq_lock); 2047 spin_lock_bh(&txq->axq_lock);
2038 » if (txq->stopped && sc->tx.pending_frames[qnum] < ATH_MAX_QDEPTH) { 2048 » if (txq->stopped && txq->pending_frames < ATH_MAX_QDEPTH) {
2039 if (ath_mac80211_start_queue(sc, qnum)) 2049 if (ath_mac80211_start_queue(sc, qnum))
2040 txq->stopped = 0; 2050 txq->stopped = 0;
2041 } 2051 }
2042 spin_unlock_bh(&txq->axq_lock); 2052 spin_unlock_bh(&txq->axq_lock);
2043 } 2053 }
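Note: ath_wake_mac80211_queue() now takes the mac80211 queue number and looks the hardware queue up in sc->tx.txq_map, instead of reverse-mapping txq->axq_class via ath_get_mac80211_qnum(). Callers gate the wake on the same mapping test used on the stop side:

    qnum = skb_get_queue_mapping(bf->bf_mpdu);      /* queue the skb came from */
    /* ... completion processing ... */
    if (txq == sc->tx.txq_map[qnum])                /* completion on the mapped queue */
            ath_wake_mac80211_queue(sc, qnum);      /* wakes once below ATH_MAX_QDEPTH */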
2044 2054
2045 static void ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq) 2055 static void ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq)
2046 { 2056 {
2047 struct ath_hw *ah = sc->sc_ah; 2057 struct ath_hw *ah = sc->sc_ah;
2048 struct ath_common *common = ath9k_hw_common(ah); 2058 struct ath_common *common = ath9k_hw_common(ah);
2049 struct ath_buf *bf, *lastbf, *bf_held = NULL; 2059 struct ath_buf *bf, *lastbf, *bf_held = NULL;
2050 struct list_head bf_head; 2060 struct list_head bf_head;
2051 struct ath_desc *ds; 2061 struct ath_desc *ds;
2052 struct ath_tx_status ts; 2062 struct ath_tx_status ts;
2053 int txok; 2063 int txok;
2054 int status; 2064 int status;
2065 int qnum;
2055 2066
2056 ath_print(common, ATH_DBG_QUEUE, "tx queue %d (%x), link %p\n", 2067 ath_print(common, ATH_DBG_QUEUE, "tx queue %d (%x), link %p\n",
2057 txq->axq_qnum, ath9k_hw_gettxbuf(sc->sc_ah, txq->axq_qnum), 2068 txq->axq_qnum, ath9k_hw_gettxbuf(sc->sc_ah, txq->axq_qnum),
2058 txq->axq_link); 2069 txq->axq_link);
2059 2070
2060 for (;;) { 2071 for (;;) {
2061 spin_lock_bh(&txq->axq_lock); 2072 spin_lock_bh(&txq->axq_lock);
2062 if (list_empty(&txq->axq_q)) { 2073 if (list_empty(&txq->axq_q)) {
2063 txq->axq_link = NULL; 2074 txq->axq_link = NULL;
2064 spin_unlock_bh(&txq->axq_lock); 2075 spin_unlock_bh(&txq->axq_lock);
(...skipping 64 matching lines...)
2129 if (bf_held) 2140 if (bf_held)
2130 ath_tx_return_buffer(sc, bf_held); 2141 ath_tx_return_buffer(sc, bf_held);
2131 2142
2132 if (!bf_isampdu(bf)) { 2143 if (!bf_isampdu(bf)) {
2133 /* 2144 /*
2134 * This frame is sent out as a single frame. 2145 * This frame is sent out as a single frame.
2135 * Use hardware retry status for this frame. 2146 * Use hardware retry status for this frame.
2136 */ 2147 */
2137 if (ts.ts_status & ATH9K_TXERR_XRETRY) 2148 if (ts.ts_status & ATH9K_TXERR_XRETRY)
2138 bf->bf_state.bf_type |= BUF_XRETRY; 2149 bf->bf_state.bf_type |= BUF_XRETRY;
2139 » » » ath_tx_rc_status(bf, &ts, 0, txok, true); 2150 » » » ath_tx_rc_status(bf, &ts, txok ? 0 : 1, txok, true);
2140 } 2151 }
2141 2152
2153 qnum = skb_get_queue_mapping(bf->bf_mpdu);
2154
2142 if (bf_isampdu(bf)) 2155 if (bf_isampdu(bf))
2143 ath_tx_complete_aggr(sc, txq, bf, &bf_head, &ts, txok); 2156 ath_tx_complete_aggr(sc, txq, bf, &bf_head, &ts, txok);
2144 else 2157 else
2145 ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, txok, 0); 2158 ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, txok, 0);
2146 2159
2147 » » ath_wake_mac80211_queue(sc, txq); 2160 » » if (txq == sc->tx.txq_map[qnum])
2161 » » » ath_wake_mac80211_queue(sc, qnum);
2148 2162
2149 spin_lock_bh(&txq->axq_lock); 2163 spin_lock_bh(&txq->axq_lock);
2150 if (sc->sc_flags & SC_OP_TXAGGR) 2164 if (sc->sc_flags & SC_OP_TXAGGR)
2151 ath_txq_schedule(sc, txq); 2165 ath_txq_schedule(sc, txq);
2152 spin_unlock_bh(&txq->axq_lock); 2166 spin_unlock_bh(&txq->axq_lock);
2153 } 2167 }
2154 } 2168 }
2155 2169
2156 static void ath_tx_complete_poll_work(struct work_struct *work) 2170 static void ath_tx_complete_poll_work(struct work_struct *work)
2157 { 2171 {
(...skipping 16 matching lines...)
2174 txq->axq_tx_inprogress = true; 2188 txq->axq_tx_inprogress = true;
2175 } 2189 }
2176 } 2190 }
2177 spin_unlock_bh(&txq->axq_lock); 2191 spin_unlock_bh(&txq->axq_lock);
2178 } 2192 }
2179 2193
2180 if (needreset) { 2194 if (needreset) {
2181 ath_print(ath9k_hw_common(sc->sc_ah), ATH_DBG_RESET, 2195 ath_print(ath9k_hw_common(sc->sc_ah), ATH_DBG_RESET,
2182 "tx hung, resetting the chip\n"); 2196 "tx hung, resetting the chip\n");
2183 ath9k_ps_wakeup(sc); 2197 ath9k_ps_wakeup(sc);
2184 » » ath_reset(sc, false); 2198 » » ath_reset(sc, true);
2185 ath9k_ps_restore(sc); 2199 ath9k_ps_restore(sc);
2186 } 2200 }
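Note: the hang-recovery path now resets with retry_tx = true, so frames drained from the hardware queues during the reset are presumably rescheduled rather than completed as failures:

    ath9k_ps_wakeup(sc);
    ath_reset(sc, true);    /* retry_tx: requeue drained frames after the reset */
    ath9k_ps_restore(sc);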
2187 2201
2188 ieee80211_queue_delayed_work(sc->hw, &sc->tx_complete_work, 2202 ieee80211_queue_delayed_work(sc->hw, &sc->tx_complete_work,
2189 msecs_to_jiffies(ATH_TX_COMPLETE_POLL_INT)); 2203 msecs_to_jiffies(ATH_TX_COMPLETE_POLL_INT));
2190 } 2204 }
2191 2205
2192 2206
2193 2207
2194 void ath_tx_tasklet(struct ath_softc *sc) 2208 void ath_tx_tasklet(struct ath_softc *sc)
(...skipping 12 matching lines...)
2207 void ath_tx_edma_tasklet(struct ath_softc *sc) 2221 void ath_tx_edma_tasklet(struct ath_softc *sc)
2208 { 2222 {
2209 struct ath_tx_status txs; 2223 struct ath_tx_status txs;
2210 struct ath_common *common = ath9k_hw_common(sc->sc_ah); 2224 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
2211 struct ath_hw *ah = sc->sc_ah; 2225 struct ath_hw *ah = sc->sc_ah;
2212 struct ath_txq *txq; 2226 struct ath_txq *txq;
2213 struct ath_buf *bf, *lastbf; 2227 struct ath_buf *bf, *lastbf;
2214 struct list_head bf_head; 2228 struct list_head bf_head;
2215 int status; 2229 int status;
2216 int txok; 2230 int txok;
2231 int qnum;
2217 2232
2218 for (;;) { 2233 for (;;) {
2219 status = ath9k_hw_txprocdesc(ah, NULL, (void *)&txs); 2234 status = ath9k_hw_txprocdesc(ah, NULL, (void *)&txs);
2220 if (status == -EINPROGRESS) 2235 if (status == -EINPROGRESS)
2221 break; 2236 break;
2222 if (status == -EIO) { 2237 if (status == -EIO) {
2223 ath_print(common, ATH_DBG_XMIT, 2238 ath_print(common, ATH_DBG_XMIT,
2224 "Error processing tx status\n"); 2239 "Error processing tx status\n");
2225 break; 2240 break;
2226 } 2241 }
(...skipping 31 matching lines...)
2258 if (bf->bf_isnullfunc && txok) { 2273 if (bf->bf_isnullfunc && txok) {
2259 if ((sc->ps_flags & PS_ENABLED)) 2274 if ((sc->ps_flags & PS_ENABLED))
2260 ath9k_enable_ps(sc); 2275 ath9k_enable_ps(sc);
2261 else 2276 else
2262 sc->ps_flags |= PS_NULLFUNC_COMPLETED; 2277 sc->ps_flags |= PS_NULLFUNC_COMPLETED;
2263 } 2278 }
2264 2279
2265 if (!bf_isampdu(bf)) { 2280 if (!bf_isampdu(bf)) {
2266 if (txs.ts_status & ATH9K_TXERR_XRETRY) 2281 if (txs.ts_status & ATH9K_TXERR_XRETRY)
2267 bf->bf_state.bf_type |= BUF_XRETRY; 2282 bf->bf_state.bf_type |= BUF_XRETRY;
2268 » » » ath_tx_rc_status(bf, &txs, 0, txok, true); 2283 » » » ath_tx_rc_status(bf, &txs, txok ? 0 : 1, txok, true);
2269 } 2284 }
2270 2285
2286 qnum = skb_get_queue_mapping(bf->bf_mpdu);
2287
2271 if (bf_isampdu(bf)) 2288 if (bf_isampdu(bf))
2272 ath_tx_complete_aggr(sc, txq, bf, &bf_head, &txs, txok); 2289 ath_tx_complete_aggr(sc, txq, bf, &bf_head, &txs, txok);
2273 else 2290 else
2274 ath_tx_complete_buf(sc, bf, txq, &bf_head, 2291 ath_tx_complete_buf(sc, bf, txq, &bf_head,
2275 &txs, txok, 0); 2292 &txs, txok, 0);
2276 2293
2277 » » ath_wake_mac80211_queue(sc, txq); 2294 » » if (txq == sc->tx.txq_map[qnum])
2295 » » » ath_wake_mac80211_queue(sc, qnum);
2278 2296
2279 spin_lock_bh(&txq->axq_lock); 2297 spin_lock_bh(&txq->axq_lock);
2280 if (!list_empty(&txq->txq_fifo_pending)) { 2298 if (!list_empty(&txq->txq_fifo_pending)) {
2281 INIT_LIST_HEAD(&bf_head); 2299 INIT_LIST_HEAD(&bf_head);
2282 bf = list_first_entry(&txq->txq_fifo_pending, 2300 bf = list_first_entry(&txq->txq_fifo_pending,
2283 struct ath_buf, list); 2301 struct ath_buf, list);
2284 list_cut_position(&bf_head, &txq->txq_fifo_pending, 2302 list_cut_position(&bf_head, &txq->txq_fifo_pending,
2285 &bf->bf_lastbf->list); 2303 &bf->bf_lastbf->list);
2286 ath_tx_txqaddbuf(sc, txq, &bf_head); 2304 ath_tx_txqaddbuf(sc, txq, &bf_head);
2287 } else if (sc->sc_flags & SC_OP_TXAGGR) 2305 } else if (sc->sc_flags & SC_OP_TXAGGR)
(...skipping 111 matching lines...)
2399 INIT_LIST_HEAD(&tid->buf_q); 2417 INIT_LIST_HEAD(&tid->buf_q);
2400 acno = TID_TO_WME_AC(tidno); 2418 acno = TID_TO_WME_AC(tidno);
2401 tid->ac = &an->ac[acno]; 2419 tid->ac = &an->ac[acno];
2402 tid->state &= ~AGGR_ADDBA_COMPLETE; 2420 tid->state &= ~AGGR_ADDBA_COMPLETE;
2403 tid->state &= ~AGGR_ADDBA_PROGRESS; 2421 tid->state &= ~AGGR_ADDBA_PROGRESS;
2404 } 2422 }
2405 2423
2406 for (acno = 0, ac = &an->ac[acno]; 2424 for (acno = 0, ac = &an->ac[acno];
2407 acno < WME_NUM_AC; acno++, ac++) { 2425 acno < WME_NUM_AC; acno++, ac++) {
2408 ac->sched = false; 2426 ac->sched = false;
2409 » » ac->qnum = sc->tx.hwq_map[acno]; 2427 » » ac->txq = sc->tx.txq_map[acno];
2410 INIT_LIST_HEAD(&ac->tid_q); 2428 INIT_LIST_HEAD(&ac->tid_q);
2411 } 2429 }
2412 } 2430 }
2413 2431
2414 void ath_tx_node_cleanup(struct ath_softc *sc, struct ath_node *an) 2432 void ath_tx_node_cleanup(struct ath_softc *sc, struct ath_node *an)
2415 { 2433 {
2416 struct ath_atx_ac *ac; 2434 struct ath_atx_ac *ac;
2417 struct ath_atx_tid *tid; 2435 struct ath_atx_tid *tid;
2418 struct ath_txq *txq; 2436 struct ath_txq *txq;
2419 » int i, tidno; 2437 » int tidno;
2420 2438
2421 for (tidno = 0, tid = &an->tid[tidno]; 2439 for (tidno = 0, tid = &an->tid[tidno];
2422 tidno < WME_NUM_TID; tidno++, tid++) { 2440 tidno < WME_NUM_TID; tidno++, tid++) {
2423 i = tid->ac->qnum;
2424 2441
2425 if (!ATH_TXQ_SETUP(sc, i))
2426 continue;
2427
2428 txq = &sc->tx.txq[i];
2429 ac = tid->ac; 2442 ac = tid->ac;
2443 txq = ac->txq;
2430 2444
2431 spin_lock_bh(&txq->axq_lock); 2445 spin_lock_bh(&txq->axq_lock);
2432 2446
2433 if (tid->sched) { 2447 if (tid->sched) {
2434 list_del(&tid->list); 2448 list_del(&tid->list);
2435 tid->sched = false; 2449 tid->sched = false;
2436 } 2450 }
2437 2451
2438 if (ac->sched) { 2452 if (ac->sched) {
2439 list_del(&ac->list); 2453 list_del(&ac->list);
2440 tid->ac->sched = false; 2454 tid->ac->sched = false;
2441 } 2455 }
2442 2456
2443 ath_tid_drain(sc, txq, tid); 2457 ath_tid_drain(sc, txq, tid);
2444 tid->state &= ~AGGR_ADDBA_COMPLETE; 2458 tid->state &= ~AGGR_ADDBA_COMPLETE;
2445 tid->state &= ~AGGR_CLEANUP; 2459 tid->state &= ~AGGR_CLEANUP;
2446 2460
2447 spin_unlock_bh(&txq->axq_lock); 2461 spin_unlock_bh(&txq->axq_lock);
2448 } 2462 }
2449 } 2463 }
