// SPDX-License-Identifier: ISC
/*
 * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
 */

#include "mt76.h"

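/* Map a mac80211 txq to the mt76 tx queue id used for it; frames without
 * an associated station fall back to the best-effort queue.
 */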
static int
mt76_txq_get_qid(struct ieee80211_txq *txq)
{
	if (!txq->sta)
		return MT_TXQ_BE;

	return txq->ac;
}

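/* Track the expected aggregation starting sequence number for a station's
 * TID, so that a BlockAckReq with an up-to-date SSN can be sent later when
 * the queue is stopped (see mt76_stop_tx_queues / mt76_txq_schedule_list).
 */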
void
mt76_tx_check_agg_ssn(struct ieee80211_sta *sta, struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	struct ieee80211_txq *txq;
	struct mt76_txq *mtxq;
	u8 tid;

	if (!sta || !ieee80211_is_data_qos(hdr->frame_control) ||
	    !ieee80211_is_data_present(hdr->frame_control))
		return;

	tid = skb->priority & IEEE80211_QOS_CTL_TAG1D_MASK;
	txq = sta->txq[tid];
	mtxq = (struct mt76_txq *)txq->drv_priv;
	if (!mtxq->aggr)
		return;

	mtxq->agg_ssn = le16_to_cpu(hdr->seq_ctrl) + 0x10;
}
EXPORT_SYMBOL_GPL(mt76_tx_check_agg_ssn);

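/* Take the tx status lock and initialize the per-caller completion list.
 * Frames collected on the list while the lock is held are reported to
 * mac80211 by mt76_tx_status_unlock().
 */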
void
mt76_tx_status_lock(struct mt76_dev *dev, struct sk_buff_head *list)
	__acquires(&dev->status_lock)
{
	__skb_queue_head_init(list);
	spin_lock_bh(&dev->status_lock);
}
EXPORT_SYMBOL_GPL(mt76_tx_status_lock);

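/* Drop the tx status lock, then report every frame on the completion list
 * to mac80211, resolving the wcid index to a station under RCU.
 */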
void
mt76_tx_status_unlock(struct mt76_dev *dev, struct sk_buff_head *list)
	__releases(&dev->status_lock)
{
	struct ieee80211_hw *hw;
	struct sk_buff *skb;

	spin_unlock_bh(&dev->status_lock);

	rcu_read_lock();
	while ((skb = __skb_dequeue(list)) != NULL) {
		struct ieee80211_tx_status status = {
			.skb = skb,
			.info = IEEE80211_SKB_CB(skb),
		};
		struct mt76_tx_cb *cb = mt76_tx_skb_cb(skb);
		struct mt76_wcid *wcid;

		wcid = rcu_dereference(dev->wcid[cb->wcid]);
		if (wcid) {
			status.sta = wcid_to_sta(wcid);

			if (status.sta)
				status.rate = &wcid->rate;
		}

		hw = mt76_tx_status_get_hw(dev, skb);
		ieee80211_tx_status_ext(hw, &status);
	}
	rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(mt76_tx_status_unlock);

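/* Merge the new completion flags into the skb's tx control block. Only once
 * both DMA completion and tx status (TXS) have been seen is the frame moved
 * to the caller's list for reporting.
 */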
static void
__mt76_tx_status_skb_done(struct mt76_dev *dev, struct sk_buff *skb, u8 flags,
			  struct sk_buff_head *list)
{
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct mt76_tx_cb *cb = mt76_tx_skb_cb(skb);
	u8 done = MT_TX_CB_DMA_DONE | MT_TX_CB_TXS_DONE;

	flags |= cb->flags;
	cb->flags = flags;

	if ((flags & done) != done)
		return;

	/* Tx status can be unreliable. If it fails, mark the frame as ACKed */
	if (flags & MT_TX_CB_TXS_FAILED) {
		info->status.rates[0].count = 0;
		info->status.rates[0].idx = -1;
		info->flags |= IEEE80211_TX_STAT_ACK;
	}

	__skb_queue_tail(list, skb);
}

void
mt76_tx_status_skb_done(struct mt76_dev *dev, struct sk_buff *skb,
			struct sk_buff_head *list)
{
	__mt76_tx_status_skb_done(dev, skb, MT_TX_CB_TXS_DONE, list);
}
EXPORT_SYMBOL_GPL(mt76_tx_status_skb_done);

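/* Allocate a packet id for tx status tracking. Returns MT_PACKET_ID_NO_ACK
 * or MT_PACKET_ID_NO_SKB when no per-skb status is needed, otherwise a
 * packet id from the wcid's idr, where the skb is kept until completion.
 */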
int
mt76_tx_status_skb_add(struct mt76_dev *dev, struct mt76_wcid *wcid,
		       struct sk_buff *skb)
{
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct mt76_tx_cb *cb = mt76_tx_skb_cb(skb);
	int pid;

	memset(cb, 0, sizeof(*cb));

	if (!wcid)
		return MT_PACKET_ID_NO_ACK;

	if (info->flags & IEEE80211_TX_CTL_NO_ACK)
		return MT_PACKET_ID_NO_ACK;

	if (!(info->flags & (IEEE80211_TX_CTL_REQ_TX_STATUS |
			     IEEE80211_TX_CTL_RATE_CTRL_PROBE)))
		return MT_PACKET_ID_NO_SKB;

	spin_lock_bh(&dev->status_lock);

	pid = idr_alloc(&wcid->pktid, skb, MT_PACKET_ID_FIRST,
			MT_PACKET_ID_MASK, GFP_ATOMIC);
	if (pid < 0) {
		pid = MT_PACKET_ID_NO_SKB;
		goto out;
	}

	cb->wcid = wcid->idx;
	cb->pktid = pid;

	if (list_empty(&wcid->list))
		list_add_tail(&wcid->list, &dev->wcid_list);

out:
	spin_unlock_bh(&dev->status_lock);

	return pid;
}
EXPORT_SYMBOL_GPL(mt76_tx_status_skb_add);

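/* Look up and remove the skb matching a packet id. When called with a
 * negative pktid (flush), all pending entries are expired; otherwise entries
 * whose tx status has not arrived within MT_TX_STATUS_SKB_TIMEOUT after DMA
 * completion are timed out and completed as failed.
 */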
struct sk_buff *
mt76_tx_status_skb_get(struct mt76_dev *dev, struct mt76_wcid *wcid, int pktid,
		       struct sk_buff_head *list)
{
	struct sk_buff *skb;
	int id;

	lockdep_assert_held(&dev->status_lock);

	skb = idr_remove(&wcid->pktid, pktid);
	if (skb)
		goto out;

	/* look for stale entries in the wcid idr queue */
	idr_for_each_entry(&wcid->pktid, skb, id) {
		struct mt76_tx_cb *cb = mt76_tx_skb_cb(skb);

		if (pktid >= 0) {
			if (!(cb->flags & MT_TX_CB_DMA_DONE))
				continue;

			if (time_is_after_jiffies(cb->jiffies +
						   MT_TX_STATUS_SKB_TIMEOUT))
				continue;
		}

		/* It has been too long since DMA_DONE, time out this packet
		 * and stop waiting for TXS callback.
		 */
		idr_remove(&wcid->pktid, cb->pktid);
		__mt76_tx_status_skb_done(dev, skb, MT_TX_CB_TXS_FAILED |
						    MT_TX_CB_TXS_DONE, list);
	}

out:
	if (idr_is_empty(&wcid->pktid))
		list_del_init(&wcid->list);

	return skb;
}
EXPORT_SYMBOL_GPL(mt76_tx_status_skb_get);

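/* Walk all wcids with pending status entries and expire (or, with flush set,
 * force-complete) their timed-out frames.
 */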
void
mt76_tx_status_check(struct mt76_dev *dev, bool flush)
{
	struct mt76_wcid *wcid, *tmp;
	struct sk_buff_head list;

	mt76_tx_status_lock(dev, &list);
	list_for_each_entry_safe(wcid, tmp, &dev->wcid_list, list)
		mt76_tx_status_skb_get(dev, wcid, flush ? -1 : 0, &list);
	mt76_tx_status_unlock(dev, &list);
}
EXPORT_SYMBOL_GPL(mt76_tx_status_check);

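/* Account completion of a frame that bypassed AQL (no airtime estimate),
 * clamping the per-wcid counter at zero in case of underflow.
 */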
static void
mt76_tx_check_non_aql(struct mt76_dev *dev, struct mt76_wcid *wcid,
		      struct sk_buff *skb)
{
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	int pending;

	if (!wcid || info->tx_time_est)
		return;

	pending = atomic_dec_return(&wcid->non_aql_packets);
	if (pending < 0)
		atomic_cmpxchg(&wcid->non_aql_packets, pending, 0);
}

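/* Common tx completion path: handle testmode frames, report frames without a
 * packet id directly to mac80211, and mark tracked frames as DMA done so
 * they are reported once tx status arrives (or times out).
 */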
void __mt76_tx_complete_skb(struct mt76_dev *dev, u16 wcid_idx, struct sk_buff *skb,
			    struct list_head *free_list)
{
	struct mt76_tx_cb *cb = mt76_tx_skb_cb(skb);
	struct ieee80211_tx_status status = {
		.skb = skb,
		.free_list = free_list,
	};
	struct mt76_wcid *wcid = NULL;
	struct ieee80211_hw *hw;
	struct sk_buff_head list;

	rcu_read_lock();

	if (wcid_idx < ARRAY_SIZE(dev->wcid))
		wcid = rcu_dereference(dev->wcid[wcid_idx]);

	mt76_tx_check_non_aql(dev, wcid, skb);

#ifdef CONFIG_NL80211_TESTMODE
	if (mt76_is_testmode_skb(dev, skb, &hw)) {
		struct mt76_phy *phy = hw->priv;

		if (skb == phy->test.tx_skb)
			phy->test.tx_done++;
		if (phy->test.tx_queued == phy->test.tx_done)
			wake_up(&dev->tx_wait);

		dev_kfree_skb_any(skb);
		goto out;
	}
#endif

	if (cb->pktid < MT_PACKET_ID_FIRST) {
		hw = mt76_tx_status_get_hw(dev, skb);
		status.sta = wcid_to_sta(wcid);
		ieee80211_tx_status_ext(hw, &status);
		goto out;
	}

	mt76_tx_status_lock(dev, &list);
	cb->jiffies = jiffies;
	__mt76_tx_status_skb_done(dev, skb, MT_TX_CB_DMA_DONE, &list);
	mt76_tx_status_unlock(dev, &list);

out:
	rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(__mt76_tx_complete_skb);

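/* Queue one skb on a hardware queue. For frames not covered by AQL, bump the
 * per-station non-AQL counter and signal the caller to stop pulling more
 * frames once MT_MAX_NON_AQL_PKT is reached.
 */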
static int
__mt76_tx_queue_skb(struct mt76_phy *phy, int qid, struct sk_buff *skb,
		    struct mt76_wcid *wcid, struct ieee80211_sta *sta,
		    bool *stop)
{
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct mt76_queue *q = phy->q_tx[qid];
	struct mt76_dev *dev = phy->dev;
	bool non_aql;
	int pending;
	int idx;

	non_aql = !info->tx_time_est;
	idx = dev->queue_ops->tx_queue_skb(dev, q, skb, wcid, sta);
	if (idx < 0 || !sta)
		return idx;

	wcid = (struct mt76_wcid *)sta->drv_priv;
	q->entry[idx].wcid = wcid->idx;

	if (!non_aql)
		return idx;

	pending = atomic_inc_return(&wcid->non_aql_packets);
	if (stop && pending >= MT_MAX_NON_AQL_PKT)
		*stop = true;

	return idx;
}

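/* Transmit path for frames handed to the driver directly (outside the txq
 * dequeue path). Picks the hardware queue, fills in rate info if the wcid
 * has no fixed rate set, and kicks the queue.
 */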
void
mt76_tx(struct mt76_phy *phy, struct ieee80211_sta *sta,
	struct mt76_wcid *wcid, struct sk_buff *skb)
{
	struct mt76_dev *dev = phy->dev;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	struct mt76_queue *q;
	int qid = skb_get_queue_mapping(skb);
	bool ext_phy = phy != &dev->phy;

	if (mt76_testmode_enabled(phy)) {
		ieee80211_free_txskb(phy->hw, skb);
		return;
	}

	if (WARN_ON(qid >= MT_TXQ_PSD)) {
		qid = MT_TXQ_BE;
		skb_set_queue_mapping(skb, qid);
	}

	if ((dev->drv->drv_flags & MT_DRV_HW_MGMT_TXQ) &&
	    !(info->flags & IEEE80211_TX_CTL_HW_80211_ENCAP) &&
	    !ieee80211_is_data(hdr->frame_control) &&
	    !ieee80211_is_bufferable_mmpdu(hdr->frame_control)) {
		qid = MT_TXQ_PSD;
		skb_set_queue_mapping(skb, qid);
	}

	if (wcid && !(wcid->tx_info & MT_WCID_TX_INFO_SET))
		ieee80211_get_tx_rates(info->control.vif, sta, skb,
				       info->control.rates, 1);

	if (ext_phy)
		info->hw_queue |= MT_TX_HW_QUEUE_EXT_PHY;

	q = phy->q_tx[qid];

	spin_lock_bh(&q->lock);
	__mt76_tx_queue_skb(phy, qid, skb, wcid, sta, NULL);
	dev->queue_ops->kick(dev, q);
	spin_unlock_bh(&q->lock);
}
EXPORT_SYMBOL_GPL(mt76_tx);

static struct sk_buff *
mt76_txq_dequeue(struct mt76_phy *phy, struct mt76_txq *mtxq)
{
	struct ieee80211_txq *txq = mtxq_to_txq(mtxq);
	struct ieee80211_tx_info *info;
	bool ext_phy = phy != &phy->dev->phy;
	struct sk_buff *skb;

	skb = ieee80211_tx_dequeue(phy->hw, txq);
	if (!skb)
		return NULL;

	info = IEEE80211_SKB_CB(skb);
	if (ext_phy)
		info->hw_queue |= MT_TX_HW_QUEUE_EXT_PHY;

	return skb;
}

static void
mt76_queue_ps_skb(struct mt76_phy *phy, struct ieee80211_sta *sta,
		  struct sk_buff *skb, bool last)
{
	struct mt76_wcid *wcid = (struct mt76_wcid *)sta->drv_priv;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);

	info->control.flags |= IEEE80211_TX_CTRL_PS_RESPONSE;
	if (last)
		info->flags |= IEEE80211_TX_STATUS_EOSP |
			       IEEE80211_TX_CTL_REQ_TX_STATUS;

	mt76_skb_set_moredata(skb, !last);
	__mt76_tx_queue_skb(phy, MT_TXQ_PSD, skb, wcid, sta, NULL);
}

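/* Release up to @nframes buffered frames for the given TIDs in response to a
 * PS-Poll / U-APSD trigger. The last released frame is flagged EOSP; if
 * nothing was queued, end the service period immediately.
 */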
void
mt76_release_buffered_frames(struct ieee80211_hw *hw, struct ieee80211_sta *sta,
			     u16 tids, int nframes,
			     enum ieee80211_frame_release_type reason,
			     bool more_data)
{
	struct mt76_phy *phy = hw->priv;
	struct mt76_dev *dev = phy->dev;
	struct sk_buff *last_skb = NULL;
	struct mt76_queue *hwq = phy->q_tx[MT_TXQ_PSD];
	int i;

	spin_lock_bh(&hwq->lock);
	for (i = 0; tids && nframes; i++, tids >>= 1) {
		struct ieee80211_txq *txq = sta->txq[i];
		struct mt76_txq *mtxq = (struct mt76_txq *)txq->drv_priv;
		struct sk_buff *skb;

		if (!(tids & 1))
			continue;

		do {
			skb = mt76_txq_dequeue(phy, mtxq);
			if (!skb)
				break;

			nframes--;
			if (last_skb)
				mt76_queue_ps_skb(phy, sta, last_skb, false);

			last_skb = skb;
		} while (nframes);
	}

	if (last_skb) {
		mt76_queue_ps_skb(phy, sta, last_skb, true);
		dev->queue_ops->kick(dev, hwq);
	} else {
		ieee80211_sta_eosp(sta);
	}

	spin_unlock_bh(&hwq->lock);
}
EXPORT_SYMBOL_GPL(mt76_release_buffered_frames);

static bool
mt76_txq_stopped(struct mt76_queue *q)
{
	return q->stopped || q->blocked ||
	       q->queued + MT_TXQ_FREE_THR >= q->ndesc;
}

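/* Pull frames from one mac80211 txq and queue them until the hardware queue
 * fills up, the non-AQL limit is hit, or the txq runs dry. Returns the
 * number of queued frames, or a negative error during reset.
 */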
static int
mt76_txq_send_burst(struct mt76_phy *phy, struct mt76_queue *q,
		    struct mt76_txq *mtxq)
{
	struct mt76_dev *dev = phy->dev;
	struct ieee80211_txq *txq = mtxq_to_txq(mtxq);
	enum mt76_txq_id qid = mt76_txq_get_qid(txq);
	struct mt76_wcid *wcid = mtxq->wcid;
	struct ieee80211_tx_info *info;
	struct sk_buff *skb;
	int n_frames = 1;
	bool stop = false;
	int idx;

	if (test_bit(MT_WCID_FLAG_PS, &wcid->flags))
		return 0;

	if (atomic_read(&wcid->non_aql_packets) >= MT_MAX_NON_AQL_PKT)
		return 0;

	skb = mt76_txq_dequeue(phy, mtxq);
	if (!skb)
		return 0;

	info = IEEE80211_SKB_CB(skb);
	if (!(wcid->tx_info & MT_WCID_TX_INFO_SET))
		ieee80211_get_tx_rates(txq->vif, txq->sta, skb,
				       info->control.rates, 1);

	idx = __mt76_tx_queue_skb(phy, qid, skb, wcid, txq->sta, &stop);
	if (idx < 0)
		return idx;

	do {
		if (test_bit(MT76_RESET, &phy->state))
			return -EBUSY;

		if (stop || mt76_txq_stopped(q))
			break;

		skb = mt76_txq_dequeue(phy, mtxq);
		if (!skb)
			break;

		info = IEEE80211_SKB_CB(skb);
		if (!(wcid->tx_info & MT_WCID_TX_INFO_SET))
			ieee80211_get_tx_rates(txq->vif, txq->sta, skb,
					       info->control.rates, 1);

		idx = __mt76_tx_queue_skb(phy, qid, skb, wcid, txq->sta, &stop);
		if (idx < 0)
			break;

		n_frames++;
	} while (1);

	dev->queue_ops->kick(dev, q);

	return n_frames;
}

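/* One scheduling round for a hardware queue: ask mac80211 for the next txq,
 * send a pending BlockAckReq if needed, then burst-transmit from it. Returns
 * the total number of frames queued in this round.
 */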
static int
mt76_txq_schedule_list(struct mt76_phy *phy, enum mt76_txq_id qid)
{
	struct mt76_queue *q = phy->q_tx[qid];
	struct mt76_dev *dev = phy->dev;
	struct ieee80211_txq *txq;
	struct mt76_txq *mtxq;
	struct mt76_wcid *wcid;
	int ret = 0;

	while (1) {
		int n_frames = 0;

		if (test_bit(MT76_RESET, &phy->state))
			return -EBUSY;

		if (dev->queue_ops->tx_cleanup &&
		    q->queued + 2 * MT_TXQ_FREE_THR >= q->ndesc) {
			dev->queue_ops->tx_cleanup(dev, q, false);
		}

		txq = ieee80211_next_txq(phy->hw, qid);
		if (!txq)
			break;

		mtxq = (struct mt76_txq *)txq->drv_priv;
		wcid = mtxq->wcid;
		if (wcid && test_bit(MT_WCID_FLAG_PS, &wcid->flags))
			continue;

		spin_lock_bh(&q->lock);

		if (mtxq->send_bar && mtxq->aggr) {
			struct ieee80211_txq *txq = mtxq_to_txq(mtxq);
			struct ieee80211_sta *sta = txq->sta;
			struct ieee80211_vif *vif = txq->vif;
			u16 agg_ssn = mtxq->agg_ssn;
			u8 tid = txq->tid;

			mtxq->send_bar = false;
			spin_unlock_bh(&q->lock);
			ieee80211_send_bar(vif, sta->addr, tid, agg_ssn);
			spin_lock_bh(&q->lock);
		}

		if (!mt76_txq_stopped(q))
			n_frames = mt76_txq_send_burst(phy, q, mtxq);

		spin_unlock_bh(&q->lock);

		ieee80211_return_txq(phy->hw, txq, false);

		if (unlikely(n_frames < 0))
			return n_frames;

		ret += n_frames;
	}

	return ret;
}

void mt76_txq_schedule(struct mt76_phy *phy, enum mt76_txq_id qid)
{
	int len;

	if (qid >= 4)
		return;

	rcu_read_lock();

	do {
		ieee80211_txq_schedule_start(phy->hw, qid);
		len = mt76_txq_schedule_list(phy, qid);
		ieee80211_txq_schedule_end(phy->hw, qid);
	} while (len > 0);

	rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(mt76_txq_schedule);

void mt76_txq_schedule_all(struct mt76_phy *phy)
{
	int i;

	for (i = 0; i <= MT_TXQ_BK; i++)
		mt76_txq_schedule(phy, i);
}
EXPORT_SYMBOL_GPL(mt76_txq_schedule_all);

void mt76_tx_worker_run(struct mt76_dev *dev)
{
	mt76_txq_schedule_all(&dev->phy);
	if (dev->phy2)
		mt76_txq_schedule_all(dev->phy2);

#ifdef CONFIG_NL80211_TESTMODE
	if (dev->phy.test.tx_pending)
		mt76_testmode_tx_pending(&dev->phy);
	if (dev->phy2 && dev->phy2->test.tx_pending)
		mt76_testmode_tx_pending(dev->phy2);
#endif
}
EXPORT_SYMBOL_GPL(mt76_tx_worker_run);

void mt76_tx_worker(struct mt76_worker *w)
{
	struct mt76_dev *dev = container_of(w, struct mt76_dev, tx_worker);

	mt76_tx_worker_run(dev);
}

void mt76_stop_tx_queues(struct mt76_phy *phy, struct ieee80211_sta *sta,
			 bool send_bar)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(sta->txq); i++) {
		struct ieee80211_txq *txq = sta->txq[i];
		struct mt76_queue *hwq;
		struct mt76_txq *mtxq;

		if (!txq)
			continue;

		hwq = phy->q_tx[mt76_txq_get_qid(txq)];
		mtxq = (struct mt76_txq *)txq->drv_priv;

		spin_lock_bh(&hwq->lock);
		mtxq->send_bar = mtxq->aggr && send_bar;
		spin_unlock_bh(&hwq->lock);
	}
}
EXPORT_SYMBOL_GPL(mt76_stop_tx_queues);

void mt76_wake_tx_queue(struct ieee80211_hw *hw, struct ieee80211_txq *txq)
{
	struct mt76_phy *phy = hw->priv;
	struct mt76_dev *dev = phy->dev;

	if (!test_bit(MT76_STATE_RUNNING, &phy->state))
		return;

	mt76_worker_schedule(&dev->tx_worker);
}
EXPORT_SYMBOL_GPL(mt76_wake_tx_queue);

u8 mt76_ac_to_hwq(u8 ac)
{
	static const u8 wmm_queue_map[] = {
		[IEEE80211_AC_BE] = 0,
		[IEEE80211_AC_BK] = 1,
		[IEEE80211_AC_VI] = 2,
		[IEEE80211_AC_VO] = 3,
	};

	if (WARN_ON(ac >= IEEE80211_NUM_ACS))
		return 0;

	return wmm_queue_map[ac];
}
EXPORT_SYMBOL_GPL(mt76_ac_to_hwq);

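/* Pad the tail of an skb (or of the last fragment of an A-MSDU) by @pad
 * bytes, updating the head skb's total length accordingly.
 */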
int mt76_skb_adjust_pad(struct sk_buff *skb, int pad)
{
	struct sk_buff *iter, *last = skb;

	/* First packet of an A-MSDU burst keeps track of the whole burst
	 * length, need to update length of it and the last packet.
	 */
	skb_walk_frags(skb, iter) {
		last = iter;
		if (!iter->next) {
			skb->data_len += pad;
			skb->len += pad;
			break;
		}
	}

	if (skb_pad(last, pad))
		return -ENOMEM;

	__skb_put(last, pad);

	return 0;
}
EXPORT_SYMBOL_GPL(mt76_skb_adjust_pad);

void mt76_queue_tx_complete(struct mt76_dev *dev, struct mt76_queue *q,
			    struct mt76_queue_entry *e)
{
	if (e->skb)
		dev->drv->tx_complete_skb(dev, e);

	spin_lock_bh(&q->lock);
	q->tail = (q->tail + 1) % q->ndesc;
	q->queued--;
	spin_unlock_bh(&q->lock);
}
EXPORT_SYMBOL_GPL(mt76_queue_tx_complete);

void __mt76_set_tx_blocked(struct mt76_dev *dev, bool blocked)
{
	struct mt76_phy *phy = &dev->phy, *phy2 = dev->phy2;
	struct mt76_queue *q, *q2 = NULL;

	q = phy->q_tx[0];
	if (blocked == q->blocked)
		return;

	q->blocked = blocked;
	if (phy2) {
		q2 = phy2->q_tx[0];
		q2->blocked = blocked;
	}

	if (!blocked)
		mt76_worker_schedule(&dev->tx_worker);
}
EXPORT_SYMBOL_GPL(__mt76_set_tx_blocked);

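/* Allocate a DMA token for a txwi. When the number of outstanding tokens
 * approaches the per-driver limit, block further tx scheduling until enough
 * tokens are released again.
 */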
int mt76_token_consume(struct mt76_dev *dev, struct mt76_txwi_cache **ptxwi)
{
	int token;

	spin_lock_bh(&dev->token_lock);

	token = idr_alloc(&dev->token, *ptxwi, 0, dev->drv->token_size,
			  GFP_ATOMIC);
	if (token >= 0)
		dev->token_count++;

	if (dev->token_count >= dev->drv->token_size - MT76_TOKEN_FREE_THR)
		__mt76_set_tx_blocked(dev, true);

	spin_unlock_bh(&dev->token_lock);

	return token;
}
EXPORT_SYMBOL_GPL(mt76_token_consume);

struct mt76_txwi_cache *
mt76_token_release(struct mt76_dev *dev, int token, bool *wake)
{
	struct mt76_txwi_cache *txwi;

	spin_lock_bh(&dev->token_lock);

	txwi = idr_remove(&dev->token, token);
	if (txwi)
		dev->token_count--;

	if (dev->token_count < dev->drv->token_size - MT76_TOKEN_FREE_THR &&
	    dev->phy.q_tx[0]->blocked)
		*wake = true;

	spin_unlock_bh(&dev->token_lock);

	return txwi;
}
EXPORT_SYMBOL_GPL(mt76_token_release);