/**
******************************************************************************
*
* @file bl_tx.c
*
* Copyright (C) BouffaloLab 2017-2018
*
******************************************************************************
*/
#include <linux/version.h>
#include <linux/dma-mapping.h>
#include <linux/etherdevice.h>
#include "bl_defs.h"
#include "bl_tx.h"
#include "bl_msg_tx.h"
#include "bl_events.h"
#include "bl_sdio.h"
#include "bl_irqs.h"
static u16 g_pkt_sn = 0;
/******************************************************************************
* Power Save functions
*****************************************************************************/
/**
* bl_set_traffic_status - Inform FW if traffic is available for STA in PS
*
* @bl_hw: Driver main data
* @sta: Sta in PS mode
* @available: whether traffic is buffered for the STA
* @ps_id: type of PS data requested (@LEGACY_PS_ID or @UAPSD_ID)
*/
void bl_set_traffic_status(struct bl_hw *bl_hw,
struct bl_sta *sta,
bool available,
u8 ps_id)
{
bool uapsd = (ps_id != LEGACY_PS_ID);
bl_send_me_traffic_ind(bl_hw, sta->sta_idx, uapsd, available);
trace_ps_traffic_update(sta->sta_idx, available, uapsd);
}
/**
* bl_ps_bh_enable - Enable/disable PS mode for one STA
*
* @bl_hw: Driver main data
* @sta: Sta which enters/leaves PS mode
* @enable: PS mode status
*
* This function will enable/disable PS mode for one STA.
* When enabling PS mode:
* - Stop all of the STA's txqs for the BL_TXQ_STOP_STA_PS reason
* - Count how many buffers are already ready for this STA
* - For the BC/MC sta, update all queued SKBs to use hw_queue BCMC
* - Update TIM if some packets are ready
*
* When disabling PS mode:
* - Start all of the STA's txqs for the BL_TXQ_STOP_STA_PS reason
* - For the BC/MC sta, update all queued SKBs to use hw_queue AC_BE
* - Update TIM if some packets are ready (otherwise the fw will not update
* the TIM in the beacon for this STA)
*
* All counter/skb updates are protected from TX path by taking tx_lock
*
* NOTE: _bh_ in function name indicates that this function is called
* from a bottom_half tasklet.
*/
void bl_ps_bh_enable(struct bl_hw *bl_hw, struct bl_sta *sta,
bool enable)
{
struct bl_txq *txq = NULL;
txq = bl_txq_sta_get(sta, 0, NULL, bl_hw);
if (enable) {
trace_ps_enable(sta);
spin_lock(&bl_hw->tx_lock);
sta->ps.active = true;
sta->ps.sp_cnt[LEGACY_PS_ID] = 0;
sta->ps.sp_cnt[UAPSD_ID] = 0;
bl_txq_sta_stop(sta, BL_TXQ_STOP_STA_PS, bl_hw);
if (is_multicast_sta(sta->sta_idx)) {
sta->ps.pkt_ready[LEGACY_PS_ID] = skb_queue_len(&txq->sk_list);
sta->ps.pkt_ready[UAPSD_ID] = 0;
txq->hwq = &bl_hw->hwq[BL_HWQ_BCMC];
} else {
int i;
sta->ps.pkt_ready[LEGACY_PS_ID] = 0;
sta->ps.pkt_ready[UAPSD_ID] = 0;
for (i = 0; i < NX_NB_TXQ_PER_STA; i++, txq++) {
sta->ps.pkt_ready[txq->ps_id] += skb_queue_len(&txq->sk_list);
}
}
spin_unlock(&bl_hw->tx_lock);
if (sta->ps.pkt_ready[LEGACY_PS_ID])
bl_set_traffic_status(bl_hw, sta, true, LEGACY_PS_ID);
if (sta->ps.pkt_ready[UAPSD_ID])
bl_set_traffic_status(bl_hw, sta, true, UAPSD_ID);
} else {
trace_ps_disable(sta->sta_idx);
spin_lock(&bl_hw->tx_lock);
sta->ps.active = false;
if (is_multicast_sta(sta->sta_idx)) {
txq->hwq = &bl_hw->hwq[BL_HWQ_BE];
txq->push_limit = 0;
} else {
int i;
for (i = 0; i < NX_NB_TXQ_PER_STA; i++, txq++) {
txq->push_limit = 0;
}
}
bl_txq_sta_start(sta, BL_TXQ_STOP_STA_PS, bl_hw);
spin_unlock(&bl_hw->tx_lock);
if (sta->ps.pkt_ready[LEGACY_PS_ID])
bl_set_traffic_status(bl_hw, sta, false, LEGACY_PS_ID);
if (sta->ps.pkt_ready[UAPSD_ID])
bl_set_traffic_status(bl_hw, sta, false, UAPSD_ID);
}
}
/**
* bl_ps_bh_traffic_req - Handle traffic request for STA in PS mode
*
* @bl_hw: Driver main data
* @sta: Sta which enters/leaves PS mode
* @pkt_req: number of pkt to push
* @ps_id: type of PS data requested (@LEGACY_PS_ID or @UAPSD_ID)
*
* This function will make sure that @pkt_req packets are pushed to the fw
* while the STA is in PS mode.
* If the request is 0, send all traffic.
* If the request is greater than the number of available packets, reduce it.
* Note: the request will also be reduced if txq credits are not available.
*
* All counter updates are protected from TX path by taking tx_lock
*
* NOTE: _bh_ in function name indicates that this function is called
* from the bottom_half tasklet.
*/
void bl_ps_bh_traffic_req(struct bl_hw *bl_hw, struct bl_sta *sta,
u16 pkt_req, u8 ps_id)
{
int pkt_ready_all;
struct bl_txq *txq;
if (WARN(!sta->ps.active, "sta %pM is not in Power Save mode",
sta->mac_addr))
return;
trace_ps_traffic_req(sta, pkt_req, ps_id);
spin_lock(&bl_hw->tx_lock);
pkt_ready_all = (sta->ps.pkt_ready[ps_id] - sta->ps.sp_cnt[ps_id]);
/* Don't start a SP until the previous one is finished or if we don't
have packets ready (which must not happen for U-APSD) */
if (sta->ps.sp_cnt[ps_id] || pkt_ready_all <= 0) {
goto done;
}
/* Adapt request to what is available. */
if (pkt_req == 0 || pkt_req > pkt_ready_all) {
pkt_req = pkt_ready_all;
}
/* Reset the SP counter */
sta->ps.sp_cnt[ps_id] = 0;
/* "dispatch" the request between txq */
txq = bl_txq_sta_get(sta, NX_NB_TXQ_PER_STA - 1, NULL, bl_hw);
if (is_multicast_sta(sta->sta_idx)) {
if (txq->credits <= 0)
goto done;
if (pkt_req > txq->credits)
pkt_req = txq->credits;
txq->push_limit = pkt_req;
sta->ps.sp_cnt[ps_id] = pkt_req;
bl_txq_add_to_hw_list(txq);
} else {
int i;
/* TODO: dispatch using correct txq priority */
for (i = NX_NB_TXQ_PER_STA - 1; i >= 0; i--, txq--) {
u16 txq_len = skb_queue_len(&txq->sk_list);
if (txq->ps_id != ps_id)
continue;
if (txq_len > txq->credits)
txq_len = txq->credits;
if (txq_len > 0) {
if (txq_len < pkt_req) {
/* Not enough pkt queued in this txq, add this
txq to hwq list and process next txq */
pkt_req -= txq_len;
txq->push_limit = txq_len;
sta->ps.sp_cnt[ps_id] += txq_len;
bl_txq_add_to_hw_list(txq);
} else {
/* Enough pkts in this txq to complete the request:
add this txq to the hwq list and stop processing txqs */
txq->push_limit = pkt_req;
sta->ps.sp_cnt[ps_id] += pkt_req;
bl_txq_add_to_hw_list(txq);
break;
}
}
}
}
done:
spin_unlock(&bl_hw->tx_lock);
}
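/*
* Worked example of the dispatch above (illustrative): with pkt_req == 5
* and two matching txqs holding 3 and 4 ready packets (credits permitting),
* the first txq visited gets push_limit = 3 and pkt_req drops to 2; the
* second gets push_limit = 2 and the walk stops, so sta->ps.sp_cnt[ps_id]
* ends up at 5, the size of the service period.
*/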
/******************************************************************************
* TX functions
*****************************************************************************/
#define PRIO_STA_NULL 0xAA
static const int bl_down_hwq2tid[3] = {
[BL_HWQ_BK] = 2,
[BL_HWQ_BE] = 3,
[BL_HWQ_VI] = 5,
};
void bl_downgrade_ac(struct bl_sta *sta, struct sk_buff *skb)
{
int8_t ac = bl_tid2hwq[skb->priority];
if (WARN((ac > BL_HWQ_VO),
"Unexepcted ac %d for skb before downgrade", ac))
ac = BL_HWQ_VO;
while (sta->acm & BIT(ac)) {
if (ac == BL_HWQ_BK) {
skb->priority = 1;
return;
}
ac--;
skb->priority = bl_down_hwq2tid[ac];
}
}
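/*
* Worked example (illustrative, assuming the EDCA ordering BL_HWQ_BK <
* BL_HWQ_BE < BL_HWQ_VI < BL_HWQ_VO implied by the table above, and that
* bl_tid2hwq maps TID 6 to BL_HWQ_VO): a voice frame with ACM mandated on
* VO and VI walks VO -> VI (priority 5) -> BE (priority 3); BE is not
* ACM-protected, so the frame is sent with TID 3. If every AC down to BK
* were protected, the frame would settle on priority 1 (BK).
*/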
/**
* u16 (*ndo_select_queue)(struct net_device *dev, struct sk_buff *skb,
* void *accel_priv, select_queue_fallback_t fallback);
* Called to decide which queue to use when the device supports multiple
* transmit queues.
*/
u16 bl_select_queue(struct net_device *dev, struct sk_buff *skb
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 19, 0)
, struct net_device *sb_dev
#elif LINUX_VERSION_CODE >= KERNEL_VERSION(3, 13, 0)
, void *accel_priv
#endif
#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 14, 0) && LINUX_VERSION_CODE < KERNEL_VERSION(5, 2, 0)
, select_queue_fallback_t fallback
#endif
)
{
struct bl_vif *bl_vif = netdev_priv(dev);
struct bl_hw *bl_hw = bl_vif->bl_hw;
struct wireless_dev *wdev = &bl_vif->wdev;
struct bl_sta *sta = NULL;
struct bl_txq *txq;
u16 netdev_queue;
BL_DBG(BL_FN_ENTRY_STR);
switch (wdev->iftype) {
case NL80211_IFTYPE_STATION:
case NL80211_IFTYPE_P2P_CLIENT:
{
struct ethhdr *eth;
eth = (struct ethhdr *)skb->data;
sta = bl_vif->sta.ap;
break;
}
case NL80211_IFTYPE_AP_VLAN:
if (bl_vif->ap_vlan.sta_4a) {
sta = bl_vif->ap_vlan.sta_4a;
break;
}
/* AP_VLAN interface is not used for a 4A STA,
fall back to searching the sta among all of the AP's clients */
bl_vif = bl_vif->ap_vlan.master;
case NL80211_IFTYPE_AP:
case NL80211_IFTYPE_P2P_GO:
{
struct bl_sta *cur;
struct ethhdr *eth = (struct ethhdr *)skb->data;
if (is_multicast_ether_addr(eth->h_dest)) {
sta = &bl_hw->sta_table[bl_vif->ap.bcmc_index];
} else {
list_for_each_entry(cur, &bl_vif->ap.sta_list, list) {
if (!memcmp(cur->mac_addr, eth->h_dest, ETH_ALEN)) {
sta = cur;
break;
}
}
}
break;
}
default:
break;
}
if (sta && sta->qos)
{
/* use the data classifier to determine what 802.1d tag the data frame has */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 14, 0)
skb->priority = cfg80211_classify8021d(skb, NULL) & IEEE80211_QOS_CTL_TAG1D_MASK;
#else
skb->priority = cfg80211_classify8021d(skb) & IEEE80211_QOS_CTL_TAG1D_MASK;
#endif
if (sta->acm)
bl_downgrade_ac(sta, skb);
txq = bl_txq_sta_get(sta, skb->priority, NULL, bl_hw);
netdev_queue = txq->ndev_idx;
}
else if (sta)
{
skb->priority = 0xFF;
txq = bl_txq_sta_get(sta, 0, NULL, bl_hw);
netdev_queue = txq->ndev_idx;
}
else
{
/* This packet will be dropped in the xmit function, but we still need
to select an active queue for xmit to be called. As this is most
likely to happen for an AP interface, select the BCMC queue
(TODO: select another queue if the BCMC queue is stopped) */
skb->priority = PRIO_STA_NULL;
netdev_queue = NX_BCMC_TXQ_NDEV_IDX;
}
BUG_ON(netdev_queue >= NX_NB_NDEV_TXQ);
return netdev_queue;
}
/**
* bl_set_more_data_flag - Update MORE_DATA flag in tx sw desc
*
* @bl_hw: Driver main data
* @sw_txhdr: Header for pkt to be pushed
*
* If STA is in PS mode
* - Set EOSP in case the packet is the last of the UAPSD service period
* - Set MORE_DATA flag if more pkt are ready for this sta
* - Update TIM if this is the last pkt buffered for this sta
*
* note: tx_lock already taken.
*/
static inline void bl_set_more_data_flag(struct bl_hw *bl_hw,
struct bl_sw_txhdr *sw_txhdr)
{
struct bl_sta *sta = sw_txhdr->bl_sta;
struct bl_txq *txq = sw_txhdr->txq;
if (unlikely(sta->ps.active)) {
sta->ps.pkt_ready[txq->ps_id]--;
sta->ps.sp_cnt[txq->ps_id]--;
trace_ps_push(sta);
if (((txq->ps_id == UAPSD_ID))
&& !sta->ps.sp_cnt[txq->ps_id]) {
sw_txhdr->desc.host.flags |= TXU_CNTRL_EOSP;
}
if (sta->ps.pkt_ready[txq->ps_id]) {
sw_txhdr->desc.host.flags |= TXU_CNTRL_MORE_DATA;
} else {
bl_set_traffic_status(bl_hw, sta, false, txq->ps_id);
}
}
}
/**
* bl_get_tx_info - Get STA and tid for one skb
*
* @bl_vif: vif ptr
* @skb: skb
* @tid: pointer updated with the tid to use for this skb
*
* @return: pointer on the destination STA (may be NULL)
*
* The skb has already been parsed in the bl_select_queue function;
* simply re-read the information from the skb.
*/
static struct bl_sta *bl_get_tx_info(struct bl_vif *bl_vif,
struct sk_buff *skb,
u8 *tid)
{
struct bl_hw *bl_hw = bl_vif->bl_hw;
struct bl_sta *sta;
int sta_idx;
*tid = skb->priority;
if (unlikely(skb->priority == PRIO_STA_NULL)) {
return NULL;
} else {
int ndev_idx = skb_get_queue_mapping(skb);
if (ndev_idx == NX_BCMC_TXQ_NDEV_IDX)
sta_idx = NX_REMOTE_STA_MAX + master_vif_idx(bl_vif);
else
sta_idx = ndev_idx / NX_NB_TID_PER_STA;
sta = &bl_hw->sta_table[sta_idx];
}
return sta;
}
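/*
* Illustrative mapping (the exact constants are build-time parameters):
* assuming NX_NB_TID_PER_STA == 8, a unicast skb mapped to ndev queue 19
* resolves to sta_idx 19 / 8 == 2, while the BC/MC queue
* (NX_BCMC_TXQ_NDEV_IDX) resolves to the per-vif broadcast/multicast
* entry at NX_REMOTE_STA_MAX + master_vif_idx(bl_vif).
*/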
/**
* bl_tx_push - Push one packet to fw
*
* @bl_hw: Driver main data
* @txhdr: tx desc of the buffer to push
* @flags: push flags (see @bl_push_flags)
*
* Push one packet to fw. Sw desc of the packet has already been updated.
* Only MORE_DATA flag will be set if needed.
*/
void bl_tx_push(struct bl_hw *bl_hw, struct bl_txhdr *txhdr, int flags)
{
struct bl_sw_txhdr *sw_txhdr = txhdr->sw_hdr;
struct txdesc_api *desc;
struct sk_buff *skb = sw_txhdr->skb;
struct bl_txq *txq = sw_txhdr->txq;
u16 hw_queue = txq->hwq->id;
int user = 0;
u32 wr_port;
u32 ret = 0;
lockdep_assert_held(&bl_hw->tx_lock);
/* RETRY flag is not always set so retest here */
if (txq->nb_retry) {
flags |= BL_PUSH_RETRY;
txq->nb_retry--;
if (txq->nb_retry == 0) {
WARN(skb != txq->last_retry_skb,
"last retry buffer is not the expected one");
txq->last_retry_skb = NULL;
}
} else if (!(flags & BL_PUSH_RETRY)) {
txq->pkt_sent++;
}
#ifdef CONFIG_BL_AMSDUS_TX
if (txq->amsdu == sw_txhdr) {
WARN((flags & BL_PUSH_RETRY), "End A-MSDU on a retry");
bl_hw->stats.amsdus[sw_txhdr->amsdu.nb - 1].done++;
txq->amsdu = NULL;
} else if (!(flags & BL_PUSH_RETRY) &&
!(sw_txhdr->desc.host.flags & TXU_CNTRL_AMSDU)) {
bl_hw->stats.amsdus[0].done++;
}
#endif /* CONFIG_BL_AMSDUS_TX */
/* hw_queue is only updated here because, for the multicast STA, the hwq
may change between queueing and pushing (because of PS) */
sw_txhdr->hw_queue = hw_queue;
if (sw_txhdr->bl_sta) {
/* only for AP mode */
bl_set_more_data_flag(bl_hw, sw_txhdr);
}
trace_push_desc(skb, sw_txhdr, flags);
BL_DBG("bl_tx_push:txq->idx=%d, txq->status=0x%x, txq->credits: %d--->%d\n", txq->idx, txq->status, txq->credits, txq->credits-1);
txq->credits--;
txq->pkt_pushed[user]++;
if (txq->credits <= 0) {
bl_txq_stop(txq, BL_TXQ_STOP_FULL);
BL_DBG("delete txq from hwq, txq->idx=%d, txq->status=0x%x, txq-credits=%d\n", txq->idx, txq->status, txq->credits);
}
if (txq->push_limit)
txq->push_limit--;
/* Use the SDIO interface to send the whole skb packet. */
/* First, skip the driver-private txhdr: after skb_pull, skb->data points at the real frame. */
skb_pull(skb, sw_txhdr->headroom);
/* Use sw_txhdr to overwrite the SDIO-specific header. */
sw_txhdr->hdr.queue_idx = txq->hwq->id; /* update queue idx to avoid PS mode modifying it */
memcpy((void *)skb->data, &sw_txhdr->hdr, sizeof(struct sdio_hdr));
memcpy((void *)skb->data + sizeof(struct sdio_hdr), &sw_txhdr->desc, sizeof(*desc));
/* Requeue the skb if getting a write port fails. */
ret = bl_get_wr_port(bl_hw, &wr_port);
if (ret) {
printk("get wr port failed ret=%d, requeue skb=%p\n", ret, skb);
skb_push(skb, sw_txhdr->headroom);
bl_txq_queue_skb(skb, txq, bl_hw, false);
return;
}
BL_DBG("bl_tx_push: send skb=%p, skb->len=%d\n", skb, skb->len);
ret = bl_write_data_sync(bl_hw, skb->data, skb->len, bl_hw->plat->io_port + wr_port);
if(ret) {
printk("bl_write_data_sync failed, ret=%d\n", ret);
dev_kfree_skb_any(skb);
return;
}
/*restore skb, txcfm will use this skb*/
skb_push(skb, sw_txhdr->headroom);
ipc_host_txdesc_push(bl_hw->ipc_env, hw_queue, user, skb);
txq->hwq->credits[user]--;
bl_hw->stats.cfm_balance[hw_queue]++;
}
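/*
* On-wire SDIO frame layout produced by bl_tx_push (sketch):
*
* [ struct sdio_hdr ][ struct txdesc_api ][ 802.3 frame ... ]
* ^ skb->data after skb_pull(headroom)
*
* The driver-private bl_txhdr/bl_sw_txhdr area lives in the headroom and
* is restored with skb_push() so that the TX confirmation path can still
* reach it.
*/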
/* 16K aggregation buffer: 2K per packet * up to 8 aggregated packets */
#define BL_TX_DATA_BUF_SIZE_16K (16*1024)
#define BL_SDIO_MP_AGGR_PKT_LIMIT_MAX 8
#define BL_SDIO_MPA_ADDR_BASE 0x1000
//typedef struct _sdio_mpa_tx {
// u8 *buf;
// u32 buf_len;
// u32 pkt_cnt;
// u32 ports;
// u16 start_port;
// u16 mp_wr_info[BL_SDIO_MP_AGGR_PKT_LIMIT_MAX];
//}sdio_mpa_tx;
void bl_tx_multi_pkt_push(struct bl_hw *bl_hw, struct sk_buff_head *sk_list_push)
{
struct sk_buff *skb;
struct bl_txhdr *txhdr;
//struct bl_sw_txhdr *sw_txhdr;
sdio_mpa_tx mpa_tx_data = {0};
int ret;
u32 port;
u32 cmd53_port;
u32 buf_block_len;
int flags = 0;
/* Fixed-size aggregation buffer: 16K (2K * 8 aggregated packets). */
mpa_tx_data.buf = kzalloc(BL_TX_DATA_BUF_SIZE_16K, GFP_KERNEL);
if (mpa_tx_data.buf == NULL) {
printk("alloc for aggr buf failed!\n");
return;
}
/*copy multi skbs into one large buf*/
while ((skb = __skb_dequeue(sk_list_push)) != NULL) {
txhdr = (struct bl_txhdr *)skb->data;
/* RETRY flag is not always set so retest here */
if (txhdr->sw_hdr->txq->nb_retry) {
flags |= BL_PUSH_RETRY;
txhdr->sw_hdr->txq->nb_retry--;
if (txhdr->sw_hdr->txq->nb_retry == 0) {
WARN(skb != txhdr->sw_hdr->txq->last_retry_skb,
"last retry buffer is not the expected one");
txhdr->sw_hdr->txq->last_retry_skb = NULL;
}
} else if (!(flags & BL_PUSH_RETRY)) {
txhdr->sw_hdr->txq->pkt_sent++;
}
txhdr->sw_hdr->hw_queue = txhdr->sw_hdr->txq->hwq->id;
if(txhdr->sw_hdr->bl_sta)
bl_set_more_data_flag(bl_hw, txhdr->sw_hdr);
txhdr->sw_hdr->txq->credits--;
txhdr->sw_hdr->txq->pkt_pushed[0]++;
if(txhdr->sw_hdr->txq->credits <= 0)
bl_txq_stop(txhdr->sw_hdr->txq, BL_TXQ_STOP_FULL);
skb_pull(skb, txhdr->sw_hdr->headroom);
txhdr->sw_hdr->hdr.queue_idx = txhdr->sw_hdr->txq->hwq->id;
memcpy((void *)skb->data, &txhdr->sw_hdr->hdr, sizeof(struct sdio_hdr));
memcpy((void *)skb->data + sizeof(struct sdio_hdr), &txhdr->sw_hdr->desc, sizeof(struct txdesc_api));
buf_block_len = (skb->len + BL_SDIO_BLOCK_SIZE - 1) / BL_SDIO_BLOCK_SIZE;
memcpy((void *)&mpa_tx_data.buf[mpa_tx_data.buf_len], skb->data, buf_block_len * BL_SDIO_BLOCK_SIZE);
mpa_tx_data.buf_len += buf_block_len * BL_SDIO_BLOCK_SIZE;
//printk("###%d: skb->len: %d, pad_len: %d\n", mpa_tx_data.pkt_cnt, skb->len, buf_block_len*BL_SDIO_BLOCK_SIZE);
//mpa_tx_data.mp_wr_info[mpa_tx_data.pkt_cnt] = *(u16 *)skb->data;
bl_get_wr_port(bl_hw, &port);
if(!mpa_tx_data.pkt_cnt) {
mpa_tx_data.start_port = port;
}
if(mpa_tx_data.start_port <= port) {
mpa_tx_data.ports |= (1 << (mpa_tx_data.pkt_cnt));
} else {
mpa_tx_data.ports |= (1 << (mpa_tx_data.pkt_cnt + 1));
}
mpa_tx_data.pkt_cnt++;
skb_push(skb, txhdr->sw_hdr->headroom);
ipc_host_txdesc_push(bl_hw->ipc_env, txhdr->sw_hdr->hw_queue, 0, skb);
txhdr->sw_hdr->txq->hwq->credits[0]--;
bl_hw->stats.cfm_balance[txhdr->sw_hdr->hw_queue]++;
}
/*
printk("mpa_tx_data:ports=0x%02x, start_port=%d, buf=%p, buf_len=%d, pkt_cnt=%d\n",
mpa_tx_data.ports,
mpa_tx_data.start_port,
mpa_tx_data.buf,
mpa_tx_data.buf_len,
mpa_tx_data.pkt_cnt);
*/
/*send packet*/
cmd53_port = (bl_hw->plat->io_port | BL_SDIO_MPA_ADDR_BASE |
(mpa_tx_data.ports << 4)) + mpa_tx_data.start_port;
// printk("cmd53_port=0x%08x\n", cmd53_port);
ret = bl_write_data_sync(bl_hw, mpa_tx_data.buf, mpa_tx_data.buf_len, cmd53_port);
if(ret)
printk("bl_write_data_sync failed, ret=%d\n", ret);
kfree(mpa_tx_data.buf);
mpa_tx_data.buf = NULL;
mpa_tx_data.ports = 0;
mpa_tx_data.start_port = 0;
mpa_tx_data.buf_len = 0;
mpa_tx_data.pkt_cnt = 0;
}
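/*
* Worked example of the aggregate CMD53 address built above (illustrative;
* the exact field layout is firmware-defined): aggregating 3 packets
* starting at write port 5 gives start_port = 5 and ports = 0x7, hence
* cmd53_port = (io_port | 0x1000 | (0x7 << 4)) + 5. The per-packet bitmap
* is shifted by one extra bit when the write port wraps below start_port,
* keeping the bitmap aligned with the ports actually consumed.
*/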
/**
* bl_tx_retry - Push an AMPDU pkt that needs to be retried
*
* @bl_hw: Driver main data
* @skb: pkt to re-push
* @txhdr: tx desc of the pkt to re-push
* @sw_retry: Indicates if the fw decided to retry this buffer
* (i.e. it has never been transmitted over the air)
*
* Called when a packet needs to be repushed to the firmware.
* First update sw descriptor and then queue it in the retry list.
*/
static void bl_tx_retry(struct bl_hw *bl_hw, struct sk_buff *skb,
struct bl_txhdr *txhdr, bool sw_retry)
{
struct bl_sw_txhdr *sw_txhdr = txhdr->sw_hdr;
struct tx_cfm_tag *cfm = &txhdr->hw_hdr.cfm;
struct bl_txq *txq = sw_txhdr->txq;
if (!sw_retry) {
/* update sw desc */
sw_txhdr->desc.host.sn = cfm->sn;
sw_txhdr->desc.host.pn[0] = cfm->pn[0];
sw_txhdr->desc.host.pn[1] = cfm->pn[1];
sw_txhdr->desc.host.pn[2] = cfm->pn[2];
sw_txhdr->desc.host.pn[3] = cfm->pn[3];
sw_txhdr->desc.host.timestamp = cfm->timestamp;
sw_txhdr->desc.host.flags |= TXU_CNTRL_RETRY;
#ifdef CONFIG_BL_AMSDUS_TX
if (sw_txhdr->desc.host.flags & TXU_CNTRL_AMSDU)
bl_hw->stats.amsdus[sw_txhdr->amsdu.nb - 1].failed++;
#endif
}
/* MORE_DATA will be set again if needed when the pkt is repushed */
sw_txhdr->desc.host.flags &= ~TXU_CNTRL_MORE_DATA;
cfm->status.value = 0;
BL_DBG("bl_tx_retry: skb=%p, sn=%u\n", skb, sw_txhdr->desc.host.sn);
BL_DBG("bl_tx_retry: txq->idx=%d, txq->status=0x%x, txq->credits=%d-->%d\n", txq->idx, txq->status, txq->credits, txq->credits+1);
txq->credits++;
spin_lock(&bl_hw->txq_lock);
if (txq->credits > 0)
bl_txq_start(txq, BL_TXQ_STOP_FULL);
spin_unlock(&bl_hw->txq_lock);
/* Queue the buffer */
bl_txq_queue_skb(skb, txq, bl_hw, true);
}
#ifdef CONFIG_BL_AMSDUS_TX
/* return size of subframe (including header) */
static inline int bl_amsdu_subframe_length(struct ethhdr *eth, int eth_len)
{
/* The ethernet header is replaced with an amsdu header of the same size;
only need to check whether an LLC/SNAP header will be added */
int len = eth_len;
if (ntohs(eth->h_proto) >= ETH_P_802_3_MIN) {
len += sizeof(rfc1042_header) + 2;
}
return len;
}
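/*
* Worked example (illustrative): a 1514-byte IPv4 ethernet frame has
* h_proto == 0x0800 >= ETH_P_802_3_MIN, so an 8-byte LLC/SNAP header
* (6-byte rfc1042_header plus a 2-byte ethertype) must be accounted for
* and the subframe length becomes 1514 + 8 == 1522 bytes.
*/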
static inline bool bl_amsdu_is_aggregable(struct sk_buff *skb)
{
/* TODO: add some check on the buffer to see if it can be aggregated? */
return true;
}
/**
* bl_amsdu_del_subframe_header - remove AMSDU header
*
* @amsdu_txhdr: amsdu tx descriptor
*
* Move the ethernet header back to the "beginning" of the data buffer
* (it was moved there by @bl_amsdu_add_subframe_header).
*/
static void bl_amsdu_del_subframe_header(struct bl_amsdu_txhdr *amsdu_txhdr)
{
struct sk_buff *skb = amsdu_txhdr->skb;
struct ethhdr *eth;
u8 *pos;
pos = skb->data;
pos += sizeof(struct bl_amsdu_txhdr);
eth = (struct ethhdr*)pos;
pos += amsdu_txhdr->pad + sizeof(struct ethhdr);
if (ntohs(eth->h_proto) >= ETH_P_802_3_MIN) {
pos += sizeof(rfc1042_header) + 2;
}
memmove(pos, eth, sizeof(*eth));
skb_pull(skb, (pos - skb->data));
}
/**
* bl_amsdu_add_subframe_header - Add the A-MSDU header and link the subframe
*
* @bl_hw: Driver main data
* @skb: Buffer to aggregate
* @sw_txhdr: Tx descriptor for the first A-MSDU subframe
*
* return 0 on success, -1 otherwise
*
* This function adds the A-MSDU header and LLC/SNAP header to the buffer
* and updates the sw_txhdr of the first subframe to link this buffer.
* If an error happens, the buffer will be queued as a normal buffer.
*
*
* Before After
* +-------------+ +-------------+
* | HEADROOM | | HEADROOM |
* | | +-------------+ <- data
* | | | amsdu_txhdr |
* | | | * pad size |
* | | +-------------+
* | | | ETH hdr | keep original eth hdr
* | | | | to restore it once transmitted
* | | +-------------+ <- packet_addr[x]
* | | | Pad |
* | | +-------------+
* data -> +-------------+ | AMSDU HDR |
* | ETH hdr | +-------------+
* | | | LLC/SNAP |
* +-------------+ +-------------+
* | DATA | | DATA |
* | | | |
* +-------------+ +-------------+
*
* Called with tx_lock held
*/
static int bl_amsdu_add_subframe_header(struct bl_hw *bl_hw,
struct sk_buff *skb,
struct bl_sw_txhdr *sw_txhdr)
{
struct bl_amsdu *amsdu = &sw_txhdr->amsdu;
struct bl_amsdu_txhdr *amsdu_txhdr;
struct ethhdr *amsdu_hdr, *eth = (struct ethhdr *)skb->data;
int headroom_need, map_len, msdu_len;
dma_addr_t dma_addr;
u8 *pos, *map_start;
msdu_len = skb->len - sizeof(*eth);
headroom_need = sizeof(*amsdu_txhdr) + amsdu->pad +
sizeof(*amsdu_hdr);
if (ntohs(eth->h_proto) >= ETH_P_802_3_MIN) {
headroom_need += sizeof(rfc1042_header) + 2;
msdu_len += sizeof(rfc1042_header) + 2;
}
/* we should have enough headroom (checked in xmit) */
if (WARN_ON(skb_headroom(skb) < headroom_need)) {
return -1;
}
/* allocate headroom */
pos = skb_push(skb, headroom_need);
amsdu_txhdr = (struct bl_amsdu_txhdr *)pos;
pos += sizeof(*amsdu_txhdr);
/* move eth header */
memmove(pos, eth, sizeof(*eth));
eth = (struct ethhdr *)pos;
pos += sizeof(*eth);
/* Add padding from previous subframe */
map_start = pos;
memset(pos, 0, amsdu->pad);
pos += amsdu->pad;
/* Add AMSDU hdr */
amsdu_hdr = (struct ethhdr *)pos;
memcpy(amsdu_hdr->h_dest, eth->h_dest, ETH_ALEN);
memcpy(amsdu_hdr->h_source, eth->h_source, ETH_ALEN);
amsdu_hdr->h_proto = htons(msdu_len);
pos += sizeof(*amsdu_hdr);
if (ntohs(eth->h_proto) >= ETH_P_802_3_MIN) {
memcpy(pos, rfc1042_header, sizeof(rfc1042_header));
pos += sizeof(rfc1042_header);
}
/* MAP (and sync) memory for DMA */
map_len = msdu_len + amsdu->pad + sizeof(*amsdu_hdr);
dma_addr = dma_map_single(bl_hw->dev, map_start, map_len,
DMA_BIDIRECTIONAL);
if (WARN_ON(dma_mapping_error(bl_hw->dev, dma_addr))) {
pos -= sizeof(*eth);
memmove(pos, eth, sizeof(*eth));
skb_pull(skb, headroom_need);
return -1;
}
/* update amsdu_txhdr */
amsdu_txhdr->map_len = map_len;
amsdu_txhdr->dma_addr = dma_addr;
amsdu_txhdr->skb = skb;
amsdu_txhdr->pad = amsdu->pad;
/* update bl_sw_txhdr (of the first subframe) */
BUG_ON(amsdu->nb != sw_txhdr->desc.host.packet_cnt);
sw_txhdr->desc.host.packet_addr[amsdu->nb] = dma_addr;
sw_txhdr->desc.host.packet_len[amsdu->nb] = map_len;
sw_txhdr->desc.host.packet_cnt++;
amsdu->nb++;
amsdu->pad = AMSDU_PADDING(map_len - amsdu->pad);
list_add_tail(&amsdu_txhdr->list, &amsdu->hdrs);
amsdu->len += map_len;
trace_amsdu_subframe(sw_txhdr);
return 0;
}
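/*
* Note on the pad recurrence above (assuming AMSDU_PADDING() returns the
* bytes needed to reach a 4-byte boundary): amsdu->pad on entry is the
* padding owed by the previous subframe and map_len includes it, so
* (map_len - amsdu->pad) is this subframe's own length. For example, a
* 46-byte subframe leaves AMSDU_PADDING(46) == 2 bytes of pad for its
* successor.
*/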
/**
* bl_amsdu_add_subframe - Add this buffer as an A-MSDU subframe if possible
*
* @bl_hw: Driver main data
* @skb: Buffer to aggregate if possible
* @sta: Destination STA
* @txq: sta's txq used for this buffer
*
* Try to aggregate the buffer into an A-MSDU. If it succeeds, the
* buffer is added as a new A-MSDU subframe with the A-MSDU and LLC/SNAP
* headers added (so the FW won't have to modify this subframe).
*
* To be added as a subframe:
* - the sta must allow amsdu
* - the buffer must be aggregable (to be defined)
* - at least one other aggregable buffer is pending in the queue,
* or an a-msdu (with enough free space) is currently in progress
*
* returns true if the buffer has been added as an A-MSDU subframe, false otherwise
*
*/
static bool bl_amsdu_add_subframe(struct bl_hw *bl_hw, struct sk_buff *skb,
struct bl_sta *sta, struct bl_txq *txq)
{
bool res = false;
struct ethhdr *eth;
/* immediately return if amsdu is not allowed for this sta */
if (!txq->amsdu_len || bl_hw->mod_params->amsdu_maxnb < 2 ||
!bl_amsdu_is_aggregable(skb))
return false;
spin_lock_bh(&bl_hw->tx_lock);
if (txq->amsdu) {
/* aggregation already in progress: add this buffer if enough space is
available, otherwise end the current amsdu */
struct bl_sw_txhdr *sw_txhdr = txq->amsdu;
eth = (struct ethhdr *)(skb->data);
if (((sw_txhdr->amsdu.len + sw_txhdr->amsdu.pad +
bl_amsdu_subframe_length(eth, skb->len)) > txq->amsdu_len) ||
bl_amsdu_add_subframe_header(bl_hw, skb, sw_txhdr)) {
txq->amsdu = NULL;
goto end;
}
if (sw_txhdr->amsdu.nb >= bl_hw->mod_params->amsdu_maxnb) {
bl_hw->stats.amsdus[sw_txhdr->amsdu.nb - 1].done++;
/* max number of subframes reached */
txq->amsdu = NULL;
}
} else {
/* Check if a new amsdu can be started with the previous buffer
(if any) and this one */
struct sk_buff *skb_prev = skb_peek_tail(&txq->sk_list);
struct bl_txhdr *txhdr;
struct bl_sw_txhdr *sw_txhdr;
int len1, len2;
if (!skb_prev || !bl_amsdu_is_aggregable(skb_prev))
goto end;
txhdr = (struct bl_txhdr *)skb_prev->data;
sw_txhdr = txhdr->sw_hdr;
if ((sw_txhdr->amsdu.len) ||
(sw_txhdr->desc.host.flags & TXU_CNTRL_RETRY))
/* previous buffer is already a complete amsdu or a retry */
goto end;
eth = (struct ethhdr *)(skb_prev->data + sw_txhdr->headroom);
len1 = bl_amsdu_subframe_length(eth, (sw_txhdr->frame_len +
sizeof(struct ethhdr)));
eth = (struct ethhdr *)(skb->data);
len2 = bl_amsdu_subframe_length(eth, skb->len);
if (len1 + AMSDU_PADDING(len1) + len2 > txq->amsdu_len)
/* not enough space to aggregate those two buffers */
goto end;
/* Add subframe header.
Note: the fw will take care of adding the AMSDU header for the first
subframe while generating the 802.11 MAC header */
INIT_LIST_HEAD(&sw_txhdr->amsdu.hdrs);
sw_txhdr->amsdu.len = len1;
sw_txhdr->amsdu.nb = 1;
sw_txhdr->amsdu.pad = AMSDU_PADDING(len1);
if (bl_amsdu_add_subframe_header(bl_hw, skb, sw_txhdr))
goto end;
sw_txhdr->desc.host.flags |= TXU_CNTRL_AMSDU;
if (sw_txhdr->amsdu.nb < bl_hw->mod_params->amsdu_maxnb)
txq->amsdu = sw_txhdr;
else
bl_hw->stats.amsdus[sw_txhdr->amsdu.nb - 1].done++;
}
res = true;
end:
spin_unlock_bh(&bl_hw->tx_lock);
return res;
}
#endif /* CONFIG_BL_AMSDUS_TX */
int bl_requeue_multicast_skb(struct sk_buff *skb, struct bl_vif *bl_vif)
{
struct bl_hw *bl_hw = bl_vif->bl_hw;
struct bl_txhdr *txhdr;
struct bl_sw_txhdr *sw_txhdr;
struct ethhdr *eth;
struct ethhdr tmp_eth;
struct txdesc_api *desc;
struct bl_sta *sta;
struct bl_txq *txq;
int headroom;
int hdr_pads;
u16 frame_len;
u16 frame_oft;
u8 tid;
BL_DBG(BL_FN_ENTRY_STR);
BL_DBG("1111bl_requeue_multicast_skb: skb=%p, skb->priority=%d\n", skb, skb->priority);
/* Get the STA id and TID information */
sta = bl_get_tx_info(bl_vif, skb, &tid);