Commit 3fd172d3 authored by Arend van Spriel, committed by John W. Linville

brcm80211: smac: use sk_buff list for handling frames in receive path

In the receive path the frames are obtained from the DMA using
multiple sk_buff entries that were linked through the skb next pointer.
This has been changed so that the code now uses sk_buff lists and the
skb_queue functions instead.
Reported-by: Johannes Berg <johannes@sipsolutions.net>
Reviewed-by: Pieter-Paul Giesberts <pieterpg@broadcom.com>
Reviewed-by: Alwin Beukers <alwin@broadcom.com>
Signed-off-by: Arend van Spriel <arend@broadcom.com>
Signed-off-by: John W. Linville <linville@tuxdriver.com>
Parent 81d2e2d1
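
For context, here is a minimal sketch (illustrative only, not part of this commit) of the list-based pattern the receive path switches to. It uses only standard helpers from <linux/skbuff.h>; the example_* function names and the buffer size are hypothetical.

#include <linux/skbuff.h>

/* producer: gather a burst of buffers onto a local list, then hand the
 * whole burst to the caller's list in one splice operation
 */
static int example_gather(struct sk_buff_head *out_list)
{
	struct sk_buff_head burst;
	struct sk_buff *skb;
	int i, cnt = 0;

	skb_queue_head_init(&burst);
	for (i = 0; i < 4; i++) {
		skb = dev_alloc_skb(2048);	/* hypothetical buffer size */
		if (!skb)
			break;
		skb_queue_tail(&burst, skb);
		cnt++;
	}
	skb_queue_splice_tail(&burst, out_list);
	return cnt;
}

/* consumer: the _safe walk allows entries to be unlinked (and freed)
 * while iterating
 */
static void example_drain(struct sk_buff_head *list)
{
	struct sk_buff *skb, *tmp;

	skb_queue_walk_safe(list, skb, tmp) {
		skb_unlink(skb, list);
		dev_kfree_skb(skb);
	}
}

Collecting buffers on a struct sk_buff_head keeps the list bookkeeping (prev/next pointers, queue length) inside the skb_queue helpers instead of being open-coded through skb->next. The diff below applies this pattern to the brcmsmac DMA receive routine dma_rx(), its prototype, and its caller brcms_b_recv().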
@@ -14,7 +14,6 @@
  * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
  */
 #include <linux/slab.h>
-#include <linux/skbuff.h>
 #include <linux/delay.h>
 #include <linux/pci.h>
@@ -901,7 +900,7 @@ static struct sk_buff *_dma_getnextrxp(struct dma_info *di, bool forceall)
 /*
  * !! rx entry routine
- * returns a pointer to the next frame received, or NULL if there are no more
+ * returns the number packages in the next frame, or 0 if there are no more
  * if DMA_CTRL_RXMULTI is defined, DMA scattering(multiple buffers) is
  * supported with pkts chain
  * otherwise, it's treated as giant pkt and will be tossed.
@@ -909,38 +908,40 @@ static struct sk_buff *_dma_getnextrxp(struct dma_info *di, bool forceall)
  * buffer data. After it reaches the max size of buffer, the data continues
  * in next DMA descriptor buffer WITHOUT DMA header
  */
-struct sk_buff *dma_rx(struct dma_pub *pub)
+int dma_rx(struct dma_pub *pub, struct sk_buff_head *skb_list)
 {
 	struct dma_info *di = (struct dma_info *)pub;
-	struct sk_buff *p, *head, *tail;
+	struct sk_buff_head dma_frames;
+	struct sk_buff *p, *next;
 	uint len;
 	uint pkt_len;
 	int resid = 0;
+	int pktcnt = 1;
 
+	skb_queue_head_init(&dma_frames);
  next_frame:
-	head = _dma_getnextrxp(di, false);
-	if (head == NULL)
-		return NULL;
+	p = _dma_getnextrxp(di, false);
+	if (p == NULL)
+		return 0;
 
-	len = le16_to_cpu(*(__le16 *) (head->data));
+	len = le16_to_cpu(*(__le16 *) (p->data));
 	DMA_TRACE(("%s: dma_rx len %d\n", di->name, len));
-	dma_spin_for_len(len, head);
+	dma_spin_for_len(len, p);
 
 	/* set actual length */
 	pkt_len = min((di->rxoffset + len), di->rxbufsize);
-	__skb_trim(head, pkt_len);
+	__skb_trim(p, pkt_len);
+	skb_queue_tail(&dma_frames, p);
 	resid = len - (di->rxbufsize - di->rxoffset);
 
 	/* check for single or multi-buffer rx */
 	if (resid > 0) {
-		tail = head;
 		while ((resid > 0) && (p = _dma_getnextrxp(di, false))) {
-			tail->next = p;
 			pkt_len = min_t(uint, resid, di->rxbufsize);
 			__skb_trim(p, pkt_len);
-			tail = p;
+			skb_queue_tail(&dma_frames, p);
 			resid -= di->rxbufsize;
+			pktcnt++;
 		}
 
 #ifdef BCMDBG
@@ -959,13 +960,18 @@ struct sk_buff *dma_rx(struct dma_pub *pub)
 		if ((di->dma.dmactrlflags & DMA_CTRL_RXMULTI) == 0) {
 			DMA_ERROR(("%s: dma_rx: bad frame length (%d)\n",
 				   di->name, len));
-			brcmu_pkt_buf_free_skb(head);
+			skb_queue_walk_safe(&dma_frames, p, next) {
+				skb_unlink(p, &dma_frames);
+				brcmu_pkt_buf_free_skb(p);
+			}
 			di->dma.rxgiants++;
+			pktcnt = 1;
 			goto next_frame;
 		}
 	}
 
-	return head;
+	skb_queue_splice_tail(&dma_frames, skb_list);
+	return pktcnt;
 }
 
 static bool dma64_rxidle(struct dma_info *di)
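To make the reworked contract concrete: dma_rx() now appends the buffer(s) of one received frame to the caller-supplied list and returns how many buffers it queued (0 when the ring is empty). Below is a hedged caller-side sketch; the rx_process_frame() handler is hypothetical, and the real caller additionally bounds the loop (see RXBND in the main.c hunk further down).

#include <linux/skbuff.h>
#include "dma.h"	/* brcmsmac header providing struct dma_pub and dma_rx() */

static void rx_process_frame(struct sk_buff *skb);	/* hypothetical handler */

static void example_rx_poll(struct dma_pub *pub)
{
	struct sk_buff_head frames;
	struct sk_buff *skb, *tmp;

	skb_queue_head_init(&frames);

	/* each call pulls one frame (possibly several buffers) off the ring */
	while (dma_rx(pub, &frames) > 0)
		;

	/* drain the accumulated buffers */
	skb_queue_walk_safe(&frames, skb, tmp) {
		skb_unlink(skb, &frames);
		rx_process_frame(skb);
	}
}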
@@ -18,6 +18,7 @@
 #define	_BRCM_DMA_H_
 
 #include <linux/delay.h>
+#include <linux/skbuff.h>
 #include "types.h"		/* forward structure declarations */
 
 /* map/unmap direction */
@@ -80,7 +81,7 @@ extern struct dma_pub *dma_attach(char *name, struct si_pub *sih,
 			    uint nrxpost, uint rxoffset, uint *msg_level);
 
 void dma_rxinit(struct dma_pub *pub);
-struct sk_buff *dma_rx(struct dma_pub *pub);
+int dma_rx(struct dma_pub *pub, struct sk_buff_head *skb_list);
 bool dma_rxfill(struct dma_pub *pub);
 bool dma_rxreset(struct dma_pub *pub);
 bool dma_txreset(struct dma_pub *pub);
@@ -8115,21 +8115,17 @@ static bool
 brcms_b_recv(struct brcms_hardware *wlc_hw, uint fifo, bool bound)
 {
 	struct sk_buff *p;
-	struct sk_buff *head = NULL;
-	struct sk_buff *tail = NULL;
+	struct sk_buff *next = NULL;
+	struct sk_buff_head recv_frames;
 	uint n = 0;
 	uint bound_limit = bound ? RXBND : -1;
 
 	BCMMSG(wlc_hw->wlc->wiphy, "wl%d\n", wlc_hw->unit);
-	/* gather received frames */
-	while ((p = dma_rx(wlc_hw->di[fifo]))) {
+	skb_queue_head_init(&recv_frames);
 
-		if (!tail)
-			head = tail = p;
-		else {
-			tail->prev = p;
-			tail = p;
-		}
+	/* gather received frames */
+	while (dma_rx(wlc_hw->di[fifo], &recv_frames)) {
 
 		/* !give others some time to run! */
 		if (++n >= bound_limit)
@@ -8140,12 +8136,11 @@ brcms_b_recv(struct brcms_hardware *wlc_hw, uint fifo, bool bound)
 	dma_rxfill(wlc_hw->di[fifo]);
 
 	/* process each frame */
-	while ((p = head) != NULL) {
+	skb_queue_walk_safe(&recv_frames, p, next) {
 		struct d11rxhdr_le *rxh_le;
 		struct d11rxhdr *rxh;
 
-		head = head->prev;
-		p->prev = NULL;
+		skb_unlink(p, &recv_frames);
 
 		rxh_le = (struct d11rxhdr_le *)p->data;
 		rxh = (struct d11rxhdr *)p->data;