Commit 69647fab authored by Jakub Kicinski, committed by Kalle Valo

mt7601u: unify paged and non-paged RX dma paths

Signed-off-by: Jakub Kicinski <kubakici@wp.pl>
Signed-off-by: Kalle Valo <kvalo@codeaurora.org>
Parent 6896f4fb
@@ -34,56 +34,28 @@ static unsigned int ieee80211_get_hdrlen_from_buf(const u8 *data, unsigned len)
 static struct sk_buff *
 mt7601u_rx_skb_from_seg(struct mt7601u_dev *dev, struct mt7601u_rxwi *rxwi,
-			u8 *data, u32 seg_len)
+			void *data, u32 seg_len, u32 truesize, struct page *p)
 {
 	struct sk_buff *skb;
 	u32 true_len;
+	int hdr_len, copy, frag;
 
-	if (rxwi->rxinfo & cpu_to_le32(MT_RXINFO_L2PAD))
-		seg_len -= 2;
-
-	skb = alloc_skb(seg_len, GFP_ATOMIC);
-	if (!skb)
-		return NULL;
-
-	if (rxwi->rxinfo & cpu_to_le32(MT_RXINFO_L2PAD)) {
-		int hdr_len = ieee80211_get_hdrlen_from_buf(data, seg_len);
-
-		memcpy(skb_put(skb, hdr_len), data, hdr_len);
-		data += hdr_len + 2;
-		seg_len -= hdr_len;
-	}
-
-	memcpy(skb_put(skb, seg_len), data, seg_len);
-
-	true_len = mt76_mac_process_rx(dev, skb, skb->data, rxwi);
-	skb_trim(skb, true_len);
-
-	return skb;
-}
-
-static struct sk_buff *
-mt7601u_rx_skb_from_seg_paged(struct mt7601u_dev *dev,
-			      struct mt7601u_rxwi *rxwi, void *data,
-			      u32 seg_len, u32 truesize, struct page *p)
-{
-	unsigned int hdr_len = ieee80211_get_hdrlen_from_buf(data, seg_len);
-	unsigned int true_len, copy, frag;
-	struct sk_buff *skb;
-
-	skb = alloc_skb(128, GFP_ATOMIC);
+	skb = alloc_skb(p ? 128 : seg_len, GFP_ATOMIC);
 	if (!skb)
 		return NULL;
 
 	true_len = mt76_mac_process_rx(dev, skb, data, rxwi);
+	hdr_len = ieee80211_get_hdrlen_from_buf(data, true_len);
 
 	if (rxwi->rxinfo & cpu_to_le32(MT_RXINFO_L2PAD)) {
 		memcpy(skb_put(skb, hdr_len), data, hdr_len);
 		data += hdr_len + 2;
 		true_len -= hdr_len;
 		hdr_len = 0;
 	}
 
+	/* If not doing paged RX allocated skb will always have enough space */
 	copy = (true_len <= skb_tailroom(skb)) ? true_len : hdr_len + 8;
 	frag = true_len - copy;
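
The first hunk stops at the copy/frag split; the code that consumes those two values is elided behind the next hunk marker. As a hedged sketch only (the use of skb_add_rx_frag() and get_page() here is an assumption about the elided body, though both are standard kernel APIs), the tail of the unified function would look roughly like:

	/* Sketch of the elided tail: copy the linear head, then attach
	 * the remainder of the page as a fragment (assumed, not shown
	 * in this diff). */
	memcpy(skb_put(skb, copy), data, copy);
	data += copy;

	if (frag) {
		skb_add_rx_frag(skb, 0, p, data - page_address(p),
				frag, truesize);
		get_page(p);	/* the skb keeps its own page reference */
	}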
@@ -100,7 +72,7 @@ mt7601u_rx_skb_from_seg_paged(struct mt7601u_dev *dev,
 }
 
 static void mt7601u_rx_process_seg(struct mt7601u_dev *dev, u8 *data,
-				   u32 seg_len, struct page *p, bool paged)
+				   u32 seg_len, struct page *p)
 {
 	struct sk_buff *skb;
 	struct mt7601u_rxwi *rxwi;
@@ -126,11 +98,7 @@ static void mt7601u_rx_process_seg(struct mt7601u_dev *dev, u8 *data,
 
 	trace_mt_rx(dev, rxwi, fce_info);
 
-	if (paged)
-		skb = mt7601u_rx_skb_from_seg_paged(dev, rxwi, data, seg_len,
-						    truesize, p);
-	else
-		skb = mt7601u_rx_skb_from_seg(dev, rxwi, data, seg_len);
+	skb = mt7601u_rx_skb_from_seg(dev, rxwi, data, seg_len, truesize, p);
 	if (!skb)
 		return;
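
With the if (paged) dispatch gone, the page pointer alone selects the RX strategy at this single call site. A comment-only recap of the two cases, inferred from the hunks above (the constants 128 and hdr_len + 8 come straight from the diff):

	/*
	 * p == NULL: alloc_skb(seg_len) leaves tailroom >= true_len, so
	 *            copy == true_len and frag == 0 -- the whole frame is
	 *            memcpy'd (the old non-paged path).
	 * p != NULL: alloc_skb(128) holds roughly the 802.11 header plus
	 *            8 bytes; the payload stays in the page and rides as
	 *            an skb fragment (the old paged path).
	 */
	skb = mt7601u_rx_skb_from_seg(dev, rxwi, data, seg_len, truesize, p);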
@@ -158,23 +126,17 @@ mt7601u_rx_process_entry(struct mt7601u_dev *dev, struct mt7601u_dma_buf_rx *e)
 	u32 seg_len, data_len = e->urb->actual_length;
 	u8 *data = page_address(e->p);
 	struct page *new_p = NULL;
-	bool paged = true;
 	int cnt = 0;
 
 	if (!test_bit(MT7601U_STATE_INITIALIZED, &dev->state))
 		return;
 
 	/* Copy if there is very little data in the buffer. */
-	if (data_len < 512) {
-		paged = false;
-	} else {
+	if (data_len > 512)
 		new_p = dev_alloc_pages(MT_RX_ORDER);
-		if (!new_p)
-			paged = false;
-	}
 
 	while ((seg_len = mt7601u_rx_next_seg_len(data, data_len))) {
-		mt7601u_rx_process_seg(dev, data, seg_len, e->p, paged);
+		mt7601u_rx_process_seg(dev, data, seg_len, new_p ? e->p : NULL);
 
 		data_len -= seg_len;
 		data += seg_len;
@@ -182,9 +144,9 @@ mt7601u_rx_process_entry(struct mt7601u_dev *dev, struct mt7601u_dma_buf_rx *e)
 	}
 
 	if (cnt > 1)
-		trace_mt_rx_dma_aggr(dev, cnt, paged);
+		trace_mt_rx_dma_aggr(dev, cnt, !!new_p);
 
-	if (paged) {
+	if (new_p) {
 		/* we have one extra ref from the allocator */
 		__free_pages(e->p, MT_RX_ORDER);
......
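
The diff is cut off just after __free_pages(). For orientation only, a hedged guess at how the page-recycling branch completes (the e->p = new_p swap is an assumption, not visible above):

	if (new_p) {
		/* we have one extra ref from the allocator */
		__free_pages(e->p, MT_RX_ORDER);

		/* assumed continuation: give the fresh page to the URB so
		 * the skbs built above keep the old page's payload */
		e->p = new_p;
	}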