提交 b753c5a7 编写于 作者: J Jakub Kicinski

Merge branch 'r8152-RX-improve'

Hayes says:

====================
v2:
For patch #2, replace list_for_each_safe with list_for_each_entry_safe.
Remove unlikely in WARN_ON. Adjust the coding style.

For patch #4, replace list_for_each_safe with list_for_each_entry_safe.
Remove "else" after "continue".

For patch #5, replace sysfs with ethtool to modify rx_copybreak and
rx_pending.

v1:
The different chips use different rx buffer size.

Use skb_add_rx_frag() to reduce memory copy for RX.
====================
Signed-off-by: Jakub Kicinski <jakub.kicinski@netronome.com>
...@@ -22,10 +22,11 @@ ...@@ -22,10 +22,11 @@
#include <linux/mdio.h> #include <linux/mdio.h>
#include <linux/usb/cdc.h> #include <linux/usb/cdc.h>
#include <linux/suspend.h> #include <linux/suspend.h>
#include <linux/atomic.h>
#include <linux/acpi.h> #include <linux/acpi.h>
/* Information for net-next */ /* Information for net-next */
#define NETNEXT_VERSION "09" #define NETNEXT_VERSION "10"
/* Information for net */ /* Information for net */
#define NET_VERSION "10" #define NET_VERSION "10"
...@@ -583,6 +584,9 @@ enum rtl_register_content { ...@@ -583,6 +584,9 @@ enum rtl_register_content {
#define TX_ALIGN 4 #define TX_ALIGN 4
#define RX_ALIGN 8 #define RX_ALIGN 8
#define RTL8152_RX_MAX_PENDING 4096
#define RTL8152_RXFG_HEADSZ 256
#define INTR_LINK 0x0004 #define INTR_LINK 0x0004
#define RTL8152_REQT_READ 0xc0 #define RTL8152_REQT_READ 0xc0
...@@ -694,11 +698,11 @@ struct tx_desc { ...@@ -694,11 +698,11 @@ struct tx_desc {
struct r8152; struct r8152;
struct rx_agg { struct rx_agg {
struct list_head list; struct list_head list, info_list;
struct urb *urb; struct urb *urb;
struct r8152 *context; struct r8152 *context;
struct page *page;
void *buffer; void *buffer;
void *head;
}; };
struct tx_agg { struct tx_agg {
...@@ -719,7 +723,7 @@ struct r8152 { ...@@ -719,7 +723,7 @@ struct r8152 {
struct net_device *netdev; struct net_device *netdev;
struct urb *intr_urb; struct urb *intr_urb;
struct tx_agg tx_info[RTL8152_MAX_TX]; struct tx_agg tx_info[RTL8152_MAX_TX];
struct rx_agg rx_info[RTL8152_MAX_RX]; struct list_head rx_info, rx_used;
struct list_head rx_done, tx_free; struct list_head rx_done, tx_free;
struct sk_buff_head tx_queue, rx_queue; struct sk_buff_head tx_queue, rx_queue;
spinlock_t rx_lock, tx_lock; spinlock_t rx_lock, tx_lock;
...@@ -744,11 +748,17 @@ struct r8152 { ...@@ -744,11 +748,17 @@ struct r8152 {
void (*autosuspend_en)(struct r8152 *tp, bool enable); void (*autosuspend_en)(struct r8152 *tp, bool enable);
} rtl_ops; } rtl_ops;
atomic_t rx_count;
int intr_interval; int intr_interval;
u32 saved_wolopts; u32 saved_wolopts;
u32 msg_enable; u32 msg_enable;
u32 tx_qlen; u32 tx_qlen;
u32 coalesce; u32 coalesce;
u32 rx_buf_sz;
u32 rx_copybreak;
u32 rx_pending;
u16 ocp_base; u16 ocp_base;
u16 speed; u16 speed;
u8 *intr_buff; u8 *intr_buff;
...@@ -1467,18 +1477,72 @@ static inline void *tx_agg_align(void *data) ...@@ -1467,18 +1477,72 @@ static inline void *tx_agg_align(void *data)
return (void *)ALIGN((uintptr_t)data, TX_ALIGN); return (void *)ALIGN((uintptr_t)data, TX_ALIGN);
} }
static void free_rx_agg(struct r8152 *tp, struct rx_agg *agg)
{
list_del(&agg->info_list);
usb_free_urb(agg->urb);
put_page(agg->page);
kfree(agg);
atomic_dec(&tp->rx_count);
}
static struct rx_agg *alloc_rx_agg(struct r8152 *tp, gfp_t mflags)
{
struct net_device *netdev = tp->netdev;
int node = netdev->dev.parent ? dev_to_node(netdev->dev.parent) : -1;
unsigned int order = get_order(tp->rx_buf_sz);
struct rx_agg *rx_agg;
unsigned long flags;
rx_agg = kmalloc_node(sizeof(*rx_agg), mflags, node);
if (!rx_agg)
return NULL;
rx_agg->page = alloc_pages(mflags | __GFP_COMP, order);
if (!rx_agg->page)
goto free_rx;
rx_agg->buffer = page_address(rx_agg->page);
rx_agg->urb = usb_alloc_urb(0, mflags);
if (!rx_agg->urb)
goto free_buf;
rx_agg->context = tp;
INIT_LIST_HEAD(&rx_agg->list);
INIT_LIST_HEAD(&rx_agg->info_list);
spin_lock_irqsave(&tp->rx_lock, flags);
list_add_tail(&rx_agg->info_list, &tp->rx_info);
spin_unlock_irqrestore(&tp->rx_lock, flags);
atomic_inc(&tp->rx_count);
return rx_agg;
free_buf:
__free_pages(rx_agg->page, order);
free_rx:
kfree(rx_agg);
return NULL;
}
static void free_all_mem(struct r8152 *tp) static void free_all_mem(struct r8152 *tp)
{ {
struct rx_agg *agg, *agg_next;
unsigned long flags;
int i; int i;
for (i = 0; i < RTL8152_MAX_RX; i++) { spin_lock_irqsave(&tp->rx_lock, flags);
usb_free_urb(tp->rx_info[i].urb);
tp->rx_info[i].urb = NULL;
kfree(tp->rx_info[i].buffer); list_for_each_entry_safe(agg, agg_next, &tp->rx_info, info_list)
tp->rx_info[i].buffer = NULL; free_rx_agg(tp, agg);
tp->rx_info[i].head = NULL;
} spin_unlock_irqrestore(&tp->rx_lock, flags);
WARN_ON(atomic_read(&tp->rx_count));
for (i = 0; i < RTL8152_MAX_TX; i++) { for (i = 0; i < RTL8152_MAX_TX; i++) {
usb_free_urb(tp->tx_info[i].urb); usb_free_urb(tp->tx_info[i].urb);
...@@ -1502,46 +1566,28 @@ static int alloc_all_mem(struct r8152 *tp) ...@@ -1502,46 +1566,28 @@ static int alloc_all_mem(struct r8152 *tp)
struct usb_interface *intf = tp->intf; struct usb_interface *intf = tp->intf;
struct usb_host_interface *alt = intf->cur_altsetting; struct usb_host_interface *alt = intf->cur_altsetting;
struct usb_host_endpoint *ep_intr = alt->endpoint + 2; struct usb_host_endpoint *ep_intr = alt->endpoint + 2;
struct urb *urb;
int node, i; int node, i;
u8 *buf;
node = netdev->dev.parent ? dev_to_node(netdev->dev.parent) : -1; node = netdev->dev.parent ? dev_to_node(netdev->dev.parent) : -1;
spin_lock_init(&tp->rx_lock); spin_lock_init(&tp->rx_lock);
spin_lock_init(&tp->tx_lock); spin_lock_init(&tp->tx_lock);
INIT_LIST_HEAD(&tp->rx_info);
INIT_LIST_HEAD(&tp->tx_free); INIT_LIST_HEAD(&tp->tx_free);
INIT_LIST_HEAD(&tp->rx_done); INIT_LIST_HEAD(&tp->rx_done);
skb_queue_head_init(&tp->tx_queue); skb_queue_head_init(&tp->tx_queue);
skb_queue_head_init(&tp->rx_queue); skb_queue_head_init(&tp->rx_queue);
atomic_set(&tp->rx_count, 0);
for (i = 0; i < RTL8152_MAX_RX; i++) { for (i = 0; i < RTL8152_MAX_RX; i++) {
buf = kmalloc_node(agg_buf_sz, GFP_KERNEL, node); if (!alloc_rx_agg(tp, GFP_KERNEL))
if (!buf)
goto err1;
if (buf != rx_agg_align(buf)) {
kfree(buf);
buf = kmalloc_node(agg_buf_sz + RX_ALIGN, GFP_KERNEL,
node);
if (!buf)
goto err1;
}
urb = usb_alloc_urb(0, GFP_KERNEL);
if (!urb) {
kfree(buf);
goto err1; goto err1;
}
INIT_LIST_HEAD(&tp->rx_info[i].list);
tp->rx_info[i].context = tp;
tp->rx_info[i].urb = urb;
tp->rx_info[i].buffer = buf;
tp->rx_info[i].head = rx_agg_align(buf);
} }
for (i = 0; i < RTL8152_MAX_TX; i++) { for (i = 0; i < RTL8152_MAX_TX; i++) {
struct urb *urb;
u8 *buf;
buf = kmalloc_node(agg_buf_sz, GFP_KERNEL, node); buf = kmalloc_node(agg_buf_sz, GFP_KERNEL, node);
if (!buf) if (!buf)
goto err1; goto err1;
...@@ -1907,6 +1953,46 @@ static u8 r8152_rx_csum(struct r8152 *tp, struct rx_desc *rx_desc) ...@@ -1907,6 +1953,46 @@ static u8 r8152_rx_csum(struct r8152 *tp, struct rx_desc *rx_desc)
return checksum; return checksum;
} }
static inline bool rx_count_exceed(struct r8152 *tp)
{
return atomic_read(&tp->rx_count) > RTL8152_MAX_RX;
}
static inline int agg_offset(struct rx_agg *agg, void *addr)
{
return (int)(addr - agg->buffer);
}
static struct rx_agg *rtl_get_free_rx(struct r8152 *tp, gfp_t mflags)
{
struct rx_agg *agg, *agg_next, *agg_free = NULL;
unsigned long flags;
spin_lock_irqsave(&tp->rx_lock, flags);
list_for_each_entry_safe(agg, agg_next, &tp->rx_used, list) {
if (page_count(agg->page) == 1) {
if (!agg_free) {
list_del_init(&agg->list);
agg_free = agg;
continue;
}
if (rx_count_exceed(tp)) {
list_del_init(&agg->list);
free_rx_agg(tp, agg);
}
break;
}
}
spin_unlock_irqrestore(&tp->rx_lock, flags);
if (!agg_free && atomic_read(&tp->rx_count) < tp->rx_pending)
agg_free = alloc_rx_agg(tp, mflags);
return agg_free;
}
static int rx_bottom(struct r8152 *tp, int budget) static int rx_bottom(struct r8152 *tp, int budget)
{ {
unsigned long flags; unsigned long flags;
...@@ -1942,7 +2028,7 @@ static int rx_bottom(struct r8152 *tp, int budget) ...@@ -1942,7 +2028,7 @@ static int rx_bottom(struct r8152 *tp, int budget)
list_for_each_safe(cursor, next, &rx_queue) { list_for_each_safe(cursor, next, &rx_queue) {
struct rx_desc *rx_desc; struct rx_desc *rx_desc;
struct rx_agg *agg; struct rx_agg *agg, *agg_free;
int len_used = 0; int len_used = 0;
struct urb *urb; struct urb *urb;
u8 *rx_data; u8 *rx_data;
...@@ -1954,14 +2040,16 @@ static int rx_bottom(struct r8152 *tp, int budget) ...@@ -1954,14 +2040,16 @@ static int rx_bottom(struct r8152 *tp, int budget)
if (urb->actual_length < ETH_ZLEN) if (urb->actual_length < ETH_ZLEN)
goto submit; goto submit;
rx_desc = agg->head; agg_free = rtl_get_free_rx(tp, GFP_ATOMIC);
rx_data = agg->head;
rx_desc = agg->buffer;
rx_data = agg->buffer;
len_used += sizeof(struct rx_desc); len_used += sizeof(struct rx_desc);
while (urb->actual_length > len_used) { while (urb->actual_length > len_used) {
struct net_device *netdev = tp->netdev; struct net_device *netdev = tp->netdev;
struct net_device_stats *stats = &netdev->stats; struct net_device_stats *stats = &netdev->stats;
unsigned int pkt_len; unsigned int pkt_len, rx_frag_head_sz;
struct sk_buff *skb; struct sk_buff *skb;
/* limite the skb numbers for rx_queue */ /* limite the skb numbers for rx_queue */
...@@ -1979,22 +2067,37 @@ static int rx_bottom(struct r8152 *tp, int budget) ...@@ -1979,22 +2067,37 @@ static int rx_bottom(struct r8152 *tp, int budget)
pkt_len -= ETH_FCS_LEN; pkt_len -= ETH_FCS_LEN;
rx_data += sizeof(struct rx_desc); rx_data += sizeof(struct rx_desc);
skb = napi_alloc_skb(napi, pkt_len); if (!agg_free || tp->rx_copybreak > pkt_len)
rx_frag_head_sz = pkt_len;
else
rx_frag_head_sz = tp->rx_copybreak;
skb = napi_alloc_skb(napi, rx_frag_head_sz);
if (!skb) { if (!skb) {
stats->rx_dropped++; stats->rx_dropped++;
goto find_next_rx; goto find_next_rx;
} }
skb->ip_summed = r8152_rx_csum(tp, rx_desc); skb->ip_summed = r8152_rx_csum(tp, rx_desc);
memcpy(skb->data, rx_data, pkt_len); memcpy(skb->data, rx_data, rx_frag_head_sz);
skb_put(skb, pkt_len); skb_put(skb, rx_frag_head_sz);
pkt_len -= rx_frag_head_sz;
rx_data += rx_frag_head_sz;
if (pkt_len) {
skb_add_rx_frag(skb, 0, agg->page,
agg_offset(agg, rx_data),
pkt_len,
SKB_DATA_ALIGN(pkt_len));
get_page(agg->page);
}
skb->protocol = eth_type_trans(skb, netdev); skb->protocol = eth_type_trans(skb, netdev);
rtl_rx_vlan_tag(rx_desc, skb); rtl_rx_vlan_tag(rx_desc, skb);
if (work_done < budget) { if (work_done < budget) {
napi_gro_receive(napi, skb); napi_gro_receive(napi, skb);
work_done++; work_done++;
stats->rx_packets++; stats->rx_packets++;
stats->rx_bytes += pkt_len; stats->rx_bytes += skb->len;
} else { } else {
__skb_queue_tail(&tp->rx_queue, skb); __skb_queue_tail(&tp->rx_queue, skb);
} }
...@@ -2002,10 +2105,24 @@ static int rx_bottom(struct r8152 *tp, int budget) ...@@ -2002,10 +2105,24 @@ static int rx_bottom(struct r8152 *tp, int budget)
find_next_rx: find_next_rx:
rx_data = rx_agg_align(rx_data + pkt_len + ETH_FCS_LEN); rx_data = rx_agg_align(rx_data + pkt_len + ETH_FCS_LEN);
rx_desc = (struct rx_desc *)rx_data; rx_desc = (struct rx_desc *)rx_data;
len_used = (int)(rx_data - (u8 *)agg->head); len_used = agg_offset(agg, rx_data);
len_used += sizeof(struct rx_desc); len_used += sizeof(struct rx_desc);
} }
WARN_ON(!agg_free && page_count(agg->page) > 1);
if (agg_free) {
spin_lock_irqsave(&tp->rx_lock, flags);
if (page_count(agg->page) == 1) {
list_add(&agg_free->list, &tp->rx_used);
} else {
list_add_tail(&agg->list, &tp->rx_used);
agg = agg_free;
urb = agg->urb;
}
spin_unlock_irqrestore(&tp->rx_lock, flags);
}
submit: submit:
if (!ret) { if (!ret) {
ret = r8152_submit_rx(tp, agg, GFP_ATOMIC); ret = r8152_submit_rx(tp, agg, GFP_ATOMIC);
...@@ -2113,7 +2230,7 @@ int r8152_submit_rx(struct r8152 *tp, struct rx_agg *agg, gfp_t mem_flags) ...@@ -2113,7 +2230,7 @@ int r8152_submit_rx(struct r8152 *tp, struct rx_agg *agg, gfp_t mem_flags)
return 0; return 0;
usb_fill_bulk_urb(agg->urb, tp->udev, usb_rcvbulkpipe(tp->udev, 1), usb_fill_bulk_urb(agg->urb, tp->udev, usb_rcvbulkpipe(tp->udev, 1),
agg->head, agg_buf_sz, agg->buffer, tp->rx_buf_sz,
(usb_complete_t)read_bulk_callback, agg); (usb_complete_t)read_bulk_callback, agg);
ret = usb_submit_urb(agg->urb, mem_flags); ret = usb_submit_urb(agg->urb, mem_flags);
...@@ -2330,44 +2447,80 @@ static void rxdy_gated_en(struct r8152 *tp, bool enable) ...@@ -2330,44 +2447,80 @@ static void rxdy_gated_en(struct r8152 *tp, bool enable)
static int rtl_start_rx(struct r8152 *tp) static int rtl_start_rx(struct r8152 *tp)
{ {
int i, ret = 0; struct rx_agg *agg, *agg_next;
struct list_head tmp_list;
unsigned long flags;
int ret = 0, i = 0;
INIT_LIST_HEAD(&tp->rx_done); INIT_LIST_HEAD(&tmp_list);
for (i = 0; i < RTL8152_MAX_RX; i++) {
INIT_LIST_HEAD(&tp->rx_info[i].list);
ret = r8152_submit_rx(tp, &tp->rx_info[i], GFP_KERNEL);
if (ret)
break;
}
if (ret && ++i < RTL8152_MAX_RX) { spin_lock_irqsave(&tp->rx_lock, flags);
struct list_head rx_queue;
unsigned long flags;
INIT_LIST_HEAD(&rx_queue); INIT_LIST_HEAD(&tp->rx_done);
INIT_LIST_HEAD(&tp->rx_used);
do { list_splice_init(&tp->rx_info, &tmp_list);
struct rx_agg *agg = &tp->rx_info[i++];
struct urb *urb = agg->urb;
urb->actual_length = 0; spin_unlock_irqrestore(&tp->rx_lock, flags);
list_add_tail(&agg->list, &rx_queue);
} while (i < RTL8152_MAX_RX);
spin_lock_irqsave(&tp->rx_lock, flags); list_for_each_entry_safe(agg, agg_next, &tmp_list, info_list) {
list_splice_tail(&rx_queue, &tp->rx_done); INIT_LIST_HEAD(&agg->list);
spin_unlock_irqrestore(&tp->rx_lock, flags);
/* Only RTL8152_MAX_RX rx_agg need to be submitted. */
if (++i > RTL8152_MAX_RX) {
spin_lock_irqsave(&tp->rx_lock, flags);
list_add_tail(&agg->list, &tp->rx_used);
spin_unlock_irqrestore(&tp->rx_lock, flags);
} else if (unlikely(ret < 0)) {
spin_lock_irqsave(&tp->rx_lock, flags);
list_add_tail(&agg->list, &tp->rx_done);
spin_unlock_irqrestore(&tp->rx_lock, flags);
} else {
ret = r8152_submit_rx(tp, agg, GFP_KERNEL);
}
} }
spin_lock_irqsave(&tp->rx_lock, flags);
WARN_ON(!list_empty(&tp->rx_info));
list_splice(&tmp_list, &tp->rx_info);
spin_unlock_irqrestore(&tp->rx_lock, flags);
return ret; return ret;
} }
static int rtl_stop_rx(struct r8152 *tp) static int rtl_stop_rx(struct r8152 *tp)
{ {
int i; struct rx_agg *agg, *agg_next;
struct list_head tmp_list;
unsigned long flags;
INIT_LIST_HEAD(&tmp_list);
/* The usb_kill_urb() couldn't be used in atomic.
* Therefore, move the list of rx_info to a tmp one.
* Then, list_for_each_entry_safe could be used without
* spin lock.
*/
spin_lock_irqsave(&tp->rx_lock, flags);
list_splice_init(&tp->rx_info, &tmp_list);
spin_unlock_irqrestore(&tp->rx_lock, flags);
list_for_each_entry_safe(agg, agg_next, &tmp_list, info_list) {
/* At least RTL8152_MAX_RX rx_agg have the page_count being
* equal to 1, so the other ones could be freed safely.
*/
if (page_count(agg->page) > 1)
free_rx_agg(tp, agg);
else
usb_kill_urb(agg->urb);
}
for (i = 0; i < RTL8152_MAX_RX; i++) /* Move back the list of temp to the rx_info */
usb_kill_urb(tp->rx_info[i].urb); spin_lock_irqsave(&tp->rx_lock, flags);
WARN_ON(!list_empty(&tp->rx_info));
list_splice(&tmp_list, &tp->rx_info);
spin_unlock_irqrestore(&tp->rx_lock, flags);
while (!skb_queue_empty(&tp->rx_queue)) while (!skb_queue_empty(&tp->rx_queue))
dev_kfree_skb(__skb_dequeue(&tp->rx_queue)); dev_kfree_skb(__skb_dequeue(&tp->rx_queue));
...@@ -2447,7 +2600,7 @@ static void r8153_set_rx_early_timeout(struct r8152 *tp) ...@@ -2447,7 +2600,7 @@ static void r8153_set_rx_early_timeout(struct r8152 *tp)
static void r8153_set_rx_early_size(struct r8152 *tp) static void r8153_set_rx_early_size(struct r8152 *tp)
{ {
u32 ocp_data = agg_buf_sz - rx_reserved_size(tp->netdev->mtu); u32 ocp_data = tp->rx_buf_sz - rx_reserved_size(tp->netdev->mtu);
switch (tp->version) { switch (tp->version) {
case RTL_VER_03: case RTL_VER_03:
...@@ -4954,6 +5107,77 @@ static int rtl8152_set_coalesce(struct net_device *netdev, ...@@ -4954,6 +5107,77 @@ static int rtl8152_set_coalesce(struct net_device *netdev,
return ret; return ret;
} }
static int rtl8152_get_tunable(struct net_device *netdev,
const struct ethtool_tunable *tunable, void *d)
{
struct r8152 *tp = netdev_priv(netdev);
switch (tunable->id) {
case ETHTOOL_RX_COPYBREAK:
*(u32 *)d = tp->rx_copybreak;
break;
default:
return -EOPNOTSUPP;
}
return 0;
}
static int rtl8152_set_tunable(struct net_device *netdev,
const struct ethtool_tunable *tunable,
const void *d)
{
struct r8152 *tp = netdev_priv(netdev);
u32 val;
switch (tunable->id) {
case ETHTOOL_RX_COPYBREAK:
val = *(u32 *)d;
if (val < ETH_ZLEN) {
netif_err(tp, rx_err, netdev,
"Invalid rx copy break value\n");
return -EINVAL;
}
if (tp->rx_copybreak != val) {
napi_disable(&tp->napi);
tp->rx_copybreak = val;
napi_enable(&tp->napi);
}
break;
default:
return -EOPNOTSUPP;
}
return 0;
}
static void rtl8152_get_ringparam(struct net_device *netdev,
struct ethtool_ringparam *ring)
{
struct r8152 *tp = netdev_priv(netdev);
ring->rx_max_pending = RTL8152_RX_MAX_PENDING;
ring->rx_pending = tp->rx_pending;
}
static int rtl8152_set_ringparam(struct net_device *netdev,
struct ethtool_ringparam *ring)
{
struct r8152 *tp = netdev_priv(netdev);
if (ring->rx_pending < (RTL8152_MAX_RX * 2))
return -EINVAL;
if (tp->rx_pending != ring->rx_pending) {
napi_disable(&tp->napi);
tp->rx_pending = ring->rx_pending;
napi_enable(&tp->napi);
}
return 0;
}
static const struct ethtool_ops ops = { static const struct ethtool_ops ops = {
.get_drvinfo = rtl8152_get_drvinfo, .get_drvinfo = rtl8152_get_drvinfo,
.get_link = ethtool_op_get_link, .get_link = ethtool_op_get_link,
...@@ -4971,6 +5195,10 @@ static const struct ethtool_ops ops = { ...@@ -4971,6 +5195,10 @@ static const struct ethtool_ops ops = {
.set_eee = rtl_ethtool_set_eee, .set_eee = rtl_ethtool_set_eee,
.get_link_ksettings = rtl8152_get_link_ksettings, .get_link_ksettings = rtl8152_get_link_ksettings,
.set_link_ksettings = rtl8152_set_link_ksettings, .set_link_ksettings = rtl8152_set_link_ksettings,
.get_tunable = rtl8152_get_tunable,
.set_tunable = rtl8152_set_tunable,
.get_ringparam = rtl8152_get_ringparam,
.set_ringparam = rtl8152_set_ringparam,
}; };
static int rtl8152_ioctl(struct net_device *netdev, struct ifreq *rq, int cmd) static int rtl8152_ioctl(struct net_device *netdev, struct ifreq *rq, int cmd)
...@@ -5115,6 +5343,7 @@ static int rtl_ops_init(struct r8152 *tp) ...@@ -5115,6 +5343,7 @@ static int rtl_ops_init(struct r8152 *tp)
ops->in_nway = rtl8152_in_nway; ops->in_nway = rtl8152_in_nway;
ops->hw_phy_cfg = r8152b_hw_phy_cfg; ops->hw_phy_cfg = r8152b_hw_phy_cfg;
ops->autosuspend_en = rtl_runtime_suspend_enable; ops->autosuspend_en = rtl_runtime_suspend_enable;
tp->rx_buf_sz = 16 * 1024;
break; break;
case RTL_VER_03: case RTL_VER_03:
...@@ -5132,6 +5361,7 @@ static int rtl_ops_init(struct r8152 *tp) ...@@ -5132,6 +5361,7 @@ static int rtl_ops_init(struct r8152 *tp)
ops->in_nway = rtl8153_in_nway; ops->in_nway = rtl8153_in_nway;
ops->hw_phy_cfg = r8153_hw_phy_cfg; ops->hw_phy_cfg = r8153_hw_phy_cfg;
ops->autosuspend_en = rtl8153_runtime_enable; ops->autosuspend_en = rtl8153_runtime_enable;
tp->rx_buf_sz = 32 * 1024;
break; break;
case RTL_VER_08: case RTL_VER_08:
...@@ -5147,6 +5377,7 @@ static int rtl_ops_init(struct r8152 *tp) ...@@ -5147,6 +5377,7 @@ static int rtl_ops_init(struct r8152 *tp)
ops->in_nway = rtl8153_in_nway; ops->in_nway = rtl8153_in_nway;
ops->hw_phy_cfg = r8153b_hw_phy_cfg; ops->hw_phy_cfg = r8153b_hw_phy_cfg;
ops->autosuspend_en = rtl8153b_runtime_enable; ops->autosuspend_en = rtl8153b_runtime_enable;
tp->rx_buf_sz = 32 * 1024;
break; break;
default: default:
...@@ -5321,6 +5552,9 @@ static int rtl8152_probe(struct usb_interface *intf, ...@@ -5321,6 +5552,9 @@ static int rtl8152_probe(struct usb_interface *intf,
tp->speed = tp->mii.supports_gmii ? SPEED_1000 : SPEED_100; tp->speed = tp->mii.supports_gmii ? SPEED_1000 : SPEED_100;
tp->duplex = DUPLEX_FULL; tp->duplex = DUPLEX_FULL;
tp->rx_copybreak = RTL8152_RXFG_HEADSZ;
tp->rx_pending = 10 * RTL8152_MAX_RX;
intf->needs_remote_wakeup = 1; intf->needs_remote_wakeup = 1;
tp->rtl_ops.init(tp); tp->rtl_ops.init(tp);
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册