Commit 5ed18368 authored by Linus Torvalds

Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-2.6

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-2.6:
  net: Fix percpu counters deadlock
  cpumask: prepare for iterators to only go to nr_cpu_ids/nr_cpumask_bits: net
  drivers/net/usb: use USB API functions rather than constants
  cls_cgroup: clean up Kconfig
  cls_cgroup: clean up for cgroup part
  cls_cgroup: fix an oops when removing a cgroup
  EtherExpress16: fix printing timed out status
  mlx4_en: Added "set_ringparam" Ethtool interface implementation
  mlx4_en: Always allocate RX ring for each interrupt vector
  mlx4_en: Verify number of RX rings doesn't exceed MAX_RX_RINGS
  IPVS: Make "no destination available" message more consistent between schedulers
  net: KS8695: removed duplicated #include
  tun: Fix SIOCSIFHWADDR error.
  smsc911x: compile fix re netif_rx signature changes
  netns: foreach_netdev_safe is insufficient in default_device_exit
  net: make xfrm_statistics_seq_show use generic snmp_fold_field
  net: Fix more NAPI interface netdev argument drop fallout.
  net: Fix unused variable warnings in pasemi_mac.c and spider_net.c
@@ -307,7 +307,7 @@ static int ep93xx_poll(struct napi_struct *napi, int budget)
         }
         spin_unlock_irq(&ep->rx_lock);

-        if (more && netif_rx_reschedule(dev, napi))
+        if (more && netif_rx_reschedule(napi))
             goto poll_some_more;
     }
......
@@ -504,7 +504,7 @@ static int eth_poll(struct napi_struct *napi, int budget)
         netif_rx_complete(napi);
         qmgr_enable_irq(rxq);
         if (!qmgr_stat_empty(rxq) &&
-            netif_rx_reschedule(dev, napi)) {
+            netif_rx_reschedule(napi)) {
 #if DEBUG_RX
             printk(KERN_DEBUG "%s: eth_poll"
                    " netif_rx_reschedule successed\n",
......
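Note on the two hunks above: an earlier commit dropped the struct net_device argument from the NAPI receive helpers, and these fixups convert the remaining netif_rx_reschedule() callers. A minimal sketch of the resulting 2.6.29-era polling pattern, assuming hypothetical driver hooks my_clean_rx(), my_enable_rx_irq() and my_more_rx() (not from these drivers):

static int my_poll(struct napi_struct *napi, int budget)
{
    int done = my_clean_rx(napi, budget);    /* hypothetical RX cleanup */

    if (done < budget) {
        netif_rx_complete(napi);             /* napi_struct only, no dev */
        my_enable_rx_irq(napi);              /* hypothetical */
        /* did more work arrive between the check and the irq enable? */
        if (my_more_rx(napi) && netif_rx_reschedule(napi))
            return budget;                   /* stay on the poll list */
    }
    return done;
}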
@@ -29,7 +29,6 @@
 #include <linux/delay.h>
 #include <linux/platform_device.h>
 #include <linux/irq.h>
-#include <linux/delay.h>
 #include <linux/io.h>
 #include <asm/irq.h>
......
@@ -68,17 +68,17 @@
  */

 /* these functions take the SCB status word and test the relevant status bit */
-#define SCB_complete(s) ((s&0x8000)!=0)
-#define SCB_rxdframe(s) ((s&0x4000)!=0)
-#define SCB_CUdead(s) ((s&0x2000)!=0)
-#define SCB_RUdead(s) ((s&0x1000)!=0)
-#define SCB_ack(s) (s & 0xf000)
+#define SCB_complete(s) (((s) & 0x8000) != 0)
+#define SCB_rxdframe(s) (((s) & 0x4000) != 0)
+#define SCB_CUdead(s) (((s) & 0x2000) != 0)
+#define SCB_RUdead(s) (((s) & 0x1000) != 0)
+#define SCB_ack(s) ((s) & 0xf000)

 /* Command unit status: 0=idle, 1=suspended, 2=active */
-#define SCB_CUstat(s) ((s&0x0300)>>8)
+#define SCB_CUstat(s) (((s)&0x0300)>>8)
 /* Receive unit status: 0=idle, 1=suspended, 2=out of resources, 4=ready */
-#define SCB_RUstat(s) ((s&0x0070)>>4)
+#define SCB_RUstat(s) (((s)&0x0070)>>4)

 /* SCB commands */
 #define SCB_CUnop 0x0000
@@ -98,18 +98,18 @@
  * Command block defines
  */

-#define Stat_Done(s) ((s&0x8000)!=0)
-#define Stat_Busy(s) ((s&0x4000)!=0)
-#define Stat_OK(s) ((s&0x2000)!=0)
-#define Stat_Abort(s) ((s&0x1000)!=0)
-#define Stat_STFail ((s&0x0800)!=0)
-#define Stat_TNoCar(s) ((s&0x0400)!=0)
-#define Stat_TNoCTS(s) ((s&0x0200)!=0)
-#define Stat_TNoDMA(s) ((s&0x0100)!=0)
-#define Stat_TDefer(s) ((s&0x0080)!=0)
-#define Stat_TColl(s) ((s&0x0040)!=0)
-#define Stat_TXColl(s) ((s&0x0020)!=0)
-#define Stat_NoColl(s) (s&0x000f)
+#define Stat_Done(s) (((s) & 0x8000) != 0)
+#define Stat_Busy(s) (((s) & 0x4000) != 0)
+#define Stat_OK(s) (((s) & 0x2000) != 0)
+#define Stat_Abort(s) (((s) & 0x1000) != 0)
+#define Stat_STFail (((s) & 0x0800) != 0)
+#define Stat_TNoCar(s) (((s) & 0x0400) != 0)
+#define Stat_TNoCTS(s) (((s) & 0x0200) != 0)
+#define Stat_TNoDMA(s) (((s) & 0x0100) != 0)
+#define Stat_TDefer(s) (((s) & 0x0080) != 0)
+#define Stat_TColl(s) (((s) & 0x0040) != 0)
+#define Stat_TXColl(s) (((s) & 0x0020) != 0)
+#define Stat_NoColl(s) ((s) & 0x000f)

 /* Cmd_END will end AFTER the command if this is the first
  * command block after an SCB_CUstart, but BEFORE the command
@@ -136,16 +136,16 @@
  * Frame Descriptor (Receive block) defines
  */

-#define FD_Done(s) ((s&0x8000)!=0)
-#define FD_Busy(s) ((s&0x4000)!=0)
-#define FD_OK(s) ((s&0x2000)!=0)
+#define FD_Done(s) (((s) & 0x8000) != 0)
+#define FD_Busy(s) (((s) & 0x4000) != 0)
+#define FD_OK(s) (((s) & 0x2000) != 0)

-#define FD_CRC(s) ((s&0x0800)!=0)
-#define FD_Align(s) ((s&0x0400)!=0)
-#define FD_Resrc(s) ((s&0x0200)!=0)
-#define FD_DMA(s) ((s&0x0100)!=0)
-#define FD_Short(s) ((s&0x0080)!=0)
-#define FD_NoEOF(s) ((s&0x0040)!=0)
+#define FD_CRC(s) (((s) & 0x0800) != 0)
+#define FD_Align(s) (((s) & 0x0400) != 0)
+#define FD_Resrc(s) (((s) & 0x0200) != 0)
+#define FD_DMA(s) (((s) & 0x0100) != 0)
+#define FD_Short(s) (((s) & 0x0080) != 0)
+#define FD_NoEOF(s) (((s) & 0x0040) != 0)

 struct rfd_header {
     volatile unsigned long flags;
......
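The eexpress.h churn above is pure macro hygiene: every status-word parameter gets its own parentheses. Without them, passing an expression misbinds against '&'. A short illustration (BAD_DONE/GOOD_DONE are illustrative names, not from the driver):

#define BAD_DONE(s)   ((s&0x8000)!=0)
#define GOOD_DONE(s)  (((s) & 0x8000) != 0)

/* BAD_DONE(a | b)  expands to ((a | b&0x8000)!=0), i.e.
 * (a | (b & 0x8000)) != 0, which is true whenever 'a' is nonzero.
 * GOOD_DONE(a | b) expands to (((a | b) & 0x8000) != 0), as intended. */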
@@ -169,13 +169,10 @@ static void *mlx4_en_add(struct mlx4_dev *dev)
     mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_ETH) {
         mlx4_info(mdev, "Using %d tx rings for port:%d\n",
                   mdev->profile.prof[i].tx_ring_num, i);
-        if (!mdev->profile.prof[i].rx_ring_num) {
-            mdev->profile.prof[i].rx_ring_num = dev->caps.num_comp_vectors;
-            mlx4_info(mdev, "Defaulting to %d rx rings for port:%d\n",
-                      mdev->profile.prof[i].rx_ring_num, i);
-        } else
-            mlx4_info(mdev, "Using %d rx rings for port:%d\n",
-                      mdev->profile.prof[i].rx_ring_num, i);
+        mdev->profile.prof[i].rx_ring_num =
+            min_t(int, dev->caps.num_comp_vectors, MAX_RX_RINGS);
+        mlx4_info(mdev, "Defaulting to %d rx rings for port:%d\n",
+                  mdev->profile.prof[i].rx_ring_num, i);
     }

     /* Create our own workqueue for reset/multicast tasks
......
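min_t() here guarantees the per-port RX ring count never exceeds MAX_RX_RINGS, however many completion vectors the HCA reports. For reference, min_t() from <linux/kernel.h> is (approximately) the type-safe comparison below, so both operands are forced to the named type before comparing:

#define min_t(type, x, y) ({ \
    type __x = (x);          \
    type __y = (y);          \
    __x < __y ? __x : __y; })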
@@ -552,7 +552,7 @@ static void mlx4_en_linkstate(struct work_struct *work)
 }

-static int mlx4_en_start_port(struct net_device *dev)
+int mlx4_en_start_port(struct net_device *dev)
 {
     struct mlx4_en_priv *priv = netdev_priv(dev);
     struct mlx4_en_dev *mdev = priv->mdev;
@@ -707,7 +707,7 @@ static int mlx4_en_start_port(struct net_device *dev)
 }

-static void mlx4_en_stop_port(struct net_device *dev)
+void mlx4_en_stop_port(struct net_device *dev)
 {
     struct mlx4_en_priv *priv = netdev_priv(dev);
     struct mlx4_en_dev *mdev = priv->mdev;
@@ -826,7 +826,7 @@ static int mlx4_en_close(struct net_device *dev)
     return 0;
 }

-static void mlx4_en_free_resources(struct mlx4_en_priv *priv)
+void mlx4_en_free_resources(struct mlx4_en_priv *priv)
 {
     int i;
@@ -845,7 +845,7 @@ static void mlx4_en_free_resources(struct mlx4_en_priv *priv)
     }
 }

-static int mlx4_en_alloc_resources(struct mlx4_en_priv *priv)
+int mlx4_en_alloc_resources(struct mlx4_en_priv *priv)
 {
     struct mlx4_en_dev *mdev = priv->mdev;
     struct mlx4_en_port_profile *prof = priv->prof;
......
@@ -65,15 +65,6 @@ MLX4_EN_PARM_INT(pfctx, 0, "Priority based Flow Control policy on TX[7:0]."
 MLX4_EN_PARM_INT(pfcrx, 0, "Priority based Flow Control policy on RX[7:0]."
                            " Per priority bit mask");

-MLX4_EN_PARM_INT(rx_ring_num1, 0, "Number or Rx rings for port 1 (0 = #cores)");
-MLX4_EN_PARM_INT(rx_ring_num2, 0, "Number or Rx rings for port 2 (0 = #cores)");
-
-MLX4_EN_PARM_INT(tx_ring_size1, MLX4_EN_AUTO_CONF, "Tx ring size for port 1");
-MLX4_EN_PARM_INT(tx_ring_size2, MLX4_EN_AUTO_CONF, "Tx ring size for port 2");
-MLX4_EN_PARM_INT(rx_ring_size1, MLX4_EN_AUTO_CONF, "Rx ring size for port 1");
-MLX4_EN_PARM_INT(rx_ring_size2, MLX4_EN_AUTO_CONF, "Rx ring size for port 2");
-
 int mlx4_en_get_profile(struct mlx4_en_dev *mdev)
 {
     struct mlx4_en_profile *params = &mdev->profile;
@@ -87,6 +78,8 @@ int mlx4_en_get_profile(struct mlx4_en_dev *mdev)
         params->prof[i].rx_ppp = pfcrx;
         params->prof[i].tx_pause = 1;
         params->prof[i].tx_ppp = pfctx;
+        params->prof[i].tx_ring_size = MLX4_EN_DEF_TX_RING_SIZE;
+        params->prof[i].rx_ring_size = MLX4_EN_DEF_RX_RING_SIZE;
     }
     if (pfcrx || pfctx) {
         params->prof[1].tx_ring_num = MLX4_EN_TX_RING_NUM;
@@ -95,32 +88,7 @@ int mlx4_en_get_profile(struct mlx4_en_dev *mdev)
         params->prof[1].tx_ring_num = 1;
         params->prof[2].tx_ring_num = 1;
     }
-    params->prof[1].rx_ring_num = min_t(int, rx_ring_num1, MAX_RX_RINGS);
-    params->prof[2].rx_ring_num = min_t(int, rx_ring_num2, MAX_RX_RINGS);
-
-    if (tx_ring_size1 == MLX4_EN_AUTO_CONF)
-        tx_ring_size1 = MLX4_EN_DEF_TX_RING_SIZE;
-    params->prof[1].tx_ring_size =
-        (tx_ring_size1 < MLX4_EN_MIN_TX_SIZE) ?
-         MLX4_EN_MIN_TX_SIZE : roundup_pow_of_two(tx_ring_size1);
-
-    if (tx_ring_size2 == MLX4_EN_AUTO_CONF)
-        tx_ring_size2 = MLX4_EN_DEF_TX_RING_SIZE;
-    params->prof[2].tx_ring_size =
-        (tx_ring_size2 < MLX4_EN_MIN_TX_SIZE) ?
-         MLX4_EN_MIN_TX_SIZE : roundup_pow_of_two(tx_ring_size2);
-
-    if (rx_ring_size1 == MLX4_EN_AUTO_CONF)
-        rx_ring_size1 = MLX4_EN_DEF_RX_RING_SIZE;
-    params->prof[1].rx_ring_size =
-        (rx_ring_size1 < MLX4_EN_MIN_RX_SIZE) ?
-         MLX4_EN_MIN_RX_SIZE : roundup_pow_of_two(rx_ring_size1);
-
-    if (rx_ring_size2 == MLX4_EN_AUTO_CONF)
-        rx_ring_size2 = MLX4_EN_DEF_RX_RING_SIZE;
-    params->prof[2].rx_ring_size =
-        (rx_ring_size2 < MLX4_EN_MIN_RX_SIZE) ?
-         MLX4_EN_MIN_RX_SIZE : roundup_pow_of_two(rx_ring_size2);

     return 0;
 }
@@ -417,6 +385,54 @@ static void mlx4_en_get_pauseparam(struct net_device *dev,
     pause->rx_pause = priv->prof->rx_pause;
 }

+static int mlx4_en_set_ringparam(struct net_device *dev,
+                                 struct ethtool_ringparam *param)
+{
+    struct mlx4_en_priv *priv = netdev_priv(dev);
+    struct mlx4_en_dev *mdev = priv->mdev;
+    u32 rx_size, tx_size;
+    int port_up = 0;
+    int err = 0;
+
+    if (param->rx_jumbo_pending || param->rx_mini_pending)
+        return -EINVAL;
+
+    rx_size = roundup_pow_of_two(param->rx_pending);
+    rx_size = max_t(u32, rx_size, MLX4_EN_MIN_RX_SIZE);
+    tx_size = roundup_pow_of_two(param->tx_pending);
+    tx_size = max_t(u32, tx_size, MLX4_EN_MIN_TX_SIZE);
+
+    if (rx_size == priv->prof->rx_ring_size &&
+        tx_size == priv->prof->tx_ring_size)
+        return 0;
+
+    mutex_lock(&mdev->state_lock);
+    if (priv->port_up) {
+        port_up = 1;
+        mlx4_en_stop_port(dev);
+    }
+
+    mlx4_en_free_resources(priv);
+
+    priv->prof->tx_ring_size = tx_size;
+    priv->prof->rx_ring_size = rx_size;
+
+    err = mlx4_en_alloc_resources(priv);
+    if (err) {
+        mlx4_err(mdev, "Failed reallocating port resources\n");
+        goto out;
+    }
+
+    if (port_up) {
+        err = mlx4_en_start_port(dev);
+        if (err)
+            mlx4_err(mdev, "Failed starting port\n");
+    }
+
+out:
+    mutex_unlock(&mdev->state_lock);
+    return err;
+}
+
 static void mlx4_en_get_ringparam(struct net_device *dev,
                                   struct ethtool_ringparam *param)
 {
@@ -456,6 +472,7 @@ const struct ethtool_ops mlx4_en_ethtool_ops = {
     .get_pauseparam = mlx4_en_get_pauseparam,
     .set_pauseparam = mlx4_en_set_pauseparam,
     .get_ringparam = mlx4_en_get_ringparam,
+    .set_ringparam = mlx4_en_set_ringparam,
     .get_flags = ethtool_op_get_flags,
     .set_flags = ethtool_op_set_flags,
 };
......
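With .set_ringparam wired up, the resize path above (stop the port if it is up, free resources, record the new power-of-two sizes, reallocate, restart) becomes reachable from user space, e.g. via `ethtool -G`. A hedged user-space sketch of the same operation through the SIOCETHTOOL ioctl; "eth0" and the ring sizes are placeholders:

#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

static int set_rings(const char *ifname, __u32 rx, __u32 tx)
{
    struct ethtool_ringparam ering = { .cmd = ETHTOOL_SRINGPARAM };
    struct ifreq ifr;
    int fd = socket(AF_INET, SOCK_DGRAM, 0);  /* any INET socket works */
    int ret;

    if (fd < 0)
        return -1;
    memset(&ifr, 0, sizeof(ifr));
    strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
    ering.rx_pending = rx;    /* driver rounds up to a power of two */
    ering.tx_pending = tx;    /* and clamps to MLX4_EN_MIN_*_SIZE   */
    ifr.ifr_data = (void *)&ering;
    ret = ioctl(fd, SIOCETHTOOL, &ifr);  /* e.g. set_rings("eth0", 4096, 1024) */
    close(fd);
    return ret;
}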
@@ -489,6 +489,12 @@ void mlx4_en_destroy_netdev(struct net_device *dev);
 int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
                         struct mlx4_en_port_profile *prof);

+int mlx4_en_start_port(struct net_device *dev);
+void mlx4_en_stop_port(struct net_device *dev);
+
+void mlx4_en_free_resources(struct mlx4_en_priv *priv);
+int mlx4_en_alloc_resources(struct mlx4_en_priv *priv);
+
 int mlx4_en_get_profile(struct mlx4_en_dev *mdev);

 int mlx4_en_create_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq,
......
@@ -954,7 +954,6 @@ static irqreturn_t pasemi_mac_rx_intr(int irq, void *data)
 {
     const struct pasemi_mac_rxring *rxring = data;
     struct pasemi_mac *mac = rxring->mac;
-    struct net_device *dev = mac->netdev;
     const struct pasemi_dmachan *chan = &rxring->chan;
     unsigned int reg;
@@ -1634,7 +1633,6 @@ static void pasemi_mac_set_rx_mode(struct net_device *dev)
 static int pasemi_mac_poll(struct napi_struct *napi, int budget)
 {
     struct pasemi_mac *mac = container_of(napi, struct pasemi_mac, napi);
-    struct net_device *dev = mac->netdev;
     int pkts;

     pasemi_mac_clean_tx(tx_ring(mac));
......
@@ -1484,13 +1484,13 @@ static irqreturn_t smsc911x_irqhandler(int irq, void *dev_id)
     }

     if (likely(intsts & inten & INT_STS_RSFL_)) {
-        if (likely(netif_rx_schedule_prep(dev, &pdata->napi))) {
+        if (likely(netif_rx_schedule_prep(&pdata->napi))) {
             /* Disable Rx interrupts */
             temp = smsc911x_reg_read(pdata, INT_EN);
             temp &= (~INT_EN_RSFL_EN_);
             smsc911x_reg_write(pdata, INT_EN, temp);
             /* Schedule a NAPI poll */
-            __netif_rx_schedule(dev, &pdata->napi);
+            __netif_rx_schedule(&pdata->napi);
         } else {
             SMSC_WARNING(RX_ERR,
                          "netif_rx_schedule_prep failed");
......
@@ -1277,7 +1277,6 @@ spider_net_decode_one_descr(struct spider_net_card *card)
 static int spider_net_poll(struct napi_struct *napi, int budget)
 {
     struct spider_net_card *card = container_of(napi, struct spider_net_card, napi);
-    struct net_device *netdev = card->netdev;
     int packets_done = 0;

     while (packets_done < budget) {
......
@@ -343,7 +343,7 @@ static void tun_net_init(struct net_device *dev)
         break;

     case TUN_TAP_DEV:
-        dev->netdev_ops = &tun_netdev_ops;
+        dev->netdev_ops = &tap_netdev_ops;
         /* Ethernet TAP Device */
         ether_setup(dev);
......
@@ -2831,7 +2831,7 @@ static struct usb_endpoint_descriptor *hso_get_ep(struct usb_interface *intf,
     for (i = 0; i < iface->desc.bNumEndpoints; i++) {
         endp = &iface->endpoint[i].desc;
         if (((endp->bEndpointAddress & USB_ENDPOINT_DIR_MASK) == dir) &&
-            ((endp->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) == type))
+            (usb_endpoint_type(endp) == type))
             return endp;
     }
......
@@ -654,7 +654,7 @@ static int hss_hdlc_poll(struct napi_struct *napi, int budget)
         netif_rx_complete(dev, napi);
         qmgr_enable_irq(rxq);
         if (!qmgr_stat_empty(rxq) &&
-            netif_rx_reschedule(dev, napi)) {
+            netif_rx_reschedule(napi)) {
 #if DEBUG_RX
             printk(KERN_DEBUG "%s: hss_hdlc_poll"
                    " netif_rx_reschedule succeeded\n",
......
@@ -1065,8 +1065,7 @@ static int eject_installer(struct usb_interface *intf)
     /* Find bulk out endpoint */
     endpoint = &iface_desc->endpoint[1].desc;
     if ((endpoint->bEndpointAddress & USB_TYPE_MASK) == USB_DIR_OUT &&
-        (endpoint->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) ==
-        USB_ENDPOINT_XFER_BULK) {
+        usb_endpoint_xfer_bulk(endpoint)) {
         bulk_out_ep = endpoint->bEndpointAddress;
     } else {
         dev_err(&udev->dev,
......
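Both USB hunks above swap hand-rolled bmAttributes masking for the accessors from <linux/usb.h>. For reference, and assuming the 2.6.28-era definitions, the helpers are thin wrappers along these lines, so the conversions are behavior-preserving:

static inline int usb_endpoint_type(const struct usb_endpoint_descriptor *epd)
{
    /* extract the transfer-type bits of bmAttributes */
    return epd->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK;
}

static inline int usb_endpoint_xfer_bulk(const struct usb_endpoint_descriptor *epd)
{
    /* true if this is a bulk endpoint */
    return (epd->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) ==
            USB_ENDPOINT_XFER_BULK;
}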
@@ -5066,13 +5066,14 @@ static struct pernet_operations __net_initdata netdev_net_ops = {

 static void __net_exit default_device_exit(struct net *net)
 {
-    struct net_device *dev, *next;
+    struct net_device *dev;

     /*
      * Push all migratable of the network devices back to the
      * initial network namespace
      */
     rtnl_lock();
-    for_each_netdev_safe(net, dev, next) {
+restart:
+    for_each_netdev(net, dev) {
         int err;
         char fb_name[IFNAMSIZ];
@@ -5083,7 +5084,7 @@ static void __net_exit default_device_exit(struct net *net)
         /* Delete virtual devices */
         if (dev->rtnl_link_ops && dev->rtnl_link_ops->dellink) {
             dev->rtnl_link_ops->dellink(dev);
-            continue;
+            goto restart;
         }

         /* Push remaing network devices to init_net */
@@ -5094,6 +5095,7 @@ static void __net_exit default_device_exit(struct net *net)
                    __func__, dev->name, err);
             BUG();
         }
+        goto restart;
     }
     rtnl_unlock();
 }
......
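Why for_each_netdev_safe() is insufficient here: the safe iterator only protects against deletion of the current entry, but a dellink() callback can unregister several devices in one call (including the saved next one), and moving a device to init_net reorders the list as well. Restarting the walk after every mutation is the only sound option. A self-contained sketch of the same idiom on a plain singly linked list, with hypothetical names throughout:

struct node { struct node *next; int doomed; };

static void reap(struct node **head)
{
restart:
    for (struct node *n = *head; n; n = n->next) {
        if (n->doomed) {
            /* may unlink and free 'n' AND an arbitrary set of other
             * nodes, so any saved next pointer could now dangle */
            destroy_node_and_friends(head, n);  /* hypothetical */
            goto restart;                       /* rescan from the top */
        }
    }
}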
@@ -2414,7 +2414,7 @@ static void *neigh_stat_seq_start(struct seq_file *seq, loff_t *pos)
     if (*pos == 0)
         return SEQ_START_TOKEN;

-    for (cpu = *pos-1; cpu < NR_CPUS; ++cpu) {
+    for (cpu = *pos-1; cpu < nr_cpu_ids; ++cpu) {
         if (!cpu_possible(cpu))
             continue;
         *pos = cpu+1;
@@ -2429,7 +2429,7 @@ static void *neigh_stat_seq_next(struct seq_file *seq, void *v, loff_t *pos)
     struct neigh_table *tbl = pde->data;
     int cpu;

-    for (cpu = *pos; cpu < NR_CPUS; ++cpu) {
+    for (cpu = *pos; cpu < nr_cpu_ids; ++cpu) {
         if (!cpu_possible(cpu))
             continue;
         *pos = cpu+1;
......
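NR_CPUS is the compile-time array bound (often 4096 on distribution kernels), while nr_cpu_ids is the runtime limit: one more than the highest possible CPU id on this machine. Capping these seq_file walkers at nr_cpu_ids skips ids that cpu_possible() would reject anyway; the manual loop (rather than for_each_possible_cpu()) survives because the iterator must be resumable from *pos. The same substitution repeats in the route.c and conntrack hunks below. A sketch of the resumable pattern, assuming a per-cpu stats table like the neighbour code's:

/* resume scanning possible CPUs at *pos; nr_cpu_ids bounds the search */
for (cpu = *pos; cpu < nr_cpu_ids; ++cpu) {
    if (!cpu_possible(cpu))
        continue;
    *pos = cpu + 1;                        /* remember where to resume */
    return per_cpu_ptr(tbl->stats, cpu);   /* as in neigh_stat_seq_next */
}
return NULL;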
@@ -964,7 +964,6 @@ void dccp_close(struct sock *sk, long timeout)
     state = sk->sk_state;
     sock_hold(sk);
     sock_orphan(sk);
-    percpu_counter_inc(sk->sk_prot->orphan_count);

     /*
      * It is the last release_sock in its life. It will remove backlog.
@@ -978,6 +977,8 @@ void dccp_close(struct sock *sk, long timeout)
     bh_lock_sock(sk);
     WARN_ON(sock_owned_by_user(sk));

+    percpu_counter_inc(sk->sk_prot->orphan_count);
+
     /* Have we already been destroyed by a softirq or backlog? */
     if (state != DCCP_CLOSED && sk->sk_state == DCCP_CLOSED)
         goto out;
......
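The orphan-count moves in this and the tcp_close()/inet_csk_listen_stop() hunks are the "net: Fix percpu counters deadlock" change from the shortlog: sk->sk_prot->orphan_count is now a struct percpu_counter, and percpu_counter_inc() takes the counter's internal spinlock whenever a per-cpu delta crosses the batch size. The same counters are also updated from softirq context, so incrementing with BHs enabled can deadlock on that lock. Each increment therefore moves inside a BH-disabled region (here, after bh_lock_sock()); the tcp_v4_init_sock()/tcp_v6_init_sock() hunks below apply the same rule explicitly:

/* safe pattern for a percpu_counter that is also touched from BH context */
local_bh_disable();
percpu_counter_inc(&tcp_sockets_allocated);
local_bh_enable();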
@@ -633,8 +633,6 @@ void inet_csk_listen_stop(struct sock *sk)
         acc_req = req->dl_next;

-        percpu_counter_inc(sk->sk_prot->orphan_count);
-
         local_bh_disable();
         bh_lock_sock(child);
         WARN_ON(sock_owned_by_user(child));
@@ -644,6 +642,8 @@ void inet_csk_listen_stop(struct sock *sk)
         sock_orphan(child);

+        percpu_counter_inc(sk->sk_prot->orphan_count);
+
         inet_csk_destroy_sock(child);

         bh_unlock_sock(child);
......
@@ -291,7 +291,7 @@ static void *ct_cpu_seq_start(struct seq_file *seq, loff_t *pos)
     if (*pos == 0)
         return SEQ_START_TOKEN;

-    for (cpu = *pos-1; cpu < NR_CPUS; ++cpu) {
+    for (cpu = *pos-1; cpu < nr_cpu_ids; ++cpu) {
         if (!cpu_possible(cpu))
             continue;
         *pos = cpu+1;
@@ -306,7 +306,7 @@ static void *ct_cpu_seq_next(struct seq_file *seq, void *v, loff_t *pos)
     struct net *net = seq_file_net(seq);
     int cpu;

-    for (cpu = *pos; cpu < NR_CPUS; ++cpu) {
+    for (cpu = *pos; cpu < nr_cpu_ids; ++cpu) {
         if (!cpu_possible(cpu))
             continue;
         *pos = cpu+1;
......
@@ -38,6 +38,7 @@
 #include <net/tcp.h>
 #include <net/udp.h>
 #include <net/udplite.h>
+#include <linux/bottom_half.h>
 #include <linux/inetdevice.h>
 #include <linux/proc_fs.h>
 #include <linux/seq_file.h>
@@ -50,13 +51,17 @@
 static int sockstat_seq_show(struct seq_file *seq, void *v)
 {
     struct net *net = seq->private;
+    int orphans, sockets;
+
+    local_bh_disable();
+    orphans = percpu_counter_sum_positive(&tcp_orphan_count),
+    sockets = percpu_counter_sum_positive(&tcp_sockets_allocated),
+    local_bh_enable();

     socket_seq_show(seq);
     seq_printf(seq, "TCP: inuse %d orphan %d tw %d alloc %d mem %d\n",
-               sock_prot_inuse_get(net, &tcp_prot),
-               (int)percpu_counter_sum_positive(&tcp_orphan_count),
-               tcp_death_row.tw_count,
-               (int)percpu_counter_sum_positive(&tcp_sockets_allocated),
+               sock_prot_inuse_get(net, &tcp_prot), orphans,
+               tcp_death_row.tw_count, sockets,
                atomic_read(&tcp_memory_allocated));
     seq_printf(seq, "UDP: inuse %d mem %d\n",
                sock_prot_inuse_get(net, &udp_prot),
......
@@ -429,7 +429,7 @@ static void *rt_cpu_seq_start(struct seq_file *seq, loff_t *pos)
     if (*pos == 0)
         return SEQ_START_TOKEN;

-    for (cpu = *pos-1; cpu < NR_CPUS; ++cpu) {
+    for (cpu = *pos-1; cpu < nr_cpu_ids; ++cpu) {
         if (!cpu_possible(cpu))
             continue;
         *pos = cpu+1;
@@ -442,7 +442,7 @@ static void *rt_cpu_seq_next(struct seq_file *seq, void *v, loff_t *pos)
 {
     int cpu;

-    for (cpu = *pos; cpu < NR_CPUS; ++cpu) {
+    for (cpu = *pos; cpu < nr_cpu_ids; ++cpu) {
         if (!cpu_possible(cpu))
             continue;
         *pos = cpu+1;
......
@@ -1836,7 +1836,6 @@ void tcp_close(struct sock *sk, long timeout)
     state = sk->sk_state;
     sock_hold(sk);
     sock_orphan(sk);
-    percpu_counter_inc(sk->sk_prot->orphan_count);

     /* It is the last release_sock in its life. It will remove backlog. */
     release_sock(sk);
@@ -1849,6 +1848,8 @@ void tcp_close(struct sock *sk, long timeout)
     bh_lock_sock(sk);
     WARN_ON(sock_owned_by_user(sk));

+    percpu_counter_inc(sk->sk_prot->orphan_count);
+
     /* Have we already been destroyed by a softirq or backlog? */
     if (state != TCP_CLOSE && sk->sk_state == TCP_CLOSE)
         goto out;
......
@@ -51,6 +51,7 @@
  */

+#include <linux/bottom_half.h>
 #include <linux/types.h>
 #include <linux/fcntl.h>
 #include <linux/module.h>
@@ -1797,7 +1798,9 @@ static int tcp_v4_init_sock(struct sock *sk)
     sk->sk_sndbuf = sysctl_tcp_wmem[1];
     sk->sk_rcvbuf = sysctl_tcp_rmem[1];

+    local_bh_disable();
     percpu_counter_inc(&tcp_sockets_allocated);
+    local_bh_enable();

     return 0;
 }
......
@@ -23,6 +23,7 @@
  *    2 of the License, or (at your option) any later version.
  */

+#include <linux/bottom_half.h>
 #include <linux/module.h>
 #include <linux/errno.h>
 #include <linux/types.h>
@@ -1830,7 +1831,9 @@ static int tcp_v6_init_sock(struct sock *sk)
     sk->sk_sndbuf = sysctl_tcp_wmem[1];
     sk->sk_rcvbuf = sysctl_tcp_rmem[1];

+    local_bh_disable();
     percpu_counter_inc(&tcp_sockets_allocated);
+    local_bh_enable();

     return 0;
 }
......
@@ -507,7 +507,7 @@ ip_vs_lblc_schedule(struct ip_vs_service *svc, const struct sk_buff *skb)
     /* No cache entry or it is invalid, time to schedule */
     dest = __ip_vs_lblc_schedule(svc);
     if (!dest) {
-        IP_VS_DBG(1, "no destination available\n");
+        IP_VS_ERR_RL("LBLC: no destination available\n");
         return NULL;
     }
......
@@ -690,7 +690,7 @@ ip_vs_lblcr_schedule(struct ip_vs_service *svc, const struct sk_buff *skb)
     /* The cache entry is invalid, time to schedule */
     dest = __ip_vs_lblcr_schedule(svc);
     if (!dest) {
-        IP_VS_DBG(1, "no destination available\n");
+        IP_VS_ERR_RL("LBLCR: no destination available\n");
         read_unlock(&svc->sched_lock);
         return NULL;
     }
......
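From here on, every IPVS scheduler reports scheduling failure with the same rate-limited "SCHEDNAME: no destination available" message instead of a mix of debug-only and ad-hoc strings. IP_VS_ERR_RL is, approximately, a net_ratelimit()-guarded printk; the body below is paraphrased from the ip_vs.h of this era and its exact wording is an assumption:

#define IP_VS_ERR_RL(msg...)                       \
    do {                                           \
        if (net_ratelimit())                       \
            printk(KERN_ERR "IPVS: " msg);         \
    } while (0)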
@@ -66,11 +66,15 @@ ip_vs_lc_schedule(struct ip_vs_service *svc, const struct sk_buff *skb)
         }
     }

-    if (least)
-        IP_VS_DBG_BUF(6, "LC: server %s:%u activeconns %d inactconns %d\n",
-                      IP_VS_DBG_ADDR(svc->af, &least->addr), ntohs(least->port),
-                      atomic_read(&least->activeconns),
-                      atomic_read(&least->inactconns));
+    if (!least)
+        IP_VS_ERR_RL("LC: no destination available\n");
+    else
+        IP_VS_DBG_BUF(6, "LC: server %s:%u activeconns %d "
+                      "inactconns %d\n",
+                      IP_VS_DBG_ADDR(svc->af, &least->addr),
+                      ntohs(least->port),
+                      atomic_read(&least->activeconns),
+                      atomic_read(&least->inactconns));

     return least;
 }
......
@@ -95,8 +95,10 @@ ip_vs_nq_schedule(struct ip_vs_service *svc, const struct sk_buff *skb)
         }
     }

-    if (!least)
+    if (!least) {
+        IP_VS_ERR_RL("NQ: no destination available\n");
         return NULL;
+    }

   out:
     IP_VS_DBG_BUF(6, "NQ: server %s:%u "
......
@@ -69,6 +69,7 @@ ip_vs_rr_schedule(struct ip_vs_service *svc, const struct sk_buff *skb)
         q = q->next;
     } while (q != p);
     write_unlock(&svc->sched_lock);
+    IP_VS_ERR_RL("RR: no destination available\n");
     return NULL;

   out:
......
@@ -84,6 +84,7 @@ ip_vs_sed_schedule(struct ip_vs_service *svc, const struct sk_buff *skb)
             goto nextstage;
         }
     }
+    IP_VS_ERR_RL("SED: no destination available\n");
     return NULL;

     /*
......
@@ -219,6 +219,7 @@ ip_vs_sh_schedule(struct ip_vs_service *svc, const struct sk_buff *skb)
         || !(dest->flags & IP_VS_DEST_F_AVAILABLE)
         || atomic_read(&dest->weight) <= 0
         || is_overloaded(dest)) {
+        IP_VS_ERR_RL("SH: no destination available\n");
         return NULL;
     }
......
@@ -72,6 +72,7 @@ ip_vs_wlc_schedule(struct ip_vs_service *svc, const struct sk_buff *skb)
             goto nextstage;
         }
     }
+    IP_VS_ERR_RL("WLC: no destination available\n");
     return NULL;

     /*
......
@@ -155,6 +155,8 @@ ip_vs_wrr_schedule(struct ip_vs_service *svc, const struct sk_buff *skb)
     if (mark->cl == mark->cl->next) {
         /* no dest entry */
+        IP_VS_ERR_RL("WRR: no destination available: "
+                     "no destinations present\n");
         dest = NULL;
         goto out;
     }
@@ -168,8 +170,8 @@ ip_vs_wrr_schedule(struct ip_vs_service *svc, const struct sk_buff *skb)
              */
             if (mark->cw == 0) {
                 mark->cl = &svc->destinations;
-                IP_VS_ERR_RL("ip_vs_wrr_schedule(): "
-                   "no available servers\n");
+                IP_VS_ERR_RL("WRR: no destination "
+                             "available\n");
                 dest = NULL;
                 goto out;
             }
@@ -191,6 +193,8 @@ ip_vs_wrr_schedule(struct ip_vs_service *svc, const struct sk_buff *skb)
                 /* back to the start, and no dest is found.
                    It is only possible when all dests are OVERLOADED */
                 dest = NULL;
+                IP_VS_ERR_RL("WRR: no destination available: "
+                             "all destinations are overloaded\n");
                 goto out;
             }
         }
......
@@ -200,7 +200,7 @@ static void *ct_cpu_seq_start(struct seq_file *seq, loff_t *pos)
     if (*pos == 0)
         return SEQ_START_TOKEN;

-    for (cpu = *pos-1; cpu < NR_CPUS; ++cpu) {
+    for (cpu = *pos-1; cpu < nr_cpu_ids; ++cpu) {
         if (!cpu_possible(cpu))
             continue;
         *pos = cpu + 1;
@@ -215,7 +215,7 @@ static void *ct_cpu_seq_next(struct seq_file *seq, void *v, loff_t *pos)
     struct net *net = seq_file_net(seq);
     int cpu;

-    for (cpu = *pos; cpu < NR_CPUS; ++cpu) {
+    for (cpu = *pos; cpu < nr_cpu_ids; ++cpu) {
         if (!cpu_possible(cpu))
             continue;
         *pos = cpu + 1;
......
@@ -335,9 +335,6 @@ config NET_CLS_CGROUP
       Say Y here if you want to classify packets based on the control
       cgroup of their process.

-      To compile this code as a module, choose M here: the
-      module will be called cls_cgroup.
-
 config NET_EMATCH
     bool "Extended Matches"
     select NET_CLS
......
@@ -24,10 +24,16 @@ struct cgroup_cls_state
     u32 classid;
 };

-static inline struct cgroup_cls_state *net_cls_state(struct cgroup *cgrp)
+static inline struct cgroup_cls_state *cgrp_cls_state(struct cgroup *cgrp)
 {
-    return (struct cgroup_cls_state *)
-        cgroup_subsys_state(cgrp, net_cls_subsys_id);
+    return container_of(cgroup_subsys_state(cgrp, net_cls_subsys_id),
+                        struct cgroup_cls_state, css);
+}
+
+static inline struct cgroup_cls_state *task_cls_state(struct task_struct *p)
+{
+    return container_of(task_subsys_state(p, net_cls_subsys_id),
+                        struct cgroup_cls_state, css);
 }

 static struct cgroup_subsys_state *cgrp_create(struct cgroup_subsys *ss,
@@ -39,19 +45,19 @@ static struct cgroup_subsys_state *cgrp_create(struct cgroup_subsys *ss,
         return ERR_PTR(-ENOMEM);

     if (cgrp->parent)
-        cs->classid = net_cls_state(cgrp->parent)->classid;
+        cs->classid = cgrp_cls_state(cgrp->parent)->classid;

     return &cs->css;
 }

 static void cgrp_destroy(struct cgroup_subsys *ss, struct cgroup *cgrp)
 {
-    kfree(ss);
+    kfree(cgrp_cls_state(cgrp));
 }

 static u64 read_classid(struct cgroup *cgrp, struct cftype *cft)
 {
-    return net_cls_state(cgrp)->classid;
+    return cgrp_cls_state(cgrp)->classid;
 }

 static int write_classid(struct cgroup *cgrp, struct cftype *cft, u64 value)
@@ -59,7 +65,7 @@ static int write_classid(struct cgroup *cgrp, struct cftype *cft, u64 value)
     if (!cgroup_lock_live_group(cgrp))
         return -ENODEV;

-    net_cls_state(cgrp)->classid = (u32) value;
+    cgrp_cls_state(cgrp)->classid = (u32) value;

     cgroup_unlock();
@@ -115,8 +121,7 @@ static int cls_cgroup_classify(struct sk_buff *skb, struct tcf_proto *tp,
         return -1;

     rcu_read_lock();
-    cs = (struct cgroup_cls_state *) task_subsys_state(current,
-                                                       net_cls_subsys_id);
+    cs = task_cls_state(current);
     if (cs->classid && tcf_em_tree_match(skb, &head->ematches, NULL)) {
         res->classid = cs->classid;
         res->class = 0;
......
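The cls_cgroup cleanup replaces raw casts with container_of(), which computes the address of the enclosing structure from a pointer to one of its members; unlike the old cast, it stays correct even if css stops being the first field. It also fixes the oops on cgroup removal: cgrp_destroy() used to kfree(ss), the subsystem pointer, instead of the state it allocated. An illustration with the struct from this file, where css_ptr is a hypothetical pointer to the embedded member:

struct cgroup_cls_state {
    struct cgroup_subsys_state css;
    u32 classid;
};

struct cgroup_cls_state *cs =
    container_of(css_ptr, struct cgroup_cls_state, css);
/* cs points at the enclosing cgroup_cls_state regardless of the
 * offset of 'css' within it */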
@@ -44,27 +44,14 @@ static struct snmp_mib xfrm_mib_list[] = {
     SNMP_MIB_SENTINEL
 };

-static unsigned long
-fold_field(void *mib[], int offt)
-{
-    unsigned long res = 0;
-    int i;
-
-    for_each_possible_cpu(i) {
-        res += *(((unsigned long *)per_cpu_ptr(mib[0], i)) + offt);
-        res += *(((unsigned long *)per_cpu_ptr(mib[1], i)) + offt);
-    }
-    return res;
-}
-
 static int xfrm_statistics_seq_show(struct seq_file *seq, void *v)
 {
     struct net *net = seq->private;
     int i;

     for (i=0; xfrm_mib_list[i].name; i++)
         seq_printf(seq, "%-24s\t%lu\n", xfrm_mib_list[i].name,
-                   fold_field((void **)net->mib.xfrm_statistics,
-                              xfrm_mib_list[i].entry));
+                   snmp_fold_field((void **)net->mib.xfrm_statistics,
+                                   xfrm_mib_list[i].entry));
     return 0;
 }
......
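The deleted fold_field() duplicated the generic snmp_fold_field() helper exported from net/ipv4/af_inet.c, which at this point in history has essentially the same shape (paraphrased below): it walks every possible CPU and sums the counter at the given offset across the two per-cpu MIB halves:

unsigned long snmp_fold_field(void *mib[], int offt)
{
    unsigned long res = 0;
    int i;

    /* mib[0]/mib[1] are the two per-cpu halves of the MIB table */
    for_each_possible_cpu(i) {
        res += *(((unsigned long *)per_cpu_ptr(mib[0], i)) + offt);
        res += *(((unsigned long *)per_cpu_ptr(mib[1], i)) + offt);
    }
    return res;
}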