提交 7fd10678 编写于 作者: S Simon Horman

Merge branch 'master' of...

Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/horms/lvs-2.6 into lvs-next-2.6
...@@ -242,4 +242,164 @@ struct ip_vs_daemon_user { ...@@ -242,4 +242,164 @@ struct ip_vs_daemon_user {
int syncid; int syncid;
}; };
/*
*
* IPVS Generic Netlink interface definitions
*
*/
/* Generic Netlink family info */
/* Family name and interface version userspace passes to genetlink to
 * resolve the IPVS family id. */
#define IPVS_GENL_NAME "IPVS"
#define IPVS_GENL_VERSION 0x1
/* A flag word plus a mask selecting which flag bits are significant.
 * __be32 fields are in network byte order.
 * NOTE(review): presumably the payload of IPVS_SVC_ATTR_FLAGS — confirm
 * against the attribute's users. */
struct ip_vs_flags {
__be32 flags;
__be32 mask;
};
/* Generic Netlink command attributes */
/* Command codes of the "IPVS" genetlink family (IPVS_GENL_NAME).
 * __IPVS_CMD_MAX is an internal sentinel: one past the last command. */
enum {
IPVS_CMD_UNSPEC = 0,
IPVS_CMD_NEW_SERVICE, /* add service */
IPVS_CMD_SET_SERVICE, /* modify service */
IPVS_CMD_DEL_SERVICE, /* delete service */
IPVS_CMD_GET_SERVICE, /* get service info */
IPVS_CMD_NEW_DEST, /* add destination */
IPVS_CMD_SET_DEST, /* modify destination */
IPVS_CMD_DEL_DEST, /* delete destination */
IPVS_CMD_GET_DEST, /* get destination info */
IPVS_CMD_NEW_DAEMON, /* start sync daemon */
IPVS_CMD_DEL_DAEMON, /* stop sync daemon */
IPVS_CMD_GET_DAEMON, /* get sync daemon status */
IPVS_CMD_SET_CONFIG, /* set config settings */
IPVS_CMD_GET_CONFIG, /* get config settings */
IPVS_CMD_SET_INFO, /* only used in GET_INFO reply */
IPVS_CMD_GET_INFO, /* get general IPVS info */
IPVS_CMD_ZERO, /* zero all counters and stats */
IPVS_CMD_FLUSH, /* flush services and dests */
__IPVS_CMD_MAX,
};
#define IPVS_CMD_MAX (__IPVS_CMD_MAX - 1) /* highest valid command code */
/* Attributes used in the first level of commands */
/* Top-level netlink attributes carried by the IPVS_CMD_* commands.
 * __IPVS_CMD_ATTR_MAX is an internal sentinel: one past the last
 * attribute. */
enum {
IPVS_CMD_ATTR_UNSPEC = 0,
IPVS_CMD_ATTR_SERVICE, /* nested service attribute */
IPVS_CMD_ATTR_DEST, /* nested destination attribute */
IPVS_CMD_ATTR_DAEMON, /* nested sync daemon attribute */
IPVS_CMD_ATTR_TIMEOUT_TCP, /* TCP connection timeout */
IPVS_CMD_ATTR_TIMEOUT_TCP_FIN, /* TCP FIN wait timeout */
IPVS_CMD_ATTR_TIMEOUT_UDP, /* UDP timeout */
__IPVS_CMD_ATTR_MAX,
};
/* BUGFIX: was defined from __IPVS_SVC_ATTR_MAX (the sentinel of the
 * *service* attribute enum below), which gives the wrong maximum for
 * this enum.  Derive it from this enum's own sentinel. */
#define IPVS_CMD_ATTR_MAX (__IPVS_CMD_ATTR_MAX - 1)
/*
* Attributes used to describe a service
*
* Used inside nested attribute IPVS_CMD_ATTR_SERVICE
*/
enum {
IPVS_SVC_ATTR_UNSPEC = 0,
IPVS_SVC_ATTR_AF, /* address family */
IPVS_SVC_ATTR_PROTOCOL, /* virtual service protocol */
IPVS_SVC_ATTR_ADDR, /* virtual service address */
IPVS_SVC_ATTR_PORT, /* virtual service port */
IPVS_SVC_ATTR_FWMARK, /* firewall mark of service */
IPVS_SVC_ATTR_SCHED_NAME, /* name of scheduler */
IPVS_SVC_ATTR_FLAGS, /* virtual service flags */
IPVS_SVC_ATTR_TIMEOUT, /* persistent timeout */
IPVS_SVC_ATTR_NETMASK, /* persistent netmask */
IPVS_SVC_ATTR_STATS, /* nested attribute for service stats */
__IPVS_SVC_ATTR_MAX, /* internal sentinel: one past the last attr */
};
#define IPVS_SVC_ATTR_MAX (__IPVS_SVC_ATTR_MAX - 1) /* highest valid attr */
/*
* Attributes used to describe a destination (real server)
*
* Used inside nested attribute IPVS_CMD_ATTR_DEST
*/
enum {
IPVS_DEST_ATTR_UNSPEC = 0,
IPVS_DEST_ATTR_ADDR, /* real server address */
IPVS_DEST_ATTR_PORT, /* real server port */
IPVS_DEST_ATTR_FWD_METHOD, /* forwarding method */
IPVS_DEST_ATTR_WEIGHT, /* destination weight */
IPVS_DEST_ATTR_U_THRESH, /* upper threshold */
IPVS_DEST_ATTR_L_THRESH, /* lower threshold */
IPVS_DEST_ATTR_ACTIVE_CONNS, /* active connections */
IPVS_DEST_ATTR_INACT_CONNS, /* inactive connections */
IPVS_DEST_ATTR_PERSIST_CONNS, /* persistent connections */
IPVS_DEST_ATTR_STATS, /* nested attribute for dest stats */
__IPVS_DEST_ATTR_MAX, /* internal sentinel: one past the last attr */
};
#define IPVS_DEST_ATTR_MAX (__IPVS_DEST_ATTR_MAX - 1) /* highest valid attr */
/*
* Attributes describing a sync daemon
*
* Used inside nested attribute IPVS_CMD_ATTR_DAEMON
*/
enum {
IPVS_DAEMON_ATTR_UNSPEC = 0,
IPVS_DAEMON_ATTR_STATE, /* sync daemon state (master/backup) */
IPVS_DAEMON_ATTR_MCAST_IFN, /* multicast interface name */
IPVS_DAEMON_ATTR_SYNC_ID, /* SyncID we belong to */
__IPVS_DAEMON_ATTR_MAX, /* internal sentinel: one past the last attr */
};
#define IPVS_DAEMON_ATTR_MAX (__IPVS_DAEMON_ATTR_MAX - 1) /* highest valid attr */
/*
* Attributes used to describe service or destination entry statistics
*
* Used inside nested attributes IPVS_SVC_ATTR_STATS and IPVS_DEST_ATTR_STATS
*/
enum {
IPVS_STATS_ATTR_UNSPEC = 0,
IPVS_STATS_ATTR_CONNS, /* connections scheduled */
IPVS_STATS_ATTR_INPKTS, /* incoming packets */
IPVS_STATS_ATTR_OUTPKTS, /* outgoing packets */
IPVS_STATS_ATTR_INBYTES, /* incoming bytes */
IPVS_STATS_ATTR_OUTBYTES, /* outgoing bytes */
IPVS_STATS_ATTR_CPS, /* current connection rate */
IPVS_STATS_ATTR_INPPS, /* current in packet rate */
IPVS_STATS_ATTR_OUTPPS, /* current out packet rate */
IPVS_STATS_ATTR_INBPS, /* current in byte rate */
IPVS_STATS_ATTR_OUTBPS, /* current out byte rate */
__IPVS_STATS_ATTR_MAX, /* internal sentinel: one past the last attr */
};
#define IPVS_STATS_ATTR_MAX (__IPVS_STATS_ATTR_MAX - 1) /* highest valid attr */
/* Attributes used in response to IPVS_CMD_GET_INFO command */
enum {
IPVS_INFO_ATTR_UNSPEC = 0,
IPVS_INFO_ATTR_VERSION, /* IPVS version number */
IPVS_INFO_ATTR_CONN_TAB_SIZE, /* size of connection hash table */
__IPVS_INFO_ATTR_MAX, /* internal sentinel: one past the last attr */
};
#define IPVS_INFO_ATTR_MAX (__IPVS_INFO_ATTR_MAX - 1) /* highest valid attr */
#endif /* _IP_VS_H */ #endif /* _IP_VS_H */
...@@ -683,6 +683,8 @@ extern void ip_vs_sync_conn(struct ip_vs_conn *cp); ...@@ -683,6 +683,8 @@ extern void ip_vs_sync_conn(struct ip_vs_conn *cp);
/* /*
* IPVS rate estimator prototypes (from ip_vs_est.c) * IPVS rate estimator prototypes (from ip_vs_est.c)
*/ */
/* Module-lifetime hooks for the rate estimator (ip_vs_est.c):
 * init arms the estimation timer, cleanup stops it synchronously. */
extern int ip_vs_estimator_init(void);
extern void ip_vs_estimator_cleanup(void);
extern void ip_vs_new_estimator(struct ip_vs_stats *stats); extern void ip_vs_new_estimator(struct ip_vs_stats *stats);
extern void ip_vs_kill_estimator(struct ip_vs_stats *stats); extern void ip_vs_kill_estimator(struct ip_vs_stats *stats);
extern void ip_vs_zero_estimator(struct ip_vs_stats *stats); extern void ip_vs_zero_estimator(struct ip_vs_stats *stats);
......
...@@ -71,14 +71,20 @@ config IP_VS_PROTO_UDP ...@@ -71,14 +71,20 @@ config IP_VS_PROTO_UDP
This option enables support for load balancing UDP transport This option enables support for load balancing UDP transport
protocol. Say Y if unsure. protocol. Say Y if unsure.
# Hidden helper symbol: it has no prompt and "depends on UNDEFINED"
# (never satisfiable directly), so it can only be turned on via
# 'select' from the user-visible AH/ESP options below.  Both protocols
# share one implementation object (ip_vs_proto_ah_esp.o).
config IP_VS_PROTO_AH_ESP
bool
depends on UNDEFINED
config IP_VS_PROTO_ESP config IP_VS_PROTO_ESP
bool "ESP load balancing support" bool "ESP load balancing support"
select IP_VS_PROTO_AH_ESP
---help--- ---help---
This option enables support for load balancing ESP (Encapsulation This option enables support for load balancing ESP (Encapsulation
Security Payload) transport protocol. Say Y if unsure. Security Payload) transport protocol. Say Y if unsure.
config IP_VS_PROTO_AH config IP_VS_PROTO_AH
bool "AH load balancing support" bool "AH load balancing support"
select IP_VS_PROTO_AH_ESP
---help--- ---help---
This option enables support for load balancing AH (Authentication This option enables support for load balancing AH (Authentication
Header) transport protocol. Say Y if unsure. Header) transport protocol. Say Y if unsure.
......
...@@ -6,8 +6,7 @@ ...@@ -6,8 +6,7 @@
ip_vs_proto-objs-y := ip_vs_proto-objs-y :=
ip_vs_proto-objs-$(CONFIG_IP_VS_PROTO_TCP) += ip_vs_proto_tcp.o ip_vs_proto-objs-$(CONFIG_IP_VS_PROTO_TCP) += ip_vs_proto_tcp.o
ip_vs_proto-objs-$(CONFIG_IP_VS_PROTO_UDP) += ip_vs_proto_udp.o ip_vs_proto-objs-$(CONFIG_IP_VS_PROTO_UDP) += ip_vs_proto_udp.o
ip_vs_proto-objs-$(CONFIG_IP_VS_PROTO_ESP) += ip_vs_proto_esp.o ip_vs_proto-objs-$(CONFIG_IP_VS_PROTO_AH_ESP) += ip_vs_proto_ah_esp.o
ip_vs_proto-objs-$(CONFIG_IP_VS_PROTO_AH) += ip_vs_proto_ah.o
ip_vs-objs := ip_vs_conn.o ip_vs_core.o ip_vs_ctl.o ip_vs_sched.o \ ip_vs-objs := ip_vs_conn.o ip_vs_core.o ip_vs_ctl.o ip_vs_sched.o \
ip_vs_xmit.o ip_vs_app.o ip_vs_sync.o \ ip_vs_xmit.o ip_vs_app.o ip_vs_sync.o \
......
...@@ -1070,10 +1070,12 @@ static int __init ip_vs_init(void) ...@@ -1070,10 +1070,12 @@ static int __init ip_vs_init(void)
{ {
int ret; int ret;
ip_vs_estimator_init();
ret = ip_vs_control_init(); ret = ip_vs_control_init();
if (ret < 0) { if (ret < 0) {
IP_VS_ERR("can't setup control.\n"); IP_VS_ERR("can't setup control.\n");
goto cleanup_nothing; goto cleanup_estimator;
} }
ip_vs_protocol_init(); ip_vs_protocol_init();
...@@ -1106,7 +1108,8 @@ static int __init ip_vs_init(void) ...@@ -1106,7 +1108,8 @@ static int __init ip_vs_init(void)
cleanup_protocol: cleanup_protocol:
ip_vs_protocol_cleanup(); ip_vs_protocol_cleanup();
ip_vs_control_cleanup(); ip_vs_control_cleanup();
cleanup_nothing: cleanup_estimator:
ip_vs_estimator_cleanup();
return ret; return ret;
} }
...@@ -1117,6 +1120,7 @@ static void __exit ip_vs_cleanup(void) ...@@ -1117,6 +1120,7 @@ static void __exit ip_vs_cleanup(void)
ip_vs_app_cleanup(); ip_vs_app_cleanup();
ip_vs_protocol_cleanup(); ip_vs_protocol_cleanup();
ip_vs_control_cleanup(); ip_vs_control_cleanup();
ip_vs_estimator_cleanup();
IP_VS_INFO("ipvs unloaded.\n"); IP_VS_INFO("ipvs unloaded.\n");
} }
......
此差异已折叠。
...@@ -124,8 +124,6 @@ void ip_vs_new_estimator(struct ip_vs_stats *stats) ...@@ -124,8 +124,6 @@ void ip_vs_new_estimator(struct ip_vs_stats *stats)
est->outbps = stats->outbps<<5; est->outbps = stats->outbps<<5;
spin_lock_bh(&est_lock); spin_lock_bh(&est_lock);
if (list_empty(&est_list))
mod_timer(&est_timer, jiffies + 2 * HZ);
list_add(&est->list, &est_list); list_add(&est->list, &est_list);
spin_unlock_bh(&est_lock); spin_unlock_bh(&est_lock);
} }
...@@ -136,11 +134,6 @@ void ip_vs_kill_estimator(struct ip_vs_stats *stats) ...@@ -136,11 +134,6 @@ void ip_vs_kill_estimator(struct ip_vs_stats *stats)
spin_lock_bh(&est_lock); spin_lock_bh(&est_lock);
list_del(&est->list); list_del(&est->list);
while (list_empty(&est_list) && try_to_del_timer_sync(&est_timer) < 0) {
spin_unlock_bh(&est_lock);
cpu_relax();
spin_lock_bh(&est_lock);
}
spin_unlock_bh(&est_lock); spin_unlock_bh(&est_lock);
} }
...@@ -160,3 +153,14 @@ void ip_vs_zero_estimator(struct ip_vs_stats *stats) ...@@ -160,3 +153,14 @@ void ip_vs_zero_estimator(struct ip_vs_stats *stats)
est->inbps = 0; est->inbps = 0;
est->outbps = 0; est->outbps = 0;
} }
/*
 * Arm the global rate-estimation timer to fire 2 seconds from now.
 * Called once during module initialization; always succeeds.
 */
int __init ip_vs_estimator_init(void)
{
mod_timer(&est_timer, jiffies + 2 * HZ);
return 0;
}
/*
 * Stop the estimation timer, waiting for a concurrently executing
 * timer handler to finish before returning.  Called on module unload
 * and on the init-failure path.
 */
void ip_vs_estimator_cleanup(void)
{
del_timer_sync(&est_timer);
}
...@@ -96,7 +96,6 @@ struct ip_vs_lblc_entry { ...@@ -96,7 +96,6 @@ struct ip_vs_lblc_entry {
* IPVS lblc hash table * IPVS lblc hash table
*/ */
struct ip_vs_lblc_table { struct ip_vs_lblc_table {
rwlock_t lock; /* lock for this table */
struct list_head bucket[IP_VS_LBLC_TAB_SIZE]; /* hash bucket */ struct list_head bucket[IP_VS_LBLC_TAB_SIZE]; /* hash bucket */
atomic_t entries; /* number of entries */ atomic_t entries; /* number of entries */
int max_size; /* maximum size of entries */ int max_size; /* maximum size of entries */
...@@ -123,31 +122,6 @@ static ctl_table vs_vars_table[] = { ...@@ -123,31 +122,6 @@ static ctl_table vs_vars_table[] = {
static struct ctl_table_header * sysctl_header; static struct ctl_table_header * sysctl_header;
/*
* new/free a ip_vs_lblc_entry, which is a mapping of a destionation
* IP address to a server.
*/
static inline struct ip_vs_lblc_entry *
ip_vs_lblc_new(__be32 daddr, struct ip_vs_dest *dest)
{
struct ip_vs_lblc_entry *en;
en = kmalloc(sizeof(struct ip_vs_lblc_entry), GFP_ATOMIC);
if (en == NULL) {
IP_VS_ERR("ip_vs_lblc_new(): no memory\n");
return NULL;
}
INIT_LIST_HEAD(&en->list);
en->addr = daddr;
atomic_inc(&dest->refcnt);
en->dest = dest;
return en;
}
static inline void ip_vs_lblc_free(struct ip_vs_lblc_entry *en) static inline void ip_vs_lblc_free(struct ip_vs_lblc_entry *en)
{ {
list_del(&en->list); list_del(&en->list);
...@@ -173,55 +147,66 @@ static inline unsigned ip_vs_lblc_hashkey(__be32 addr) ...@@ -173,55 +147,66 @@ static inline unsigned ip_vs_lblc_hashkey(__be32 addr)
* Hash an entry in the ip_vs_lblc_table. * Hash an entry in the ip_vs_lblc_table.
* returns bool success. * returns bool success.
*/ */
static int static void
ip_vs_lblc_hash(struct ip_vs_lblc_table *tbl, struct ip_vs_lblc_entry *en) ip_vs_lblc_hash(struct ip_vs_lblc_table *tbl, struct ip_vs_lblc_entry *en)
{ {
unsigned hash; unsigned hash = ip_vs_lblc_hashkey(en->addr);
if (!list_empty(&en->list)) {
IP_VS_ERR("ip_vs_lblc_hash(): request for already hashed, "
"called from %p\n", __builtin_return_address(0));
return 0;
}
/*
* Hash by destination IP address
*/
hash = ip_vs_lblc_hashkey(en->addr);
write_lock(&tbl->lock);
list_add(&en->list, &tbl->bucket[hash]); list_add(&en->list, &tbl->bucket[hash]);
atomic_inc(&tbl->entries); atomic_inc(&tbl->entries);
write_unlock(&tbl->lock);
return 1;
} }
/* /*
* Get ip_vs_lblc_entry associated with supplied parameters. * Get ip_vs_lblc_entry associated with supplied parameters. Called under read
* lock
*/ */
static inline struct ip_vs_lblc_entry * static inline struct ip_vs_lblc_entry *
ip_vs_lblc_get(struct ip_vs_lblc_table *tbl, __be32 addr) ip_vs_lblc_get(struct ip_vs_lblc_table *tbl, __be32 addr)
{ {
unsigned hash; unsigned hash = ip_vs_lblc_hashkey(addr);
struct ip_vs_lblc_entry *en; struct ip_vs_lblc_entry *en;
hash = ip_vs_lblc_hashkey(addr); list_for_each_entry(en, &tbl->bucket[hash], list)
if (en->addr == addr)
return en;
read_lock(&tbl->lock); return NULL;
}
list_for_each_entry(en, &tbl->bucket[hash], list) {
if (en->addr == addr) { /*
/* HIT */ * Create or update an ip_vs_lblc_entry, which is a mapping of a destination IP
read_unlock(&tbl->lock); * address to a server. Called under write lock.
return en; */
static inline struct ip_vs_lblc_entry *
ip_vs_lblc_new(struct ip_vs_lblc_table *tbl, __be32 daddr,
struct ip_vs_dest *dest)
{
struct ip_vs_lblc_entry *en;
en = ip_vs_lblc_get(tbl, daddr);
if (!en) {
en = kmalloc(sizeof(*en), GFP_ATOMIC);
if (!en) {
IP_VS_ERR("ip_vs_lblc_new(): no memory\n");
return NULL;
} }
}
read_unlock(&tbl->lock); en->addr = daddr;
en->lastuse = jiffies;
return NULL; atomic_inc(&dest->refcnt);
en->dest = dest;
ip_vs_lblc_hash(tbl, en);
} else if (en->dest != dest) {
atomic_dec(&en->dest->refcnt);
atomic_inc(&dest->refcnt);
en->dest = dest;
}
return en;
} }
...@@ -230,30 +215,29 @@ ip_vs_lblc_get(struct ip_vs_lblc_table *tbl, __be32 addr) ...@@ -230,30 +215,29 @@ ip_vs_lblc_get(struct ip_vs_lblc_table *tbl, __be32 addr)
*/ */
static void ip_vs_lblc_flush(struct ip_vs_lblc_table *tbl) static void ip_vs_lblc_flush(struct ip_vs_lblc_table *tbl)
{ {
int i;
struct ip_vs_lblc_entry *en, *nxt; struct ip_vs_lblc_entry *en, *nxt;
int i;
for (i=0; i<IP_VS_LBLC_TAB_SIZE; i++) { for (i=0; i<IP_VS_LBLC_TAB_SIZE; i++) {
write_lock(&tbl->lock);
list_for_each_entry_safe(en, nxt, &tbl->bucket[i], list) { list_for_each_entry_safe(en, nxt, &tbl->bucket[i], list) {
ip_vs_lblc_free(en); ip_vs_lblc_free(en);
atomic_dec(&tbl->entries); atomic_dec(&tbl->entries);
} }
write_unlock(&tbl->lock);
} }
} }
static inline void ip_vs_lblc_full_check(struct ip_vs_lblc_table *tbl) static inline void ip_vs_lblc_full_check(struct ip_vs_service *svc)
{ {
struct ip_vs_lblc_table *tbl = svc->sched_data;
struct ip_vs_lblc_entry *en, *nxt;
unsigned long now = jiffies; unsigned long now = jiffies;
int i, j; int i, j;
struct ip_vs_lblc_entry *en, *nxt;
for (i=0, j=tbl->rover; i<IP_VS_LBLC_TAB_SIZE; i++) { for (i=0, j=tbl->rover; i<IP_VS_LBLC_TAB_SIZE; i++) {
j = (j + 1) & IP_VS_LBLC_TAB_MASK; j = (j + 1) & IP_VS_LBLC_TAB_MASK;
write_lock(&tbl->lock); write_lock(&svc->sched_lock);
list_for_each_entry_safe(en, nxt, &tbl->bucket[j], list) { list_for_each_entry_safe(en, nxt, &tbl->bucket[j], list) {
if (time_before(now, if (time_before(now,
en->lastuse + sysctl_ip_vs_lblc_expiration)) en->lastuse + sysctl_ip_vs_lblc_expiration))
...@@ -262,7 +246,7 @@ static inline void ip_vs_lblc_full_check(struct ip_vs_lblc_table *tbl) ...@@ -262,7 +246,7 @@ static inline void ip_vs_lblc_full_check(struct ip_vs_lblc_table *tbl)
ip_vs_lblc_free(en); ip_vs_lblc_free(en);
atomic_dec(&tbl->entries); atomic_dec(&tbl->entries);
} }
write_unlock(&tbl->lock); write_unlock(&svc->sched_lock);
} }
tbl->rover = j; tbl->rover = j;
} }
...@@ -281,17 +265,16 @@ static inline void ip_vs_lblc_full_check(struct ip_vs_lblc_table *tbl) ...@@ -281,17 +265,16 @@ static inline void ip_vs_lblc_full_check(struct ip_vs_lblc_table *tbl)
*/ */
static void ip_vs_lblc_check_expire(unsigned long data) static void ip_vs_lblc_check_expire(unsigned long data)
{ {
struct ip_vs_lblc_table *tbl; struct ip_vs_service *svc = (struct ip_vs_service *) data;
struct ip_vs_lblc_table *tbl = svc->sched_data;
unsigned long now = jiffies; unsigned long now = jiffies;
int goal; int goal;
int i, j; int i, j;
struct ip_vs_lblc_entry *en, *nxt; struct ip_vs_lblc_entry *en, *nxt;
tbl = (struct ip_vs_lblc_table *)data;
if ((tbl->counter % COUNT_FOR_FULL_EXPIRATION) == 0) { if ((tbl->counter % COUNT_FOR_FULL_EXPIRATION) == 0) {
/* do full expiration check */ /* do full expiration check */
ip_vs_lblc_full_check(tbl); ip_vs_lblc_full_check(svc);
tbl->counter = 1; tbl->counter = 1;
goto out; goto out;
} }
...@@ -308,7 +291,7 @@ static void ip_vs_lblc_check_expire(unsigned long data) ...@@ -308,7 +291,7 @@ static void ip_vs_lblc_check_expire(unsigned long data)
for (i=0, j=tbl->rover; i<IP_VS_LBLC_TAB_SIZE; i++) { for (i=0, j=tbl->rover; i<IP_VS_LBLC_TAB_SIZE; i++) {
j = (j + 1) & IP_VS_LBLC_TAB_MASK; j = (j + 1) & IP_VS_LBLC_TAB_MASK;
write_lock(&tbl->lock); write_lock(&svc->sched_lock);
list_for_each_entry_safe(en, nxt, &tbl->bucket[j], list) { list_for_each_entry_safe(en, nxt, &tbl->bucket[j], list) {
if (time_before(now, en->lastuse + ENTRY_TIMEOUT)) if (time_before(now, en->lastuse + ENTRY_TIMEOUT))
continue; continue;
...@@ -317,7 +300,7 @@ static void ip_vs_lblc_check_expire(unsigned long data) ...@@ -317,7 +300,7 @@ static void ip_vs_lblc_check_expire(unsigned long data)
atomic_dec(&tbl->entries); atomic_dec(&tbl->entries);
goal--; goal--;
} }
write_unlock(&tbl->lock); write_unlock(&svc->sched_lock);
if (goal <= 0) if (goal <= 0)
break; break;
} }
...@@ -336,15 +319,14 @@ static int ip_vs_lblc_init_svc(struct ip_vs_service *svc) ...@@ -336,15 +319,14 @@ static int ip_vs_lblc_init_svc(struct ip_vs_service *svc)
/* /*
* Allocate the ip_vs_lblc_table for this service * Allocate the ip_vs_lblc_table for this service
*/ */
tbl = kmalloc(sizeof(struct ip_vs_lblc_table), GFP_ATOMIC); tbl = kmalloc(sizeof(*tbl), GFP_ATOMIC);
if (tbl == NULL) { if (tbl == NULL) {
IP_VS_ERR("ip_vs_lblc_init_svc(): no memory\n"); IP_VS_ERR("ip_vs_lblc_init_svc(): no memory\n");
return -ENOMEM; return -ENOMEM;
} }
svc->sched_data = tbl; svc->sched_data = tbl;
IP_VS_DBG(6, "LBLC hash table (memory=%Zdbytes) allocated for " IP_VS_DBG(6, "LBLC hash table (memory=%Zdbytes) allocated for "
"current service\n", "current service\n", sizeof(*tbl));
sizeof(struct ip_vs_lblc_table));
/* /*
* Initialize the hash buckets * Initialize the hash buckets
...@@ -352,7 +334,6 @@ static int ip_vs_lblc_init_svc(struct ip_vs_service *svc) ...@@ -352,7 +334,6 @@ static int ip_vs_lblc_init_svc(struct ip_vs_service *svc)
for (i=0; i<IP_VS_LBLC_TAB_SIZE; i++) { for (i=0; i<IP_VS_LBLC_TAB_SIZE; i++) {
INIT_LIST_HEAD(&tbl->bucket[i]); INIT_LIST_HEAD(&tbl->bucket[i]);
} }
rwlock_init(&tbl->lock);
tbl->max_size = IP_VS_LBLC_TAB_SIZE*16; tbl->max_size = IP_VS_LBLC_TAB_SIZE*16;
tbl->rover = 0; tbl->rover = 0;
tbl->counter = 1; tbl->counter = 1;
...@@ -361,9 +342,8 @@ static int ip_vs_lblc_init_svc(struct ip_vs_service *svc) ...@@ -361,9 +342,8 @@ static int ip_vs_lblc_init_svc(struct ip_vs_service *svc)
* Hook periodic timer for garbage collection * Hook periodic timer for garbage collection
*/ */
setup_timer(&tbl->periodic_timer, ip_vs_lblc_check_expire, setup_timer(&tbl->periodic_timer, ip_vs_lblc_check_expire,
(unsigned long)tbl); (unsigned long)svc);
tbl->periodic_timer.expires = jiffies+CHECK_EXPIRE_INTERVAL; mod_timer(&tbl->periodic_timer, jiffies + CHECK_EXPIRE_INTERVAL);
add_timer(&tbl->periodic_timer);
return 0; return 0;
} }
...@@ -380,22 +360,16 @@ static int ip_vs_lblc_done_svc(struct ip_vs_service *svc) ...@@ -380,22 +360,16 @@ static int ip_vs_lblc_done_svc(struct ip_vs_service *svc)
ip_vs_lblc_flush(tbl); ip_vs_lblc_flush(tbl);
/* release the table itself */ /* release the table itself */
kfree(svc->sched_data); kfree(tbl);
IP_VS_DBG(6, "LBLC hash table (memory=%Zdbytes) released\n", IP_VS_DBG(6, "LBLC hash table (memory=%Zdbytes) released\n",
sizeof(struct ip_vs_lblc_table)); sizeof(*tbl));
return 0; return 0;
} }
static int ip_vs_lblc_update_svc(struct ip_vs_service *svc)
{
return 0;
}
static inline struct ip_vs_dest * static inline struct ip_vs_dest *
__ip_vs_wlc_schedule(struct ip_vs_service *svc, struct iphdr *iph) __ip_vs_lblc_schedule(struct ip_vs_service *svc, struct iphdr *iph)
{ {
struct ip_vs_dest *dest, *least; struct ip_vs_dest *dest, *least;
int loh, doh; int loh, doh;
...@@ -484,46 +458,54 @@ is_overloaded(struct ip_vs_dest *dest, struct ip_vs_service *svc) ...@@ -484,46 +458,54 @@ is_overloaded(struct ip_vs_dest *dest, struct ip_vs_service *svc)
static struct ip_vs_dest * static struct ip_vs_dest *
ip_vs_lblc_schedule(struct ip_vs_service *svc, const struct sk_buff *skb) ip_vs_lblc_schedule(struct ip_vs_service *svc, const struct sk_buff *skb)
{ {
struct ip_vs_dest *dest; struct ip_vs_lblc_table *tbl = svc->sched_data;
struct ip_vs_lblc_table *tbl;
struct ip_vs_lblc_entry *en;
struct iphdr *iph = ip_hdr(skb); struct iphdr *iph = ip_hdr(skb);
struct ip_vs_dest *dest = NULL;
struct ip_vs_lblc_entry *en;
IP_VS_DBG(6, "ip_vs_lblc_schedule(): Scheduling...\n"); IP_VS_DBG(6, "ip_vs_lblc_schedule(): Scheduling...\n");
tbl = (struct ip_vs_lblc_table *)svc->sched_data; /* First look in our cache */
read_lock(&svc->sched_lock);
en = ip_vs_lblc_get(tbl, iph->daddr); en = ip_vs_lblc_get(tbl, iph->daddr);
if (en == NULL) { if (en) {
dest = __ip_vs_wlc_schedule(svc, iph); /* We only hold a read lock, but this is atomic */
if (dest == NULL) { en->lastuse = jiffies;
IP_VS_DBG(1, "no destination available\n");
return NULL; /*
} * If the destination is not available, i.e. it's in the trash,
en = ip_vs_lblc_new(iph->daddr, dest); * we must ignore it, as it may be removed from under our feet,
if (en == NULL) { * if someone drops our reference count. Our caller only makes
return NULL; * sure that destinations, that are not in the trash, are not
} * moved to the trash, while we are scheduling. But anyone can
ip_vs_lblc_hash(tbl, en); * free up entries from the trash at any time.
} else { */
dest = en->dest;
if (!(dest->flags & IP_VS_DEST_F_AVAILABLE) if (en->dest->flags & IP_VS_DEST_F_AVAILABLE)
|| atomic_read(&dest->weight) <= 0 dest = en->dest;
|| is_overloaded(dest, svc)) { }
dest = __ip_vs_wlc_schedule(svc, iph); read_unlock(&svc->sched_lock);
if (dest == NULL) {
IP_VS_DBG(1, "no destination available\n"); /* If the destination has a weight and is not overloaded, use it */
return NULL; if (dest && atomic_read(&dest->weight) > 0 && !is_overloaded(dest, svc))
} goto out;
atomic_dec(&en->dest->refcnt);
atomic_inc(&dest->refcnt); /* No cache entry or it is invalid, time to schedule */
en->dest = dest; dest = __ip_vs_lblc_schedule(svc, iph);
} if (!dest) {
IP_VS_DBG(1, "no destination available\n");
return NULL;
} }
en->lastuse = jiffies;
/* If we fail to create a cache entry, we'll just use the valid dest */
write_lock(&svc->sched_lock);
ip_vs_lblc_new(tbl, iph->daddr, dest);
write_unlock(&svc->sched_lock);
out:
IP_VS_DBG(6, "LBLC: destination IP address %u.%u.%u.%u " IP_VS_DBG(6, "LBLC: destination IP address %u.%u.%u.%u "
"--> server %u.%u.%u.%u:%d\n", "--> server %u.%u.%u.%u:%d\n",
NIPQUAD(en->addr), NIPQUAD(iph->daddr),
NIPQUAD(dest->addr), NIPQUAD(dest->addr),
ntohs(dest->port)); ntohs(dest->port));
...@@ -542,7 +524,6 @@ static struct ip_vs_scheduler ip_vs_lblc_scheduler = ...@@ -542,7 +524,6 @@ static struct ip_vs_scheduler ip_vs_lblc_scheduler =
.n_list = LIST_HEAD_INIT(ip_vs_lblc_scheduler.n_list), .n_list = LIST_HEAD_INIT(ip_vs_lblc_scheduler.n_list),
.init_service = ip_vs_lblc_init_svc, .init_service = ip_vs_lblc_init_svc,
.done_service = ip_vs_lblc_done_svc, .done_service = ip_vs_lblc_done_svc,
.update_service = ip_vs_lblc_update_svc,
.schedule = ip_vs_lblc_schedule, .schedule = ip_vs_lblc_schedule,
}; };
......
...@@ -106,7 +106,7 @@ ip_vs_dest_set_insert(struct ip_vs_dest_set *set, struct ip_vs_dest *dest) ...@@ -106,7 +106,7 @@ ip_vs_dest_set_insert(struct ip_vs_dest_set *set, struct ip_vs_dest *dest)
return NULL; return NULL;
} }
e = kmalloc(sizeof(struct ip_vs_dest_list), GFP_ATOMIC); e = kmalloc(sizeof(*e), GFP_ATOMIC);
if (e == NULL) { if (e == NULL) {
IP_VS_ERR("ip_vs_dest_set_insert(): no memory\n"); IP_VS_ERR("ip_vs_dest_set_insert(): no memory\n");
return NULL; return NULL;
...@@ -116,11 +116,9 @@ ip_vs_dest_set_insert(struct ip_vs_dest_set *set, struct ip_vs_dest *dest) ...@@ -116,11 +116,9 @@ ip_vs_dest_set_insert(struct ip_vs_dest_set *set, struct ip_vs_dest *dest)
e->dest = dest; e->dest = dest;
/* link it to the list */ /* link it to the list */
write_lock(&set->lock);
e->next = set->list; e->next = set->list;
set->list = e; set->list = e;
atomic_inc(&set->size); atomic_inc(&set->size);
write_unlock(&set->lock);
set->lastmod = jiffies; set->lastmod = jiffies;
return e; return e;
...@@ -131,7 +129,6 @@ ip_vs_dest_set_erase(struct ip_vs_dest_set *set, struct ip_vs_dest *dest) ...@@ -131,7 +129,6 @@ ip_vs_dest_set_erase(struct ip_vs_dest_set *set, struct ip_vs_dest *dest)
{ {
struct ip_vs_dest_list *e, **ep; struct ip_vs_dest_list *e, **ep;
write_lock(&set->lock);
for (ep=&set->list, e=*ep; e!=NULL; e=*ep) { for (ep=&set->list, e=*ep; e!=NULL; e=*ep) {
if (e->dest == dest) { if (e->dest == dest) {
/* HIT */ /* HIT */
...@@ -144,7 +141,6 @@ ip_vs_dest_set_erase(struct ip_vs_dest_set *set, struct ip_vs_dest *dest) ...@@ -144,7 +141,6 @@ ip_vs_dest_set_erase(struct ip_vs_dest_set *set, struct ip_vs_dest *dest)
} }
ep = &e->next; ep = &e->next;
} }
write_unlock(&set->lock);
} }
static void ip_vs_dest_set_eraseall(struct ip_vs_dest_set *set) static void ip_vs_dest_set_eraseall(struct ip_vs_dest_set *set)
...@@ -174,7 +170,6 @@ static inline struct ip_vs_dest *ip_vs_dest_set_min(struct ip_vs_dest_set *set) ...@@ -174,7 +170,6 @@ static inline struct ip_vs_dest *ip_vs_dest_set_min(struct ip_vs_dest_set *set)
if (set == NULL) if (set == NULL)
return NULL; return NULL;
read_lock(&set->lock);
/* select the first destination server, whose weight > 0 */ /* select the first destination server, whose weight > 0 */
for (e=set->list; e!=NULL; e=e->next) { for (e=set->list; e!=NULL; e=e->next) {
least = e->dest; least = e->dest;
...@@ -188,7 +183,6 @@ static inline struct ip_vs_dest *ip_vs_dest_set_min(struct ip_vs_dest_set *set) ...@@ -188,7 +183,6 @@ static inline struct ip_vs_dest *ip_vs_dest_set_min(struct ip_vs_dest_set *set)
goto nextstage; goto nextstage;
} }
} }
read_unlock(&set->lock);
return NULL; return NULL;
/* find the destination with the weighted least load */ /* find the destination with the weighted least load */
...@@ -207,7 +201,6 @@ static inline struct ip_vs_dest *ip_vs_dest_set_min(struct ip_vs_dest_set *set) ...@@ -207,7 +201,6 @@ static inline struct ip_vs_dest *ip_vs_dest_set_min(struct ip_vs_dest_set *set)
loh = doh; loh = doh;
} }
} }
read_unlock(&set->lock);
IP_VS_DBG(6, "ip_vs_dest_set_min: server %d.%d.%d.%d:%d " IP_VS_DBG(6, "ip_vs_dest_set_min: server %d.%d.%d.%d:%d "
"activeconns %d refcnt %d weight %d overhead %d\n", "activeconns %d refcnt %d weight %d overhead %d\n",
...@@ -229,7 +222,6 @@ static inline struct ip_vs_dest *ip_vs_dest_set_max(struct ip_vs_dest_set *set) ...@@ -229,7 +222,6 @@ static inline struct ip_vs_dest *ip_vs_dest_set_max(struct ip_vs_dest_set *set)
if (set == NULL) if (set == NULL)
return NULL; return NULL;
read_lock(&set->lock);
/* select the first destination server, whose weight > 0 */ /* select the first destination server, whose weight > 0 */
for (e=set->list; e!=NULL; e=e->next) { for (e=set->list; e!=NULL; e=e->next) {
most = e->dest; most = e->dest;
...@@ -239,7 +231,6 @@ static inline struct ip_vs_dest *ip_vs_dest_set_max(struct ip_vs_dest_set *set) ...@@ -239,7 +231,6 @@ static inline struct ip_vs_dest *ip_vs_dest_set_max(struct ip_vs_dest_set *set)
goto nextstage; goto nextstage;
} }
} }
read_unlock(&set->lock);
return NULL; return NULL;
/* find the destination with the weighted most load */ /* find the destination with the weighted most load */
...@@ -256,7 +247,6 @@ static inline struct ip_vs_dest *ip_vs_dest_set_max(struct ip_vs_dest_set *set) ...@@ -256,7 +247,6 @@ static inline struct ip_vs_dest *ip_vs_dest_set_max(struct ip_vs_dest_set *set)
moh = doh; moh = doh;
} }
} }
read_unlock(&set->lock);
IP_VS_DBG(6, "ip_vs_dest_set_max: server %d.%d.%d.%d:%d " IP_VS_DBG(6, "ip_vs_dest_set_max: server %d.%d.%d.%d:%d "
"activeconns %d refcnt %d weight %d overhead %d\n", "activeconns %d refcnt %d weight %d overhead %d\n",
...@@ -284,7 +274,6 @@ struct ip_vs_lblcr_entry { ...@@ -284,7 +274,6 @@ struct ip_vs_lblcr_entry {
* IPVS lblcr hash table * IPVS lblcr hash table
*/ */
struct ip_vs_lblcr_table { struct ip_vs_lblcr_table {
rwlock_t lock; /* lock for this table */
struct list_head bucket[IP_VS_LBLCR_TAB_SIZE]; /* hash bucket */ struct list_head bucket[IP_VS_LBLCR_TAB_SIZE]; /* hash bucket */
atomic_t entries; /* number of entries */ atomic_t entries; /* number of entries */
int max_size; /* maximum size of entries */ int max_size; /* maximum size of entries */
...@@ -311,32 +300,6 @@ static ctl_table vs_vars_table[] = { ...@@ -311,32 +300,6 @@ static ctl_table vs_vars_table[] = {
static struct ctl_table_header * sysctl_header; static struct ctl_table_header * sysctl_header;
/*
* new/free a ip_vs_lblcr_entry, which is a mapping of a destination
* IP address to a server.
*/
static inline struct ip_vs_lblcr_entry *ip_vs_lblcr_new(__be32 daddr)
{
struct ip_vs_lblcr_entry *en;
en = kmalloc(sizeof(struct ip_vs_lblcr_entry), GFP_ATOMIC);
if (en == NULL) {
IP_VS_ERR("ip_vs_lblcr_new(): no memory\n");
return NULL;
}
INIT_LIST_HEAD(&en->list);
en->addr = daddr;
/* initilize its dest set */
atomic_set(&(en->set.size), 0);
en->set.list = NULL;
rwlock_init(&en->set.lock);
return en;
}
static inline void ip_vs_lblcr_free(struct ip_vs_lblcr_entry *en) static inline void ip_vs_lblcr_free(struct ip_vs_lblcr_entry *en)
{ {
list_del(&en->list); list_del(&en->list);
...@@ -358,55 +321,68 @@ static inline unsigned ip_vs_lblcr_hashkey(__be32 addr) ...@@ -358,55 +321,68 @@ static inline unsigned ip_vs_lblcr_hashkey(__be32 addr)
* Hash an entry in the ip_vs_lblcr_table. * Hash an entry in the ip_vs_lblcr_table.
* returns bool success. * returns bool success.
*/ */
static int static void
ip_vs_lblcr_hash(struct ip_vs_lblcr_table *tbl, struct ip_vs_lblcr_entry *en) ip_vs_lblcr_hash(struct ip_vs_lblcr_table *tbl, struct ip_vs_lblcr_entry *en)
{ {
unsigned hash; unsigned hash = ip_vs_lblcr_hashkey(en->addr);
if (!list_empty(&en->list)) {
IP_VS_ERR("ip_vs_lblcr_hash(): request for already hashed, "
"called from %p\n", __builtin_return_address(0));
return 0;
}
/*
* Hash by destination IP address
*/
hash = ip_vs_lblcr_hashkey(en->addr);
write_lock(&tbl->lock);
list_add(&en->list, &tbl->bucket[hash]); list_add(&en->list, &tbl->bucket[hash]);
atomic_inc(&tbl->entries); atomic_inc(&tbl->entries);
write_unlock(&tbl->lock);
return 1;
} }
/* /*
* Get ip_vs_lblcr_entry associated with supplied parameters. * Get ip_vs_lblcr_entry associated with supplied parameters. Called under
* read lock.
*/ */
static inline struct ip_vs_lblcr_entry * static inline struct ip_vs_lblcr_entry *
ip_vs_lblcr_get(struct ip_vs_lblcr_table *tbl, __be32 addr) ip_vs_lblcr_get(struct ip_vs_lblcr_table *tbl, __be32 addr)
{ {
unsigned hash; unsigned hash = ip_vs_lblcr_hashkey(addr);
struct ip_vs_lblcr_entry *en; struct ip_vs_lblcr_entry *en;
hash = ip_vs_lblcr_hashkey(addr); list_for_each_entry(en, &tbl->bucket[hash], list)
if (en->addr == addr)
return en;
read_lock(&tbl->lock); return NULL;
}
list_for_each_entry(en, &tbl->bucket[hash], list) {
if (en->addr == addr) { /*
/* HIT */ * Create or update an ip_vs_lblcr_entry, which is a mapping of a destination
read_unlock(&tbl->lock); * IP address to a server. Called under write lock.
return en; */
static inline struct ip_vs_lblcr_entry *
ip_vs_lblcr_new(struct ip_vs_lblcr_table *tbl, __be32 daddr,
struct ip_vs_dest *dest)
{
struct ip_vs_lblcr_entry *en;
en = ip_vs_lblcr_get(tbl, daddr);
if (!en) {
en = kmalloc(sizeof(*en), GFP_ATOMIC);
if (!en) {
IP_VS_ERR("ip_vs_lblcr_new(): no memory\n");
return NULL;
} }
en->addr = daddr;
en->lastuse = jiffies;
/* initilize its dest set */
atomic_set(&(en->set.size), 0);
en->set.list = NULL;
rwlock_init(&en->set.lock);
ip_vs_lblcr_hash(tbl, en);
} }
read_unlock(&tbl->lock); write_lock(&en->set.lock);
ip_vs_dest_set_insert(&en->set, dest);
write_unlock(&en->set.lock);
return NULL; return en;
} }
...@@ -418,19 +394,18 @@ static void ip_vs_lblcr_flush(struct ip_vs_lblcr_table *tbl) ...@@ -418,19 +394,18 @@ static void ip_vs_lblcr_flush(struct ip_vs_lblcr_table *tbl)
int i; int i;
struct ip_vs_lblcr_entry *en, *nxt; struct ip_vs_lblcr_entry *en, *nxt;
/* No locking required, only called during cleanup. */
for (i=0; i<IP_VS_LBLCR_TAB_SIZE; i++) { for (i=0; i<IP_VS_LBLCR_TAB_SIZE; i++) {
write_lock(&tbl->lock);
list_for_each_entry_safe(en, nxt, &tbl->bucket[i], list) { list_for_each_entry_safe(en, nxt, &tbl->bucket[i], list) {
ip_vs_lblcr_free(en); ip_vs_lblcr_free(en);
atomic_dec(&tbl->entries);
} }
write_unlock(&tbl->lock);
} }
} }
static inline void ip_vs_lblcr_full_check(struct ip_vs_lblcr_table *tbl) static inline void ip_vs_lblcr_full_check(struct ip_vs_service *svc)
{ {
struct ip_vs_lblcr_table *tbl = svc->sched_data;
unsigned long now = jiffies; unsigned long now = jiffies;
int i, j; int i, j;
struct ip_vs_lblcr_entry *en, *nxt; struct ip_vs_lblcr_entry *en, *nxt;
...@@ -438,7 +413,7 @@ static inline void ip_vs_lblcr_full_check(struct ip_vs_lblcr_table *tbl) ...@@ -438,7 +413,7 @@ static inline void ip_vs_lblcr_full_check(struct ip_vs_lblcr_table *tbl)
for (i=0, j=tbl->rover; i<IP_VS_LBLCR_TAB_SIZE; i++) { for (i=0, j=tbl->rover; i<IP_VS_LBLCR_TAB_SIZE; i++) {
j = (j + 1) & IP_VS_LBLCR_TAB_MASK; j = (j + 1) & IP_VS_LBLCR_TAB_MASK;
write_lock(&tbl->lock); write_lock(&svc->sched_lock);
list_for_each_entry_safe(en, nxt, &tbl->bucket[j], list) { list_for_each_entry_safe(en, nxt, &tbl->bucket[j], list) {
if (time_after(en->lastuse+sysctl_ip_vs_lblcr_expiration, if (time_after(en->lastuse+sysctl_ip_vs_lblcr_expiration,
now)) now))
...@@ -447,7 +422,7 @@ static inline void ip_vs_lblcr_full_check(struct ip_vs_lblcr_table *tbl) ...@@ -447,7 +422,7 @@ static inline void ip_vs_lblcr_full_check(struct ip_vs_lblcr_table *tbl)
ip_vs_lblcr_free(en); ip_vs_lblcr_free(en);
atomic_dec(&tbl->entries); atomic_dec(&tbl->entries);
} }
write_unlock(&tbl->lock); write_unlock(&svc->sched_lock);
} }
tbl->rover = j; tbl->rover = j;
} }
...@@ -466,17 +441,16 @@ static inline void ip_vs_lblcr_full_check(struct ip_vs_lblcr_table *tbl) ...@@ -466,17 +441,16 @@ static inline void ip_vs_lblcr_full_check(struct ip_vs_lblcr_table *tbl)
*/ */
static void ip_vs_lblcr_check_expire(unsigned long data) static void ip_vs_lblcr_check_expire(unsigned long data)
{ {
struct ip_vs_lblcr_table *tbl; struct ip_vs_service *svc = (struct ip_vs_service *) data;
struct ip_vs_lblcr_table *tbl = svc->sched_data;
unsigned long now = jiffies; unsigned long now = jiffies;
int goal; int goal;
int i, j; int i, j;
struct ip_vs_lblcr_entry *en, *nxt; struct ip_vs_lblcr_entry *en, *nxt;
tbl = (struct ip_vs_lblcr_table *)data;
if ((tbl->counter % COUNT_FOR_FULL_EXPIRATION) == 0) { if ((tbl->counter % COUNT_FOR_FULL_EXPIRATION) == 0) {
/* do full expiration check */ /* do full expiration check */
ip_vs_lblcr_full_check(tbl); ip_vs_lblcr_full_check(svc);
tbl->counter = 1; tbl->counter = 1;
goto out; goto out;
} }
...@@ -493,7 +467,7 @@ static void ip_vs_lblcr_check_expire(unsigned long data) ...@@ -493,7 +467,7 @@ static void ip_vs_lblcr_check_expire(unsigned long data)
for (i=0, j=tbl->rover; i<IP_VS_LBLCR_TAB_SIZE; i++) { for (i=0, j=tbl->rover; i<IP_VS_LBLCR_TAB_SIZE; i++) {
j = (j + 1) & IP_VS_LBLCR_TAB_MASK; j = (j + 1) & IP_VS_LBLCR_TAB_MASK;
write_lock(&tbl->lock); write_lock(&svc->sched_lock);
list_for_each_entry_safe(en, nxt, &tbl->bucket[j], list) { list_for_each_entry_safe(en, nxt, &tbl->bucket[j], list) {
if (time_before(now, en->lastuse+ENTRY_TIMEOUT)) if (time_before(now, en->lastuse+ENTRY_TIMEOUT))
continue; continue;
...@@ -502,7 +476,7 @@ static void ip_vs_lblcr_check_expire(unsigned long data) ...@@ -502,7 +476,7 @@ static void ip_vs_lblcr_check_expire(unsigned long data)
atomic_dec(&tbl->entries); atomic_dec(&tbl->entries);
goal--; goal--;
} }
write_unlock(&tbl->lock); write_unlock(&svc->sched_lock);
if (goal <= 0) if (goal <= 0)
break; break;
} }
...@@ -520,15 +494,14 @@ static int ip_vs_lblcr_init_svc(struct ip_vs_service *svc) ...@@ -520,15 +494,14 @@ static int ip_vs_lblcr_init_svc(struct ip_vs_service *svc)
/* /*
* Allocate the ip_vs_lblcr_table for this service * Allocate the ip_vs_lblcr_table for this service
*/ */
tbl = kmalloc(sizeof(struct ip_vs_lblcr_table), GFP_ATOMIC); tbl = kmalloc(sizeof(*tbl), GFP_ATOMIC);
if (tbl == NULL) { if (tbl == NULL) {
IP_VS_ERR("ip_vs_lblcr_init_svc(): no memory\n"); IP_VS_ERR("ip_vs_lblcr_init_svc(): no memory\n");
return -ENOMEM; return -ENOMEM;
} }
svc->sched_data = tbl; svc->sched_data = tbl;
IP_VS_DBG(6, "LBLCR hash table (memory=%Zdbytes) allocated for " IP_VS_DBG(6, "LBLCR hash table (memory=%Zdbytes) allocated for "
"current service\n", "current service\n", sizeof(*tbl));
sizeof(struct ip_vs_lblcr_table));
/* /*
* Initialize the hash buckets * Initialize the hash buckets
...@@ -536,7 +509,6 @@ static int ip_vs_lblcr_init_svc(struct ip_vs_service *svc) ...@@ -536,7 +509,6 @@ static int ip_vs_lblcr_init_svc(struct ip_vs_service *svc)
for (i=0; i<IP_VS_LBLCR_TAB_SIZE; i++) { for (i=0; i<IP_VS_LBLCR_TAB_SIZE; i++) {
INIT_LIST_HEAD(&tbl->bucket[i]); INIT_LIST_HEAD(&tbl->bucket[i]);
} }
rwlock_init(&tbl->lock);
tbl->max_size = IP_VS_LBLCR_TAB_SIZE*16; tbl->max_size = IP_VS_LBLCR_TAB_SIZE*16;
tbl->rover = 0; tbl->rover = 0;
tbl->counter = 1; tbl->counter = 1;
...@@ -545,9 +517,8 @@ static int ip_vs_lblcr_init_svc(struct ip_vs_service *svc) ...@@ -545,9 +517,8 @@ static int ip_vs_lblcr_init_svc(struct ip_vs_service *svc)
* Hook periodic timer for garbage collection * Hook periodic timer for garbage collection
*/ */
setup_timer(&tbl->periodic_timer, ip_vs_lblcr_check_expire, setup_timer(&tbl->periodic_timer, ip_vs_lblcr_check_expire,
(unsigned long)tbl); (unsigned long)svc);
tbl->periodic_timer.expires = jiffies+CHECK_EXPIRE_INTERVAL; mod_timer(&tbl->periodic_timer, jiffies + CHECK_EXPIRE_INTERVAL);
add_timer(&tbl->periodic_timer);
return 0; return 0;
} }
...@@ -564,22 +535,16 @@ static int ip_vs_lblcr_done_svc(struct ip_vs_service *svc) ...@@ -564,22 +535,16 @@ static int ip_vs_lblcr_done_svc(struct ip_vs_service *svc)
ip_vs_lblcr_flush(tbl); ip_vs_lblcr_flush(tbl);
/* release the table itself */ /* release the table itself */
kfree(svc->sched_data); kfree(tbl);
IP_VS_DBG(6, "LBLCR hash table (memory=%Zdbytes) released\n", IP_VS_DBG(6, "LBLCR hash table (memory=%Zdbytes) released\n",
sizeof(struct ip_vs_lblcr_table)); sizeof(*tbl));
return 0; return 0;
} }
static int ip_vs_lblcr_update_svc(struct ip_vs_service *svc)
{
return 0;
}
static inline struct ip_vs_dest * static inline struct ip_vs_dest *
__ip_vs_wlc_schedule(struct ip_vs_service *svc, struct iphdr *iph) __ip_vs_lblcr_schedule(struct ip_vs_service *svc, struct iphdr *iph)
{ {
struct ip_vs_dest *dest, *least; struct ip_vs_dest *dest, *least;
int loh, doh; int loh, doh;
...@@ -669,50 +634,78 @@ is_overloaded(struct ip_vs_dest *dest, struct ip_vs_service *svc) ...@@ -669,50 +634,78 @@ is_overloaded(struct ip_vs_dest *dest, struct ip_vs_service *svc)
static struct ip_vs_dest * static struct ip_vs_dest *
ip_vs_lblcr_schedule(struct ip_vs_service *svc, const struct sk_buff *skb) ip_vs_lblcr_schedule(struct ip_vs_service *svc, const struct sk_buff *skb)
{ {
struct ip_vs_dest *dest; struct ip_vs_lblcr_table *tbl = svc->sched_data;
struct ip_vs_lblcr_table *tbl;
struct ip_vs_lblcr_entry *en;
struct iphdr *iph = ip_hdr(skb); struct iphdr *iph = ip_hdr(skb);
struct ip_vs_dest *dest = NULL;
struct ip_vs_lblcr_entry *en;
IP_VS_DBG(6, "ip_vs_lblcr_schedule(): Scheduling...\n"); IP_VS_DBG(6, "ip_vs_lblcr_schedule(): Scheduling...\n");
tbl = (struct ip_vs_lblcr_table *)svc->sched_data; /* First look in our cache */
read_lock(&svc->sched_lock);
en = ip_vs_lblcr_get(tbl, iph->daddr); en = ip_vs_lblcr_get(tbl, iph->daddr);
if (en == NULL) { if (en) {
dest = __ip_vs_wlc_schedule(svc, iph); /* We only hold a read lock, but this is atomic */
if (dest == NULL) { en->lastuse = jiffies;
IP_VS_DBG(1, "no destination available\n");
return NULL; /* Get the least loaded destination */
} read_lock(&en->set.lock);
en = ip_vs_lblcr_new(iph->daddr);
if (en == NULL) {
return NULL;
}
ip_vs_dest_set_insert(&en->set, dest);
ip_vs_lblcr_hash(tbl, en);
} else {
dest = ip_vs_dest_set_min(&en->set); dest = ip_vs_dest_set_min(&en->set);
if (!dest || is_overloaded(dest, svc)) { read_unlock(&en->set.lock);
dest = __ip_vs_wlc_schedule(svc, iph);
if (dest == NULL) { /* More than one destination + enough time passed by, cleanup */
IP_VS_DBG(1, "no destination available\n");
return NULL;
}
ip_vs_dest_set_insert(&en->set, dest);
}
if (atomic_read(&en->set.size) > 1 && if (atomic_read(&en->set.size) > 1 &&
jiffies-en->set.lastmod > sysctl_ip_vs_lblcr_expiration) { time_after(jiffies, en->set.lastmod +
sysctl_ip_vs_lblcr_expiration)) {
struct ip_vs_dest *m; struct ip_vs_dest *m;
write_lock(&en->set.lock);
m = ip_vs_dest_set_max(&en->set); m = ip_vs_dest_set_max(&en->set);
if (m) if (m)
ip_vs_dest_set_erase(&en->set, m); ip_vs_dest_set_erase(&en->set, m);
write_unlock(&en->set.lock);
}
/* If the destination is not overloaded, use it */
if (dest && !is_overloaded(dest, svc)) {
read_unlock(&svc->sched_lock);
goto out;
} }
/* The cache entry is invalid, time to schedule */
dest = __ip_vs_lblcr_schedule(svc, iph);
if (!dest) {
IP_VS_DBG(1, "no destination available\n");
read_unlock(&svc->sched_lock);
return NULL;
}
/* Update our cache entry */
write_lock(&en->set.lock);
ip_vs_dest_set_insert(&en->set, dest);
write_unlock(&en->set.lock);
}
read_unlock(&svc->sched_lock);
if (dest)
goto out;
/* No cache entry, time to schedule */
dest = __ip_vs_lblcr_schedule(svc, iph);
if (!dest) {
IP_VS_DBG(1, "no destination available\n");
return NULL;
} }
en->lastuse = jiffies;
/* If we fail to create a cache entry, we'll just use the valid dest */
write_lock(&svc->sched_lock);
ip_vs_lblcr_new(tbl, iph->daddr, dest);
write_unlock(&svc->sched_lock);
out:
IP_VS_DBG(6, "LBLCR: destination IP address %u.%u.%u.%u " IP_VS_DBG(6, "LBLCR: destination IP address %u.%u.%u.%u "
"--> server %u.%u.%u.%u:%d\n", "--> server %u.%u.%u.%u:%d\n",
NIPQUAD(en->addr), NIPQUAD(iph->daddr),
NIPQUAD(dest->addr), NIPQUAD(dest->addr),
ntohs(dest->port)); ntohs(dest->port));
...@@ -731,7 +724,6 @@ static struct ip_vs_scheduler ip_vs_lblcr_scheduler = ...@@ -731,7 +724,6 @@ static struct ip_vs_scheduler ip_vs_lblcr_scheduler =
.n_list = LIST_HEAD_INIT(ip_vs_lblcr_scheduler.n_list), .n_list = LIST_HEAD_INIT(ip_vs_lblcr_scheduler.n_list),
.init_service = ip_vs_lblcr_init_svc, .init_service = ip_vs_lblcr_init_svc,
.done_service = ip_vs_lblcr_done_svc, .done_service = ip_vs_lblcr_done_svc,
.update_service = ip_vs_lblcr_update_svc,
.schedule = ip_vs_lblcr_schedule, .schedule = ip_vs_lblcr_schedule,
}; };
......
...@@ -20,24 +20,6 @@ ...@@ -20,24 +20,6 @@
#include <net/ip_vs.h> #include <net/ip_vs.h>
static int ip_vs_lc_init_svc(struct ip_vs_service *svc)
{
return 0;
}
static int ip_vs_lc_done_svc(struct ip_vs_service *svc)
{
return 0;
}
static int ip_vs_lc_update_svc(struct ip_vs_service *svc)
{
return 0;
}
static inline unsigned int static inline unsigned int
ip_vs_lc_dest_overhead(struct ip_vs_dest *dest) ip_vs_lc_dest_overhead(struct ip_vs_dest *dest)
{ {
...@@ -99,9 +81,6 @@ static struct ip_vs_scheduler ip_vs_lc_scheduler = { ...@@ -99,9 +81,6 @@ static struct ip_vs_scheduler ip_vs_lc_scheduler = {
.refcnt = ATOMIC_INIT(0), .refcnt = ATOMIC_INIT(0),
.module = THIS_MODULE, .module = THIS_MODULE,
.n_list = LIST_HEAD_INIT(ip_vs_lc_scheduler.n_list), .n_list = LIST_HEAD_INIT(ip_vs_lc_scheduler.n_list),
.init_service = ip_vs_lc_init_svc,
.done_service = ip_vs_lc_done_svc,
.update_service = ip_vs_lc_update_svc,
.schedule = ip_vs_lc_schedule, .schedule = ip_vs_lc_schedule,
}; };
......
...@@ -37,27 +37,6 @@ ...@@ -37,27 +37,6 @@
#include <net/ip_vs.h> #include <net/ip_vs.h>
static int
ip_vs_nq_init_svc(struct ip_vs_service *svc)
{
return 0;
}
static int
ip_vs_nq_done_svc(struct ip_vs_service *svc)
{
return 0;
}
static int
ip_vs_nq_update_svc(struct ip_vs_service *svc)
{
return 0;
}
static inline unsigned int static inline unsigned int
ip_vs_nq_dest_overhead(struct ip_vs_dest *dest) ip_vs_nq_dest_overhead(struct ip_vs_dest *dest)
{ {
...@@ -137,9 +116,6 @@ static struct ip_vs_scheduler ip_vs_nq_scheduler = ...@@ -137,9 +116,6 @@ static struct ip_vs_scheduler ip_vs_nq_scheduler =
.refcnt = ATOMIC_INIT(0), .refcnt = ATOMIC_INIT(0),
.module = THIS_MODULE, .module = THIS_MODULE,
.n_list = LIST_HEAD_INIT(ip_vs_nq_scheduler.n_list), .n_list = LIST_HEAD_INIT(ip_vs_nq_scheduler.n_list),
.init_service = ip_vs_nq_init_svc,
.done_service = ip_vs_nq_done_svc,
.update_service = ip_vs_nq_update_svc,
.schedule = ip_vs_nq_schedule, .schedule = ip_vs_nq_schedule,
}; };
......
/* /*
* ip_vs_proto_ah.c: AH IPSec load balancing support for IPVS * ip_vs_proto_ah_esp.c: AH/ESP IPSec load balancing support for IPVS
* *
* Authors: Julian Anastasov <ja@ssi.bg>, February 2002 * Authors: Julian Anastasov <ja@ssi.bg>, February 2002
* Wensong Zhang <wensong@linuxvirtualserver.org> * Wensong Zhang <wensong@linuxvirtualserver.org>
...@@ -39,11 +39,11 @@ struct isakmp_hdr { ...@@ -39,11 +39,11 @@ struct isakmp_hdr {
static struct ip_vs_conn * static struct ip_vs_conn *
ah_conn_in_get(const struct sk_buff *skb, ah_esp_conn_in_get(const struct sk_buff *skb,
struct ip_vs_protocol *pp, struct ip_vs_protocol *pp,
const struct iphdr *iph, const struct iphdr *iph,
unsigned int proto_off, unsigned int proto_off,
int inverse) int inverse)
{ {
struct ip_vs_conn *cp; struct ip_vs_conn *cp;
...@@ -79,8 +79,8 @@ ah_conn_in_get(const struct sk_buff *skb, ...@@ -79,8 +79,8 @@ ah_conn_in_get(const struct sk_buff *skb,
static struct ip_vs_conn * static struct ip_vs_conn *
ah_conn_out_get(const struct sk_buff *skb, struct ip_vs_protocol *pp, ah_esp_conn_out_get(const struct sk_buff *skb, struct ip_vs_protocol *pp,
const struct iphdr *iph, unsigned int proto_off, int inverse) const struct iphdr *iph, unsigned int proto_off, int inverse)
{ {
struct ip_vs_conn *cp; struct ip_vs_conn *cp;
...@@ -112,12 +112,12 @@ ah_conn_out_get(const struct sk_buff *skb, struct ip_vs_protocol *pp, ...@@ -112,12 +112,12 @@ ah_conn_out_get(const struct sk_buff *skb, struct ip_vs_protocol *pp,
static int static int
ah_conn_schedule(struct sk_buff *skb, ah_esp_conn_schedule(struct sk_buff *skb,
struct ip_vs_protocol *pp, struct ip_vs_protocol *pp,
int *verdict, struct ip_vs_conn **cpp) int *verdict, struct ip_vs_conn **cpp)
{ {
/* /*
* AH is only related traffic. Pass the packet to IP stack. * AH/ESP is only related traffic. Pass the packet to IP stack.
*/ */
*verdict = NF_ACCEPT; *verdict = NF_ACCEPT;
return 0; return 0;
...@@ -125,8 +125,8 @@ ah_conn_schedule(struct sk_buff *skb, ...@@ -125,8 +125,8 @@ ah_conn_schedule(struct sk_buff *skb,
static void static void
ah_debug_packet(struct ip_vs_protocol *pp, const struct sk_buff *skb, ah_esp_debug_packet(struct ip_vs_protocol *pp, const struct sk_buff *skb,
int offset, const char *msg) int offset, const char *msg)
{ {
char buf[256]; char buf[256];
struct iphdr _iph, *ih; struct iphdr _iph, *ih;
...@@ -143,28 +143,29 @@ ah_debug_packet(struct ip_vs_protocol *pp, const struct sk_buff *skb, ...@@ -143,28 +143,29 @@ ah_debug_packet(struct ip_vs_protocol *pp, const struct sk_buff *skb,
} }
static void ah_init(struct ip_vs_protocol *pp) static void ah_esp_init(struct ip_vs_protocol *pp)
{ {
/* nothing to do now */ /* nothing to do now */
} }
static void ah_exit(struct ip_vs_protocol *pp) static void ah_esp_exit(struct ip_vs_protocol *pp)
{ {
/* nothing to do now */ /* nothing to do now */
} }
#ifdef CONFIG_IP_VS_PROTO_AH
struct ip_vs_protocol ip_vs_protocol_ah = { struct ip_vs_protocol ip_vs_protocol_ah = {
.name = "AH", .name = "AH",
.protocol = IPPROTO_AH, .protocol = IPPROTO_AH,
.num_states = 1, .num_states = 1,
.dont_defrag = 1, .dont_defrag = 1,
.init = ah_init, .init = ah_esp_init,
.exit = ah_exit, .exit = ah_esp_exit,
.conn_schedule = ah_conn_schedule, .conn_schedule = ah_esp_conn_schedule,
.conn_in_get = ah_conn_in_get, .conn_in_get = ah_esp_conn_in_get,
.conn_out_get = ah_conn_out_get, .conn_out_get = ah_esp_conn_out_get,
.snat_handler = NULL, .snat_handler = NULL,
.dnat_handler = NULL, .dnat_handler = NULL,
.csum_check = NULL, .csum_check = NULL,
...@@ -172,7 +173,31 @@ struct ip_vs_protocol ip_vs_protocol_ah = { ...@@ -172,7 +173,31 @@ struct ip_vs_protocol ip_vs_protocol_ah = {
.register_app = NULL, .register_app = NULL,
.unregister_app = NULL, .unregister_app = NULL,
.app_conn_bind = NULL, .app_conn_bind = NULL,
.debug_packet = ah_debug_packet, .debug_packet = ah_esp_debug_packet,
.timeout_change = NULL, /* ISAKMP */ .timeout_change = NULL, /* ISAKMP */
.set_state_timeout = NULL, .set_state_timeout = NULL,
}; };
#endif
#ifdef CONFIG_IP_VS_PROTO_ESP
struct ip_vs_protocol ip_vs_protocol_esp = {
.name = "ESP",
.protocol = IPPROTO_ESP,
.num_states = 1,
.dont_defrag = 1,
.init = ah_esp_init,
.exit = ah_esp_exit,
.conn_schedule = ah_esp_conn_schedule,
.conn_in_get = ah_esp_conn_in_get,
.conn_out_get = ah_esp_conn_out_get,
.snat_handler = NULL,
.dnat_handler = NULL,
.csum_check = NULL,
.state_transition = NULL,
.register_app = NULL,
.unregister_app = NULL,
.app_conn_bind = NULL,
.debug_packet = ah_esp_debug_packet,
.timeout_change = NULL, /* ISAKMP */
};
#endif
/*
* ip_vs_proto_esp.c: ESP IPSec load balancing support for IPVS
*
* Authors: Julian Anastasov <ja@ssi.bg>, February 2002
* Wensong Zhang <wensong@linuxvirtualserver.org>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* version 2 as published by the Free Software Foundation;
*
*/
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/netfilter.h>
#include <linux/netfilter_ipv4.h>
#include <net/ip_vs.h>
/* TODO:
struct isakmp_hdr {
__u8 icookie[8];
__u8 rcookie[8];
__u8 np;
__u8 version;
__u8 xchgtype;
__u8 flags;
__u32 msgid;
__u32 length;
};
*/
#define PORT_ISAKMP 500
static struct ip_vs_conn *
esp_conn_in_get(const struct sk_buff *skb,
struct ip_vs_protocol *pp,
const struct iphdr *iph,
unsigned int proto_off,
int inverse)
{
struct ip_vs_conn *cp;
if (likely(!inverse)) {
cp = ip_vs_conn_in_get(IPPROTO_UDP,
iph->saddr,
htons(PORT_ISAKMP),
iph->daddr,
htons(PORT_ISAKMP));
} else {
cp = ip_vs_conn_in_get(IPPROTO_UDP,
iph->daddr,
htons(PORT_ISAKMP),
iph->saddr,
htons(PORT_ISAKMP));
}
if (!cp) {
/*
* We are not sure if the packet is from our
* service, so our conn_schedule hook should return NF_ACCEPT
*/
IP_VS_DBG(12, "Unknown ISAKMP entry for outin packet "
"%s%s %u.%u.%u.%u->%u.%u.%u.%u\n",
inverse ? "ICMP+" : "",
pp->name,
NIPQUAD(iph->saddr),
NIPQUAD(iph->daddr));
}
return cp;
}
static struct ip_vs_conn *
esp_conn_out_get(const struct sk_buff *skb, struct ip_vs_protocol *pp,
const struct iphdr *iph, unsigned int proto_off, int inverse)
{
struct ip_vs_conn *cp;
if (likely(!inverse)) {
cp = ip_vs_conn_out_get(IPPROTO_UDP,
iph->saddr,
htons(PORT_ISAKMP),
iph->daddr,
htons(PORT_ISAKMP));
} else {
cp = ip_vs_conn_out_get(IPPROTO_UDP,
iph->daddr,
htons(PORT_ISAKMP),
iph->saddr,
htons(PORT_ISAKMP));
}
if (!cp) {
IP_VS_DBG(12, "Unknown ISAKMP entry for inout packet "
"%s%s %u.%u.%u.%u->%u.%u.%u.%u\n",
inverse ? "ICMP+" : "",
pp->name,
NIPQUAD(iph->saddr),
NIPQUAD(iph->daddr));
}
return cp;
}
static int
esp_conn_schedule(struct sk_buff *skb, struct ip_vs_protocol *pp,
int *verdict, struct ip_vs_conn **cpp)
{
/*
* ESP is only related traffic. Pass the packet to IP stack.
*/
*verdict = NF_ACCEPT;
return 0;
}
static void
esp_debug_packet(struct ip_vs_protocol *pp, const struct sk_buff *skb,
int offset, const char *msg)
{
char buf[256];
struct iphdr _iph, *ih;
ih = skb_header_pointer(skb, offset, sizeof(_iph), &_iph);
if (ih == NULL)
sprintf(buf, "%s TRUNCATED", pp->name);
else
sprintf(buf, "%s %u.%u.%u.%u->%u.%u.%u.%u",
pp->name, NIPQUAD(ih->saddr),
NIPQUAD(ih->daddr));
printk(KERN_DEBUG "IPVS: %s: %s\n", msg, buf);
}
static void esp_init(struct ip_vs_protocol *pp)
{
/* nothing to do now */
}
static void esp_exit(struct ip_vs_protocol *pp)
{
/* nothing to do now */
}
struct ip_vs_protocol ip_vs_protocol_esp = {
.name = "ESP",
.protocol = IPPROTO_ESP,
.num_states = 1,
.dont_defrag = 1,
.init = esp_init,
.exit = esp_exit,
.conn_schedule = esp_conn_schedule,
.conn_in_get = esp_conn_in_get,
.conn_out_get = esp_conn_out_get,
.snat_handler = NULL,
.dnat_handler = NULL,
.csum_check = NULL,
.state_transition = NULL,
.register_app = NULL,
.unregister_app = NULL,
.app_conn_bind = NULL,
.debug_packet = esp_debug_packet,
.timeout_change = NULL, /* ISAKMP */
};
...@@ -32,12 +32,6 @@ static int ip_vs_rr_init_svc(struct ip_vs_service *svc) ...@@ -32,12 +32,6 @@ static int ip_vs_rr_init_svc(struct ip_vs_service *svc)
} }
static int ip_vs_rr_done_svc(struct ip_vs_service *svc)
{
return 0;
}
static int ip_vs_rr_update_svc(struct ip_vs_service *svc) static int ip_vs_rr_update_svc(struct ip_vs_service *svc)
{ {
svc->sched_data = &svc->destinations; svc->sched_data = &svc->destinations;
...@@ -96,7 +90,6 @@ static struct ip_vs_scheduler ip_vs_rr_scheduler = { ...@@ -96,7 +90,6 @@ static struct ip_vs_scheduler ip_vs_rr_scheduler = {
.module = THIS_MODULE, .module = THIS_MODULE,
.n_list = LIST_HEAD_INIT(ip_vs_rr_scheduler.n_list), .n_list = LIST_HEAD_INIT(ip_vs_rr_scheduler.n_list),
.init_service = ip_vs_rr_init_svc, .init_service = ip_vs_rr_init_svc,
.done_service = ip_vs_rr_done_svc,
.update_service = ip_vs_rr_update_svc, .update_service = ip_vs_rr_update_svc,
.schedule = ip_vs_rr_schedule, .schedule = ip_vs_rr_schedule,
}; };
......
...@@ -41,27 +41,6 @@ ...@@ -41,27 +41,6 @@
#include <net/ip_vs.h> #include <net/ip_vs.h>
static int
ip_vs_sed_init_svc(struct ip_vs_service *svc)
{
return 0;
}
static int
ip_vs_sed_done_svc(struct ip_vs_service *svc)
{
return 0;
}
static int
ip_vs_sed_update_svc(struct ip_vs_service *svc)
{
return 0;
}
static inline unsigned int static inline unsigned int
ip_vs_sed_dest_overhead(struct ip_vs_dest *dest) ip_vs_sed_dest_overhead(struct ip_vs_dest *dest)
{ {
...@@ -139,9 +118,6 @@ static struct ip_vs_scheduler ip_vs_sed_scheduler = ...@@ -139,9 +118,6 @@ static struct ip_vs_scheduler ip_vs_sed_scheduler =
.refcnt = ATOMIC_INIT(0), .refcnt = ATOMIC_INIT(0),
.module = THIS_MODULE, .module = THIS_MODULE,
.n_list = LIST_HEAD_INIT(ip_vs_sed_scheduler.n_list), .n_list = LIST_HEAD_INIT(ip_vs_sed_scheduler.n_list),
.init_service = ip_vs_sed_init_svc,
.done_service = ip_vs_sed_done_svc,
.update_service = ip_vs_sed_update_svc,
.schedule = ip_vs_sed_schedule, .schedule = ip_vs_sed_schedule,
}; };
......
...@@ -25,27 +25,6 @@ ...@@ -25,27 +25,6 @@
#include <net/ip_vs.h> #include <net/ip_vs.h>
static int
ip_vs_wlc_init_svc(struct ip_vs_service *svc)
{
return 0;
}
static int
ip_vs_wlc_done_svc(struct ip_vs_service *svc)
{
return 0;
}
static int
ip_vs_wlc_update_svc(struct ip_vs_service *svc)
{
return 0;
}
static inline unsigned int static inline unsigned int
ip_vs_wlc_dest_overhead(struct ip_vs_dest *dest) ip_vs_wlc_dest_overhead(struct ip_vs_dest *dest)
{ {
...@@ -127,9 +106,6 @@ static struct ip_vs_scheduler ip_vs_wlc_scheduler = ...@@ -127,9 +106,6 @@ static struct ip_vs_scheduler ip_vs_wlc_scheduler =
.refcnt = ATOMIC_INIT(0), .refcnt = ATOMIC_INIT(0),
.module = THIS_MODULE, .module = THIS_MODULE,
.n_list = LIST_HEAD_INIT(ip_vs_wlc_scheduler.n_list), .n_list = LIST_HEAD_INIT(ip_vs_wlc_scheduler.n_list),
.init_service = ip_vs_wlc_init_svc,
.done_service = ip_vs_wlc_done_svc,
.update_service = ip_vs_wlc_update_svc,
.schedule = ip_vs_wlc_schedule, .schedule = ip_vs_wlc_schedule,
}; };
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册