Commit 41d06b13 authored by David S. Miller

Merge tag 'batman-adv-for-davem' of git://git.open-mesh.org/linux-merge

Included changes:
- sysfs removal postponement during interface un-registration
- random32() function renaming
- struct refactoring
- kernel doc improvement
- delayed_work initialisation clean up work (see the sketch after the sign-off)
- copyright year and internal version number update
Signed-off-by: David S. Miller <davem@davemloft.net>
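
The delayed_work clean-up listed above follows one pattern throughout the diff: the per-subsystem *_start_timer() helpers (batadv_bla_start_timer, batadv_start_purge_timer, batadv_tt_start_timer, batadv_start_vis_timer) that re-ran INIT_DELAYED_WORK on every period are dropped, the work item is initialised exactly once in the subsystem's init function, and the periodic handler simply re-queues itself. A minimal sketch of that pattern; foo_priv, foo_init, foo_periodic_work and FOO_WORK_PERIOD are illustrative names, not taken from the batman-adv sources:

#include <linux/workqueue.h>
#include <linux/jiffies.h>

extern struct workqueue_struct *batadv_event_workqueue;	/* declared in main.h */

#define FOO_WORK_PERIOD 5000	/* illustrative period in milliseconds */

struct foo_priv {
	struct delayed_work work;
	/* ... subsystem state ... */
};

static void foo_periodic_work(struct work_struct *work);

/* initialise the work item exactly once, at subsystem init time */
static int foo_init(struct foo_priv *priv)
{
	INIT_DELAYED_WORK(&priv->work, foo_periodic_work);
	queue_delayed_work(batadv_event_workqueue, &priv->work,
			   msecs_to_jiffies(FOO_WORK_PERIOD));
	return 0;
}

/* the handler only re-arms itself; no repeated INIT_DELAYED_WORK */
static void foo_periodic_work(struct work_struct *work)
{
	struct delayed_work *delayed_work;
	struct foo_priv *priv;

	delayed_work = container_of(work, struct delayed_work, work);
	priv = container_of(delayed_work, struct foo_priv, work);

	/* ... periodic purging / announcements ... */

	queue_delayed_work(batadv_event_workqueue, &priv->work,
			   msecs_to_jiffies(FOO_WORK_PERIOD));
}

Initialising the delayed_work once avoids re-running INIT_DELAYED_WORK on a work item that may still be pending, which is what the removed *_start_timer() helpers did on every cycle.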
/* Copyright (C) 2011-2012 B.A.T.M.A.N. contributors:
/* Copyright (C) 2011-2013 B.A.T.M.A.N. contributors:
*
* Marek Lindner
*
......
/* Copyright (C) 2007-2012 B.A.T.M.A.N. contributors:
/* Copyright (C) 2007-2013 B.A.T.M.A.N. contributors:
*
* Marek Lindner, Simon Wunderlich
*
......@@ -123,7 +123,7 @@ batadv_iv_ogm_emit_send_time(const struct batadv_priv *bat_priv)
unsigned int msecs;
msecs = atomic_read(&bat_priv->orig_interval) - BATADV_JITTER;
msecs += random32() % (2 * BATADV_JITTER);
msecs += prandom_u32() % (2 * BATADV_JITTER);
return jiffies + msecs_to_jiffies(msecs);
}
......@@ -131,7 +131,7 @@ batadv_iv_ogm_emit_send_time(const struct batadv_priv *bat_priv)
/* when do we schedule a ogm packet to be sent */
static unsigned long batadv_iv_ogm_fwd_send_time(void)
{
return jiffies + msecs_to_jiffies(random32() % (BATADV_JITTER / 2));
return jiffies + msecs_to_jiffies(prandom_u32() % (BATADV_JITTER / 2));
}
/* apply hop penalty for a normal link */
......
/* Copyright (C) 2006-2012 B.A.T.M.A.N. contributors:
/* Copyright (C) 2006-2013 B.A.T.M.A.N. contributors:
*
* Simon Wunderlich, Marek Lindner
*
......
/* Copyright (C) 2006-2012 B.A.T.M.A.N. contributors:
/* Copyright (C) 2006-2013 B.A.T.M.A.N. contributors:
*
* Simon Wunderlich, Marek Lindner
*
......
/* Copyright (C) 2011-2012 B.A.T.M.A.N. contributors:
/* Copyright (C) 2011-2013 B.A.T.M.A.N. contributors:
*
* Simon Wunderlich
*
......@@ -34,13 +34,14 @@
static const uint8_t batadv_announce_mac[4] = {0x43, 0x05, 0x43, 0x05};
static void batadv_bla_periodic_work(struct work_struct *work);
static void batadv_bla_send_announce(struct batadv_priv *bat_priv,
struct batadv_backbone_gw *backbone_gw);
static void
batadv_bla_send_announce(struct batadv_priv *bat_priv,
struct batadv_bla_backbone_gw *backbone_gw);
/* return the index of the claim */
static inline uint32_t batadv_choose_claim(const void *data, uint32_t size)
{
struct batadv_claim *claim = (struct batadv_claim *)data;
struct batadv_bla_claim *claim = (struct batadv_bla_claim *)data;
uint32_t hash = 0;
hash = batadv_hash_bytes(hash, &claim->addr, sizeof(claim->addr));
......@@ -57,7 +58,7 @@ static inline uint32_t batadv_choose_claim(const void *data, uint32_t size)
static inline uint32_t batadv_choose_backbone_gw(const void *data,
uint32_t size)
{
const struct batadv_claim *claim = (struct batadv_claim *)data;
const struct batadv_bla_claim *claim = (struct batadv_bla_claim *)data;
uint32_t hash = 0;
hash = batadv_hash_bytes(hash, &claim->addr, sizeof(claim->addr));
......@@ -75,9 +76,9 @@ static inline uint32_t batadv_choose_backbone_gw(const void *data,
static int batadv_compare_backbone_gw(const struct hlist_node *node,
const void *data2)
{
const void *data1 = container_of(node, struct batadv_backbone_gw,
const void *data1 = container_of(node, struct batadv_bla_backbone_gw,
hash_entry);
const struct batadv_backbone_gw *gw1 = data1, *gw2 = data2;
const struct batadv_bla_backbone_gw *gw1 = data1, *gw2 = data2;
if (!batadv_compare_eth(gw1->orig, gw2->orig))
return 0;
......@@ -92,9 +93,9 @@ static int batadv_compare_backbone_gw(const struct hlist_node *node,
static int batadv_compare_claim(const struct hlist_node *node,
const void *data2)
{
const void *data1 = container_of(node, struct batadv_claim,
const void *data1 = container_of(node, struct batadv_bla_claim,
hash_entry);
const struct batadv_claim *cl1 = data1, *cl2 = data2;
const struct batadv_bla_claim *cl1 = data1, *cl2 = data2;
if (!batadv_compare_eth(cl1->addr, cl2->addr))
return 0;
......@@ -106,7 +107,8 @@ static int batadv_compare_claim(const struct hlist_node *node,
}
/* free a backbone gw */
static void batadv_backbone_gw_free_ref(struct batadv_backbone_gw *backbone_gw)
static void
batadv_backbone_gw_free_ref(struct batadv_bla_backbone_gw *backbone_gw)
{
if (atomic_dec_and_test(&backbone_gw->refcount))
kfree_rcu(backbone_gw, rcu);
......@@ -115,16 +117,16 @@ static void batadv_backbone_gw_free_ref(struct batadv_backbone_gw *backbone_gw)
/* finally deinitialize the claim */
static void batadv_claim_free_rcu(struct rcu_head *rcu)
{
struct batadv_claim *claim;
struct batadv_bla_claim *claim;
claim = container_of(rcu, struct batadv_claim, rcu);
claim = container_of(rcu, struct batadv_bla_claim, rcu);
batadv_backbone_gw_free_ref(claim->backbone_gw);
kfree(claim);
}
/* free a claim, call claim_free_rcu if its the last reference */
static void batadv_claim_free_ref(struct batadv_claim *claim)
static void batadv_claim_free_ref(struct batadv_bla_claim *claim)
{
if (atomic_dec_and_test(&claim->refcount))
call_rcu(&claim->rcu, batadv_claim_free_rcu);
......@@ -136,14 +138,15 @@ static void batadv_claim_free_ref(struct batadv_claim *claim)
* looks for a claim in the hash, and returns it if found
* or NULL otherwise.
*/
static struct batadv_claim *batadv_claim_hash_find(struct batadv_priv *bat_priv,
struct batadv_claim *data)
static struct batadv_bla_claim
*batadv_claim_hash_find(struct batadv_priv *bat_priv,
struct batadv_bla_claim *data)
{
struct batadv_hashtable *hash = bat_priv->bla.claim_hash;
struct hlist_head *head;
struct hlist_node *node;
struct batadv_claim *claim;
struct batadv_claim *claim_tmp = NULL;
struct batadv_bla_claim *claim;
struct batadv_bla_claim *claim_tmp = NULL;
int index;
if (!hash)
......@@ -176,15 +179,15 @@ static struct batadv_claim *batadv_claim_hash_find(struct batadv_priv *bat_priv,
*
* Returns claim if found or NULL otherwise.
*/
static struct batadv_backbone_gw *
static struct batadv_bla_backbone_gw *
batadv_backbone_hash_find(struct batadv_priv *bat_priv,
uint8_t *addr, short vid)
{
struct batadv_hashtable *hash = bat_priv->bla.backbone_hash;
struct hlist_head *head;
struct hlist_node *node;
struct batadv_backbone_gw search_entry, *backbone_gw;
struct batadv_backbone_gw *backbone_gw_tmp = NULL;
struct batadv_bla_backbone_gw search_entry, *backbone_gw;
struct batadv_bla_backbone_gw *backbone_gw_tmp = NULL;
int index;
if (!hash)
......@@ -215,12 +218,12 @@ batadv_backbone_hash_find(struct batadv_priv *bat_priv,
/* delete all claims for a backbone */
static void
batadv_bla_del_backbone_claims(struct batadv_backbone_gw *backbone_gw)
batadv_bla_del_backbone_claims(struct batadv_bla_backbone_gw *backbone_gw)
{
struct batadv_hashtable *hash;
struct hlist_node *node, *node_tmp;
struct hlist_head *head;
struct batadv_claim *claim;
struct batadv_bla_claim *claim;
int i;
spinlock_t *list_lock; /* protects write access to the hash lists */
......@@ -364,11 +367,11 @@ static void batadv_bla_send_claim(struct batadv_priv *bat_priv, uint8_t *mac,
* searches for the backbone gw or creates a new one if it could not
* be found.
*/
static struct batadv_backbone_gw *
static struct batadv_bla_backbone_gw *
batadv_bla_get_backbone_gw(struct batadv_priv *bat_priv, uint8_t *orig,
short vid, bool own_backbone)
{
struct batadv_backbone_gw *entry;
struct batadv_bla_backbone_gw *entry;
struct batadv_orig_node *orig_node;
int hash_added;
......@@ -435,7 +438,7 @@ batadv_bla_update_own_backbone_gw(struct batadv_priv *bat_priv,
struct batadv_hard_iface *primary_if,
short vid)
{
struct batadv_backbone_gw *backbone_gw;
struct batadv_bla_backbone_gw *backbone_gw;
backbone_gw = batadv_bla_get_backbone_gw(bat_priv,
primary_if->net_dev->dev_addr,
......@@ -460,8 +463,8 @@ static void batadv_bla_answer_request(struct batadv_priv *bat_priv,
struct hlist_node *node;
struct hlist_head *head;
struct batadv_hashtable *hash;
struct batadv_claim *claim;
struct batadv_backbone_gw *backbone_gw;
struct batadv_bla_claim *claim;
struct batadv_bla_backbone_gw *backbone_gw;
int i;
batadv_dbg(BATADV_DBG_BLA, bat_priv,
......@@ -500,7 +503,7 @@ static void batadv_bla_answer_request(struct batadv_priv *bat_priv,
* After the request, it will repeat all of his own claims and finally
* send an announcement claim with which we can check again.
*/
static void batadv_bla_send_request(struct batadv_backbone_gw *backbone_gw)
static void batadv_bla_send_request(struct batadv_bla_backbone_gw *backbone_gw)
{
/* first, remove all old entries */
batadv_bla_del_backbone_claims(backbone_gw);
......@@ -526,7 +529,7 @@ static void batadv_bla_send_request(struct batadv_backbone_gw *backbone_gw)
* places.
*/
static void batadv_bla_send_announce(struct batadv_priv *bat_priv,
struct batadv_backbone_gw *backbone_gw)
struct batadv_bla_backbone_gw *backbone_gw)
{
uint8_t mac[ETH_ALEN];
__be16 crc;
......@@ -548,10 +551,10 @@ static void batadv_bla_send_announce(struct batadv_priv *bat_priv,
*/
static void batadv_bla_add_claim(struct batadv_priv *bat_priv,
const uint8_t *mac, const short vid,
struct batadv_backbone_gw *backbone_gw)
struct batadv_bla_backbone_gw *backbone_gw)
{
struct batadv_claim *claim;
struct batadv_claim search_claim;
struct batadv_bla_claim *claim;
struct batadv_bla_claim search_claim;
int hash_added;
memcpy(search_claim.addr, mac, ETH_ALEN);
......@@ -613,7 +616,7 @@ static void batadv_bla_add_claim(struct batadv_priv *bat_priv,
static void batadv_bla_del_claim(struct batadv_priv *bat_priv,
const uint8_t *mac, const short vid)
{
struct batadv_claim search_claim, *claim;
struct batadv_bla_claim search_claim, *claim;
memcpy(search_claim.addr, mac, ETH_ALEN);
search_claim.vid = vid;
......@@ -639,7 +642,7 @@ static int batadv_handle_announce(struct batadv_priv *bat_priv,
uint8_t *an_addr, uint8_t *backbone_addr,
short vid)
{
struct batadv_backbone_gw *backbone_gw;
struct batadv_bla_backbone_gw *backbone_gw;
uint16_t crc;
if (memcmp(an_addr, batadv_announce_mac, 4) != 0)
......@@ -711,7 +714,7 @@ static int batadv_handle_unclaim(struct batadv_priv *bat_priv,
uint8_t *backbone_addr,
uint8_t *claim_addr, short vid)
{
struct batadv_backbone_gw *backbone_gw;
struct batadv_bla_backbone_gw *backbone_gw;
/* unclaim in any case if it is our own */
if (primary_if && batadv_compare_eth(backbone_addr,
......@@ -740,7 +743,7 @@ static int batadv_handle_claim(struct batadv_priv *bat_priv,
uint8_t *backbone_addr, uint8_t *claim_addr,
short vid)
{
struct batadv_backbone_gw *backbone_gw;
struct batadv_bla_backbone_gw *backbone_gw;
/* register the gateway if not yet available, and add the claim. */
......@@ -954,7 +957,7 @@ static int batadv_bla_process_claim(struct batadv_priv *bat_priv,
*/
static void batadv_bla_purge_backbone_gw(struct batadv_priv *bat_priv, int now)
{
struct batadv_backbone_gw *backbone_gw;
struct batadv_bla_backbone_gw *backbone_gw;
struct hlist_node *node, *node_tmp;
struct hlist_head *head;
struct batadv_hashtable *hash;
......@@ -1009,7 +1012,7 @@ static void batadv_bla_purge_claims(struct batadv_priv *bat_priv,
struct batadv_hard_iface *primary_if,
int now)
{
struct batadv_claim *claim;
struct batadv_bla_claim *claim;
struct hlist_node *node;
struct hlist_head *head;
struct batadv_hashtable *hash;
......@@ -1058,7 +1061,7 @@ void batadv_bla_update_orig_address(struct batadv_priv *bat_priv,
struct batadv_hard_iface *primary_if,
struct batadv_hard_iface *oldif)
{
struct batadv_backbone_gw *backbone_gw;
struct batadv_bla_backbone_gw *backbone_gw;
struct hlist_node *node;
struct hlist_head *head;
struct batadv_hashtable *hash;
......@@ -1100,16 +1103,6 @@ void batadv_bla_update_orig_address(struct batadv_priv *bat_priv,
}
}
/* (re)start the timer */
static void batadv_bla_start_timer(struct batadv_priv *bat_priv)
{
INIT_DELAYED_WORK(&bat_priv->bla.work, batadv_bla_periodic_work);
queue_delayed_work(batadv_event_workqueue, &bat_priv->bla.work,
msecs_to_jiffies(BATADV_BLA_PERIOD_LENGTH));
}
/* periodic work to do:
* * purge structures when they are too old
* * send announcements
......@@ -1121,7 +1114,7 @@ static void batadv_bla_periodic_work(struct work_struct *work)
struct batadv_priv_bla *priv_bla;
struct hlist_node *node;
struct hlist_head *head;
struct batadv_backbone_gw *backbone_gw;
struct batadv_bla_backbone_gw *backbone_gw;
struct batadv_hashtable *hash;
struct batadv_hard_iface *primary_if;
int i;
......@@ -1180,7 +1173,8 @@ static void batadv_bla_periodic_work(struct work_struct *work)
if (primary_if)
batadv_hardif_free_ref(primary_if);
batadv_bla_start_timer(bat_priv);
queue_delayed_work(batadv_event_workqueue, &bat_priv->bla.work,
msecs_to_jiffies(BATADV_BLA_PERIOD_LENGTH));
}
/* The hash for claim and backbone hash receive the same key because they
......@@ -1238,7 +1232,10 @@ int batadv_bla_init(struct batadv_priv *bat_priv)
batadv_dbg(BATADV_DBG_BLA, bat_priv, "bla hashes initialized\n");
batadv_bla_start_timer(bat_priv);
INIT_DELAYED_WORK(&bat_priv->bla.work, batadv_bla_periodic_work);
queue_delayed_work(batadv_event_workqueue, &bat_priv->bla.work,
msecs_to_jiffies(BATADV_BLA_PERIOD_LENGTH));
return 0;
}
......@@ -1326,7 +1323,7 @@ int batadv_bla_is_backbone_gw_orig(struct batadv_priv *bat_priv, uint8_t *orig)
struct batadv_hashtable *hash = bat_priv->bla.backbone_hash;
struct hlist_head *head;
struct hlist_node *node;
struct batadv_backbone_gw *backbone_gw;
struct batadv_bla_backbone_gw *backbone_gw;
int i;
if (!atomic_read(&bat_priv->bridge_loop_avoidance))
......@@ -1367,7 +1364,7 @@ int batadv_bla_is_backbone_gw(struct sk_buff *skb,
{
struct ethhdr *ethhdr;
struct vlan_ethhdr *vhdr;
struct batadv_backbone_gw *backbone_gw;
struct batadv_bla_backbone_gw *backbone_gw;
short vid = -1;
if (!atomic_read(&orig_node->bat_priv->bridge_loop_avoidance))
......@@ -1438,7 +1435,7 @@ int batadv_bla_rx(struct batadv_priv *bat_priv, struct sk_buff *skb, short vid,
bool is_bcast)
{
struct ethhdr *ethhdr;
struct batadv_claim search_claim, *claim = NULL;
struct batadv_bla_claim search_claim, *claim = NULL;
struct batadv_hard_iface *primary_if;
int ret;
......@@ -1532,7 +1529,7 @@ int batadv_bla_rx(struct batadv_priv *bat_priv, struct sk_buff *skb, short vid,
int batadv_bla_tx(struct batadv_priv *bat_priv, struct sk_buff *skb, short vid)
{
struct ethhdr *ethhdr;
struct batadv_claim search_claim, *claim = NULL;
struct batadv_bla_claim search_claim, *claim = NULL;
struct batadv_hard_iface *primary_if;
int ret = 0;
......@@ -1608,7 +1605,7 @@ int batadv_bla_claim_table_seq_print_text(struct seq_file *seq, void *offset)
struct net_device *net_dev = (struct net_device *)seq->private;
struct batadv_priv *bat_priv = netdev_priv(net_dev);
struct batadv_hashtable *hash = bat_priv->bla.claim_hash;
struct batadv_claim *claim;
struct batadv_bla_claim *claim;
struct batadv_hard_iface *primary_if;
struct hlist_node *node;
struct hlist_head *head;
......@@ -1653,7 +1650,7 @@ int batadv_bla_backbone_table_seq_print_text(struct seq_file *seq, void *offset)
struct net_device *net_dev = (struct net_device *)seq->private;
struct batadv_priv *bat_priv = netdev_priv(net_dev);
struct batadv_hashtable *hash = bat_priv->bla.backbone_hash;
struct batadv_backbone_gw *backbone_gw;
struct batadv_bla_backbone_gw *backbone_gw;
struct batadv_hard_iface *primary_if;
struct hlist_node *node;
struct hlist_head *head;
......
/* Copyright (C) 2011-2012 B.A.T.M.A.N. contributors:
/* Copyright (C) 2011-2013 B.A.T.M.A.N. contributors:
*
* Simon Wunderlich
*
......
/* Copyright (C) 2010-2012 B.A.T.M.A.N. contributors:
/* Copyright (C) 2010-2013 B.A.T.M.A.N. contributors:
*
* Marek Lindner
*
......@@ -40,13 +40,14 @@ static struct dentry *batadv_debugfs;
static const int batadv_log_buff_len = BATADV_LOG_BUF_LEN;
static char *batadv_log_char_addr(struct batadv_debug_log *debug_log,
static char *batadv_log_char_addr(struct batadv_priv_debug_log *debug_log,
size_t idx)
{
return &debug_log->log_buff[idx & BATADV_LOG_BUFF_MASK];
}
static void batadv_emit_log_char(struct batadv_debug_log *debug_log, char c)
static void batadv_emit_log_char(struct batadv_priv_debug_log *debug_log,
char c)
{
char *char_addr;
......@@ -59,7 +60,7 @@ static void batadv_emit_log_char(struct batadv_debug_log *debug_log, char c)
}
__printf(2, 3)
static int batadv_fdebug_log(struct batadv_debug_log *debug_log,
static int batadv_fdebug_log(struct batadv_priv_debug_log *debug_log,
const char *fmt, ...)
{
va_list args;
......@@ -114,7 +115,7 @@ static int batadv_log_release(struct inode *inode, struct file *file)
return 0;
}
static int batadv_log_empty(struct batadv_debug_log *debug_log)
static int batadv_log_empty(struct batadv_priv_debug_log *debug_log)
{
return !(debug_log->log_start - debug_log->log_end);
}
......@@ -123,7 +124,7 @@ static ssize_t batadv_log_read(struct file *file, char __user *buf,
size_t count, loff_t *ppos)
{
struct batadv_priv *bat_priv = file->private_data;
struct batadv_debug_log *debug_log = bat_priv->debug_log;
struct batadv_priv_debug_log *debug_log = bat_priv->debug_log;
int error, i = 0;
char *char_addr;
char c;
......@@ -177,7 +178,7 @@ static ssize_t batadv_log_read(struct file *file, char __user *buf,
static unsigned int batadv_log_poll(struct file *file, poll_table *wait)
{
struct batadv_priv *bat_priv = file->private_data;
struct batadv_debug_log *debug_log = bat_priv->debug_log;
struct batadv_priv_debug_log *debug_log = bat_priv->debug_log;
poll_wait(file, &debug_log->queue_wait, wait);
......
/* Copyright (C) 2010-2012 B.A.T.M.A.N. contributors:
/* Copyright (C) 2010-2013 B.A.T.M.A.N. contributors:
*
* Marek Lindner
*
......
/* Copyright (C) 2011-2012 B.A.T.M.A.N. contributors:
/* Copyright (C) 2011-2013 B.A.T.M.A.N. contributors:
*
* Antonio Quartulli
*
......
/* Copyright (C) 2011-2012 B.A.T.M.A.N. contributors:
/* Copyright (C) 2011-2013 B.A.T.M.A.N. contributors:
*
* Antonio Quartulli
*
......
/* Copyright (C) 2009-2012 B.A.T.M.A.N. contributors:
/* Copyright (C) 2009-2013 B.A.T.M.A.N. contributors:
*
* Marek Lindner
*
......
/* Copyright (C) 2009-2012 B.A.T.M.A.N. contributors:
/* Copyright (C) 2009-2013 B.A.T.M.A.N. contributors:
*
* Marek Lindner
*
......
/* Copyright (C) 2009-2012 B.A.T.M.A.N. contributors:
/* Copyright (C) 2009-2013 B.A.T.M.A.N. contributors:
*
* Marek Lindner
*
......
/* Copyright (C) 2009-2012 B.A.T.M.A.N. contributors:
/* Copyright (C) 2009-2013 B.A.T.M.A.N. contributors:
*
* Marek Lindner
*
......
/* Copyright (C) 2007-2012 B.A.T.M.A.N. contributors:
/* Copyright (C) 2007-2013 B.A.T.M.A.N. contributors:
*
* Marek Lindner, Simon Wunderlich
*
......@@ -457,6 +457,24 @@ void batadv_hardif_disable_interface(struct batadv_hard_iface *hard_iface)
batadv_hardif_free_ref(primary_if);
}
/**
* batadv_hardif_remove_interface_finish - cleans up the remains of a hardif
* @work: work queue item
*
* Free the parts of the hard interface which can not be removed under
* rtnl lock (to prevent deadlock situations).
*/
static void batadv_hardif_remove_interface_finish(struct work_struct *work)
{
struct batadv_hard_iface *hard_iface;
hard_iface = container_of(work, struct batadv_hard_iface,
cleanup_work);
batadv_sysfs_del_hardif(&hard_iface->hardif_obj);
batadv_hardif_free_ref(hard_iface);
}
static struct batadv_hard_iface *
batadv_hardif_add_interface(struct net_device *net_dev)
{
......@@ -484,6 +502,9 @@ batadv_hardif_add_interface(struct net_device *net_dev)
hard_iface->soft_iface = NULL;
hard_iface->if_status = BATADV_IF_NOT_IN_USE;
INIT_LIST_HEAD(&hard_iface->list);
INIT_WORK(&hard_iface->cleanup_work,
batadv_hardif_remove_interface_finish);
/* extra reference for return */
atomic_set(&hard_iface->refcount, 2);
......@@ -518,8 +539,7 @@ static void batadv_hardif_remove_interface(struct batadv_hard_iface *hard_iface)
return;
hard_iface->if_status = BATADV_IF_TO_BE_REMOVED;
batadv_sysfs_del_hardif(&hard_iface->hardif_obj);
batadv_hardif_free_ref(hard_iface);
queue_work(batadv_event_workqueue, &hard_iface->cleanup_work);
}
void batadv_hardif_remove_interfaces(void)
......
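The sysfs removal postponement mentioned in the commit message is the change just above: batadv_hardif_remove_interface() no longer calls batadv_sysfs_del_hardif() while rtnl is held; it only flags the interface and queues cleanup_work, and batadv_hardif_remove_interface_finish() performs the sysfs teardown and final reference drop from the workqueue. batadv_softif_destroy() further down applies the same idea to soft interfaces. A reduced sketch of the deferral pattern, with illustrative names (bar_iface, bar_setup, bar_remove, bar_cleanup_finish):

#include <linux/workqueue.h>

extern struct workqueue_struct *batadv_event_workqueue;	/* declared in main.h */

struct bar_iface {
	struct work_struct cleanup_work;
	/* ... kobject, refcount, ... */
};

/* runs from the workqueue without rtnl_lock(): safe place for sysfs removal */
static void bar_cleanup_finish(struct work_struct *work)
{
	struct bar_iface *iface;

	iface = container_of(work, struct bar_iface, cleanup_work);
	/* sysfs / netdevice teardown and the final reference drop go here */
}

/* set up the work item once, when the interface object is created */
static void bar_setup(struct bar_iface *iface)
{
	INIT_WORK(&iface->cleanup_work, bar_cleanup_finish);
}

/* called under rtnl_lock(): only defer the cleanup, never block on sysfs here */
static void bar_remove(struct bar_iface *iface)
{
	queue_work(batadv_event_workqueue, &iface->cleanup_work);
}

As in the diff, the work item is initialised at creation time (batadv_hardif_add_interface / batadv_softif_create) so the removal path under rtnl only has to queue it.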
/* Copyright (C) 2007-2012 B.A.T.M.A.N. contributors:
/* Copyright (C) 2007-2013 B.A.T.M.A.N. contributors:
*
* Marek Lindner, Simon Wunderlich
*
......
/* Copyright (C) 2006-2012 B.A.T.M.A.N. contributors:
/* Copyright (C) 2006-2013 B.A.T.M.A.N. contributors:
*
* Simon Wunderlich, Marek Lindner
*
......
/* Copyright (C) 2006-2012 B.A.T.M.A.N. contributors:
/* Copyright (C) 2006-2013 B.A.T.M.A.N. contributors:
*
* Simon Wunderlich, Marek Lindner
*
......
/* Copyright (C) 2007-2012 B.A.T.M.A.N. contributors:
/* Copyright (C) 2007-2013 B.A.T.M.A.N. contributors:
*
* Marek Lindner
*
......
/* Copyright (C) 2007-2012 B.A.T.M.A.N. contributors:
/* Copyright (C) 2007-2013 B.A.T.M.A.N. contributors:
*
* Marek Lindner
*
......
/* Copyright (C) 2007-2012 B.A.T.M.A.N. contributors:
/* Copyright (C) 2007-2013 B.A.T.M.A.N. contributors:
*
* Marek Lindner, Simon Wunderlich
*
......
/* Copyright (C) 2007-2012 B.A.T.M.A.N. contributors:
/* Copyright (C) 2007-2013 B.A.T.M.A.N. contributors:
*
* Marek Lindner, Simon Wunderlich
*
......@@ -26,7 +26,7 @@
#define BATADV_DRIVER_DEVICE "batman-adv"
#ifndef BATADV_SOURCE_VERSION
#define BATADV_SOURCE_VERSION "2012.5.0"
#define BATADV_SOURCE_VERSION "2013.1.0"
#endif
/* B.A.T.M.A.N. parameters */
......@@ -44,6 +44,8 @@
#define BATADV_TT_LOCAL_TIMEOUT 600000 /* in milliseconds */
#define BATADV_TT_CLIENT_ROAM_TIMEOUT 600000 /* in milliseconds */
#define BATADV_TT_CLIENT_TEMP_TIMEOUT 600000 /* in milliseconds */
#define BATADV_TT_WORK_PERIOD 5000 /* 5 seconds */
#define BATADV_ORIG_WORK_PERIOD 1000 /* 1 second */
#define BATADV_DAT_ENTRY_TIMEOUT (5*60000) /* 5 mins in milliseconds */
/* sliding packet range of received originator messages in sequence numbers
* (should be a multiple of our word size)
......
/* Copyright (C) 2009-2012 B.A.T.M.A.N. contributors:
/* Copyright (C) 2009-2013 B.A.T.M.A.N. contributors:
*
* Marek Lindner, Simon Wunderlich
*
......@@ -34,13 +34,6 @@ static struct lock_class_key batadv_orig_hash_lock_class_key;
static void batadv_purge_orig(struct work_struct *work);
static void batadv_start_purge_timer(struct batadv_priv *bat_priv)
{
INIT_DELAYED_WORK(&bat_priv->orig_work, batadv_purge_orig);
queue_delayed_work(batadv_event_workqueue,
&bat_priv->orig_work, msecs_to_jiffies(1000));
}
/* returns 1 if they are the same originator */
static int batadv_compare_orig(const struct hlist_node *node, const void *data2)
{
......@@ -63,7 +56,11 @@ int batadv_originator_init(struct batadv_priv *bat_priv)
batadv_hash_set_lock_class(bat_priv->orig_hash,
&batadv_orig_hash_lock_class_key);
batadv_start_purge_timer(bat_priv);
INIT_DELAYED_WORK(&bat_priv->orig_work, batadv_purge_orig);
queue_delayed_work(batadv_event_workqueue,
&bat_priv->orig_work,
msecs_to_jiffies(BATADV_ORIG_WORK_PERIOD));
return 0;
err:
......@@ -396,7 +393,9 @@ static void batadv_purge_orig(struct work_struct *work)
delayed_work = container_of(work, struct delayed_work, work);
bat_priv = container_of(delayed_work, struct batadv_priv, orig_work);
_batadv_purge_orig(bat_priv);
batadv_start_purge_timer(bat_priv);
queue_delayed_work(batadv_event_workqueue,
&bat_priv->orig_work,
msecs_to_jiffies(BATADV_ORIG_WORK_PERIOD));
}
void batadv_purge_orig_ref(struct batadv_priv *bat_priv)
......
/* Copyright (C) 2007-2012 B.A.T.M.A.N. contributors:
/* Copyright (C) 2007-2013 B.A.T.M.A.N. contributors:
*
* Marek Lindner, Simon Wunderlich
*
......
/* Copyright (C) 2007-2012 B.A.T.M.A.N. contributors:
/* Copyright (C) 2007-2013 B.A.T.M.A.N. contributors:
*
* Marek Lindner, Simon Wunderlich
*
......
/* Copyright (C) 2007-2012 B.A.T.M.A.N. contributors:
/* Copyright (C) 2007-2013 B.A.T.M.A.N. contributors:
*
* Marek Lindner
*
......
/* Copyright (C) 2007-2012 B.A.T.M.A.N. contributors:
/* Copyright (C) 2007-2013 B.A.T.M.A.N. contributors:
*
* Marek Lindner
*
......
/* Copyright (C) 2007-2012 B.A.T.M.A.N. contributors:
/* Copyright (C) 2007-2013 B.A.T.M.A.N. contributors:
*
* Marek Lindner, Simon Wunderlich
*
......
/* Copyright (C) 2007-2012 B.A.T.M.A.N. contributors:
/* Copyright (C) 2007-2013 B.A.T.M.A.N. contributors:
*
* Marek Lindner, Simon Wunderlich
*
......
/* Copyright (C) 2007-2012 B.A.T.M.A.N. contributors:
/* Copyright (C) 2007-2013 B.A.T.M.A.N. contributors:
*
* Marek Lindner, Simon Wunderlich
*
......@@ -155,8 +155,6 @@ _batadv_add_bcast_packet_to_list(struct batadv_priv *bat_priv,
spin_unlock_bh(&bat_priv->forw_bcast_list_lock);
/* start timer for this packet */
INIT_DELAYED_WORK(&forw_packet->delayed_work,
batadv_send_outstanding_bcast_packet);
queue_delayed_work(batadv_event_workqueue, &forw_packet->delayed_work,
send_time);
}
......@@ -210,6 +208,9 @@ int batadv_add_bcast_packet_to_list(struct batadv_priv *bat_priv,
/* how often did we send the bcast packet ? */
forw_packet->num_packets = 0;
INIT_DELAYED_WORK(&forw_packet->delayed_work,
batadv_send_outstanding_bcast_packet);
_batadv_add_bcast_packet_to_list(bat_priv, forw_packet, delay);
return NETDEV_TX_OK;
......
/* Copyright (C) 2007-2012 B.A.T.M.A.N. contributors:
/* Copyright (C) 2007-2013 B.A.T.M.A.N. contributors:
*
* Marek Lindner, Simon Wunderlich
*
......
/* Copyright (C) 2007-2012 B.A.T.M.A.N. contributors:
/* Copyright (C) 2007-2013 B.A.T.M.A.N. contributors:
*
* Marek Lindner, Simon Wunderlich
*
......@@ -449,6 +449,30 @@ static void batadv_interface_setup(struct net_device *dev)
memset(priv, 0, sizeof(*priv));
}
/**
* batadv_softif_destroy_finish - cleans up the remains of a softif
* @work: work queue item
*
* Free the parts of the soft interface which can not be removed under
* rtnl lock (to prevent deadlock situations).
*/
static void batadv_softif_destroy_finish(struct work_struct *work)
{
struct batadv_priv *bat_priv;
struct net_device *soft_iface;
bat_priv = container_of(work, struct batadv_priv,
cleanup_work);
soft_iface = bat_priv->soft_iface;
batadv_debugfs_del_meshif(soft_iface);
batadv_sysfs_del_meshif(soft_iface);
rtnl_lock();
unregister_netdevice(soft_iface);
rtnl_unlock();
}
struct net_device *batadv_softif_create(const char *name)
{
struct net_device *soft_iface;
......@@ -463,6 +487,8 @@ struct net_device *batadv_softif_create(const char *name)
goto out;
bat_priv = netdev_priv(soft_iface);
bat_priv->soft_iface = soft_iface;
INIT_WORK(&bat_priv->cleanup_work, batadv_softif_destroy_finish);
/* batadv_interface_stats() needs to be available as soon as
* register_netdevice() has been called
......@@ -551,10 +577,10 @@ struct net_device *batadv_softif_create(const char *name)
void batadv_softif_destroy(struct net_device *soft_iface)
{
batadv_debugfs_del_meshif(soft_iface);
batadv_sysfs_del_meshif(soft_iface);
struct batadv_priv *bat_priv = netdev_priv(soft_iface);
batadv_mesh_free(soft_iface);
unregister_netdevice(soft_iface);
queue_work(batadv_event_workqueue, &bat_priv->cleanup_work);
}
int batadv_softif_is_valid(const struct net_device *net_dev)
......
/* Copyright (C) 2007-2012 B.A.T.M.A.N. contributors:
/* Copyright (C) 2007-2013 B.A.T.M.A.N. contributors:
*
* Marek Lindner
*
......
/* Copyright (C) 2010-2012 B.A.T.M.A.N. contributors:
/* Copyright (C) 2010-2013 B.A.T.M.A.N. contributors:
*
* Marek Lindner
*
......
/* Copyright (C) 2010-2012 B.A.T.M.A.N. contributors:
/* Copyright (C) 2010-2013 B.A.T.M.A.N. contributors:
*
* Marek Lindner
*
......
/* Copyright (C) 2007-2012 B.A.T.M.A.N. contributors:
/* Copyright (C) 2007-2013 B.A.T.M.A.N. contributors:
*
* Marek Lindner, Simon Wunderlich, Antonio Quartulli
*
......@@ -52,13 +52,6 @@ static int batadv_compare_tt(const struct hlist_node *node, const void *data2)
return (memcmp(data1, data2, ETH_ALEN) == 0 ? 1 : 0);
}
static void batadv_tt_start_timer(struct batadv_priv *bat_priv)
{
INIT_DELAYED_WORK(&bat_priv->tt.work, batadv_tt_purge);
queue_delayed_work(batadv_event_workqueue, &bat_priv->tt.work,
msecs_to_jiffies(5000));
}
static struct batadv_tt_common_entry *
batadv_tt_hash_find(struct batadv_hashtable *hash, const void *data)
{
......@@ -2136,7 +2129,9 @@ int batadv_tt_init(struct batadv_priv *bat_priv)
if (ret < 0)
return ret;
batadv_tt_start_timer(bat_priv);
INIT_DELAYED_WORK(&bat_priv->tt.work, batadv_tt_purge);
queue_delayed_work(batadv_event_workqueue, &bat_priv->tt.work,
msecs_to_jiffies(BATADV_TT_WORK_PERIOD));
return 1;
}
......@@ -2286,7 +2281,8 @@ static void batadv_tt_purge(struct work_struct *work)
batadv_tt_req_purge(bat_priv);
batadv_tt_roam_purge(bat_priv);
batadv_tt_start_timer(bat_priv);
queue_delayed_work(batadv_event_workqueue, &bat_priv->tt.work,
msecs_to_jiffies(BATADV_TT_WORK_PERIOD));
}
void batadv_tt_free(struct batadv_priv *bat_priv)
......
/* Copyright (C) 2007-2012 B.A.T.M.A.N. contributors:
/* Copyright (C) 2007-2013 B.A.T.M.A.N. contributors:
*
* Marek Lindner, Simon Wunderlich, Antonio Quartulli
*
......
This diff is collapsed.
/* Copyright (C) 2010-2012 B.A.T.M.A.N. contributors:
/* Copyright (C) 2010-2013 B.A.T.M.A.N. contributors:
*
* Andreas Langer
*
......
/* Copyright (C) 2010-2012 B.A.T.M.A.N. contributors:
/* Copyright (C) 2010-2013 B.A.T.M.A.N. contributors:
*
* Andreas Langer
*
......
/* Copyright (C) 2008-2012 B.A.T.M.A.N. contributors:
/* Copyright (C) 2008-2013 B.A.T.M.A.N. contributors:
*
* Simon Wunderlich
*
......@@ -31,14 +31,12 @@
/* hash class keys */
static struct lock_class_key batadv_vis_hash_lock_class_key;
static void batadv_start_vis_timer(struct batadv_priv *bat_priv);
/* free the info */
static void batadv_free_info(struct kref *ref)
{
struct batadv_vis_info *info;
struct batadv_priv *bat_priv;
struct batadv_recvlist_node *entry, *tmp;
struct batadv_vis_recvlist_node *entry, *tmp;
info = container_of(ref, struct batadv_vis_info, refcount);
bat_priv = info->bat_priv;
......@@ -129,7 +127,7 @@ static void batadv_vis_data_insert_interface(const uint8_t *interface,
struct hlist_head *if_list,
bool primary)
{
struct batadv_if_list_entry *entry;
struct batadv_vis_if_list_entry *entry;
struct hlist_node *pos;
hlist_for_each_entry(entry, pos, if_list, list) {
......@@ -149,7 +147,7 @@ static void batadv_vis_data_insert_interface(const uint8_t *interface,
static void batadv_vis_data_read_prim_sec(struct seq_file *seq,
const struct hlist_head *if_list)
{
struct batadv_if_list_entry *entry;
struct batadv_vis_if_list_entry *entry;
struct hlist_node *pos;
hlist_for_each_entry(entry, pos, if_list, list) {
......@@ -199,7 +197,7 @@ static void batadv_vis_data_read_entries(struct seq_file *seq,
struct batadv_vis_info_entry *entries)
{
int i;
struct batadv_if_list_entry *entry;
struct batadv_vis_if_list_entry *entry;
struct hlist_node *pos;
hlist_for_each_entry(entry, pos, list, list) {
......@@ -225,7 +223,7 @@ static void batadv_vis_seq_print_text_bucket(struct seq_file *seq,
struct batadv_vis_packet *packet;
uint8_t *entries_pos;
struct batadv_vis_info_entry *entries;
struct batadv_if_list_entry *entry;
struct batadv_vis_if_list_entry *entry;
struct hlist_node *pos, *n;
HLIST_HEAD(vis_if_list);
......@@ -307,7 +305,7 @@ static void batadv_send_list_del(struct batadv_vis_info *info)
static void batadv_recv_list_add(struct batadv_priv *bat_priv,
struct list_head *recv_list, const char *mac)
{
struct batadv_recvlist_node *entry;
struct batadv_vis_recvlist_node *entry;
entry = kmalloc(sizeof(*entry), GFP_ATOMIC);
if (!entry)
......@@ -324,7 +322,7 @@ static int batadv_recv_list_is_in(struct batadv_priv *bat_priv,
const struct list_head *recv_list,
const char *mac)
{
const struct batadv_recvlist_node *entry;
const struct batadv_vis_recvlist_node *entry;
spin_lock_bh(&bat_priv->vis.list_lock);
list_for_each_entry(entry, recv_list, list) {
......@@ -830,7 +828,9 @@ static void batadv_send_vis_packets(struct work_struct *work)
kref_put(&info->refcount, batadv_free_info);
}
spin_unlock_bh(&bat_priv->vis.hash_lock);
batadv_start_vis_timer(bat_priv);
queue_delayed_work(batadv_event_workqueue, &bat_priv->vis.work,
msecs_to_jiffies(BATADV_VIS_INTERVAL));
}
/* init the vis server. this may only be called when if_list is already
......@@ -900,7 +900,11 @@ int batadv_vis_init(struct batadv_priv *bat_priv)
}
spin_unlock_bh(&bat_priv->vis.hash_lock);
batadv_start_vis_timer(bat_priv);
INIT_DELAYED_WORK(&bat_priv->vis.work, batadv_send_vis_packets);
queue_delayed_work(batadv_event_workqueue, &bat_priv->vis.work,
msecs_to_jiffies(BATADV_VIS_INTERVAL));
return 0;
free_info:
......@@ -937,11 +941,3 @@ void batadv_vis_quit(struct batadv_priv *bat_priv)
bat_priv->vis.my_info = NULL;
spin_unlock_bh(&bat_priv->vis.hash_lock);
}
/* schedule packets for (re)transmission */
static void batadv_start_vis_timer(struct batadv_priv *bat_priv)
{
INIT_DELAYED_WORK(&bat_priv->vis.work, batadv_send_vis_packets);
queue_delayed_work(batadv_event_workqueue, &bat_priv->vis.work,
msecs_to_jiffies(BATADV_VIS_INTERVAL));
}
/* Copyright (C) 2008-2012 B.A.T.M.A.N. contributors:
/* Copyright (C) 2008-2013 B.A.T.M.A.N. contributors:
*
* Simon Wunderlich, Marek Lindner
*
......