Commit 91ed81f9 authored by Jonas Bonn, committed by David S. Miller

gtp: support SGSN-side tunnels

The GTP-tunnel driver is explicitly GGSN-side as it searches for PDP
contexts based on the incoming packet's _destination_ address.  If we
want to place ourselves on the SGSN side of the tunnel, then we want
to identify PDP contexts based on _source_ address.

Let it be noted that in a "real" configuration this module would never
be used:  the SGSN normally does not see IP packets as input.  The
justification for this functionality is PGW load-testing applications
where the input to the SGSN is locally generated IP traffic.

This patch adds a "role" argument at GTP-link creation time to specify
whether we are on the GGSN or SGSN side of the tunnel; this flag is then
used to select which address of the inner IP packet is used when looking
up the PDP context.
Signed-off-by: Jonas Bonn <jonas@southpole.se>
Acked-by: Pablo Neira Ayuso <pablo@netfilter.org>
Acked-by: Harald Welte <laforge@gnumonks.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
Parent ae6336b5
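As background for the diff below, here is a minimal userspace sketch (not part of this commit) of how a control program might request the new SGSN role when creating a GTP device over rtnetlink. It assumes libmnl and uapi headers that already contain the IFLA_GTP_ROLE / GTP_ROLE_SGSN additions shown below; the file name, device name and socket setup are illustrative choices, not something defined by this patch.

/* sgsn_link.c: illustrative sketch of creating a GTP device in SGSN role.
 * Build (assumption): gcc sgsn_link.c -lmnl -o sgsn_link
 */
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <time.h>
#include <arpa/inet.h>
#include <sys/socket.h>
#include <libmnl/libmnl.h>
#include <linux/if_link.h>
#include <linux/rtnetlink.h>

/* Open a UDP socket bound to the given GTP port (3386 for GTP0, 2152 for
 * GTP1-U); the kernel takes ownership of these sockets via IFLA_GTP_FD0/FD1. */
static int gtp_udp_socket(uint16_t port)
{
        struct sockaddr_in addr = {
                .sin_family = AF_INET,
                .sin_addr.s_addr = htonl(INADDR_ANY),
                .sin_port = htons(port),
        };
        int fd = socket(AF_INET, SOCK_DGRAM, 0);

        if (fd < 0 || bind(fd, (struct sockaddr *)&addr, sizeof(addr)) < 0) {
                perror("udp socket");
                exit(1);
        }
        return fd;
}

int main(void)
{
        char buf[MNL_SOCKET_BUFFER_SIZE];
        struct mnl_socket *nl;
        struct nlmsghdr *nlh;
        struct ifinfomsg *ifm;
        struct nlattr *linkinfo, *data;
        int fd0 = gtp_udp_socket(3386);         /* GTP0 */
        int fd1 = gtp_udp_socket(2152);         /* GTP1-U */

        nl = mnl_socket_open(NETLINK_ROUTE);
        if (!nl || mnl_socket_bind(nl, 0, MNL_SOCKET_AUTOPID) < 0) {
                perror("mnl_socket");
                exit(1);
        }

        /* RTM_NEWLINK request for a new "gtp" link. */
        nlh = mnl_nlmsg_put_header(buf);
        nlh->nlmsg_type = RTM_NEWLINK;
        nlh->nlmsg_flags = NLM_F_REQUEST | NLM_F_CREATE | NLM_F_EXCL | NLM_F_ACK;
        nlh->nlmsg_seq = time(NULL);

        ifm = mnl_nlmsg_put_extra_header(nlh, sizeof(*ifm));
        ifm->ifi_family = AF_UNSPEC;

        mnl_attr_put_str(nlh, IFLA_IFNAME, "gtp0");     /* illustrative name */

        linkinfo = mnl_attr_nest_start(nlh, IFLA_LINKINFO);
        mnl_attr_put_str(nlh, IFLA_INFO_KIND, "gtp");
        data = mnl_attr_nest_start(nlh, IFLA_INFO_DATA);
        mnl_attr_put_u32(nlh, IFLA_GTP_FD0, fd0);
        mnl_attr_put_u32(nlh, IFLA_GTP_FD1, fd1);
        mnl_attr_put_u32(nlh, IFLA_GTP_ROLE, GTP_ROLE_SGSN);   /* new attribute */
        mnl_attr_nest_end(nlh, data);
        mnl_attr_nest_end(nlh, linkinfo);

        if (mnl_socket_sendto(nl, nlh, nlh->nlmsg_len) < 0)
                perror("sendto");

        mnl_socket_close(nl);
        return 0;
}

Leaving IFLA_GTP_ROLE out preserves the existing behaviour, since gtp_encap_enable() defaults the role to GTP_ROLE_GGSN, as the driver hunks below show.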
@@ -74,6 +74,7 @@ struct gtp_dev {
         struct net_device *dev;
 
+        unsigned int role;
         unsigned int hash_size;
         struct hlist_head *tid_hash;
         struct hlist_head *addr_hash;
@@ -154,8 +155,8 @@ static struct pdp_ctx *ipv4_pdp_find(struct gtp_dev *gtp, __be32 ms_addr)
         return NULL;
 }
 
-static bool gtp_check_src_ms_ipv4(struct sk_buff *skb, struct pdp_ctx *pctx,
-                                  unsigned int hdrlen)
+static bool gtp_check_ms_ipv4(struct sk_buff *skb, struct pdp_ctx *pctx,
+                              unsigned int hdrlen, unsigned int role)
 {
         struct iphdr *iph;
@@ -164,27 +165,31 @@ static bool gtp_check_src_ms_ipv4(struct sk_buff *skb, struct pdp_ctx *pctx,
         iph = (struct iphdr *)(skb->data + hdrlen);
 
-        return iph->saddr == pctx->ms_addr_ip4.s_addr;
+        if (role == GTP_ROLE_SGSN)
+                return iph->daddr == pctx->ms_addr_ip4.s_addr;
+        else
+                return iph->saddr == pctx->ms_addr_ip4.s_addr;
 }
 
-/* Check if the inner IP source address in this packet is assigned to any
+/* Check if the inner IP address in this packet is assigned to any
  * existing mobile subscriber.
  */
-static bool gtp_check_src_ms(struct sk_buff *skb, struct pdp_ctx *pctx,
-                             unsigned int hdrlen)
+static bool gtp_check_ms(struct sk_buff *skb, struct pdp_ctx *pctx,
+                         unsigned int hdrlen, unsigned int role)
 {
         switch (ntohs(skb->protocol)) {
         case ETH_P_IP:
-                return gtp_check_src_ms_ipv4(skb, pctx, hdrlen);
+                return gtp_check_ms_ipv4(skb, pctx, hdrlen, role);
         }
         return false;
 }
 
-static int gtp_rx(struct pdp_ctx *pctx, struct sk_buff *skb, unsigned int hdrlen)
+static int gtp_rx(struct pdp_ctx *pctx, struct sk_buff *skb,
+                  unsigned int hdrlen, unsigned int role)
 {
         struct pcpu_sw_netstats *stats;
 
-        if (!gtp_check_src_ms(skb, pctx, hdrlen)) {
+        if (!gtp_check_ms(skb, pctx, hdrlen, role)) {
                 netdev_dbg(pctx->dev, "No PDP ctx for this MS\n");
                 return 1;
         }
@@ -239,7 +244,7 @@ static int gtp0_udp_encap_recv(struct gtp_dev *gtp, struct sk_buff *skb)
                 return 1;
         }
 
-        return gtp_rx(pctx, skb, hdrlen);
+        return gtp_rx(pctx, skb, hdrlen, gtp->role);
 }
 
 static int gtp1u_udp_encap_recv(struct gtp_dev *gtp, struct sk_buff *skb)
@@ -281,7 +286,7 @@ static int gtp1u_udp_encap_recv(struct gtp_dev *gtp, struct sk_buff *skb)
                 return 1;
         }
 
-        return gtp_rx(pctx, skb, hdrlen);
+        return gtp_rx(pctx, skb, hdrlen, gtp->role);
 }
 
 static void gtp_encap_destroy(struct sock *sk)
@@ -481,7 +486,11 @@ static int gtp_build_skb_ip4(struct sk_buff *skb, struct net_device *dev,
          * Prepend PDP header with TEI/TID from PDP ctx.
          */
         iph = ip_hdr(skb);
-        pctx = ipv4_pdp_find(gtp, iph->daddr);
+        if (gtp->role == GTP_ROLE_SGSN)
+                pctx = ipv4_pdp_find(gtp, iph->saddr);
+        else
+                pctx = ipv4_pdp_find(gtp, iph->daddr);
+
         if (!pctx) {
                 netdev_dbg(dev, "no PDP ctx found for %pI4, skip\n",
                            &iph->daddr);
@@ -685,6 +694,7 @@ static const struct nla_policy gtp_policy[IFLA_GTP_MAX + 1] = {
         [IFLA_GTP_FD0] = { .type = NLA_U32 },
         [IFLA_GTP_FD1] = { .type = NLA_U32 },
         [IFLA_GTP_PDP_HASHSIZE] = { .type = NLA_U32 },
+        [IFLA_GTP_ROLE] = { .type = NLA_U32 },
 };
 
 static int gtp_validate(struct nlattr *tb[], struct nlattr *data[])
@@ -810,6 +820,7 @@ static int gtp_encap_enable(struct gtp_dev *gtp, struct nlattr *data[])
 {
         struct sock *sk1u = NULL;
         struct sock *sk0 = NULL;
+        unsigned int role = GTP_ROLE_GGSN;
 
         if (data[IFLA_GTP_FD0]) {
                 u32 fd0 = nla_get_u32(data[IFLA_GTP_FD0]);
@@ -830,8 +841,15 @@ static int gtp_encap_enable(struct gtp_dev *gtp, struct nlattr *data[])
                 }
         }
 
+        if (data[IFLA_GTP_ROLE]) {
+                role = nla_get_u32(data[IFLA_GTP_ROLE]);
+                if (role > GTP_ROLE_SGSN)
+                        return -EINVAL;
+        }
+
         gtp->sk0 = sk0;
         gtp->sk1u = sk1u;
+        gtp->role = role;
 
         return 0;
 }
@@ -538,11 +538,18 @@ enum {
 #define IFLA_PPP_MAX (__IFLA_PPP_MAX - 1)
 
 /* GTP section */
+
+enum ifla_gtp_role {
+        GTP_ROLE_GGSN = 0,
+        GTP_ROLE_SGSN,
+};
+
 enum {
         IFLA_GTP_UNSPEC,
         IFLA_GTP_FD0,
         IFLA_GTP_FD1,
         IFLA_GTP_PDP_HASHSIZE,
+        IFLA_GTP_ROLE,
         __IFLA_GTP_MAX,
 };
 #define IFLA_GTP_MAX (__IFLA_GTP_MAX - 1)
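Usage note (not part of the patch): userspace that configures or inspects GTP devices can map these role values to labels with a small helper such as the hypothetical one sketched below; any value above GTP_ROLE_SGSN is rejected by gtp_encap_enable() with -EINVAL, as the driver hunk above shows.

#include <linux/if_link.h>

/* Hypothetical helper: translate an IFLA_GTP_ROLE value into a printable name. */
static const char *gtp_role_name(unsigned int role)
{
        switch (role) {
        case GTP_ROLE_GGSN:
                return "ggsn";
        case GTP_ROLE_SGSN:
                return "sgsn";
        default:
                return "invalid";       /* the kernel rejects such values with -EINVAL */
        }
}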