author    Nikolay Aleksandrov <nikolay@cumulusnetworks.com>    2020-03-17 14:08:35 +0200
committer David S. Miller <davem@davemloft.net>                2020-03-17 22:47:12 -0700
commit    188c67dd1906eea5542268e4513ad6253fbf9297 (patch)
tree      d0d1aee72ee428449202729f6ed86d4e9babc551 /net/bridge/br_vlan_options.c
parent    53e96632ab934b28d6237c85b851f09e1595b76f (diff)
net: bridge: vlan options: add support for tunnel id dumping
Add a new option, BRIDGE_VLANDB_ENTRY_TUNNEL_ID, which is used to dump
the tunnel id mapping. Since tunnel ids are unique per vlan, they can
enter a vlan range only if they are consecutive, so the tunnel id range
can be calculated simply as: vlan range end id - vlan range start id.
The starting point is the tunnel id carried in
BRIDGE_VLANDB_ENTRY_TUNNEL_ID. This is similar to how tunnel entries
can be created in a range via the old API (a vlan range maps to a
tunnel range).
Signed-off-by: Nikolay Aleksandrov <nikolay@cumulusnetworks.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
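For illustration, here is a minimal userspace sketch (not kernel code; the
function name and layout are hypothetical) of how a consumer of the dump could
expand a vlan range back into per-vlan tunnel ids under the consecutiveness
assumption described in the commit message:

#include <stdint.h>
#include <stdio.h>

/* Expand a dumped vlan range back into per-vlan tunnel ids: the attribute
 * carries the tunnel id of the first vlan in the range, and each following
 * vlan's tunnel id is offset by its distance from the range start. */
static void expand_vlan_tunnel_range(uint16_t vid_start, uint16_t vid_end,
				     uint32_t tunid_start)
{
	for (uint16_t vid = vid_start; vid <= vid_end; vid++)
		printf("vlan %u -> tunnel id %u\n", (unsigned)vid,
		       (unsigned)(tunid_start + (vid - vid_start)));
}

int main(void)
{
	/* e.g. vlans 100-102 mapped to tunnel ids 1000-1002 */
	expand_vlan_tunnel_range(100, 102, 1000);
	return 0;
}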
Diffstat (limited to 'net/bridge/br_vlan_options.c')
-rw-r--r--   net/bridge/br_vlan_options.c | 29
1 file changed, 26 insertions(+), 3 deletions(-)
diff --git a/net/bridge/br_vlan_options.c b/net/bridge/br_vlan_options.c
index 24cf2a621df9..d3618da32b8e 100644
--- a/net/bridge/br_vlan_options.c
+++ b/net/bridge/br_vlan_options.c
@@ -4,25 +4,48 @@
 #include <linux/netdevice.h>
 #include <linux/rtnetlink.h>
 #include <linux/slab.h>
+#include <net/ip_tunnels.h>
 #include "br_private.h"
+#include "br_private_tunnel.h"
+
+static bool __vlan_tun_put(struct sk_buff *skb, const struct net_bridge_vlan *v)
+{
+	__be32 tid = tunnel_id_to_key32(v->tinfo.tunnel_id);
+
+	if (!v->tinfo.tunnel_dst)
+		return true;
+
+	return !nla_put_u32(skb, BRIDGE_VLANDB_ENTRY_TUNNEL_ID,
+			    be32_to_cpu(tid));
+}
+
+static bool __vlan_tun_can_enter_range(const struct net_bridge_vlan *v_curr,
+				       const struct net_bridge_vlan *range_end)
+{
+	return (!v_curr->tinfo.tunnel_dst && !range_end->tinfo.tunnel_dst) ||
+	       vlan_tunid_inrange(v_curr, range_end);
+}
 
 /* check if the options' state of v_curr allow it to enter the range */
 bool br_vlan_opts_eq_range(const struct net_bridge_vlan *v_curr,
			    const struct net_bridge_vlan *range_end)
 {
-	return v_curr->state == range_end->state;
+	return v_curr->state == range_end->state &&
+	       __vlan_tun_can_enter_range(v_curr, range_end);
 }
 
 bool br_vlan_opts_fill(struct sk_buff *skb, const struct net_bridge_vlan *v)
 {
 	return !nla_put_u8(skb, BRIDGE_VLANDB_ENTRY_STATE,
-			   br_vlan_get_state(v));
+			   br_vlan_get_state(v)) &&
+	       __vlan_tun_put(skb, v);
 }
 
 size_t br_vlan_opts_nl_size(void)
 {
-	return nla_total_size(sizeof(u8)); /* BRIDGE_VLANDB_ENTRY_STATE */
+	return nla_total_size(sizeof(u8)) /* BRIDGE_VLANDB_ENTRY_STATE */
+	       + nla_total_size(sizeof(u32)); /* BRIDGE_VLANDB_ENTRY_TUNNEL_ID */
 }
 
 static int br_vlan_modify_state(struct net_bridge_vlan_group *vg,
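To make the range-building condition concrete, here is a simplified standalone
sketch of the kind of check that __vlan_tun_can_enter_range()/vlan_tunid_inrange()
express: a vlan may join the current range only if neither end has a tunnel
mapping, or if its tunnel id is offset from the range end's tunnel id by exactly
the vlan id difference. The struct and helper below are hypothetical stand-ins
for illustration, not the kernel's actual types or implementation.

#include <stdbool.h>
#include <stdint.h>

/* Hypothetical stand-in for the kernel's per-vlan entry. */
struct vlan_entry {
	uint16_t vid;        /* vlan id */
	bool     has_tunnel; /* does this vlan have a tunnel mapping? */
	uint32_t tunnel_id;  /* mapped tunnel id, valid if has_tunnel is set */
};

/* v_curr may extend the range ending at range_end only if both are unmapped,
 * or their tunnel ids advance in lockstep with the vlan ids. */
bool tun_can_enter_range(const struct vlan_entry *v_curr,
			 const struct vlan_entry *range_end)
{
	if (!v_curr->has_tunnel && !range_end->has_tunnel)
		return true;

	if (v_curr->has_tunnel != range_end->has_tunnel)
		return false;

	/* tunnel ids must be consecutive with the vlan ids */
	return v_curr->tunnel_id - range_end->tunnel_id ==
	       (uint32_t)(v_curr->vid - range_end->vid);
}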