author     Amir Vadai <amir@vadai.me>            2016-03-08 12:42:29 +0200
committer  David S. Miller <davem@davemloft.net> 2016-03-10 16:24:02 -0500
commit     5b33f48842fa1e13e9c0ea8cc59c1d0df19042db
tree       51aafa151a70b38dfd500b261d8e32bd4045332a
parent     10f79037b4ac32e8929d73f31f908cfa061e29aa
net/flower: Introduce hardware offload support
This patch is based on a patch made by John Fastabend. It adds support for offloading cls_flower.

When NETIF_F_HW_TC is on:
  flags = 0       => Rule will be processed twice - by hardware, and if still relevant, by software.
  flags = SKIP_HW => Rule will be processed by software only.

If the hardware fails or is not capable of applying the rule, the operation will NOT fail; the filter will be processed by software only.

Acked-by: Jiri Pirko <jiri@mellanox.com>
Suggested-by: John Fastabend <john.r.fastabend@intel.com>
Signed-off-by: Amir Vadai <amir@vadai.me>
Signed-off-by: David S. Miller <davem@davemloft.net>
-rw-r--r--  include/linux/netdevice.h      2
-rw-r--r--  include/net/pkt_cls.h         14
-rw-r--r--  include/uapi/linux/pkt_cls.h   2
-rw-r--r--  net/sched/cls_flower.c        64
4 files changed, 81 insertions(+), 1 deletion(-)
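Before the diff, a note on the flags handling described above: the decision of whether to push a rule to hardware is made by the existing tc_should_offload() helper in include/net/pkt_cls.h, which the new flower code below calls with the filter's flags. A rough reconstruction of that helper (not part of this patch, shown only for orientation) is:

static inline bool tc_should_offload(struct net_device *dev, u32 flags)
{
	/* The driver must advertise TC hardware offload. */
	if (!(dev->features & NETIF_F_HW_TC))
		return false;

	/* A rule marked SKIP_HW stays in software only. */
	if (flags & TCA_CLS_FLAGS_SKIP_HW)
		return false;

	return true;
}

Because the callers added below ignore the return value of ndo_setup_tc(), a hardware failure leaves the filter installed in software, exactly as the commit message states.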
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index fd30cb545c45..41df0b450757 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -786,6 +786,7 @@ typedef u16 (*select_queue_fallback_t)(struct net_device *dev,
enum {
TC_SETUP_MQPRIO,
TC_SETUP_CLSU32,
+ TC_SETUP_CLSFLOWER,
};
struct tc_cls_u32_offload;
@@ -795,6 +796,7 @@ struct tc_to_netdev {
union {
u8 tc;
struct tc_cls_u32_offload *cls_u32;
+ struct tc_cls_flower_offload *cls_flower;
};
};
diff --git a/include/net/pkt_cls.h b/include/net/pkt_cls.h
index bea14eee373e..5b4e8f08b8f0 100644
--- a/include/net/pkt_cls.h
+++ b/include/net/pkt_cls.h
@@ -409,4 +409,18 @@ static inline bool tc_should_offload(struct net_device *dev, u32 flags)
return true;
}
+enum tc_fl_command {
+ TC_CLSFLOWER_REPLACE,
+ TC_CLSFLOWER_DESTROY,
+};
+
+struct tc_cls_flower_offload {
+ enum tc_fl_command command;
+ u64 cookie;
+ struct flow_dissector *dissector;
+ struct fl_flow_key *mask;
+ struct fl_flow_key *key;
+ struct tcf_exts *exts;
+};
+
#endif
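To illustrate the consumer side of the new struct: a driver that sets NETIF_F_HW_TC receives these commands through its ndo_setup_tc callback. The sketch below is hypothetical and not part of this patch; the foo_* names are made up, and only the expected dispatch on tc->type and command is shown.

#include <linux/netdevice.h>
#include <net/pkt_cls.h>

static int foo_setup_tc(struct net_device *dev, u32 handle, __be16 proto,
			struct tc_to_netdev *tc)
{
	struct tc_cls_flower_offload *f;

	if (tc->type != TC_SETUP_CLSFLOWER)
		return -EOPNOTSUPP;

	f = tc->cls_flower;
	switch (f->command) {
	case TC_CLSFLOWER_REPLACE:
		/* Translate f->dissector, f->key, f->mask and f->exts into a
		 * hardware rule, keyed by f->cookie for later removal.
		 * foo_replace_flower_rule() is a hypothetical driver helper.
		 */
		return foo_replace_flower_rule(dev, f);
	case TC_CLSFLOWER_DESTROY:
		/* Remove the hardware rule installed under f->cookie.
		 * foo_destroy_flower_rule() is likewise hypothetical.
		 */
		return foo_destroy_flower_rule(dev, f->cookie);
	}

	return -EOPNOTSUPP;
}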
diff --git a/include/uapi/linux/pkt_cls.h b/include/uapi/linux/pkt_cls.h
index 9874f5680926..c43c5f78b9c4 100644
--- a/include/uapi/linux/pkt_cls.h
+++ b/include/uapi/linux/pkt_cls.h
@@ -417,6 +417,8 @@ enum {
TCA_FLOWER_KEY_TCP_DST, /* be16 */
TCA_FLOWER_KEY_UDP_SRC, /* be16 */
TCA_FLOWER_KEY_UDP_DST, /* be16 */
+
+ TCA_FLOWER_FLAGS,
__TCA_FLOWER_MAX,
};
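From userspace, TCA_FLOWER_FLAGS is a plain u32 netlink attribute carrying the generic classifier flags (such as TCA_CLS_FLAGS_SKIP_HW) already defined in this uapi header. As a hypothetical illustration only (flower_put_flags is made up, and nlh is assumed to be an RTM_NEWTFILTER request being built elsewhere), appending it with libmnl could look like:

#include <stdbool.h>
#include <libmnl/libmnl.h>
#include <linux/pkt_cls.h>

/* Append the new flags attribute to a flower filter request; passing
 * skip_hw selects software-only processing of the rule.
 */
static void flower_put_flags(struct nlmsghdr *nlh, bool skip_hw)
{
	mnl_attr_put_u32(nlh, TCA_FLOWER_FLAGS,
			 skip_hw ? TCA_CLS_FLAGS_SKIP_HW : 0);
}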
diff --git a/net/sched/cls_flower.c b/net/sched/cls_flower.c
index 95b021243233..25d87666bf1e 100644
--- a/net/sched/cls_flower.c
+++ b/net/sched/cls_flower.c
@@ -165,6 +165,51 @@ static void fl_destroy_filter(struct rcu_head *head)
kfree(f);
}
+static void fl_hw_destroy_filter(struct tcf_proto *tp, u64 cookie)
+{
+ struct net_device *dev = tp->q->dev_queue->dev;
+ struct tc_cls_flower_offload offload = {0};
+ struct tc_to_netdev tc;
+
+ if (!tc_should_offload(dev, 0))
+ return;
+
+ offload.command = TC_CLSFLOWER_DESTROY;
+ offload.cookie = cookie;
+
+ tc.type = TC_SETUP_CLSFLOWER;
+ tc.cls_flower = &offload;
+
+ dev->netdev_ops->ndo_setup_tc(dev, tp->q->handle, tp->protocol, &tc);
+}
+
+static void fl_hw_replace_filter(struct tcf_proto *tp,
+ struct flow_dissector *dissector,
+ struct fl_flow_key *mask,
+ struct fl_flow_key *key,
+ struct tcf_exts *actions,
+ u64 cookie, u32 flags)
+{
+ struct net_device *dev = tp->q->dev_queue->dev;
+ struct tc_cls_flower_offload offload = {0};
+ struct tc_to_netdev tc;
+
+ if (!tc_should_offload(dev, flags))
+ return;
+
+ offload.command = TC_CLSFLOWER_REPLACE;
+ offload.cookie = cookie;
+ offload.dissector = dissector;
+ offload.mask = mask;
+ offload.key = key;
+ offload.exts = actions;
+
+ tc.type = TC_SETUP_CLSFLOWER;
+ tc.cls_flower = &offload;
+
+ dev->netdev_ops->ndo_setup_tc(dev, tp->q->handle, tp->protocol, &tc);
+}
+
static bool fl_destroy(struct tcf_proto *tp, bool force)
{
struct cls_fl_head *head = rtnl_dereference(tp->root);
@@ -174,6 +219,7 @@ static bool fl_destroy(struct tcf_proto *tp, bool force)
return false;
list_for_each_entry_safe(f, next, &head->filters, list) {
+ fl_hw_destroy_filter(tp, (u64)f);
list_del_rcu(&f->list);
call_rcu(&f->rcu, fl_destroy_filter);
}
@@ -459,6 +505,7 @@ static int fl_change(struct net *net, struct sk_buff *in_skb,
struct cls_fl_filter *fnew;
struct nlattr *tb[TCA_FLOWER_MAX + 1];
struct fl_flow_mask mask = {};
+ u32 flags = 0;
int err;
if (!tca[TCA_OPTIONS])
@@ -486,6 +533,9 @@ static int fl_change(struct net *net, struct sk_buff *in_skb,
}
fnew->handle = handle;
+ if (tb[TCA_FLOWER_FLAGS])
+ flags = nla_get_u32(tb[TCA_FLOWER_FLAGS]);
+
err = fl_set_parms(net, tp, fnew, &mask, base, tb, tca[TCA_RATE], ovr);
if (err)
goto errout;
@@ -498,9 +548,20 @@ static int fl_change(struct net *net, struct sk_buff *in_skb,
head->ht_params);
if (err)
goto errout;
- if (fold)
+
+ fl_hw_replace_filter(tp,
+ &head->dissector,
+ &mask.key,
+ &fnew->key,
+ &fnew->exts,
+ (u64)fnew,
+ flags);
+
+ if (fold) {
rhashtable_remove_fast(&head->ht, &fold->ht_node,
head->ht_params);
+ fl_hw_destroy_filter(tp, (u64)fold);
+ }
*arg = (unsigned long) fnew;
@@ -527,6 +588,7 @@ static int fl_delete(struct tcf_proto *tp, unsigned long arg)
rhashtable_remove_fast(&head->ht, &f->ht_node,
head->ht_params);
list_del_rcu(&f->list);
+ fl_hw_destroy_filter(tp, (u64)f);
tcf_unbind_filter(tp, &f->res);
call_rcu(&f->rcu, fl_destroy_filter);
return 0;