Diffstat (limited to 'net/sched')
-rw-r--r--  net/sched/act_api.c     102
-rw-r--r--  net/sched/act_ct.c       10
-rw-r--r--  net/sched/act_ctinfo.c    8
-rw-r--r--  net/sched/act_gate.c     11
-rw-r--r--  net/sched/act_mpls.c     18
-rw-r--r--  net/sched/act_police.c    6
-rw-r--r--  net/sched/cls_api.c      72
-rw-r--r--  net/sched/sch_api.c      20
-rw-r--r--  net/sched/sch_cbs.c       2
-rw-r--r--  net/sched/sch_choke.c     2
-rw-r--r--  net/sched/sch_fq.c       42
-rw-r--r--  net/sched/sch_gred.c      2
-rw-r--r--  net/sched/sch_htb.c       4
-rw-r--r--  net/sched/sch_netem.c     1
-rw-r--r--  net/sched/sch_qfq.c       5
-rw-r--r--  net/sched/sch_red.c       2
-rw-r--r--  net/sched/sch_sfq.c      39
-rw-r--r--  net/sched/sch_taprio.c    2
18 files changed, 186 insertions, 162 deletions
diff --git a/net/sched/act_api.c b/net/sched/act_api.c
index eecad65fec92..839790043256 100644
--- a/net/sched/act_api.c
+++ b/net/sched/act_api.c
@@ -504,6 +504,50 @@ nla_put_failure:
return -1;
}
+static int
+tcf_action_dump_1(struct sk_buff *skb, struct tc_action *a, int bind, int ref)
+{
+ unsigned char *b = skb_tail_pointer(skb);
+ struct nlattr *nest;
+ int err = -EINVAL;
+ u32 flags;
+
+ if (tcf_action_dump_terse(skb, a, false))
+ goto nla_put_failure;
+
+ if (a->hw_stats != TCA_ACT_HW_STATS_ANY &&
+ nla_put_bitfield32(skb, TCA_ACT_HW_STATS,
+ a->hw_stats, TCA_ACT_HW_STATS_ANY))
+ goto nla_put_failure;
+
+ if (a->used_hw_stats_valid &&
+ nla_put_bitfield32(skb, TCA_ACT_USED_HW_STATS,
+ a->used_hw_stats, TCA_ACT_HW_STATS_ANY))
+ goto nla_put_failure;
+
+ flags = a->tcfa_flags & TCA_ACT_FLAGS_USER_MASK;
+ if (flags &&
+ nla_put_bitfield32(skb, TCA_ACT_FLAGS,
+ flags, flags))
+ goto nla_put_failure;
+
+ if (nla_put_u32(skb, TCA_ACT_IN_HW_COUNT, a->in_hw_count))
+ goto nla_put_failure;
+
+ nest = nla_nest_start_noflag(skb, TCA_ACT_OPTIONS);
+ if (nest == NULL)
+ goto nla_put_failure;
+ err = tcf_action_dump_old(skb, a, bind, ref);
+ if (err > 0) {
+ nla_nest_end(skb, nest);
+ return err;
+ }
+
+nla_put_failure:
+ nlmsg_trim(skb, b);
+ return -1;
+}
+
static int tcf_dump_walker(struct tcf_idrinfo *idrinfo, struct sk_buff *skb,
struct netlink_callback *cb)
{
@@ -1190,51 +1234,6 @@ tcf_action_dump_old(struct sk_buff *skb, struct tc_action *a, int bind, int ref)
return a->ops->dump(skb, a, bind, ref);
}
-int
-tcf_action_dump_1(struct sk_buff *skb, struct tc_action *a, int bind, int ref)
-{
- int err = -EINVAL;
- unsigned char *b = skb_tail_pointer(skb);
- struct nlattr *nest;
- u32 flags;
-
- if (tcf_action_dump_terse(skb, a, false))
- goto nla_put_failure;
-
- if (a->hw_stats != TCA_ACT_HW_STATS_ANY &&
- nla_put_bitfield32(skb, TCA_ACT_HW_STATS,
- a->hw_stats, TCA_ACT_HW_STATS_ANY))
- goto nla_put_failure;
-
- if (a->used_hw_stats_valid &&
- nla_put_bitfield32(skb, TCA_ACT_USED_HW_STATS,
- a->used_hw_stats, TCA_ACT_HW_STATS_ANY))
- goto nla_put_failure;
-
- flags = a->tcfa_flags & TCA_ACT_FLAGS_USER_MASK;
- if (flags &&
- nla_put_bitfield32(skb, TCA_ACT_FLAGS,
- flags, flags))
- goto nla_put_failure;
-
- if (nla_put_u32(skb, TCA_ACT_IN_HW_COUNT, a->in_hw_count))
- goto nla_put_failure;
-
- nest = nla_nest_start_noflag(skb, TCA_ACT_OPTIONS);
- if (nest == NULL)
- goto nla_put_failure;
- err = tcf_action_dump_old(skb, a, bind, ref);
- if (err > 0) {
- nla_nest_end(skb, nest);
- return err;
- }
-
-nla_put_failure:
- nlmsg_trim(skb, b);
- return -1;
-}
-EXPORT_SYMBOL(tcf_action_dump_1);
-
int tcf_action_dump(struct sk_buff *skb, struct tc_action *actions[],
int bind, int ref, bool terse)
{
@@ -2264,13 +2263,16 @@ out_module_put:
return skb->len;
}
+static const struct rtnl_msg_handler tc_action_rtnl_msg_handlers[] __initconst = {
+ {.msgtype = RTM_NEWACTION, .doit = tc_ctl_action},
+ {.msgtype = RTM_DELACTION, .doit = tc_ctl_action},
+ {.msgtype = RTM_GETACTION, .doit = tc_ctl_action,
+ .dumpit = tc_dump_action},
+};
+
static int __init tc_action_init(void)
{
- rtnl_register(PF_UNSPEC, RTM_NEWACTION, tc_ctl_action, NULL, 0);
- rtnl_register(PF_UNSPEC, RTM_DELACTION, tc_ctl_action, NULL, 0);
- rtnl_register(PF_UNSPEC, RTM_GETACTION, tc_ctl_action, tc_dump_action,
- 0);
-
+ rtnl_register_many(tc_action_rtnl_msg_handlers);
return 0;
}
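
For context on the registration change above: rtnl_register_many() takes a
fixed-size array of struct rtnl_msg_handler and registers every entry with
the rtnetlink core, replacing the repeated rtnl_register() calls. A minimal
sketch of the behavior, assuming a simple loop (the in-tree helper also
handles module ownership and unwinds on failure; entries that leave
.protocol unset get PF_UNSPEC, which is 0):

	/* Hedged sketch -- not the in-tree implementation. */
	static int sketch_register_many(const struct rtnl_msg_handler *h, int n)
	{
		int i;

		for (i = 0; i < n; i++)
			rtnl_register(h[i].protocol, h[i].msgtype,
				      h[i].doit, h[i].dumpit, h[i].flags);
		return 0;
	}
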
diff --git a/net/sched/act_ct.c b/net/sched/act_ct.c
index 2197eb625658..c02f39efc6ef 100644
--- a/net/sched/act_ct.c
+++ b/net/sched/act_ct.c
@@ -1183,9 +1183,8 @@ static int tcf_ct_fill_params_nat(struct tcf_ct_params *p,
range->min_addr.ip =
nla_get_in_addr(tb[TCA_CT_NAT_IPV4_MIN]);
- range->max_addr.ip = max_attr ?
- nla_get_in_addr(max_attr) :
- range->min_addr.ip;
+ range->max_addr.ip =
+ nla_get_in_addr_default(max_attr, range->min_addr.ip);
} else if (tb[TCA_CT_NAT_IPV6_MIN]) {
struct nlattr *max_attr = tb[TCA_CT_NAT_IPV6_MAX];
@@ -1314,8 +1313,9 @@ static int tcf_ct_fill_params(struct net *net,
err = -EINVAL;
goto err;
}
- family = tb[TCA_CT_HELPER_FAMILY] ? nla_get_u8(tb[TCA_CT_HELPER_FAMILY]) : AF_INET;
- proto = tb[TCA_CT_HELPER_PROTO] ? nla_get_u8(tb[TCA_CT_HELPER_PROTO]) : IPPROTO_TCP;
+ family = nla_get_u8_default(tb[TCA_CT_HELPER_FAMILY], AF_INET);
+ proto = nla_get_u8_default(tb[TCA_CT_HELPER_PROTO],
+ IPPROTO_TCP);
err = nf_ct_add_helper(tmpl, name, family, proto,
p->ct_action & TCA_CT_ACT_NAT, &p->helper);
if (err) {
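
The conversions above (and in the files that follow) rely on the
nla_get_*_default() accessor family: each variant returns the attribute's
value when the attribute is present, and a caller-supplied default when it
is NULL. A sketch of the u8 variant capturing the intended semantics (the
in-tree helpers live in include/net/netlink.h and may differ in detail):

	/* Hedged sketch of the default-returning getter pattern. */
	static inline u8 sketch_nla_get_u8_default(const struct nlattr *nla,
						   u8 defvalue)
	{
		return nla ? nla_get_u8(nla) : defvalue;
	}
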
diff --git a/net/sched/act_ctinfo.c b/net/sched/act_ctinfo.c
index 5dd41a012110..5b1241ddc758 100644
--- a/net/sched/act_ctinfo.c
+++ b/net/sched/act_ctinfo.c
@@ -197,8 +197,9 @@ static int tcf_ctinfo_init(struct net *net, struct nlattr *nla,
"dscp mask must be 6 contiguous bits");
return -EINVAL;
}
- dscpstatemask = tb[TCA_CTINFO_PARMS_DSCP_STATEMASK] ?
- nla_get_u32(tb[TCA_CTINFO_PARMS_DSCP_STATEMASK]) : 0;
+ dscpstatemask =
+ nla_get_u32_default(tb[TCA_CTINFO_PARMS_DSCP_STATEMASK],
+ 0);
/* mask & statemask must not overlap */
if (dscpmask & dscpstatemask) {
NL_SET_ERR_MSG_ATTR(extack,
@@ -243,8 +244,7 @@ static int tcf_ctinfo_init(struct net *net, struct nlattr *nla,
}
cp_new->net = net;
- cp_new->zone = tb[TCA_CTINFO_ZONE] ?
- nla_get_u16(tb[TCA_CTINFO_ZONE]) : 0;
+ cp_new->zone = nla_get_u16_default(tb[TCA_CTINFO_ZONE], 0);
if (dscpmask) {
cp_new->dscpmask = dscpmask;
cp_new->dscpmaskshift = dscpmaskshift;
diff --git a/net/sched/act_gate.c b/net/sched/act_gate.c
index 1dd74125398a..91c0ec729823 100644
--- a/net/sched/act_gate.c
+++ b/net/sched/act_gate.c
@@ -190,15 +190,10 @@ static int fill_gate_entry(struct nlattr **tb, struct tcfg_gate_entry *entry,
entry->interval = interval;
- if (tb[TCA_GATE_ENTRY_IPV])
- entry->ipv = nla_get_s32(tb[TCA_GATE_ENTRY_IPV]);
- else
- entry->ipv = -1;
+ entry->ipv = nla_get_s32_default(tb[TCA_GATE_ENTRY_IPV], -1);
- if (tb[TCA_GATE_ENTRY_MAX_OCTETS])
- entry->maxoctets = nla_get_s32(tb[TCA_GATE_ENTRY_MAX_OCTETS]);
- else
- entry->maxoctets = -1;
+ entry->maxoctets = nla_get_s32_default(tb[TCA_GATE_ENTRY_MAX_OCTETS],
+ -1);
return 0;
}
diff --git a/net/sched/act_mpls.c b/net/sched/act_mpls.c
index 44a37a71ae92..9f86f4e666d3 100644
--- a/net/sched/act_mpls.c
+++ b/net/sched/act_mpls.c
@@ -288,16 +288,14 @@ static int tcf_mpls_init(struct net *net, struct nlattr *nla,
}
p->tcfm_action = parm->m_action;
- p->tcfm_label = tb[TCA_MPLS_LABEL] ? nla_get_u32(tb[TCA_MPLS_LABEL]) :
- ACT_MPLS_LABEL_NOT_SET;
- p->tcfm_tc = tb[TCA_MPLS_TC] ? nla_get_u8(tb[TCA_MPLS_TC]) :
- ACT_MPLS_TC_NOT_SET;
- p->tcfm_ttl = tb[TCA_MPLS_TTL] ? nla_get_u8(tb[TCA_MPLS_TTL]) :
- mpls_ttl;
- p->tcfm_bos = tb[TCA_MPLS_BOS] ? nla_get_u8(tb[TCA_MPLS_BOS]) :
- ACT_MPLS_BOS_NOT_SET;
- p->tcfm_proto = tb[TCA_MPLS_PROTO] ? nla_get_be16(tb[TCA_MPLS_PROTO]) :
- htons(ETH_P_MPLS_UC);
+ p->tcfm_label = nla_get_u32_default(tb[TCA_MPLS_LABEL],
+ ACT_MPLS_LABEL_NOT_SET);
+ p->tcfm_tc = nla_get_u8_default(tb[TCA_MPLS_TC], ACT_MPLS_TC_NOT_SET);
+ p->tcfm_ttl = nla_get_u8_default(tb[TCA_MPLS_TTL], mpls_ttl);
+ p->tcfm_bos = nla_get_u8_default(tb[TCA_MPLS_BOS],
+ ACT_MPLS_BOS_NOT_SET);
+ p->tcfm_proto = nla_get_be16_default(tb[TCA_MPLS_PROTO],
+ htons(ETH_P_MPLS_UC));
spin_lock_bh(&m->tcf_lock);
goto_ch = tcf_action_set_ctrlact(*a, parm->action, goto_ch);
diff --git a/net/sched/act_police.c b/net/sched/act_police.c
index 8555125ed34d..a214ed681142 100644
--- a/net/sched/act_police.c
+++ b/net/sched/act_police.c
@@ -167,8 +167,7 @@ static int tcf_police_init(struct net *net, struct nlattr *nla,
}
if (R_tab) {
new->rate_present = true;
- rate64 = tb[TCA_POLICE_RATE64] ?
- nla_get_u64(tb[TCA_POLICE_RATE64]) : 0;
+ rate64 = nla_get_u64_default(tb[TCA_POLICE_RATE64], 0);
psched_ratecfg_precompute(&new->rate, &R_tab->rate, rate64);
qdisc_put_rtab(R_tab);
} else {
@@ -176,8 +175,7 @@ static int tcf_police_init(struct net *net, struct nlattr *nla,
}
if (P_tab) {
new->peak_present = true;
- prate64 = tb[TCA_POLICE_PEAKRATE64] ?
- nla_get_u64(tb[TCA_POLICE_PEAKRATE64]) : 0;
+ prate64 = nla_get_u64_default(tb[TCA_POLICE_PEAKRATE64], 0);
psched_ratecfg_precompute(&new->peak, &P_tab->rate, prate64);
qdisc_put_rtab(P_tab);
} else {
diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c
index bbc778c233c8..7578e27260c9 100644
--- a/net/sched/cls_api.c
+++ b/net/sched/cls_api.c
@@ -1933,7 +1933,8 @@ static void tcf_chain_tp_remove(struct tcf_chain *chain,
static struct tcf_proto *tcf_chain_tp_find(struct tcf_chain *chain,
struct tcf_chain_info *chain_info,
u32 protocol, u32 prio,
- bool prio_allocate);
+ bool prio_allocate,
+ struct netlink_ext_ack *extack);
/* Try to insert new proto.
* If proto with specified priority already exists, free new proto
@@ -1957,8 +1958,7 @@ static struct tcf_proto *tcf_chain_tp_insert_unique(struct tcf_chain *chain,
return ERR_PTR(-EAGAIN);
}
- tp = tcf_chain_tp_find(chain, &chain_info,
- protocol, prio, false);
+ tp = tcf_chain_tp_find(chain, &chain_info, protocol, prio, false, NULL);
if (!tp)
err = tcf_chain_tp_insert(chain, &chain_info, tp_new);
mutex_unlock(&chain->filter_chain_lock);
@@ -2018,7 +2018,8 @@ static void tcf_chain_tp_delete_empty(struct tcf_chain *chain,
static struct tcf_proto *tcf_chain_tp_find(struct tcf_chain *chain,
struct tcf_chain_info *chain_info,
u32 protocol, u32 prio,
- bool prio_allocate)
+ bool prio_allocate,
+ struct netlink_ext_ack *extack)
{
struct tcf_proto **pprev;
struct tcf_proto *tp;
@@ -2029,9 +2030,14 @@ static struct tcf_proto *tcf_chain_tp_find(struct tcf_chain *chain,
pprev = &tp->next) {
if (tp->prio >= prio) {
if (tp->prio == prio) {
- if (prio_allocate ||
- (tp->protocol != protocol && protocol))
+ if (prio_allocate) {
+ NL_SET_ERR_MSG(extack, "Lowest ID from auto-alloc range already in use");
+ return ERR_PTR(-ENOSPC);
+ }
+ if (tp->protocol != protocol && protocol) {
+ NL_SET_ERR_MSG(extack, "Protocol mismatch for filter with specified priority");
return ERR_PTR(-EINVAL);
+ }
} else {
tp = NULL;
}
@@ -2297,7 +2303,7 @@ replay:
}
block->classid = parent;
- chain_index = tca[TCA_CHAIN] ? nla_get_u32(tca[TCA_CHAIN]) : 0;
+ chain_index = nla_get_u32_default(tca[TCA_CHAIN], 0);
if (chain_index > TC_ACT_EXT_VAL_MASK) {
NL_SET_ERR_MSG(extack, "Specified chain index exceeds upper limit");
err = -EINVAL;
@@ -2312,9 +2318,8 @@ replay:
mutex_lock(&chain->filter_chain_lock);
tp = tcf_chain_tp_find(chain, &chain_info, protocol,
- prio, prio_allocate);
+ prio, prio_allocate, extack);
if (IS_ERR(tp)) {
- NL_SET_ERR_MSG(extack, "Filter with specified priority/protocol not found");
err = PTR_ERR(tp);
goto errout_locked;
}
@@ -2509,7 +2514,7 @@ static int tc_del_tfilter(struct sk_buff *skb, struct nlmsghdr *n,
goto errout;
}
- chain_index = tca[TCA_CHAIN] ? nla_get_u32(tca[TCA_CHAIN]) : 0;
+ chain_index = nla_get_u32_default(tca[TCA_CHAIN], 0);
if (chain_index > TC_ACT_EXT_VAL_MASK) {
NL_SET_ERR_MSG(extack, "Specified chain index exceeds upper limit");
err = -EINVAL;
@@ -2539,10 +2544,13 @@ static int tc_del_tfilter(struct sk_buff *skb, struct nlmsghdr *n,
mutex_lock(&chain->filter_chain_lock);
tp = tcf_chain_tp_find(chain, &chain_info, protocol,
- prio, false);
- if (!tp || IS_ERR(tp)) {
+ prio, false, extack);
+ if (!tp) {
+ err = -ENOENT;
NL_SET_ERR_MSG(extack, "Filter with specified priority/protocol not found");
- err = tp ? PTR_ERR(tp) : -ENOENT;
+ goto errout_locked;
+ } else if (IS_ERR(tp)) {
+ err = PTR_ERR(tp);
goto errout_locked;
} else if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], tp->ops->kind)) {
NL_SET_ERR_MSG(extack, "Specified filter kind does not match existing one");
@@ -2664,7 +2672,7 @@ static int tc_get_tfilter(struct sk_buff *skb, struct nlmsghdr *n,
goto errout;
}
- chain_index = tca[TCA_CHAIN] ? nla_get_u32(tca[TCA_CHAIN]) : 0;
+ chain_index = nla_get_u32_default(tca[TCA_CHAIN], 0);
if (chain_index > TC_ACT_EXT_VAL_MASK) {
NL_SET_ERR_MSG(extack, "Specified chain index exceeds upper limit");
err = -EINVAL;
@@ -2679,11 +2687,14 @@ static int tc_get_tfilter(struct sk_buff *skb, struct nlmsghdr *n,
mutex_lock(&chain->filter_chain_lock);
tp = tcf_chain_tp_find(chain, &chain_info, protocol,
- prio, false);
+ prio, false, extack);
mutex_unlock(&chain->filter_chain_lock);
- if (!tp || IS_ERR(tp)) {
+ if (!tp) {
+ err = -ENOENT;
NL_SET_ERR_MSG(extack, "Filter with specified priority/protocol not found");
- err = tp ? PTR_ERR(tp) : -ENOENT;
+ goto errout;
+ } else if (IS_ERR(tp)) {
+ err = PTR_ERR(tp);
goto errout;
} else if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], tp->ops->kind)) {
NL_SET_ERR_MSG(extack, "Specified filter kind does not match existing one");
@@ -3104,7 +3115,7 @@ replay:
if (IS_ERR(block))
return PTR_ERR(block);
- chain_index = tca[TCA_CHAIN] ? nla_get_u32(tca[TCA_CHAIN]) : 0;
+ chain_index = nla_get_u32_default(tca[TCA_CHAIN], 0);
if (chain_index > TC_ACT_EXT_VAL_MASK) {
NL_SET_ERR_MSG(extack, "Specified chain index exceeds upper limit");
err = -EINVAL;
@@ -4056,6 +4067,19 @@ static struct pernet_operations tcf_net_ops = {
.size = sizeof(struct tcf_net),
};
+static const struct rtnl_msg_handler tc_filter_rtnl_msg_handlers[] __initconst = {
+ {.msgtype = RTM_NEWTFILTER, .doit = tc_new_tfilter,
+ .flags = RTNL_FLAG_DOIT_UNLOCKED},
+ {.msgtype = RTM_DELTFILTER, .doit = tc_del_tfilter,
+ .flags = RTNL_FLAG_DOIT_UNLOCKED},
+ {.msgtype = RTM_GETTFILTER, .doit = tc_get_tfilter,
+ .dumpit = tc_dump_tfilter, .flags = RTNL_FLAG_DOIT_UNLOCKED},
+ {.msgtype = RTM_NEWCHAIN, .doit = tc_ctl_chain},
+ {.msgtype = RTM_DELCHAIN, .doit = tc_ctl_chain},
+ {.msgtype = RTM_GETCHAIN, .doit = tc_ctl_chain,
+ .dumpit = tc_dump_chain},
+};
+
static int __init tc_filter_init(void)
{
int err;
@@ -4069,17 +4093,7 @@ static int __init tc_filter_init(void)
goto err_register_pernet_subsys;
xa_init_flags(&tcf_exts_miss_cookies_xa, XA_FLAGS_ALLOC1);
-
- rtnl_register(PF_UNSPEC, RTM_NEWTFILTER, tc_new_tfilter, NULL,
- RTNL_FLAG_DOIT_UNLOCKED);
- rtnl_register(PF_UNSPEC, RTM_DELTFILTER, tc_del_tfilter, NULL,
- RTNL_FLAG_DOIT_UNLOCKED);
- rtnl_register(PF_UNSPEC, RTM_GETTFILTER, tc_get_tfilter,
- tc_dump_tfilter, RTNL_FLAG_DOIT_UNLOCKED);
- rtnl_register(PF_UNSPEC, RTM_NEWCHAIN, tc_ctl_chain, NULL, 0);
- rtnl_register(PF_UNSPEC, RTM_DELCHAIN, tc_ctl_chain, NULL, 0);
- rtnl_register(PF_UNSPEC, RTM_GETCHAIN, tc_ctl_chain,
- tc_dump_chain, 0);
+ rtnl_register_many(tc_filter_rtnl_msg_handlers);
return 0;
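
With extack plumbed into tcf_chain_tp_find(), the function reports its own
ERR_PTR() failures (-ENOSPC for an already-used auto-allocated priority,
-EINVAL for a protocol mismatch), and callers only need to annotate the
NULL "not found" case. A sketch of the resulting caller-side pattern,
condensed from the hunks above:

	tp = tcf_chain_tp_find(chain, &chain_info, protocol, prio, false, extack);
	if (!tp) {
		NL_SET_ERR_MSG(extack, "Filter with specified priority/protocol not found");
		return -ENOENT;
	}
	if (IS_ERR(tp))
		return PTR_ERR(tp);	/* extack already set by callee */
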
diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c
index a1d27bc039a3..300430b8c4d2 100644
--- a/net/sched/sch_api.c
+++ b/net/sched/sch_api.c
@@ -2420,6 +2420,17 @@ static struct pernet_operations psched_net_ops = {
DEFINE_STATIC_KEY_FALSE(tc_skip_wrapper);
#endif
+static const struct rtnl_msg_handler psched_rtnl_msg_handlers[] __initconst = {
+ {.msgtype = RTM_NEWQDISC, .doit = tc_modify_qdisc},
+ {.msgtype = RTM_DELQDISC, .doit = tc_get_qdisc},
+ {.msgtype = RTM_GETQDISC, .doit = tc_get_qdisc,
+ .dumpit = tc_dump_qdisc},
+ {.msgtype = RTM_NEWTCLASS, .doit = tc_ctl_tclass},
+ {.msgtype = RTM_DELTCLASS, .doit = tc_ctl_tclass},
+ {.msgtype = RTM_GETTCLASS, .doit = tc_ctl_tclass,
+ .dumpit = tc_dump_tclass},
+};
+
static int __init pktsched_init(void)
{
int err;
@@ -2438,14 +2449,7 @@ static int __init pktsched_init(void)
register_qdisc(&mq_qdisc_ops);
register_qdisc(&noqueue_qdisc_ops);
- rtnl_register(PF_UNSPEC, RTM_NEWQDISC, tc_modify_qdisc, NULL, 0);
- rtnl_register(PF_UNSPEC, RTM_DELQDISC, tc_get_qdisc, NULL, 0);
- rtnl_register(PF_UNSPEC, RTM_GETQDISC, tc_get_qdisc, tc_dump_qdisc,
- 0);
- rtnl_register(PF_UNSPEC, RTM_NEWTCLASS, tc_ctl_tclass, NULL, 0);
- rtnl_register(PF_UNSPEC, RTM_DELTCLASS, tc_ctl_tclass, NULL, 0);
- rtnl_register(PF_UNSPEC, RTM_GETTCLASS, tc_ctl_tclass, tc_dump_tclass,
- 0);
+ rtnl_register_many(psched_rtnl_msg_handlers);
tc_wrapper_init();
diff --git a/net/sched/sch_cbs.c b/net/sched/sch_cbs.c
index 939425da1895..8c9a0400c862 100644
--- a/net/sched/sch_cbs.c
+++ b/net/sched/sch_cbs.c
@@ -310,7 +310,7 @@ static void cbs_set_port_rate(struct net_device *dev, struct cbs_sched_data *q)
{
struct ethtool_link_ksettings ecmd;
int speed = SPEED_10;
- int port_rate;
+ s64 port_rate;
int err;
err = __ethtool_get_link_ksettings(dev, &ecmd);
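
Widening port_rate to s64 matters because the value derived from the link
speed is a byte count per second: assuming the usual speed * 1000 *
BYTES_PER_KBIT conversion presumably done later in this function (not shown
in the hunk), a 100 Gb/s link gives 100000 * 1000 * 125 = 1.25e10, well past
INT_MAX (~2.1e9), so a 32-bit int silently overflows:

	/* Hedged sketch of the overflow the s64 widening avoids. */
	#define SKETCH_BYTES_PER_KBIT	125LL
	s64 port_rate = 100000LL * 1000 * SKETCH_BYTES_PER_KBIT; /* 1.25e10 */
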
diff --git a/net/sched/sch_choke.c b/net/sched/sch_choke.c
index 91072010923d..1e940ad0d2fa 100644
--- a/net/sched/sch_choke.c
+++ b/net/sched/sch_choke.c
@@ -356,7 +356,7 @@ static int choke_change(struct Qdisc *sch, struct nlattr *opt,
tb[TCA_CHOKE_STAB] == NULL)
return -EINVAL;
- max_P = tb[TCA_CHOKE_MAX_P] ? nla_get_u32(tb[TCA_CHOKE_MAX_P]) : 0;
+ max_P = nla_get_u32_default(tb[TCA_CHOKE_MAX_P], 0);
ctl = nla_data(tb[TCA_CHOKE_PARMS]);
stab = nla_data(tb[TCA_CHOKE_STAB]);
diff --git a/net/sched/sch_fq.c b/net/sched/sch_fq.c
index 19a49af5a9e5..a5e87f9ea986 100644
--- a/net/sched/sch_fq.c
+++ b/net/sched/sch_fq.c
@@ -111,6 +111,7 @@ struct fq_perband_flows {
struct fq_sched_data {
/* Read mostly cache line */
+ u64 offload_horizon;
u32 quantum;
u32 initial_quantum;
u32 flow_refill_delay;
@@ -299,7 +300,7 @@ static void fq_gc(struct fq_sched_data *q,
}
/* Fast path can be used if:
- * 1) Packet tstamp is in the past.
+ * 1) Packet tstamp is in the past, or within the pacing offload horizon.
* 2) FQ qlen == 0 OR
* (no flow is currently eligible for transmit,
* AND fast path queue has less than 8 packets)
@@ -314,7 +315,7 @@ static bool fq_fastpath_check(const struct Qdisc *sch, struct sk_buff *skb,
const struct fq_sched_data *q = qdisc_priv(sch);
const struct sock *sk;
- if (fq_skb_cb(skb)->time_to_send > now)
+ if (fq_skb_cb(skb)->time_to_send > now + q->offload_horizon)
return false;
if (sch->q.qlen != 0) {
@@ -331,6 +332,12 @@ static bool fq_fastpath_check(const struct Qdisc *sch, struct sk_buff *skb,
*/
if (q->internal.qlen >= 8)
return false;
+
+ /* Ordering invariants fall apart if some delayed flows
+ * are ready but we haven't serviced them yet.
+ */
+ if (q->time_next_delayed_flow <= now + q->offload_horizon)
+ return false;
}
sk = skb->sk;
@@ -361,8 +368,9 @@ static struct fq_flow *fq_classify(struct Qdisc *sch, struct sk_buff *skb,
* 3) We do not want to rate limit them (e.g. SYNFLOOD attack),
* especially if the listener set SO_MAX_PACING_RATE
* 4) We pretend they are orphaned
+ * TCP can also associate TIME_WAIT sockets with RST or ACK packets.
*/
- if (!sk || sk_listener(sk)) {
+ if (!sk || sk_listener_or_tw(sk)) {
unsigned long hash = skb_get_hash(skb) & q->orphan_mask;
/* By forcing low order bit to 1, we make sure to not
@@ -595,15 +603,18 @@ static void fq_check_throttled(struct fq_sched_data *q, u64 now)
unsigned long sample;
struct rb_node *p;
- if (q->time_next_delayed_flow > now)
+ if (q->time_next_delayed_flow > now + q->offload_horizon)
return;
/* Update unthrottle latency EWMA.
* This is cheap and can help diagnose timer/latency problems.
*/
sample = (unsigned long)(now - q->time_next_delayed_flow);
- q->unthrottle_latency_ns -= q->unthrottle_latency_ns >> 3;
- q->unthrottle_latency_ns += sample >> 3;
+ if ((long)sample > 0) {
+ q->unthrottle_latency_ns -= q->unthrottle_latency_ns >> 3;
+ q->unthrottle_latency_ns += sample >> 3;
+ }
+ now += q->offload_horizon;
q->time_next_delayed_flow = ~0ULL;
while ((p = rb_first(&q->delayed)) != NULL) {
@@ -687,7 +698,7 @@ begin:
u64 time_next_packet = max_t(u64, fq_skb_cb(skb)->time_to_send,
f->time_next_packet);
- if (now < time_next_packet) {
+ if (now + q->offload_horizon < time_next_packet) {
head->first = f->next;
f->time_next_packet = time_next_packet;
fq_flow_set_throttled(q, f);
@@ -925,6 +936,7 @@ static const struct nla_policy fq_policy[TCA_FQ_MAX + 1] = {
[TCA_FQ_HORIZON_DROP] = { .type = NLA_U8 },
[TCA_FQ_PRIOMAP] = NLA_POLICY_EXACT_LEN(sizeof(struct tc_prio_qopt)),
[TCA_FQ_WEIGHTS] = NLA_POLICY_EXACT_LEN(FQ_BANDS * sizeof(s32)),
+ [TCA_FQ_OFFLOAD_HORIZON] = { .type = NLA_U32 },
};
/* compress a u8 array with all elems <= 3 to an array of 2-bit fields */
@@ -1100,6 +1112,17 @@ static int fq_change(struct Qdisc *sch, struct nlattr *opt,
WRITE_ONCE(q->horizon_drop,
nla_get_u8(tb[TCA_FQ_HORIZON_DROP]));
+ if (tb[TCA_FQ_OFFLOAD_HORIZON]) {
+ u64 offload_horizon = (u64)NSEC_PER_USEC *
+ nla_get_u32(tb[TCA_FQ_OFFLOAD_HORIZON]);
+
+ if (offload_horizon <= qdisc_dev(sch)->max_pacing_offload_horizon) {
+ WRITE_ONCE(q->offload_horizon, offload_horizon);
+ } else {
+ NL_SET_ERR_MSG_MOD(extack, "invalid offload_horizon");
+ err = -EINVAL;
+ }
+ }
if (!err) {
sch_tree_unlock(sch);
@@ -1183,6 +1206,7 @@ static int fq_dump(struct Qdisc *sch, struct sk_buff *skb)
.bands = FQ_BANDS,
};
struct nlattr *opts;
+ u64 offload_horizon;
u64 ce_threshold;
s32 weights[3];
u64 horizon;
@@ -1199,6 +1223,9 @@ static int fq_dump(struct Qdisc *sch, struct sk_buff *skb)
horizon = READ_ONCE(q->horizon);
do_div(horizon, NSEC_PER_USEC);
+ offload_horizon = READ_ONCE(q->offload_horizon);
+ do_div(offload_horizon, NSEC_PER_USEC);
+
if (nla_put_u32(skb, TCA_FQ_PLIMIT,
READ_ONCE(sch->limit)) ||
nla_put_u32(skb, TCA_FQ_FLOW_PLIMIT,
@@ -1224,6 +1251,7 @@ static int fq_dump(struct Qdisc *sch, struct sk_buff *skb)
nla_put_u32(skb, TCA_FQ_TIMER_SLACK,
READ_ONCE(q->timer_slack)) ||
nla_put_u32(skb, TCA_FQ_HORIZON, (u32)horizon) ||
+ nla_put_u32(skb, TCA_FQ_OFFLOAD_HORIZON, (u32)offload_horizon) ||
nla_put_u8(skb, TCA_FQ_HORIZON_DROP,
READ_ONCE(q->horizon_drop)))
goto nla_put_failure;
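
Taken together, the sch_fq hunks implement pacing offload: with a non-zero
offload_horizon H (capped by the device's max_pacing_offload_horizon), fq
treats any packet whose release time falls within [now, now + H] as sendable
immediately and leaves the final pacing to the NIC. A sketch of the check
applied in the fastpath, throttle and dequeue paths above:

	/* Hedged sketch: the "sendable now" test with an offload horizon. */
	static bool sketch_fq_sendable(u64 time_to_send, u64 now, u64 horizon)
	{
		return time_to_send <= now + horizon;
	}
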
diff --git a/net/sched/sch_gred.c b/net/sched/sch_gred.c
index 79ba9dc70254..7d2151c62c4a 100644
--- a/net/sched/sch_gred.c
+++ b/net/sched/sch_gred.c
@@ -668,7 +668,7 @@ static int gred_change(struct Qdisc *sch, struct nlattr *opt,
return -EINVAL;
}
- max_P = tb[TCA_GRED_MAX_P] ? nla_get_u32(tb[TCA_GRED_MAX_P]) : 0;
+ max_P = nla_get_u32_default(tb[TCA_GRED_MAX_P], 0);
ctl = nla_data(tb[TCA_GRED_PARMS]);
stab = nla_data(tb[TCA_GRED_STAB]);
diff --git a/net/sched/sch_htb.c b/net/sched/sch_htb.c
index ff3de37874e4..c31bc5489bdd 100644
--- a/net/sched/sch_htb.c
+++ b/net/sched/sch_htb.c
@@ -1810,8 +1810,8 @@ static int htb_change_class(struct Qdisc *sch, u32 classid,
qdisc_put_rtab(qdisc_get_rtab(&hopt->ceil, tb[TCA_HTB_CTAB],
NULL));
- rate64 = tb[TCA_HTB_RATE64] ? nla_get_u64(tb[TCA_HTB_RATE64]) : 0;
- ceil64 = tb[TCA_HTB_CEIL64] ? nla_get_u64(tb[TCA_HTB_CEIL64]) : 0;
+ rate64 = nla_get_u64_default(tb[TCA_HTB_RATE64], 0);
+ ceil64 = nla_get_u64_default(tb[TCA_HTB_CEIL64], 0);
if (!cl) { /* new class */
struct net_device *dev = qdisc_dev(sch);
diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c
index 39382ee1e331..fe6fed291a7b 100644
--- a/net/sched/sch_netem.c
+++ b/net/sched/sch_netem.c
@@ -17,6 +17,7 @@
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <linux/vmalloc.h>
+#include <linux/prandom.h>
#include <linux/rtnetlink.h>
#include <linux/reciprocal_div.h>
#include <linux/rbtree.h>
diff --git a/net/sched/sch_qfq.c b/net/sched/sch_qfq.c
index d584c0c25899..6a07cdbdb9e1 100644
--- a/net/sched/sch_qfq.c
+++ b/net/sched/sch_qfq.c
@@ -421,10 +421,7 @@ static int qfq_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
if (err < 0)
return err;
- if (tb[TCA_QFQ_WEIGHT])
- weight = nla_get_u32(tb[TCA_QFQ_WEIGHT]);
- else
- weight = 1;
+ weight = nla_get_u32_default(tb[TCA_QFQ_WEIGHT], 1);
if (tb[TCA_QFQ_LMAX]) {
lmax = nla_get_u32(tb[TCA_QFQ_LMAX]);
diff --git a/net/sched/sch_red.c b/net/sched/sch_red.c
index b5f096588fae..6029bc29b51e 100644
--- a/net/sched/sch_red.c
+++ b/net/sched/sch_red.c
@@ -248,7 +248,7 @@ static int __red_change(struct Qdisc *sch, struct nlattr **tb,
tb[TCA_RED_STAB] == NULL)
return -EINVAL;
- max_P = tb[TCA_RED_MAX_P] ? nla_get_u32(tb[TCA_RED_MAX_P]) : 0;
+ max_P = nla_get_u32_default(tb[TCA_RED_MAX_P], 0);
ctl = nla_data(tb[TCA_RED_PARMS]);
stab = nla_data(tb[TCA_RED_STAB]);
diff --git a/net/sched/sch_sfq.c b/net/sched/sch_sfq.c
index 3b9245a3c767..a4b8296a2fa1 100644
--- a/net/sched/sch_sfq.c
+++ b/net/sched/sch_sfq.c
@@ -77,12 +77,6 @@
#define SFQ_EMPTY_SLOT 0xffff
#define SFQ_DEFAULT_HASH_DIVISOR 1024
-/* We use 16 bits to store allot, and want to handle packets up to 64K
- * Scale allot by 8 (1<<3) so that no overflow occurs.
- */
-#define SFQ_ALLOT_SHIFT 3
-#define SFQ_ALLOT_SIZE(X) DIV_ROUND_UP(X, 1 << SFQ_ALLOT_SHIFT)
-
/* This type should contain at least SFQ_MAX_DEPTH + 1 + SFQ_MAX_FLOWS values */
typedef u16 sfq_index;
@@ -104,7 +98,7 @@ struct sfq_slot {
sfq_index next; /* next slot in sfq RR chain */
struct sfq_head dep; /* anchor in dep[] chains */
unsigned short hash; /* hash value (index in ht[]) */
- short allot; /* credit for this slot */
+ int allot; /* credit for this slot */
unsigned int backlog;
struct red_vars vars;
@@ -120,7 +114,6 @@ struct sfq_sched_data {
siphash_key_t perturbation;
u8 cur_depth; /* depth of longest slot */
u8 flags;
- unsigned short scaled_quantum; /* SFQ_ALLOT_SIZE(quantum) */
struct tcf_proto __rcu *filter_list;
struct tcf_block *block;
sfq_index *ht; /* Hash table ('divisor' slots) */
@@ -456,7 +449,7 @@ enqueue:
*/
q->tail = slot;
/* We could use a bigger initial quantum for new flows */
- slot->allot = q->scaled_quantum;
+ slot->allot = q->quantum;
}
if (++sch->q.qlen <= q->limit)
return NET_XMIT_SUCCESS;
@@ -493,7 +486,7 @@ next_slot:
slot = &q->slots[a];
if (slot->allot <= 0) {
q->tail = slot;
- slot->allot += q->scaled_quantum;
+ slot->allot += q->quantum;
goto next_slot;
}
skb = slot_dequeue_head(slot);
@@ -512,7 +505,7 @@ next_slot:
}
q->tail->next = next_a;
} else {
- slot->allot -= SFQ_ALLOT_SIZE(qdisc_pkt_len(skb));
+ slot->allot -= qdisc_pkt_len(skb);
}
return skb;
}
@@ -595,7 +588,7 @@ drop:
q->tail->next = x;
}
q->tail = slot;
- slot->allot = q->scaled_quantum;
+ slot->allot = q->quantum;
}
}
sch->q.qlen -= dropped;
@@ -628,7 +621,8 @@ static void sfq_perturbation(struct timer_list *t)
rcu_read_unlock();
}
-static int sfq_change(struct Qdisc *sch, struct nlattr *opt)
+static int sfq_change(struct Qdisc *sch, struct nlattr *opt,
+ struct netlink_ext_ack *extack)
{
struct sfq_sched_data *q = qdisc_priv(sch);
struct tc_sfq_qopt *ctl = nla_data(opt);
@@ -646,14 +640,10 @@ static int sfq_change(struct Qdisc *sch, struct nlattr *opt)
(!is_power_of_2(ctl->divisor) || ctl->divisor > 65536))
return -EINVAL;
- /* slot->allot is a short, make sure quantum is not too big. */
- if (ctl->quantum) {
- unsigned int scaled = SFQ_ALLOT_SIZE(ctl->quantum);
-
- if (scaled <= 0 || scaled > SHRT_MAX)
- return -EINVAL;
+ if ((int)ctl->quantum < 0) {
+ NL_SET_ERR_MSG_MOD(extack, "invalid quantum");
+ return -EINVAL;
}
-
if (ctl_v1 && !red_check_params(ctl_v1->qth_min, ctl_v1->qth_max,
ctl_v1->Wlog, ctl_v1->Scell_log, NULL))
return -EINVAL;
@@ -663,10 +653,8 @@ static int sfq_change(struct Qdisc *sch, struct nlattr *opt)
return -ENOMEM;
}
sch_tree_lock(sch);
- if (ctl->quantum) {
+ if (ctl->quantum)
q->quantum = ctl->quantum;
- q->scaled_quantum = SFQ_ALLOT_SIZE(q->quantum);
- }
WRITE_ONCE(q->perturb_period, ctl->perturb_period * HZ);
if (ctl->flows)
q->maxflows = min_t(u32, ctl->flows, SFQ_MAX_FLOWS);
@@ -762,12 +750,11 @@ static int sfq_init(struct Qdisc *sch, struct nlattr *opt,
q->divisor = SFQ_DEFAULT_HASH_DIVISOR;
q->maxflows = SFQ_DEFAULT_FLOWS;
q->quantum = psched_mtu(qdisc_dev(sch));
- q->scaled_quantum = SFQ_ALLOT_SIZE(q->quantum);
q->perturb_period = 0;
get_random_bytes(&q->perturbation, sizeof(q->perturbation));
if (opt) {
- int err = sfq_change(sch, opt);
+ int err = sfq_change(sch, opt, extack);
if (err)
return err;
}
@@ -878,7 +865,7 @@ static int sfq_dump_class_stats(struct Qdisc *sch, unsigned long cl,
if (idx != SFQ_EMPTY_SLOT) {
const struct sfq_slot *slot = &q->slots[idx];
- xstats.allot = slot->allot << SFQ_ALLOT_SHIFT;
+ xstats.allot = slot->allot;
qs.qlen = slot->qlen;
qs.backlog = slot->backlog;
}
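
The sch_sfq change works because allot grows from a 16-bit short to an int:
the old code had to divide byte credits by 8 (SFQ_ALLOT_SHIFT) so that a 64K
packet still fit below SHRT_MAX, at the cost of DIV_ROUND_UP() rounding.
With an int, credits are exact byte counts. A sketch of the old constraint:

	/* Hedged sketch: why the old scaling existed. */
	_Static_assert(65536 > 32767, "a 64K packet overflows a short");
	_Static_assert(65536 / (1 << 3) <= 32767, "scaled by 8, it fits");
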
diff --git a/net/sched/sch_taprio.c b/net/sched/sch_taprio.c
index 8623dc0bafc0..a68e17891b0b 100644
--- a/net/sched/sch_taprio.c
+++ b/net/sched/sch_taprio.c
@@ -1828,7 +1828,7 @@ static int taprio_change(struct Qdisc *sch, struct nlattr *opt,
* zero; (2) the 'flags' of a "running" taprio instance cannot be
* changed.
*/
- taprio_flags = tb[TCA_TAPRIO_ATTR_FLAGS] ? nla_get_u32(tb[TCA_TAPRIO_ATTR_FLAGS]) : 0;
+ taprio_flags = nla_get_u32_default(tb[TCA_TAPRIO_ATTR_FLAGS], 0);
/* txtime-assist and full offload are mutually exclusive */
if ((taprio_flags & TCA_TAPRIO_ATTR_FLAG_TXTIME_ASSIST) &&