45 files changed, 264 insertions, 120 deletions
diff --git a/MAINTAINERS b/MAINTAINERS index eb8776f2f061..e55a4d47324c 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -3833,6 +3833,7 @@ F: kernel/bpf/dispatcher.c F: kernel/bpf/trampoline.c F: include/linux/bpf* F: include/linux/filter.h +F: include/linux/tnum.h BPF [BTF] M: Martin KaFai Lau <martin.lau@linux.dev> diff --git a/drivers/isdn/mISDN/l1oip.h b/drivers/isdn/mISDN/l1oip.h index 7ea10db20e3a..48133d022812 100644 --- a/drivers/isdn/mISDN/l1oip.h +++ b/drivers/isdn/mISDN/l1oip.h @@ -59,6 +59,7 @@ struct l1oip { int bundle; /* bundle channels in one frm */ int codec; /* codec to use for transmis. */ int limit; /* limit number of bchannels */ + bool shutdown; /* if card is released */ /* timer */ struct timer_list keep_tl; diff --git a/drivers/isdn/mISDN/l1oip_core.c b/drivers/isdn/mISDN/l1oip_core.c index 2c40412466e6..a77195e378b7 100644 --- a/drivers/isdn/mISDN/l1oip_core.c +++ b/drivers/isdn/mISDN/l1oip_core.c @@ -275,7 +275,7 @@ l1oip_socket_send(struct l1oip *hc, u8 localcodec, u8 channel, u32 chanmask, p = frame; /* restart timer */ - if (time_before(hc->keep_tl.expires, jiffies + 5 * HZ)) + if (time_before(hc->keep_tl.expires, jiffies + 5 * HZ) && !hc->shutdown) mod_timer(&hc->keep_tl, jiffies + L1OIP_KEEPALIVE * HZ); else hc->keep_tl.expires = jiffies + L1OIP_KEEPALIVE * HZ; @@ -601,7 +601,9 @@ multiframe: goto multiframe; /* restart timer */ - if (time_before(hc->timeout_tl.expires, jiffies + 5 * HZ) || !hc->timeout_on) { + if ((time_before(hc->timeout_tl.expires, jiffies + 5 * HZ) || + !hc->timeout_on) && + !hc->shutdown) { hc->timeout_on = 1; mod_timer(&hc->timeout_tl, jiffies + L1OIP_TIMEOUT * HZ); } else /* only adjust timer */ @@ -1232,11 +1234,10 @@ release_card(struct l1oip *hc) { int ch; - if (timer_pending(&hc->keep_tl)) - del_timer(&hc->keep_tl); + hc->shutdown = true; - if (timer_pending(&hc->timeout_tl)) - del_timer(&hc->timeout_tl); + del_timer_sync(&hc->keep_tl); + del_timer_sync(&hc->timeout_tl); cancel_work_sync(&hc->workq); diff --git a/drivers/net/ethernet/atheros/alx/main.c b/drivers/net/ethernet/atheros/alx/main.c index 80bb8424e9a4..d30d11872719 100644 --- a/drivers/net/ethernet/atheros/alx/main.c +++ b/drivers/net/ethernet/atheros/alx/main.c @@ -1912,11 +1912,14 @@ static int alx_suspend(struct device *dev) if (!netif_running(alx->dev)) return 0; + + rtnl_lock(); netif_device_detach(alx->dev); mutex_lock(&alx->mtx); __alx_stop(alx); mutex_unlock(&alx->mtx); + rtnl_unlock(); return 0; } @@ -1927,6 +1930,7 @@ static int alx_resume(struct device *dev) struct alx_hw *hw = &alx->hw; int err; + rtnl_lock(); mutex_lock(&alx->mtx); alx_reset_phy(hw); @@ -1943,6 +1947,7 @@ static int alx_resume(struct device *dev) unlock: mutex_unlock(&alx->mtx); + rtnl_unlock(); return err; } diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c index b6eb3ce976f0..16c490692f42 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c @@ -787,6 +787,7 @@ static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp, BNX2X_ERR("skb_put is about to fail... 
pad %d len %d rx_buf_size %d\n", pad, len, fp->rx_buf_size); bnx2x_panic(); + bnx2x_frag_free(fp, new_data); return; } #endif diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2.h b/drivers/net/ethernet/marvell/mvpp2/mvpp2.h index ad73a488fc5f..11e603686a27 100644 --- a/drivers/net/ethernet/marvell/mvpp2/mvpp2.h +++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2.h @@ -1530,6 +1530,7 @@ u32 mvpp2_read(struct mvpp2 *priv, u32 offset); void mvpp2_dbgfs_init(struct mvpp2 *priv, const char *name); void mvpp2_dbgfs_cleanup(struct mvpp2 *priv); +void mvpp2_dbgfs_exit(void); void mvpp23_rx_fifo_fc_en(struct mvpp2 *priv, int port, bool en); diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_debugfs.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_debugfs.c index 4a3baa7e0142..75e83ea2a926 100644 --- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_debugfs.c +++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_debugfs.c @@ -691,6 +691,13 @@ static int mvpp2_dbgfs_port_init(struct dentry *parent, return 0; } +static struct dentry *mvpp2_root; + +void mvpp2_dbgfs_exit(void) +{ + debugfs_remove(mvpp2_root); +} + void mvpp2_dbgfs_cleanup(struct mvpp2 *priv) { debugfs_remove_recursive(priv->dbgfs_dir); @@ -700,10 +707,9 @@ void mvpp2_dbgfs_cleanup(struct mvpp2 *priv) void mvpp2_dbgfs_init(struct mvpp2 *priv, const char *name) { - struct dentry *mvpp2_dir, *mvpp2_root; + struct dentry *mvpp2_dir; int ret, i; - mvpp2_root = debugfs_lookup(MVPP2_DRIVER_NAME, NULL); if (!mvpp2_root) mvpp2_root = debugfs_create_dir(MVPP2_DRIVER_NAME, NULL); diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c index daa890d6993e..eb0fb8128096 100644 --- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c +++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c @@ -7704,7 +7704,18 @@ static struct platform_driver mvpp2_driver = { }, }; -module_platform_driver(mvpp2_driver); +static int __init mvpp2_driver_init(void) +{ + return platform_driver_register(&mvpp2_driver); +} +module_init(mvpp2_driver_init); + +static void __exit mvpp2_driver_exit(void) +{ + platform_driver_unregister(&mvpp2_driver); + mvpp2_dbgfs_exit(); +} +module_exit(mvpp2_driver_exit); MODULE_DESCRIPTION("Marvell PPv2 Ethernet Driver - www.marvell.com"); MODULE_AUTHOR("Marcin Wojtas <mw@semihalf.com>"); diff --git a/drivers/net/ethernet/marvell/prestera/prestera_acl.c b/drivers/net/ethernet/marvell/prestera/prestera_acl.c index 0fa7541f0d7e..cba89fda504b 100644 --- a/drivers/net/ethernet/marvell/prestera/prestera_acl.c +++ b/drivers/net/ethernet/marvell/prestera/prestera_acl.c @@ -185,10 +185,14 @@ err_rhashtable_init: return ERR_PTR(err); } -void prestera_acl_ruleset_keymask_set(struct prestera_acl_ruleset *ruleset, - void *keymask) +int prestera_acl_ruleset_keymask_set(struct prestera_acl_ruleset *ruleset, + void *keymask) { ruleset->keymask = kmemdup(keymask, ACL_KEYMASK_SIZE, GFP_KERNEL); + if (!ruleset->keymask) + return -ENOMEM; + + return 0; } int prestera_acl_ruleset_offload(struct prestera_acl_ruleset *ruleset) diff --git a/drivers/net/ethernet/marvell/prestera/prestera_acl.h b/drivers/net/ethernet/marvell/prestera/prestera_acl.h index d45ee88e8287..a35cc0609a1d 100644 --- a/drivers/net/ethernet/marvell/prestera/prestera_acl.h +++ b/drivers/net/ethernet/marvell/prestera/prestera_acl.h @@ -185,8 +185,8 @@ struct prestera_acl_ruleset * prestera_acl_ruleset_lookup(struct prestera_acl *acl, struct prestera_flow_block *block, u32 chain_index); -void prestera_acl_ruleset_keymask_set(struct prestera_acl_ruleset *ruleset, 
- void *keymask); +int prestera_acl_ruleset_keymask_set(struct prestera_acl_ruleset *ruleset, + void *keymask); bool prestera_acl_ruleset_is_offload(struct prestera_acl_ruleset *ruleset); int prestera_acl_ruleset_offload(struct prestera_acl_ruleset *ruleset); void prestera_acl_ruleset_put(struct prestera_acl_ruleset *ruleset); diff --git a/drivers/net/ethernet/marvell/prestera/prestera_flower.c b/drivers/net/ethernet/marvell/prestera/prestera_flower.c index f38e8b3a543e..91a478b75cbf 100644 --- a/drivers/net/ethernet/marvell/prestera/prestera_flower.c +++ b/drivers/net/ethernet/marvell/prestera/prestera_flower.c @@ -500,7 +500,9 @@ int prestera_flower_tmplt_create(struct prestera_flow_block *block, } /* preserve keymask/template to this ruleset */ - prestera_acl_ruleset_keymask_set(ruleset, rule.re_key.match.mask); + err = prestera_acl_ruleset_keymask_set(ruleset, rule.re_key.match.mask); + if (err) + goto err_ruleset_keymask_set; /* skip error, as it is not possible to reject template operation, * so, keep the reference to the ruleset for rules to be added @@ -516,6 +518,8 @@ int prestera_flower_tmplt_create(struct prestera_flow_block *block, list_add_rcu(&template->list, &block->template_list); return 0; +err_ruleset_keymask_set: + prestera_acl_ruleset_put(ruleset); err_ruleset_get: kfree(template); err_malloc: diff --git a/drivers/net/ethernet/mediatek/mtk_ppe.c b/drivers/net/ethernet/mediatek/mtk_ppe.c index 887f430734f7..ae00e572390d 100644 --- a/drivers/net/ethernet/mediatek/mtk_ppe.c +++ b/drivers/net/ethernet/mediatek/mtk_ppe.c @@ -442,7 +442,7 @@ __mtk_foe_entry_clear(struct mtk_ppe *ppe, struct mtk_flow_entry *entry) struct mtk_foe_entry *hwe = mtk_foe_get_entry(ppe, entry->hash); hwe->ib1 &= ~MTK_FOE_IB1_STATE; - hwe->ib1 |= FIELD_PREP(MTK_FOE_IB1_STATE, MTK_FOE_STATE_UNBIND); + hwe->ib1 |= FIELD_PREP(MTK_FOE_IB1_STATE, MTK_FOE_STATE_INVALID); dma_wmb(); } entry->hash = 0xffff; diff --git a/drivers/net/ethernet/microchip/lan743x_ptp.c b/drivers/net/ethernet/microchip/lan743x_ptp.c index 6a11e2ceb013..da3ea905adbb 100644 --- a/drivers/net/ethernet/microchip/lan743x_ptp.c +++ b/drivers/net/ethernet/microchip/lan743x_ptp.c @@ -1049,6 +1049,10 @@ static int lan743x_ptpci_verify_pin_config(struct ptp_clock_info *ptp, enum ptp_pin_function func, unsigned int chan) { + struct lan743x_ptp *lan_ptp = + container_of(ptp, struct lan743x_ptp, ptp_clock_info); + struct lan743x_adapter *adapter = + container_of(lan_ptp, struct lan743x_adapter, ptp); int result = 0; /* Confirm the requested function is supported. 
Parameter @@ -1057,7 +1061,10 @@ static int lan743x_ptpci_verify_pin_config(struct ptp_clock_info *ptp, switch (func) { case PTP_PF_NONE: case PTP_PF_PEROUT: + break; case PTP_PF_EXTTS: + if (!adapter->is_pci11x1x) + result = -1; break; case PTP_PF_PHYSYNC: default: diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_main.h b/drivers/net/ethernet/microchip/sparx5/sparx5_main.h index 8b42cad0e49c..7a83222caa73 100644 --- a/drivers/net/ethernet/microchip/sparx5/sparx5_main.h +++ b/drivers/net/ethernet/microchip/sparx5/sparx5_main.h @@ -305,7 +305,7 @@ struct frame_info { void sparx5_xtr_flush(struct sparx5 *sparx5, u8 grp); void sparx5_ifh_parse(u32 *ifh, struct frame_info *info); irqreturn_t sparx5_xtr_handler(int irq, void *_priv); -int sparx5_port_xmit_impl(struct sk_buff *skb, struct net_device *dev); +netdev_tx_t sparx5_port_xmit_impl(struct sk_buff *skb, struct net_device *dev); int sparx5_manual_injection_mode(struct sparx5 *sparx5); void sparx5_port_inj_timer_setup(struct sparx5_port *port); diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_packet.c b/drivers/net/ethernet/microchip/sparx5/sparx5_packet.c index 21844beba72d..83c16ca5b30f 100644 --- a/drivers/net/ethernet/microchip/sparx5/sparx5_packet.c +++ b/drivers/net/ethernet/microchip/sparx5/sparx5_packet.c @@ -222,13 +222,13 @@ static int sparx5_inject(struct sparx5 *sparx5, return NETDEV_TX_OK; } -int sparx5_port_xmit_impl(struct sk_buff *skb, struct net_device *dev) +netdev_tx_t sparx5_port_xmit_impl(struct sk_buff *skb, struct net_device *dev) { struct net_device_stats *stats = &dev->stats; struct sparx5_port *port = netdev_priv(dev); struct sparx5 *sparx5 = port->sparx5; u32 ifh[IFH_LEN]; - int ret; + netdev_tx_t ret; memset(ifh, 0, IFH_LEN * 4); sparx5_set_port_ifh(ifh, port->portno); diff --git a/drivers/net/ethernet/sunplus/spl2sw_driver.c b/drivers/net/ethernet/sunplus/spl2sw_driver.c index b89d72242002..9be585237277 100644 --- a/drivers/net/ethernet/sunplus/spl2sw_driver.c +++ b/drivers/net/ethernet/sunplus/spl2sw_driver.c @@ -249,8 +249,8 @@ static int spl2sw_nvmem_get_mac_address(struct device *dev, struct device_node * /* Check if mac address is valid */ if (!is_valid_ether_addr(mac)) { - kfree(mac); dev_info(dev, "Invalid mac address in nvmem (%pM)!\n", mac); + kfree(mac); return -EINVAL; } diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c index a51d8ded60f3..a481a1d831e2 100644 --- a/drivers/net/usb/r8152.c +++ b/drivers/net/usb/r8152.c @@ -1875,7 +1875,9 @@ static void intr_callback(struct urb *urb) "Stop submitting intr, status %d\n", status); return; case -EOVERFLOW: - netif_info(tp, intr, tp->netdev, "intr status -EOVERFLOW\n"); + if (net_ratelimit()) + netif_info(tp, intr, tp->netdev, + "intr status -EOVERFLOW\n"); goto resubmit; /* -EPIPE: should clear the halt */ default: diff --git a/drivers/net/wwan/iosm/iosm_ipc_wwan.c b/drivers/net/wwan/iosm/iosm_ipc_wwan.c index 03757ad21d51..2f1f8b5d5b59 100644 --- a/drivers/net/wwan/iosm/iosm_ipc_wwan.c +++ b/drivers/net/wwan/iosm/iosm_ipc_wwan.c @@ -323,15 +323,16 @@ struct iosm_wwan *ipc_wwan_init(struct iosm_imem *ipc_imem, struct device *dev) ipc_wwan->dev = dev; ipc_wwan->ipc_imem = ipc_imem; + mutex_init(&ipc_wwan->if_mutex); + /* WWAN core will create a netdev for the default IP MUX channel */ if (wwan_register_ops(ipc_wwan->dev, &iosm_wwan_ops, ipc_wwan, IP_MUX_SESSION_DEFAULT)) { + mutex_destroy(&ipc_wwan->if_mutex); kfree(ipc_wwan); return NULL; } - mutex_init(&ipc_wwan->if_mutex); - return ipc_wwan; } diff --git 
a/drivers/vhost/vsock.c b/drivers/vhost/vsock.c index 368330417bde..5703775af129 100644 --- a/drivers/vhost/vsock.c +++ b/drivers/vhost/vsock.c @@ -393,7 +393,7 @@ vhost_vsock_alloc_pkt(struct vhost_virtqueue *vq, return NULL; } - pkt->buf = kmalloc(pkt->len, GFP_KERNEL); + pkt->buf = kvmalloc(pkt->len, GFP_KERNEL); if (!pkt->buf) { kfree(pkt); return NULL; diff --git a/include/linux/tcp.h b/include/linux/tcp.h index 3bdf687e2fb3..41b1da621a45 100644 --- a/include/linux/tcp.h +++ b/include/linux/tcp.h @@ -295,7 +295,7 @@ struct tcp_sock { u32 packets_out; /* Packets which are "in flight" */ u32 retrans_out; /* Retransmitted packets out */ u32 max_packets_out; /* max packets_out in last window */ - u32 max_packets_seq; /* right edge of max_packets_out flight */ + u32 cwnd_usage_seq; /* right edge of cwnd usage tracking flight */ u16 urg_data; /* Saved octet of OOB data and control flags */ u8 ecn_flags; /* ECN status bits. */ diff --git a/include/net/tcp.h b/include/net/tcp.h index 4f71cc15ff8e..14d45661a84d 100644 --- a/include/net/tcp.h +++ b/include/net/tcp.h @@ -1302,11 +1302,14 @@ static inline bool tcp_is_cwnd_limited(const struct sock *sk) { const struct tcp_sock *tp = tcp_sk(sk); + if (tp->is_cwnd_limited) + return true; + /* If in slow start, ensure cwnd grows to twice what was ACKed. */ if (tcp_in_slow_start(tp)) return tcp_snd_cwnd(tp) < 2 * tp->max_packets_out; - return tp->is_cwnd_limited; + return false; } /* BBR congestion control needs pacing. diff --git a/include/net/xsk_buff_pool.h b/include/net/xsk_buff_pool.h index 647722e847b4..f787c3f524b0 100644 --- a/include/net/xsk_buff_pool.h +++ b/include/net/xsk_buff_pool.h @@ -95,7 +95,7 @@ struct xsk_buff_pool *xp_create_and_assign_umem(struct xdp_sock *xs, struct xdp_umem *umem); int xp_assign_dev(struct xsk_buff_pool *pool, struct net_device *dev, u16 queue_id, u16 flags); -int xp_assign_dev_shared(struct xsk_buff_pool *pool, struct xdp_umem *umem, +int xp_assign_dev_shared(struct xsk_buff_pool *pool, struct xdp_sock *umem_xs, struct net_device *dev, u16 queue_id); int xp_alloc_tx_descs(struct xsk_buff_pool *pool, struct xdp_sock *xs); void xp_destroy(struct xsk_buff_pool *pool); diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h index 3075018a4ef8..51b9aa640ad2 100644 --- a/include/uapi/linux/bpf.h +++ b/include/uapi/linux/bpf.h @@ -1259,7 +1259,7 @@ enum { /* Query effective (directly attached + inherited from ancestor cgroups) * programs that will be executed for events within a cgroup. - * attach_flags with this flag are returned only for directly attached programs. + * attach_flags with this flag are always returned 0. */ #define BPF_F_QUERY_EFFECTIVE (1U << 0) @@ -1458,7 +1458,10 @@ union bpf_attr { __u32 attach_flags; __aligned_u64 prog_ids; __u32 prog_cnt; - __aligned_u64 prog_attach_flags; /* output: per-program attach_flags */ + /* output: per-program attach_flags. + * not allowed to be set during effective query. 
+ */ + __aligned_u64 prog_attach_flags; } query; struct { /* anonymous struct used by BPF_RAW_TRACEPOINT_OPEN command */ diff --git a/kernel/bpf/btf.c b/kernel/bpf/btf.c index a44ad4b347ff..eba603cec2c5 100644 --- a/kernel/bpf/btf.c +++ b/kernel/bpf/btf.c @@ -3128,7 +3128,7 @@ static int btf_struct_resolve(struct btf_verifier_env *env, if (v->next_member) { const struct btf_type *last_member_type; const struct btf_member *last_member; - u16 last_member_type_id; + u32 last_member_type_id; last_member = btf_type_member(v->t) + v->next_member - 1; last_member_type_id = last_member->type; diff --git a/kernel/bpf/cgroup.c b/kernel/bpf/cgroup.c index 00c7f864900e..bf2fdb33fb31 100644 --- a/kernel/bpf/cgroup.c +++ b/kernel/bpf/cgroup.c @@ -1020,6 +1020,7 @@ static int __cgroup_bpf_query(struct cgroup *cgrp, const union bpf_attr *attr, union bpf_attr __user *uattr) { __u32 __user *prog_attach_flags = u64_to_user_ptr(attr->query.prog_attach_flags); + bool effective_query = attr->query.query_flags & BPF_F_QUERY_EFFECTIVE; __u32 __user *prog_ids = u64_to_user_ptr(attr->query.prog_ids); enum bpf_attach_type type = attr->query.attach_type; enum cgroup_bpf_attach_type from_atype, to_atype; @@ -1029,8 +1030,12 @@ static int __cgroup_bpf_query(struct cgroup *cgrp, const union bpf_attr *attr, int total_cnt = 0; u32 flags; + if (effective_query && prog_attach_flags) + return -EINVAL; + if (type == BPF_LSM_CGROUP) { - if (attr->query.prog_cnt && prog_ids && !prog_attach_flags) + if (!effective_query && attr->query.prog_cnt && + prog_ids && !prog_attach_flags) return -EINVAL; from_atype = CGROUP_LSM_START; @@ -1045,7 +1050,7 @@ static int __cgroup_bpf_query(struct cgroup *cgrp, const union bpf_attr *attr, } for (atype = from_atype; atype <= to_atype; atype++) { - if (attr->query.query_flags & BPF_F_QUERY_EFFECTIVE) { + if (effective_query) { effective = rcu_dereference_protected(cgrp->bpf.effective[atype], lockdep_is_held(&cgroup_mutex)); total_cnt += bpf_prog_array_length(effective); @@ -1054,6 +1059,8 @@ static int __cgroup_bpf_query(struct cgroup *cgrp, const union bpf_attr *attr, } } + /* always output uattr->query.attach_flags as 0 during effective query */ + flags = effective_query ? 
0 : flags; if (copy_to_user(&uattr->query.attach_flags, &flags, sizeof(flags))) return -EFAULT; if (copy_to_user(&uattr->query.prog_cnt, &total_cnt, sizeof(total_cnt))) @@ -1068,7 +1075,7 @@ static int __cgroup_bpf_query(struct cgroup *cgrp, const union bpf_attr *attr, } for (atype = from_atype; atype <= to_atype && total_cnt; atype++) { - if (attr->query.query_flags & BPF_F_QUERY_EFFECTIVE) { + if (effective_query) { effective = rcu_dereference_protected(cgrp->bpf.effective[atype], lockdep_is_held(&cgroup_mutex)); cnt = min_t(int, bpf_prog_array_length(effective), total_cnt); @@ -1090,15 +1097,16 @@ static int __cgroup_bpf_query(struct cgroup *cgrp, const union bpf_attr *attr, if (++i == cnt) break; } - } - if (prog_attach_flags) { - flags = cgrp->bpf.flags[atype]; + if (prog_attach_flags) { + flags = cgrp->bpf.flags[atype]; - for (i = 0; i < cnt; i++) - if (copy_to_user(prog_attach_flags + i, &flags, sizeof(flags))) - return -EFAULT; - prog_attach_flags += cnt; + for (i = 0; i < cnt; i++) + if (copy_to_user(prog_attach_flags + i, + &flags, sizeof(flags))) + return -EFAULT; + prog_attach_flags += cnt; + } } prog_ids += cnt; diff --git a/kernel/bpf/helpers.c b/kernel/bpf/helpers.c index b069517a3da0..a6b04faed282 100644 --- a/kernel/bpf/helpers.c +++ b/kernel/bpf/helpers.c @@ -1609,26 +1609,12 @@ bpf_base_func_proto(enum bpf_func_id func_id) return &bpf_ringbuf_discard_proto; case BPF_FUNC_ringbuf_query: return &bpf_ringbuf_query_proto; - case BPF_FUNC_ringbuf_reserve_dynptr: - return &bpf_ringbuf_reserve_dynptr_proto; - case BPF_FUNC_ringbuf_submit_dynptr: - return &bpf_ringbuf_submit_dynptr_proto; - case BPF_FUNC_ringbuf_discard_dynptr: - return &bpf_ringbuf_discard_dynptr_proto; case BPF_FUNC_strncmp: return &bpf_strncmp_proto; case BPF_FUNC_strtol: return &bpf_strtol_proto; case BPF_FUNC_strtoul: return &bpf_strtoul_proto; - case BPF_FUNC_dynptr_from_mem: - return &bpf_dynptr_from_mem_proto; - case BPF_FUNC_dynptr_read: - return &bpf_dynptr_read_proto; - case BPF_FUNC_dynptr_write: - return &bpf_dynptr_write_proto; - case BPF_FUNC_dynptr_data: - return &bpf_dynptr_data_proto; default: break; } @@ -1663,6 +1649,20 @@ bpf_base_func_proto(enum bpf_func_id func_id) return &bpf_loop_proto; case BPF_FUNC_user_ringbuf_drain: return &bpf_user_ringbuf_drain_proto; + case BPF_FUNC_ringbuf_reserve_dynptr: + return &bpf_ringbuf_reserve_dynptr_proto; + case BPF_FUNC_ringbuf_submit_dynptr: + return &bpf_ringbuf_submit_dynptr_proto; + case BPF_FUNC_ringbuf_discard_dynptr: + return &bpf_ringbuf_discard_dynptr_proto; + case BPF_FUNC_dynptr_from_mem: + return &bpf_dynptr_from_mem_proto; + case BPF_FUNC_dynptr_read: + return &bpf_dynptr_read_proto; + case BPF_FUNC_dynptr_write: + return &bpf_dynptr_write_proto; + case BPF_FUNC_dynptr_data: + return &bpf_dynptr_data_proto; default: break; } diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c index 372fad5ef3d3..7b373a5e861f 100644 --- a/kernel/bpf/syscall.c +++ b/kernel/bpf/syscall.c @@ -4405,7 +4405,9 @@ static int bpf_task_fd_query(const union bpf_attr *attr, if (attr->task_fd_query.flags != 0) return -EINVAL; + rcu_read_lock(); task = get_pid_task(find_vpid(pid), PIDTYPE_PID); + rcu_read_unlock(); if (!task) return -ENOENT; diff --git a/net/ieee802154/socket.c b/net/ieee802154/socket.c index 7889e1ef7fad..cbd0e2ac4ffe 100644 --- a/net/ieee802154/socket.c +++ b/net/ieee802154/socket.c @@ -251,6 +251,9 @@ static int raw_sendmsg(struct sock *sk, struct msghdr *msg, size_t size) return -EOPNOTSUPP; } + if (!size) + return -EINVAL; + lock_sock(sk); if 
(!sk->sk_bound_dev_if) dev = dev_getfirstbyhwtype(sock_net(sk), ARPHRD_IEEE802154); diff --git a/net/ipv4/esp4_offload.c b/net/ipv4/esp4_offload.c index 935026f4c807..170152772d33 100644 --- a/net/ipv4/esp4_offload.c +++ b/net/ipv4/esp4_offload.c @@ -110,7 +110,10 @@ static struct sk_buff *xfrm4_tunnel_gso_segment(struct xfrm_state *x, struct sk_buff *skb, netdev_features_t features) { - return skb_eth_gso_segment(skb, features, htons(ETH_P_IP)); + __be16 type = x->inner_mode.family == AF_INET6 ? htons(ETH_P_IPV6) + : htons(ETH_P_IP); + + return skb_eth_gso_segment(skb, features, type); } static struct sk_buff *xfrm4_transport_gso_segment(struct xfrm_state *x, diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c index 648b5c54bb32..0c51abeee172 100644 --- a/net/ipv4/tcp.c +++ b/net/ipv4/tcp.c @@ -3136,6 +3136,8 @@ int tcp_disconnect(struct sock *sk, int flags) tp->snd_ssthresh = TCP_INFINITE_SSTHRESH; tcp_snd_cwnd_set(tp, TCP_INIT_CWND); tp->snd_cwnd_cnt = 0; + tp->is_cwnd_limited = 0; + tp->max_packets_out = 0; tp->window_clamp = 0; tp->delivered = 0; tp->delivered_ce = 0; diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c index 290019de766d..c69f4d966024 100644 --- a/net/ipv4/tcp_output.c +++ b/net/ipv4/tcp_output.c @@ -1875,15 +1875,20 @@ static void tcp_cwnd_validate(struct sock *sk, bool is_cwnd_limited) const struct tcp_congestion_ops *ca_ops = inet_csk(sk)->icsk_ca_ops; struct tcp_sock *tp = tcp_sk(sk); - /* Track the maximum number of outstanding packets in each - * window, and remember whether we were cwnd-limited then. + /* Track the strongest available signal of the degree to which the cwnd + * is fully utilized. If cwnd-limited then remember that fact for the + * current window. If not cwnd-limited then track the maximum number of + * outstanding packets in the current window. (If cwnd-limited then we + * chose to not update tp->max_packets_out to avoid an extra else + * clause with no functional impact.) */ - if (!before(tp->snd_una, tp->max_packets_seq) || - tp->packets_out > tp->max_packets_out || - is_cwnd_limited) { - tp->max_packets_out = tp->packets_out; - tp->max_packets_seq = tp->snd_nxt; + if (!before(tp->snd_una, tp->cwnd_usage_seq) || + is_cwnd_limited || + (!tp->is_cwnd_limited && + tp->packets_out > tp->max_packets_out)) { tp->is_cwnd_limited = is_cwnd_limited; + tp->max_packets_out = tp->packets_out; + tp->cwnd_usage_seq = tp->snd_nxt; } if (tcp_is_cwnd_limited(sk)) { diff --git a/net/ipv6/esp6_offload.c b/net/ipv6/esp6_offload.c index 3a293838a91d..79d43548279c 100644 --- a/net/ipv6/esp6_offload.c +++ b/net/ipv6/esp6_offload.c @@ -145,7 +145,10 @@ static struct sk_buff *xfrm6_tunnel_gso_segment(struct xfrm_state *x, struct sk_buff *skb, netdev_features_t features) { - return skb_eth_gso_segment(skb, features, htons(ETH_P_IPV6)); + __be16 type = x->inner_mode.family == AF_INET ? 
htons(ETH_P_IP) + : htons(ETH_P_IPV6); + + return skb_eth_gso_segment(skb, features, type); } static struct sk_buff *xfrm6_transport_gso_segment(struct xfrm_state *x, diff --git a/net/rds/tcp.c b/net/rds/tcp.c index d8754366506a..4444fd82b66d 100644 --- a/net/rds/tcp.c +++ b/net/rds/tcp.c @@ -166,10 +166,10 @@ void rds_tcp_reset_callbacks(struct socket *sock, */ atomic_set(&cp->cp_state, RDS_CONN_RESETTING); wait_event(cp->cp_waitq, !test_bit(RDS_IN_XMIT, &cp->cp_flags)); - lock_sock(osock->sk); /* reset receive side state for rds_tcp_data_recv() for osock */ cancel_delayed_work_sync(&cp->cp_send_w); cancel_delayed_work_sync(&cp->cp_recv_w); + lock_sock(osock->sk); if (tc->t_tinc) { rds_inc_put(&tc->t_tinc->ti_inc); tc->t_tinc = NULL; diff --git a/net/sctp/auth.c b/net/sctp/auth.c index db6b7373d16c..34964145514e 100644 --- a/net/sctp/auth.c +++ b/net/sctp/auth.c @@ -863,12 +863,17 @@ int sctp_auth_set_key(struct sctp_endpoint *ep, } list_del_init(&shkey->key_list); - sctp_auth_shkey_release(shkey); list_add(&cur_key->key_list, sh_keys); - if (asoc && asoc->active_key_id == auth_key->sca_keynumber) - sctp_auth_asoc_init_active_key(asoc, GFP_KERNEL); + if (asoc && asoc->active_key_id == auth_key->sca_keynumber && + sctp_auth_asoc_init_active_key(asoc, GFP_KERNEL)) { + list_del_init(&cur_key->key_list); + sctp_auth_shkey_release(cur_key); + list_add(&shkey->key_list, sh_keys); + return -ENOMEM; + } + sctp_auth_shkey_release(shkey); return 0; } @@ -902,8 +907,13 @@ int sctp_auth_set_active_key(struct sctp_endpoint *ep, return -EINVAL; if (asoc) { + __u16 active_key_id = asoc->active_key_id; + asoc->active_key_id = key_id; - sctp_auth_asoc_init_active_key(asoc, GFP_KERNEL); + if (sctp_auth_asoc_init_active_key(asoc, GFP_KERNEL)) { + asoc->active_key_id = active_key_id; + return -ENOMEM; + } } else ep->active_key_id = key_id; diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c index c955c7253d4b..0f08c3177872 100644 --- a/net/unix/af_unix.c +++ b/net/unix/af_unix.c @@ -569,12 +569,6 @@ static void unix_sock_destructor(struct sock *sk) skb_queue_purge(&sk->sk_receive_queue); -#if IS_ENABLED(CONFIG_AF_UNIX_OOB) - if (u->oob_skb) { - kfree_skb(u->oob_skb); - u->oob_skb = NULL; - } -#endif DEBUG_NET_WARN_ON_ONCE(refcount_read(&sk->sk_wmem_alloc)); DEBUG_NET_WARN_ON_ONCE(!sk_unhashed(sk)); DEBUG_NET_WARN_ON_ONCE(sk->sk_socket); @@ -620,6 +614,13 @@ static void unix_release_sock(struct sock *sk, int embrion) unix_state_unlock(sk); +#if IS_ENABLED(CONFIG_AF_UNIX_OOB) + if (u->oob_skb) { + kfree_skb(u->oob_skb); + u->oob_skb = NULL; + } +#endif + wake_up_interruptible_all(&u->peer_wait); if (skpair != NULL) { diff --git a/net/vmw_vsock/virtio_transport_common.c b/net/vmw_vsock/virtio_transport_common.c index 35863132f4f1..a9980e9b9304 100644 --- a/net/vmw_vsock/virtio_transport_common.c +++ b/net/vmw_vsock/virtio_transport_common.c @@ -1339,7 +1339,7 @@ EXPORT_SYMBOL_GPL(virtio_transport_recv_pkt); void virtio_transport_free_pkt(struct virtio_vsock_pkt *pkt) { - kfree(pkt->buf); + kvfree(pkt->buf); kfree(pkt); } EXPORT_SYMBOL_GPL(virtio_transport_free_pkt); diff --git a/net/xdp/xsk.c b/net/xdp/xsk.c index 5b4ce6ba1bc7..9f0561b67c12 100644 --- a/net/xdp/xsk.c +++ b/net/xdp/xsk.c @@ -355,16 +355,15 @@ static u32 xsk_tx_peek_release_fallback(struct xsk_buff_pool *pool, u32 max_entr return nb_pkts; } -u32 xsk_tx_peek_release_desc_batch(struct xsk_buff_pool *pool, u32 max_entries) +u32 xsk_tx_peek_release_desc_batch(struct xsk_buff_pool *pool, u32 nb_pkts) { struct xdp_sock *xs; - u32 nb_pkts; 
rcu_read_lock(); if (!list_is_singular(&pool->xsk_tx_list)) { /* Fallback to the non-batched version */ rcu_read_unlock(); - return xsk_tx_peek_release_fallback(pool, max_entries); + return xsk_tx_peek_release_fallback(pool, nb_pkts); } xs = list_first_or_null_rcu(&pool->xsk_tx_list, struct xdp_sock, tx_list); @@ -373,12 +372,7 @@ u32 xsk_tx_peek_release_desc_batch(struct xsk_buff_pool *pool, u32 max_entries) goto out; } - max_entries = xskq_cons_nb_entries(xs->tx, max_entries); - nb_pkts = xskq_cons_read_desc_batch(xs->tx, pool, max_entries); - if (!nb_pkts) { - xs->tx->queue_empty_descs++; - goto out; - } + nb_pkts = xskq_cons_nb_entries(xs->tx, nb_pkts); /* This is the backpressure mechanism for the Tx path. Try to * reserve space in the completion queue for all packets, but @@ -386,12 +380,18 @@ u32 xsk_tx_peek_release_desc_batch(struct xsk_buff_pool *pool, u32 max_entries) * packets. This avoids having to implement any buffering in * the Tx path. */ - nb_pkts = xskq_prod_reserve_addr_batch(pool->cq, pool->tx_descs, nb_pkts); + nb_pkts = xskq_prod_nb_free(pool->cq, nb_pkts); if (!nb_pkts) goto out; - xskq_cons_release_n(xs->tx, max_entries); + nb_pkts = xskq_cons_read_desc_batch(xs->tx, pool, nb_pkts); + if (!nb_pkts) { + xs->tx->queue_empty_descs++; + goto out; + } + __xskq_cons_release(xs->tx); + xskq_prod_write_addr_batch(pool->cq, pool->tx_descs, nb_pkts); xs->sk.sk_write_space(&xs->sk); out: @@ -954,8 +954,8 @@ static int xsk_bind(struct socket *sock, struct sockaddr *addr, int addr_len) goto out_unlock; } - err = xp_assign_dev_shared(xs->pool, umem_xs->umem, - dev, qid); + err = xp_assign_dev_shared(xs->pool, umem_xs, dev, + qid); if (err) { xp_destroy(xs->pool); xs->pool = NULL; diff --git a/net/xdp/xsk_buff_pool.c b/net/xdp/xsk_buff_pool.c index a71a8c6edf55..ed6c71826d31 100644 --- a/net/xdp/xsk_buff_pool.c +++ b/net/xdp/xsk_buff_pool.c @@ -212,17 +212,18 @@ err_unreg_pool: return err; } -int xp_assign_dev_shared(struct xsk_buff_pool *pool, struct xdp_umem *umem, +int xp_assign_dev_shared(struct xsk_buff_pool *pool, struct xdp_sock *umem_xs, struct net_device *dev, u16 queue_id) { u16 flags; + struct xdp_umem *umem = umem_xs->umem; /* One fill and completion ring required for each queue id. */ if (!pool->fq || !pool->cq) return -EINVAL; flags = umem->zc ? 
XDP_ZEROCOPY : XDP_COPY; - if (pool->uses_need_wakeup) + if (umem_xs->pool->uses_need_wakeup) flags |= XDP_USE_NEED_WAKEUP; return xp_assign_dev(pool, dev, queue_id, flags); diff --git a/net/xdp/xsk_queue.h b/net/xdp/xsk_queue.h index fb20bf7207cf..c6fb6b763658 100644 --- a/net/xdp/xsk_queue.h +++ b/net/xdp/xsk_queue.h @@ -205,6 +205,11 @@ static inline bool xskq_cons_read_desc(struct xsk_queue *q, return false; } +static inline void xskq_cons_release_n(struct xsk_queue *q, u32 cnt) +{ + q->cached_cons += cnt; +} + static inline u32 xskq_cons_read_desc_batch(struct xsk_queue *q, struct xsk_buff_pool *pool, u32 max) { @@ -226,6 +231,8 @@ static inline u32 xskq_cons_read_desc_batch(struct xsk_queue *q, struct xsk_buff cached_cons++; } + /* Release valid plus any invalid entries */ + xskq_cons_release_n(q, cached_cons - q->cached_cons); return nb_entries; } @@ -291,11 +298,6 @@ static inline void xskq_cons_release(struct xsk_queue *q) q->cached_cons++; } -static inline void xskq_cons_release_n(struct xsk_queue *q, u32 cnt) -{ - q->cached_cons += cnt; -} - static inline u32 xskq_cons_present_entries(struct xsk_queue *q) { /* No barriers needed since data is not accessed */ @@ -350,21 +352,17 @@ static inline int xskq_prod_reserve_addr(struct xsk_queue *q, u64 addr) return 0; } -static inline u32 xskq_prod_reserve_addr_batch(struct xsk_queue *q, struct xdp_desc *descs, - u32 max) +static inline void xskq_prod_write_addr_batch(struct xsk_queue *q, struct xdp_desc *descs, + u32 nb_entries) { struct xdp_umem_ring *ring = (struct xdp_umem_ring *)q->ring; - u32 nb_entries, i, cached_prod; - - nb_entries = xskq_prod_nb_free(q, max); + u32 i, cached_prod; /* A, matches D */ cached_prod = q->cached_prod; for (i = 0; i < nb_entries; i++) ring->desc[cached_prod++ & q->ring_mask] = descs[i].addr; q->cached_prod = cached_prod; - - return nb_entries; } static inline int xskq_prod_reserve_desc(struct xsk_queue *q, diff --git a/net/xfrm/xfrm_input.c b/net/xfrm/xfrm_input.c index a3cf85d3f296..97074f6f2bde 100644 --- a/net/xfrm/xfrm_input.c +++ b/net/xfrm/xfrm_input.c @@ -25,7 +25,8 @@ #include "xfrm_inout.h" struct xfrm_trans_tasklet { - struct tasklet_struct tasklet; + struct work_struct work; + spinlock_t queue_lock; struct sk_buff_head queue; }; @@ -763,18 +764,22 @@ int xfrm_input_resume(struct sk_buff *skb, int nexthdr) } EXPORT_SYMBOL(xfrm_input_resume); -static void xfrm_trans_reinject(struct tasklet_struct *t) +static void xfrm_trans_reinject(struct work_struct *work) { - struct xfrm_trans_tasklet *trans = from_tasklet(trans, t, tasklet); + struct xfrm_trans_tasklet *trans = container_of(work, struct xfrm_trans_tasklet, work); struct sk_buff_head queue; struct sk_buff *skb; __skb_queue_head_init(&queue); + spin_lock_bh(&trans->queue_lock); skb_queue_splice_init(&trans->queue, &queue); + spin_unlock_bh(&trans->queue_lock); + local_bh_disable(); while ((skb = __skb_dequeue(&queue))) XFRM_TRANS_SKB_CB(skb)->finish(XFRM_TRANS_SKB_CB(skb)->net, NULL, skb); + local_bh_enable(); } int xfrm_trans_queue_net(struct net *net, struct sk_buff *skb, @@ -792,8 +797,10 @@ int xfrm_trans_queue_net(struct net *net, struct sk_buff *skb, XFRM_TRANS_SKB_CB(skb)->finish = finish; XFRM_TRANS_SKB_CB(skb)->net = net; + spin_lock_bh(&trans->queue_lock); __skb_queue_tail(&trans->queue, skb); - tasklet_schedule(&trans->tasklet); + spin_unlock_bh(&trans->queue_lock); + schedule_work(&trans->work); return 0; } EXPORT_SYMBOL(xfrm_trans_queue_net); @@ -820,7 +827,8 @@ void __init xfrm_input_init(void) struct xfrm_trans_tasklet 
*trans; trans = &per_cpu(xfrm_trans_tasklet, i); + spin_lock_init(&trans->queue_lock); __skb_queue_head_init(&trans->queue); - tasklet_setup(&trans->tasklet, xfrm_trans_reinject); + INIT_WORK(&trans->work, xfrm_trans_reinject); } } diff --git a/net/xfrm/xfrm_ipcomp.c b/net/xfrm/xfrm_ipcomp.c index 656045a87606..80143360bf09 100644 --- a/net/xfrm/xfrm_ipcomp.c +++ b/net/xfrm/xfrm_ipcomp.c @@ -203,6 +203,7 @@ static void ipcomp_free_scratches(void) vfree(*per_cpu_ptr(scratches, i)); free_percpu(scratches); + ipcomp_scratches = NULL; } static void * __percpu *ipcomp_alloc_scratches(void) diff --git a/tools/bpf/bpftool/cgroup.c b/tools/bpf/bpftool/cgroup.c index cced668fb2a3..b46a998d8f8d 100644 --- a/tools/bpf/bpftool/cgroup.c +++ b/tools/bpf/bpftool/cgroup.c @@ -136,8 +136,8 @@ static int show_bpf_prog(int id, enum bpf_attach_type attach_type, jsonw_string_field(json_wtr, "attach_type", attach_type_str); else jsonw_uint_field(json_wtr, "attach_type", attach_type); - jsonw_string_field(json_wtr, "attach_flags", - attach_flags_str); + if (!(query_flags & BPF_F_QUERY_EFFECTIVE)) + jsonw_string_field(json_wtr, "attach_flags", attach_flags_str); jsonw_string_field(json_wtr, "name", prog_name); if (attach_btf_name) jsonw_string_field(json_wtr, "attach_btf_name", attach_btf_name); @@ -150,7 +150,10 @@ static int show_bpf_prog(int id, enum bpf_attach_type attach_type, printf("%-15s", attach_type_str); else printf("type %-10u", attach_type); - printf(" %-15s %-15s", attach_flags_str, prog_name); + if (query_flags & BPF_F_QUERY_EFFECTIVE) + printf(" %-15s", prog_name); + else + printf(" %-15s %-15s", attach_flags_str, prog_name); if (attach_btf_name) printf(" %-15s", attach_btf_name); else if (info.attach_btf_id) @@ -195,6 +198,32 @@ static int cgroup_has_attached_progs(int cgroup_fd) return no_prog ? 0 : 1; } + +static int show_effective_bpf_progs(int cgroup_fd, enum bpf_attach_type type, + int level) +{ + LIBBPF_OPTS(bpf_prog_query_opts, p); + __u32 prog_ids[1024] = {0}; + __u32 iter; + int ret; + + p.query_flags = query_flags; + p.prog_cnt = ARRAY_SIZE(prog_ids); + p.prog_ids = prog_ids; + + ret = bpf_prog_query_opts(cgroup_fd, type, &p); + if (ret) + return ret; + + if (p.prog_cnt == 0) + return 0; + + for (iter = 0; iter < p.prog_cnt; iter++) + show_bpf_prog(prog_ids[iter], type, NULL, level); + + return 0; +} + static int show_attached_bpf_progs(int cgroup_fd, enum bpf_attach_type type, int level) { @@ -245,6 +274,14 @@ static int show_attached_bpf_progs(int cgroup_fd, enum bpf_attach_type type, return 0; } +static int show_bpf_progs(int cgroup_fd, enum bpf_attach_type type, + int level) +{ + return query_flags & BPF_F_QUERY_EFFECTIVE ? + show_effective_bpf_progs(cgroup_fd, type, level) : + show_attached_bpf_progs(cgroup_fd, type, level); +} + static int do_show(int argc, char **argv) { enum bpf_attach_type type; @@ -292,6 +329,8 @@ static int do_show(int argc, char **argv) if (json_output) jsonw_start_array(json_wtr); + else if (query_flags & BPF_F_QUERY_EFFECTIVE) + printf("%-8s %-15s %-15s\n", "ID", "AttachType", "Name"); else printf("%-8s %-15s %-15s %-15s\n", "ID", "AttachType", "AttachFlags", "Name"); @@ -304,7 +343,7 @@ static int do_show(int argc, char **argv) * If we were able to get the show for at least one * attach type, let's return 0. 
*/ - if (show_attached_bpf_progs(cgroup_fd, type, 0) == 0) + if (show_bpf_progs(cgroup_fd, type, 0) == 0) ret = 0; } @@ -362,7 +401,7 @@ static int do_show_tree_fn(const char *fpath, const struct stat *sb, btf_vmlinux = libbpf_find_kernel_btf(); for (type = 0; type < __MAX_BPF_ATTACH_TYPE; type++) - show_attached_bpf_progs(cgroup_fd, type, ftw->level); + show_bpf_progs(cgroup_fd, type, ftw->level); if (errno == EINVAL) /* Last attach type does not support query. @@ -436,6 +475,11 @@ static int do_show_tree(int argc, char **argv) if (json_output) jsonw_start_array(json_wtr); + else if (query_flags & BPF_F_QUERY_EFFECTIVE) + printf("%s\n" + "%-8s %-15s %-15s\n", + "CgroupPath", + "ID", "AttachType", "Name"); else printf("%s\n" "%-8s %-15s %-15s %-15s\n", diff --git a/tools/include/uapi/linux/bpf.h b/tools/include/uapi/linux/bpf.h index 3075018a4ef8..51b9aa640ad2 100644 --- a/tools/include/uapi/linux/bpf.h +++ b/tools/include/uapi/linux/bpf.h @@ -1259,7 +1259,7 @@ enum { /* Query effective (directly attached + inherited from ancestor cgroups) * programs that will be executed for events within a cgroup. - * attach_flags with this flag are returned only for directly attached programs. + * attach_flags with this flag are always returned 0. */ #define BPF_F_QUERY_EFFECTIVE (1U << 0) @@ -1458,7 +1458,10 @@ union bpf_attr { __u32 attach_flags; __aligned_u64 prog_ids; __u32 prog_cnt; - __aligned_u64 prog_attach_flags; /* output: per-program attach_flags */ + /* output: per-program attach_flags. + * not allowed to be set during effective query. + */ + __aligned_u64 prog_attach_flags; } query; struct { /* anonymous struct used by BPF_RAW_TRACEPOINT_OPEN command */ diff --git a/tools/testing/selftests/bpf/prog_tests/cgroup_link.c b/tools/testing/selftests/bpf/prog_tests/cgroup_link.c index 9e6e6aad347c..15093a69510e 100644 --- a/tools/testing/selftests/bpf/prog_tests/cgroup_link.c +++ b/tools/testing/selftests/bpf/prog_tests/cgroup_link.c @@ -71,10 +71,9 @@ void serial_test_cgroup_link(void) ping_and_check(cg_nr, 0); - /* query the number of effective progs and attach flags in root cg */ + /* query the number of attached progs and attach flags in root cg */ err = bpf_prog_query(cgs[0].fd, BPF_CGROUP_INET_EGRESS, - BPF_F_QUERY_EFFECTIVE, &attach_flags, NULL, - &prog_cnt); + 0, &attach_flags, NULL, &prog_cnt); CHECK_FAIL(err); CHECK_FAIL(attach_flags != BPF_F_ALLOW_MULTI); if (CHECK(prog_cnt != 1, "effect_cnt", "exp %d, got %d\n", 1, prog_cnt)) @@ -85,17 +84,15 @@ void serial_test_cgroup_link(void) BPF_F_QUERY_EFFECTIVE, NULL, NULL, &prog_cnt); CHECK_FAIL(err); - CHECK_FAIL(attach_flags != BPF_F_ALLOW_MULTI); if (CHECK(prog_cnt != cg_nr, "effect_cnt", "exp %d, got %d\n", cg_nr, prog_cnt)) goto cleanup; /* query the effective prog IDs in last cg */ err = bpf_prog_query(cgs[last_cg].fd, BPF_CGROUP_INET_EGRESS, - BPF_F_QUERY_EFFECTIVE, &attach_flags, - prog_ids, &prog_cnt); + BPF_F_QUERY_EFFECTIVE, NULL, prog_ids, + &prog_cnt); CHECK_FAIL(err); - CHECK_FAIL(attach_flags != BPF_F_ALLOW_MULTI); if (CHECK(prog_cnt != cg_nr, "effect_cnt", "exp %d, got %d\n", cg_nr, prog_cnt)) goto cleanup; diff --git a/tools/testing/selftests/bpf/xskxceiver.c b/tools/testing/selftests/bpf/xskxceiver.c index d1a5f3218c34..681a5db80dae 100644 --- a/tools/testing/selftests/bpf/xskxceiver.c +++ b/tools/testing/selftests/bpf/xskxceiver.c @@ -1842,6 +1842,8 @@ static struct ifobject *ifobject_create(void) if (!ifobj->umem) goto out_umem; + ifobj->ns_fd = -1; + return ifobj; out_umem: @@ -1853,6 +1855,8 @@ out_xsk_arr: static void 
ifobject_delete(struct ifobject *ifobj) { + if (ifobj->ns_fd != -1) + close(ifobj->ns_fd); free(ifobj->umem); free(ifobj->xsk_arr); free(ifobj);
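
The l1oip change above pairs a new shutdown flag with del_timer_sync(): the hot paths stop re-arming the keepalive/timeout timers once teardown begins, so release_card() cannot lose the race against a timer that immediately reschedules itself. A minimal sketch of that ordering, with hypothetical names and intervals, using the same lock-free flag the driver relies on:

#include <linux/jiffies.h>
#include <linux/timer.h>
#include <linux/workqueue.h>

/* Hypothetical card context mirroring the l1oip layout above. */
struct card {
	struct timer_list keep_tl;
	struct work_struct workq;
	bool shutdown;			/* set once teardown has started */
};

/* Hot path: never re-arm once shutdown is set, so teardown cannot race
 * with a timer that immediately reschedules itself.
 */
static void card_touch_keepalive(struct card *hc)
{
	if (!hc->shutdown &&
	    time_before(hc->keep_tl.expires, jiffies + 5 * HZ))
		mod_timer(&hc->keep_tl, jiffies + 10 * HZ);	/* stand-in interval */
}

static void card_release(struct card *hc)
{
	hc->shutdown = true;		/* stop new re-arms first */
	del_timer_sync(&hc->keep_tl);	/* wait out a running handler */
	cancel_work_sync(&hc->workq);	/* then any work it queued */
}

del_timer_sync() alone is not enough if another path can still call mod_timer() afterwards; the flag is what makes the teardown ordering stick.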
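The vhost/vsock and virtio_transport_common hunks switch a peer-sized packet buffer from kmalloc()/kfree() to kvmalloc()/kvfree(), since a large allocation may not be available as physically contiguous memory. The two calls have to change together, because kfree() on vmalloc-backed memory is invalid. A small sketch of the pairing, with hypothetical names:

#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/types.h>

/* Hypothetical packet type standing in for virtio_vsock_pkt above. */
struct pkt {
	void *buf;
	u32 len;
};

static struct pkt *pkt_alloc(u32 len)
{
	struct pkt *pkt = kzalloc(sizeof(*pkt), GFP_KERNEL);

	if (!pkt)
		return NULL;

	/* len is peer-controlled and may be large; kvmalloc() falls back
	 * to vmalloc() when a contiguous allocation is not available.
	 */
	pkt->buf = kvmalloc(len, GFP_KERNEL);
	if (!pkt->buf) {
		kfree(pkt);
		return NULL;
	}
	pkt->len = len;
	return pkt;
}

static void pkt_free(struct pkt *pkt)
{
	kvfree(pkt->buf);	/* must pair with kvmalloc(); kfree() would not */
	kfree(pkt);
}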
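On the BPF side, the cgroup query now rejects prog_attach_flags when BPF_F_QUERY_EFFECTIVE is set and reports attach_flags as 0, matching the updated uapi comment, and bpftool drops the AttachFlags column for effective queries. A sketch of an effective query from user space, assuming libbpf's bpf_prog_query_opts() as used in the bpftool hunk above (the cgroup fd and attach type come from the caller):

#include <stdio.h>
#include <bpf/bpf.h>
#include <bpf/libbpf.h>

static int list_effective(int cgroup_fd, enum bpf_attach_type type)
{
	LIBBPF_OPTS(bpf_prog_query_opts, opts,
		.query_flags = BPF_F_QUERY_EFFECTIVE,
	);
	__u32 ids[64];
	__u32 i;
	int err;

	opts.prog_ids = ids;
	opts.prog_cnt = 64;	/* in: array capacity, out: entries returned */
	/* opts.prog_attach_flags stays NULL: setting it together with
	 * BPF_F_QUERY_EFFECTIVE now returns -EINVAL, and opts.attach_flags
	 * comes back as 0.
	 */

	err = bpf_prog_query_opts(cgroup_fd, type, &opts);
	if (err)
		return err;

	for (i = 0; i < opts.prog_cnt; i++)
		printf("prog id %u\n", ids[i]);

	return 0;
}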