author		Björn Töpel <bjorn.topel@intel.com>	2018-06-04 13:57:11 +0200
committer	Daniel Borkmann <daniel@iogearbox.net>	2018-06-04 17:21:02 +0200
commit		4e64c835254095f55044d393e628dd3e92fca304
tree		51db5592136aab51ca7451006085044715affbe9 /net/xdp/xsk_queue.h
parent		bd3a08aaa9a383ffbbd5b788b797ae6e64eaa7a1
xsk: proper fill queue descriptor validation
Previously the fill queue descriptor was not copied to kernel space
prior to validating it, making it possible for userland to change the
descriptor after the kernel had validated it.
Signed-off-by: Björn Töpel <bjorn.topel@intel.com>
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
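For illustration, here is a minimal userspace C sketch of the
time-of-check-to-time-of-use race this patch closes and of the
copy-then-validate pattern it adopts. The fake_queue type,
id_is_valid(), and both peek helpers are hypothetical stand-ins for
this sketch, not the kernel's actual code; only the READ_ONCE() idiom
mirrors the patch itself.

#include <stdint.h>
#include <stdbool.h>
#include <stddef.h>

/* Hypothetical stand-in for the kernel's READ_ONCE() (GNU C). */
#define READ_ONCE(x) (*(volatile typeof(x) *)&(x))

/* Simplified ring: desc[] is memory shared with untrusted userspace. */
struct fake_queue {
	uint32_t nentries;
	uint32_t *desc;
};

static bool id_is_valid(struct fake_queue *q, uint32_t id)
{
	return id < q->nentries;
}

/* Racy pattern (pre-patch shape): validate the entry in place, then
 * hand back a pointer into shared memory. Userspace can rewrite
 * desc[idx] between the check and the later use through the pointer. */
static uint32_t *peek_id_racy(struct fake_queue *q, uint32_t idx)
{
	if (id_is_valid(q, q->desc[idx]))	/* read #1: check */
		return &q->desc[idx];		/* read #2: use   */
	return NULL;
}

/* Fixed pattern (post-patch shape): snapshot the entry into
 * caller-owned memory exactly once, then validate and use only
 * the private copy. */
static bool peek_id_safe(struct fake_queue *q, uint32_t idx, uint32_t *id)
{
	*id = READ_ONCE(q->desc[idx]);
	return id_is_valid(q, *id);
}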
Diffstat (limited to 'net/xdp/xsk_queue.h')
-rw-r--r--	net/xdp/xsk_queue.h	32
1 file changed, 9 insertions(+), 23 deletions(-)
diff --git a/net/xdp/xsk_queue.h b/net/xdp/xsk_queue.h
index cb8e5be35110..b5924e7aeb2b 100644
--- a/net/xdp/xsk_queue.h
+++ b/net/xdp/xsk_queue.h
@@ -85,14 +85,15 @@ static inline bool xskq_is_valid_id(struct xsk_queue *q, u32 idx)
 	return true;
 }
 
-static inline u32 *xskq_validate_id(struct xsk_queue *q)
+static inline u32 *xskq_validate_id(struct xsk_queue *q, u32 *id)
 {
 	while (q->cons_tail != q->cons_head) {
 		struct xdp_umem_ring *ring = (struct xdp_umem_ring *)q->ring;
 		unsigned int idx = q->cons_tail & q->ring_mask;
 
-		if (xskq_is_valid_id(q, ring->desc[idx]))
-			return &ring->desc[idx];
+		*id = READ_ONCE(ring->desc[idx]);
+		if (xskq_is_valid_id(q, *id))
+			return id;
 
 		q->cons_tail++;
 	}
@@ -100,28 +101,22 @@ static inline u32 *xskq_validate_id(struct xsk_queue *q)
 	return NULL;
 }
 
-static inline u32 *xskq_peek_id(struct xsk_queue *q)
+static inline u32 *xskq_peek_id(struct xsk_queue *q, u32 *id)
 {
-	struct xdp_umem_ring *ring;
-
 	if (q->cons_tail == q->cons_head) {
 		WRITE_ONCE(q->ring->consumer, q->cons_tail);
 		q->cons_head = q->cons_tail + xskq_nb_avail(q, RX_BATCH_SIZE);
 
 		/* Order consumer and data */
 		smp_rmb();
-
-		return xskq_validate_id(q);
 	}
 
-	ring = (struct xdp_umem_ring *)q->ring;
-	return &ring->desc[q->cons_tail & q->ring_mask];
+	return xskq_validate_id(q, id);
 }
 
 static inline void xskq_discard_id(struct xsk_queue *q)
 {
 	q->cons_tail++;
-	(void)xskq_validate_id(q);
 }
 
 static inline int xskq_produce_id(struct xsk_queue *q, u32 id)
@@ -174,11 +169,9 @@ static inline struct xdp_desc *xskq_validate_desc(struct xsk_queue *q,
 		struct xdp_rxtx_ring *ring = (struct xdp_rxtx_ring *)q->ring;
 		unsigned int idx = q->cons_tail & q->ring_mask;
 
-		if (xskq_is_valid_desc(q, &ring->desc[idx])) {
-			if (desc)
-				*desc = ring->desc[idx];
+		*desc = READ_ONCE(ring->desc[idx]);
+		if (xskq_is_valid_desc(q, desc))
 			return desc;
-		}
 
 		q->cons_tail++;
 	}
@@ -189,27 +182,20 @@ static inline struct xdp_desc *xskq_validate_desc(struct xsk_queue *q,
 static inline struct xdp_desc *xskq_peek_desc(struct xsk_queue *q,
 					      struct xdp_desc *desc)
 {
-	struct xdp_rxtx_ring *ring;
-
 	if (q->cons_tail == q->cons_head) {
 		WRITE_ONCE(q->ring->consumer, q->cons_tail);
 		q->cons_head = q->cons_tail + xskq_nb_avail(q, RX_BATCH_SIZE);
 
 		/* Order consumer and data */
 		smp_rmb();
-
-		return xskq_validate_desc(q, desc);
 	}
 
-	ring = (struct xdp_rxtx_ring *)q->ring;
-	*desc = ring->desc[q->cons_tail & q->ring_mask];
-	return desc;
+	return xskq_validate_desc(q, desc);
 }
 
 static inline void xskq_discard_desc(struct xsk_queue *q)
 {
 	q->cons_tail++;
-	(void)xskq_validate_desc(q, NULL);
 }
 
 static inline int xskq_produce_batch_desc(struct xsk_queue *q,
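After this change both peek paths funnel through the validate helpers,
so a caller always sees the kernel-side copy rather than a pointer into
the shared ring. A hedged sketch of the consumer-loop shape implied by
the new xskq_peek_id()/xskq_discard_id() API follows; the real call
sites live in net/xdp/xsk.c and are elided here.

/* Sketch only: consumer loop shape implied by the new API. */
u32 id, *idp;

for (;;) {
	idp = xskq_peek_id(q, &id);	/* 'id' receives a validated copy */
	if (!idp)
		break;			/* ring empty or no valid entry */

	/* ... use 'id'; userspace can no longer change it under us ... */

	xskq_discard_id(q);		/* advance cons_tail past the entry */
}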