author | Kuniyuki Iwashima <kuniyu@amazon.com> | 2024-09-05 12:32:39 -0700
---|---|---
committer | Jakub Kicinski <kuba@kernel.org> | 2024-09-09 17:14:26 -0700
commit | a0264a9f51fe0d196f22efd7538eb749e3448c2d (patch) |
tree | 7995268cd33323d98330f6b4ca81662234468cd9 /net/unix |
parent | beb2c5f19b6ab033b187e770a659c730c3bd05ca (diff) |
af_unix: Move spin_lock() in manage_oob().
When the OOB skb has already been consumed, manage_oob() returns the
next skb if one exists. In such a case, we need to fall back to the
else branch below.

Then, we want to keep holding spin_lock(&sk->sk_receive_queue.lock).

Let's move it out of the if-else branches and add a lightweight check
before spin_lock() for the major use case without an OOB skb.
Signed-off-by: Kuniyuki Iwashima <kuniyu@amazon.com>
Link: https://patch.msgid.link/20240905193240.17565-4-kuniyu@amazon.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
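To make the shape of the change concrete, here is a minimal userspace sketch of the same pattern: a cheap lockless check that returns early in the common case, and the lock then taken once, up front, for all slow-path branches instead of separately in each one. The queue/oob_item/manage_item names are hypothetical stand-ins for illustration, not the af_unix.c API:

```c
#include <pthread.h>
#include <stdatomic.h>
#include <stddef.h>

/* Hypothetical stand-ins for sk->sk_receive_queue and u->oob_skb. */
struct queue {
	pthread_mutex_t lock;
	_Atomic(void *) oob_item;	/* written under lock, read locklessly */
};

/* Returns the item to deliver, mirroring the reworked manage_oob() shape. */
static void *manage_item(struct queue *q, void *item, size_t item_len)
{
	/*
	 * Fast path: the item has payload and is not the pending OOB
	 * item, so return without touching the lock.  This mirrors the
	 * likely()/READ_ONCE() check added before spin_lock().
	 */
	if (item_len &&
	    item != atomic_load_explicit(&q->oob_item, memory_order_relaxed))
		return item;

	/* Slow paths: take the lock once for every remaining branch. */
	pthread_mutex_lock(&q->lock);

	if (!item_len) {
		/* ... already-consumed-OOB handling, under the lock ... */
	} else {
		/* ... OOB-item handling, same lock, no drop-and-retake ... */
	}

	pthread_mutex_unlock(&q->lock);
	return item;
}
```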
Diffstat (limited to 'net/unix')
-rw-r--r-- | net/unix/af_unix.c | 15 |
1 file changed, 7 insertions, 8 deletions
```diff
diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
index 91d7877a1079..159d78fc3d14 100644
--- a/net/unix/af_unix.c
+++ b/net/unix/af_unix.c
@@ -2657,9 +2657,12 @@ static struct sk_buff *manage_oob(struct sk_buff *skb, struct sock *sk,
 	struct sk_buff *read_skb = NULL, *unread_skb = NULL;
 	struct unix_sock *u = unix_sk(sk);
 
-	if (!unix_skb_len(skb)) {
-		spin_lock(&sk->sk_receive_queue.lock);
+	if (likely(unix_skb_len(skb) && skb != READ_ONCE(u->oob_skb)))
+		return skb;
 
+	spin_lock(&sk->sk_receive_queue.lock);
+
+	if (!unix_skb_len(skb)) {
 		if (copied && (!u->oob_skb || skb == u->oob_skb)) {
 			skb = NULL;
 		} else if (flags & MSG_PEEK) {
@@ -2670,14 +2673,9 @@ static struct sk_buff *manage_oob(struct sk_buff *skb, struct sock *sk,
 			__skb_unlink(read_skb, &sk->sk_receive_queue);
 		}
 
-		spin_unlock(&sk->sk_receive_queue.lock);
-
-		consume_skb(read_skb);
-		return skb;
+		goto unlock;
 	}
 
-	spin_lock(&sk->sk_receive_queue.lock);
-
 	if (skb != u->oob_skb)
 		goto unlock;
 
@@ -2698,6 +2696,7 @@ static struct sk_buff *manage_oob(struct sk_buff *skb, struct sock *sk,
 unlock:
 	spin_unlock(&sk->sk_receive_queue.lock);
 
+	consume_skb(read_skb);
 	kfree_skb(unread_skb);
 
 	return skb;
```
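A design note on the last hunk: consume_skb(read_skb) can move under the shared unlock label because consume_skb(), like kfree_skb(), is a no-op on a NULL pointer, so every exit path can funnel through one cleanup site and free only what the taken branch actually set. Below is a minimal userspace sketch of that idiom, using free() (also NULL-safe) in place of the skb helpers; all names are illustrative:

```c
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/*
 * free() ignores NULL just as consume_skb()/kfree_skb() ignore a NULL
 * skb, so one exit label can release whichever pointer the taken
 * branch populated; the other call degrades to a no-op.
 */
static char *process(int fast)
{
	char *read_buf = NULL, *unread_buf = NULL;
	char *result;

	if (fast) {
		read_buf = strdup("consumed on this path");
		result = strdup("fast");
		goto out;
	}

	unread_buf = strdup("discarded on this path");
	result = strdup("slow");
out:
	free(read_buf);		/* no-op when the slow path ran */
	free(unread_buf);	/* no-op when the fast path ran */
	return result;
}

int main(void)
{
	char *r = process(1);
	puts(r);		/* prints "fast" */
	free(r);
	return 0;
}
```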