diff options
author:    David Howells <dhowells@redhat.com>  2022-10-21 15:31:21 +0100
committer: David Howells <dhowells@redhat.com>  2022-12-01 13:36:39 +0000
commit:    9a36a6bc22ca1c0a9d82228171e05dc785fa1154 (patch)
tree:      68001931ec701301d3448b18d55662fe55d85b45 /net/rxrpc
parent:    fa3492abb64b93b2b5d6fdf7a5b687a1fa810d74 (diff)
rxrpc: trace: Don't use __builtin_return_address for sk_buff tracing
In rxrpc tracing, use enums to generate lists of points of interest rather
than __builtin_return_address() for the sk_buff tracepoint.
Signed-off-by: David Howells <dhowells@redhat.com>
cc: Marc Dionne <marc.dionne@auristor.com>
cc: linux-afs@lists.infradead.org
Diffstat (limited to 'net/rxrpc')
 net/rxrpc/call_event.c  |  4
 net/rxrpc/call_object.c |  2
 net/rxrpc/conn_event.c  |  6
 net/rxrpc/input.c       | 36
 net/rxrpc/local_event.c |  4
 net/rxrpc/output.c      |  6
 net/rxrpc/peer_event.c  |  8
 net/rxrpc/recvmsg.c     |  6
 net/rxrpc/skbuff.c      | 36
 9 files changed, 51 insertions(+), 57 deletions(-)
diff --git a/net/rxrpc/call_event.c b/net/rxrpc/call_event.c index 0c8d2186cda8..29ca02e53c47 100644 --- a/net/rxrpc/call_event.c +++ b/net/rxrpc/call_event.c @@ -153,7 +153,7 @@ static void rxrpc_resend(struct rxrpc_call *call, unsigned long now_j) spin_lock_bh(&call->acks_ack_lock); ack_skb = call->acks_soft_tbl; if (ack_skb) { - rxrpc_get_skb(ack_skb, rxrpc_skb_ack); + rxrpc_get_skb(ack_skb, rxrpc_skb_get_ack); ack = (void *)ack_skb->data + sizeof(struct rxrpc_wire_header); } spin_unlock_bh(&call->acks_ack_lock); @@ -251,7 +251,7 @@ static void rxrpc_resend(struct rxrpc_call *call, unsigned long now_j) no_further_resend: spin_unlock(&call->tx_lock); no_resend: - rxrpc_free_skb(ack_skb, rxrpc_skb_freed); + rxrpc_free_skb(ack_skb, rxrpc_skb_put_ack); resend_at = nsecs_to_jiffies(ktime_to_ns(ktime_sub(now, oldest))); resend_at += jiffies + rxrpc_get_rto_backoff(call->peer, diff --git a/net/rxrpc/call_object.c b/net/rxrpc/call_object.c index afd957f6dc1c..815209673115 100644 --- a/net/rxrpc/call_object.c +++ b/net/rxrpc/call_object.c @@ -663,7 +663,7 @@ void rxrpc_cleanup_call(struct rxrpc_call *call) rxrpc_put_txbuf(txb, rxrpc_txbuf_put_cleaned); } rxrpc_put_txbuf(call->tx_pending, rxrpc_txbuf_put_cleaned); - rxrpc_free_skb(call->acks_soft_tbl, rxrpc_skb_cleaned); + rxrpc_free_skb(call->acks_soft_tbl, rxrpc_skb_put_ack); call_rcu(&call->rcu, rxrpc_rcu_destroy_call); } diff --git a/net/rxrpc/conn_event.c b/net/rxrpc/conn_event.c index 817f895c77ca..49d885f73fa5 100644 --- a/net/rxrpc/conn_event.c +++ b/net/rxrpc/conn_event.c @@ -437,7 +437,7 @@ static void rxrpc_do_process_connection(struct rxrpc_connection *conn) /* go through the conn-level event packets, releasing the ref on this * connection that each one has when we've finished with it */ while ((skb = skb_dequeue(&conn->rx_queue))) { - rxrpc_see_skb(skb, rxrpc_skb_seen); + rxrpc_see_skb(skb, rxrpc_skb_see_conn_work); ret = rxrpc_process_event(conn, skb, &abort_code); switch (ret) { case -EPROTO: @@ -449,7 
+449,7 @@ static void rxrpc_do_process_connection(struct rxrpc_connection *conn) goto requeue_and_leave; case -ECONNABORTED: default: - rxrpc_free_skb(skb, rxrpc_skb_freed); + rxrpc_free_skb(skb, rxrpc_skb_put_conn_work); break; } } @@ -463,7 +463,7 @@ requeue_and_leave: protocol_error: if (rxrpc_abort_connection(conn, ret, abort_code) < 0) goto requeue_and_leave; - rxrpc_free_skb(skb, rxrpc_skb_freed); + rxrpc_free_skb(skb, rxrpc_skb_put_conn_work); return; } diff --git a/net/rxrpc/input.c b/net/rxrpc/input.c index 09b44cd11c9b..ab8b7a1be935 100644 --- a/net/rxrpc/input.c +++ b/net/rxrpc/input.c @@ -485,7 +485,7 @@ send_ack: rxrpc_propose_ack_input_data); err_free: - rxrpc_free_skb(skb, rxrpc_skb_freed); + rxrpc_free_skb(skb, rxrpc_skb_put_input); } /* @@ -513,7 +513,7 @@ static bool rxrpc_input_split_jumbo(struct rxrpc_call *call, struct sk_buff *skb kdebug("couldn't clone"); return false; } - rxrpc_new_skb(jskb, rxrpc_skb_cloned_jumbo); + rxrpc_new_skb(jskb, rxrpc_skb_new_jumbo_subpacket); jsp = rxrpc_skb(jskb); jsp->offset = offset; jsp->len = RXRPC_JUMBO_DATALEN; @@ -553,7 +553,7 @@ static void rxrpc_input_data(struct rxrpc_call *call, struct sk_buff *skb) state = READ_ONCE(call->state); if (state >= RXRPC_CALL_COMPLETE) { - rxrpc_free_skb(skb, rxrpc_skb_freed); + rxrpc_free_skb(skb, rxrpc_skb_put_input); return; } @@ -563,14 +563,14 @@ static void rxrpc_input_data(struct rxrpc_call *call, struct sk_buff *skb) if (sp->hdr.securityIndex != 0) { struct sk_buff *nskb = skb_unshare(skb, GFP_ATOMIC); if (!nskb) { - rxrpc_eaten_skb(skb, rxrpc_skb_unshared_nomem); + rxrpc_eaten_skb(skb, rxrpc_skb_eaten_by_unshare_nomem); return; } if (nskb != skb) { - rxrpc_eaten_skb(skb, rxrpc_skb_received); + rxrpc_eaten_skb(skb, rxrpc_skb_eaten_by_unshare); skb = nskb; - rxrpc_new_skb(skb, rxrpc_skb_unshared); + rxrpc_new_skb(skb, rxrpc_skb_new_unshared); sp = rxrpc_skb(skb); } } @@ -609,7 +609,7 @@ out: rxrpc_notify_socket(call); spin_unlock(&call->input_lock); - 
rxrpc_free_skb(skb, rxrpc_skb_freed); + rxrpc_free_skb(skb, rxrpc_skb_put_input); _leave(" [queued]"); } @@ -994,8 +994,8 @@ static void rxrpc_input_ack(struct rxrpc_call *call, struct sk_buff *skb) out: spin_unlock(&call->input_lock); out_not_locked: - rxrpc_free_skb(skb_put, rxrpc_skb_freed); - rxrpc_free_skb(skb_old, rxrpc_skb_freed); + rxrpc_free_skb(skb_put, rxrpc_skb_put_input); + rxrpc_free_skb(skb_old, rxrpc_skb_put_ack); } /* @@ -1075,7 +1075,7 @@ static void rxrpc_input_call_packet(struct rxrpc_call *call, break; } - rxrpc_free_skb(skb, rxrpc_skb_freed); + rxrpc_free_skb(skb, rxrpc_skb_put_input); no_free: _leave(""); } @@ -1137,7 +1137,7 @@ static void rxrpc_post_packet_to_local(struct rxrpc_local *local, skb_queue_tail(&local->event_queue, skb); rxrpc_queue_local(local); } else { - rxrpc_free_skb(skb, rxrpc_skb_freed); + rxrpc_free_skb(skb, rxrpc_skb_put_input); } } @@ -1150,7 +1150,7 @@ static void rxrpc_reject_packet(struct rxrpc_local *local, struct sk_buff *skb) skb_queue_tail(&local->reject_queue, skb); rxrpc_queue_local(local); } else { - rxrpc_free_skb(skb, rxrpc_skb_freed); + rxrpc_free_skb(skb, rxrpc_skb_put_input); } } @@ -1228,7 +1228,7 @@ int rxrpc_input_packet(struct sock *udp_sk, struct sk_buff *skb) if (skb->tstamp == 0) skb->tstamp = ktime_get_real(); - rxrpc_new_skb(skb, rxrpc_skb_received); + rxrpc_new_skb(skb, rxrpc_skb_new_encap_rcv); skb_pull(skb, sizeof(struct udphdr)); @@ -1245,7 +1245,7 @@ int rxrpc_input_packet(struct sock *udp_sk, struct sk_buff *skb) static int lose; if ((lose++ & 7) == 7) { trace_rxrpc_rx_lose(sp); - rxrpc_free_skb(skb, rxrpc_skb_lost); + rxrpc_free_skb(skb, rxrpc_skb_put_lose); return 0; } } @@ -1286,14 +1286,14 @@ int rxrpc_input_packet(struct sock *udp_sk, struct sk_buff *skb) if (sp->hdr.securityIndex != 0) { struct sk_buff *nskb = skb_unshare(skb, GFP_ATOMIC); if (!nskb) { - rxrpc_eaten_skb(skb, rxrpc_skb_unshared_nomem); + rxrpc_eaten_skb(skb, rxrpc_skb_eaten_by_unshare_nomem); goto out; } if (nskb != 
skb) { - rxrpc_eaten_skb(skb, rxrpc_skb_received); + rxrpc_eaten_skb(skb, rxrpc_skb_eaten_by_unshare); skb = nskb; - rxrpc_new_skb(skb, rxrpc_skb_unshared); + rxrpc_new_skb(skb, rxrpc_skb_new_unshared); sp = rxrpc_skb(skb); } } @@ -1434,7 +1434,7 @@ int rxrpc_input_packet(struct sock *udp_sk, struct sk_buff *skb) goto out; discard: - rxrpc_free_skb(skb, rxrpc_skb_freed); + rxrpc_free_skb(skb, rxrpc_skb_put_input); out: trace_rxrpc_rx_done(0, 0); return 0; diff --git a/net/rxrpc/local_event.c b/net/rxrpc/local_event.c index f23a3fbabbda..c344383a20b2 100644 --- a/net/rxrpc/local_event.c +++ b/net/rxrpc/local_event.c @@ -88,7 +88,7 @@ void rxrpc_process_local_events(struct rxrpc_local *local) if (skb) { struct rxrpc_skb_priv *sp = rxrpc_skb(skb); - rxrpc_see_skb(skb, rxrpc_skb_seen); + rxrpc_see_skb(skb, rxrpc_skb_see_local_work); _debug("{%d},{%u}", local->debug_id, sp->hdr.type); switch (sp->hdr.type) { @@ -105,7 +105,7 @@ void rxrpc_process_local_events(struct rxrpc_local *local) break; } - rxrpc_free_skb(skb, rxrpc_skb_freed); + rxrpc_free_skb(skb, rxrpc_skb_put_input); } _leave(""); diff --git a/net/rxrpc/output.c b/net/rxrpc/output.c index d324e88f7642..131c7a76fb06 100644 --- a/net/rxrpc/output.c +++ b/net/rxrpc/output.c @@ -615,7 +615,7 @@ void rxrpc_reject_packets(struct rxrpc_local *local) memset(&whdr, 0, sizeof(whdr)); while ((skb = skb_dequeue(&local->reject_queue))) { - rxrpc_see_skb(skb, rxrpc_skb_seen); + rxrpc_see_skb(skb, rxrpc_skb_see_reject); sp = rxrpc_skb(skb); switch (skb->mark) { @@ -631,7 +631,7 @@ void rxrpc_reject_packets(struct rxrpc_local *local) ioc = 2; break; default: - rxrpc_free_skb(skb, rxrpc_skb_freed); + rxrpc_free_skb(skb, rxrpc_skb_put_input); continue; } @@ -656,7 +656,7 @@ void rxrpc_reject_packets(struct rxrpc_local *local) rxrpc_tx_point_reject); } - rxrpc_free_skb(skb, rxrpc_skb_freed); + rxrpc_free_skb(skb, rxrpc_skb_put_input); } _leave(""); diff --git a/net/rxrpc/peer_event.c b/net/rxrpc/peer_event.c index 
b28739d10927..f35cfc458dcf 100644 --- a/net/rxrpc/peer_event.c +++ b/net/rxrpc/peer_event.c @@ -158,12 +158,12 @@ void rxrpc_error_report(struct sock *sk) _leave("UDP socket errqueue empty"); return; } - rxrpc_new_skb(skb, rxrpc_skb_received); + rxrpc_new_skb(skb, rxrpc_skb_new_error_report); serr = SKB_EXT_ERR(skb); if (!skb->len && serr->ee.ee_origin == SO_EE_ORIGIN_TIMESTAMPING) { _leave("UDP empty message"); rcu_read_unlock(); - rxrpc_free_skb(skb, rxrpc_skb_freed); + rxrpc_free_skb(skb, rxrpc_skb_put_error_report); return; } @@ -172,7 +172,7 @@ void rxrpc_error_report(struct sock *sk) peer = NULL; if (!peer) { rcu_read_unlock(); - rxrpc_free_skb(skb, rxrpc_skb_freed); + rxrpc_free_skb(skb, rxrpc_skb_put_error_report); _leave(" [no peer]"); return; } @@ -189,7 +189,7 @@ void rxrpc_error_report(struct sock *sk) rxrpc_store_error(peer, serr); out: rcu_read_unlock(); - rxrpc_free_skb(skb, rxrpc_skb_freed); + rxrpc_free_skb(skb, rxrpc_skb_put_error_report); rxrpc_put_peer(peer, rxrpc_peer_put_input_error); _leave(""); diff --git a/net/rxrpc/recvmsg.c b/net/rxrpc/recvmsg.c index c84d2b620396..bfac9e09347e 100644 --- a/net/rxrpc/recvmsg.c +++ b/net/rxrpc/recvmsg.c @@ -229,7 +229,7 @@ static void rxrpc_rotate_rx_window(struct rxrpc_call *call) _enter("%d", call->debug_id); skb = skb_dequeue(&call->recvmsg_queue); - rxrpc_see_skb(skb, rxrpc_skb_rotated); + rxrpc_see_skb(skb, rxrpc_skb_see_rotate); sp = rxrpc_skb(skb); tseq = sp->hdr.seq; @@ -240,7 +240,7 @@ static void rxrpc_rotate_rx_window(struct rxrpc_call *call) if (after(tseq, call->rx_consumed)) smp_store_release(&call->rx_consumed, tseq); - rxrpc_free_skb(skb, rxrpc_skb_freed); + rxrpc_free_skb(skb, rxrpc_skb_put_rotate); trace_rxrpc_receive(call, last ? 
rxrpc_receive_rotate_last : rxrpc_receive_rotate, serial, call->rx_consumed); @@ -302,7 +302,7 @@ static int rxrpc_recvmsg_data(struct socket *sock, struct rxrpc_call *call, */ skb = skb_peek(&call->recvmsg_queue); while (skb) { - rxrpc_see_skb(skb, rxrpc_skb_seen); + rxrpc_see_skb(skb, rxrpc_skb_see_recvmsg); sp = rxrpc_skb(skb); seq = sp->hdr.seq; diff --git a/net/rxrpc/skbuff.c b/net/rxrpc/skbuff.c index 0c827d5bb2b8..ebe0c75e7b07 100644 --- a/net/rxrpc/skbuff.c +++ b/net/rxrpc/skbuff.c @@ -1,5 +1,5 @@ // SPDX-License-Identifier: GPL-2.0-or-later -/* ar-skbuff.c: socket buffer destruction handling +/* Socket buffer accounting * * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved. * Written by David Howells (dhowells@redhat.com) @@ -19,56 +19,50 @@ /* * Note the allocation or reception of a socket buffer. */ -void rxrpc_new_skb(struct sk_buff *skb, enum rxrpc_skb_trace op) +void rxrpc_new_skb(struct sk_buff *skb, enum rxrpc_skb_trace why) { - const void *here = __builtin_return_address(0); int n = atomic_inc_return(select_skb_count(skb)); - trace_rxrpc_skb(skb, op, refcount_read(&skb->users), n, here); + trace_rxrpc_skb(skb, refcount_read(&skb->users), n, why); } /* * Note the re-emergence of a socket buffer from a queue or buffer. */ -void rxrpc_see_skb(struct sk_buff *skb, enum rxrpc_skb_trace op) +void rxrpc_see_skb(struct sk_buff *skb, enum rxrpc_skb_trace why) { - const void *here = __builtin_return_address(0); if (skb) { int n = atomic_read(select_skb_count(skb)); - trace_rxrpc_skb(skb, op, refcount_read(&skb->users), n, here); + trace_rxrpc_skb(skb, refcount_read(&skb->users), n, why); } } /* * Note the addition of a ref on a socket buffer. 
*/ -void rxrpc_get_skb(struct sk_buff *skb, enum rxrpc_skb_trace op) +void rxrpc_get_skb(struct sk_buff *skb, enum rxrpc_skb_trace why) { - const void *here = __builtin_return_address(0); int n = atomic_inc_return(select_skb_count(skb)); - trace_rxrpc_skb(skb, op, refcount_read(&skb->users), n, here); + trace_rxrpc_skb(skb, refcount_read(&skb->users), n, why); skb_get(skb); } /* * Note the dropping of a ref on a socket buffer by the core. */ -void rxrpc_eaten_skb(struct sk_buff *skb, enum rxrpc_skb_trace op) +void rxrpc_eaten_skb(struct sk_buff *skb, enum rxrpc_skb_trace why) { - const void *here = __builtin_return_address(0); int n = atomic_inc_return(&rxrpc_n_rx_skbs); - trace_rxrpc_skb(skb, op, 0, n, here); + trace_rxrpc_skb(skb, 0, n, why); } /* * Note the destruction of a socket buffer. */ -void rxrpc_free_skb(struct sk_buff *skb, enum rxrpc_skb_trace op) +void rxrpc_free_skb(struct sk_buff *skb, enum rxrpc_skb_trace why) { - const void *here = __builtin_return_address(0); if (skb) { - int n; - n = atomic_dec_return(select_skb_count(skb)); - trace_rxrpc_skb(skb, op, refcount_read(&skb->users), n, here); + int n = atomic_dec_return(select_skb_count(skb)); + trace_rxrpc_skb(skb, refcount_read(&skb->users), n, why); kfree_skb(skb); } } @@ -78,12 +72,12 @@ void rxrpc_free_skb(struct sk_buff *skb, enum rxrpc_skb_trace op) */ void rxrpc_purge_queue(struct sk_buff_head *list) { - const void *here = __builtin_return_address(0); struct sk_buff *skb; + while ((skb = skb_dequeue((list))) != NULL) { int n = atomic_dec_return(select_skb_count(skb)); - trace_rxrpc_skb(skb, rxrpc_skb_purged, - refcount_read(&skb->users), n, here); + trace_rxrpc_skb(skb, refcount_read(&skb->users), n, + rxrpc_skb_put_purge); kfree_skb(skb); } } |