Diffstat (limited to 'include/trace')
-rw-r--r--  include/trace/events/afs.h         | 184
-rw-r--r--  include/trace/events/bpf.h         | 347
-rw-r--r--  include/trace/events/btrfs.h       |   2
-rw-r--r--  include/trace/events/cgroup.h      |  20
-rw-r--r--  include/trace/events/compaction.h  |  60
-rw-r--r--  include/trace/events/f2fs.h        | 151
-rw-r--r--  include/trace/events/fs_dax.h      | 156
-rw-r--r--  include/trace/events/mmflags.h     |  98
-rw-r--r--  include/trace/events/oom.h         |  81
-rw-r--r--  include/trace/events/rxrpc.h       | 520
-rw-r--r--  include/trace/events/sched.h       |   2
-rw-r--r--  include/trace/events/syscalls.h    |   1
-rw-r--r--  include/trace/events/timer.h       |  14
-rw-r--r--  include/trace/events/vmscan.h      | 150
-rw-r--r--  include/trace/events/writeback.h   |   2
-rw-r--r--  include/trace/events/xdp.h         |  53
-rw-r--r--  include/trace/trace_events.h       |  19
17 files changed, 1663 insertions(+), 197 deletions(-)
diff --git a/include/trace/events/afs.h b/include/trace/events/afs.h
new file mode 100644
index 000000000000..8b95c16b7045
--- /dev/null
+++ b/include/trace/events/afs.h
@@ -0,0 +1,184 @@
+/* AFS tracepoints
+ *
+ * Copyright (C) 2016 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public Licence
+ * as published by the Free Software Foundation; either version
+ * 2 of the Licence, or (at your option) any later version.
+ */
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM afs
+
+#if !defined(_TRACE_AFS_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_AFS_H
+
+#include <linux/tracepoint.h>
+
+/*
+ * Define enums for tracing information.
+ */
+#ifndef __AFS_DECLARE_TRACE_ENUMS_ONCE_ONLY
+#define __AFS_DECLARE_TRACE_ENUMS_ONCE_ONLY
+
+enum afs_call_trace {
+ afs_call_trace_alloc,
+ afs_call_trace_free,
+ afs_call_trace_put,
+ afs_call_trace_wake,
+ afs_call_trace_work,
+};
+
+#endif /* end __AFS_DECLARE_TRACE_ENUMS_ONCE_ONLY */
+
+/*
+ * Declare tracing information enums and their string mappings for display.
+ */
+#define afs_call_traces \
+ EM(afs_call_trace_alloc, "ALLOC") \
+ EM(afs_call_trace_free, "FREE ") \
+ EM(afs_call_trace_put, "PUT ") \
+ EM(afs_call_trace_wake, "WAKE ") \
+ E_(afs_call_trace_work, "WORK ")
+
+/*
+ * Export enum symbols via userspace.
+ */
+#undef EM
+#undef E_
+#define EM(a, b) TRACE_DEFINE_ENUM(a);
+#define E_(a, b) TRACE_DEFINE_ENUM(a);
+
+afs_call_traces;
+
+/*
+ * Now redefine the EM() and E_() macros to map the enums to the strings that
+ * will be printed in the output.
+ */
+#undef EM
+#undef E_
+#define EM(a, b) { a, b },
+#define E_(a, b) { a, b }
+
+TRACE_EVENT(afs_recv_data,
+ TP_PROTO(struct afs_call *call, unsigned count, unsigned offset,
+ bool want_more, int ret),
+
+ TP_ARGS(call, count, offset, want_more, ret),
+
+ TP_STRUCT__entry(
+ __field(struct rxrpc_call *, rxcall )
+ __field(struct afs_call *, call )
+ __field(enum afs_call_state, state )
+ __field(unsigned int, count )
+ __field(unsigned int, offset )
+ __field(unsigned short, unmarshall )
+ __field(bool, want_more )
+ __field(int, ret )
+ ),
+
+ TP_fast_assign(
+ __entry->rxcall = call->rxcall;
+ __entry->call = call;
+ __entry->state = call->state;
+ __entry->unmarshall = call->unmarshall;
+ __entry->count = count;
+ __entry->offset = offset;
+ __entry->want_more = want_more;
+ __entry->ret = ret;
+ ),
+
+ TP_printk("c=%p ac=%p s=%u u=%u %u/%u wm=%u ret=%d",
+ __entry->rxcall,
+ __entry->call,
+ __entry->state, __entry->unmarshall,
+ __entry->offset, __entry->count,
+ __entry->want_more, __entry->ret)
+ );
+
+TRACE_EVENT(afs_notify_call,
+ TP_PROTO(struct rxrpc_call *rxcall, struct afs_call *call),
+
+ TP_ARGS(rxcall, call),
+
+ TP_STRUCT__entry(
+ __field(struct rxrpc_call *, rxcall )
+ __field(struct afs_call *, call )
+ __field(enum afs_call_state, state )
+ __field(unsigned short, unmarshall )
+ ),
+
+ TP_fast_assign(
+ __entry->rxcall = rxcall;
+ __entry->call = call;
+ __entry->state = call->state;
+ __entry->unmarshall = call->unmarshall;
+ ),
+
+ TP_printk("c=%p ac=%p s=%u u=%u",
+ __entry->rxcall,
+ __entry->call,
+ __entry->state, __entry->unmarshall)
+ );
+
+TRACE_EVENT(afs_cb_call,
+ TP_PROTO(struct afs_call *call),
+
+ TP_ARGS(call),
+
+ TP_STRUCT__entry(
+ __field(struct rxrpc_call *, rxcall )
+ __field(struct afs_call *, call )
+ __field(const char *, name )
+ __field(u32, op )
+ ),
+
+ TP_fast_assign(
+ __entry->rxcall = call->rxcall;
+ __entry->call = call;
+ __entry->name = call->type->name;
+ __entry->op = call->operation_ID;
+ ),
+
+ TP_printk("c=%p ac=%p %s o=%u",
+ __entry->rxcall,
+ __entry->call,
+ __entry->name,
+ __entry->op)
+ );
+
+TRACE_EVENT(afs_call,
+ TP_PROTO(struct afs_call *call, enum afs_call_trace op,
+ int usage, int outstanding, const void *where),
+
+ TP_ARGS(call, op, usage, outstanding, where),
+
+ TP_STRUCT__entry(
+ __field(struct afs_call *, call )
+ __field(int, op )
+ __field(int, usage )
+ __field(int, outstanding )
+ __field(const void *, where )
+ ),
+
+ TP_fast_assign(
+ __entry->call = call;
+ __entry->op = op;
+ __entry->usage = usage;
+ __entry->outstanding = outstanding;
+ __entry->where = where;
+ ),
+
+ TP_printk("c=%p %s u=%d o=%d sp=%pSR",
+ __entry->call,
+ __print_symbolic(__entry->op, afs_call_traces),
+ __entry->usage,
+ __entry->outstanding,
+ __entry->where)
+ );
+
+#endif /* _TRACE_AFS_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
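[Editor's sketch] The new afs.h above leans on a two-pass macro trick: the afs_call_traces list is expanded once with EM()/E_() defined as TRACE_DEFINE_ENUM() (so tooling can resolve the enum values), then again as { value, string } pairs that __print_symbolic() consumes. A minimal userspace-only sketch of that second expansion and the lookup it feeds, plain C rather than kernel code, trimmed to three of the enum values:

#include <stdio.h>

enum afs_call_trace {
        afs_call_trace_alloc,
        afs_call_trace_free,
        afs_call_trace_work,
};

#define afs_call_traces \
        EM(afs_call_trace_alloc, "ALLOC") \
        EM(afs_call_trace_free,  "FREE ") \
        E_(afs_call_trace_work,  "WORK ")

#define EM(a, b) { a, b },      /* every entry but the last gets a trailing comma */
#define E_(a, b) { a, b }       /* the last entry does not */

static const struct { int val; const char *str; } afs_call_trace_map[] = {
        afs_call_traces
};

int main(void)
{
        enum afs_call_trace op = afs_call_trace_free;

        /* __print_symbolic(op, afs_call_traces) reduces to a lookup like this */
        for (unsigned int i = 0; i < sizeof(afs_call_trace_map) / sizeof(afs_call_trace_map[0]); i++)
                if (afs_call_trace_map[i].val == op)
                        printf("op %d -> \"%s\"\n", op, afs_call_trace_map[i].str);
        return 0;
}
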
diff --git a/include/trace/events/bpf.h b/include/trace/events/bpf.h
new file mode 100644
index 000000000000..c3a53fd47ff1
--- /dev/null
+++ b/include/trace/events/bpf.h
@@ -0,0 +1,347 @@
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM bpf
+
+#if !defined(_TRACE_BPF_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_BPF_H
+
+#include <linux/filter.h>
+#include <linux/bpf.h>
+#include <linux/fs.h>
+#include <linux/tracepoint.h>
+
+#define __PROG_TYPE_MAP(FN) \
+ FN(SOCKET_FILTER) \
+ FN(KPROBE) \
+ FN(SCHED_CLS) \
+ FN(SCHED_ACT) \
+ FN(TRACEPOINT) \
+ FN(XDP) \
+ FN(PERF_EVENT) \
+ FN(CGROUP_SKB) \
+ FN(CGROUP_SOCK) \
+ FN(LWT_IN) \
+ FN(LWT_OUT) \
+ FN(LWT_XMIT)
+
+#define __MAP_TYPE_MAP(FN) \
+ FN(HASH) \
+ FN(ARRAY) \
+ FN(PROG_ARRAY) \
+ FN(PERF_EVENT_ARRAY) \
+ FN(PERCPU_HASH) \
+ FN(PERCPU_ARRAY) \
+ FN(STACK_TRACE) \
+ FN(CGROUP_ARRAY) \
+ FN(LRU_HASH) \
+ FN(LRU_PERCPU_HASH) \
+ FN(LPM_TRIE)
+
+#define __PROG_TYPE_TP_FN(x) \
+ TRACE_DEFINE_ENUM(BPF_PROG_TYPE_##x);
+#define __PROG_TYPE_SYM_FN(x) \
+ { BPF_PROG_TYPE_##x, #x },
+#define __PROG_TYPE_SYM_TAB \
+ __PROG_TYPE_MAP(__PROG_TYPE_SYM_FN) { -1, 0 }
+__PROG_TYPE_MAP(__PROG_TYPE_TP_FN)
+
+#define __MAP_TYPE_TP_FN(x) \
+ TRACE_DEFINE_ENUM(BPF_MAP_TYPE_##x);
+#define __MAP_TYPE_SYM_FN(x) \
+ { BPF_MAP_TYPE_##x, #x },
+#define __MAP_TYPE_SYM_TAB \
+ __MAP_TYPE_MAP(__MAP_TYPE_SYM_FN) { -1, 0 }
+__MAP_TYPE_MAP(__MAP_TYPE_TP_FN)
+
+DECLARE_EVENT_CLASS(bpf_prog_event,
+
+ TP_PROTO(const struct bpf_prog *prg),
+
+ TP_ARGS(prg),
+
+ TP_STRUCT__entry(
+ __array(u8, prog_tag, 8)
+ __field(u32, type)
+ ),
+
+ TP_fast_assign(
+ BUILD_BUG_ON(sizeof(__entry->prog_tag) != sizeof(prg->tag));
+ memcpy(__entry->prog_tag, prg->tag, sizeof(prg->tag));
+ __entry->type = prg->type;
+ ),
+
+ TP_printk("prog=%s type=%s",
+ __print_hex_str(__entry->prog_tag, 8),
+ __print_symbolic(__entry->type, __PROG_TYPE_SYM_TAB))
+);
+
+DEFINE_EVENT(bpf_prog_event, bpf_prog_get_type,
+
+ TP_PROTO(const struct bpf_prog *prg),
+
+ TP_ARGS(prg)
+);
+
+DEFINE_EVENT(bpf_prog_event, bpf_prog_put_rcu,
+
+ TP_PROTO(const struct bpf_prog *prg),
+
+ TP_ARGS(prg)
+);
+
+TRACE_EVENT(bpf_prog_load,
+
+ TP_PROTO(const struct bpf_prog *prg, int ufd),
+
+ TP_ARGS(prg, ufd),
+
+ TP_STRUCT__entry(
+ __array(u8, prog_tag, 8)
+ __field(u32, type)
+ __field(int, ufd)
+ ),
+
+ TP_fast_assign(
+ BUILD_BUG_ON(sizeof(__entry->prog_tag) != sizeof(prg->tag));
+ memcpy(__entry->prog_tag, prg->tag, sizeof(prg->tag));
+ __entry->type = prg->type;
+ __entry->ufd = ufd;
+ ),
+
+ TP_printk("prog=%s type=%s ufd=%d",
+ __print_hex_str(__entry->prog_tag, 8),
+ __print_symbolic(__entry->type, __PROG_TYPE_SYM_TAB),
+ __entry->ufd)
+);
+
+TRACE_EVENT(bpf_map_create,
+
+ TP_PROTO(const struct bpf_map *map, int ufd),
+
+ TP_ARGS(map, ufd),
+
+ TP_STRUCT__entry(
+ __field(u32, type)
+ __field(u32, size_key)
+ __field(u32, size_value)
+ __field(u32, max_entries)
+ __field(u32, flags)
+ __field(int, ufd)
+ ),
+
+ TP_fast_assign(
+ __entry->type = map->map_type;
+ __entry->size_key = map->key_size;
+ __entry->size_value = map->value_size;
+ __entry->max_entries = map->max_entries;
+ __entry->flags = map->map_flags;
+ __entry->ufd = ufd;
+ ),
+
+ TP_printk("map type=%s ufd=%d key=%u val=%u max=%u flags=%x",
+ __print_symbolic(__entry->type, __MAP_TYPE_SYM_TAB),
+ __entry->ufd, __entry->size_key, __entry->size_value,
+ __entry->max_entries, __entry->flags)
+);
+
+DECLARE_EVENT_CLASS(bpf_obj_prog,
+
+ TP_PROTO(const struct bpf_prog *prg, int ufd,
+ const struct filename *pname),
+
+ TP_ARGS(prg, ufd, pname),
+
+ TP_STRUCT__entry(
+ __array(u8, prog_tag, 8)
+ __field(int, ufd)
+ __string(path, pname->name)
+ ),
+
+ TP_fast_assign(
+ BUILD_BUG_ON(sizeof(__entry->prog_tag) != sizeof(prg->tag));
+ memcpy(__entry->prog_tag, prg->tag, sizeof(prg->tag));
+ __assign_str(path, pname->name);
+ __entry->ufd = ufd;
+ ),
+
+ TP_printk("prog=%s path=%s ufd=%d",
+ __print_hex_str(__entry->prog_tag, 8),
+ __get_str(path), __entry->ufd)
+);
+
+DEFINE_EVENT(bpf_obj_prog, bpf_obj_pin_prog,
+
+ TP_PROTO(const struct bpf_prog *prg, int ufd,
+ const struct filename *pname),
+
+ TP_ARGS(prg, ufd, pname)
+);
+
+DEFINE_EVENT(bpf_obj_prog, bpf_obj_get_prog,
+
+ TP_PROTO(const struct bpf_prog *prg, int ufd,
+ const struct filename *pname),
+
+ TP_ARGS(prg, ufd, pname)
+);
+
+DECLARE_EVENT_CLASS(bpf_obj_map,
+
+ TP_PROTO(const struct bpf_map *map, int ufd,
+ const struct filename *pname),
+
+ TP_ARGS(map, ufd, pname),
+
+ TP_STRUCT__entry(
+ __field(u32, type)
+ __field(int, ufd)
+ __string(path, pname->name)
+ ),
+
+ TP_fast_assign(
+ __assign_str(path, pname->name);
+ __entry->type = map->map_type;
+ __entry->ufd = ufd;
+ ),
+
+ TP_printk("map type=%s ufd=%d path=%s",
+ __print_symbolic(__entry->type, __MAP_TYPE_SYM_TAB),
+ __entry->ufd, __get_str(path))
+);
+
+DEFINE_EVENT(bpf_obj_map, bpf_obj_pin_map,
+
+ TP_PROTO(const struct bpf_map *map, int ufd,
+ const struct filename *pname),
+
+ TP_ARGS(map, ufd, pname)
+);
+
+DEFINE_EVENT(bpf_obj_map, bpf_obj_get_map,
+
+ TP_PROTO(const struct bpf_map *map, int ufd,
+ const struct filename *pname),
+
+ TP_ARGS(map, ufd, pname)
+);
+
+DECLARE_EVENT_CLASS(bpf_map_keyval,
+
+ TP_PROTO(const struct bpf_map *map, int ufd,
+ const void *key, const void *val),
+
+ TP_ARGS(map, ufd, key, val),
+
+ TP_STRUCT__entry(
+ __field(u32, type)
+ __field(u32, key_len)
+ __dynamic_array(u8, key, map->key_size)
+ __field(bool, key_trunc)
+ __field(u32, val_len)
+ __dynamic_array(u8, val, map->value_size)
+ __field(bool, val_trunc)
+ __field(int, ufd)
+ ),
+
+ TP_fast_assign(
+ memcpy(__get_dynamic_array(key), key, map->key_size);
+ memcpy(__get_dynamic_array(val), val, map->value_size);
+ __entry->type = map->map_type;
+ __entry->key_len = min(map->key_size, 16U);
+ __entry->key_trunc = map->key_size != __entry->key_len;
+ __entry->val_len = min(map->value_size, 16U);
+ __entry->val_trunc = map->value_size != __entry->val_len;
+ __entry->ufd = ufd;
+ ),
+
+ TP_printk("map type=%s ufd=%d key=[%s%s] val=[%s%s]",
+ __print_symbolic(__entry->type, __MAP_TYPE_SYM_TAB),
+ __entry->ufd,
+ __print_hex(__get_dynamic_array(key), __entry->key_len),
+ __entry->key_trunc ? " ..." : "",
+ __print_hex(__get_dynamic_array(val), __entry->val_len),
+ __entry->val_trunc ? " ..." : "")
+);
+
+DEFINE_EVENT(bpf_map_keyval, bpf_map_lookup_elem,
+
+ TP_PROTO(const struct bpf_map *map, int ufd,
+ const void *key, const void *val),
+
+ TP_ARGS(map, ufd, key, val)
+);
+
+DEFINE_EVENT(bpf_map_keyval, bpf_map_update_elem,
+
+ TP_PROTO(const struct bpf_map *map, int ufd,
+ const void *key, const void *val),
+
+ TP_ARGS(map, ufd, key, val)
+);
+
+TRACE_EVENT(bpf_map_delete_elem,
+
+ TP_PROTO(const struct bpf_map *map, int ufd,
+ const void *key),
+
+ TP_ARGS(map, ufd, key),
+
+ TP_STRUCT__entry(
+ __field(u32, type)
+ __field(u32, key_len)
+ __dynamic_array(u8, key, map->key_size)
+ __field(bool, key_trunc)
+ __field(int, ufd)
+ ),
+
+ TP_fast_assign(
+ memcpy(__get_dynamic_array(key), key, map->key_size);
+ __entry->type = map->map_type;
+ __entry->key_len = min(map->key_size, 16U);
+ __entry->key_trunc = map->key_size != __entry->key_len;
+ __entry->ufd = ufd;
+ ),
+
+ TP_printk("map type=%s ufd=%d key=[%s%s]",
+ __print_symbolic(__entry->type, __MAP_TYPE_SYM_TAB),
+ __entry->ufd,
+ __print_hex(__get_dynamic_array(key), __entry->key_len),
+ __entry->key_trunc ? " ..." : "")
+);
+
+TRACE_EVENT(bpf_map_next_key,
+
+ TP_PROTO(const struct bpf_map *map, int ufd,
+ const void *key, const void *key_next),
+
+ TP_ARGS(map, ufd, key, key_next),
+
+ TP_STRUCT__entry(
+ __field(u32, type)
+ __field(u32, key_len)
+ __dynamic_array(u8, key, map->key_size)
+ __dynamic_array(u8, nxt, map->key_size)
+ __field(bool, key_trunc)
+ __field(int, ufd)
+ ),
+
+ TP_fast_assign(
+ memcpy(__get_dynamic_array(key), key, map->key_size);
+ memcpy(__get_dynamic_array(nxt), key_next, map->key_size);
+ __entry->type = map->map_type;
+ __entry->key_len = min(map->key_size, 16U);
+ __entry->key_trunc = map->key_size != __entry->key_len;
+ __entry->ufd = ufd;
+ ),
+
+ TP_printk("map type=%s ufd=%d key=[%s%s] next=[%s%s]",
+ __print_symbolic(__entry->type, __MAP_TYPE_SYM_TAB),
+ __entry->ufd,
+ __print_hex(__get_dynamic_array(key), __entry->key_len),
+ __entry->key_trunc ? " ..." : "",
+ __print_hex(__get_dynamic_array(nxt), __entry->key_len),
+ __entry->key_trunc ? " ..." : "")
+);
+
+#endif /* _TRACE_BPF_H */
+
+#include <trace/define_trace.h>
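[Editor's sketch] The bpf_map_keyval and bpf_map_next_key events above copy the whole key/value into a __dynamic_array() but only print the first 16 bytes, appending " ..." when the data was longer. A small userspace sketch of that truncation logic; the helper name is invented for the example and nothing here is kernel code:

#include <stdio.h>

static void print_trunc_hex(const char *label, const unsigned char *buf,
                            unsigned int full_len)
{
        unsigned int shown = full_len < 16U ? full_len : 16U;   /* min(len, 16U) */
        int truncated = full_len != shown;                      /* key_trunc / val_trunc */

        printf("%s=[", label);
        for (unsigned int i = 0; i < shown; i++)
                printf("%02x", buf[i]);
        printf("%s]\n", truncated ? " ..." : "");
}

int main(void)
{
        unsigned char key[20] = { 0xde, 0xad, 0xbe, 0xef };

        print_trunc_hex("key", key, sizeof(key));       /* 16 bytes shown, then " ..." */
        return 0;
}
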
diff --git a/include/trace/events/btrfs.h b/include/trace/events/btrfs.h
index 88d18a8ceb59..a3c3cab643a9 100644
--- a/include/trace/events/btrfs.h
+++ b/include/trace/events/btrfs.h
@@ -184,7 +184,7 @@ DEFINE_EVENT(btrfs__inode, btrfs_inode_evict,
TRACE_EVENT_CONDITION(btrfs_get_extent,
- TP_PROTO(struct btrfs_root *root, struct inode *inode,
+ TP_PROTO(struct btrfs_root *root, struct btrfs_inode *inode,
struct extent_map *map),
TP_ARGS(root, inode, map),
diff --git a/include/trace/events/cgroup.h b/include/trace/events/cgroup.h
index ab68640a18d0..c226f50e88fa 100644
--- a/include/trace/events/cgroup.h
+++ b/include/trace/events/cgroup.h
@@ -61,19 +61,15 @@ DECLARE_EVENT_CLASS(cgroup,
__field( int, id )
__field( int, level )
__dynamic_array(char, path,
- cgrp->kn ? cgroup_path(cgrp, NULL, 0) + 1
- : strlen("(null)"))
+ cgroup_path(cgrp, NULL, 0) + 1)
),
TP_fast_assign(
__entry->root = cgrp->root->hierarchy_id;
__entry->id = cgrp->id;
__entry->level = cgrp->level;
- if (cgrp->kn)
- cgroup_path(cgrp, __get_dynamic_array(path),
- __get_dynamic_array_len(path));
- else
- __assign_str(path, "(null)");
+ cgroup_path(cgrp, __get_dynamic_array(path),
+ __get_dynamic_array_len(path));
),
TP_printk("root=%d id=%d level=%d path=%s",
@@ -119,8 +115,7 @@ DECLARE_EVENT_CLASS(cgroup_migrate,
__field( int, dst_id )
__field( int, dst_level )
__dynamic_array(char, dst_path,
- dst_cgrp->kn ? cgroup_path(dst_cgrp, NULL, 0) + 1
- : strlen("(null)"))
+ cgroup_path(dst_cgrp, NULL, 0) + 1)
__field( int, pid )
__string( comm, task->comm )
),
@@ -129,11 +124,8 @@ DECLARE_EVENT_CLASS(cgroup_migrate,
__entry->dst_root = dst_cgrp->root->hierarchy_id;
__entry->dst_id = dst_cgrp->id;
__entry->dst_level = dst_cgrp->level;
- if (dst_cgrp->kn)
- cgroup_path(dst_cgrp, __get_dynamic_array(dst_path),
- __get_dynamic_array_len(dst_path));
- else
- __assign_str(dst_path, "(null)");
+ cgroup_path(dst_cgrp, __get_dynamic_array(dst_path),
+ __get_dynamic_array_len(dst_path));
__entry->pid = task->pid;
__assign_str(comm, task->comm);
),
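[Editor's sketch] The cgroup.h change above drops the cgrp->kn special case and sizes the dynamic path buffer with a first cgroup_path(cgrp, NULL, 0) call, then fills it with a second call into __get_dynamic_array(path). The same measure-then-fill idiom in plain userspace C, with snprintf() standing in for cgroup_path() and a made-up path:

#include <stdio.h>
#include <stdlib.h>

int main(void)
{
        const char *parent = "/sys/fs/cgroup", *name = "mygroup";

        /* pass 1: ask how many bytes the formatted path needs (excluding NUL) */
        int need = snprintf(NULL, 0, "%s/%s", parent, name);
        char *path = malloc(need + 1);

        if (!path)
                return 1;
        /* pass 2: format into the exactly-sized buffer */
        snprintf(path, need + 1, "%s/%s", parent, name);
        printf("path=%s (len %d)\n", path, need);
        free(path);
        return 0;
}
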
diff --git a/include/trace/events/compaction.h b/include/trace/events/compaction.h
index cbdb90b6b308..0a18ab6483ff 100644
--- a/include/trace/events/compaction.h
+++ b/include/trace/events/compaction.h
@@ -9,62 +9,6 @@
#include <linux/tracepoint.h>
#include <trace/events/mmflags.h>
-#define COMPACTION_STATUS \
- EM( COMPACT_SKIPPED, "skipped") \
- EM( COMPACT_DEFERRED, "deferred") \
- EM( COMPACT_CONTINUE, "continue") \
- EM( COMPACT_SUCCESS, "success") \
- EM( COMPACT_PARTIAL_SKIPPED, "partial_skipped") \
- EM( COMPACT_COMPLETE, "complete") \
- EM( COMPACT_NO_SUITABLE_PAGE, "no_suitable_page") \
- EM( COMPACT_NOT_SUITABLE_ZONE, "not_suitable_zone") \
- EMe(COMPACT_CONTENDED, "contended")
-
-#ifdef CONFIG_ZONE_DMA
-#define IFDEF_ZONE_DMA(X) X
-#else
-#define IFDEF_ZONE_DMA(X)
-#endif
-
-#ifdef CONFIG_ZONE_DMA32
-#define IFDEF_ZONE_DMA32(X) X
-#else
-#define IFDEF_ZONE_DMA32(X)
-#endif
-
-#ifdef CONFIG_HIGHMEM
-#define IFDEF_ZONE_HIGHMEM(X) X
-#else
-#define IFDEF_ZONE_HIGHMEM(X)
-#endif
-
-#define ZONE_TYPE \
- IFDEF_ZONE_DMA( EM (ZONE_DMA, "DMA")) \
- IFDEF_ZONE_DMA32( EM (ZONE_DMA32, "DMA32")) \
- EM (ZONE_NORMAL, "Normal") \
- IFDEF_ZONE_HIGHMEM( EM (ZONE_HIGHMEM,"HighMem")) \
- EMe(ZONE_MOVABLE,"Movable")
-
-/*
- * First define the enums in the above macros to be exported to userspace
- * via TRACE_DEFINE_ENUM().
- */
-#undef EM
-#undef EMe
-#define EM(a, b) TRACE_DEFINE_ENUM(a);
-#define EMe(a, b) TRACE_DEFINE_ENUM(a);
-
-COMPACTION_STATUS
-ZONE_TYPE
-
-/*
- * Now redefine the EM() and EMe() macros to map the enums to the strings
- * that will be printed in the output.
- */
-#undef EM
-#undef EMe
-#define EM(a, b) {a, b},
-#define EMe(a, b) {a, b}
DECLARE_EVENT_CLASS(mm_compaction_isolate_template,
@@ -187,6 +131,7 @@ TRACE_EVENT(mm_compaction_begin,
__entry->sync ? "sync" : "async")
);
+#ifdef CONFIG_COMPACTION
TRACE_EVENT(mm_compaction_end,
TP_PROTO(unsigned long zone_start, unsigned long migrate_pfn,
unsigned long free_pfn, unsigned long zone_end, bool sync,
@@ -220,6 +165,7 @@ TRACE_EVENT(mm_compaction_end,
__entry->sync ? "sync" : "async",
__print_symbolic(__entry->status, COMPACTION_STATUS))
);
+#endif
TRACE_EVENT(mm_compaction_try_to_compact_pages,
@@ -248,6 +194,7 @@ TRACE_EVENT(mm_compaction_try_to_compact_pages,
__entry->prio)
);
+#ifdef CONFIG_COMPACTION
DECLARE_EVENT_CLASS(mm_compaction_suitable_template,
TP_PROTO(struct zone *zone,
@@ -295,7 +242,6 @@ DEFINE_EVENT(mm_compaction_suitable_template, mm_compaction_suitable,
TP_ARGS(zone, order, ret)
);
-#ifdef CONFIG_COMPACTION
DECLARE_EVENT_CLASS(mm_compaction_defer_template,
TP_PROTO(struct zone *zone, int order),
diff --git a/include/trace/events/f2fs.h b/include/trace/events/f2fs.h
index 01b3c9869a0d..c80fcad0a6c9 100644
--- a/include/trace/events/f2fs.h
+++ b/include/trace/events/f2fs.h
@@ -6,8 +6,8 @@
#include <linux/tracepoint.h>
-#define show_dev(entry) MAJOR(entry->dev), MINOR(entry->dev)
-#define show_dev_ino(entry) show_dev(entry), (unsigned long)entry->ino
+#define show_dev(dev) MAJOR(dev), MINOR(dev)
+#define show_dev_ino(entry) show_dev(entry->dev), (unsigned long)entry->ino
TRACE_DEFINE_ENUM(NODE);
TRACE_DEFINE_ENUM(DATA);
@@ -55,25 +55,35 @@ TRACE_DEFINE_ENUM(CP_DISCARD);
{ IPU, "IN-PLACE" }, \
{ OPU, "OUT-OF-PLACE" })
-#define F2FS_BIO_FLAG_MASK(t) (t & (REQ_RAHEAD | REQ_PREFLUSH | REQ_FUA))
-#define F2FS_BIO_EXTRA_MASK(t) (t & (REQ_META | REQ_PRIO))
-
-#define show_bio_type(op_flags) show_bio_op_flags(op_flags), \
- show_bio_extra(op_flags)
+#define F2FS_OP_FLAGS (REQ_RAHEAD | REQ_SYNC | REQ_PREFLUSH | REQ_META |\
+ REQ_PRIO)
+#define F2FS_BIO_FLAG_MASK(t) (t & F2FS_OP_FLAGS)
+
+#define show_bio_type(op,op_flags) show_bio_op(op), \
+ show_bio_op_flags(op_flags)
+
+#define show_bio_op(op) \
+ __print_symbolic(op, \
+ { REQ_OP_READ, "READ" }, \
+ { REQ_OP_WRITE, "WRITE" }, \
+ { REQ_OP_FLUSH, "FLUSH" }, \
+ { REQ_OP_DISCARD, "DISCARD" }, \
+ { REQ_OP_ZONE_REPORT, "ZONE_REPORT" }, \
+ { REQ_OP_SECURE_ERASE, "SECURE_ERASE" }, \
+ { REQ_OP_ZONE_RESET, "ZONE_RESET" }, \
+ { REQ_OP_WRITE_SAME, "WRITE_SAME" }, \
+ { REQ_OP_WRITE_ZEROES, "WRITE_ZEROES" })
#define show_bio_op_flags(flags) \
__print_symbolic(F2FS_BIO_FLAG_MASK(flags), \
- { 0, "WRITE" }, \
- { REQ_RAHEAD, "READAHEAD" }, \
- { REQ_SYNC, "REQ_SYNC" }, \
- { REQ_PREFLUSH, "REQ_PREFLUSH" }, \
- { REQ_FUA, "REQ_FUA" })
-
-#define show_bio_extra(type) \
- __print_symbolic(F2FS_BIO_EXTRA_MASK(type), \
+ { REQ_RAHEAD, "(RA)" }, \
+ { REQ_SYNC, "(S)" }, \
+ { REQ_SYNC | REQ_PRIO, "(SP)" }, \
{ REQ_META, "(M)" }, \
- { REQ_PRIO, "(P)" }, \
{ REQ_META | REQ_PRIO, "(MP)" }, \
+ { REQ_SYNC | REQ_PREFLUSH , "(SF)" }, \
+ { REQ_SYNC | REQ_META | REQ_PRIO, "(SMP)" }, \
+ { REQ_PREFLUSH | REQ_META | REQ_PRIO, "(FMP)" }, \
{ 0, " \b" })
#define show_data_type(type) \
@@ -235,7 +245,7 @@ TRACE_EVENT(f2fs_sync_fs,
),
TP_printk("dev = (%d,%d), superblock is %s, wait = %d",
- show_dev(__entry),
+ show_dev(__entry->dev),
__entry->dirty ? "dirty" : "not dirty",
__entry->wait)
);
@@ -305,6 +315,13 @@ DEFINE_EVENT(f2fs__inode_exit, f2fs_unlink_exit,
TP_ARGS(inode, ret)
);
+DEFINE_EVENT(f2fs__inode_exit, f2fs_drop_inode,
+
+ TP_PROTO(struct inode *inode, int ret),
+
+ TP_ARGS(inode, ret)
+);
+
DEFINE_EVENT(f2fs__inode, f2fs_truncate,
TP_PROTO(struct inode *inode),
@@ -534,7 +551,7 @@ TRACE_EVENT(f2fs_background_gc,
),
TP_printk("dev = (%d,%d), wait_ms = %ld, prefree = %u, free = %u",
- show_dev(__entry),
+ show_dev(__entry->dev),
__entry->wait_ms,
__entry->prefree,
__entry->free)
@@ -555,6 +572,7 @@ TRACE_EVENT(f2fs_get_victim,
__field(int, alloc_mode)
__field(int, gc_mode)
__field(unsigned int, victim)
+ __field(unsigned int, cost)
__field(unsigned int, ofs_unit)
__field(unsigned int, pre_victim)
__field(unsigned int, prefree)
@@ -568,20 +586,23 @@ TRACE_EVENT(f2fs_get_victim,
__entry->alloc_mode = p->alloc_mode;
__entry->gc_mode = p->gc_mode;
__entry->victim = p->min_segno;
+ __entry->cost = p->min_cost;
__entry->ofs_unit = p->ofs_unit;
__entry->pre_victim = pre_victim;
__entry->prefree = prefree;
__entry->free = free;
),
- TP_printk("dev = (%d,%d), type = %s, policy = (%s, %s, %s), victim = %u "
- "ofs_unit = %u, pre_victim_secno = %d, prefree = %u, free = %u",
- show_dev(__entry),
+ TP_printk("dev = (%d,%d), type = %s, policy = (%s, %s, %s), "
+ "victim = %u, cost = %u, ofs_unit = %u, "
+ "pre_victim_secno = %d, prefree = %u, free = %u",
+ show_dev(__entry->dev),
show_data_type(__entry->type),
show_gc_type(__entry->gc_type),
show_alloc_mode(__entry->alloc_mode),
show_victim_policy(__entry->gc_mode),
__entry->victim,
+ __entry->cost,
__entry->ofs_unit,
(int)__entry->pre_victim,
__entry->prefree,
@@ -713,7 +734,7 @@ TRACE_EVENT(f2fs_reserve_new_blocks,
),
TP_printk("dev = (%d,%d), nid = %u, ofs_in_node = %u, count = %llu",
- show_dev(__entry),
+ show_dev(__entry->dev),
(unsigned int)__entry->nid,
__entry->ofs_in_node,
(unsigned long long)__entry->count)
@@ -753,7 +774,7 @@ DECLARE_EVENT_CLASS(f2fs__submit_page_bio,
(unsigned long)__entry->index,
(unsigned long long)__entry->old_blkaddr,
(unsigned long long)__entry->new_blkaddr,
- show_bio_type(__entry->op_flags),
+ show_bio_type(__entry->op, __entry->op_flags),
show_block_type(__entry->type))
);
@@ -775,15 +796,15 @@ DEFINE_EVENT_CONDITION(f2fs__submit_page_bio, f2fs_submit_page_mbio,
TP_CONDITION(page->mapping)
);
-DECLARE_EVENT_CLASS(f2fs__submit_bio,
+DECLARE_EVENT_CLASS(f2fs__bio,
- TP_PROTO(struct super_block *sb, struct f2fs_io_info *fio,
- struct bio *bio),
+ TP_PROTO(struct super_block *sb, int type, struct bio *bio),
- TP_ARGS(sb, fio, bio),
+ TP_ARGS(sb, type, bio),
TP_STRUCT__entry(
__field(dev_t, dev)
+ __field(dev_t, target)
__field(int, op)
__field(int, op_flags)
__field(int, type)
@@ -793,37 +814,55 @@ DECLARE_EVENT_CLASS(f2fs__submit_bio,
TP_fast_assign(
__entry->dev = sb->s_dev;
- __entry->op = fio->op;
- __entry->op_flags = fio->op_flags;
- __entry->type = fio->type;
+ __entry->target = bio->bi_bdev->bd_dev;
+ __entry->op = bio_op(bio);
+ __entry->op_flags = bio->bi_opf;
+ __entry->type = type;
__entry->sector = bio->bi_iter.bi_sector;
__entry->size = bio->bi_iter.bi_size;
),
- TP_printk("dev = (%d,%d), rw = %s%s, %s, sector = %lld, size = %u",
- show_dev(__entry),
- show_bio_type(__entry->op_flags),
+ TP_printk("dev = (%d,%d)/(%d,%d), rw = %s%s, %s, sector = %lld, size = %u",
+ show_dev(__entry->target),
+ show_dev(__entry->dev),
+ show_bio_type(__entry->op, __entry->op_flags),
show_block_type(__entry->type),
(unsigned long long)__entry->sector,
__entry->size)
);
-DEFINE_EVENT_CONDITION(f2fs__submit_bio, f2fs_submit_write_bio,
+DEFINE_EVENT_CONDITION(f2fs__bio, f2fs_prepare_write_bio,
+
+ TP_PROTO(struct super_block *sb, int type, struct bio *bio),
+
+ TP_ARGS(sb, type, bio),
+
+ TP_CONDITION(bio)
+);
+
+DEFINE_EVENT_CONDITION(f2fs__bio, f2fs_prepare_read_bio,
- TP_PROTO(struct super_block *sb, struct f2fs_io_info *fio,
- struct bio *bio),
+ TP_PROTO(struct super_block *sb, int type, struct bio *bio),
- TP_ARGS(sb, fio, bio),
+ TP_ARGS(sb, type, bio),
TP_CONDITION(bio)
);
-DEFINE_EVENT_CONDITION(f2fs__submit_bio, f2fs_submit_read_bio,
+DEFINE_EVENT_CONDITION(f2fs__bio, f2fs_submit_read_bio,
- TP_PROTO(struct super_block *sb, struct f2fs_io_info *fio,
- struct bio *bio),
+ TP_PROTO(struct super_block *sb, int type, struct bio *bio),
- TP_ARGS(sb, fio, bio),
+ TP_ARGS(sb, type, bio),
+
+ TP_CONDITION(bio)
+);
+
+DEFINE_EVENT_CONDITION(f2fs__bio, f2fs_submit_write_bio,
+
+ TP_PROTO(struct super_block *sb, int type, struct bio *bio),
+
+ TP_ARGS(sb, type, bio),
TP_CONDITION(bio)
);
@@ -1082,16 +1121,16 @@ TRACE_EVENT(f2fs_write_checkpoint,
),
TP_printk("dev = (%d,%d), checkpoint for %s, state = %s",
- show_dev(__entry),
+ show_dev(__entry->dev),
show_cpreason(__entry->reason),
__entry->msg)
);
TRACE_EVENT(f2fs_issue_discard,
- TP_PROTO(struct super_block *sb, block_t blkstart, block_t blklen),
+ TP_PROTO(struct block_device *dev, block_t blkstart, block_t blklen),
- TP_ARGS(sb, blkstart, blklen),
+ TP_ARGS(dev, blkstart, blklen),
TP_STRUCT__entry(
__field(dev_t, dev)
@@ -1100,22 +1139,22 @@ TRACE_EVENT(f2fs_issue_discard,
),
TP_fast_assign(
- __entry->dev = sb->s_dev;
+ __entry->dev = dev->bd_dev;
__entry->blkstart = blkstart;
__entry->blklen = blklen;
),
TP_printk("dev = (%d,%d), blkstart = 0x%llx, blklen = 0x%llx",
- show_dev(__entry),
+ show_dev(__entry->dev),
(unsigned long long)__entry->blkstart,
(unsigned long long)__entry->blklen)
);
TRACE_EVENT(f2fs_issue_reset_zone,
- TP_PROTO(struct super_block *sb, block_t blkstart),
+ TP_PROTO(struct block_device *dev, block_t blkstart),
- TP_ARGS(sb, blkstart),
+ TP_ARGS(dev, blkstart),
TP_STRUCT__entry(
__field(dev_t, dev)
@@ -1123,21 +1162,21 @@ TRACE_EVENT(f2fs_issue_reset_zone,
),
TP_fast_assign(
- __entry->dev = sb->s_dev;
+ __entry->dev = dev->bd_dev;
__entry->blkstart = blkstart;
),
TP_printk("dev = (%d,%d), reset zone at block = 0x%llx",
- show_dev(__entry),
+ show_dev(__entry->dev),
(unsigned long long)__entry->blkstart)
);
TRACE_EVENT(f2fs_issue_flush,
- TP_PROTO(struct super_block *sb, unsigned int nobarrier,
+ TP_PROTO(struct block_device *dev, unsigned int nobarrier,
unsigned int flush_merge),
- TP_ARGS(sb, nobarrier, flush_merge),
+ TP_ARGS(dev, nobarrier, flush_merge),
TP_STRUCT__entry(
__field(dev_t, dev)
@@ -1146,13 +1185,13 @@ TRACE_EVENT(f2fs_issue_flush,
),
TP_fast_assign(
- __entry->dev = sb->s_dev;
+ __entry->dev = dev->bd_dev;
__entry->nobarrier = nobarrier;
__entry->flush_merge = flush_merge;
),
TP_printk("dev = (%d,%d), %s %s",
- show_dev(__entry),
+ show_dev(__entry->dev),
__entry->nobarrier ? "skip (nobarrier)" : "issue",
__entry->flush_merge ? " with flush_merge" : "")
);
@@ -1267,7 +1306,7 @@ TRACE_EVENT(f2fs_shrink_extent_tree,
),
TP_printk("dev = (%d,%d), shrunk: node_cnt = %u, tree_cnt = %u",
- show_dev(__entry),
+ show_dev(__entry->dev),
__entry->node_cnt,
__entry->tree_cnt)
);
@@ -1314,7 +1353,7 @@ DECLARE_EVENT_CLASS(f2fs_sync_dirty_inodes,
),
TP_printk("dev = (%d,%d), %s, dirty count = %lld",
- show_dev(__entry),
+ show_dev(__entry->dev),
show_file_type(__entry->type),
__entry->count)
);
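[Editor's sketch] Most of the f2fs churn above is mechanical: show_dev() now takes a dev_t directly (show_dev(__entry->dev)) instead of the whole entry, so the same macro can also print the bio's target device (bio->bi_bdev->bd_dev). The macro itself is just a MAJOR()/MINOR() split; a tiny userspace equivalent using the glibc helpers, with an illustrative device number:

#include <stdio.h>
#include <sys/types.h>
#include <sys/sysmacros.h>

int main(void)
{
        dev_t dev = makedev(8, 1);      /* e.g. sda1 */

        /* TP_printk("dev = (%d,%d), ...", show_dev(__entry->dev), ...) */
        printf("dev = (%d,%d)\n", (int)major(dev), (int)minor(dev));
        return 0;
}
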
diff --git a/include/trace/events/fs_dax.h b/include/trace/events/fs_dax.h
new file mode 100644
index 000000000000..c566ddc87f73
--- /dev/null
+++ b/include/trace/events/fs_dax.h
@@ -0,0 +1,156 @@
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM fs_dax
+
+#if !defined(_TRACE_FS_DAX_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_FS_DAX_H
+
+#include <linux/tracepoint.h>
+
+DECLARE_EVENT_CLASS(dax_pmd_fault_class,
+ TP_PROTO(struct inode *inode, struct vm_fault *vmf,
+ pgoff_t max_pgoff, int result),
+ TP_ARGS(inode, vmf, max_pgoff, result),
+ TP_STRUCT__entry(
+ __field(unsigned long, ino)
+ __field(unsigned long, vm_start)
+ __field(unsigned long, vm_end)
+ __field(unsigned long, vm_flags)
+ __field(unsigned long, address)
+ __field(pgoff_t, pgoff)
+ __field(pgoff_t, max_pgoff)
+ __field(dev_t, dev)
+ __field(unsigned int, flags)
+ __field(int, result)
+ ),
+ TP_fast_assign(
+ __entry->dev = inode->i_sb->s_dev;
+ __entry->ino = inode->i_ino;
+ __entry->vm_start = vmf->vma->vm_start;
+ __entry->vm_end = vmf->vma->vm_end;
+ __entry->vm_flags = vmf->vma->vm_flags;
+ __entry->address = vmf->address;
+ __entry->flags = vmf->flags;
+ __entry->pgoff = vmf->pgoff;
+ __entry->max_pgoff = max_pgoff;
+ __entry->result = result;
+ ),
+ TP_printk("dev %d:%d ino %#lx %s %s address %#lx vm_start "
+ "%#lx vm_end %#lx pgoff %#lx max_pgoff %#lx %s",
+ MAJOR(__entry->dev),
+ MINOR(__entry->dev),
+ __entry->ino,
+ __entry->vm_flags & VM_SHARED ? "shared" : "private",
+ __print_flags(__entry->flags, "|", FAULT_FLAG_TRACE),
+ __entry->address,
+ __entry->vm_start,
+ __entry->vm_end,
+ __entry->pgoff,
+ __entry->max_pgoff,
+ __print_flags(__entry->result, "|", VM_FAULT_RESULT_TRACE)
+ )
+)
+
+#define DEFINE_PMD_FAULT_EVENT(name) \
+DEFINE_EVENT(dax_pmd_fault_class, name, \
+ TP_PROTO(struct inode *inode, struct vm_fault *vmf, \
+ pgoff_t max_pgoff, int result), \
+ TP_ARGS(inode, vmf, max_pgoff, result))
+
+DEFINE_PMD_FAULT_EVENT(dax_pmd_fault);
+DEFINE_PMD_FAULT_EVENT(dax_pmd_fault_done);
+
+DECLARE_EVENT_CLASS(dax_pmd_load_hole_class,
+ TP_PROTO(struct inode *inode, struct vm_fault *vmf,
+ struct page *zero_page,
+ void *radix_entry),
+ TP_ARGS(inode, vmf, zero_page, radix_entry),
+ TP_STRUCT__entry(
+ __field(unsigned long, ino)
+ __field(unsigned long, vm_flags)
+ __field(unsigned long, address)
+ __field(struct page *, zero_page)
+ __field(void *, radix_entry)
+ __field(dev_t, dev)
+ ),
+ TP_fast_assign(
+ __entry->dev = inode->i_sb->s_dev;
+ __entry->ino = inode->i_ino;
+ __entry->vm_flags = vmf->vma->vm_flags;
+ __entry->address = vmf->address;
+ __entry->zero_page = zero_page;
+ __entry->radix_entry = radix_entry;
+ ),
+ TP_printk("dev %d:%d ino %#lx %s address %#lx zero_page %p "
+ "radix_entry %#lx",
+ MAJOR(__entry->dev),
+ MINOR(__entry->dev),
+ __entry->ino,
+ __entry->vm_flags & VM_SHARED ? "shared" : "private",
+ __entry->address,
+ __entry->zero_page,
+ (unsigned long)__entry->radix_entry
+ )
+)
+
+#define DEFINE_PMD_LOAD_HOLE_EVENT(name) \
+DEFINE_EVENT(dax_pmd_load_hole_class, name, \
+ TP_PROTO(struct inode *inode, struct vm_fault *vmf, \
+ struct page *zero_page, void *radix_entry), \
+ TP_ARGS(inode, vmf, zero_page, radix_entry))
+
+DEFINE_PMD_LOAD_HOLE_EVENT(dax_pmd_load_hole);
+DEFINE_PMD_LOAD_HOLE_EVENT(dax_pmd_load_hole_fallback);
+
+DECLARE_EVENT_CLASS(dax_pmd_insert_mapping_class,
+ TP_PROTO(struct inode *inode, struct vm_fault *vmf,
+ long length, pfn_t pfn, void *radix_entry),
+ TP_ARGS(inode, vmf, length, pfn, radix_entry),
+ TP_STRUCT__entry(
+ __field(unsigned long, ino)
+ __field(unsigned long, vm_flags)
+ __field(unsigned long, address)
+ __field(long, length)
+ __field(u64, pfn_val)
+ __field(void *, radix_entry)
+ __field(dev_t, dev)
+ __field(int, write)
+ ),
+ TP_fast_assign(
+ __entry->dev = inode->i_sb->s_dev;
+ __entry->ino = inode->i_ino;
+ __entry->vm_flags = vmf->vma->vm_flags;
+ __entry->address = vmf->address;
+ __entry->write = vmf->flags & FAULT_FLAG_WRITE;
+ __entry->length = length;
+ __entry->pfn_val = pfn.val;
+ __entry->radix_entry = radix_entry;
+ ),
+ TP_printk("dev %d:%d ino %#lx %s %s address %#lx length %#lx "
+ "pfn %#llx %s radix_entry %#lx",
+ MAJOR(__entry->dev),
+ MINOR(__entry->dev),
+ __entry->ino,
+ __entry->vm_flags & VM_SHARED ? "shared" : "private",
+ __entry->write ? "write" : "read",
+ __entry->address,
+ __entry->length,
+ __entry->pfn_val & ~PFN_FLAGS_MASK,
+ __print_flags_u64(__entry->pfn_val & PFN_FLAGS_MASK, "|",
+ PFN_FLAGS_TRACE),
+ (unsigned long)__entry->radix_entry
+ )
+)
+
+#define DEFINE_PMD_INSERT_MAPPING_EVENT(name) \
+DEFINE_EVENT(dax_pmd_insert_mapping_class, name, \
+ TP_PROTO(struct inode *inode, struct vm_fault *vmf, \
+ long length, pfn_t pfn, void *radix_entry), \
+ TP_ARGS(inode, vmf, length, pfn, radix_entry))
+
+DEFINE_PMD_INSERT_MAPPING_EVENT(dax_pmd_insert_mapping);
+DEFINE_PMD_INSERT_MAPPING_EVENT(dax_pmd_insert_mapping_fallback);
+
+#endif /* _TRACE_FS_DAX_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
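[Editor's sketch] fs_dax.h declares each event class once and then stamps out the individual events with one-line wrapper macros such as DEFINE_PMD_FAULT_EVENT(name), so dax_pmd_fault and dax_pmd_fault_done share one argument list and output format. A rough userspace analogue of that pattern; the trace_ prefix and the class function here are invented for the sketch:

#include <stdio.h>

static void dax_pmd_fault_class(const char *event, unsigned long address, int result)
{
        /* the shared TP_printk()-style formatting lives in the class */
        printf("%s: address %#lx result %d\n", event, address, result);
}

#define DEFINE_PMD_FAULT_EVENT(name)                                    \
        static void trace_##name(unsigned long address, int result)    \
        {                                                               \
                dax_pmd_fault_class(#name, address, result);            \
        }

DEFINE_PMD_FAULT_EVENT(dax_pmd_fault)
DEFINE_PMD_FAULT_EVENT(dax_pmd_fault_done)

int main(void)
{
        trace_dax_pmd_fault(0x7f0000000000UL, 0);
        trace_dax_pmd_fault_done(0x7f0000000000UL, 0);
        return 0;
}
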
diff --git a/include/trace/events/mmflags.h b/include/trace/events/mmflags.h
index 15bf875d0e4a..304ff94363b2 100644
--- a/include/trace/events/mmflags.h
+++ b/include/trace/events/mmflags.h
@@ -1,3 +1,6 @@
+#include <linux/node.h>
+#include <linux/mmzone.h>
+#include <linux/compaction.h>
/*
* The order of these masks is important. Matching masks will be seen
* first and the left over flags will end up showing by themselves.
@@ -171,3 +174,98 @@ IF_HAVE_VM_SOFTDIRTY(VM_SOFTDIRTY, "softdirty" ) \
(flags) ? __print_flags(flags, "|", \
__def_vmaflag_names \
) : "none"
+
+#ifdef CONFIG_COMPACTION
+#define COMPACTION_STATUS \
+ EM( COMPACT_SKIPPED, "skipped") \
+ EM( COMPACT_DEFERRED, "deferred") \
+ EM( COMPACT_CONTINUE, "continue") \
+ EM( COMPACT_SUCCESS, "success") \
+ EM( COMPACT_PARTIAL_SKIPPED, "partial_skipped") \
+ EM( COMPACT_COMPLETE, "complete") \
+ EM( COMPACT_NO_SUITABLE_PAGE, "no_suitable_page") \
+ EM( COMPACT_NOT_SUITABLE_ZONE, "not_suitable_zone") \
+ EMe(COMPACT_CONTENDED, "contended")
+
+/* High-level compaction status feedback */
+#define COMPACTION_FAILED 1
+#define COMPACTION_WITHDRAWN 2
+#define COMPACTION_PROGRESS 3
+
+#define compact_result_to_feedback(result) \
+({ \
+ enum compact_result __result = result; \
+ (compaction_failed(__result)) ? COMPACTION_FAILED : \
+ (compaction_withdrawn(__result)) ? COMPACTION_WITHDRAWN : COMPACTION_PROGRESS; \
+})
+
+#define COMPACTION_FEEDBACK \
+ EM(COMPACTION_FAILED, "failed") \
+ EM(COMPACTION_WITHDRAWN, "withdrawn") \
+ EMe(COMPACTION_PROGRESS, "progress")
+
+#define COMPACTION_PRIORITY \
+ EM(COMPACT_PRIO_SYNC_FULL, "COMPACT_PRIO_SYNC_FULL") \
+ EM(COMPACT_PRIO_SYNC_LIGHT, "COMPACT_PRIO_SYNC_LIGHT") \
+ EMe(COMPACT_PRIO_ASYNC, "COMPACT_PRIO_ASYNC")
+#else
+#define COMPACTION_STATUS
+#define COMPACTION_PRIORITY
+#define COMPACTION_FEEDBACK
+#endif
+
+#ifdef CONFIG_ZONE_DMA
+#define IFDEF_ZONE_DMA(X) X
+#else
+#define IFDEF_ZONE_DMA(X)
+#endif
+
+#ifdef CONFIG_ZONE_DMA32
+#define IFDEF_ZONE_DMA32(X) X
+#else
+#define IFDEF_ZONE_DMA32(X)
+#endif
+
+#ifdef CONFIG_HIGHMEM
+#define IFDEF_ZONE_HIGHMEM(X) X
+#else
+#define IFDEF_ZONE_HIGHMEM(X)
+#endif
+
+#define ZONE_TYPE \
+ IFDEF_ZONE_DMA( EM (ZONE_DMA, "DMA")) \
+ IFDEF_ZONE_DMA32( EM (ZONE_DMA32, "DMA32")) \
+ EM (ZONE_NORMAL, "Normal") \
+ IFDEF_ZONE_HIGHMEM( EM (ZONE_HIGHMEM,"HighMem")) \
+ EMe(ZONE_MOVABLE,"Movable")
+
+#define LRU_NAMES \
+ EM (LRU_INACTIVE_ANON, "inactive_anon") \
+ EM (LRU_ACTIVE_ANON, "active_anon") \
+ EM (LRU_INACTIVE_FILE, "inactive_file") \
+ EM (LRU_ACTIVE_FILE, "active_file") \
+ EMe(LRU_UNEVICTABLE, "unevictable")
+
+/*
+ * First define the enums in the above macros to be exported to userspace
+ * via TRACE_DEFINE_ENUM().
+ */
+#undef EM
+#undef EMe
+#define EM(a, b) TRACE_DEFINE_ENUM(a);
+#define EMe(a, b) TRACE_DEFINE_ENUM(a);
+
+COMPACTION_STATUS
+COMPACTION_PRIORITY
+COMPACTION_FEEDBACK
+ZONE_TYPE
+LRU_NAMES
+
+/*
+ * Now redefine the EM() and EMe() macros to map the enums to the strings
+ * that will be printed in the output.
+ */
+#undef EM
+#undef EMe
+#define EM(a, b) {a, b},
+#define EMe(a, b) {a, b}
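[Editor's sketch] compact_result_to_feedback() above collapses the detailed compact_result codes into the three coarse COMPACTION_FEEDBACK buckets that the compact_retry event prints. Written out as a plain function with stubbed predicates (the real compaction_failed()/compaction_withdrawn() cover more states than shown here), the mapping looks like this:

#include <stdio.h>

enum compact_result { COMPACT_SKIPPED, COMPACT_DEFERRED, COMPACT_COMPLETE, COMPACT_SUCCESS };

#define COMPACTION_FAILED       1
#define COMPACTION_WITHDRAWN    2
#define COMPACTION_PROGRESS     3

/* stand-ins for the kernel predicates, reduced to a couple of cases each */
static int compaction_failed(enum compact_result r)    { return r == COMPACT_COMPLETE; }
static int compaction_withdrawn(enum compact_result r) { return r == COMPACT_SKIPPED || r == COMPACT_DEFERRED; }

static int compact_result_to_feedback(enum compact_result result)
{
        return compaction_failed(result)    ? COMPACTION_FAILED :
               compaction_withdrawn(result) ? COMPACTION_WITHDRAWN :
                                              COMPACTION_PROGRESS;
}

int main(void)
{
        static const char *names[] = {
                [COMPACTION_FAILED]    = "failed",
                [COMPACTION_WITHDRAWN] = "withdrawn",
                [COMPACTION_PROGRESS]  = "progress",
        };

        printf("%s\n", names[compact_result_to_feedback(COMPACT_DEFERRED)]);    /* withdrawn */
        return 0;
}
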
diff --git a/include/trace/events/oom.h b/include/trace/events/oom.h
index 1e974983757e..38baeb27221a 100644
--- a/include/trace/events/oom.h
+++ b/include/trace/events/oom.h
@@ -4,6 +4,7 @@
#if !defined(_TRACE_OOM_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_OOM_H
#include <linux/tracepoint.h>
+#include <trace/events/mmflags.h>
TRACE_EVENT(oom_score_adj_update,
@@ -27,6 +28,86 @@ TRACE_EVENT(oom_score_adj_update,
__entry->pid, __entry->comm, __entry->oom_score_adj)
);
+TRACE_EVENT(reclaim_retry_zone,
+
+ TP_PROTO(struct zoneref *zoneref,
+ int order,
+ unsigned long reclaimable,
+ unsigned long available,
+ unsigned long min_wmark,
+ int no_progress_loops,
+ bool wmark_check),
+
+ TP_ARGS(zoneref, order, reclaimable, available, min_wmark, no_progress_loops, wmark_check),
+
+ TP_STRUCT__entry(
+ __field( int, node)
+ __field( int, zone_idx)
+ __field( int, order)
+ __field( unsigned long, reclaimable)
+ __field( unsigned long, available)
+ __field( unsigned long, min_wmark)
+ __field( int, no_progress_loops)
+ __field( bool, wmark_check)
+ ),
+
+ TP_fast_assign(
+ __entry->node = zone_to_nid(zoneref->zone);
+ __entry->zone_idx = zoneref->zone_idx;
+ __entry->order = order;
+ __entry->reclaimable = reclaimable;
+ __entry->available = available;
+ __entry->min_wmark = min_wmark;
+ __entry->no_progress_loops = no_progress_loops;
+ __entry->wmark_check = wmark_check;
+ ),
+
+ TP_printk("node=%d zone=%-8s order=%d reclaimable=%lu available=%lu min_wmark=%lu no_progress_loops=%d wmark_check=%d",
+ __entry->node, __print_symbolic(__entry->zone_idx, ZONE_TYPE),
+ __entry->order,
+ __entry->reclaimable, __entry->available, __entry->min_wmark,
+ __entry->no_progress_loops,
+ __entry->wmark_check)
+);
+
+#ifdef CONFIG_COMPACTION
+TRACE_EVENT(compact_retry,
+
+ TP_PROTO(int order,
+ enum compact_priority priority,
+ enum compact_result result,
+ int retries,
+ int max_retries,
+ bool ret),
+
+ TP_ARGS(order, priority, result, retries, max_retries, ret),
+
+ TP_STRUCT__entry(
+ __field( int, order)
+ __field( int, priority)
+ __field( int, result)
+ __field( int, retries)
+ __field( int, max_retries)
+ __field( bool, ret)
+ ),
+
+ TP_fast_assign(
+ __entry->order = order;
+ __entry->priority = priority;
+ __entry->result = compact_result_to_feedback(result);
+ __entry->retries = retries;
+ __entry->max_retries = max_retries;
+ __entry->ret = ret;
+ ),
+
+ TP_printk("order=%d priority=%s compaction_result=%s retries=%d max_retries=%d should_retry=%d",
+ __entry->order,
+ __print_symbolic(__entry->priority, COMPACTION_PRIORITY),
+ __print_symbolic(__entry->result, COMPACTION_FEEDBACK),
+ __entry->retries, __entry->max_retries,
+ __entry->ret)
+);
+#endif /* CONFIG_COMPACTION */
#endif
/* This part must be outside protection */
diff --git a/include/trace/events/rxrpc.h b/include/trace/events/rxrpc.h
index 0383e5e9a0f3..39123c06a566 100644
--- a/include/trace/events/rxrpc.h
+++ b/include/trace/events/rxrpc.h
@@ -16,6 +16,388 @@
#include <linux/tracepoint.h>
+/*
+ * Define enums for tracing information.
+ *
+ * These should all be kept sorted, making it easier to match the string
+ * mapping tables further on.
+ */
+#ifndef __RXRPC_DECLARE_TRACE_ENUMS_ONCE_ONLY
+#define __RXRPC_DECLARE_TRACE_ENUMS_ONCE_ONLY
+
+enum rxrpc_skb_trace {
+ rxrpc_skb_rx_cleaned,
+ rxrpc_skb_rx_freed,
+ rxrpc_skb_rx_got,
+ rxrpc_skb_rx_lost,
+ rxrpc_skb_rx_purged,
+ rxrpc_skb_rx_received,
+ rxrpc_skb_rx_rotated,
+ rxrpc_skb_rx_seen,
+ rxrpc_skb_tx_cleaned,
+ rxrpc_skb_tx_freed,
+ rxrpc_skb_tx_got,
+ rxrpc_skb_tx_new,
+ rxrpc_skb_tx_rotated,
+ rxrpc_skb_tx_seen,
+};
+
+enum rxrpc_conn_trace {
+ rxrpc_conn_got,
+ rxrpc_conn_new_client,
+ rxrpc_conn_new_service,
+ rxrpc_conn_put_client,
+ rxrpc_conn_put_service,
+ rxrpc_conn_queued,
+ rxrpc_conn_seen,
+};
+
+enum rxrpc_client_trace {
+ rxrpc_client_activate_chans,
+ rxrpc_client_alloc,
+ rxrpc_client_chan_activate,
+ rxrpc_client_chan_disconnect,
+ rxrpc_client_chan_pass,
+ rxrpc_client_chan_unstarted,
+ rxrpc_client_cleanup,
+ rxrpc_client_count,
+ rxrpc_client_discard,
+ rxrpc_client_duplicate,
+ rxrpc_client_exposed,
+ rxrpc_client_replace,
+ rxrpc_client_to_active,
+ rxrpc_client_to_culled,
+ rxrpc_client_to_idle,
+ rxrpc_client_to_inactive,
+ rxrpc_client_to_upgrade,
+ rxrpc_client_to_waiting,
+ rxrpc_client_uncount,
+};
+
+enum rxrpc_call_trace {
+ rxrpc_call_connected,
+ rxrpc_call_error,
+ rxrpc_call_got,
+ rxrpc_call_got_kernel,
+ rxrpc_call_got_userid,
+ rxrpc_call_new_client,
+ rxrpc_call_new_service,
+ rxrpc_call_put,
+ rxrpc_call_put_kernel,
+ rxrpc_call_put_noqueue,
+ rxrpc_call_put_userid,
+ rxrpc_call_queued,
+ rxrpc_call_queued_ref,
+ rxrpc_call_release,
+ rxrpc_call_seen,
+};
+
+enum rxrpc_transmit_trace {
+ rxrpc_transmit_await_reply,
+ rxrpc_transmit_end,
+ rxrpc_transmit_queue,
+ rxrpc_transmit_queue_last,
+ rxrpc_transmit_rotate,
+ rxrpc_transmit_rotate_last,
+ rxrpc_transmit_wait,
+};
+
+enum rxrpc_receive_trace {
+ rxrpc_receive_end,
+ rxrpc_receive_front,
+ rxrpc_receive_incoming,
+ rxrpc_receive_queue,
+ rxrpc_receive_queue_last,
+ rxrpc_receive_rotate,
+};
+
+enum rxrpc_recvmsg_trace {
+ rxrpc_recvmsg_cont,
+ rxrpc_recvmsg_data_return,
+ rxrpc_recvmsg_dequeue,
+ rxrpc_recvmsg_enter,
+ rxrpc_recvmsg_full,
+ rxrpc_recvmsg_hole,
+ rxrpc_recvmsg_next,
+ rxrpc_recvmsg_requeue,
+ rxrpc_recvmsg_return,
+ rxrpc_recvmsg_terminal,
+ rxrpc_recvmsg_to_be_accepted,
+ rxrpc_recvmsg_wait,
+};
+
+enum rxrpc_rtt_tx_trace {
+ rxrpc_rtt_tx_data,
+ rxrpc_rtt_tx_ping,
+};
+
+enum rxrpc_rtt_rx_trace {
+ rxrpc_rtt_rx_ping_response,
+ rxrpc_rtt_rx_requested_ack,
+};
+
+enum rxrpc_timer_trace {
+ rxrpc_timer_begin,
+ rxrpc_timer_expired,
+ rxrpc_timer_init_for_reply,
+ rxrpc_timer_init_for_send_reply,
+ rxrpc_timer_set_for_ack,
+ rxrpc_timer_set_for_ping,
+ rxrpc_timer_set_for_resend,
+ rxrpc_timer_set_for_send,
+};
+
+enum rxrpc_propose_ack_trace {
+ rxrpc_propose_ack_client_tx_end,
+ rxrpc_propose_ack_input_data,
+ rxrpc_propose_ack_ping_for_lost_ack,
+ rxrpc_propose_ack_ping_for_lost_reply,
+ rxrpc_propose_ack_ping_for_params,
+ rxrpc_propose_ack_processing_op,
+ rxrpc_propose_ack_respond_to_ack,
+ rxrpc_propose_ack_respond_to_ping,
+ rxrpc_propose_ack_retry_tx,
+ rxrpc_propose_ack_rotate_rx,
+ rxrpc_propose_ack_terminal_ack,
+};
+
+enum rxrpc_propose_ack_outcome {
+ rxrpc_propose_ack_subsume,
+ rxrpc_propose_ack_update,
+ rxrpc_propose_ack_use,
+};
+
+enum rxrpc_congest_change {
+ rxrpc_cong_begin_retransmission,
+ rxrpc_cong_cleared_nacks,
+ rxrpc_cong_new_low_nack,
+ rxrpc_cong_no_change,
+ rxrpc_cong_progress,
+ rxrpc_cong_retransmit_again,
+ rxrpc_cong_rtt_window_end,
+ rxrpc_cong_saw_nack,
+};
+
+#endif /* end __RXRPC_DECLARE_TRACE_ENUMS_ONCE_ONLY */
+
+/*
+ * Declare tracing information enums and their string mappings for display.
+ */
+#define rxrpc_skb_traces \
+ EM(rxrpc_skb_rx_cleaned, "Rx CLN") \
+ EM(rxrpc_skb_rx_freed, "Rx FRE") \
+ EM(rxrpc_skb_rx_got, "Rx GOT") \
+ EM(rxrpc_skb_rx_lost, "Rx *L*") \
+ EM(rxrpc_skb_rx_purged, "Rx PUR") \
+ EM(rxrpc_skb_rx_received, "Rx RCV") \
+ EM(rxrpc_skb_rx_rotated, "Rx ROT") \
+ EM(rxrpc_skb_rx_seen, "Rx SEE") \
+ EM(rxrpc_skb_tx_cleaned, "Tx CLN") \
+ EM(rxrpc_skb_tx_freed, "Tx FRE") \
+ EM(rxrpc_skb_tx_got, "Tx GOT") \
+ EM(rxrpc_skb_tx_new, "Tx NEW") \
+ EM(rxrpc_skb_tx_rotated, "Tx ROT") \
+ E_(rxrpc_skb_tx_seen, "Tx SEE")
+
+#define rxrpc_conn_traces \
+ EM(rxrpc_conn_got, "GOT") \
+ EM(rxrpc_conn_new_client, "NWc") \
+ EM(rxrpc_conn_new_service, "NWs") \
+ EM(rxrpc_conn_put_client, "PTc") \
+ EM(rxrpc_conn_put_service, "PTs") \
+ EM(rxrpc_conn_queued, "QUE") \
+ E_(rxrpc_conn_seen, "SEE")
+
+#define rxrpc_client_traces \
+ EM(rxrpc_client_activate_chans, "Activa") \
+ EM(rxrpc_client_alloc, "Alloc ") \
+ EM(rxrpc_client_chan_activate, "ChActv") \
+ EM(rxrpc_client_chan_disconnect, "ChDisc") \
+ EM(rxrpc_client_chan_pass, "ChPass") \
+ EM(rxrpc_client_chan_unstarted, "ChUnst") \
+ EM(rxrpc_client_cleanup, "Clean ") \
+ EM(rxrpc_client_count, "Count ") \
+ EM(rxrpc_client_discard, "Discar") \
+ EM(rxrpc_client_duplicate, "Duplic") \
+ EM(rxrpc_client_exposed, "Expose") \
+ EM(rxrpc_client_replace, "Replac") \
+ EM(rxrpc_client_to_active, "->Actv") \
+ EM(rxrpc_client_to_culled, "->Cull") \
+ EM(rxrpc_client_to_idle, "->Idle") \
+ EM(rxrpc_client_to_inactive, "->Inac") \
+ EM(rxrpc_client_to_upgrade, "->Upgd") \
+ EM(rxrpc_client_to_waiting, "->Wait") \
+ E_(rxrpc_client_uncount, "Uncoun")
+
+#define rxrpc_conn_cache_states \
+ EM(RXRPC_CONN_CLIENT_INACTIVE, "Inac") \
+ EM(RXRPC_CONN_CLIENT_WAITING, "Wait") \
+ EM(RXRPC_CONN_CLIENT_ACTIVE, "Actv") \
+ EM(RXRPC_CONN_CLIENT_CULLED, "Cull") \
+ E_(RXRPC_CONN_CLIENT_IDLE, "Idle") \
+
+#define rxrpc_call_traces \
+ EM(rxrpc_call_connected, "CON") \
+ EM(rxrpc_call_error, "*E*") \
+ EM(rxrpc_call_got, "GOT") \
+ EM(rxrpc_call_got_kernel, "Gke") \
+ EM(rxrpc_call_got_userid, "Gus") \
+ EM(rxrpc_call_new_client, "NWc") \
+ EM(rxrpc_call_new_service, "NWs") \
+ EM(rxrpc_call_put, "PUT") \
+ EM(rxrpc_call_put_kernel, "Pke") \
+ EM(rxrpc_call_put_noqueue, "PNQ") \
+ EM(rxrpc_call_put_userid, "Pus") \
+ EM(rxrpc_call_queued, "QUE") \
+ EM(rxrpc_call_queued_ref, "QUR") \
+ EM(rxrpc_call_release, "RLS") \
+ E_(rxrpc_call_seen, "SEE")
+
+#define rxrpc_transmit_traces \
+ EM(rxrpc_transmit_await_reply, "AWR") \
+ EM(rxrpc_transmit_end, "END") \
+ EM(rxrpc_transmit_queue, "QUE") \
+ EM(rxrpc_transmit_queue_last, "QLS") \
+ EM(rxrpc_transmit_rotate, "ROT") \
+ EM(rxrpc_transmit_rotate_last, "RLS") \
+ E_(rxrpc_transmit_wait, "WAI")
+
+#define rxrpc_receive_traces \
+ EM(rxrpc_receive_end, "END") \
+ EM(rxrpc_receive_front, "FRN") \
+ EM(rxrpc_receive_incoming, "INC") \
+ EM(rxrpc_receive_queue, "QUE") \
+ EM(rxrpc_receive_queue_last, "QLS") \
+ E_(rxrpc_receive_rotate, "ROT")
+
+#define rxrpc_recvmsg_traces \
+ EM(rxrpc_recvmsg_cont, "CONT") \
+ EM(rxrpc_recvmsg_data_return, "DATA") \
+ EM(rxrpc_recvmsg_dequeue, "DEQU") \
+ EM(rxrpc_recvmsg_enter, "ENTR") \
+ EM(rxrpc_recvmsg_full, "FULL") \
+ EM(rxrpc_recvmsg_hole, "HOLE") \
+ EM(rxrpc_recvmsg_next, "NEXT") \
+ EM(rxrpc_recvmsg_requeue, "REQU") \
+ EM(rxrpc_recvmsg_return, "RETN") \
+ EM(rxrpc_recvmsg_terminal, "TERM") \
+ EM(rxrpc_recvmsg_to_be_accepted, "TBAC") \
+ E_(rxrpc_recvmsg_wait, "WAIT")
+
+#define rxrpc_rtt_tx_traces \
+ EM(rxrpc_rtt_tx_data, "DATA") \
+ E_(rxrpc_rtt_tx_ping, "PING")
+
+#define rxrpc_rtt_rx_traces \
+ EM(rxrpc_rtt_rx_ping_response, "PONG") \
+ E_(rxrpc_rtt_rx_requested_ack, "RACK")
+
+#define rxrpc_timer_traces \
+ EM(rxrpc_timer_begin, "Begin ") \
+ EM(rxrpc_timer_expired, "*EXPR*") \
+ EM(rxrpc_timer_init_for_reply, "IniRpl") \
+ EM(rxrpc_timer_init_for_send_reply, "SndRpl") \
+ EM(rxrpc_timer_set_for_ack, "SetAck") \
+ EM(rxrpc_timer_set_for_ping, "SetPng") \
+ EM(rxrpc_timer_set_for_resend, "SetRTx") \
+ E_(rxrpc_timer_set_for_send, "SetTx ")
+
+#define rxrpc_propose_ack_traces \
+ EM(rxrpc_propose_ack_client_tx_end, "ClTxEnd") \
+ EM(rxrpc_propose_ack_input_data, "DataIn ") \
+ EM(rxrpc_propose_ack_ping_for_lost_ack, "LostAck") \
+ EM(rxrpc_propose_ack_ping_for_lost_reply, "LostRpl") \
+ EM(rxrpc_propose_ack_ping_for_params, "Params ") \
+ EM(rxrpc_propose_ack_processing_op, "ProcOp ") \
+ EM(rxrpc_propose_ack_respond_to_ack, "Rsp2Ack") \
+ EM(rxrpc_propose_ack_respond_to_ping, "Rsp2Png") \
+ EM(rxrpc_propose_ack_retry_tx, "RetryTx") \
+ EM(rxrpc_propose_ack_rotate_rx, "RxAck ") \
+ E_(rxrpc_propose_ack_terminal_ack, "ClTerm ")
+
+#define rxrpc_propose_ack_outcomes \
+ EM(rxrpc_propose_ack_subsume, " Subsume") \
+ EM(rxrpc_propose_ack_update, " Update") \
+ E_(rxrpc_propose_ack_use, "")
+
+#define rxrpc_congest_modes \
+ EM(RXRPC_CALL_CONGEST_AVOIDANCE, "CongAvoid") \
+ EM(RXRPC_CALL_FAST_RETRANSMIT, "FastReTx ") \
+ EM(RXRPC_CALL_PACKET_LOSS, "PktLoss ") \
+ E_(RXRPC_CALL_SLOW_START, "SlowStart")
+
+#define rxrpc_congest_changes \
+ EM(rxrpc_cong_begin_retransmission, " Retrans") \
+ EM(rxrpc_cong_cleared_nacks, " Cleared") \
+ EM(rxrpc_cong_new_low_nack, " NewLowN") \
+ EM(rxrpc_cong_no_change, "") \
+ EM(rxrpc_cong_progress, " Progres") \
+ EM(rxrpc_cong_retransmit_again, " ReTxAgn") \
+ EM(rxrpc_cong_rtt_window_end, " RttWinE") \
+ E_(rxrpc_cong_saw_nack, " SawNack")
+
+#define rxrpc_pkts \
+ EM(0, "?00") \
+ EM(RXRPC_PACKET_TYPE_DATA, "DATA") \
+ EM(RXRPC_PACKET_TYPE_ACK, "ACK") \
+ EM(RXRPC_PACKET_TYPE_BUSY, "BUSY") \
+ EM(RXRPC_PACKET_TYPE_ABORT, "ABORT") \
+ EM(RXRPC_PACKET_TYPE_ACKALL, "ACKALL") \
+ EM(RXRPC_PACKET_TYPE_CHALLENGE, "CHALL") \
+ EM(RXRPC_PACKET_TYPE_RESPONSE, "RESP") \
+ EM(RXRPC_PACKET_TYPE_DEBUG, "DEBUG") \
+ EM(9, "?09") \
+ EM(10, "?10") \
+ EM(11, "?11") \
+ EM(12, "?12") \
+ EM(RXRPC_PACKET_TYPE_VERSION, "VERSION") \
+ EM(14, "?14") \
+ E_(15, "?15")
+
+#define rxrpc_ack_names \
+ EM(0, "-0-") \
+ EM(RXRPC_ACK_REQUESTED, "REQ") \
+ EM(RXRPC_ACK_DUPLICATE, "DUP") \
+ EM(RXRPC_ACK_OUT_OF_SEQUENCE, "OOS") \
+ EM(RXRPC_ACK_EXCEEDS_WINDOW, "WIN") \
+ EM(RXRPC_ACK_NOSPACE, "MEM") \
+ EM(RXRPC_ACK_PING, "PNG") \
+ EM(RXRPC_ACK_PING_RESPONSE, "PNR") \
+ EM(RXRPC_ACK_DELAY, "DLY") \
+ EM(RXRPC_ACK_IDLE, "IDL") \
+ E_(RXRPC_ACK__INVALID, "-?-")
+
+/*
+ * Export enum symbols via userspace.
+ */
+#undef EM
+#undef E_
+#define EM(a, b) TRACE_DEFINE_ENUM(a);
+#define E_(a, b) TRACE_DEFINE_ENUM(a);
+
+rxrpc_skb_traces;
+rxrpc_conn_traces;
+rxrpc_client_traces;
+rxrpc_call_traces;
+rxrpc_transmit_traces;
+rxrpc_receive_traces;
+rxrpc_recvmsg_traces;
+rxrpc_rtt_tx_traces;
+rxrpc_rtt_rx_traces;
+rxrpc_timer_traces;
+rxrpc_propose_ack_traces;
+rxrpc_propose_ack_outcomes;
+rxrpc_congest_changes;
+
+/*
+ * Now redefine the EM() and E_() macros to map the enums to the strings that
+ * will be printed in the output.
+ */
+#undef EM
+#undef E_
+#define EM(a, b) { a, b },
+#define E_(a, b) { a, b }
+
TRACE_EVENT(rxrpc_conn,
TP_PROTO(struct rxrpc_connection *conn, enum rxrpc_conn_trace op,
int usage, const void *where),
@@ -38,7 +420,7 @@ TRACE_EVENT(rxrpc_conn,
TP_printk("C=%p %s u=%d sp=%pSR",
__entry->conn,
- rxrpc_conn_traces[__entry->op],
+ __print_symbolic(__entry->op, rxrpc_conn_traces),
__entry->usage,
__entry->where)
);
@@ -70,8 +452,8 @@ TRACE_EVENT(rxrpc_client,
TP_printk("C=%p h=%2d %s %s i=%08x u=%d",
__entry->conn,
__entry->channel,
- rxrpc_client_traces[__entry->op],
- rxrpc_conn_cache_states[__entry->cs],
+ __print_symbolic(__entry->op, rxrpc_client_traces),
+ __print_symbolic(__entry->cs, rxrpc_conn_cache_states),
__entry->cid,
__entry->usage)
);
@@ -100,7 +482,7 @@ TRACE_EVENT(rxrpc_call,
TP_printk("c=%p %s u=%d sp=%pSR a=%p",
__entry->call,
- rxrpc_call_traces[__entry->op],
+ __print_symbolic(__entry->op, rxrpc_call_traces),
__entry->usage,
__entry->where,
__entry->aux)
@@ -130,7 +512,7 @@ TRACE_EVENT(rxrpc_skb,
TP_printk("s=%p %s u=%d m=%d p=%pSR",
__entry->skb,
- rxrpc_skb_traces[__entry->op],
+ __print_symbolic(__entry->op, rxrpc_skb_traces),
__entry->usage,
__entry->mod_count,
__entry->where)
@@ -154,7 +536,8 @@ TRACE_EVENT(rxrpc_rx_packet,
__entry->hdr.callNumber, __entry->hdr.serviceId,
__entry->hdr.serial, __entry->hdr.seq,
__entry->hdr.type, __entry->hdr.flags,
- __entry->hdr.type <= 15 ? rxrpc_pkts[__entry->hdr.type] : "?UNK")
+ __entry->hdr.type <= 15 ?
+ __print_symbolic(__entry->hdr.type, rxrpc_pkts) : "?UNK")
);
TRACE_EVENT(rxrpc_rx_done,
@@ -214,6 +597,7 @@ TRACE_EVENT(rxrpc_transmit,
__field(enum rxrpc_transmit_trace, why )
__field(rxrpc_seq_t, tx_hard_ack )
__field(rxrpc_seq_t, tx_top )
+ __field(int, tx_winsize )
),
TP_fast_assign(
@@ -221,38 +605,81 @@ TRACE_EVENT(rxrpc_transmit,
__entry->why = why;
__entry->tx_hard_ack = call->tx_hard_ack;
__entry->tx_top = call->tx_top;
+ __entry->tx_winsize = call->tx_winsize;
),
- TP_printk("c=%p %s f=%08x n=%u",
+ TP_printk("c=%p %s f=%08x n=%u/%u",
__entry->call,
- rxrpc_transmit_traces[__entry->why],
+ __print_symbolic(__entry->why, rxrpc_transmit_traces),
__entry->tx_hard_ack + 1,
- __entry->tx_top - __entry->tx_hard_ack)
+ __entry->tx_top - __entry->tx_hard_ack,
+ __entry->tx_winsize)
+ );
+
+TRACE_EVENT(rxrpc_rx_data,
+ TP_PROTO(struct rxrpc_call *call, rxrpc_seq_t seq,
+ rxrpc_serial_t serial, u8 flags, u8 anno),
+
+ TP_ARGS(call, seq, serial, flags, anno),
+
+ TP_STRUCT__entry(
+ __field(struct rxrpc_call *, call )
+ __field(rxrpc_seq_t, seq )
+ __field(rxrpc_serial_t, serial )
+ __field(u8, flags )
+ __field(u8, anno )
+ ),
+
+ TP_fast_assign(
+ __entry->call = call;
+ __entry->seq = seq;
+ __entry->serial = serial;
+ __entry->flags = flags;
+ __entry->anno = anno;
+ ),
+
+ TP_printk("c=%p DATA %08x q=%08x fl=%02x a=%02x",
+ __entry->call,
+ __entry->serial,
+ __entry->seq,
+ __entry->flags,
+ __entry->anno)
);
TRACE_EVENT(rxrpc_rx_ack,
- TP_PROTO(struct rxrpc_call *call, rxrpc_seq_t first, u8 reason, u8 n_acks),
+ TP_PROTO(struct rxrpc_call *call,
+ rxrpc_serial_t serial, rxrpc_serial_t ack_serial,
+ rxrpc_seq_t first, rxrpc_seq_t prev, u8 reason, u8 n_acks),
- TP_ARGS(call, first, reason, n_acks),
+ TP_ARGS(call, serial, ack_serial, first, prev, reason, n_acks),
TP_STRUCT__entry(
__field(struct rxrpc_call *, call )
+ __field(rxrpc_serial_t, serial )
+ __field(rxrpc_serial_t, ack_serial )
__field(rxrpc_seq_t, first )
+ __field(rxrpc_seq_t, prev )
__field(u8, reason )
__field(u8, n_acks )
),
TP_fast_assign(
__entry->call = call;
+ __entry->serial = serial;
+ __entry->ack_serial = ack_serial;
__entry->first = first;
+ __entry->prev = prev;
__entry->reason = reason;
__entry->n_acks = n_acks;
),
- TP_printk("c=%p %s f=%08x n=%u",
+ TP_printk("c=%p %08x %s r=%08x f=%08x p=%08x n=%u",
__entry->call,
- rxrpc_ack_names[__entry->reason],
+ __entry->serial,
+ __print_symbolic(__entry->reason, rxrpc_ack_names),
+ __entry->ack_serial,
__entry->first,
+ __entry->prev,
__entry->n_acks)
);
@@ -317,7 +744,7 @@ TRACE_EVENT(rxrpc_tx_ack,
TP_printk(" c=%p ACK %08x %s f=%08x r=%08x n=%u",
__entry->call,
__entry->serial,
- rxrpc_ack_names[__entry->reason],
+ __print_symbolic(__entry->reason, rxrpc_ack_names),
__entry->ack_first,
__entry->ack_serial,
__entry->n_acks)
@@ -349,7 +776,7 @@ TRACE_EVENT(rxrpc_receive,
TP_printk("c=%p %s r=%08x q=%08x w=%08x-%08x",
__entry->call,
- rxrpc_receive_traces[__entry->why],
+ __print_symbolic(__entry->why, rxrpc_receive_traces),
__entry->serial,
__entry->seq,
__entry->hard_ack,
@@ -383,7 +810,7 @@ TRACE_EVENT(rxrpc_recvmsg,
TP_printk("c=%p %s q=%08x o=%u l=%u ret=%d",
__entry->call,
- rxrpc_recvmsg_traces[__entry->why],
+ __print_symbolic(__entry->why, rxrpc_recvmsg_traces),
__entry->seq,
__entry->offset,
__entry->len,
@@ -410,7 +837,7 @@ TRACE_EVENT(rxrpc_rtt_tx,
TP_printk("c=%p %s sr=%08x",
__entry->call,
- rxrpc_rtt_tx_traces[__entry->why],
+ __print_symbolic(__entry->why, rxrpc_rtt_tx_traces),
__entry->send_serial)
);
@@ -443,7 +870,7 @@ TRACE_EVENT(rxrpc_rtt_rx,
TP_printk("c=%p %s sr=%08x rr=%08x rtt=%lld nr=%u avg=%lld",
__entry->call,
- rxrpc_rtt_rx_traces[__entry->why],
+ __print_symbolic(__entry->why, rxrpc_rtt_rx_traces),
__entry->send_serial,
__entry->resp_serial,
__entry->rtt,
@@ -481,7 +908,7 @@ TRACE_EVENT(rxrpc_timer,
TP_printk("c=%p %s x=%lld a=%lld r=%lld t=%ld",
__entry->call,
- rxrpc_timer_traces[__entry->why],
+ __print_symbolic(__entry->why, rxrpc_timer_traces),
ktime_to_ns(ktime_sub(__entry->expire_at, __entry->now)),
ktime_to_ns(ktime_sub(__entry->ack_at, __entry->now)),
ktime_to_ns(ktime_sub(__entry->resend_at, __entry->now)),
@@ -506,7 +933,8 @@ TRACE_EVENT(rxrpc_rx_lose,
__entry->hdr.callNumber, __entry->hdr.serviceId,
__entry->hdr.serial, __entry->hdr.seq,
__entry->hdr.type, __entry->hdr.flags,
- __entry->hdr.type <= 15 ? rxrpc_pkts[__entry->hdr.type] : "?UNK")
+ __entry->hdr.type <= 15 ?
+ __print_symbolic(__entry->hdr.type, rxrpc_pkts) : "?UNK")
);
TRACE_EVENT(rxrpc_propose_ack,
@@ -539,12 +967,12 @@ TRACE_EVENT(rxrpc_propose_ack,
TP_printk("c=%p %s %s r=%08x i=%u b=%u%s",
__entry->call,
- rxrpc_propose_ack_traces[__entry->why],
- rxrpc_ack_names[__entry->ack_reason],
+ __print_symbolic(__entry->why, rxrpc_propose_ack_traces),
+ __print_symbolic(__entry->ack_reason, rxrpc_ack_names),
__entry->serial,
__entry->immediate,
__entry->background,
- rxrpc_propose_ack_outcomes[__entry->outcome])
+ __print_symbolic(__entry->outcome, rxrpc_propose_ack_outcomes))
);
TRACE_EVENT(rxrpc_retransmit,
@@ -603,9 +1031,9 @@ TRACE_EVENT(rxrpc_congest,
TP_printk("c=%p %08x %s %08x %s cw=%u ss=%u nr=%u,%u nw=%u,%u r=%u b=%u u=%u d=%u l=%x%s%s%s",
__entry->call,
__entry->ack_serial,
- rxrpc_ack_names[__entry->sum.ack_reason],
+ __print_symbolic(__entry->sum.ack_reason, rxrpc_ack_names),
__entry->hard_ack,
- rxrpc_congest_modes[__entry->sum.mode],
+ __print_symbolic(__entry->sum.mode, rxrpc_congest_modes),
__entry->sum.cwnd,
__entry->sum.ssthresh,
__entry->sum.nr_acks, __entry->sum.nr_nacks,
@@ -615,10 +1043,50 @@ TRACE_EVENT(rxrpc_congest,
__entry->sum.cumulative_acks,
__entry->sum.dup_acks,
__entry->lowest_nak, __entry->sum.new_low_nack ? "!" : "",
- rxrpc_congest_changes[__entry->change],
+ __print_symbolic(__entry->change, rxrpc_congest_changes),
__entry->sum.retrans_timeo ? " rTxTo" : "")
);
+TRACE_EVENT(rxrpc_disconnect_call,
+ TP_PROTO(struct rxrpc_call *call),
+
+ TP_ARGS(call),
+
+ TP_STRUCT__entry(
+ __field(struct rxrpc_call *, call )
+ __field(u32, abort_code )
+ ),
+
+ TP_fast_assign(
+ __entry->call = call;
+ __entry->abort_code = call->abort_code;
+ ),
+
+ TP_printk("c=%p ab=%08x",
+ __entry->call,
+ __entry->abort_code)
+ );
+
+TRACE_EVENT(rxrpc_improper_term,
+ TP_PROTO(struct rxrpc_call *call),
+
+ TP_ARGS(call),
+
+ TP_STRUCT__entry(
+ __field(struct rxrpc_call *, call )
+ __field(u32, abort_code )
+ ),
+
+ TP_fast_assign(
+ __entry->call = call;
+ __entry->abort_code = call->abort_code;
+ ),
+
+ TP_printk("c=%p ab=%08x",
+ __entry->call,
+ __entry->abort_code)
+ );
+
#endif /* _TRACE_RXRPC_H */
/* This part must be outside protection */
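
A note on the recurring substitution in the rxrpc hunks above: every open-coded table lookup such as rxrpc_ack_names[__entry->reason] becomes __print_symbolic(__entry->reason, rxrpc_ack_names). __print_symbolic() takes the recorded value plus a list of { value, "string" } pairs, resolves the string only when the event is formatted, and embeds the mapping in the event's format description so userspace trace tools can perform the same translation. The skeleton below is a minimal, hypothetical trace header (all "demo" names are invented for illustration); only the macros themselves are the real tracing API.

/*
 * Illustrative only, not part of the patch.  The "demo" system, event and
 * reason values are invented; __print_symbolic() and the TRACE_EVENT()
 * plumbing are the real tracing macros used throughout the hunks above.
 */
#undef TRACE_SYSTEM
#define TRACE_SYSTEM demo

#if !defined(_TRACE_DEMO_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_DEMO_H

#include <linux/tracepoint.h>

/* Value -> string table; the same list could feed TRACE_DEFINE_ENUM() exports. */
#define demo_ack_reasons			\
	{ 0,	"REQUESTED"	},		\
	{ 1,	"DUPLICATE"	},		\
	{ 2,	"OUT_OF_SEQ"	}

TRACE_EVENT(demo_ack,
	    TP_PROTO(unsigned int reason, u32 serial),

	    TP_ARGS(reason, serial),

	    TP_STRUCT__entry(
		    __field(unsigned int,	reason		)
		    __field(u32,		serial		)
			     ),

	    TP_fast_assign(
		    __entry->reason = reason;
		    __entry->serial = serial;
			   ),

	    /* The value is stored raw; the string lookup happens at print time. */
	    TP_printk("r=%08x %s",
		      __entry->serial,
		      __print_symbolic(__entry->reason, demo_ack_reasons))
	    );

#endif /* _TRACE_DEMO_H */

#include <trace/define_trace.h>

Compared with indexing a kernel string array directly, the symbolic form keeps working when perf or trace-cmd parse the raw ring buffer, since the table travels with the event format rather than living only in kernel rodata.
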
diff --git a/include/trace/events/sched.h b/include/trace/events/sched.h
index 9b90c57517a9..9e3ef6c99e4b 100644
--- a/include/trace/events/sched.h
+++ b/include/trace/events/sched.h
@@ -4,7 +4,7 @@
#if !defined(_TRACE_SCHED_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_SCHED_H
-#include <linux/sched.h>
+#include <linux/sched/numa_balancing.h>
#include <linux/tracepoint.h>
#include <linux/binfmts.h>
diff --git a/include/trace/events/syscalls.h b/include/trace/events/syscalls.h
index 14e49c798135..b35533b94277 100644
--- a/include/trace/events/syscalls.h
+++ b/include/trace/events/syscalls.h
@@ -1,5 +1,6 @@
#undef TRACE_SYSTEM
#define TRACE_SYSTEM raw_syscalls
+#undef TRACE_INCLUDE_FILE
#define TRACE_INCLUDE_FILE syscalls
#if !defined(_TRACE_EVENTS_SYSCALLS_H) || defined(TRACE_HEADER_MULTI_READ)
diff --git a/include/trace/events/timer.h b/include/trace/events/timer.h
index 1bca99dbb98f..80787eafba99 100644
--- a/include/trace/events/timer.h
+++ b/include/trace/events/timer.h
@@ -36,6 +36,13 @@ DEFINE_EVENT(timer_class, timer_init,
TP_ARGS(timer)
);
+#define decode_timer_flags(flags) \
+ __print_flags(flags, "|", \
+ { TIMER_MIGRATING, "M" }, \
+ { TIMER_DEFERRABLE, "D" }, \
+ { TIMER_PINNED, "P" }, \
+ { TIMER_IRQSAFE, "I" })
+
/**
* timer_start - called when the timer is started
* @timer: pointer to struct timer_list
@@ -65,9 +72,12 @@ TRACE_EVENT(timer_start,
__entry->flags = flags;
),
- TP_printk("timer=%p function=%pf expires=%lu [timeout=%ld] flags=0x%08x",
+ TP_printk("timer=%p function=%pf expires=%lu [timeout=%ld] cpu=%u idx=%u flags=%s",
__entry->timer, __entry->function, __entry->expires,
- (long)__entry->expires - __entry->now, __entry->flags)
+ (long)__entry->expires - __entry->now,
+ __entry->flags & TIMER_CPUMASK,
+ __entry->flags >> TIMER_ARRAYSHIFT,
+ decode_timer_flags(__entry->flags & TIMER_TRACE_FLAGMASK))
);
/**
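
For the timer_start change above: instead of dumping the raw flags word, TP_printk() now splits it into the CPU bits (flags & TIMER_CPUMASK), the timer-wheel array index (flags >> TIMER_ARRAYSHIFT), and the per-timer attribute bits, which the new decode_timer_flags() helper turns into letters via __print_flags(flags, "|", ...). The userspace sketch below mimics that decomposition; the DEMO_* masks are stand-ins chosen for the example, not values copied from <linux/timer.h>, and the real macro joins the letters with '|' rather than concatenating them.

/* Stand-alone illustration of the cpu/idx/flags split; constants are
 * invented stand-ins, not the kernel's TIMER_* definitions.
 */
#include <stdio.h>

#define DEMO_CPUMASK	0x0003FFFFu	/* low bits: CPU the timer is queued on */
#define DEMO_ARRAYSHIFT	22		/* top bits: timer-wheel array index    */
#define DEMO_MIGRATING	0x00040000u
#define DEMO_DEFERRABLE	0x00080000u
#define DEMO_PINNED	0x00100000u
#define DEMO_IRQSAFE	0x00200000u

static void demo_decode(unsigned int flags)
{
	printf("cpu=%u idx=%u flags=%s%s%s%s\n",
	       flags & DEMO_CPUMASK,
	       flags >> DEMO_ARRAYSHIFT,
	       (flags & DEMO_MIGRATING)  ? "M" : "",
	       (flags & DEMO_DEFERRABLE) ? "D" : "",
	       (flags & DEMO_PINNED)     ? "P" : "",
	       (flags & DEMO_IRQSAFE)    ? "I" : "");
}

int main(void)
{
	demo_decode(0x00100003u);	/* prints: cpu=3 idx=0 flags=P */
	return 0;
}
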
diff --git a/include/trace/events/vmscan.h b/include/trace/events/vmscan.h
index c88fd0934e7e..27e8a5c77579 100644
--- a/include/trace/events/vmscan.h
+++ b/include/trace/events/vmscan.h
@@ -15,6 +15,7 @@
#define RECLAIM_WB_MIXED 0x0010u
#define RECLAIM_WB_SYNC 0x0004u /* Unused, all reclaim async */
#define RECLAIM_WB_ASYNC 0x0008u
+#define RECLAIM_WB_LRU (RECLAIM_WB_ANON|RECLAIM_WB_FILE)
#define show_reclaim_flags(flags) \
(flags) ? __print_flags(flags, "|", \
@@ -269,26 +270,27 @@ TRACE_EVENT(mm_shrink_slab_end,
__entry->retval)
);
-DECLARE_EVENT_CLASS(mm_vmscan_lru_isolate_template,
-
+TRACE_EVENT(mm_vmscan_lru_isolate,
TP_PROTO(int classzone_idx,
int order,
unsigned long nr_requested,
unsigned long nr_scanned,
+ unsigned long nr_skipped,
unsigned long nr_taken,
isolate_mode_t isolate_mode,
- int file),
+ int lru),
- TP_ARGS(classzone_idx, order, nr_requested, nr_scanned, nr_taken, isolate_mode, file),
+ TP_ARGS(classzone_idx, order, nr_requested, nr_scanned, nr_skipped, nr_taken, isolate_mode, lru),
TP_STRUCT__entry(
__field(int, classzone_idx)
__field(int, order)
__field(unsigned long, nr_requested)
__field(unsigned long, nr_scanned)
+ __field(unsigned long, nr_skipped)
__field(unsigned long, nr_taken)
__field(isolate_mode_t, isolate_mode)
- __field(int, file)
+ __field(int, lru)
),
TP_fast_assign(
@@ -296,47 +298,21 @@ DECLARE_EVENT_CLASS(mm_vmscan_lru_isolate_template,
__entry->order = order;
__entry->nr_requested = nr_requested;
__entry->nr_scanned = nr_scanned;
+ __entry->nr_skipped = nr_skipped;
__entry->nr_taken = nr_taken;
__entry->isolate_mode = isolate_mode;
- __entry->file = file;
+ __entry->lru = lru;
),
- TP_printk("isolate_mode=%d classzone=%d order=%d nr_requested=%lu nr_scanned=%lu nr_taken=%lu file=%d",
+ TP_printk("isolate_mode=%d classzone=%d order=%d nr_requested=%lu nr_scanned=%lu nr_skipped=%lu nr_taken=%lu lru=%s",
__entry->isolate_mode,
__entry->classzone_idx,
__entry->order,
__entry->nr_requested,
__entry->nr_scanned,
+ __entry->nr_skipped,
__entry->nr_taken,
- __entry->file)
-);
-
-DEFINE_EVENT(mm_vmscan_lru_isolate_template, mm_vmscan_lru_isolate,
-
- TP_PROTO(int classzone_idx,
- int order,
- unsigned long nr_requested,
- unsigned long nr_scanned,
- unsigned long nr_taken,
- isolate_mode_t isolate_mode,
- int file),
-
- TP_ARGS(classzone_idx, order, nr_requested, nr_scanned, nr_taken, isolate_mode, file)
-
-);
-
-DEFINE_EVENT(mm_vmscan_lru_isolate_template, mm_vmscan_memcg_isolate,
-
- TP_PROTO(int classzone_idx,
- int order,
- unsigned long nr_requested,
- unsigned long nr_scanned,
- unsigned long nr_taken,
- isolate_mode_t isolate_mode,
- int file),
-
- TP_ARGS(classzone_idx, order, nr_requested, nr_scanned, nr_taken, isolate_mode, file)
-
+ __print_symbolic(__entry->lru, LRU_NAMES))
);
TRACE_EVENT(mm_vmscan_writepage,
@@ -365,14 +341,27 @@ TRACE_EVENT(mm_vmscan_lru_shrink_inactive,
TP_PROTO(int nid,
unsigned long nr_scanned, unsigned long nr_reclaimed,
+ unsigned long nr_dirty, unsigned long nr_writeback,
+ unsigned long nr_congested, unsigned long nr_immediate,
+ unsigned long nr_activate, unsigned long nr_ref_keep,
+ unsigned long nr_unmap_fail,
int priority, int file),
- TP_ARGS(nid, nr_scanned, nr_reclaimed, priority, file),
+ TP_ARGS(nid, nr_scanned, nr_reclaimed, nr_dirty, nr_writeback,
+ nr_congested, nr_immediate, nr_activate, nr_ref_keep,
+ nr_unmap_fail, priority, file),
TP_STRUCT__entry(
__field(int, nid)
__field(unsigned long, nr_scanned)
__field(unsigned long, nr_reclaimed)
+ __field(unsigned long, nr_dirty)
+ __field(unsigned long, nr_writeback)
+ __field(unsigned long, nr_congested)
+ __field(unsigned long, nr_immediate)
+ __field(unsigned long, nr_activate)
+ __field(unsigned long, nr_ref_keep)
+ __field(unsigned long, nr_unmap_fail)
__field(int, priority)
__field(int, reclaim_flags)
),
@@ -381,17 +370,102 @@ TRACE_EVENT(mm_vmscan_lru_shrink_inactive,
__entry->nid = nid;
__entry->nr_scanned = nr_scanned;
__entry->nr_reclaimed = nr_reclaimed;
+ __entry->nr_dirty = nr_dirty;
+ __entry->nr_writeback = nr_writeback;
+ __entry->nr_congested = nr_congested;
+ __entry->nr_immediate = nr_immediate;
+ __entry->nr_activate = nr_activate;
+ __entry->nr_ref_keep = nr_ref_keep;
+ __entry->nr_unmap_fail = nr_unmap_fail;
__entry->priority = priority;
__entry->reclaim_flags = trace_shrink_flags(file);
),
- TP_printk("nid=%d nr_scanned=%ld nr_reclaimed=%ld priority=%d flags=%s",
+ TP_printk("nid=%d nr_scanned=%ld nr_reclaimed=%ld nr_dirty=%ld nr_writeback=%ld nr_congested=%ld nr_immediate=%ld nr_activate=%ld nr_ref_keep=%ld nr_unmap_fail=%ld priority=%d flags=%s",
__entry->nid,
__entry->nr_scanned, __entry->nr_reclaimed,
+ __entry->nr_dirty, __entry->nr_writeback,
+ __entry->nr_congested, __entry->nr_immediate,
+ __entry->nr_activate, __entry->nr_ref_keep,
+ __entry->nr_unmap_fail, __entry->priority,
+ show_reclaim_flags(__entry->reclaim_flags))
+);
+
+TRACE_EVENT(mm_vmscan_lru_shrink_active,
+
+ TP_PROTO(int nid, unsigned long nr_taken,
+ unsigned long nr_active, unsigned long nr_deactivated,
+ unsigned long nr_referenced, int priority, int file),
+
+ TP_ARGS(nid, nr_taken, nr_active, nr_deactivated, nr_referenced, priority, file),
+
+ TP_STRUCT__entry(
+ __field(int, nid)
+ __field(unsigned long, nr_taken)
+ __field(unsigned long, nr_active)
+ __field(unsigned long, nr_deactivated)
+ __field(unsigned long, nr_referenced)
+ __field(int, priority)
+ __field(int, reclaim_flags)
+ ),
+
+ TP_fast_assign(
+ __entry->nid = nid;
+ __entry->nr_taken = nr_taken;
+ __entry->nr_active = nr_active;
+ __entry->nr_deactivated = nr_deactivated;
+ __entry->nr_referenced = nr_referenced;
+ __entry->priority = priority;
+ __entry->reclaim_flags = trace_shrink_flags(file);
+ ),
+
+ TP_printk("nid=%d nr_taken=%ld nr_active=%ld nr_deactivated=%ld nr_referenced=%ld priority=%d flags=%s",
+ __entry->nid,
+ __entry->nr_taken,
+ __entry->nr_active, __entry->nr_deactivated, __entry->nr_referenced,
__entry->priority,
show_reclaim_flags(__entry->reclaim_flags))
);
+TRACE_EVENT(mm_vmscan_inactive_list_is_low,
+
+ TP_PROTO(int nid, int reclaim_idx,
+ unsigned long total_inactive, unsigned long inactive,
+ unsigned long total_active, unsigned long active,
+ unsigned long ratio, int file),
+
+ TP_ARGS(nid, reclaim_idx, total_inactive, inactive, total_active, active, ratio, file),
+
+ TP_STRUCT__entry(
+ __field(int, nid)
+ __field(int, reclaim_idx)
+ __field(unsigned long, total_inactive)
+ __field(unsigned long, inactive)
+ __field(unsigned long, total_active)
+ __field(unsigned long, active)
+ __field(unsigned long, ratio)
+ __field(int, reclaim_flags)
+ ),
+
+ TP_fast_assign(
+ __entry->nid = nid;
+ __entry->reclaim_idx = reclaim_idx;
+ __entry->total_inactive = total_inactive;
+ __entry->inactive = inactive;
+ __entry->total_active = total_active;
+ __entry->active = active;
+ __entry->ratio = ratio;
+ __entry->reclaim_flags = trace_shrink_flags(file) & RECLAIM_WB_LRU;
+ ),
+
+ TP_printk("nid=%d reclaim_idx=%d total_inactive=%ld inactive=%ld total_active=%ld active=%ld ratio=%ld flags=%s",
+ __entry->nid,
+ __entry->reclaim_idx,
+ __entry->total_inactive, __entry->inactive,
+ __entry->total_active, __entry->active,
+ __entry->ratio,
+ show_reclaim_flags(__entry->reclaim_flags))
+);
#endif /* _TRACE_VMSCAN_H */
/* This part must be outside protection */
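
Two structural points about the vmscan.h changes above. First, once mm_vmscan_memcg_isolate goes away, mm_vmscan_lru_isolate is the only user of the mm_vmscan_lru_isolate_template class, so the DECLARE_EVENT_CLASS/DEFINE_EVENT pair is collapsed into a single TRACE_EVENT (TRACE_EVENT(name, ...) is, roughly, a class plus one event of the same name). Second, the isolate event now reports which LRU list was scanned by name via __print_symbolic(__entry->lru, LRU_NAMES) instead of a bare file=0/1 flag, and gains an nr_skipped count. Below is a hypothetical before/after sketch of the first point, with invented "demo" names and a trimmed field list; either spelling would slot into a trace-header skeleton like the demo shown after the rxrpc section.

/* Before: a class that is down to a single user ... */
DECLARE_EVENT_CLASS(demo_isolate_template,
	TP_PROTO(int order, unsigned long nr_taken),
	TP_ARGS(order, nr_taken),
	TP_STRUCT__entry(
		__field(int,		order)
		__field(unsigned long,	nr_taken)
	),
	TP_fast_assign(
		__entry->order    = order;
		__entry->nr_taken = nr_taken;
	),
	TP_printk("order=%d nr_taken=%lu", __entry->order, __entry->nr_taken)
);

DEFINE_EVENT(demo_isolate_template, demo_isolate,
	TP_PROTO(int order, unsigned long nr_taken),
	TP_ARGS(order, nr_taken)
);

/* ... after: the same event expressed directly, which is the shape the
 * mm_vmscan_lru_isolate hunk takes (with its larger argument list).
 */
TRACE_EVENT(demo_isolate,
	TP_PROTO(int order, unsigned long nr_taken),
	TP_ARGS(order, nr_taken),
	TP_STRUCT__entry(
		__field(int,		order)
		__field(unsigned long,	nr_taken)
	),
	TP_fast_assign(
		__entry->order    = order;
		__entry->nr_taken = nr_taken;
	),
	TP_printk("order=%d nr_taken=%lu", __entry->order, __entry->nr_taken)
);
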
diff --git a/include/trace/events/writeback.h b/include/trace/events/writeback.h
index 2ccd9ccbf9ef..7bd8783a590f 100644
--- a/include/trace/events/writeback.h
+++ b/include/trace/events/writeback.h
@@ -31,7 +31,7 @@
#define WB_WORK_REASON \
EM( WB_REASON_BACKGROUND, "background") \
- EM( WB_REASON_TRY_TO_FREE_PAGES, "try_to_free_pages") \
+ EM( WB_REASON_VMSCAN, "vmscan") \
EM( WB_REASON_SYNC, "sync") \
EM( WB_REASON_PERIODIC, "periodic") \
EM( WB_REASON_LAPTOP_TIMER, "laptop_timer") \
diff --git a/include/trace/events/xdp.h b/include/trace/events/xdp.h
new file mode 100644
index 000000000000..1b61357d3f57
--- /dev/null
+++ b/include/trace/events/xdp.h
@@ -0,0 +1,53 @@
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM xdp
+
+#if !defined(_TRACE_XDP_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_XDP_H
+
+#include <linux/netdevice.h>
+#include <linux/filter.h>
+#include <linux/tracepoint.h>
+
+#define __XDP_ACT_MAP(FN) \
+ FN(ABORTED) \
+ FN(DROP) \
+ FN(PASS) \
+ FN(TX)
+
+#define __XDP_ACT_TP_FN(x) \
+ TRACE_DEFINE_ENUM(XDP_##x);
+#define __XDP_ACT_SYM_FN(x) \
+ { XDP_##x, #x },
+#define __XDP_ACT_SYM_TAB \
+ __XDP_ACT_MAP(__XDP_ACT_SYM_FN) { -1, 0 }
+__XDP_ACT_MAP(__XDP_ACT_TP_FN)
+
+TRACE_EVENT(xdp_exception,
+
+ TP_PROTO(const struct net_device *dev,
+ const struct bpf_prog *xdp, u32 act),
+
+ TP_ARGS(dev, xdp, act),
+
+ TP_STRUCT__entry(
+ __string(name, dev->name)
+ __array(u8, prog_tag, 8)
+ __field(u32, act)
+ ),
+
+ TP_fast_assign(
+ BUILD_BUG_ON(sizeof(__entry->prog_tag) != sizeof(xdp->tag));
+ memcpy(__entry->prog_tag, xdp->tag, sizeof(xdp->tag));
+ __assign_str(name, dev->name);
+ __entry->act = act;
+ ),
+
+ TP_printk("prog=%s device=%s action=%s",
+ __print_hex_str(__entry->prog_tag, 8),
+ __get_str(name),
+ __print_symbolic(__entry->act, __XDP_ACT_SYM_TAB))
+);
+
+#endif /* _TRACE_XDP_H */
+
+#include <trace/define_trace.h>
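
The new xdp.h header drives everything off one X-macro, __XDP_ACT_MAP(FN), so the enum exports and the string table used by __print_symbolic() cannot drift apart. Written out by hand (this is just the preprocessor expansion of the code above, not additional code), the two expansions are:

/*
 *   __XDP_ACT_MAP(__XDP_ACT_TP_FN)
 *	=> TRACE_DEFINE_ENUM(XDP_ABORTED);
 *	   TRACE_DEFINE_ENUM(XDP_DROP);
 *	   TRACE_DEFINE_ENUM(XDP_PASS);
 *	   TRACE_DEFINE_ENUM(XDP_TX);
 *
 *   __XDP_ACT_SYM_TAB
 *	=> { XDP_ABORTED, "ABORTED" }, { XDP_DROP, "DROP" },
 *	   { XDP_PASS, "PASS" }, { XDP_TX, "TX" }, { -1, 0 }
 */

Adding a future XDP verdict therefore only needs one extra FN(...) line. Drivers are expected to fire the generated trace_xdp_exception(dev, prog, act) hook when a program returns a verdict they cannot honour, which is what makes the program tag plus symbolic action in TP_printk useful for debugging.
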
diff --git a/include/trace/trace_events.h b/include/trace/trace_events.h
index 467e12f780d8..00f643164ca2 100644
--- a/include/trace/trace_events.h
+++ b/include/trace/trace_events.h
@@ -283,8 +283,16 @@ TRACE_MAKE_SYSTEM_STR();
trace_print_symbols_seq(p, value, symbols); \
})
+#undef __print_flags_u64
#undef __print_symbolic_u64
#if BITS_PER_LONG == 32
+#define __print_flags_u64(flag, delim, flag_array...) \
+ ({ \
+ static const struct trace_print_flags_u64 __flags[] = \
+ { flag_array, { -1, NULL } }; \
+ trace_print_flags_seq_u64(p, delim, flag, __flags); \
+ })
+
#define __print_symbolic_u64(value, symbol_array...) \
({ \
static const struct trace_print_flags_u64 symbols[] = \
@@ -292,12 +300,20 @@ TRACE_MAKE_SYSTEM_STR();
trace_print_symbols_seq_u64(p, value, symbols); \
})
#else
+#define __print_flags_u64(flag, delim, flag_array...) \
+ __print_flags(flag, delim, flag_array)
+
#define __print_symbolic_u64(value, symbol_array...) \
__print_symbolic(value, symbol_array)
#endif
#undef __print_hex
-#define __print_hex(buf, buf_len) trace_print_hex_seq(p, buf, buf_len)
+#define __print_hex(buf, buf_len) \
+ trace_print_hex_seq(p, buf, buf_len, false)
+
+#undef __print_hex_str
+#define __print_hex_str(buf, buf_len) \
+ trace_print_hex_seq(p, buf, buf_len, true)
#undef __print_array
#define __print_array(array, count, el_size) \
@@ -711,6 +727,7 @@ static inline void ftrace_test_probe_##call(void) \
#undef __print_flags
#undef __print_symbolic
#undef __print_hex
+#undef __print_hex_str
#undef __get_dynamic_array
#undef __get_dynamic_array_len
#undef __get_str
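
The trace_events.h hunks round out the printing helpers used above. __print_flags_u64() gives 32-bit builds a u64-capable counterpart to __print_flags(), mirroring the existing __print_symbolic_u64() arrangement (on 64-bit it simply falls back to __print_flags()). trace_print_hex_seq() grows a final boolean and __print_hex_str() is layered on top of it; judging by the xdp event above, which renders an 8-byte program tag, the new flag selects packed output while plain __print_hex() keeps space-separated bytes. A hypothetical event showing both on the same buffer (it would sit inside a trace-header skeleton like the earlier demo; the expected output in the comment assumes that spaced-vs-packed reading):

/* Illustration only: prints the same 4 recorded bytes both ways,
 * e.g. "spaced=de ad be ef packed=deadbeef" on the assumption above.
 */
TRACE_EVENT(demo_hexdump,
	    TP_PROTO(const u8 *buf),

	    TP_ARGS(buf),

	    TP_STRUCT__entry(
		    __array(u8, buf, 4)
	    ),

	    TP_fast_assign(
		    memcpy(__entry->buf, buf, 4);
	    ),

	    TP_printk("spaced=%s packed=%s",
		      __print_hex(__entry->buf, 4),
		      __print_hex_str(__entry->buf, 4))
	    );
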