author	Linus Torvalds <torvalds@linux-foundation.org>	2022-12-13 19:29:45 -0800
committer	Linus Torvalds <torvalds@linux-foundation.org>	2022-12-13 19:29:45 -0800
commit	e2ca6ba6ba0152361aa4fcbf6067db71b2c7a770 (patch)
tree	f7ed7753a2e66486a4ffe0fbbf98404ec4ba2212 /include/trace
parent	7e68dd7d07a28faa2e6574dd6b9dbd90cdeaae91 (diff)
parent	c45bc55a99957b20e4e0333bcd42e12d1833a7f5 (diff)
Merge tag 'mm-stable-2022-12-13' of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm
Pull MM updates from Andrew Morton:

 - More userfaultfd work from Peter Xu

 - Several convert-to-folios series from Sidhartha Kumar and Huang Ying

 - Some filemap cleanups from Vishal Moola

 - David Hildenbrand added the ability to selftest anon memory COW handling

 - Some cpuset simplifications from Liu Shixin

 - Addition of vmalloc tracing support by Uladzislau Rezki

 - Some pagecache folioifications and simplifications from Matthew Wilcox

 - A pagemap cleanup from Kefeng Wang: we have VM_ACCESS_FLAGS, so use it

 - Miguel Ojeda contributed some cleanups for our use of the __no_sanitize_thread__ gcc keyword. This series should have been in the non-MM tree, my bad

 - Naoya Horiguchi improved the interaction between memory poisoning and memory section removal for huge pages

 - DAMON cleanups and tuneups from SeongJae Park

 - Tony Luck fixed the handling of COW faults against poisoned pages

 - Peter Xu utilized the PTE marker code for handling swapin errors

 - Hugh Dickins reworked compound page mapcount handling, simplifying it and making it more efficient

 - Removal of the autonuma savedwrite infrastructure from Nadav Amit and David Hildenbrand

 - zram support for multiple compression streams from Sergey Senozhatsky

 - David Hildenbrand reworked the GUP code's R/O long-term pinning so that drivers no longer need to use the FOLL_FORCE workaround which didn't work very well anyway

 - Mel Gorman altered the page allocator so that local IRQs can remain enabled during per-cpu page allocations

 - Vishal Moola removed the try_to_release_page() wrapper

 - Stefan Roesch added some per-BDI sysfs tunables which are used to prevent network block devices from dirtying excessive amounts of pagecache

 - David Hildenbrand did some cleanup and repair work on KSM COW breaking

 - Nhat Pham and Johannes Weiner have implemented writeback in zswap's zsmalloc backend

 - Brian Foster has fixed a longstanding corner-case oddity in file[map]_write_and_wait_range()

 - sparse-vmemmap changes for MIPS, LoongArch and NIOS2 from Feiyang Chen

 - Shiyang Ruan has done some work on fsdax, to make its reflink mode work better under xfstests. Better, but still not perfect

 - Christoph Hellwig has removed the ->writepage() method from several filesystems. They only need ->writepages()

 - Yosry Ahmed wrote a series which fixes the memcg reclaim target beancounting

 - David Hildenbrand has fixed some of our MM selftests for 32-bit machines

 - Many singleton patches, as usual

* tag 'mm-stable-2022-12-13' of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm: (313 commits)
  mm/hugetlb: set head flag before setting compound_order in __prep_compound_gigantic_folio
  mm: mmu_gather: allow more than one batch of delayed rmaps
  mm: fix typo in struct pglist_data code comment
  kmsan: fix memcpy tests
  mm: add cond_resched() in swapin_walk_pmd_entry()
  mm: do not show fs mm pc for VM_LOCKONFAULT pages
  selftests/vm: ksm_functional_tests: fixes for 32bit
  selftests/vm: cow: fix compile warning on 32bit
  selftests/vm: madv_populate: fix missing MADV_POPULATE_(READ|WRITE) definitions
  mm/gup_test: fix PIN_LONGTERM_TEST_READ with highmem
  mm,thp,rmap: fix races between updates of subpages_mapcount
  mm: memcg: fix swapcached stat accounting
  mm: add nodes= arg to memory.reclaim
  mm: disable top-tier fallback to reclaim on proactive reclaim
  selftests: cgroup: make sure reclaim target memcg is unprotected
  selftests: cgroup: refactor proactive reclaim code to reclaim_until()
  mm: memcg: fix stale protection of reclaim target memcg
  mm/mmap: properly unaccount memory on mas_preallocate() failure
  omfs: remove ->writepage
  jfs: remove ->writepage
  ...
Diffstat (limited to 'include/trace')
-rw-r--r--	include/trace/events/huge_memory.h	38
-rw-r--r--	include/trace/events/kmem.h	8
-rw-r--r--	include/trace/events/vmalloc.h	123
3 files changed, 165 insertions, 4 deletions
diff --git a/include/trace/events/huge_memory.h b/include/trace/events/huge_memory.h
index 760455dfa860..3e6fb05852f9 100644
--- a/include/trace/events/huge_memory.h
+++ b/include/trace/events/huge_memory.h
@@ -203,5 +203,43 @@ TRACE_EVENT(mm_khugepaged_scan_file,
__print_symbolic(__entry->result, SCAN_STATUS))
);
+TRACE_EVENT(mm_khugepaged_collapse_file,
+ TP_PROTO(struct mm_struct *mm, struct page *hpage, pgoff_t index,
+ bool is_shmem, unsigned long addr, struct file *file,
+ int nr, int result),
+ TP_ARGS(mm, hpage, index, is_shmem, addr, file, nr, result),
+ TP_STRUCT__entry(
+ __field(struct mm_struct *, mm)
+ __field(unsigned long, hpfn)
+ __field(pgoff_t, index)
+ __field(unsigned long, addr)
+ __field(bool, is_shmem)
+ __string(filename, file->f_path.dentry->d_iname)
+ __field(int, nr)
+ __field(int, result)
+ ),
+
+ TP_fast_assign(
+ __entry->mm = mm;
+ __entry->hpfn = hpage ? page_to_pfn(hpage) : -1;
+ __entry->index = index;
+ __entry->addr = addr;
+ __entry->is_shmem = is_shmem;
+ __assign_str(filename, file->f_path.dentry->d_iname);
+ __entry->nr = nr;
+ __entry->result = result;
+ ),
+
+ TP_printk("mm=%p, hpage_pfn=0x%lx, index=%ld, addr=%ld, is_shmem=%d, filename=%s, nr=%d, result=%s",
+ __entry->mm,
+ __entry->hpfn,
+ __entry->index,
+ __entry->addr,
+ __entry->is_shmem,
+ __get_str(filename),
+ __entry->nr,
+ __print_symbolic(__entry->result, SCAN_STATUS))
+);
+
#endif /* __HUGE_MEMORY_H */
#include <trace/define_trace.h>
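
For context, the TRACE_EVENT() above generates a trace_mm_khugepaged_collapse_file() helper for the file-collapse path in mm/khugepaged.c to call. A minimal call-site sketch, with the argument order following the TP_PROTO above (surrounding variable names such as nr_collapsed are illustrative assumptions, not verbatim upstream code):

	/* Emit the event once a file-backed collapse attempt finishes;
	 * hpage may be NULL on failure, in which case pfn -1 is recorded. */
	trace_mm_khugepaged_collapse_file(mm, hpage, index, is_shmem,
					  addr, file, nr_collapsed, result);

Since this file's TRACE_SYSTEM is huge_memory, the event can then be toggled at runtime under events/huge_memory/ in tracefs.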
diff --git a/include/trace/events/kmem.h b/include/trace/events/kmem.h
index 243073cfc29d..58688768ef0f 100644
--- a/include/trace/events/kmem.h
+++ b/include/trace/events/kmem.h
@@ -346,10 +346,9 @@ TRACE_MM_PAGES
TRACE_EVENT(rss_stat,
TP_PROTO(struct mm_struct *mm,
- int member,
- long count),
+ int member),
- TP_ARGS(mm, member, count),
+ TP_ARGS(mm, member),
TP_STRUCT__entry(
__field(unsigned int, mm_id)
@@ -362,7 +361,8 @@ TRACE_EVENT(rss_stat,
__entry->mm_id = mm_ptr_to_hash(mm);
__entry->curr = !!(current->mm == mm);
__entry->member = member;
- __entry->size = (count << PAGE_SHIFT);
+ __entry->size = (percpu_counter_sum_positive(&mm->rss_stat[member])
+ << PAGE_SHIFT);
),
TP_printk("mm_id=%u curr=%d type=%s size=%ldB",
diff --git a/include/trace/events/vmalloc.h b/include/trace/events/vmalloc.h
new file mode 100644
index 000000000000..ad4e02191f35
--- /dev/null
+++ b/include/trace/events/vmalloc.h
@@ -0,0 +1,123 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM vmalloc
+
+#if !defined(_TRACE_VMALLOC_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_VMALLOC_H
+
+#include <linux/tracepoint.h>
+
+/**
+ * alloc_vmap_area - called when a new vmap allocation occurs
+ * @addr: an allocated address
+ * @size: a requested size
+ * @align: a requested alignment
+ * @vstart: a requested start range
+ * @vend: a requested end range
+ * @failed: whether the allocation failed
+ *
+ * This event is used for debugging purposes. It can give extra
+ * information for a developer about how often it occurs and which
+ * parameters are passed for further validation.
+ */
+TRACE_EVENT(alloc_vmap_area,
+
+ TP_PROTO(unsigned long addr, unsigned long size, unsigned long align,
+ unsigned long vstart, unsigned long vend, int failed),
+
+ TP_ARGS(addr, size, align, vstart, vend, failed),
+
+ TP_STRUCT__entry(
+ __field(unsigned long, addr)
+ __field(unsigned long, size)
+ __field(unsigned long, align)
+ __field(unsigned long, vstart)
+ __field(unsigned long, vend)
+ __field(int, failed)
+ ),
+
+ TP_fast_assign(
+ __entry->addr = addr;
+ __entry->size = size;
+ __entry->align = align;
+ __entry->vstart = vstart;
+ __entry->vend = vend;
+ __entry->failed = failed;
+ ),
+
+ TP_printk("va_start: %lu size=%lu align=%lu vstart=0x%lx vend=0x%lx failed=%d",
+ __entry->addr, __entry->size, __entry->align,
+ __entry->vstart, __entry->vend, __entry->failed)
+);
+
+/**
+ * purge_vmap_area_lazy - called when vmap areas were lazily freed
+ * @start: purging start address
+ * @end: purging end address
+ * @npurged: number of purged vmap areas
+ *
+ * This event is used for debugging purposes. It gives some
+ * indication about the start:end range and how many objects
+ * are released.
+ */
+TRACE_EVENT(purge_vmap_area_lazy,
+
+ TP_PROTO(unsigned long start, unsigned long end,
+ unsigned int npurged),
+
+ TP_ARGS(start, end, npurged),
+
+ TP_STRUCT__entry(
+ __field(unsigned long, start)
+ __field(unsigned long, end)
+ __field(unsigned int, npurged)
+ ),
+
+ TP_fast_assign(
+ __entry->start = start;
+ __entry->end = end;
+ __entry->npurged = npurged;
+ ),
+
+ TP_printk("start=0x%lx end=0x%lx num_purged=%u",
+ __entry->start, __entry->end, __entry->npurged)
+);
+
+/**
+ * free_vmap_area_noflush - called when a vmap area is freed
+ * @va_start: the start address of the VA
+ * @nr_lazy: current number of lazily-freed pages
+ * @nr_lazy_max: maximum number of lazily-freed pages allowed
+ *
+ * This event is used for debugging purposes. It gives some
+ * indication about the VA that is released, the number of
+ * currently outstanding areas, and the maximum allowed
+ * threshold before all of them are dropped.
+ */
+TRACE_EVENT(free_vmap_area_noflush,
+
+ TP_PROTO(unsigned long va_start, unsigned long nr_lazy,
+ unsigned long nr_lazy_max),
+
+ TP_ARGS(va_start, nr_lazy, nr_lazy_max),
+
+ TP_STRUCT__entry(
+ __field(unsigned long, va_start)
+ __field(unsigned long, nr_lazy)
+ __field(unsigned long, nr_lazy_max)
+ ),
+
+ TP_fast_assign(
+ __entry->va_start = va_start;
+ __entry->nr_lazy = nr_lazy;
+ __entry->nr_lazy_max = nr_lazy_max;
+ ),
+
+ TP_printk("va_start=0x%lx nr_lazy=%lu nr_lazy_max=%lu",
+ __entry->va_start, __entry->nr_lazy, __entry->nr_lazy_max)
+);
+
+#endif /* _TRACE_VMALLOC_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
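
For illustration, the helpers generated by these three events would be invoked from mm/vmalloc.c roughly as follows. This is a sketch under assumed variable names (size, align, vstart, vend, num_purged, nr_lazy, nr_lazy_max, va), not the verbatim upstream call sites:

	/* After an allocation attempt, record the outcome: */
	trace_alloc_vmap_area(addr, size, align, vstart, vend, failed);

	/* After lazily-freed areas in [start:end] have been purged: */
	trace_purge_vmap_area_lazy(start, end, num_purged);

	/* When a vmap area is queued for deferred (lazy) freeing: */
	trace_free_vmap_area_noflush(va->va_start, nr_lazy, nr_lazy_max);

Because all three share TRACE_SYSTEM vmalloc, they can be enabled individually or as a group under events/vmalloc/ in tracefs.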