author    openeuler-ci-bot <george@openeuler.sh>    2022-10-29 06:43:15 +0000
committer Gitee <noreply@gitee.com>                 2022-10-29 06:43:15 +0000
commit    3b7ab23f58939ff461ab8d55970ed0170916d12f (patch)
tree      ad41ba6196db89f2237ff697594d7ccba4271f76
parent    639060341ee29dfe652ff21e24077ed7e5b2e5ac (diff)
parent    f2bae51b4b6bca1f91bafecd8df2e5e5f206493b (diff)
download  openEuler-kernel-3b7ab23f58939ff461ab8d55970ed0170916d12f.tar.gz
!189 mm: page_alloc: Add a tracepoint to trace the call of __alloc_pages() and export symbols
Merge Pull Request from: @AoDaMo

This is the result of the OSPP 2022 Project.

/proc/meminfo is the main way for users to see physical memory usage, but it
cannot show every physical page allocation. Kernel modules can call
alloc_pages() to obtain physical pages directly, and that part of physical
memory is not accounted for in /proc/meminfo. To trace the specific
allocation and release of physical pages, one solution is to insert a module
into the kernel and register tracepoint handlers that capture the allocation
information. This patch therefore adds a new tracepoint, mm_page_alloc_enter,
at the entrance of mm/page_alloc.c:__alloc_pages(), and exports the relevant
tracepoint symbols for use in kernel modules.

Link: https://gitee.com/openeuler/kernel/pulls/189
Reviewed-by: Liu YongQiang <liuyongqiang13@huawei.com>
Reviewed-by: Zheng Zengkai <zhengzengkai@huawei.com>
Signed-off-by: Zheng Zengkai <zhengzengkai@huawei.com>
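As a sketch of the module-side usage the message describes: a probe can be
attached to the new tracepoint via the register_trace_<name>() helper that
the TRACE_EVENT definition generates. The probe and init/exit names below
are illustrative, and this assumes a kernel tree carrying this patch.

/* Hypothetical module attaching a probe to the new tracepoint. */
#include <linux/module.h>
#include <linux/tracepoint.h>
#include <trace/events/kmem.h>

/* Probe signature mirrors TP_PROTO, with a leading void *data argument. */
static void probe_alloc_enter(void *data, unsigned int order, gfp_t gfp_flags)
{
	/* Runs on every __alloc_pages() call, so keep the body cheap. */
	pr_debug("alloc enter: order=%u gfp=%#x\n",
		 order, (__force unsigned int)gfp_flags);
}

static int __init alloc_trace_init(void)
{
	/* register_trace_<name>() is generated by the TRACE_EVENT macro. */
	return register_trace_mm_page_alloc_enter(probe_alloc_enter, NULL);
}

static void __exit alloc_trace_exit(void)
{
	unregister_trace_mm_page_alloc_enter(probe_alloc_enter, NULL);
	/* Wait for in-flight probe calls before the module text goes away. */
	tracepoint_synchronize_unregister();
}

module_init(alloc_trace_init);
module_exit(alloc_trace_exit);
MODULE_LICENSE("GPL"); /* needed: the tracepoint symbols are exported _GPL */

Registering only succeeds because the patch below exports the tracepoint
symbols with EXPORT_TRACEPOINT_SYMBOL_GPL, which is why a GPL module
license is required.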
-rw-r--r--  Documentation/trace/events-kmem.rst |  8
-rw-r--r--  include/trace/events/kmem.h         | 20
-rw-r--r--  mm/page_alloc.c                     |  8
3 files changed, 33 insertions(+), 3 deletions(-)
diff --git a/Documentation/trace/events-kmem.rst b/Documentation/trace/events-kmem.rst
index 68fa75247488b1..01ebedfcff41a8 100644
--- a/Documentation/trace/events-kmem.rst
+++ b/Documentation/trace/events-kmem.rst
@@ -47,13 +47,15 @@ but the call_site can usually be used to extrapolate that information.
::
mm_page_alloc page=%p pfn=%lu order=%d migratetype=%d gfp_flags=%s
+ mm_page_alloc_enter order=%d gfp_flags=%s
mm_page_alloc_zone_locked page=%p pfn=%lu order=%u migratetype=%d cpu=%d percpu_refill=%d
mm_page_free page=%p pfn=%lu order=%d
mm_page_free_batched page=%p pfn=%lu order=%d cold=%d
-These four events deal with page allocation and freeing. mm_page_alloc is
-a simple indicator of page allocator activity. Pages may be allocated from
-the per-CPU allocator (high performance) or the buddy allocator.
+These five events deal with page allocation and freeing. mm_page_alloc_enter
+is the entry point of page allocation and can be used to analyze the time cost
+of page allocation. mm_page_alloc is a simple indicator of page allocator activity.
+Pages may be allocated from the per-CPU allocator (high performance) or the buddy allocator.
If pages are allocated directly from the buddy allocator, the
mm_page_alloc_zone_locked event is triggered. This event is important as high
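To make the documentation's "time cost" point concrete: a module could pair
mm_page_alloc_enter with the existing mm_page_alloc event to estimate
per-allocation latency. A rough sketch follows; the probe names are
illustrative, the mm_page_alloc probe signature is assumed from this tree's
TP_PROTO, and per-CPU pairing is best-effort since nesting or migration
between the two probes can skew samples.

#include <linux/percpu.h>
#include <linux/ktime.h>
#include <trace/events/kmem.h>

static DEFINE_PER_CPU(u64, enter_ns);

static void probe_enter(void *data, unsigned int order, gfp_t gfp_flags)
{
	/* Stamp the entry time for this CPU. */
	this_cpu_write(enter_ns, ktime_get_ns());
}

/* Matches mm_page_alloc's TP_PROTO(page, order, gfp_flags, migratetype). */
static void probe_done(void *data, struct page *page, unsigned int order,
		       gfp_t gfp_flags, int migratetype)
{
	u64 start = this_cpu_read(enter_ns);

	if (start)
		pr_debug("order=%u alloc took %llu ns\n",
			 order, ktime_get_ns() - start);
}

Both probes would be registered and unregistered exactly like the earlier
sketch, via register_trace_mm_page_alloc_enter() and
register_trace_mm_page_alloc().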
diff --git a/include/trace/events/kmem.h b/include/trace/events/kmem.h
index f65b1f6db22d86..1f030912345566 100644
--- a/include/trace/events/kmem.h
+++ b/include/trace/events/kmem.h
@@ -190,6 +190,26 @@ TRACE_EVENT(mm_page_free_batched,
__entry->pfn)
);
+TRACE_EVENT(mm_page_alloc_enter,
+ TP_PROTO(unsigned int order, gfp_t gfp_flags),
+
+ TP_ARGS(order, gfp_flags),
+
+ TP_STRUCT__entry(
+ __field( unsigned int, order )
+ __field( gfp_t, gfp_flags )
+ ),
+
+ TP_fast_assign(
+ __entry->order = order;
+ __entry->gfp_flags = gfp_flags;
+ ),
+
+ TP_printk("order=%d gfp_flags=%s",
+ __entry->order,
+ show_gfp_flags(__entry->gfp_flags))
+);
+
TRACE_EVENT(mm_page_alloc,
TP_PROTO(struct page *page, unsigned int order,
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 60b2351ede77d5..2612b767ce8d0c 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -82,6 +82,12 @@
#include "shuffle.h"
#include "page_reporting.h"
+EXPORT_TRACEPOINT_SYMBOL_GPL(mm_page_alloc_enter);
+EXPORT_TRACEPOINT_SYMBOL_GPL(mm_page_alloc);
+EXPORT_TRACEPOINT_SYMBOL_GPL(mm_page_alloc_zone_locked);
+EXPORT_TRACEPOINT_SYMBOL_GPL(mm_page_free);
+EXPORT_TRACEPOINT_SYMBOL_GPL(mm_page_free_batched);
+
/* Free Page Internal flags: for internal, non-pcp variants of free_pages(). */
typedef int __bitwise fpi_t;
@@ -5291,6 +5297,8 @@ struct page *__alloc_pages(gfp_t gfp, unsigned int order, int preferred_nid,
gfp_t alloc_gfp; /* The gfp_t that was actually used for allocation */
struct alloc_context ac = { };
+ trace_mm_page_alloc_enter(order, gfp);
+
/*
* There are several places where we assume that the order value is sane
* so bail out early if the request is out of bound.
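No module is required just to observe the event: on a kernel with this patch,
mm_page_alloc_enter is a regular ftrace event under events/kmem. A minimal
userspace sketch, assuming tracefs is mounted at /sys/kernel/tracing and root
privileges:

/* Enable the new event via tracefs and stream the trace output. */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

#define EV "/sys/kernel/tracing/events/kmem/mm_page_alloc_enter/enable"

int main(void)
{
	char buf[4096];
	ssize_t n;
	int fd = open(EV, O_WRONLY);

	if (fd < 0 || write(fd, "1", 1) != 1) {
		perror("enable mm_page_alloc_enter");
		return 1;
	}
	close(fd);

	fd = open("/sys/kernel/tracing/trace_pipe", O_RDONLY);
	if (fd < 0) {
		perror("trace_pipe");
		return 1;
	}
	while ((n = read(fd, buf, sizeof(buf))) > 0)
		fwrite(buf, 1, n, stdout);
	close(fd);
	return 0;
}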