author    openeuler-ci-bot <george@openeuler.sh>    2023-10-13 02:49:28 +0000
committer Gitee <noreply@gitee.com>                 2023-10-13 02:49:28 +0000
commit    36a3281da474d77777167e256eacff9e69d2480e (patch)
tree      3348b93e6cbb9b9075b21772de924a0846d3f22e
parent    c28f5243585f9088f7e181098cec39ff3ad3a7bd (diff)
parent    fd538c7663436c96df073440c4f28ac32df9808f (diff)
download  openEuler-kernel-36a3281da474d77777167e256eacff9e69d2480e.tar.gz
!2011 trace event: add stack filter
Merge Pull Request from: @lsc2001

Stack filter can be used to filter event call stacks, which enables people
to choose desired call paths. Only those trace events whose call stacks
match the stack filter can appear in trace output.

Link: https://gitee.com/openeuler/kernel/pulls/2011
Reviewed-by: Xu Kuohai <xukuohai@huawei.com>
Signed-off-by: Jialin Zhang <zhangjialin11@huawei.com>
-rw-r--r--  Documentation/trace/events.rst            |  83
-rw-r--r--  include/linux/trace_events.h              |  38
-rw-r--r--  kernel/trace/Kconfig                      |   8
-rw-r--r--  kernel/trace/Makefile                     |   3
-rw-r--r--  kernel/trace/trace.c                      |   7
-rw-r--r--  kernel/trace/trace.h                      |  10
-rw-r--r--  kernel/trace/trace_events.c               |   5
-rw-r--r--  kernel/trace/trace_events_stack_filter.c  | 819
8 files changed, 972 insertions(+), 1 deletion(-)
diff --git a/Documentation/trace/events.rst b/Documentation/trace/events.rst
index 9df29a935757af..d7e87500f676b4 100644
--- a/Documentation/trace/events.rst
+++ b/Documentation/trace/events.rst
@@ -324,6 +324,89 @@ To add more PIDs without losing the PIDs already included, use '>>'.
# echo 123 244 1 >> set_event_pid
+5.5 Stack filters
+---------------------
+
+Trace events can be filtered by their call stacks. There may be
+various paths that trigger a trace event, but people sometimes only
+care about some of them. Once a stack filter is set, the call stack of
+the corresponding event is compared with the stack filter. Events whose
+call stacks match appear in the trace output; the rest are discarded.
+
+5.5.1 Expression syntax
+------------------------
+
+Stack filters have the following form (in regular-expression style)::
+ '!'?function('/'(function|'**'))*
+
+In the expression, '!' negates the filter and '**' matches any
+call path (possibly empty). The top of the call stack will be
+``stack_filter_match``, so the call path will look like
+``**/stack_filter_match``. We therefore recommend ending the stack
+filter with '**' unless you know the implementation details of the
+tracing system.
+
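+For example, a filter consisting only of ``do_sys_openat2`` would require
+``do_sys_openat2`` to be the topmost entry of the recorded stack (which is
+always ``stack_filter_match``), so ``do_sys_openat2/**`` is usually what
+you want.
+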
+The bottom of the call stack can be ignored, which means that whatever
+``work_pending/do_notify_resume/schedule/__schedule/**`` matches is also
+matched by ``do_notify_resume/schedule/__schedule/**``.
+
+A call stack matches successfully if the following conditions are
+met simultaneously:
+[1] It matches at least one positive stack filter.
+[2] It doesn't match any negative stack filter.
+If no positive filters are set, condition [1] doesn't need to be satisfied.
+
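+For example, with the stack filters::
+
+  do_sys_openat2/**
+  !el0_sync_handler/**
+
+a call stack containing ``do_sys_openat2`` but not ``el0_sync_handler``
+matches, while a call stack containing both (or neither) is discarded.
+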
+5.5.2 Setting stack filters
+---------------------------
+
+Stack filters are added by echoing expressions into the 'stack_filter'
+file of a given event, and cleared by echoing 0 or a blank string into
+the same file.
+
+Some usage examples:
+Set up a kprobe::
+ # echo 1 > /sys/kernel/tracing/options/stacktrace
+ # echo 'p alloc_pages' > /sys/kernel/tracing/kprobe_events
+
+The call stack contains ``do_sys_openat2``::
+ # echo 'do_sys_openat2/**' > \
+ /sys/kernel/tracing/events/kprobes/p_alloc_pages_0/stack_filter
+
+The call stack doesn't contain ``do_sys_openat2``::
+ # echo '!do_sys_openat2/**' > \
+ /sys/kernel/tracing/events/kprobes/p_alloc_pages_0/stack_filter
+
+The call stack contains ``do_sys_openat2`` or ``do_translation_fault``,
+but not ``el0_sync_handler``::
+ # echo 'do_sys_openat2/**' > \
+ /sys/kernel/tracing/events/kprobes/p_alloc_pages_0/stack_filter
+ # echo 'do_translation_fault/**' >> \
+ /sys/kernel/tracing/events/kprobes/p_alloc_pages_0/stack_filter
+ # echo '!el0_sync_handler/**' >> \
+ /sys/kernel/tracing/events/kprobes/p_alloc_pages_0/stack_filter
+
+The call stack contains ``el0_sync_handler -> el0_da``::
+ # echo 'el0_sync_handler/el0_da/**' > \
+ /sys/kernel/tracing/events/kprobes/p_alloc_pages_0/stack_filter
+
+The call stack contains ``el0_sync_handler -> ... -> do_page_fault``::
+ # echo 'el0_sync_handler/**/do_page_fault/**' > \
+ /sys/kernel/tracing/events/kprobes/p_alloc_pages_0/stack_filter
+
+Enable the kprobe event and check the trace log::
+ # echo 1 > /sys/kernel/tracing/events/kprobes/enable
+ # cat /sys/kernel/tracing/trace
+
+Another example::
+ # cd /sys/kernel/tracing/events/sched/sched_switch
+ # echo \
+ 'work_pending/do_notify_resume/schedule/__schedule/**' > stack_filter
+ # echo \
+ '!ret_from_fork/**/kthread/worker_thread/schedule/**' >> stack_filter
+ # cat stack_filter
+
+Disable the stack filter::
+ # echo 0 > stack_filter
+ # echo > stack_filter
6. Event triggers
=================
diff --git a/include/linux/trace_events.h b/include/linux/trace_events.h
index 409385b25ecb37..f9928abe8c93b9 100644
--- a/include/linux/trace_events.h
+++ b/include/linux/trace_events.h
@@ -186,6 +186,18 @@ int trace_output_call(struct trace_iterator *iter, char *name, char *fmt, ...);
struct event_filter;
+#define STACK_FILTER_ADDR_MAP_SIZE 31
+
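+/*
+ * Per-event stack filter state: a list of parsed filters plus a small
+ * hash map caching the mapping from stack-trace return addresses to
+ * symbol start addresses (see kernel/trace/trace_events_stack_filter.c).
+ */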
+struct stack_filter_addr_map {
+ struct hlist_head map[STACK_FILTER_ADDR_MAP_SIZE];
+ spinlock_t lock;
+};
+
+struct event_stack_filter {
+ struct list_head filters;
+ struct stack_filter_addr_map *addr_map;
+};
+
enum trace_reg {
TRACE_REG_REGISTER,
TRACE_REG_UNREGISTER,
@@ -376,6 +388,7 @@ enum {
EVENT_FILE_FL_TRIGGER_COND_BIT,
EVENT_FILE_FL_PID_FILTER_BIT,
EVENT_FILE_FL_WAS_ENABLED_BIT,
+ EVENT_FILE_FL_STACK_FILTER_BIT,
};
extern struct trace_event_file *trace_get_event_file(const char *instance,
@@ -527,12 +540,16 @@ enum {
EVENT_FILE_FL_TRIGGER_COND = (1 << EVENT_FILE_FL_TRIGGER_COND_BIT),
EVENT_FILE_FL_PID_FILTER = (1 << EVENT_FILE_FL_PID_FILTER_BIT),
EVENT_FILE_FL_WAS_ENABLED = (1 << EVENT_FILE_FL_WAS_ENABLED_BIT),
+ EVENT_FILE_FL_STACK_FILTER = (1 << EVENT_FILE_FL_STACK_FILTER_BIT),
};
struct trace_event_file {
struct list_head list;
struct trace_event_call *event_call;
struct event_filter __rcu *filter;
+#ifdef CONFIG_TRACE_EVENT_STACK_FILTER
+ struct event_stack_filter __rcu *stack_filter;
+#endif
struct dentry *dir;
struct trace_array *tr;
struct trace_subsystem_dir *system;
@@ -596,6 +613,27 @@ enum event_trigger_type {
extern int filter_match_preds(struct event_filter *filter, void *rec);
+#ifdef CONFIG_TRACE_EVENT_STACK_FILTER
+extern int stack_filter_match(struct event_stack_filter *stack_filter);
+
+static inline struct event_stack_filter *
+get_stack_filter(struct trace_event_file *file)
+{
+ return rcu_dereference(file->stack_filter);
+}
+#else
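+/*
+ * With CONFIG_TRACE_EVENT_STACK_FILTER=n, report every call stack as
+ * matching so that callers do not need #ifdef guards.
+ */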
+static inline int stack_filter_match(struct event_stack_filter *stack_filter)
+{
+ return 1;
+}
+
+static inline struct event_stack_filter *
+get_stack_filter(struct trace_event_file *file)
+{
+ return NULL;
+}
+#endif
+
extern enum event_trigger_type
event_triggers_call(struct trace_event_file *file, void *rec,
struct ring_buffer_event *event);
diff --git a/kernel/trace/Kconfig b/kernel/trace/Kconfig
index 9682ceb1f3dfac..8a3a392abb52bf 100644
--- a/kernel/trace/Kconfig
+++ b/kernel/trace/Kconfig
@@ -932,6 +932,14 @@ config HIST_TRIGGERS_DEBUG
If unsure, say N.
+config TRACE_EVENT_STACK_FILTER
+ bool "Enable call stack filter for trace events"
+ default n
+ depends on STACKTRACE
+ help
+	  This option enables a call-stack filter for trace events.
+ See Documentation/trace/events.rst for details.
+
endif # FTRACE
endif # TRACING_SUPPORT
diff --git a/kernel/trace/Makefile b/kernel/trace/Makefile
index 8ab4d4290101b7..3288c466f9c851 100644
--- a/kernel/trace/Makefile
+++ b/kernel/trace/Makefile
@@ -77,6 +77,9 @@ obj-$(CONFIG_EVENT_TRACING) += trace_event_perf.o
endif
obj-$(CONFIG_EVENT_TRACING) += trace_events_filter.o
obj-$(CONFIG_EVENT_TRACING) += trace_events_trigger.o
+ifeq ($(CONFIG_TRACE_EVENT_STACK_FILTER),y)
+obj-$(CONFIG_EVENT_TRACING) += trace_events_stack_filter.o
+endif
obj-$(CONFIG_TRACE_EVENT_INJECT) += trace_events_inject.o
obj-$(CONFIG_SYNTH_EVENTS) += trace_events_synth.o
obj-$(CONFIG_HIST_TRIGGERS) += trace_events_hist.o
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index b3a0ee21d31c0b..0c5bdf89215cee 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -2782,7 +2782,7 @@ trace_event_buffer_lock_reserve(struct trace_buffer **current_rb,
*current_rb = trace_file->tr->array_buffer.buffer;
if (!ring_buffer_time_stamp_abs(*current_rb) && (trace_file->flags &
- (EVENT_FILE_FL_SOFT_DISABLED | EVENT_FILE_FL_FILTERED)) &&
+ (EVENT_FILE_FL_SOFT_DISABLED | EVENT_FILE_FL_FILTERED | EVENT_FILE_FL_STACK_FILTER)) &&
(entry = this_cpu_read(trace_buffered_event))) {
/* Try to use the per cpu buffer first */
val = this_cpu_inc_return(trace_buffered_event_cnt);
@@ -2837,6 +2837,11 @@ static void output_printk(struct trace_event_buffer *fbuffer)
!filter_match_preds(file->filter, fbuffer->entry)))
return;
+ if (IS_ENABLED(CONFIG_TRACE_EVENT_STACK_FILTER) &&
+ unlikely(file->flags & EVENT_FILE_FL_STACK_FILTER) &&
+ !stack_filter_match(get_stack_filter(file)))
+ return;
+
event = &fbuffer->trace_file->event_call->event;
spin_lock_irqsave(&tracepoint_iter_lock, flags);
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
index c0596e250c2ab7..78c7d5d4868e62 100644
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@ -1512,6 +1512,7 @@ __event_trigger_test_discard(struct trace_event_file *file,
if (likely(!(file->flags & (EVENT_FILE_FL_SOFT_DISABLED |
EVENT_FILE_FL_FILTERED |
+ EVENT_FILE_FL_STACK_FILTER |
EVENT_FILE_FL_PID_FILTER))))
return false;
@@ -1522,6 +1523,11 @@ __event_trigger_test_discard(struct trace_event_file *file,
!filter_match_preds(file->filter, entry))
goto discard;
+ if (IS_ENABLED(CONFIG_TRACE_EVENT_STACK_FILTER) &&
+ (file->flags & EVENT_FILE_FL_STACK_FILTER) &&
+ !stack_filter_match(get_stack_filter(file)))
+ goto discard;
+
if ((file->flags & EVENT_FILE_FL_PID_FILTER) &&
trace_event_ignore_this_pid(file))
goto discard;
@@ -1694,6 +1700,10 @@ static inline void *event_file_data(struct file *filp)
extern struct mutex event_mutex;
extern struct list_head ftrace_events;
+#ifdef CONFIG_TRACE_EVENT_STACK_FILTER
+extern const struct file_operations event_stack_filter_fops;
+#endif
+
extern const struct file_operations event_trigger_fops;
extern const struct file_operations event_hist_fops;
extern const struct file_operations event_hist_debug_fops;
diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
index f4b11f6098ae3d..f9e604f0fa35a4 100644
--- a/kernel/trace/trace_events.c
+++ b/kernel/trace/trace_events.c
@@ -2211,6 +2211,11 @@ event_create_dir(struct dentry *parent, struct trace_event_file *file)
trace_create_file("filter", TRACE_MODE_WRITE, file->dir,
file, &ftrace_event_filter_fops);
+#ifdef CONFIG_TRACE_EVENT_STACK_FILTER
+ trace_create_file("stack_filter", TRACE_MODE_WRITE, file->dir,
+ file, &event_stack_filter_fops);
+#endif
+
trace_create_file("trigger", TRACE_MODE_WRITE, file->dir,
file, &event_trigger_fops);
}
diff --git a/kernel/trace/trace_events_stack_filter.c b/kernel/trace/trace_events_stack_filter.c
new file mode 100644
index 00000000000000..9e79d4784fde6f
--- /dev/null
+++ b/kernel/trace/trace_events_stack_filter.c
@@ -0,0 +1,819 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/mutex.h>
+#include <linux/slab.h>
+
+#include "trace.h"
+
+#define TP_BUF_SIZE 1023 /* trace parser buf size */
+#define CS_BUF_SIZE 64 /* call stack buf size */
+
+#define MAX_SF_LEN 64 /* max stack filter length */
+#define DSTARS_ADDR 1 /* '**' wildcard */
+
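+/* O(n) list length; only used on filter address lists, capped at MAX_SF_LEN */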
+#define list_length(head) ({ \
+ int __len = 0; \
+ struct list_head *__pos; \
+ list_for_each(__pos, head) \
+ __len++; \
+ __len; \
+})
+
+#define ADDR_MAP_HASH(key) \
+ (((key) >> 2) % STACK_FILTER_ADDR_MAP_SIZE)
+
+struct function_address {
+ struct list_head list;
+ size_t addr; /* some addresses may represent wildcards */
+};
+
+struct stack_filter {
+ struct list_head list;
+ char *string; /* original string */
+ struct list_head addrs; /* function addresses */
+ bool neg; /* negate the filter */
+};
+
+struct addr_map_node {
+ struct hlist_node node;
+ unsigned long key;
+ unsigned long value;
+};
+
+static inline void
+function_address_list_clear(struct list_head *faddrs)
+{
+ struct function_address *faddr, *tmp;
+
+ list_for_each_entry_safe(faddr, tmp, faddrs, list) {
+ list_del(&faddr->list);
+ kfree(faddr);
+ }
+}
+
+static inline int
+function_address_list_copy(struct list_head *copy, struct list_head *faddrs)
+{
+ struct function_address *faddr, *new_faddr;
+
+ INIT_LIST_HEAD(copy);
+ list_for_each_entry_reverse(faddr, faddrs, list) {
+ new_faddr = kmalloc(sizeof(*new_faddr), GFP_KERNEL);
+ if (!new_faddr) {
+ function_address_list_clear(copy);
+ return -ENOMEM;
+ }
+ new_faddr->addr = faddr->addr;
+ list_add(&new_faddr->list, copy);
+ }
+ return 0;
+}
+
+static inline void
+stack_filter_init(struct stack_filter *filter)
+{
+ INIT_LIST_HEAD(&filter->addrs);
+}
+
+static inline struct stack_filter *
+stack_filter_new(void)
+{
+ struct stack_filter *filter;
+
+ filter = kzalloc(sizeof(*filter), GFP_KERNEL);
+ if (!filter)
+ return NULL;
+
+ stack_filter_init(filter);
+ return filter;
+}
+
+static inline void
+stack_filter_free(struct stack_filter *filter)
+{
+ struct function_address *faddr, *tmp;
+
+ list_for_each_entry_safe(faddr, tmp, &filter->addrs, list) {
+ list_del(&faddr->list);
+ kfree(faddr);
+ }
+
+ kfree(filter->string);
+ kfree(filter);
+}
+
+static inline int
+stack_filter_copy(struct stack_filter *copy, struct stack_filter *filter)
+{
+ int ret = 0;
+
+ copy->string = kstrdup(filter->string, GFP_KERNEL);
+ if (!copy->string)
+ return -ENOMEM;
+
+ ret = function_address_list_copy(&copy->addrs, &filter->addrs);
+ if (ret < 0) {
+ kfree(copy->string);
+ return ret;
+ }
+
+ copy->neg = filter->neg;
+ return 0;
+}
+
+static inline void
+stack_filter_list_clear(struct list_head *filters)
+{
+ struct stack_filter *filter, *tmp;
+
+ list_for_each_entry_safe(filter, tmp, filters, list) {
+ list_del(&filter->list);
+ stack_filter_free(filter);
+ }
+}
+
+static inline int
+stack_filter_list_copy(struct list_head *copy, struct list_head *filters)
+{
+ int ret = 0;
+ struct stack_filter *filter, *new_filter;
+
+ /* merge initialization with copy */
+ INIT_LIST_HEAD(copy);
+ list_for_each_entry_reverse(filter, filters, list) {
+ new_filter = kmalloc(sizeof(*new_filter), GFP_KERNEL);
+ if (!new_filter) {
+ ret = -ENOMEM;
+ goto bad;
+ }
+
+ ret = stack_filter_copy(new_filter, filter);
+ if (ret < 0)
+ goto bad;
+
+ list_add(&new_filter->list, copy);
+ }
+ return 0;
+
+ bad:
+ stack_filter_list_clear(copy);
+ return ret;
+}
+
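+/*
+ * While a stack filter is active, keep the per-cpu buffered event
+ * enabled so that events which end up being discarded by the filter can
+ * be staged outside the ring buffer (see trace_event_buffer_lock_reserve()).
+ */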
+static inline void
+stack_filter_enable(struct trace_event_file *file)
+{
+ unsigned long old_flags = file->flags;
+
+ file->flags |= EVENT_FILE_FL_STACK_FILTER;
+ if (file->flags != old_flags)
+ trace_buffered_event_enable();
+}
+
+static inline void
+stack_filter_disable(struct trace_event_file *file)
+{
+ unsigned long old_flags = file->flags;
+
+ file->flags &= ~EVENT_FILE_FL_STACK_FILTER;
+ if (file->flags != old_flags)
+ trace_buffered_event_disable();
+}
+
+static inline void
+addr_map_init(struct stack_filter_addr_map *addr_map)
+{
+ int i;
+
+ for (i = 0; i < STACK_FILTER_ADDR_MAP_SIZE; i++)
+ INIT_HLIST_HEAD(&addr_map->map[i]);
+ spin_lock_init(&addr_map->lock);
+}
+
+/*
+ * Typically, the number of functions in the call stack of a trace event
+ * is not large, so we use a simple hash table to store the mapping,
+ * without limiting its cache size.
+ */
+static inline int
+addr_map_insert(struct stack_filter_addr_map *addr_map, unsigned long key, unsigned long value)
+{
+ struct addr_map_node *node;
+ int idx, ret = 0;
+ unsigned long flags;
+
+ idx = ADDR_MAP_HASH(key);
+ spin_lock_irqsave(&addr_map->lock, flags);
+
+ hlist_for_each_entry(node, &addr_map->map[idx], node) {
+		/* a given key should always map to the same value, so keep the existing entry */
+ if (node->key == key)
+ goto out;
+ }
+
+ node = kmalloc(sizeof(*node), GFP_ATOMIC);
+ if (!node) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ node->key = key;
+ node->value = value;
+
+ hlist_add_head_rcu(&node->node, &addr_map->map[idx]);
+
+ out:
+ spin_unlock_irqrestore(&addr_map->lock, flags);
+ return ret;
+}
+
+static inline unsigned long
+addr_map_get(struct stack_filter_addr_map *addr_map, unsigned long key)
+{
+ struct addr_map_node *node;
+ int idx;
+ unsigned long ret = 0; /* value can't be 0 */
+
+ idx = ADDR_MAP_HASH(key);
+	/* we are already inside rcu_read_lock_sched(); this nesting is not strictly necessary */
+ rcu_read_lock_sched();
+
+ hlist_for_each_entry_rcu(node, &addr_map->map[idx], node) {
+ if (node->key == key) {
+ ret = node->value;
+ goto out;
+ }
+ }
+
+ out:
+ rcu_read_unlock_sched();
+ return ret;
+}
+
+/* require holding event_mutex */
+static inline void
+addr_map_clear(struct hlist_head *addr_map)
+{
+ int i;
+ struct addr_map_node *node;
+ struct hlist_node *tmp;
+
+ for (i = 0; i < STACK_FILTER_ADDR_MAP_SIZE; i++) {
+ hlist_for_each_entry_safe(node, tmp, &addr_map[i], node) {
+ hlist_del(&node->node);
+ kfree(node);
+ }
+ }
+}
+
+static inline void
+addr_map_free(struct stack_filter_addr_map *addr_map)
+{
+ addr_map_clear(addr_map->map);
+ kfree(addr_map);
+}
+
+static inline void
+event_stack_filter_init(struct event_stack_filter *esf)
+{
+ INIT_LIST_HEAD(&esf->filters);
+
+ /* addr_map should be pre-allocated, just init it here */
+ addr_map_init(esf->addr_map);
+}
+
+static inline struct event_stack_filter *
+event_stack_filter_new(void)
+{
+ struct event_stack_filter *esf;
+
+ esf = kmalloc(sizeof(*esf), GFP_KERNEL);
+ if (!esf)
+ return NULL;
+
+ esf->addr_map = kmalloc(sizeof(*esf->addr_map), GFP_KERNEL);
+	if (!esf->addr_map) {
+		kfree(esf);
+		return NULL;
+	}
+
+ event_stack_filter_init(esf);
+ return esf;
+}
+
+static inline void
+event_stack_filter_free(struct event_stack_filter *esf, bool free_addr_map)
+{
+ stack_filter_list_clear(&esf->filters);
+
+ /*
+ * addr_map may be passed to a new event_stack_filter,
+ * in this situation, we cannot free it.
+ */
+ if (free_addr_map)
+ addr_map_free(esf->addr_map);
+
+ kfree(esf);
+}
+
+/* require holding event_mutex */
+static inline int
+event_stack_filter_copy(struct event_stack_filter *copy,
+ struct event_stack_filter *esf)
+{
+ int ret;
+
+ ret = stack_filter_list_copy(&copy->filters, &esf->filters);
+ if (ret < 0)
+ return ret;
+
+ /*
+	 * Don't deep-copy addr_map, to keep the copy fast.
+	 * Be careful about this when using or freeing addr_map.
+ */
+ copy->addr_map = esf->addr_map;
+ return 0;
+}
+
+/*
+ * require holding event_mutex
+ * combines allocation ("new") and copy
+ */
+static inline struct event_stack_filter *
+event_stack_filter_clone(struct event_stack_filter *esf)
+{
+ struct event_stack_filter *copy;
+
+ copy = kmalloc(sizeof(*copy), GFP_KERNEL);
+ if (!copy)
+ return NULL;
+
+ if (event_stack_filter_copy(copy, esf) < 0) {
+ kfree(copy);
+ return NULL;
+ }
+
+ return copy;
+}
+
+/*
+ * parse a string with the form below:
+ * '!'?function(/(function|'**'))*
+ * where:
+ * '!' negates the filter
+ * '**' matches any function call path
+ * e.g.
+ * [1] work_pending/do_notify_resume/schedule/__schedule/'**'
+ * [2] '**'/kthread/kcompactd/schedule_timeout/schedule/'**'
+ * [3] !el0_sync/el0_sync_handler/'**'/invoke_syscall/'**'/schedule/'**'
+ * [4] !ret_from_fork/'**'/kthread/worker_thread/schedule/'**'
+ * Please remove '' around '**' if you want to use it.
+ *
+ * The full call path ends at the stack_filter_match function,
+ * e.g.
+ * work_pending/do_notify_resume/schedule/__schedule/\
+ * trace_event_raw_event_sched_switch/trace_event_buffer_commit/stack_filter_match.
+ *
+ * We recommend ending the string with '**', because it matches any
+ * function call path, so you don't have to know the deeper part of the
+ * call path.
+ *
+ * Call paths that match example [1] also match
+ * schedule/__schedule/'**' or '**'/schedule/__schedule/'**',
+ * because we match call stacks, not the full path, to speed up filtering.
+ * Function calls at the bottom of the stack are ignored.
+ *
+ * We convert symbols to their addresses here to avoid translating
+ * stacktrace addresses back to names at runtime, which would greatly
+ * slow down filtering. The downside is that we can't handle the '*'
+ * wildcard.
+ */
+static int
+stack_filter_parse(struct stack_filter *filter, char *buf)
+{
+ char *p = buf;
+ char name[NAME_MAX + 1];
+ struct function_address *faddr, *tmp;
+ size_t addr;
+ int i, len = 0, ret = 0;
+
+ if (*p == '!') {
+ filter->neg = true;
+ p++;
+ }
+ if (*p == '\0')
+ return -EINVAL;
+
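+	/*
+	 * Tokenize on '/': copy the next component into name[], skip the
+	 * following run of '/', then resolve it to a symbol address (or to
+	 * the DSTARS_ADDR sentinel for '**').  Components are added at the
+	 * head of filter->addrs, so the stored order is the reverse of the
+	 * written expression.
+	 */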
+ while (*p) {
+ i = 0;
+ while (*p && *p != '/') {
+ name[i++] = *(p++);
+ if (i > NAME_MAX) {
+ ret = -EINVAL;
+ goto bad;
+ }
+ }
+ name[i] = '\0';
+
+ while (*p == '/')
+ p++;
+
+ if (!strcmp(name, "**")) {
+ /* wildcard '**' */
+ addr = DSTARS_ADDR;
+ } else {
+ /* function name (maybe empty) */
+ addr = kallsyms_lookup_name(name);
+ if (!addr) {
+ ret = -EINVAL;
+ goto bad;
+ }
+ }
+
+ /* remove repetitive '**' */
+ if (addr == DSTARS_ADDR && !list_empty(&filter->addrs)) {
+ faddr = list_first_entry(&filter->addrs, struct function_address, list);
+
+ if (faddr->addr == DSTARS_ADDR)
+ continue;
+ }
+
+ if (++len > MAX_SF_LEN) {
+ ret = -EINVAL;
+ goto bad;
+ }
+
+ faddr = kzalloc(sizeof(*faddr), GFP_KERNEL);
+ if (!faddr) {
+ ret = -ENOMEM;
+ goto bad;
+ }
+
+ faddr->addr = addr;
+ list_add(&faddr->list, &filter->addrs);
+ }
+
+ if (list_empty(&filter->addrs))
+ return -EINVAL;
+
+ /* save original string as well */
+ filter->string = kstrdup(buf, GFP_KERNEL);
+ if (!filter->string) {
+ ret = -ENOMEM;
+ goto bad;
+ }
+
+ return ret;
+
+ bad:
+ list_for_each_entry_safe(faddr, tmp, &filter->addrs, list) {
+ list_del(&faddr->list);
+ kfree(faddr);
+ }
+ return ret;
+}
+
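+/*
+ * Match one stack filter against the saved call stack using dynamic
+ * programming: dp[i][j] is true when the innermost i stack entries are
+ * matched by the first j entries of filter->addrs (i.e. the rightmost j
+ * components of the written expression).  A '**' entry may consume zero
+ * or more stack entries, while a function address must equal the
+ * corresponding stack entry.  The filter matches as soon as the whole
+ * expression covers some prefix of the innermost frames, which is why
+ * the bottom of the call stack can be ignored.
+ */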
+static bool
+__stack_filter_match_one(struct stack_filter *filter,
+ unsigned long *buf, int num_entries, bool *dp)
+{
+ int num_faddrs, i, j;
+ bool ok;
+ struct function_address *faddr;
+
+ num_faddrs = list_length(&filter->addrs);
+
+#define pos(i, j) ((i) * (num_faddrs + 1) + (j))
+
+	/* dynamic programming */
+	ok = false;
+
+	for (i = 0; i <= num_entries; i++) {
+		/*
+		 * The inner loop never writes column 0, so initialize it here:
+		 * an empty filter matches only an empty stack prefix.
+		 */
+		dp[pos(i, 0)] = (i == 0);
+		faddr = list_entry(&filter->addrs, struct function_address, list);
+ for (j = 1; j <= num_faddrs; j++) {
+ faddr = list_next_entry(faddr, list);
+ dp[pos(i, j)] = false;
+
+ if (faddr->addr == DSTARS_ADDR) {
+ dp[pos(i, j)] = dp[pos(i, j - 1)];
+ if (i > 0)
+ dp[pos(i, j)] |= dp[pos(i - 1, j)];
+ } else if (i > 0 && buf[i - 1] == faddr->addr)
+ dp[pos(i, j)] = dp[pos(i - 1, j - 1)];
+ }
+
+ if (dp[pos(i, num_faddrs)]) {
+ ok = true;
+ break;
+ }
+ }
+
+#undef pos
+
+ return ok;
+}
+
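+/*
+ * Stack trace entries are return addresses inside a function, while the
+ * parsed filters store symbol start addresses from kallsyms_lookup_name().
+ * Normalize a trace entry to its symbol start address so the two can be
+ * compared directly.
+ */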
+/* return 0 on error */
+static inline unsigned long
+addr_remove_offset(struct event_stack_filter *esf, unsigned long addr)
+{
+ unsigned long new_addr;
+ char name[KSYM_NAME_LEN];
+
+ /*
+ * This operation is very slow,
+ * so we use a small cache to optimize it.
+ */
+ new_addr = addr_map_get(esf->addr_map, addr);
+ if (new_addr)
+ return new_addr;
+
+ if (lookup_symbol_name(addr, name) < 0)
+ return 0;
+
+ new_addr = kallsyms_lookup_name(name);
+ if (!new_addr)
+ return 0;
+
+ if (addr_map_insert(esf->addr_map, addr, new_addr) < 0)
+ return 0;
+
+ return new_addr;
+}
+
+/*
+ * return 1 on matching and 0 otherwise.
+ *
+ * A call stack matches successfully if the following conditions are met simultaneously:
+ * [1] It matches at least one positive stack filter.
+ * [2] It doesn't match any negative stack filter.
+ * If no positive filters are set, condition [1] doesn't need to be satisfied.
+ */
+int stack_filter_match(struct event_stack_filter *esf)
+{
+ int i, num_entries, num_faddrs;
+ int size, maxsize;
+ bool hasp, okp, *dp;
+ struct stack_filter *filter;
+ unsigned long buf[CS_BUF_SIZE], new_addr;
+ struct list_head *stack_filters;
+
+ /*
+	 * We are already inside an rcu_read_lock_sched() critical section,
+	 * so it is safe to access esf.
+ */
+ if (!esf)
+ return 1;
+
+ stack_filters = &esf->filters;
+ if (list_empty(stack_filters))
+ return 1;
+
+ num_entries = stack_trace_save(buf, CS_BUF_SIZE, 0);
+
+ for (i = num_entries - 1; i >= 0; i--) {
+ /*
+ * buf[i] contains addr of a symbol plus an offset.
+ * We should remove the offset here.
+ */
+ new_addr = addr_remove_offset(esf, buf[i]);
+ if (new_addr)
+ buf[i] = new_addr;
+ }
+
+ /* pre allocate memory for dp */
+ maxsize = 0;
+ list_for_each_entry(filter, stack_filters, list) {
+ num_faddrs = list_length(&filter->addrs);
+ size = (num_entries + 1) * (num_faddrs + 1);
+
+ if (size > maxsize)
+ maxsize = size;
+ }
+
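+	/*
+	 * dp is a table of (num_entries + 1) * (num_faddrs + 1) bool entries
+	 * (one byte each); it is sized for the longest filter and reused for
+	 * every filter in the list.
+	 */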
+ dp = kmalloc(maxsize, GFP_ATOMIC);
+ if (!dp)
+ return 0;
+
+ hasp = 0; okp = 0;
+ list_for_each_entry(filter, stack_filters, list) {
+ if (!filter->neg) {
+ hasp = 1;
+ if (__stack_filter_match_one(filter, buf, num_entries, dp)) {
+ okp = 1;
+ break;
+ }
+ }
+ }
+ if (hasp && !okp)
+ goto bad_match;
+
+ list_for_each_entry(filter, stack_filters, list) {
+ if (filter->neg && __stack_filter_match_one(filter, buf, num_entries, dp))
+ goto bad_match;
+ }
+
+ kfree(dp);
+ return 1;
+
+ bad_match:
+ kfree(dp);
+ return 0;
+}
+
+/*
+ * use seq_file APIs to read from stack_filters
+ */
+static void *sf_start(struct seq_file *m, loff_t *pos)
+{
+ struct trace_event_file *file;
+ loff_t n = *pos;
+
+ mutex_lock(&event_mutex);
+ file = m->private;
+
+ if (!file->stack_filter)
+ return NULL;
+
+ return seq_list_start(&file->stack_filter->filters, n);
+}
+
+static void *sf_next(struct seq_file *m, void *v, loff_t *pos)
+{
+ struct trace_event_file *file = m->private;
+
+ return seq_list_next(v, &file->stack_filter->filters, pos);
+}
+
+static void sf_stop(struct seq_file *m, void *v)
+{
+ mutex_unlock(&event_mutex);
+}
+
+static int sf_show(struct seq_file *m, void *v)
+{
+ struct stack_filter *filter = v;
+
+ seq_printf(m, "%s\n", filter->string);
+ return 0;
+}
+
+static const struct seq_operations stack_filter_seq_ops = {
+ .start = sf_start,
+ .stop = sf_stop,
+ .next = sf_next,
+ .show = sf_show,
+};
+
+/*
+ * operations for stack_filter file
+ * not for 'struct event_stack_filter'
+ */
+static ssize_t
+event_stack_filter_write(struct file *filp, const char __user *ubuf,
+ size_t cnt, loff_t *ppos)
+{
+ struct trace_event_file *event_file;
+ struct trace_parser parser;
+ struct stack_filter *filter;
+ struct event_stack_filter *esf, *old;
+ int read, ret;
+
+ filter = stack_filter_new();
+ if (!filter)
+ return -ENOMEM;
+
+ if (trace_parser_get_init(&parser, TP_BUF_SIZE + 1)) {
+ kfree(filter);
+ return -ENOMEM;
+ }
+
+ read = trace_get_user(&parser, ubuf, cnt, ppos);
+
+ if (read >= 0 && trace_parser_loaded(&parser)) {
+ /*
+		 * e.g. 'echo 0 > stack_filter' disables the stack filter.
+		 * Most data structures have already been cleared in
+		 * event_stack_filter_open(); just check here to avoid
+		 * reporting a spurious error.
+ */
+ if (!strcmp(strstrip(parser.buffer), "0")) {
+ kfree(filter);
+ trace_parser_put(&parser);
+
+ event_file = event_file_data(filp);
+ if (!rcu_dereference(event_file->stack_filter))
+ return read;
+
+			/* probably opened in append mode, so nothing was cleared; reject '0' */
+ return -EINVAL;
+ }
+
+ ret = stack_filter_parse(filter, parser.buffer);
+ if (ret < 0) {
+ kfree(filter);
+ trace_parser_put(&parser);
+ return ret;
+ }
+ } else {
+ kfree(filter);
+ goto out;
+ }
+
+ mutex_lock(&event_mutex);
+ event_file = event_file_data(filp);
+
+ if (event_file->stack_filter) {
+ /*
+		 * Copy the old filter and replace it with the new one to
+		 * follow RCU rules. This doesn't cost much time since this
+		 * function is rarely called, and it keeps the code simple.
+		 *
+		 * We don't use a separate RCU for stack_filter->filters
+		 * since its elements cannot be deleted one by one.
+ */
+ esf = event_stack_filter_clone(event_file->stack_filter);
+ if (!esf) {
+ mutex_unlock(&event_mutex);
+ stack_filter_free(filter);
+ goto out;
+ }
+ list_add_tail(&filter->list, &esf->filters);
+
+ old = event_file->stack_filter;
+ rcu_assign_pointer(event_file->stack_filter, esf);
+
+ /* make sure old esf is not being used */
+ tracepoint_synchronize_unregister();
+ event_stack_filter_free(old, false);
+
+ } else {
+ esf = event_stack_filter_new();
+ if (!esf) {
+ mutex_unlock(&event_mutex);
+ stack_filter_free(filter);
+ goto out;
+ }
+ list_add_tail(&filter->list, &esf->filters);
+
+ rcu_assign_pointer(event_file->stack_filter, esf);
+ tracepoint_synchronize_unregister();
+
+ stack_filter_enable(event_file);
+ }
+
+ mutex_unlock(&event_mutex);
+
+ out:
+ trace_parser_put(&parser);
+ return read;
+}
+
+static int event_stack_filter_open(struct inode *inode, struct file *filp)
+{
+ int ret;
+ struct trace_event_file *event_file;
+ struct event_stack_filter *esf;
+ struct seq_file *seq;
+
+ ret = security_locked_down(LOCKDOWN_TRACEFS);
+ if (ret)
+ return ret;
+
+ mutex_lock(&event_mutex);
+
+ event_file = inode->i_private;
+ if (!event_file) {
+ mutex_unlock(&event_mutex);
+ return -ENODEV;
+ }
+
+ if ((filp->f_mode & FMODE_WRITE) && (filp->f_flags & O_TRUNC)) {
+ stack_filter_disable(event_file);
+
+ if (event_file->stack_filter) {
+ esf = event_file->stack_filter;
+ RCU_INIT_POINTER(event_file->stack_filter, NULL);
+
+ /* wait until esf is not being used */
+ tracepoint_synchronize_unregister();
+ event_stack_filter_free(esf, true);
+ }
+ }
+
+ ret = seq_open(filp, &stack_filter_seq_ops);
+ if (!ret) {
+ seq = filp->private_data;
+ seq->private = inode->i_private;
+ }
+
+ mutex_unlock(&event_mutex);
+
+ return ret;
+}
+
+const struct file_operations event_stack_filter_fops = {
+ .open = event_stack_filter_open,
+ .read = seq_read,
+ .write = event_stack_filter_write,
+ .llseek = tracing_lseek,
+ .release = seq_release,
+};