Diffstat
-rw-r--r--  debian/patches-rt/0076-tracing-Merge-irqflags-preempt-counter.patch  | 1900
1 file changed, 1900 insertions(+), 0 deletions(-)
diff --git a/debian/patches-rt/0076-tracing-Merge-irqflags-preempt-counter.patch b/debian/patches-rt/0076-tracing-Merge-irqflags-preempt-counter.patch
new file mode 100644
index 000000000..8cd083750
--- /dev/null
+++ b/debian/patches-rt/0076-tracing-Merge-irqflags-preempt-counter.patch
@@ -0,0 +1,1900 @@
+From 1ab76ef52a7db4158a5d86604c2a318cfe847e9d Mon Sep 17 00:00:00 2001
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Date: Wed, 3 Feb 2021 11:05:23 -0500
+Subject: [PATCH 076/323] tracing: Merge irqflags + preempt counter.
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.10/older/patches-5.10.204-rt100.tar.xz
+
+The state of the interrupts (irqflags) and the preemption counter are
+both passed down to tracing_generic_entry_update(). Only one bit of
+irqflags is actually required: the on/off state. The complete 32 bits
+of the preemption counter aren't needed either; only whether any of the
+upper bits (softirq, hardirq and NMI) are set and the preemption depth.
+
+The irqflags and the preemption counter can therefore be evaluated
+early and the information stored in an integer `trace_ctx'.
+tracing_generic_entry_update() then uses the upper bits as the
+TRACE_FLAG_* bits and the lower 8 bits as the disabled-preemption depth
+(considering that one must be subtracted from the counter in one
+special case).
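+
+Schematically, the encoding implemented by the helpers added below
+(tracing_gen_ctx_flags() and tracing_generic_entry_update()) boils
+down to:
+
+	/* pack: TRACE_FLAG_* bits in the upper half, disabled-preemption
+	 * depth in the lower 8 bits */
+	trace_ctx = (trace_flags << 16) | (preempt_count() & 0xff);
+
+	/* unpack when filling struct trace_entry */
+	entry->preempt_count = trace_ctx & 0xff;
+	entry->flags	     = trace_ctx >> 16;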
+
+The actual preemption value is not used except for the tracing record.
+The `irqflags' variable is mostly only used for the tracing record. An
+exception is, for instance, wakeup_tracer_call() or
+probe_wakeup_sched_switch(), which explicitly disable interrupts and use
+that `irqflags' to save (and restore) the IRQ state and to record the
+state.
+
+Struct trace_event_buffer also has the `pc' and `flags' members, which
+can be replaced with `trace_ctx' since their actual value is not used
+outside of trace recording.
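+
+In the event-reserve path (see the trace_event_buffer_reserve() hunk
+below) this turns into roughly:
+
+	/* the _dec variant compensates for the preempt_count increment
+	 * done by the tracepoint itself */
+	fbuffer->trace_ctx = tracing_gen_ctx_dec();
+	fbuffer->event = trace_event_buffer_lock_reserve(&fbuffer->buffer,
+				trace_file, event_call->event.type, len,
+				fbuffer->trace_ctx);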
+
+This reduces tracing_generic_entry_update() to simply assigning values
+to struct trace_entry. The evaluation of the TRACE_FLAG_* bits is moved
+to tracing_gen_ctx_flags(), which replaces the preempt_count() and
+local_save_flags() invocations.
+
+As an example, ftrace_syscall_enter() may invoke:
+- trace_buffer_lock_reserve() -> … -> tracing_generic_entry_update()
+- event_trigger_unlock_commit()
+ -> ftrace_trace_stack() -> … -> tracing_generic_entry_update()
+ -> ftrace_trace_userstack() -> … -> tracing_generic_entry_update()
+
+In this case the TRACE_FLAG_* bits were evaluated three times. By using
+the `trace_ctx' they are evaluated once and assigned three times.
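+
+From a caller's point of view the pattern is roughly (a sketch, not the
+literal ftrace_syscall_enter() code):
+
+	unsigned int trace_ctx = tracing_gen_ctx();	/* evaluated once */
+
+	event = trace_event_buffer_lock_reserve(&buffer, trace_file, type,
+						len, trace_ctx);
+	...
+	event_trigger_unlock_commit(trace_file, buffer, event, entry,
+				    trace_ctx);
+	/* the commit path passes trace_ctx on to ftrace_trace_stack() and
+	 * ftrace_trace_userstack() instead of re-evaluating the flags */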
+
+A build with all tracers enabled on x86-64 with and without the patch:
+
+ text data bss dec hex filename
+21970669 17084168 7639260 46694097 2c87ed1 vmlinux.old
+21970293 17084168 7639260 46693721 2c87d59 vmlinux.new
+
+text shrank by 376 bytes, data remained constant.
+
+Link: https://lkml.kernel.org/r/20210125194511.3924915-2-bigeasy@linutronix.de
+
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Signed-off-by: Steven Rostedt (VMware) <rostedt@goodmis.org>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ include/linux/trace_events.h | 25 +++-
+ kernel/trace/blktrace.c | 17 +--
+ kernel/trace/trace.c | 206 +++++++++++++++------------
+ kernel/trace/trace.h | 38 +++--
+ kernel/trace/trace_branch.c | 6 +-
+ kernel/trace/trace_event_perf.c | 5 +-
+ kernel/trace/trace_events.c | 18 +--
+ kernel/trace/trace_events_inject.c | 6 +-
+ kernel/trace/trace_functions.c | 28 ++--
+ kernel/trace/trace_functions_graph.c | 32 ++---
+ kernel/trace/trace_hwlat.c | 7 +-
+ kernel/trace/trace_irqsoff.c | 86 +++++------
+ kernel/trace/trace_kprobe.c | 10 +-
+ kernel/trace/trace_mmiotrace.c | 14 +-
+ kernel/trace/trace_sched_wakeup.c | 71 +++++----
+ kernel/trace/trace_syscalls.c | 20 ++-
+ kernel/trace/trace_uprobe.c | 4 +-
+ 17 files changed, 286 insertions(+), 307 deletions(-)
+
+diff --git a/include/linux/trace_events.h b/include/linux/trace_events.h
+index f7ed0471d5a8..2a98c40526a0 100644
+--- a/include/linux/trace_events.h
++++ b/include/linux/trace_events.h
+@@ -152,17 +152,29 @@ enum print_line_t {
+
+ enum print_line_t trace_handle_return(struct trace_seq *s);
+
+-void tracing_generic_entry_update(struct trace_entry *entry,
+- unsigned short type,
+- unsigned long flags,
+- int pc);
++static inline void tracing_generic_entry_update(struct trace_entry *entry,
++ unsigned short type,
++ unsigned int trace_ctx)
++{
++ struct task_struct *tsk = current;
++
++ entry->preempt_count = trace_ctx & 0xff;
++ entry->pid = (tsk) ? tsk->pid : 0;
++ entry->type = type;
++ entry->flags = trace_ctx >> 16;
++}
++
++unsigned int tracing_gen_ctx_flags(unsigned long irqflags);
++unsigned int tracing_gen_ctx(void);
++unsigned int tracing_gen_ctx_dec(void);
++
+ struct trace_event_file;
+
+ struct ring_buffer_event *
+ trace_event_buffer_lock_reserve(struct trace_buffer **current_buffer,
+ struct trace_event_file *trace_file,
+ int type, unsigned long len,
+- unsigned long flags, int pc);
++ unsigned int trace_ctx);
+
+ #define TRACE_RECORD_CMDLINE BIT(0)
+ #define TRACE_RECORD_TGID BIT(1)
+@@ -236,8 +248,7 @@ struct trace_event_buffer {
+ struct ring_buffer_event *event;
+ struct trace_event_file *trace_file;
+ void *entry;
+- unsigned long flags;
+- int pc;
++ unsigned int trace_ctx;
+ struct pt_regs *regs;
+ };
+
+diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c
+index ab912cc60760..a95a2027eefd 100644
+--- a/kernel/trace/blktrace.c
++++ b/kernel/trace/blktrace.c
+@@ -72,17 +72,17 @@ static void trace_note(struct blk_trace *bt, pid_t pid, int action,
+ struct blk_io_trace *t;
+ struct ring_buffer_event *event = NULL;
+ struct trace_buffer *buffer = NULL;
+- int pc = 0;
++ unsigned int trace_ctx = 0;
+ int cpu = smp_processor_id();
+ bool blk_tracer = blk_tracer_enabled;
+ ssize_t cgid_len = cgid ? sizeof(cgid) : 0;
+
+ if (blk_tracer) {
+ buffer = blk_tr->array_buffer.buffer;
+- pc = preempt_count();
++ trace_ctx = tracing_gen_ctx_flags(0);
+ event = trace_buffer_lock_reserve(buffer, TRACE_BLK,
+ sizeof(*t) + len + cgid_len,
+- 0, pc);
++ trace_ctx);
+ if (!event)
+ return;
+ t = ring_buffer_event_data(event);
+@@ -107,7 +107,7 @@ static void trace_note(struct blk_trace *bt, pid_t pid, int action,
+ memcpy((void *) t + sizeof(*t) + cgid_len, data, len);
+
+ if (blk_tracer)
+- trace_buffer_unlock_commit(blk_tr, buffer, event, 0, pc);
++ trace_buffer_unlock_commit(blk_tr, buffer, event, trace_ctx);
+ }
+ }
+
+@@ -222,8 +222,9 @@ static void __blk_add_trace(struct blk_trace *bt, sector_t sector, int bytes,
+ struct blk_io_trace *t;
+ unsigned long flags = 0;
+ unsigned long *sequence;
++ unsigned int trace_ctx = 0;
+ pid_t pid;
+- int cpu, pc = 0;
++ int cpu;
+ bool blk_tracer = blk_tracer_enabled;
+ ssize_t cgid_len = cgid ? sizeof(cgid) : 0;
+
+@@ -252,10 +253,10 @@ static void __blk_add_trace(struct blk_trace *bt, sector_t sector, int bytes,
+ tracing_record_cmdline(current);
+
+ buffer = blk_tr->array_buffer.buffer;
+- pc = preempt_count();
++ trace_ctx = tracing_gen_ctx_flags(0);
+ event = trace_buffer_lock_reserve(buffer, TRACE_BLK,
+ sizeof(*t) + pdu_len + cgid_len,
+- 0, pc);
++ trace_ctx);
+ if (!event)
+ return;
+ t = ring_buffer_event_data(event);
+@@ -301,7 +302,7 @@ static void __blk_add_trace(struct blk_trace *bt, sector_t sector, int bytes,
+ memcpy((void *)t + sizeof(*t) + cgid_len, pdu_data, pdu_len);
+
+ if (blk_tracer) {
+- trace_buffer_unlock_commit(blk_tr, buffer, event, 0, pc);
++ trace_buffer_unlock_commit(blk_tr, buffer, event, trace_ctx);
+ return;
+ }
+ }
+diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
+index 4e0411b19ef9..376eb8a1c913 100644
+--- a/kernel/trace/trace.c
++++ b/kernel/trace/trace.c
+@@ -176,7 +176,7 @@ static union trace_eval_map_item *trace_eval_maps;
+ int tracing_set_tracer(struct trace_array *tr, const char *buf);
+ static void ftrace_trace_userstack(struct trace_array *tr,
+ struct trace_buffer *buffer,
+- unsigned long flags, int pc);
++ unsigned int trace_ctx);
+
+ #define MAX_TRACER_SIZE 100
+ static char bootup_tracer_buf[MAX_TRACER_SIZE] __initdata;
+@@ -909,23 +909,23 @@ static inline void trace_access_lock_init(void)
+
+ #ifdef CONFIG_STACKTRACE
+ static void __ftrace_trace_stack(struct trace_buffer *buffer,
+- unsigned long flags,
+- int skip, int pc, struct pt_regs *regs);
++ unsigned int trace_ctx,
++ int skip, struct pt_regs *regs);
+ static inline void ftrace_trace_stack(struct trace_array *tr,
+ struct trace_buffer *buffer,
+- unsigned long flags,
+- int skip, int pc, struct pt_regs *regs);
++ unsigned int trace_ctx,
++ int skip, struct pt_regs *regs);
+
+ #else
+ static inline void __ftrace_trace_stack(struct trace_buffer *buffer,
+- unsigned long flags,
+- int skip, int pc, struct pt_regs *regs)
++ unsigned int trace_ctx,
++ int skip, struct pt_regs *regs)
+ {
+ }
+ static inline void ftrace_trace_stack(struct trace_array *tr,
+ struct trace_buffer *buffer,
+- unsigned long flags,
+- int skip, int pc, struct pt_regs *regs)
++ unsigned long trace_ctx,
++ int skip, struct pt_regs *regs)
+ {
+ }
+
+@@ -933,24 +933,24 @@ static inline void ftrace_trace_stack(struct trace_array *tr,
+
+ static __always_inline void
+ trace_event_setup(struct ring_buffer_event *event,
+- int type, unsigned long flags, int pc)
++ int type, unsigned int trace_ctx)
+ {
+ struct trace_entry *ent = ring_buffer_event_data(event);
+
+- tracing_generic_entry_update(ent, type, flags, pc);
++ tracing_generic_entry_update(ent, type, trace_ctx);
+ }
+
+ static __always_inline struct ring_buffer_event *
+ __trace_buffer_lock_reserve(struct trace_buffer *buffer,
+ int type,
+ unsigned long len,
+- unsigned long flags, int pc)
++ unsigned int trace_ctx)
+ {
+ struct ring_buffer_event *event;
+
+ event = ring_buffer_lock_reserve(buffer, len);
+ if (event != NULL)
+- trace_event_setup(event, type, flags, pc);
++ trace_event_setup(event, type, trace_ctx);
+
+ return event;
+ }
+@@ -1011,25 +1011,22 @@ int __trace_puts(unsigned long ip, const char *str, int size)
+ struct ring_buffer_event *event;
+ struct trace_buffer *buffer;
+ struct print_entry *entry;
+- unsigned long irq_flags;
++ unsigned int trace_ctx;
+ int alloc;
+- int pc;
+
+ if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
+ return 0;
+
+- pc = preempt_count();
+-
+ if (unlikely(tracing_selftest_running || tracing_disabled))
+ return 0;
+
+ alloc = sizeof(*entry) + size + 2; /* possible \n added */
+
+- local_save_flags(irq_flags);
++ trace_ctx = tracing_gen_ctx();
+ buffer = global_trace.array_buffer.buffer;
+ ring_buffer_nest_start(buffer);
+- event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, alloc,
+- irq_flags, pc);
++ event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, alloc,
++ trace_ctx);
+ if (!event) {
+ size = 0;
+ goto out;
+@@ -1048,7 +1045,7 @@ int __trace_puts(unsigned long ip, const char *str, int size)
+ entry->buf[size] = '\0';
+
+ __buffer_unlock_commit(buffer, event);
+- ftrace_trace_stack(&global_trace, buffer, irq_flags, 4, pc, NULL);
++ ftrace_trace_stack(&global_trace, buffer, trace_ctx, 4, NULL);
+ out:
+ ring_buffer_nest_end(buffer);
+ return size;
+@@ -1065,25 +1062,22 @@ int __trace_bputs(unsigned long ip, const char *str)
+ struct ring_buffer_event *event;
+ struct trace_buffer *buffer;
+ struct bputs_entry *entry;
+- unsigned long irq_flags;
++ unsigned int trace_ctx;
+ int size = sizeof(struct bputs_entry);
+ int ret = 0;
+- int pc;
+
+ if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
+ return 0;
+
+- pc = preempt_count();
+-
+ if (unlikely(tracing_selftest_running || tracing_disabled))
+ return 0;
+
+- local_save_flags(irq_flags);
++ trace_ctx = tracing_gen_ctx();
+ buffer = global_trace.array_buffer.buffer;
+
+ ring_buffer_nest_start(buffer);
+ event = __trace_buffer_lock_reserve(buffer, TRACE_BPUTS, size,
+- irq_flags, pc);
++ trace_ctx);
+ if (!event)
+ goto out;
+
+@@ -1092,7 +1086,7 @@ int __trace_bputs(unsigned long ip, const char *str)
+ entry->str = str;
+
+ __buffer_unlock_commit(buffer, event);
+- ftrace_trace_stack(&global_trace, buffer, irq_flags, 4, pc, NULL);
++ ftrace_trace_stack(&global_trace, buffer, trace_ctx, 4, NULL);
+
+ ret = 1;
+ out:
+@@ -2581,36 +2575,69 @@ enum print_line_t trace_handle_return(struct trace_seq *s)
+ }
+ EXPORT_SYMBOL_GPL(trace_handle_return);
+
+-void
+-tracing_generic_entry_update(struct trace_entry *entry, unsigned short type,
+- unsigned long flags, int pc)
++unsigned int tracing_gen_ctx_flags(unsigned long irqflags)
+ {
+- struct task_struct *tsk = current;
++ unsigned int trace_flags = 0;
++ unsigned int pc;
++
++ pc = preempt_count();
+
+- entry->preempt_count = pc & 0xff;
+- entry->pid = (tsk) ? tsk->pid : 0;
+- entry->type = type;
+- entry->flags =
+ #ifdef CONFIG_TRACE_IRQFLAGS_SUPPORT
+- (irqs_disabled_flags(flags) ? TRACE_FLAG_IRQS_OFF : 0) |
++ if (irqs_disabled_flags(irqflags))
++ trace_flags |= TRACE_FLAG_IRQS_OFF;
+ #else
+- TRACE_FLAG_IRQS_NOSUPPORT |
++ trace_flags |= TRACE_FLAG_IRQS_NOSUPPORT;
+ #endif
+- ((pc & NMI_MASK ) ? TRACE_FLAG_NMI : 0) |
+- ((pc & HARDIRQ_MASK) ? TRACE_FLAG_HARDIRQ : 0) |
+- ((pc & SOFTIRQ_OFFSET) ? TRACE_FLAG_SOFTIRQ : 0) |
+- (tif_need_resched() ? TRACE_FLAG_NEED_RESCHED : 0) |
+- (test_preempt_need_resched() ? TRACE_FLAG_PREEMPT_RESCHED : 0);
++
++ if (pc & NMI_MASK)
++ trace_flags |= TRACE_FLAG_NMI;
++ if (pc & HARDIRQ_MASK)
++ trace_flags |= TRACE_FLAG_HARDIRQ;
++
++ if (pc & SOFTIRQ_OFFSET)
++ trace_flags |= TRACE_FLAG_SOFTIRQ;
++
++ if (tif_need_resched())
++ trace_flags |= TRACE_FLAG_NEED_RESCHED;
++ if (test_preempt_need_resched())
++ trace_flags |= TRACE_FLAG_PREEMPT_RESCHED;
++ return (trace_flags << 16) | (pc & 0xff);
++}
++
++unsigned int tracing_gen_ctx(void)
++{
++ unsigned long irqflags;
++
++#ifdef CONFIG_TRACE_IRQFLAGS_SUPPORT
++ local_save_flags(irqflags);
++#else
++ irqflags = 0;
++#endif
++ return tracing_gen_ctx_flags(irqflags);
++}
++
++unsigned int tracing_gen_ctx_dec(void)
++{
++ unsigned int trace_ctx;
++
++ trace_ctx = tracing_gen_ctx();
++
++ /*
++	 * Subtract one from the preemption counter if preemption is enabled,
++	 * see trace_event_buffer_reserve() for details.
++ */
++ if (IS_ENABLED(CONFIG_PREEMPTION))
++ trace_ctx--;
++ return trace_ctx;
+ }
+-EXPORT_SYMBOL_GPL(tracing_generic_entry_update);
+
+ struct ring_buffer_event *
+ trace_buffer_lock_reserve(struct trace_buffer *buffer,
+ int type,
+ unsigned long len,
+- unsigned long flags, int pc)
++ unsigned int trace_ctx)
+ {
+- return __trace_buffer_lock_reserve(buffer, type, len, flags, pc);
++ return __trace_buffer_lock_reserve(buffer, type, len, trace_ctx);
+ }
+
+ DEFINE_PER_CPU(struct ring_buffer_event *, trace_buffered_event);
+@@ -2729,7 +2756,7 @@ struct ring_buffer_event *
+ trace_event_buffer_lock_reserve(struct trace_buffer **current_rb,
+ struct trace_event_file *trace_file,
+ int type, unsigned long len,
+- unsigned long flags, int pc)
++ unsigned int trace_ctx)
+ {
+ struct ring_buffer_event *entry;
+ int val;
+@@ -2742,7 +2769,7 @@ trace_event_buffer_lock_reserve(struct trace_buffer **current_rb,
+ /* Try to use the per cpu buffer first */
+ val = this_cpu_inc_return(trace_buffered_event_cnt);
+ if ((len < (PAGE_SIZE - sizeof(*entry) - sizeof(entry->array[0]))) && val == 1) {
+- trace_event_setup(entry, type, flags, pc);
++ trace_event_setup(entry, type, trace_ctx);
+ entry->array[0] = len;
+ return entry;
+ }
+@@ -2750,7 +2777,7 @@ trace_event_buffer_lock_reserve(struct trace_buffer **current_rb,
+ }
+
+ entry = __trace_buffer_lock_reserve(*current_rb,
+- type, len, flags, pc);
++ type, len, trace_ctx);
+ /*
+ * If tracing is off, but we have triggers enabled
+ * we still need to look at the event data. Use the temp_buffer
+@@ -2759,8 +2786,8 @@ trace_event_buffer_lock_reserve(struct trace_buffer **current_rb,
+ */
+ if (!entry && trace_file->flags & EVENT_FILE_FL_TRIGGER_COND) {
+ *current_rb = temp_buffer;
+- entry = __trace_buffer_lock_reserve(*current_rb,
+- type, len, flags, pc);
++ entry = __trace_buffer_lock_reserve(*current_rb, type, len,
++ trace_ctx);
+ }
+ return entry;
+ }
+@@ -2846,7 +2873,7 @@ void trace_event_buffer_commit(struct trace_event_buffer *fbuffer)
+ ftrace_exports(fbuffer->event, TRACE_EXPORT_EVENT);
+ event_trigger_unlock_commit_regs(fbuffer->trace_file, fbuffer->buffer,
+ fbuffer->event, fbuffer->entry,
+- fbuffer->flags, fbuffer->pc, fbuffer->regs);
++ fbuffer->trace_ctx, fbuffer->regs);
+ }
+ EXPORT_SYMBOL_GPL(trace_event_buffer_commit);
+
+@@ -2862,7 +2889,7 @@ EXPORT_SYMBOL_GPL(trace_event_buffer_commit);
+ void trace_buffer_unlock_commit_regs(struct trace_array *tr,
+ struct trace_buffer *buffer,
+ struct ring_buffer_event *event,
+- unsigned long flags, int pc,
++ unsigned int trace_ctx,
+ struct pt_regs *regs)
+ {
+ __buffer_unlock_commit(buffer, event);
+@@ -2873,8 +2900,8 @@ void trace_buffer_unlock_commit_regs(struct trace_array *tr,
+ * and mmiotrace, but that's ok if they lose a function or
+ * two. They are not that meaningful.
+ */
+- ftrace_trace_stack(tr, buffer, flags, regs ? 0 : STACK_SKIP, pc, regs);
+- ftrace_trace_userstack(tr, buffer, flags, pc);
++ ftrace_trace_stack(tr, buffer, trace_ctx, regs ? 0 : STACK_SKIP, regs);
++ ftrace_trace_userstack(tr, buffer, trace_ctx);
+ }
+
+ /*
+@@ -2888,9 +2915,8 @@ trace_buffer_unlock_commit_nostack(struct trace_buffer *buffer,
+ }
+
+ void
+-trace_function(struct trace_array *tr,
+- unsigned long ip, unsigned long parent_ip, unsigned long flags,
+- int pc)
++trace_function(struct trace_array *tr, unsigned long ip, unsigned long
++ parent_ip, unsigned int trace_ctx)
+ {
+ struct trace_event_call *call = &event_function;
+ struct trace_buffer *buffer = tr->array_buffer.buffer;
+@@ -2898,7 +2924,7 @@ trace_function(struct trace_array *tr,
+ struct ftrace_entry *entry;
+
+ event = __trace_buffer_lock_reserve(buffer, TRACE_FN, sizeof(*entry),
+- flags, pc);
++ trace_ctx);
+ if (!event)
+ return;
+ entry = ring_buffer_event_data(event);
+@@ -2932,8 +2958,8 @@ static DEFINE_PER_CPU(struct ftrace_stacks, ftrace_stacks);
+ static DEFINE_PER_CPU(int, ftrace_stack_reserve);
+
+ static void __ftrace_trace_stack(struct trace_buffer *buffer,
+- unsigned long flags,
+- int skip, int pc, struct pt_regs *regs)
++ unsigned int trace_ctx,
++ int skip, struct pt_regs *regs)
+ {
+ struct trace_event_call *call = &event_kernel_stack;
+ struct ring_buffer_event *event;
+@@ -2981,7 +3007,7 @@ static void __ftrace_trace_stack(struct trace_buffer *buffer,
+ size = nr_entries * sizeof(unsigned long);
+ event = __trace_buffer_lock_reserve(buffer, TRACE_STACK,
+ (sizeof(*entry) - sizeof(entry->caller)) + size,
+- flags, pc);
++ trace_ctx);
+ if (!event)
+ goto out;
+ entry = ring_buffer_event_data(event);
+@@ -3002,22 +3028,22 @@ static void __ftrace_trace_stack(struct trace_buffer *buffer,
+
+ static inline void ftrace_trace_stack(struct trace_array *tr,
+ struct trace_buffer *buffer,
+- unsigned long flags,
+- int skip, int pc, struct pt_regs *regs)
++ unsigned int trace_ctx,
++ int skip, struct pt_regs *regs)
+ {
+ if (!(tr->trace_flags & TRACE_ITER_STACKTRACE))
+ return;
+
+- __ftrace_trace_stack(buffer, flags, skip, pc, regs);
++ __ftrace_trace_stack(buffer, trace_ctx, skip, regs);
+ }
+
+-void __trace_stack(struct trace_array *tr, unsigned long flags, int skip,
+- int pc)
++void __trace_stack(struct trace_array *tr, unsigned int trace_ctx,
++ int skip)
+ {
+ struct trace_buffer *buffer = tr->array_buffer.buffer;
+
+ if (rcu_is_watching()) {
+- __ftrace_trace_stack(buffer, flags, skip, pc, NULL);
++ __ftrace_trace_stack(buffer, trace_ctx, skip, NULL);
+ return;
+ }
+
+@@ -3031,7 +3057,7 @@ void __trace_stack(struct trace_array *tr, unsigned long flags, int skip,
+ return;
+
+ rcu_irq_enter_irqson();
+- __ftrace_trace_stack(buffer, flags, skip, pc, NULL);
++ __ftrace_trace_stack(buffer, trace_ctx, skip, NULL);
+ rcu_irq_exit_irqson();
+ }
+
+@@ -3041,19 +3067,15 @@ void __trace_stack(struct trace_array *tr, unsigned long flags, int skip,
+ */
+ void trace_dump_stack(int skip)
+ {
+- unsigned long flags;
+-
+ if (tracing_disabled || tracing_selftest_running)
+ return;
+
+- local_save_flags(flags);
+-
+ #ifndef CONFIG_UNWINDER_ORC
+ /* Skip 1 to skip this function. */
+ skip++;
+ #endif
+ __ftrace_trace_stack(global_trace.array_buffer.buffer,
+- flags, skip, preempt_count(), NULL);
++ tracing_gen_ctx(), skip, NULL);
+ }
+ EXPORT_SYMBOL_GPL(trace_dump_stack);
+
+@@ -3062,7 +3084,7 @@ static DEFINE_PER_CPU(int, user_stack_count);
+
+ static void
+ ftrace_trace_userstack(struct trace_array *tr,
+- struct trace_buffer *buffer, unsigned long flags, int pc)
++ struct trace_buffer *buffer, unsigned int trace_ctx)
+ {
+ struct trace_event_call *call = &event_user_stack;
+ struct ring_buffer_event *event;
+@@ -3089,7 +3111,7 @@ ftrace_trace_userstack(struct trace_array *tr,
+ __this_cpu_inc(user_stack_count);
+
+ event = __trace_buffer_lock_reserve(buffer, TRACE_USER_STACK,
+- sizeof(*entry), flags, pc);
++ sizeof(*entry), trace_ctx);
+ if (!event)
+ goto out_drop_count;
+ entry = ring_buffer_event_data(event);
+@@ -3109,7 +3131,7 @@ ftrace_trace_userstack(struct trace_array *tr,
+ #else /* CONFIG_USER_STACKTRACE_SUPPORT */
+ static void ftrace_trace_userstack(struct trace_array *tr,
+ struct trace_buffer *buffer,
+- unsigned long flags, int pc)
++ unsigned int trace_ctx)
+ {
+ }
+ #endif /* !CONFIG_USER_STACKTRACE_SUPPORT */
+@@ -3239,9 +3261,9 @@ int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
+ struct trace_buffer *buffer;
+ struct trace_array *tr = &global_trace;
+ struct bprint_entry *entry;
+- unsigned long flags;
++ unsigned int trace_ctx;
+ char *tbuffer;
+- int len = 0, size, pc;
++ int len = 0, size;
+
+ if (unlikely(tracing_selftest_running || tracing_disabled))
+ return 0;
+@@ -3249,7 +3271,7 @@ int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
+ /* Don't pollute graph traces with trace_vprintk internals */
+ pause_graph_tracing();
+
+- pc = preempt_count();
++ trace_ctx = tracing_gen_ctx();
+ preempt_disable_notrace();
+
+ tbuffer = get_trace_buf();
+@@ -3263,12 +3285,11 @@ int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
+ if (len > TRACE_BUF_SIZE/sizeof(int) || len < 0)
+ goto out_put;
+
+- local_save_flags(flags);
+ size = sizeof(*entry) + sizeof(u32) * len;
+ buffer = tr->array_buffer.buffer;
+ ring_buffer_nest_start(buffer);
+ event = __trace_buffer_lock_reserve(buffer, TRACE_BPRINT, size,
+- flags, pc);
++ trace_ctx);
+ if (!event)
+ goto out;
+ entry = ring_buffer_event_data(event);
+@@ -3278,7 +3299,7 @@ int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
+ memcpy(entry->buf, tbuffer, sizeof(u32) * len);
+ if (!call_filter_check_discard(call, entry, buffer, event)) {
+ __buffer_unlock_commit(buffer, event);
+- ftrace_trace_stack(tr, buffer, flags, 6, pc, NULL);
++ ftrace_trace_stack(tr, buffer, trace_ctx, 6, NULL);
+ }
+
+ out:
+@@ -3301,9 +3322,9 @@ __trace_array_vprintk(struct trace_buffer *buffer,
+ {
+ struct trace_event_call *call = &event_print;
+ struct ring_buffer_event *event;
+- int len = 0, size, pc;
++ int len = 0, size;
+ struct print_entry *entry;
+- unsigned long flags;
++ unsigned int trace_ctx;
+ char *tbuffer;
+
+ if (tracing_disabled || tracing_selftest_running)
+@@ -3312,7 +3333,7 @@ __trace_array_vprintk(struct trace_buffer *buffer,
+ /* Don't pollute graph traces with trace_vprintk internals */
+ pause_graph_tracing();
+
+- pc = preempt_count();
++ trace_ctx = tracing_gen_ctx();
+ preempt_disable_notrace();
+
+
+@@ -3324,11 +3345,10 @@ __trace_array_vprintk(struct trace_buffer *buffer,
+
+ len = vscnprintf(tbuffer, TRACE_BUF_SIZE, fmt, args);
+
+- local_save_flags(flags);
+ size = sizeof(*entry) + len + 1;
+ ring_buffer_nest_start(buffer);
+ event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
+- flags, pc);
++ trace_ctx);
+ if (!event)
+ goto out;
+ entry = ring_buffer_event_data(event);
+@@ -3337,7 +3357,7 @@ __trace_array_vprintk(struct trace_buffer *buffer,
+ memcpy(&entry->buf, tbuffer, len + 1);
+ if (!call_filter_check_discard(call, entry, buffer, event)) {
+ __buffer_unlock_commit(buffer, event);
+- ftrace_trace_stack(&global_trace, buffer, flags, 6, pc, NULL);
++ ftrace_trace_stack(&global_trace, buffer, trace_ctx, 6, NULL);
+ }
+
+ out:
+@@ -6831,7 +6851,6 @@ tracing_mark_write(struct file *filp, const char __user *ubuf,
+ enum event_trigger_type tt = ETT_NONE;
+ struct trace_buffer *buffer;
+ struct print_entry *entry;
+- unsigned long irq_flags;
+ ssize_t written;
+ int size;
+ int len;
+@@ -6851,7 +6870,6 @@ tracing_mark_write(struct file *filp, const char __user *ubuf,
+
+ BUILD_BUG_ON(TRACE_BUF_SIZE >= PAGE_SIZE);
+
+- local_save_flags(irq_flags);
+ size = sizeof(*entry) + cnt + 2; /* add '\0' and possible '\n' */
+
+ /* If less than "<faulted>", then make sure we can still add that */
+@@ -6860,7 +6878,7 @@ tracing_mark_write(struct file *filp, const char __user *ubuf,
+
+ buffer = tr->array_buffer.buffer;
+ event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
+- irq_flags, preempt_count());
++ tracing_gen_ctx());
+ if (unlikely(!event))
+ /* Ring buffer disabled, return as if not open for write */
+ return -EBADF;
+@@ -6912,7 +6930,6 @@ tracing_mark_raw_write(struct file *filp, const char __user *ubuf,
+ struct ring_buffer_event *event;
+ struct trace_buffer *buffer;
+ struct raw_data_entry *entry;
+- unsigned long irq_flags;
+ ssize_t written;
+ int size;
+ int len;
+@@ -6934,14 +6951,13 @@ tracing_mark_raw_write(struct file *filp, const char __user *ubuf,
+
+ BUILD_BUG_ON(TRACE_BUF_SIZE >= PAGE_SIZE);
+
+- local_save_flags(irq_flags);
+ size = sizeof(*entry) + cnt;
+ if (cnt < FAULT_SIZE_ID)
+ size += FAULT_SIZE_ID - cnt;
+
+ buffer = tr->array_buffer.buffer;
+ event = __trace_buffer_lock_reserve(buffer, TRACE_RAW_DATA, size,
+- irq_flags, preempt_count());
++ tracing_gen_ctx());
+ if (!event)
+ /* Ring buffer disabled, return as if not open for write */
+ return -EBADF;
+diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
+index 7c90872f2435..27ed42bccd7f 100644
+--- a/kernel/trace/trace.h
++++ b/kernel/trace/trace.h
+@@ -750,8 +750,7 @@ struct ring_buffer_event *
+ trace_buffer_lock_reserve(struct trace_buffer *buffer,
+ int type,
+ unsigned long len,
+- unsigned long flags,
+- int pc);
++ unsigned int trace_ctx);
+
+ struct trace_entry *tracing_get_trace_entry(struct trace_array *tr,
+ struct trace_array_cpu *data);
+@@ -778,11 +777,11 @@ unsigned long trace_total_entries(struct trace_array *tr);
+ void trace_function(struct trace_array *tr,
+ unsigned long ip,
+ unsigned long parent_ip,
+- unsigned long flags, int pc);
++ unsigned int trace_ctx);
+ void trace_graph_function(struct trace_array *tr,
+ unsigned long ip,
+ unsigned long parent_ip,
+- unsigned long flags, int pc);
++ unsigned int trace_ctx);
+ void trace_latency_header(struct seq_file *m);
+ void trace_default_header(struct seq_file *m);
+ void print_trace_header(struct seq_file *m, struct trace_iterator *iter);
+@@ -850,11 +849,10 @@ static inline void latency_fsnotify(struct trace_array *tr) { }
+ #endif
+
+ #ifdef CONFIG_STACKTRACE
+-void __trace_stack(struct trace_array *tr, unsigned long flags, int skip,
+- int pc);
++void __trace_stack(struct trace_array *tr, unsigned int trace_ctx, int skip);
+ #else
+-static inline void __trace_stack(struct trace_array *tr, unsigned long flags,
+- int skip, int pc)
++static inline void __trace_stack(struct trace_array *tr, unsigned int trace_ctx,
++ int skip)
+ {
+ }
+ #endif /* CONFIG_STACKTRACE */
+@@ -994,10 +992,10 @@ extern void graph_trace_open(struct trace_iterator *iter);
+ extern void graph_trace_close(struct trace_iterator *iter);
+ extern int __trace_graph_entry(struct trace_array *tr,
+ struct ftrace_graph_ent *trace,
+- unsigned long flags, int pc);
++ unsigned int trace_ctx);
+ extern void __trace_graph_return(struct trace_array *tr,
+ struct ftrace_graph_ret *trace,
+- unsigned long flags, int pc);
++ unsigned int trace_ctx);
+
+ #ifdef CONFIG_DYNAMIC_FTRACE
+ extern struct ftrace_hash __rcu *ftrace_graph_hash;
+@@ -1460,15 +1458,15 @@ extern int call_filter_check_discard(struct trace_event_call *call, void *rec,
+ void trace_buffer_unlock_commit_regs(struct trace_array *tr,
+ struct trace_buffer *buffer,
+ struct ring_buffer_event *event,
+- unsigned long flags, int pc,
++				     unsigned int trace_ctx,
+ struct pt_regs *regs);
+
+ static inline void trace_buffer_unlock_commit(struct trace_array *tr,
+ struct trace_buffer *buffer,
+ struct ring_buffer_event *event,
+- unsigned long flags, int pc)
++ unsigned int trace_ctx)
+ {
+- trace_buffer_unlock_commit_regs(tr, buffer, event, flags, pc, NULL);
++ trace_buffer_unlock_commit_regs(tr, buffer, event, trace_ctx, NULL);
+ }
+
+ DECLARE_PER_CPU(struct ring_buffer_event *, trace_buffered_event);
+@@ -1541,8 +1539,7 @@ __event_trigger_test_discard(struct trace_event_file *file,
+ * @buffer: The ring buffer that the event is being written to
+ * @event: The event meta data in the ring buffer
+ * @entry: The event itself
+- * @irq_flags: The state of the interrupts at the start of the event
+- * @pc: The state of the preempt count at the start of the event.
++ * @trace_ctx: The tracing context flags.
+ *
+ * This is a helper function to handle triggers that require data
+ * from the event itself. It also tests the event against filters and
+@@ -1552,12 +1549,12 @@ static inline void
+ event_trigger_unlock_commit(struct trace_event_file *file,
+ struct trace_buffer *buffer,
+ struct ring_buffer_event *event,
+- void *entry, unsigned long irq_flags, int pc)
++ void *entry, unsigned int trace_ctx)
+ {
+ enum event_trigger_type tt = ETT_NONE;
+
+ if (!__event_trigger_test_discard(file, buffer, event, entry, &tt))
+- trace_buffer_unlock_commit(file->tr, buffer, event, irq_flags, pc);
++ trace_buffer_unlock_commit(file->tr, buffer, event, trace_ctx);
+
+ if (tt)
+ event_triggers_post_call(file, tt);
+@@ -1569,8 +1566,7 @@ event_trigger_unlock_commit(struct trace_event_file *file,
+ * @buffer: The ring buffer that the event is being written to
+ * @event: The event meta data in the ring buffer
+ * @entry: The event itself
+- * @irq_flags: The state of the interrupts at the start of the event
+- * @pc: The state of the preempt count at the start of the event.
++ * @trace_ctx: The tracing context flags.
+ *
+ * This is a helper function to handle triggers that require data
+ * from the event itself. It also tests the event against filters and
+@@ -1583,14 +1579,14 @@ static inline void
+ event_trigger_unlock_commit_regs(struct trace_event_file *file,
+ struct trace_buffer *buffer,
+ struct ring_buffer_event *event,
+- void *entry, unsigned long irq_flags, int pc,
++ void *entry, unsigned int trace_ctx,
+ struct pt_regs *regs)
+ {
+ enum event_trigger_type tt = ETT_NONE;
+
+ if (!__event_trigger_test_discard(file, buffer, event, entry, &tt))
+ trace_buffer_unlock_commit_regs(file->tr, buffer, event,
+- irq_flags, pc, regs);
++ trace_ctx, regs);
+
+ if (tt)
+ event_triggers_post_call(file, tt);
+diff --git a/kernel/trace/trace_branch.c b/kernel/trace/trace_branch.c
+index eff099123aa2..e47fdb4c92fb 100644
+--- a/kernel/trace/trace_branch.c
++++ b/kernel/trace/trace_branch.c
+@@ -37,7 +37,7 @@ probe_likely_condition(struct ftrace_likely_data *f, int val, int expect)
+ struct ring_buffer_event *event;
+ struct trace_branch *entry;
+ unsigned long flags;
+- int pc;
++ unsigned int trace_ctx;
+ const char *p;
+
+ if (current->trace_recursion & TRACE_BRANCH_BIT)
+@@ -59,10 +59,10 @@ probe_likely_condition(struct ftrace_likely_data *f, int val, int expect)
+ if (atomic_read(&data->disabled))
+ goto out;
+
+- pc = preempt_count();
++ trace_ctx = tracing_gen_ctx_flags(flags);
+ buffer = tr->array_buffer.buffer;
+ event = trace_buffer_lock_reserve(buffer, TRACE_BRANCH,
+- sizeof(*entry), flags, pc);
++ sizeof(*entry), trace_ctx);
+ if (!event)
+ goto out;
+
+diff --git a/kernel/trace/trace_event_perf.c b/kernel/trace/trace_event_perf.c
+index 643e0b19920d..0443dd61667b 100644
+--- a/kernel/trace/trace_event_perf.c
++++ b/kernel/trace/trace_event_perf.c
+@@ -421,11 +421,8 @@ NOKPROBE_SYMBOL(perf_trace_buf_alloc);
+ void perf_trace_buf_update(void *record, u16 type)
+ {
+ struct trace_entry *entry = record;
+- int pc = preempt_count();
+- unsigned long flags;
+
+- local_save_flags(flags);
+- tracing_generic_entry_update(entry, type, flags, pc);
++ tracing_generic_entry_update(entry, type, tracing_gen_ctx());
+ }
+ NOKPROBE_SYMBOL(perf_trace_buf_update);
+
+diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
+index 4b5a8d7275be..df64b92c5edc 100644
+--- a/kernel/trace/trace_events.c
++++ b/kernel/trace/trace_events.c
+@@ -259,22 +259,19 @@ void *trace_event_buffer_reserve(struct trace_event_buffer *fbuffer,
+ trace_event_ignore_this_pid(trace_file))
+ return NULL;
+
+- local_save_flags(fbuffer->flags);
+- fbuffer->pc = preempt_count();
+ /*
+ * If CONFIG_PREEMPTION is enabled, then the tracepoint itself disables
+ * preemption (adding one to the preempt_count). Since we are
+ * interested in the preempt_count at the time the tracepoint was
+ * hit, we need to subtract one to offset the increment.
+ */
+- if (IS_ENABLED(CONFIG_PREEMPTION))
+- fbuffer->pc--;
++ fbuffer->trace_ctx = tracing_gen_ctx_dec();
+ fbuffer->trace_file = trace_file;
+
+ fbuffer->event =
+ trace_event_buffer_lock_reserve(&fbuffer->buffer, trace_file,
+ event_call->event.type, len,
+- fbuffer->flags, fbuffer->pc);
++ fbuffer->trace_ctx);
+ if (!fbuffer->event)
+ return NULL;
+
+@@ -3709,12 +3706,11 @@ function_test_events_call(unsigned long ip, unsigned long parent_ip,
+ struct trace_buffer *buffer;
+ struct ring_buffer_event *event;
+ struct ftrace_entry *entry;
+- unsigned long flags;
++ unsigned int trace_ctx;
+ long disabled;
+ int cpu;
+- int pc;
+
+- pc = preempt_count();
++ trace_ctx = tracing_gen_ctx();
+ preempt_disable_notrace();
+ cpu = raw_smp_processor_id();
+ disabled = atomic_inc_return(&per_cpu(ftrace_test_event_disable, cpu));
+@@ -3722,11 +3718,9 @@ function_test_events_call(unsigned long ip, unsigned long parent_ip,
+ if (disabled != 1)
+ goto out;
+
+- local_save_flags(flags);
+-
+ event = trace_event_buffer_lock_reserve(&buffer, &event_trace_file,
+ TRACE_FN, sizeof(*entry),
+- flags, pc);
++ trace_ctx);
+ if (!event)
+ goto out;
+ entry = ring_buffer_event_data(event);
+@@ -3734,7 +3728,7 @@ function_test_events_call(unsigned long ip, unsigned long parent_ip,
+ entry->parent_ip = parent_ip;
+
+ event_trigger_unlock_commit(&event_trace_file, buffer, event,
+- entry, flags, pc);
++ entry, trace_ctx);
+ out:
+ atomic_dec(&per_cpu(ftrace_test_event_disable, cpu));
+ preempt_enable_notrace();
+diff --git a/kernel/trace/trace_events_inject.c b/kernel/trace/trace_events_inject.c
+index 149c7dc6a447..b1fce64e126c 100644
+--- a/kernel/trace/trace_events_inject.c
++++ b/kernel/trace/trace_events_inject.c
+@@ -192,7 +192,6 @@ static void *trace_alloc_entry(struct trace_event_call *call, int *size)
+ static int parse_entry(char *str, struct trace_event_call *call, void **pentry)
+ {
+ struct ftrace_event_field *field;
+- unsigned long irq_flags;
+ void *entry = NULL;
+ int entry_size;
+ u64 val = 0;
+@@ -203,9 +202,8 @@ static int parse_entry(char *str, struct trace_event_call *call, void **pentry)
+ if (!entry)
+ return -ENOMEM;
+
+- local_save_flags(irq_flags);
+- tracing_generic_entry_update(entry, call->event.type, irq_flags,
+- preempt_count());
++ tracing_generic_entry_update(entry, call->event.type,
++ tracing_gen_ctx());
+
+ while ((len = parse_field(str, call, &field, &val)) > 0) {
+ if (is_function_field(field))
+diff --git a/kernel/trace/trace_functions.c b/kernel/trace/trace_functions.c
+index 93e20ed642e5..8606cb73341e 100644
+--- a/kernel/trace/trace_functions.c
++++ b/kernel/trace/trace_functions.c
+@@ -133,15 +133,14 @@ function_trace_call(unsigned long ip, unsigned long parent_ip,
+ {
+ struct trace_array *tr = op->private;
+ struct trace_array_cpu *data;
+- unsigned long flags;
++ unsigned int trace_ctx;
+ int bit;
+ int cpu;
+- int pc;
+
+ if (unlikely(!tr->function_enabled))
+ return;
+
+- pc = preempt_count();
++ trace_ctx = tracing_gen_ctx();
+ preempt_disable_notrace();
+
+ bit = trace_test_and_set_recursion(TRACE_FTRACE_START);
+@@ -150,10 +149,9 @@ function_trace_call(unsigned long ip, unsigned long parent_ip,
+
+ cpu = smp_processor_id();
+ data = per_cpu_ptr(tr->array_buffer.data, cpu);
+- if (!atomic_read(&data->disabled)) {
+- local_save_flags(flags);
+- trace_function(tr, ip, parent_ip, flags, pc);
+- }
++ if (!atomic_read(&data->disabled))
++ trace_function(tr, ip, parent_ip, trace_ctx);
++
+ trace_clear_recursion(bit);
+
+ out:
+@@ -187,7 +185,7 @@ function_stack_trace_call(unsigned long ip, unsigned long parent_ip,
+ unsigned long flags;
+ long disabled;
+ int cpu;
+- int pc;
++ unsigned int trace_ctx;
+
+ if (unlikely(!tr->function_enabled))
+ return;
+@@ -202,9 +200,9 @@ function_stack_trace_call(unsigned long ip, unsigned long parent_ip,
+ disabled = atomic_inc_return(&data->disabled);
+
+ if (likely(disabled == 1)) {
+- pc = preempt_count();
+- trace_function(tr, ip, parent_ip, flags, pc);
+- __trace_stack(tr, flags, STACK_SKIP, pc);
++ trace_ctx = tracing_gen_ctx_flags(flags);
++ trace_function(tr, ip, parent_ip, trace_ctx);
++ __trace_stack(tr, trace_ctx, STACK_SKIP);
+ }
+
+ atomic_dec(&data->disabled);
+@@ -407,13 +405,11 @@ ftrace_traceoff(unsigned long ip, unsigned long parent_ip,
+
+ static __always_inline void trace_stack(struct trace_array *tr)
+ {
+- unsigned long flags;
+- int pc;
++ unsigned int trace_ctx;
+
+- local_save_flags(flags);
+- pc = preempt_count();
++ trace_ctx = tracing_gen_ctx();
+
+- __trace_stack(tr, flags, FTRACE_STACK_SKIP, pc);
++ __trace_stack(tr, trace_ctx, FTRACE_STACK_SKIP);
+ }
+
+ static void
+diff --git a/kernel/trace/trace_functions_graph.c b/kernel/trace/trace_functions_graph.c
+index 60d66278aa0d..b086ba8bb3d6 100644
+--- a/kernel/trace/trace_functions_graph.c
++++ b/kernel/trace/trace_functions_graph.c
+@@ -96,8 +96,7 @@ print_graph_duration(struct trace_array *tr, unsigned long long duration,
+
+ int __trace_graph_entry(struct trace_array *tr,
+ struct ftrace_graph_ent *trace,
+- unsigned long flags,
+- int pc)
++ unsigned int trace_ctx)
+ {
+ struct trace_event_call *call = &event_funcgraph_entry;
+ struct ring_buffer_event *event;
+@@ -105,7 +104,7 @@ int __trace_graph_entry(struct trace_array *tr,
+ struct ftrace_graph_ent_entry *entry;
+
+ event = trace_buffer_lock_reserve(buffer, TRACE_GRAPH_ENT,
+- sizeof(*entry), flags, pc);
++ sizeof(*entry), trace_ctx);
+ if (!event)
+ return 0;
+ entry = ring_buffer_event_data(event);
+@@ -129,10 +128,10 @@ int trace_graph_entry(struct ftrace_graph_ent *trace)
+ struct trace_array *tr = graph_array;
+ struct trace_array_cpu *data;
+ unsigned long flags;
++ unsigned int trace_ctx;
+ long disabled;
+ int ret;
+ int cpu;
+- int pc;
+
+ if (trace_recursion_test(TRACE_GRAPH_NOTRACE_BIT))
+ return 0;
+@@ -174,8 +173,8 @@ int trace_graph_entry(struct ftrace_graph_ent *trace)
+ data = per_cpu_ptr(tr->array_buffer.data, cpu);
+ disabled = atomic_inc_return(&data->disabled);
+ if (likely(disabled == 1)) {
+- pc = preempt_count();
+- ret = __trace_graph_entry(tr, trace, flags, pc);
++ trace_ctx = tracing_gen_ctx_flags(flags);
++ ret = __trace_graph_entry(tr, trace, trace_ctx);
+ } else {
+ ret = 0;
+ }
+@@ -188,7 +187,7 @@ int trace_graph_entry(struct ftrace_graph_ent *trace)
+
+ static void
+ __trace_graph_function(struct trace_array *tr,
+- unsigned long ip, unsigned long flags, int pc)
++ unsigned long ip, unsigned int trace_ctx)
+ {
+ u64 time = trace_clock_local();
+ struct ftrace_graph_ent ent = {
+@@ -202,22 +201,21 @@ __trace_graph_function(struct trace_array *tr,
+ .rettime = time,
+ };
+
+- __trace_graph_entry(tr, &ent, flags, pc);
+- __trace_graph_return(tr, &ret, flags, pc);
++ __trace_graph_entry(tr, &ent, trace_ctx);
++ __trace_graph_return(tr, &ret, trace_ctx);
+ }
+
+ void
+ trace_graph_function(struct trace_array *tr,
+ unsigned long ip, unsigned long parent_ip,
+- unsigned long flags, int pc)
++ unsigned int trace_ctx)
+ {
+- __trace_graph_function(tr, ip, flags, pc);
++ __trace_graph_function(tr, ip, trace_ctx);
+ }
+
+ void __trace_graph_return(struct trace_array *tr,
+ struct ftrace_graph_ret *trace,
+- unsigned long flags,
+- int pc)
++ unsigned int trace_ctx)
+ {
+ struct trace_event_call *call = &event_funcgraph_exit;
+ struct ring_buffer_event *event;
+@@ -225,7 +223,7 @@ void __trace_graph_return(struct trace_array *tr,
+ struct ftrace_graph_ret_entry *entry;
+
+ event = trace_buffer_lock_reserve(buffer, TRACE_GRAPH_RET,
+- sizeof(*entry), flags, pc);
++ sizeof(*entry), trace_ctx);
+ if (!event)
+ return;
+ entry = ring_buffer_event_data(event);
+@@ -239,9 +237,9 @@ void trace_graph_return(struct ftrace_graph_ret *trace)
+ struct trace_array *tr = graph_array;
+ struct trace_array_cpu *data;
+ unsigned long flags;
++ unsigned int trace_ctx;
+ long disabled;
+ int cpu;
+- int pc;
+
+ ftrace_graph_addr_finish(trace);
+
+@@ -255,8 +253,8 @@ void trace_graph_return(struct ftrace_graph_ret *trace)
+ data = per_cpu_ptr(tr->array_buffer.data, cpu);
+ disabled = atomic_inc_return(&data->disabled);
+ if (likely(disabled == 1)) {
+- pc = preempt_count();
+- __trace_graph_return(tr, trace, flags, pc);
++ trace_ctx = tracing_gen_ctx_flags(flags);
++ __trace_graph_return(tr, trace, trace_ctx);
+ }
+ atomic_dec(&data->disabled);
+ local_irq_restore(flags);
+diff --git a/kernel/trace/trace_hwlat.c b/kernel/trace/trace_hwlat.c
+index d071fc271eef..4c01c5d8b9a7 100644
+--- a/kernel/trace/trace_hwlat.c
++++ b/kernel/trace/trace_hwlat.c
+@@ -108,14 +108,9 @@ static void trace_hwlat_sample(struct hwlat_sample *sample)
+ struct trace_buffer *buffer = tr->array_buffer.buffer;
+ struct ring_buffer_event *event;
+ struct hwlat_entry *entry;
+- unsigned long flags;
+- int pc;
+-
+- pc = preempt_count();
+- local_save_flags(flags);
+
+ event = trace_buffer_lock_reserve(buffer, TRACE_HWLAT, sizeof(*entry),
+- flags, pc);
++ tracing_gen_ctx());
+ if (!event)
+ return;
+ entry = ring_buffer_event_data(event);
+diff --git a/kernel/trace/trace_irqsoff.c b/kernel/trace/trace_irqsoff.c
+index 619a60944bb6..4a11967c8daa 100644
+--- a/kernel/trace/trace_irqsoff.c
++++ b/kernel/trace/trace_irqsoff.c
+@@ -143,11 +143,14 @@ irqsoff_tracer_call(unsigned long ip, unsigned long parent_ip,
+ struct trace_array *tr = irqsoff_trace;
+ struct trace_array_cpu *data;
+ unsigned long flags;
++ unsigned int trace_ctx;
+
+ if (!func_prolog_dec(tr, &data, &flags))
+ return;
+
+- trace_function(tr, ip, parent_ip, flags, preempt_count());
++ trace_ctx = tracing_gen_ctx_flags(flags);
++
++ trace_function(tr, ip, parent_ip, trace_ctx);
+
+ atomic_dec(&data->disabled);
+ }
+@@ -177,8 +180,8 @@ static int irqsoff_graph_entry(struct ftrace_graph_ent *trace)
+ struct trace_array *tr = irqsoff_trace;
+ struct trace_array_cpu *data;
+ unsigned long flags;
++ unsigned int trace_ctx;
+ int ret;
+- int pc;
+
+ if (ftrace_graph_ignore_func(trace))
+ return 0;
+@@ -195,8 +198,8 @@ static int irqsoff_graph_entry(struct ftrace_graph_ent *trace)
+ if (!func_prolog_dec(tr, &data, &flags))
+ return 0;
+
+- pc = preempt_count();
+- ret = __trace_graph_entry(tr, trace, flags, pc);
++ trace_ctx = tracing_gen_ctx_flags(flags);
++ ret = __trace_graph_entry(tr, trace, trace_ctx);
+ atomic_dec(&data->disabled);
+
+ return ret;
+@@ -207,15 +210,15 @@ static void irqsoff_graph_return(struct ftrace_graph_ret *trace)
+ struct trace_array *tr = irqsoff_trace;
+ struct trace_array_cpu *data;
+ unsigned long flags;
+- int pc;
++ unsigned int trace_ctx;
+
+ ftrace_graph_addr_finish(trace);
+
+ if (!func_prolog_dec(tr, &data, &flags))
+ return;
+
+- pc = preempt_count();
+- __trace_graph_return(tr, trace, flags, pc);
++ trace_ctx = tracing_gen_ctx_flags(flags);
++ __trace_graph_return(tr, trace, trace_ctx);
+ atomic_dec(&data->disabled);
+ }
+
+@@ -268,12 +271,12 @@ static void irqsoff_print_header(struct seq_file *s)
+ static void
+ __trace_function(struct trace_array *tr,
+ unsigned long ip, unsigned long parent_ip,
+- unsigned long flags, int pc)
++ unsigned int trace_ctx)
+ {
+ if (is_graph(tr))
+- trace_graph_function(tr, ip, parent_ip, flags, pc);
++ trace_graph_function(tr, ip, parent_ip, trace_ctx);
+ else
+- trace_function(tr, ip, parent_ip, flags, pc);
++ trace_function(tr, ip, parent_ip, trace_ctx);
+ }
+
+ #else
+@@ -323,15 +326,13 @@ check_critical_timing(struct trace_array *tr,
+ {
+ u64 T0, T1, delta;
+ unsigned long flags;
+- int pc;
++ unsigned int trace_ctx;
+
+ T0 = data->preempt_timestamp;
+ T1 = ftrace_now(cpu);
+ delta = T1-T0;
+
+- local_save_flags(flags);
+-
+- pc = preempt_count();
++ trace_ctx = tracing_gen_ctx();
+
+ if (!report_latency(tr, delta))
+ goto out;
+@@ -342,9 +343,9 @@ check_critical_timing(struct trace_array *tr,
+ if (!report_latency(tr, delta))
+ goto out_unlock;
+
+- __trace_function(tr, CALLER_ADDR0, parent_ip, flags, pc);
++ __trace_function(tr, CALLER_ADDR0, parent_ip, trace_ctx);
+ /* Skip 5 functions to get to the irq/preempt enable function */
+- __trace_stack(tr, flags, 5, pc);
++ __trace_stack(tr, trace_ctx, 5);
+
+ if (data->critical_sequence != max_sequence)
+ goto out_unlock;
+@@ -364,16 +365,15 @@ check_critical_timing(struct trace_array *tr,
+ out:
+ data->critical_sequence = max_sequence;
+ data->preempt_timestamp = ftrace_now(cpu);
+- __trace_function(tr, CALLER_ADDR0, parent_ip, flags, pc);
++ __trace_function(tr, CALLER_ADDR0, parent_ip, trace_ctx);
+ }
+
+ static nokprobe_inline void
+-start_critical_timing(unsigned long ip, unsigned long parent_ip, int pc)
++start_critical_timing(unsigned long ip, unsigned long parent_ip)
+ {
+ int cpu;
+ struct trace_array *tr = irqsoff_trace;
+ struct trace_array_cpu *data;
+- unsigned long flags;
+
+ if (!tracer_enabled || !tracing_is_enabled())
+ return;
+@@ -394,9 +394,7 @@ start_critical_timing(unsigned long ip, unsigned long parent_ip, int pc)
+ data->preempt_timestamp = ftrace_now(cpu);
+ data->critical_start = parent_ip ? : ip;
+
+- local_save_flags(flags);
+-
+- __trace_function(tr, ip, parent_ip, flags, pc);
++ __trace_function(tr, ip, parent_ip, tracing_gen_ctx());
+
+ per_cpu(tracing_cpu, cpu) = 1;
+
+@@ -404,12 +402,12 @@ start_critical_timing(unsigned long ip, unsigned long parent_ip, int pc)
+ }
+
+ static nokprobe_inline void
+-stop_critical_timing(unsigned long ip, unsigned long parent_ip, int pc)
++stop_critical_timing(unsigned long ip, unsigned long parent_ip)
+ {
+ int cpu;
+ struct trace_array *tr = irqsoff_trace;
+ struct trace_array_cpu *data;
+- unsigned long flags;
++ unsigned int trace_ctx;
+
+ cpu = raw_smp_processor_id();
+ /* Always clear the tracing cpu on stopping the trace */
+@@ -429,8 +427,8 @@ stop_critical_timing(unsigned long ip, unsigned long parent_ip, int pc)
+
+ atomic_inc(&data->disabled);
+
+- local_save_flags(flags);
+- __trace_function(tr, ip, parent_ip, flags, pc);
++ trace_ctx = tracing_gen_ctx();
++ __trace_function(tr, ip, parent_ip, trace_ctx);
+ check_critical_timing(tr, data, parent_ip ? : ip, cpu);
+ data->critical_start = 0;
+ atomic_dec(&data->disabled);
+@@ -439,20 +437,16 @@ stop_critical_timing(unsigned long ip, unsigned long parent_ip, int pc)
+ /* start and stop critical timings used to for stoppage (in idle) */
+ void start_critical_timings(void)
+ {
+- int pc = preempt_count();
+-
+- if (preempt_trace(pc) || irq_trace())
+- start_critical_timing(CALLER_ADDR0, CALLER_ADDR1, pc);
++ if (preempt_trace(preempt_count()) || irq_trace())
++ start_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
+ }
+ EXPORT_SYMBOL_GPL(start_critical_timings);
+ NOKPROBE_SYMBOL(start_critical_timings);
+
+ void stop_critical_timings(void)
+ {
+- int pc = preempt_count();
+-
+- if (preempt_trace(pc) || irq_trace())
+- stop_critical_timing(CALLER_ADDR0, CALLER_ADDR1, pc);
++ if (preempt_trace(preempt_count()) || irq_trace())
++ stop_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
+ }
+ EXPORT_SYMBOL_GPL(stop_critical_timings);
+ NOKPROBE_SYMBOL(stop_critical_timings);
+@@ -614,19 +608,15 @@ static void irqsoff_tracer_stop(struct trace_array *tr)
+ */
+ void tracer_hardirqs_on(unsigned long a0, unsigned long a1)
+ {
+- unsigned int pc = preempt_count();
+-
+- if (!preempt_trace(pc) && irq_trace())
+- stop_critical_timing(a0, a1, pc);
++ if (!preempt_trace(preempt_count()) && irq_trace())
++ stop_critical_timing(a0, a1);
+ }
+ NOKPROBE_SYMBOL(tracer_hardirqs_on);
+
+ void tracer_hardirqs_off(unsigned long a0, unsigned long a1)
+ {
+- unsigned int pc = preempt_count();
+-
+- if (!preempt_trace(pc) && irq_trace())
+- start_critical_timing(a0, a1, pc);
++ if (!preempt_trace(preempt_count()) && irq_trace())
++ start_critical_timing(a0, a1);
+ }
+ NOKPROBE_SYMBOL(tracer_hardirqs_off);
+
+@@ -666,18 +656,14 @@ static struct tracer irqsoff_tracer __read_mostly =
+ #ifdef CONFIG_PREEMPT_TRACER
+ void tracer_preempt_on(unsigned long a0, unsigned long a1)
+ {
+- int pc = preempt_count();
+-
+- if (preempt_trace(pc) && !irq_trace())
+- stop_critical_timing(a0, a1, pc);
++ if (preempt_trace(preempt_count()) && !irq_trace())
++ stop_critical_timing(a0, a1);
+ }
+
+ void tracer_preempt_off(unsigned long a0, unsigned long a1)
+ {
+- int pc = preempt_count();
+-
+- if (preempt_trace(pc) && !irq_trace())
+- start_critical_timing(a0, a1, pc);
++ if (preempt_trace(preempt_count()) && !irq_trace())
++ start_critical_timing(a0, a1);
+ }
+
+ static int preemptoff_tracer_init(struct trace_array *tr)
+diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c
+index 718357289899..a2478605e761 100644
+--- a/kernel/trace/trace_kprobe.c
++++ b/kernel/trace/trace_kprobe.c
+@@ -1394,8 +1394,7 @@ __kprobe_trace_func(struct trace_kprobe *tk, struct pt_regs *regs,
+ if (trace_trigger_soft_disabled(trace_file))
+ return;
+
+- local_save_flags(fbuffer.flags);
+- fbuffer.pc = preempt_count();
++ fbuffer.trace_ctx = tracing_gen_ctx();
+ fbuffer.trace_file = trace_file;
+
+ dsize = __get_data_size(&tk->tp, regs);
+@@ -1404,7 +1403,7 @@ __kprobe_trace_func(struct trace_kprobe *tk, struct pt_regs *regs,
+ trace_event_buffer_lock_reserve(&fbuffer.buffer, trace_file,
+ call->event.type,
+ sizeof(*entry) + tk->tp.size + dsize,
+- fbuffer.flags, fbuffer.pc);
++ fbuffer.trace_ctx);
+ if (!fbuffer.event)
+ return;
+
+@@ -1442,8 +1441,7 @@ __kretprobe_trace_func(struct trace_kprobe *tk, struct kretprobe_instance *ri,
+ if (trace_trigger_soft_disabled(trace_file))
+ return;
+
+- local_save_flags(fbuffer.flags);
+- fbuffer.pc = preempt_count();
++ fbuffer.trace_ctx = tracing_gen_ctx();
+ fbuffer.trace_file = trace_file;
+
+ dsize = __get_data_size(&tk->tp, regs);
+@@ -1451,7 +1449,7 @@ __kretprobe_trace_func(struct trace_kprobe *tk, struct kretprobe_instance *ri,
+ trace_event_buffer_lock_reserve(&fbuffer.buffer, trace_file,
+ call->event.type,
+ sizeof(*entry) + tk->tp.size + dsize,
+- fbuffer.flags, fbuffer.pc);
++ fbuffer.trace_ctx);
+ if (!fbuffer.event)
+ return;
+
+diff --git a/kernel/trace/trace_mmiotrace.c b/kernel/trace/trace_mmiotrace.c
+index 84582bf1ed5f..7221ae0b4c47 100644
+--- a/kernel/trace/trace_mmiotrace.c
++++ b/kernel/trace/trace_mmiotrace.c
+@@ -300,10 +300,11 @@ static void __trace_mmiotrace_rw(struct trace_array *tr,
+ struct trace_buffer *buffer = tr->array_buffer.buffer;
+ struct ring_buffer_event *event;
+ struct trace_mmiotrace_rw *entry;
+- int pc = preempt_count();
++ unsigned int trace_ctx;
+
++ trace_ctx = tracing_gen_ctx_flags(0);
+ event = trace_buffer_lock_reserve(buffer, TRACE_MMIO_RW,
+- sizeof(*entry), 0, pc);
++ sizeof(*entry), trace_ctx);
+ if (!event) {
+ atomic_inc(&dropped_count);
+ return;
+@@ -312,7 +313,7 @@ static void __trace_mmiotrace_rw(struct trace_array *tr,
+ entry->rw = *rw;
+
+ if (!call_filter_check_discard(call, entry, buffer, event))
+- trace_buffer_unlock_commit(tr, buffer, event, 0, pc);
++ trace_buffer_unlock_commit(tr, buffer, event, trace_ctx);
+ }
+
+ void mmio_trace_rw(struct mmiotrace_rw *rw)
+@@ -330,10 +331,11 @@ static void __trace_mmiotrace_map(struct trace_array *tr,
+ struct trace_buffer *buffer = tr->array_buffer.buffer;
+ struct ring_buffer_event *event;
+ struct trace_mmiotrace_map *entry;
+- int pc = preempt_count();
++ unsigned int trace_ctx;
+
++ trace_ctx = tracing_gen_ctx_flags(0);
+ event = trace_buffer_lock_reserve(buffer, TRACE_MMIO_MAP,
+- sizeof(*entry), 0, pc);
++ sizeof(*entry), trace_ctx);
+ if (!event) {
+ atomic_inc(&dropped_count);
+ return;
+@@ -342,7 +344,7 @@ static void __trace_mmiotrace_map(struct trace_array *tr,
+ entry->map = *map;
+
+ if (!call_filter_check_discard(call, entry, buffer, event))
+- trace_buffer_unlock_commit(tr, buffer, event, 0, pc);
++ trace_buffer_unlock_commit(tr, buffer, event, trace_ctx);
+ }
+
+ void mmio_trace_mapping(struct mmiotrace_map *map)
+diff --git a/kernel/trace/trace_sched_wakeup.c b/kernel/trace/trace_sched_wakeup.c
+index 037e1e863b17..c1f582e0e97f 100644
+--- a/kernel/trace/trace_sched_wakeup.c
++++ b/kernel/trace/trace_sched_wakeup.c
+@@ -67,7 +67,7 @@ static bool function_enabled;
+ static int
+ func_prolog_preempt_disable(struct trace_array *tr,
+ struct trace_array_cpu **data,
+- int *pc)
++ unsigned int *trace_ctx)
+ {
+ long disabled;
+ int cpu;
+@@ -75,7 +75,7 @@ func_prolog_preempt_disable(struct trace_array *tr,
+ if (likely(!wakeup_task))
+ return 0;
+
+- *pc = preempt_count();
++ *trace_ctx = tracing_gen_ctx();
+ preempt_disable_notrace();
+
+ cpu = raw_smp_processor_id();
+@@ -116,8 +116,8 @@ static int wakeup_graph_entry(struct ftrace_graph_ent *trace)
+ {
+ struct trace_array *tr = wakeup_trace;
+ struct trace_array_cpu *data;
+- unsigned long flags;
+- int pc, ret = 0;
++ unsigned int trace_ctx;
++ int ret = 0;
+
+ if (ftrace_graph_ignore_func(trace))
+ return 0;
+@@ -131,11 +131,10 @@ static int wakeup_graph_entry(struct ftrace_graph_ent *trace)
+ if (ftrace_graph_notrace_addr(trace->func))
+ return 1;
+
+- if (!func_prolog_preempt_disable(tr, &data, &pc))
++ if (!func_prolog_preempt_disable(tr, &data, &trace_ctx))
+ return 0;
+
+- local_save_flags(flags);
+- ret = __trace_graph_entry(tr, trace, flags, pc);
++ ret = __trace_graph_entry(tr, trace, trace_ctx);
+ atomic_dec(&data->disabled);
+ preempt_enable_notrace();
+
+@@ -146,16 +145,14 @@ static void wakeup_graph_return(struct ftrace_graph_ret *trace)
+ {
+ struct trace_array *tr = wakeup_trace;
+ struct trace_array_cpu *data;
+- unsigned long flags;
+- int pc;
++ unsigned int trace_ctx;
+
+ ftrace_graph_addr_finish(trace);
+
+- if (!func_prolog_preempt_disable(tr, &data, &pc))
++ if (!func_prolog_preempt_disable(tr, &data, &trace_ctx))
+ return;
+
+- local_save_flags(flags);
+- __trace_graph_return(tr, trace, flags, pc);
++ __trace_graph_return(tr, trace, trace_ctx);
+ atomic_dec(&data->disabled);
+
+ preempt_enable_notrace();
+@@ -219,13 +216,13 @@ wakeup_tracer_call(unsigned long ip, unsigned long parent_ip,
+ struct trace_array *tr = wakeup_trace;
+ struct trace_array_cpu *data;
+ unsigned long flags;
+- int pc;
++ unsigned int trace_ctx;
+
+- if (!func_prolog_preempt_disable(tr, &data, &pc))
++ if (!func_prolog_preempt_disable(tr, &data, &trace_ctx))
+ return;
+
+ local_irq_save(flags);
+- trace_function(tr, ip, parent_ip, flags, pc);
++ trace_function(tr, ip, parent_ip, trace_ctx);
+ local_irq_restore(flags);
+
+ atomic_dec(&data->disabled);
+@@ -305,12 +302,12 @@ static void wakeup_print_header(struct seq_file *s)
+ static void
+ __trace_function(struct trace_array *tr,
+ unsigned long ip, unsigned long parent_ip,
+- unsigned long flags, int pc)
++ unsigned int trace_ctx)
+ {
+ if (is_graph(tr))
+- trace_graph_function(tr, ip, parent_ip, flags, pc);
++ trace_graph_function(tr, ip, parent_ip, trace_ctx);
+ else
+- trace_function(tr, ip, parent_ip, flags, pc);
++ trace_function(tr, ip, parent_ip, trace_ctx);
+ }
+
+ static int wakeup_flag_changed(struct trace_array *tr, u32 mask, int set)
+@@ -377,7 +374,7 @@ static void
+ tracing_sched_switch_trace(struct trace_array *tr,
+ struct task_struct *prev,
+ struct task_struct *next,
+- unsigned long flags, int pc)
++ unsigned int trace_ctx)
+ {
+ struct trace_event_call *call = &event_context_switch;
+ struct trace_buffer *buffer = tr->array_buffer.buffer;
+@@ -385,7 +382,7 @@ tracing_sched_switch_trace(struct trace_array *tr,
+ struct ctx_switch_entry *entry;
+
+ event = trace_buffer_lock_reserve(buffer, TRACE_CTX,
+- sizeof(*entry), flags, pc);
++ sizeof(*entry), trace_ctx);
+ if (!event)
+ return;
+ entry = ring_buffer_event_data(event);
+@@ -398,14 +395,14 @@ tracing_sched_switch_trace(struct trace_array *tr,
+ entry->next_cpu = task_cpu(next);
+
+ if (!call_filter_check_discard(call, entry, buffer, event))
+- trace_buffer_unlock_commit(tr, buffer, event, flags, pc);
++ trace_buffer_unlock_commit(tr, buffer, event, trace_ctx);
+ }
+
+ static void
+ tracing_sched_wakeup_trace(struct trace_array *tr,
+ struct task_struct *wakee,
+ struct task_struct *curr,
+- unsigned long flags, int pc)
++ unsigned int trace_ctx)
+ {
+ struct trace_event_call *call = &event_wakeup;
+ struct ring_buffer_event *event;
+@@ -413,7 +410,7 @@ tracing_sched_wakeup_trace(struct trace_array *tr,
+ struct trace_buffer *buffer = tr->array_buffer.buffer;
+
+ event = trace_buffer_lock_reserve(buffer, TRACE_WAKE,
+- sizeof(*entry), flags, pc);
++ sizeof(*entry), trace_ctx);
+ if (!event)
+ return;
+ entry = ring_buffer_event_data(event);
+@@ -426,7 +423,7 @@ tracing_sched_wakeup_trace(struct trace_array *tr,
+ entry->next_cpu = task_cpu(wakee);
+
+ if (!call_filter_check_discard(call, entry, buffer, event))
+- trace_buffer_unlock_commit(tr, buffer, event, flags, pc);
++ trace_buffer_unlock_commit(tr, buffer, event, trace_ctx);
+ }
+
+ static void notrace
+@@ -438,7 +435,7 @@ probe_wakeup_sched_switch(void *ignore, bool preempt,
+ unsigned long flags;
+ long disabled;
+ int cpu;
+- int pc;
++ unsigned int trace_ctx;
+
+ tracing_record_cmdline(prev);
+
+@@ -457,8 +454,6 @@ probe_wakeup_sched_switch(void *ignore, bool preempt,
+ if (next != wakeup_task)
+ return;
+
+- pc = preempt_count();
+-
+ /* disable local data, not wakeup_cpu data */
+ cpu = raw_smp_processor_id();
+ disabled = atomic_inc_return(&per_cpu_ptr(wakeup_trace->array_buffer.data, cpu)->disabled);
+@@ -466,6 +461,8 @@ probe_wakeup_sched_switch(void *ignore, bool preempt,
+ goto out;
+
+ local_irq_save(flags);
++ trace_ctx = tracing_gen_ctx_flags(flags);
++
+ arch_spin_lock(&wakeup_lock);
+
+ /* We could race with grabbing wakeup_lock */
+@@ -475,9 +472,9 @@ probe_wakeup_sched_switch(void *ignore, bool preempt,
+ /* The task we are waiting for is waking up */
+ data = per_cpu_ptr(wakeup_trace->array_buffer.data, wakeup_cpu);
+
+- __trace_function(wakeup_trace, CALLER_ADDR0, CALLER_ADDR1, flags, pc);
+- tracing_sched_switch_trace(wakeup_trace, prev, next, flags, pc);
+- __trace_stack(wakeup_trace, flags, 0, pc);
++ __trace_function(wakeup_trace, CALLER_ADDR0, CALLER_ADDR1, trace_ctx);
++ tracing_sched_switch_trace(wakeup_trace, prev, next, trace_ctx);
++ __trace_stack(wakeup_trace, trace_ctx, 0);
+
+ T0 = data->preempt_timestamp;
+ T1 = ftrace_now(cpu);
+@@ -529,9 +526,8 @@ probe_wakeup(void *ignore, struct task_struct *p)
+ {
+ struct trace_array_cpu *data;
+ int cpu = smp_processor_id();
+- unsigned long flags;
+ long disabled;
+- int pc;
++ unsigned int trace_ctx;
+
+ if (likely(!tracer_enabled))
+ return;
+@@ -552,11 +548,12 @@ probe_wakeup(void *ignore, struct task_struct *p)
+ (!dl_task(p) && (p->prio >= wakeup_prio || p->prio >= current->prio)))
+ return;
+
+- pc = preempt_count();
+ disabled = atomic_inc_return(&per_cpu_ptr(wakeup_trace->array_buffer.data, cpu)->disabled);
+ if (unlikely(disabled != 1))
+ goto out;
+
++ trace_ctx = tracing_gen_ctx();
++
+ /* interrupts should be off from try_to_wake_up */
+ arch_spin_lock(&wakeup_lock);
+
+@@ -583,19 +580,17 @@ probe_wakeup(void *ignore, struct task_struct *p)
+
+ wakeup_task = get_task_struct(p);
+
+- local_save_flags(flags);
+-
+ data = per_cpu_ptr(wakeup_trace->array_buffer.data, wakeup_cpu);
+ data->preempt_timestamp = ftrace_now(cpu);
+- tracing_sched_wakeup_trace(wakeup_trace, p, current, flags, pc);
+- __trace_stack(wakeup_trace, flags, 0, pc);
++ tracing_sched_wakeup_trace(wakeup_trace, p, current, trace_ctx);
++ __trace_stack(wakeup_trace, trace_ctx, 0);
+
+ /*
+ * We must be careful in using CALLER_ADDR2. But since wake_up
+ * is not called by an assembly function (where as schedule is)
+ * it should be safe to use it here.
+ */
+- __trace_function(wakeup_trace, CALLER_ADDR1, CALLER_ADDR2, flags, pc);
++ __trace_function(wakeup_trace, CALLER_ADDR1, CALLER_ADDR2, trace_ctx);
+
+ out_locked:
+ arch_spin_unlock(&wakeup_lock);
+diff --git a/kernel/trace/trace_syscalls.c b/kernel/trace/trace_syscalls.c
+index d85a2f0f316b..8bfcd3b09422 100644
+--- a/kernel/trace/trace_syscalls.c
++++ b/kernel/trace/trace_syscalls.c
+@@ -298,9 +298,8 @@ static void ftrace_syscall_enter(void *data, struct pt_regs *regs, long id)
+ struct syscall_metadata *sys_data;
+ struct ring_buffer_event *event;
+ struct trace_buffer *buffer;
+- unsigned long irq_flags;
++ unsigned int trace_ctx;
+ unsigned long args[6];
+- int pc;
+ int syscall_nr;
+ int size;
+
+@@ -322,12 +321,11 @@ static void ftrace_syscall_enter(void *data, struct pt_regs *regs, long id)
+
+ size = sizeof(*entry) + sizeof(unsigned long) * sys_data->nb_args;
+
+- local_save_flags(irq_flags);
+- pc = preempt_count();
++ trace_ctx = tracing_gen_ctx();
+
+ buffer = tr->array_buffer.buffer;
+ event = trace_buffer_lock_reserve(buffer,
+- sys_data->enter_event->event.type, size, irq_flags, pc);
++ sys_data->enter_event->event.type, size, trace_ctx);
+ if (!event)
+ return;
+
+@@ -337,7 +335,7 @@ static void ftrace_syscall_enter(void *data, struct pt_regs *regs, long id)
+ memcpy(entry->args, args, sizeof(unsigned long) * sys_data->nb_args);
+
+ event_trigger_unlock_commit(trace_file, buffer, event, entry,
+- irq_flags, pc);
++ trace_ctx);
+ }
+
+ static void ftrace_syscall_exit(void *data, struct pt_regs *regs, long ret)
+@@ -348,8 +346,7 @@ static void ftrace_syscall_exit(void *data, struct pt_regs *regs, long ret)
+ struct syscall_metadata *sys_data;
+ struct ring_buffer_event *event;
+ struct trace_buffer *buffer;
+- unsigned long irq_flags;
+- int pc;
++ unsigned int trace_ctx;
+ int syscall_nr;
+
+ syscall_nr = trace_get_syscall_nr(current, regs);
+@@ -368,13 +365,12 @@ static void ftrace_syscall_exit(void *data, struct pt_regs *regs, long ret)
+ if (!sys_data)
+ return;
+
+- local_save_flags(irq_flags);
+- pc = preempt_count();
++ trace_ctx = tracing_gen_ctx();
+
+ buffer = tr->array_buffer.buffer;
+ event = trace_buffer_lock_reserve(buffer,
+ sys_data->exit_event->event.type, sizeof(*entry),
+- irq_flags, pc);
++ trace_ctx);
+ if (!event)
+ return;
+
+@@ -383,7 +379,7 @@ static void ftrace_syscall_exit(void *data, struct pt_regs *regs, long ret)
+ entry->ret = syscall_get_return_value(current, regs);
+
+ event_trigger_unlock_commit(trace_file, buffer, event, entry,
+- irq_flags, pc);
++ trace_ctx);
+ }
+
+ static int reg_event_syscall_enter(struct trace_event_file *file,
+diff --git a/kernel/trace/trace_uprobe.c b/kernel/trace/trace_uprobe.c
+index 60ff36f5d7f9..0b07bb07127d 100644
+--- a/kernel/trace/trace_uprobe.c
++++ b/kernel/trace/trace_uprobe.c
+@@ -966,7 +966,7 @@ static void __uprobe_trace_func(struct trace_uprobe *tu,
+ esize = SIZEOF_TRACE_ENTRY(is_ret_probe(tu));
+ size = esize + tu->tp.size + dsize;
+ event = trace_event_buffer_lock_reserve(&buffer, trace_file,
+- call->event.type, size, 0, 0);
++ call->event.type, size, 0);
+ if (!event)
+ return;
+
+@@ -982,7 +982,7 @@ static void __uprobe_trace_func(struct trace_uprobe *tu,
+
+ memcpy(data, ucb->buf, tu->tp.size + dsize);
+
+- event_trigger_unlock_commit(trace_file, buffer, event, entry, 0, 0);
++ event_trigger_unlock_commit(trace_file, buffer, event, entry, 0);
+ }
+
+ /* uprobe handler */
+--
+2.43.0
+