diff -Nupr src.orig/fs/proc/array.c src/fs/proc/array.c
--- src.orig/fs/proc/array.c	2016-12-15 19:55:39.080000000 +0000
+++ src/fs/proc/array.c	2016-12-15 19:58:00.840000000 +0000
@@ -334,13 +334,20 @@ static inline void task_seccomp(struct s
 #endif
 }
 
+#include "kpatch.h"
 static inline void task_context_switch_counts(struct seq_file *m,
 						struct task_struct *p)
 {
+	int *newpid;
+
 	seq_printf(m,	"voluntary_ctxt_switches:\t%lu\n"
 			"nonvoluntary_ctxt_switches:\t%lu\n",
 			p->nvcsw,
 			p->nivcsw);
+
+	newpid = kpatch_shadow_get(p, "newpid");
+	if (newpid)
+		seq_printf(m, "newpid:\t%d\n", *newpid);
 }
 
 static void task_cpus_allowed(struct seq_file *m, struct task_struct *task)
diff -Nupr src.orig/kernel/exit.c src/kernel/exit.c
--- src.orig/kernel/exit.c	2016-12-15 19:56:00.184000000 +0000
+++ src/kernel/exit.c	2016-12-15 19:58:00.840000000 +0000
@@ -650,6 +650,7 @@ static void check_stack_usage(void)
 static inline void check_stack_usage(void) {}
 #endif
 
+#include "kpatch.h"
 void do_exit(long code)
 {
 	struct task_struct *tsk = current;
@@ -758,6 +759,8 @@ void do_exit(long code)
 
 	cgroup_exit(tsk);
 
+	kpatch_shadow_free(tsk, "newpid");
+
 	/*
 	 * FIXME: do that only when needed, using sched_exit tracepoint
 	 */
diff -Nupr src.orig/kernel/fork.c src/kernel/fork.c
--- src.orig/kernel/fork.c	2016-12-15 19:56:00.184000000 +0000
+++ src/kernel/fork.c	2016-12-15 19:58:00.840000000 +0000
@@ -1726,6 +1726,7 @@ struct task_struct *fork_idle(int cpu)
  *   It copies the process, and if successful kick-starts
  *   it and waits for it to finish using the VM if required.
  */
+#include "kpatch.h"
 long _do_fork(unsigned long clone_flags,
 	      unsigned long stack_start,
 	      unsigned long stack_size,
@@ -1764,6 +1765,13 @@ long _do_fork(unsigned long clone_flags,
 	if (!IS_ERR(p)) {
 		struct completion vfork;
 		struct pid *pid;
+		int *newpid;
+		static int ctr = 0;
+
+		newpid = kpatch_shadow_alloc(p, "newpid", sizeof(*newpid),
+					     GFP_KERNEL);
+		if (newpid)
+			*newpid = ctr++;
 
 		trace_sched_process_fork(current, p);
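
The hunks above use kpatch's shadow variable API to attach a new per-task field ("newpid") to the existing task_struct without changing its layout: _do_fork() allocates the shadow variable when a task is created, the /proc status reader looks it up by name, and do_exit() frees it when the task dies. What follows is a minimal sketch of that same lifecycle in one place, assuming the kpatch.h prototypes shown in the patch (kpatch_shadow_alloc(obj, name, size, gfp), kpatch_shadow_get(obj, name), kpatch_shadow_free(obj, name)); the function shadow_lifecycle() and the field name "count" are hypothetical, for illustration only, and are not part of the patch.

#include <linux/sched.h>
#include <linux/gfp.h>
#include "kpatch.h"

/*
 * Hypothetical illustration of the shadow-variable lifecycle used by the
 * patch above; not part of the patch itself.
 */
static void shadow_lifecycle(struct task_struct *task)
{
	int *count;

	/* Attach a new int to an existing object; returns NULL on failure. */
	count = kpatch_shadow_alloc(task, "count", sizeof(*count), GFP_KERNEL);
	if (count)
		*count = 0;

	/* Any patched function can look the variable up by object + name. */
	count = kpatch_shadow_get(task, "count");
	if (count)
		(*count)++;

	/* Release the shadow variable when its parent object goes away. */
	kpatch_shadow_free(task, "count");
}

Since task_context_switch_counts() feeds /proc/<pid>/status, once the patch module is loaded a command such as grep newpid /proc/<pid>/status should print the counter value assigned to that task at fork time; tasks forked before the patch was loaded have no shadow variable, so kpatch_shadow_get() returns NULL and the line is simply omitted for them.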