diff -Nupr src.orig/fs/proc/array.c src/fs/proc/array.c
--- src.orig/fs/proc/array.c	2020-09-03 11:48:30.496726119 -0400
+++ src/fs/proc/array.c	2020-09-03 11:49:11.696855012 -0400
@@ -395,13 +395,20 @@ static inline void task_seccomp(struct s
 	seq_putc(m, '\n');
 }
 
+#include <linux/livepatch.h>
 static inline void task_context_switch_counts(struct seq_file *m,
 						struct task_struct *p)
 {
+	int *newpid;
+
 	seq_printf(m,	"voluntary_ctxt_switches:\t%lu\n"
 			"nonvoluntary_ctxt_switches:\t%lu\n",
 			p->nvcsw,
 			p->nivcsw);
+
+	newpid = klp_shadow_get(p, 0);
+	if (newpid)
+		seq_printf(m, "newpid:\t%d\n", *newpid);
 }
 
 static void task_cpus_allowed(struct seq_file *m, struct task_struct *task)
diff -Nupr src.orig/kernel/exit.c src/kernel/exit.c
--- src.orig/kernel/exit.c	2020-09-03 11:48:30.717726811 -0400
+++ src/kernel/exit.c	2020-09-03 11:49:11.696855012 -0400
@@ -791,6 +791,7 @@ static void check_stack_usage(void)
 static inline void check_stack_usage(void) {}
 #endif
 
+#include <linux/livepatch.h>
 void do_exit(long code)
 {
 	struct task_struct *tsk = current;
@@ -888,6 +889,8 @@ void do_exit(long code)
 	check_stack_usage();
 	exit_thread();
 
+	klp_shadow_free(tsk, 0, NULL);
+
 	/*
 	 * Flush inherited counters to the parent - before the parent
 	 * gets woken up by child-exit notifications.
diff -Nupr src.orig/kernel/fork.c src/kernel/fork.c
--- src.orig/kernel/fork.c	2020-09-03 11:48:30.717726811 -0400
+++ src/kernel/fork.c	2020-09-03 11:49:11.697855015 -0400
@@ -1784,6 +1784,7 @@ struct task_struct *fork_idle(int cpu)
  * It copies the process, and if successful kick-starts
  * it and waits for it to finish using the VM if required.
  */
+#include <linux/livepatch.h>
 long do_fork(unsigned long clone_flags,
 	      unsigned long stack_start,
 	      unsigned long stack_size,
@@ -1821,6 +1822,13 @@ long do_fork(unsigned long clone_flags,
 	if (!IS_ERR(p)) {
 		struct completion vfork;
 		struct pid *pid;
+		int *newpid;
+		static int ctr = 0;
+
+		newpid = klp_shadow_get_or_alloc(p, 0, sizeof(*newpid), GFP_KERNEL,
+						 NULL, NULL);
+		if (newpid)
+			*newpid = ctr++;
 
 		trace_sched_process_fork(current, p);