diff --git a/kernel/softlockup.c b/kernel/softlockup.c
index 4b493f6..ada1fcd 100644
--- a/kernel/softlockup.c
+++ b/kernel/softlockup.c
@@ -187,7 +187,9 @@ static int watchdog(void *__bind_cpu)
 {
 	struct sched_param param = { .sched_priority = MAX_RT_PRIO-1 };
 
+	rcu_read_lock();
 	sched_setscheduler(current, SCHED_FIFO, &param);
+	rcu_read_unlock();
 
 	/* initialize timestamp */
 	__touch_softlockup_watchdog();
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index 5a5ea2c..47ecc56 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -1272,6 +1272,9 @@ static int wake_affine(struct sched_domain *sd, struct task_struct *p, int sync)
 	 * effect of the currently running task from the load
 	 * of the current CPU:
 	 */
+
+	rcu_read_lock();
+
 	if (sync) {
 		tg = task_group(current);
 		weight = current->se.load.weight;
@@ -1298,6 +1301,7 @@ static int wake_affine(struct sched_domain *sd, struct task_struct *p, int sync)
 		100*(this_load + effective_load(tg, this_cpu, weight, weight)) <=
 		imbalance*(load + effective_load(tg, prev_cpu, 0, weight));
 
+	rcu_read_unlock();
 	/*
 	 * If the currently running task will sleep within
 	 * a reasonable amount of time then attract this newly
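
Both hunks apply the same pattern: code that dereferences RCU-protected pointers (here, the task-group data reached through task_group(), and similar RCU-protected task data inside sched_setscheduler()) must run inside an RCU read-side critical section, i.e. between rcu_read_lock() and rcu_read_unlock(). What follows is a minimal userspace sketch of that read-side pattern, not kernel code: it assumes liburcu (build with -lurcu), and struct cfg, shared_cfg and read_weight() are hypothetical names invented for illustration.

/*
 * Read-side RCU sketch (userspace, liburcu assumed; all names below
 * are illustrative, not from the patch). Build: cc sketch.c -lurcu
 */
#include <stdio.h>
#include <stdlib.h>
#include <urcu.h>

struct cfg {
	int weight;
};

static struct cfg *shared_cfg;	/* RCU-protected pointer */

/*
 * Reader side: every dereference of shared_cfg happens between
 * rcu_read_lock() and rcu_read_unlock(), just as the patch brackets
 * the task_group()/effective_load() accesses in wake_affine().
 */
static int read_weight(void)
{
	int w;

	rcu_read_lock();
	w = rcu_dereference(shared_cfg)->weight;
	rcu_read_unlock();
	return w;
}

int main(void)
{
	struct cfg *c = malloc(sizeof(*c));

	c->weight = 1024;
	rcu_register_thread();			/* readers must register with liburcu */
	rcu_assign_pointer(shared_cfg, c);	/* publish the pointer */

	printf("weight = %d\n", read_weight());

	rcu_unregister_thread();
	free(c);				/* safe: no readers remain */
	return 0;
}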