Index: sys/kern/sched_4bsd.c
===================================================================
--- sys/kern/sched_4bsd.c	(revision 202541)
+++ sys/kern/sched_4bsd.c	(working copy)
@@ -920,6 +920,7 @@
 void
 sched_switch(struct thread *td, struct thread *newtd, int flags)
 {
+	struct mtx *tmtx;
 	struct td_sched *ts;
 	struct proc *p;
 
@@ -931,10 +932,14 @@
 	/*
 	 * Switch to the sched lock to fix things up and pick
 	 * a new thread.
+	 * Switch the interlock to be sched_lock again in order to avoid
+	 * breaking the critical path for threads wanting to lock
+	 * on td_lock.
 	 */
-	if (td->td_lock != &sched_lock) {
+	tmtx = td->td_lock;
+	if (tmtx != &sched_lock) {
 		mtx_lock_spin(&sched_lock);
-		thread_unlock(td);
+		thread_lock_set(td, &sched_lock);
 	}
 
 	if ((td->td_flags & TDF_NOLOAD) == 0)
@@ -1004,7 +1009,7 @@
 		(*dtrace_vtime_switch_func)(newtd);
 #endif
 
-		cpu_switch(td, newtd, td->td_lock);
+		cpu_switch(td, newtd, tmtx);
 		lock_profile_obtain_lock_success(&sched_lock.lock_object,
 		    0, 0, __FILE__, __LINE__);
 		/*
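
For readability, here is roughly how the affected parts of sched_switch() read with the patch applied; this is only a sketch assembled from the hunks above, with unrelated code elided as "...":

void
sched_switch(struct thread *td, struct thread *newtd, int flags)
{
	struct mtx *tmtx;
	struct td_sched *ts;
	struct proc *p;
	...
	/*
	 * Remember the current interlock, then switch the interlock back
	 * to sched_lock before picking a new thread, as explained in the
	 * comment added by the patch.
	 */
	tmtx = td->td_lock;
	if (tmtx != &sched_lock) {
		mtx_lock_spin(&sched_lock);
		thread_lock_set(td, &sched_lock);
	}
	...
	/*
	 * Pass the saved interlock (tmtx), rather than the current
	 * td->td_lock, to cpu_switch().
	 */
	cpu_switch(td, newtd, tmtx);
	...
}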