Index: kern/kern_idle.c
===================================================================
RCS file: /home/ncvs/src/sys/kern/kern_idle.c,v
retrieving revision 1.40
diff -u -r1.40 kern_idle.c
--- kern/kern_idle.c	25 Jul 2004 19:49:01 -0000	1.40
+++ kern/kern_idle.c	17 Aug 2004 01:15:22 -0000
@@ -41,6 +41,9 @@
 SYSINIT(idle_setup, SI_SUB_SCHED_IDLE, SI_ORDER_FIRST, idle_setup, NULL)
 
 static void idle_proc(void *dummy);
+#ifdef SMP
+int idle_count;		/* number of idle processors */
+#endif
 
 /*
  * Set up per-cpu idle process contexts.  The AP's shouldn't be running or
@@ -99,6 +102,9 @@
 	td = curthread;
 	p = td->td_proc;
+#ifdef SMP
+	idle_count++;
+#endif
 	for (;;) {
 		mtx_assert(&Giant, MA_NOTOWNED);
@@ -106,7 +112,13 @@
 		cpu_idle();
 
 		mtx_lock_spin(&sched_lock);
+#ifdef SMP
+		idle_count--;
+#endif
 		mi_switch(SW_VOL, NULL);
+#ifdef SMP
+		idle_count++;
+#endif
 		mtx_unlock_spin(&sched_lock);
 	}
 }
Index: kern/sched_4bsd.c
===================================================================
RCS file: /home/ncvs/src/sys/kern/sched_4bsd.c,v
retrieving revision 1.49
diff -u -r1.49 sched_4bsd.c
--- kern/sched_4bsd.c	11 Aug 2004 20:54:48 -0000	1.49
+++ kern/sched_4bsd.c	17 Aug 2004 01:15:22 -0000
@@ -697,6 +697,10 @@
 sched_add(struct thread *td)
 {
 	struct kse *ke;
+#ifdef SMP
+	int forwarded = 0;
+	int cpu;
+#endif
 
 	ke = td->td_kse;
 	mtx_assert(&sched_lock, MA_OWNED);
@@ -710,28 +714,41 @@
 	    ("sched_add: process swapped out"));
 
 #ifdef SMP
-	/*
-	 * Only try to preempt if the thread is unpinned or pinned to the
-	 * current CPU.
-	 */
-	if (KSE_CAN_MIGRATE(ke) || ke->ke_runq == &runq_pcpu[PCPU_GET(cpuid)])
-#endif
-	if (maybe_preempt(td))
-		return;
-
-#ifdef SMP
 	if (KSE_CAN_MIGRATE(ke)) {
-		CTR2(KTR_RUNQ, "sched_add: adding kse:%p (td:%p) to gbl runq", ke, td);
+		CTR2(KTR_RUNQ,
+		    "sched_add: adding kse:%p (td:%p) to gbl runq", ke, td);
+		cpu = NOCPU;
 		ke->ke_runq = &runq;
 	} else {
-		CTR2(KTR_RUNQ, "sched_add: adding kse:%p (td:%p)to pcpu runq", ke, td);
 		if (!SKE_RUNQ_PCPU(ke))
-			ke->ke_runq = &runq_pcpu[PCPU_GET(cpuid)];
+			ke->ke_runq = &runq_pcpu[(cpu = PCPU_GET(cpuid))];
+		else
+			cpu = td->td_lastcpu;
+		CTR3(KTR_RUNQ,
+		    "sched_add: Put kse:%p(td:%p) on cpu%d runq", ke, td, cpu);
 	}
+
+	/*
+	 * Only try to preempt if the thread is unpinned or pinned to the
+	 * current CPU.  But first check if another CPU can do it.
+	 */
+	if (KSE_CAN_MIGRATE(ke) || ke->ke_runq != &runq_pcpu[PCPU_GET(cpuid)]) {
+		if (idle_count) {
+			forwarded = forward_wakeup(cpu);
+		}
+	}
+	if (!forwarded &&
+	    (ke->ke_runq == &runq ||
+	    ke->ke_runq == &runq_pcpu[PCPU_GET(cpuid)]))
 #else
 	CTR2(KTR_RUNQ, "sched_add: adding kse:%p (td:%p) to runq", ke, td);
 	ke->ke_runq = &runq;
 #endif
+	{
+		if (maybe_preempt(td))
+			return;
+	}
+
 	if ((td->td_proc->p_flag & P_NOLOAD) == 0)
 		sched_tdcnt++;
 	runq_add(ke->ke_runq, ke);
Index: kern/subr_smp.c
===================================================================
RCS file: /home/ncvs/src/sys/kern/subr_smp.c,v
retrieving revision 1.188
diff -u -r1.188 subr_smp.c
--- kern/subr_smp.c	13 Aug 2004 00:57:43 -0000	1.188
+++ kern/subr_smp.c	17 Aug 2004 01:15:22 -0000
@@ -49,6 +49,8 @@
 #include <machine/smp.h>
 
+#include "opt_sched.h"
+
 #ifdef SMP
 volatile cpumask_t stopped_cpus;
 volatile cpumask_t started_cpus;
@@ -96,6 +98,14 @@
     &forward_roundrobin_enabled, 0,
     "Forwarding of roundrobin to all other CPUs");
 
+#ifdef SCHED_4BSD
+/* Enable forwarding of wakeups to all other CPUs. */
+static int forward_wakeup_enabled = 0;
+SYSCTL_INT(_kern_smp, OID_AUTO, forward_wakeup_enabled, CTLFLAG_RW,
+	   &forward_wakeup_enabled, 0,
+	   "Forwarding of wakeup to idle CPUs");
+
+#endif /* SCHED_4BSD */
 /* Variables needed for SMP rendezvous. */
 static void (*smp_rv_setup_func)(void *arg);
 static void (*smp_rv_action_func)(void *arg);
@@ -195,6 +205,59 @@
 	}
 	ipi_selected(map, IPI_AST);
 }
+
+#ifdef SCHED_4BSD
+/* Enable HTT_2 if you have a 2-way HTT CPU. */
+int
+forward_wakeup(int cpunum)
+{
+	struct pcpu *pc;
+	struct thread *td;
+	cpumask_t id, map, me;
+#ifdef HTT_2
+	cpumask_t map2;
+#endif
+
+	mtx_assert(&sched_lock, MA_OWNED);
+
+	CTR0(KTR_SMP, "forward_wakeup()");
+
+	if (!forward_wakeup_enabled)
+		return (0);
+	if (!smp_started || cold || panicstr)
+		return (0);
+
+	map = 0;
+	me = PCPU_GET(cpumask);
+	SLIST_FOREACH(pc, &cpuhead, pc_allcpu) {
+		td = pc->pc_curthread;
+		id = pc->pc_cpumask;
+		if (id != me && (id & stopped_cpus) == 0 &&
+		    td == pc->pc_idlethread) {
+			map |= id;
+		}
+	}
+	/* If we only allow one CPU, then mask off all the others. */
+	if (cpunum != NOCPU) {
+		KASSERT((cpunum <= mp_maxcpus), ("forward_wakeup: bad cpunum."));
+		map &= (1 << cpunum);
+	}
+#ifdef HTT_2
+	else {
+		/* Try to choose an idle die. */
+		map2 = (map & (map >> 1)) & 0x5555;
+		if (map2) {
+			map = map2;
+		}
+	}
+#endif
+	if (map) {
+		ipi_selected(map, IPI_AST);
+		return (1);
+	}
+	printf("forward_wakeup: Idle processor not found\n");
+	return (0);
+}
+#endif /* SCHED_4BSD */
 
 /*
  * When called the executing CPU will send an IPI to all other CPUs
Index: sys/smp.h
===================================================================
RCS file: /home/ncvs/src/sys/sys/smp.h,v
retrieving revision 1.77
diff -u -r1.77 smp.h
--- sys/smp.h	27 Mar 2004 18:21:24 -0000	1.77
+++ sys/smp.h	17 Aug 2004 01:15:22 -0000
@@ -54,6 +54,7 @@
 extern cpumask_t all_cpus;
 extern u_int mp_maxid;
 extern int mp_ncpus;
+extern int idle_count;
 extern volatile int smp_started;
 
 /*
@@ -92,6 +93,7 @@
 
 void	forward_signal(struct thread *);
 void	forward_roundrobin(void);
+int	forward_wakeup(int cpunum);
 int	restart_cpus(cpumask_t);
 int	stop_cpus(cpumask_t);
 void	smp_rendezvous_action(void);
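Note for reviewers: the interesting part of forward_wakeup() is the mask
arithmetic that picks which idle CPU(s) to IPI.  Below is a minimal
user-space sketch of just that selection step, compilable with any C
compiler.  It is an illustration only: pick_wakeup_map() and the sample
masks are invented for the example, cpumask_t is reduced to a plain
unsigned int, NOCPU is modelled as -1, and the real function additionally
filters on stopped_cpus and pc_idlethread while holding sched_lock.

/*
 * Standalone sketch (not part of the patch) of the CPU-selection
 * arithmetic in forward_wakeup().  All names and values here are
 * hypothetical stand-ins for the kernel types.
 */
#include <stdio.h>

typedef unsigned int cpumask_t;
#define	NOCPU	(-1)		/* modelled; the kernel value differs */

static cpumask_t
pick_wakeup_map(cpumask_t idle, cpumask_t me, int cpunum)
{
	cpumask_t map, map2;

	map = idle & ~me;		/* never IPI ourselves */
	if (cpunum != NOCPU)		/* pinned: only that CPU will do */
		return (map & (1 << cpunum));
	/*
	 * The HTT_2 trick: with two logical CPUs per die, bits n and
	 * n+1 (n even) are siblings, so (map & (map >> 1)) & 0x5555
	 * keeps only the even bits whose odd sibling is also idle,
	 * i.e. dies that are entirely idle.  Preferring those avoids
	 * waking a logical CPU that shares a die with a busy sibling.
	 */
	map2 = (map & (map >> 1)) & 0x5555;
	return (map2 != 0 ? map2 : map);
}

int
main(void)
{
	/* CPUs 2 and 3 idle (a whole die) plus CPU 5 idle on its own. */
	cpumask_t idle = (1 << 2) | (1 << 3) | (1 << 5);

	/* Unpinned: the HTT_2 filter selects the fully idle die (0x4). */
	printf("unpinned: 0x%x\n", pick_wakeup_map(idle, 1 << 0, NOCPU));
	/* Pinned to CPU 5: only that CPU's bit survives (0x20). */
	printf("pinned 5: 0x%x\n", pick_wakeup_map(idle, 1 << 0, 5));
	return (0);
}

As in the patch itself, the 0x5555 constant assumes logical CPUs n and
n+1 (n even) are HTT siblings and only covers the first 16 CPUs.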