Index: sys/i386/i386/mp_machdep.c
===================================================================
RCS file: /home/ncvs/src/sys/i386/i386/mp_machdep.c,v
retrieving revision 1.115.2.20
diff -u -p -r1.115.2.20 mp_machdep.c
--- sys/i386/i386/mp_machdep.c	16 Dec 2003 16:32:28 -0000	1.115.2.20
+++ sys/i386/i386/mp_machdep.c	12 May 2005 19:02:40 -0000
@@ -342,6 +342,9 @@ static int apic_int_is_bus_type(int intr
 static int hlt_cpus_mask;
 static int hlt_logical_cpus = 1;
+static u_int hyperthreading_cpus;
+static u_int hyperthreading_cpus_mask;
+static int hyperthreading_allowed;
 static struct sysctl_ctx_list logical_cpu_clist;
 
 /*
@@ -982,6 +985,9 @@ mptable_pass2(void)
 			proc.apic_id++;
 			(void)processor_entry(&proc, cpu);
 			logical_cpus_mask |= (1 << cpu);
+			if (hyperthreading_cpus > 1 &&
+			    proc.apic_id % hyperthreading_cpus != 0)
+				hyperthreading_cpus_mask |= (1 << cpu);
 			cpu++;
 		}
 	}
@@ -1033,6 +1039,7 @@ static void
 mptable_hyperthread_fixup(u_int id_mask)
 {
 	u_int i, id;
+	u_int threads_per_cache, p[4];
 
 	/* Nothing to do if there is no HTT support. */
 	if ((cpu_feature & CPUID_HTT) == 0)
@@ -1042,6 +1049,48 @@ mptable_hyperthread_fixup(u_int id_mask)
 		return;
 
 	/*
+	 * Work out if hyperthreading is *really* enabled.  This
+	 * is made really ugly by the fact that processors lie: Dual
+	 * core processors claim to be hyperthreaded even when they're
+	 * not, presumably because they want to be treated the same
+	 * way as HTT with respect to per-cpu software licensing.
+	 * At the time of writing (May 12, 2005) the only hyperthreaded
+	 * cpus are from Intel, and Intel's dual-core processors can be
+	 * identified via the "deterministic cache parameters" cpuid
+	 * calls.
+	 */
+	/*
+	 * First determine if this is an Intel processor which claims
+	 * to have hyperthreading support.
+	 */
+	if ((cpu_feature & CPUID_HTT) &&
+	    (strcmp(cpu_vendor, "GenuineIntel") == 0)) {
+		/*
+		 * If the "deterministic cache parameters" cpuid calls
+		 * are available, use them.
+		 */
+		if (cpu_high >= 4) {
+			/* Ask the processor about up to 32 caches. */
+			for (i = 0; i < 32; i++) {
+				cpuid_count(4, i, p);
+				threads_per_cache = ((p[0] & 0x3ffc000) >> 14) + 1;
+				if (hyperthreading_cpus < threads_per_cache)
+					hyperthreading_cpus = threads_per_cache;
+				if ((p[0] & 0x1f) == 0)
+					break;
+			}
+		}
+
+		/*
+		 * If the deterministic cache parameters are not
+		 * available, or if no caches were reported to exist,
+		 * just accept what the HTT flag indicated.
+		 */
+		if (hyperthreading_cpus == 0)
+			hyperthreading_cpus = logical_cpus;
+	}
+
+	/*
 	 * For each APIC ID of a CPU that is set in the mask,
 	 * scan the other candidate APIC ID's for this
 	 * physical processor.  If any of those ID's are
@@ -3035,6 +3084,9 @@ sysctl_htl_cpus(SYSCTL_HANDLER_ARGS)
 	else
 		hlt_logical_cpus = 0;
 
+	if (! hyperthreading_allowed)
+		mask |= hyperthreading_cpus_mask;
+
 	if ((mask & all_cpus) == all_cpus)
 		mask &= ~(1<<0);
 	hlt_cpus_mask = mask;
@@ -3058,6 +3110,9 @@ sysctl_hlt_logical_cpus(SYSCTL_HANDLER_A
 	else
 		hlt_cpus_mask &= ~logical_cpus_mask;
 
+	if (! hyperthreading_allowed)
+		hlt_cpus_mask |= hyperthreading_cpus_mask;
+
 	if ((hlt_cpus_mask & all_cpus) == all_cpus)
 		hlt_cpus_mask &= ~(1<<0);
 
@@ -3065,6 +3120,34 @@ sysctl_hlt_logical_cpus(SYSCTL_HANDLER_A
 	return (error);
 }
 
+static int
+sysctl_hyperthreading_allowed(SYSCTL_HANDLER_ARGS)
+{
+	int allowed, error;
+
+	allowed = hyperthreading_allowed;
+	error = sysctl_handle_int(oidp, &allowed, 0, req);
+	if (error || !req->newptr)
+		return (error);
+
+	if (allowed)
+		hlt_cpus_mask &= ~hyperthreading_cpus_mask;
+	else
+		hlt_cpus_mask |= hyperthreading_cpus_mask;
+
+	if (logical_cpus_mask != 0 &&
+	    (hlt_cpus_mask & logical_cpus_mask) == logical_cpus_mask)
+		hlt_logical_cpus = 1;
+	else
+		hlt_logical_cpus = 0;
+
+	if ((hlt_cpus_mask & all_cpus) == all_cpus)
+		hlt_cpus_mask &= ~(1<<0);
+
+	hyperthreading_allowed = allowed;
+	return (error);
+}
+
 static void
 cpu_hlt_setup(void *dummy __unused)
 {
@@ -3084,6 +3167,22 @@ cpu_hlt_setup(void *dummy __unused)
 		if (hlt_logical_cpus)
 			hlt_cpus_mask |= logical_cpus_mask;
+
+		/*
+		 * If necessary for security purposes, force
+		 * hyperthreading off, regardless of the value
+		 * of hlt_logical_cpus.
+		 */
+		if (hyperthreading_cpus_mask) {
+			TUNABLE_INT_FETCH("machdep.hyperthreading_allowed",
+			    &hyperthreading_allowed);
+			SYSCTL_ADD_PROC(&logical_cpu_clist,
+			    SYSCTL_STATIC_CHILDREN(_machdep), OID_AUTO,
+			    "hyperthreading_allowed", CTLTYPE_INT|CTLFLAG_RW,
+			    0, 0, sysctl_hyperthreading_allowed, "IU", "");
+			if (! hyperthreading_allowed)
+				hlt_cpus_mask |= hyperthreading_cpus_mask;
+		}
 	}
 }
 
 SYSINIT(cpu_hlt, SI_SUB_SMP, SI_ORDER_ANY, cpu_hlt_setup, NULL);
Index: sys/i386/include/cpufunc.h
===================================================================
RCS file: /home/ncvs/src/sys/i386/include/cpufunc.h,v
retrieving revision 1.96.2.3
diff -u -p -r1.96.2.3 cpufunc.h
--- sys/i386/include/cpufunc.h	28 Apr 2002 22:50:54 -0000	1.96.2.3
+++ sys/i386/include/cpufunc.h	12 May 2005 19:02:40 -0000
@@ -103,6 +103,14 @@ do_cpuid(u_int ax, u_int *p)
 }
 
 static __inline void
+cpuid_count(u_int ax, u_int cx, u_int *p)
+{
+	__asm __volatile("cpuid"
+			 : "=a" (p[0]), "=b" (p[1]), "=c" (p[2]), "=d" (p[3])
+			 :  "0" (ax), "c" (cx));
+}
+
+static __inline void
 enable_intr(void)
 {
 #ifdef SMP