diff -puN arch/i386/kernel/cpu/mtrr/generic.c~sched-fix-scheduling-latencies-in-mttrc arch/i386/kernel/cpu/mtrr/generic.c
--- 25/arch/i386/kernel/cpu/mtrr/generic.c~sched-fix-scheduling-latencies-in-mttrc	Tue Sep 14 17:42:08 2004
+++ 25-akpm/arch/i386/kernel/cpu/mtrr/generic.c	Tue Sep 14 17:42:08 2004
@@ -240,11 +240,9 @@
 	/* Note that this is not ideal, since the cache is only flushed/disabled
 	   for this CPU while the MTRRs are changed, but changing this requires
 	   more invasive changes to the way the kernel boots */
-	spin_lock(&set_atomicity_lock);
 
 	/* Enter the no-fill (CD=1, NW=0) cache mode and flush caches. */
 	cr0 = read_cr0() | 0x40000000;	/* set CD flag */
-	wbinvd();
 	write_cr0(cr0);
 	wbinvd();
 
@@ -266,8 +264,7 @@
 
 static void post_set(void)
 {
-	/* Flush caches and TLBs */
-	wbinvd();
+	/* Flush TLBs (no need to flush caches - they are disabled) */
 	__flush_tlb();
 
 	/* Intel (P6) standard MTRRs */
@@ -279,12 +276,17 @@
 	/* Restore value of CR4 */
 	if ( cpu_has_pge )
 		write_cr4(cr4);
-	spin_unlock(&set_atomicity_lock);
 }
 
 static void generic_set_all(void)
 {
-	unsigned long mask, count;
+	unsigned long mask, count, flags;
+
+	/*
+	 * Since we are disabling the cache dont allow any interrupts - they
+	 * would run extremely slow and would only increase the pain:
+	 */
+	spin_lock_irqsave(&set_atomicity_lock, flags);
 
 	prepare_set();
 
@@ -293,6 +295,8 @@
 
 	post_set();
 
+	spin_unlock_irqrestore(&set_atomicity_lock, flags);
+
 	/* Use the atomic bitops to update the global mask */
 	for (count = 0; count < sizeof mask * 8; ++count) {
 		if (mask & 0x01)