diff options
author | Thomas Gleixner <tglx@linutronix.de> | 2009-09-18 17:44:48 +0200 |
---|---|---|
committer | Thomas Gleixner <tglx@linutronix.de> | 2009-09-18 17:44:48 +0200 |
commit | 52ecde3d33045a7ceae795d3b48e1ed79ad86841 (patch) | |
tree | e4d34b98bcd8203bd1b80c0995524f7bc104cfe2 | |
parent | 076be2470c2be51f352654ba09f1b2c60d232609 (diff) | |
parent | 82c07cbbdf7d44564497122eb6d984ffe2497fa3 (diff) |
Merge branch 'rt/head' into rt/2.6.31
-rw-r--r-- | arch/x86/Kconfig | 4 | ||||
-rw-r--r-- | kernel/latencytop.c | 14 |
2 files changed, 10 insertions(+), 8 deletions(-)
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 2ddf28660323..bc240958bdac 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -281,6 +281,7 @@ config X86_X2APIC
 config SPARSE_IRQ
 	bool "Support sparse irq numbering"
 	depends on PCI_MSI || HT_IRQ
+	depends on !PREEMPT_RT
 	---help---
 	  This enables support for sparse irqs. This is useful for distro
 	  kernels that want to define a high CONFIG_NR_CPUS value but still
@@ -1902,7 +1903,7 @@ config PCI_MMCONFIG
 config DMAR
 	bool "Support for DMA Remapping Devices (EXPERIMENTAL)"
-	depends on PCI_MSI && ACPI && EXPERIMENTAL
+	depends on PCI_MSI && ACPI && EXPERIMENTAL && !PREEMPT_RT
 	help
 	  DMA remapping (DMAR) devices support enables independent address
 	  translations for Direct Memory Access (DMA) from devices.
@@ -1945,6 +1946,7 @@ config DMAR_FLOPPY_WA
 config INTR_REMAP
 	bool "Support for Interrupt Remapping (EXPERIMENTAL)"
 	depends on X86_64 && X86_IO_APIC && PCI_MSI && ACPI && EXPERIMENTAL
+	depends on !PREEMPT_RT
 	---help---
 	  Supports Interrupt remapping for IO-APIC and MSI devices.
	  To use x2apic mode in the CPU's which support x2APIC enhancements or

diff --git a/kernel/latencytop.c b/kernel/latencytop.c
index ca07c5c0c914..34311e1d0572 100644
--- a/kernel/latencytop.c
+++ b/kernel/latencytop.c
@@ -59,7 +59,7 @@
 #include <linux/slab.h>
 #include <linux/stacktrace.h>

-static DEFINE_SPINLOCK(latency_lock);
+static DEFINE_ATOMIC_SPINLOCK(latency_lock);

 #define MAXLR 128
 static struct latency_record latency_record[MAXLR];
@@ -73,19 +73,19 @@ void clear_all_latency_tracing(struct task_struct *p)
 	if (!latencytop_enabled)
 		return;

-	spin_lock_irqsave(&latency_lock, flags);
+	atomic_spin_lock_irqsave(&latency_lock, flags);
 	memset(&p->latency_record, 0, sizeof(p->latency_record));
 	p->latency_record_count = 0;
-	spin_unlock_irqrestore(&latency_lock, flags);
+	atomic_spin_unlock_irqrestore(&latency_lock, flags);
 }

 static void clear_global_latency_tracing(void)
 {
 	unsigned long flags;

-	spin_lock_irqsave(&latency_lock, flags);
+	atomic_spin_lock_irqsave(&latency_lock, flags);
 	memset(&latency_record, 0, sizeof(latency_record));
-	spin_unlock_irqrestore(&latency_lock, flags);
+	atomic_spin_unlock_irqrestore(&latency_lock, flags);
 }

 static void __sched
@@ -191,7 +191,7 @@ __account_scheduler_latency(struct task_struct *tsk, int usecs, int inter)
 	lat.max = usecs;
 	store_stacktrace(tsk, &lat);

-	spin_lock_irqsave(&latency_lock, flags);
+	atomic_spin_lock_irqsave(&latency_lock, flags);

 	account_global_scheduler_latency(tsk, &lat);

@@ -233,7 +233,7 @@ __account_scheduler_latency(struct task_struct *tsk, int usecs, int inter)
 	memcpy(&tsk->latency_record[i], &lat,
 			sizeof(struct latency_record));
 out_unlock:
-	spin_unlock_irqrestore(&latency_lock, flags);
+	atomic_spin_unlock_irqrestore(&latency_lock, flags);
 }

 static int lstats_show(struct seq_file *m, void *v)