[amd64] bump rt feature set to 3.2-rc4-rt5

svn path=/dists/trunk/linux-2.6/; revision=18344
Uwe Kleine-König 2011-12-03 15:30:58 +00:00
parent 0a1d59767e
commit 94ef22ed4b
13 changed files with 1157 additions and 74 deletions

debian/changelog

@@ -13,7 +13,7 @@ linux-2.6 (3.2~rc4-1~experimental.1) UNRELEASED; urgency=low
  * [ia64,powerpc,sparc,x86] wireless: Enable MWIFIEX_PCIE as module
  [ Uwe Kleine-König ]
-  * [amd64] reenable rt featureset with 3.2-rc2-rt3
+  * [amd64] reenable rt featureset with 3.2-rc4-rt5
 -- Ben Hutchings <ben@decadent.org.uk>  Mon, 14 Nov 2011 15:21:10 +0000


@ -0,0 +1,406 @@
Subject: acpi: Make gbl_[hardware|gpe]_lock raw
From: Thomas Gleixner <tglx@linutronix.de>
Date: Mon, 28 Nov 2011 17:09:54 +0100
These locks are taken in the guts of the idle code and cannot be
converted to "sleeping" spinlocks on RT
Cc: stable-rt@vger.kernel.org
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
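For orientation, here is a minimal sketch of the conversion pattern the
patch applies (the lock and function names below are hypothetical, not
ACPICA code): on PREEMPT_RT an acpi_spinlock maps to a sleeping lock,
while a raw_spinlock_t keeps real spinning semantics and can therefore
be taken from the idle and interrupt paths.

#include <linux/spinlock.h>

/* Hypothetical stand-in for acpi_gbl_gpe_lock / acpi_gbl_hardware_lock */
static DEFINE_RAW_SPINLOCK(demo_gpe_lock);

static void demo_touch_gpe_registers(void)
{
        unsigned long flags;

        /* raw_spin_lock_irqsave() never sleeps, even on PREEMPT_RT */
        raw_spin_lock_irqsave(&demo_gpe_lock, flags);
        /* ... access GPE data structures / registers ... */
        raw_spin_unlock_irqrestore(&demo_gpe_lock, flags);
}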
---
drivers/acpi/acpica/acglobal.h | 4 ++--
drivers/acpi/acpica/evgpe.c | 4 ++--
drivers/acpi/acpica/evgpeblk.c | 8 ++++----
drivers/acpi/acpica/evgpeutil.c | 12 ++++++------
drivers/acpi/acpica/evxface.c | 10 +++++-----
drivers/acpi/acpica/evxfgpe.c | 24 ++++++++++++------------
drivers/acpi/acpica/hwregs.c | 4 ++--
drivers/acpi/acpica/hwxface.c | 4 ++--
drivers/acpi/acpica/utmutex.c | 21 +++------------------
9 files changed, 38 insertions(+), 53 deletions(-)
Index: linux-3.2/drivers/acpi/acpica/acglobal.h
===================================================================
--- linux-3.2.orig/drivers/acpi/acpica/acglobal.h
+++ linux-3.2/drivers/acpi/acpica/acglobal.h
@@ -235,8 +235,8 @@ ACPI_EXTERN u8 acpi_gbl_global_lock_pend
* Spinlocks are used for interfaces that can be possibly called at
* interrupt level
*/
-ACPI_EXTERN acpi_spinlock acpi_gbl_gpe_lock; /* For GPE data structs and registers */
-ACPI_EXTERN acpi_spinlock acpi_gbl_hardware_lock; /* For ACPI H/W except GPE registers */
+extern raw_spinlock_t acpi_gbl_gpe_lock; /* For GPE data structs and registers */
+extern raw_spinlock_t acpi_gbl_hardware_lock; /* For ACPI H/W except GPE registers */
/*****************************************************************************
*
Index: linux-3.2/drivers/acpi/acpica/evgpe.c
===================================================================
--- linux-3.2.orig/drivers/acpi/acpica/evgpe.c
+++ linux-3.2/drivers/acpi/acpica/evgpe.c
@@ -357,7 +357,7 @@ u32 acpi_ev_gpe_detect(struct acpi_gpe_x
* Note: Not necessary to obtain the hardware lock, since the GPE
* registers are owned by the gpe_lock.
*/
- flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);
+ raw_spin_lock_irqsave(&acpi_gbl_gpe_lock, flags);
/* Examine all GPE blocks attached to this interrupt level */
@@ -440,7 +440,7 @@ u32 acpi_ev_gpe_detect(struct acpi_gpe_x
unlock_and_exit:
- acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
+ raw_spin_unlock_irqrestore(&acpi_gbl_gpe_lock, flags);
return (int_status);
}
Index: linux-3.2/drivers/acpi/acpica/evgpeblk.c
===================================================================
--- linux-3.2.orig/drivers/acpi/acpica/evgpeblk.c
+++ linux-3.2/drivers/acpi/acpica/evgpeblk.c
@@ -95,7 +95,7 @@ acpi_ev_install_gpe_block(struct acpi_gp
/* Install the new block at the end of the list with lock */
- flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);
+ raw_spin_lock_irqsave(&acpi_gbl_gpe_lock, flags);
if (gpe_xrupt_block->gpe_block_list_head) {
next_gpe_block = gpe_xrupt_block->gpe_block_list_head;
while (next_gpe_block->next) {
@@ -109,7 +109,7 @@ acpi_ev_install_gpe_block(struct acpi_gp
}
gpe_block->xrupt_block = gpe_xrupt_block;
- acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
+ raw_spin_unlock_irqrestore(&acpi_gbl_gpe_lock, flags);
unlock_and_exit:
status = acpi_ut_release_mutex(ACPI_MTX_EVENTS);
@@ -156,7 +156,7 @@ acpi_status acpi_ev_delete_gpe_block(str
} else {
/* Remove the block on this interrupt with lock */
- flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);
+ raw_spin_lock_irqsave(&acpi_gbl_gpe_lock, flags);
if (gpe_block->previous) {
gpe_block->previous->next = gpe_block->next;
} else {
@@ -167,7 +167,7 @@ acpi_status acpi_ev_delete_gpe_block(str
if (gpe_block->next) {
gpe_block->next->previous = gpe_block->previous;
}
- acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
+ raw_spin_unlock_irqrestore(&acpi_gbl_gpe_lock, flags);
}
acpi_current_gpe_count -= gpe_block->gpe_count;
Index: linux-3.2/drivers/acpi/acpica/evgpeutil.c
===================================================================
--- linux-3.2.orig/drivers/acpi/acpica/evgpeutil.c
+++ linux-3.2/drivers/acpi/acpica/evgpeutil.c
@@ -70,7 +70,7 @@ acpi_ev_walk_gpe_list(acpi_gpe_callback
ACPI_FUNCTION_TRACE(ev_walk_gpe_list);
- flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);
+ raw_spin_lock_irqsave(&acpi_gbl_gpe_lock, flags);
/* Walk the interrupt level descriptor list */
@@ -101,7 +101,7 @@ acpi_ev_walk_gpe_list(acpi_gpe_callback
}
unlock_and_exit:
- acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
+ raw_spin_unlock_irqrestore(&acpi_gbl_gpe_lock, flags);
return_ACPI_STATUS(status);
}
@@ -237,7 +237,7 @@ struct acpi_gpe_xrupt_info *acpi_ev_get_
/* Install new interrupt descriptor with spin lock */
- flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);
+ raw_spin_lock_irqsave(&acpi_gbl_gpe_lock, flags);
if (acpi_gbl_gpe_xrupt_list_head) {
next_gpe_xrupt = acpi_gbl_gpe_xrupt_list_head;
while (next_gpe_xrupt->next) {
@@ -249,7 +249,7 @@ struct acpi_gpe_xrupt_info *acpi_ev_get_
} else {
acpi_gbl_gpe_xrupt_list_head = gpe_xrupt;
}
- acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
+ raw_spin_unlock_irqrestore(&acpi_gbl_gpe_lock, flags);
/* Install new interrupt handler if not SCI_INT */
@@ -306,7 +306,7 @@ acpi_status acpi_ev_delete_gpe_xrupt(str
/* Unlink the interrupt block with lock */
- flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);
+ raw_spin_lock_irqsave(&acpi_gbl_gpe_lock, flags);
if (gpe_xrupt->previous) {
gpe_xrupt->previous->next = gpe_xrupt->next;
} else {
@@ -318,7 +318,7 @@ acpi_status acpi_ev_delete_gpe_xrupt(str
if (gpe_xrupt->next) {
gpe_xrupt->next->previous = gpe_xrupt->previous;
}
- acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
+ raw_spin_unlock_irqrestore(&acpi_gbl_gpe_lock, flags);
/* Free the block */
Index: linux-3.2/drivers/acpi/acpica/evxface.c
===================================================================
--- linux-3.2.orig/drivers/acpi/acpica/evxface.c
+++ linux-3.2/drivers/acpi/acpica/evxface.c
@@ -751,7 +751,7 @@ acpi_install_gpe_handler(acpi_handle gpe
goto unlock_and_exit;
}
- flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);
+ raw_spin_lock_irqsave(&acpi_gbl_gpe_lock, flags);
/* Ensure that we have a valid GPE number */
@@ -799,14 +799,14 @@ acpi_install_gpe_handler(acpi_handle gpe
~(ACPI_GPE_XRUPT_TYPE_MASK | ACPI_GPE_DISPATCH_MASK);
gpe_event_info->flags |= (u8) (type | ACPI_GPE_DISPATCH_HANDLER);
- acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
+ raw_spin_unlock_irqrestore(&acpi_gbl_gpe_lock, flags);
unlock_and_exit:
(void)acpi_ut_release_mutex(ACPI_MTX_EVENTS);
return_ACPI_STATUS(status);
free_and_exit:
- acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
+ raw_spin_unlock_irqrestore(&acpi_gbl_gpe_lock, flags);
ACPI_FREE(handler);
goto unlock_and_exit;
}
@@ -853,7 +853,7 @@ acpi_remove_gpe_handler(acpi_handle gpe_
return_ACPI_STATUS(status);
}
- flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);
+ raw_spin_lock_irqsave(&acpi_gbl_gpe_lock, flags);
/* Ensure that we have a valid GPE number */
@@ -904,7 +904,7 @@ acpi_remove_gpe_handler(acpi_handle gpe_
ACPI_FREE(handler);
unlock_and_exit:
- acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
+ raw_spin_unlock_irqrestore(&acpi_gbl_gpe_lock, flags);
(void)acpi_ut_release_mutex(ACPI_MTX_EVENTS);
return_ACPI_STATUS(status);
Index: linux-3.2/drivers/acpi/acpica/evxfgpe.c
===================================================================
--- linux-3.2.orig/drivers/acpi/acpica/evxfgpe.c
+++ linux-3.2/drivers/acpi/acpica/evxfgpe.c
@@ -122,7 +122,7 @@ acpi_status acpi_enable_gpe(acpi_handle
ACPI_FUNCTION_TRACE(acpi_enable_gpe);
- flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);
+ raw_spin_lock_irqsave(&acpi_gbl_gpe_lock, flags);
/* Ensure that we have a valid GPE number */
@@ -131,7 +131,7 @@ acpi_status acpi_enable_gpe(acpi_handle
status = acpi_ev_add_gpe_reference(gpe_event_info);
}
- acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
+ raw_spin_unlock_irqrestore(&acpi_gbl_gpe_lock, flags);
return_ACPI_STATUS(status);
}
ACPI_EXPORT_SYMBOL(acpi_enable_gpe)
@@ -159,7 +159,7 @@ acpi_status acpi_disable_gpe(acpi_handle
ACPI_FUNCTION_TRACE(acpi_disable_gpe);
- flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);
+ raw_spin_lock_irqsave(&acpi_gbl_gpe_lock, flags);
/* Ensure that we have a valid GPE number */
@@ -168,7 +168,7 @@ acpi_status acpi_disable_gpe(acpi_handle
status = acpi_ev_remove_gpe_reference(gpe_event_info) ;
}
- acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
+ raw_spin_unlock_irqrestore(&acpi_gbl_gpe_lock, flags);
return_ACPI_STATUS(status);
}
ACPI_EXPORT_SYMBOL(acpi_disable_gpe)
@@ -215,7 +215,7 @@ acpi_setup_gpe_for_wake(acpi_handle wake
return_ACPI_STATUS(AE_BAD_PARAMETER);
}
- flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);
+ raw_spin_lock_irqsave(&acpi_gbl_gpe_lock, flags);
/* Ensure that we have a valid GPE number */
@@ -271,7 +271,7 @@ acpi_setup_gpe_for_wake(acpi_handle wake
status = AE_OK;
unlock_and_exit:
- acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
+ raw_spin_unlock_irqrestore(&acpi_gbl_gpe_lock, flags);
return_ACPI_STATUS(status);
}
ACPI_EXPORT_SYMBOL(acpi_setup_gpe_for_wake)
@@ -301,7 +301,7 @@ acpi_status acpi_set_gpe_wake_mask(acpi_
ACPI_FUNCTION_TRACE(acpi_set_gpe_wake_mask);
- flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);
+ raw_spin_lock_irqsave(&acpi_gbl_gpe_lock, flags);
/*
* Ensure that we have a valid GPE number and that this GPE is in
@@ -347,7 +347,7 @@ acpi_status acpi_set_gpe_wake_mask(acpi_
}
unlock_and_exit:
- acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
+ raw_spin_unlock_irqrestore(&acpi_gbl_gpe_lock, flags);
return_ACPI_STATUS(status);
}
@@ -373,7 +373,7 @@ acpi_status acpi_clear_gpe(acpi_handle g
ACPI_FUNCTION_TRACE(acpi_clear_gpe);
- flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);
+ raw_spin_lock_irqsave(&acpi_gbl_gpe_lock, flags);
/* Ensure that we have a valid GPE number */
@@ -386,7 +386,7 @@ acpi_status acpi_clear_gpe(acpi_handle g
status = acpi_hw_clear_gpe(gpe_event_info);
unlock_and_exit:
- acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
+ raw_spin_unlock_irqrestore(&acpi_gbl_gpe_lock, flags);
return_ACPI_STATUS(status);
}
@@ -416,7 +416,7 @@ acpi_get_gpe_status(acpi_handle gpe_devi
ACPI_FUNCTION_TRACE(acpi_get_gpe_status);
- flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);
+ raw_spin_lock_irqsave(&acpi_gbl_gpe_lock, flags);
/* Ensure that we have a valid GPE number */
@@ -434,7 +434,7 @@ acpi_get_gpe_status(acpi_handle gpe_devi
*event_status |= ACPI_EVENT_FLAG_HANDLE;
unlock_and_exit:
- acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
+ raw_spin_unlock_irqrestore(&acpi_gbl_gpe_lock, flags);
return_ACPI_STATUS(status);
}
Index: linux-3.2/drivers/acpi/acpica/hwregs.c
===================================================================
--- linux-3.2.orig/drivers/acpi/acpica/hwregs.c
+++ linux-3.2/drivers/acpi/acpica/hwregs.c
@@ -263,14 +263,14 @@ acpi_status acpi_hw_clear_acpi_status(vo
ACPI_BITMASK_ALL_FIXED_STATUS,
ACPI_FORMAT_UINT64(acpi_gbl_xpm1a_status.address)));
- lock_flags = acpi_os_acquire_lock(acpi_gbl_hardware_lock);
+ raw_spin_lock_irqsave(&acpi_gbl_hardware_lock, lock_flags);
/* Clear the fixed events in PM1 A/B */
status = acpi_hw_register_write(ACPI_REGISTER_PM1_STATUS,
ACPI_BITMASK_ALL_FIXED_STATUS);
- acpi_os_release_lock(acpi_gbl_hardware_lock, lock_flags);
+ raw_spin_unlock_irqrestore(&acpi_gbl_hardware_lock, lock_flags);
if (ACPI_FAILURE(status))
goto exit;
Index: linux-3.2/drivers/acpi/acpica/hwxface.c
===================================================================
--- linux-3.2.orig/drivers/acpi/acpica/hwxface.c
+++ linux-3.2/drivers/acpi/acpica/hwxface.c
@@ -387,7 +387,7 @@ acpi_status acpi_write_bit_register(u32
return_ACPI_STATUS(AE_BAD_PARAMETER);
}
- lock_flags = acpi_os_acquire_lock(acpi_gbl_hardware_lock);
+ raw_spin_lock_irqsave(&acpi_gbl_hardware_lock, lock_flags);
/*
* At this point, we know that the parent register is one of the
@@ -448,7 +448,7 @@ acpi_status acpi_write_bit_register(u32
unlock_and_exit:
- acpi_os_release_lock(acpi_gbl_hardware_lock, lock_flags);
+ raw_spin_unlock_irqrestore(&acpi_gbl_hardware_lock, lock_flags);
return_ACPI_STATUS(status);
}
Index: linux-3.2/drivers/acpi/acpica/utmutex.c
===================================================================
--- linux-3.2.orig/drivers/acpi/acpica/utmutex.c
+++ linux-3.2/drivers/acpi/acpica/utmutex.c
@@ -52,6 +52,9 @@ static acpi_status acpi_ut_create_mutex(
static void acpi_ut_delete_mutex(acpi_mutex_handle mutex_id);
+DEFINE_RAW_SPINLOCK(acpi_gbl_gpe_lock);
+DEFINE_RAW_SPINLOCK(acpi_gbl_hardware_lock);
+
/*******************************************************************************
*
* FUNCTION: acpi_ut_mutex_initialize
@@ -81,18 +84,6 @@ acpi_status acpi_ut_mutex_initialize(voi
}
}
- /* Create the spinlocks for use at interrupt level */
-
- status = acpi_os_create_lock (&acpi_gbl_gpe_lock);
- if (ACPI_FAILURE (status)) {
- return_ACPI_STATUS (status);
- }
-
- status = acpi_os_create_lock (&acpi_gbl_hardware_lock);
- if (ACPI_FAILURE (status)) {
- return_ACPI_STATUS (status);
- }
-
/* Mutex for _OSI support */
status = acpi_os_create_mutex(&acpi_gbl_osi_mutex);
if (ACPI_FAILURE(status)) {
@@ -132,13 +123,7 @@ void acpi_ut_mutex_terminate(void)
acpi_os_delete_mutex(acpi_gbl_osi_mutex);
- /* Delete the spinlocks */
-
- acpi_os_delete_lock(acpi_gbl_gpe_lock);
- acpi_os_delete_lock(acpi_gbl_hardware_lock);
-
/* Delete the reader/writer lock */
-
acpi_ut_delete_rw_lock(&acpi_gbl_namespace_rw_lock);
return_VOID;
}


@ -10,7 +10,7 @@ delivery problem for real.
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
[bwh: Adjust context in __remove_hrtimer() for v3.2-rc4]
---
include/linux/hrtimer.h | 3
kernel/hrtimer.c | 190 ++++++++++++++++++++++++++++++++++++++++++-----
@ -118,7 +118,7 @@ Index: linux-3.2/kernel/hrtimer.c
}
#else
@@ -915,6 +944,11 @@ static void __remove_hrtimer(struct hrti
@@ -916,6 +945,11 @@ static void __remove_hrtimer(struct hrti
if (!(timer->state & HRTIMER_STATE_ENQUEUED))
goto out;
@ -130,7 +130,7 @@ Index: linux-3.2/kernel/hrtimer.c
next_timer = timerqueue_getnext(&base->active);
timerqueue_del(&base->active, &timer->node);
if (&timer->node == next_timer) {
@@ -1176,6 +1210,7 @@ static void __hrtimer_init(struct hrtime
@@ -1178,6 +1212,7 @@ static void __hrtimer_init(struct hrtime
base = hrtimer_clockid_to_base(clock_id);
timer->base = &cpu_base->clock_base[base];
@ -138,7 +138,7 @@ Index: linux-3.2/kernel/hrtimer.c
timerqueue_init(&timer->node);
#ifdef CONFIG_TIMER_STATS
@@ -1259,10 +1294,118 @@ static void __run_hrtimer(struct hrtimer
@@ -1261,10 +1296,118 @@ static void __run_hrtimer(struct hrtimer
timer->state &= ~HRTIMER_STATE_CALLBACK;
}
@ -259,7 +259,7 @@ Index: linux-3.2/kernel/hrtimer.c
/*
* High resolution timer interrupt
* Called with interrupts disabled
@@ -1271,7 +1414,7 @@ void hrtimer_interrupt(struct clock_even
@@ -1273,7 +1416,7 @@ void hrtimer_interrupt(struct clock_even
{
struct hrtimer_cpu_base *cpu_base = &__get_cpu_var(hrtimer_bases);
ktime_t expires_next, now, entry_time, delta;
@ -268,7 +268,7 @@ Index: linux-3.2/kernel/hrtimer.c
BUG_ON(!cpu_base->hres_active);
cpu_base->nr_events++;
@@ -1338,7 +1481,10 @@ retry:
@@ -1340,7 +1483,10 @@ retry:
break;
}
@ -280,7 +280,7 @@ Index: linux-3.2/kernel/hrtimer.c
}
}
@@ -1353,6 +1499,10 @@ retry:
@@ -1355,6 +1501,10 @@ retry:
if (expires_next.tv64 == KTIME_MAX ||
!tick_program_event(expires_next, 0)) {
cpu_base->hang_detected = 0;
@ -291,7 +291,7 @@ Index: linux-3.2/kernel/hrtimer.c
return;
}
@@ -1428,17 +1578,17 @@ void hrtimer_peek_ahead_timers(void)
@@ -1430,17 +1580,17 @@ void hrtimer_peek_ahead_timers(void)
local_irq_restore(flags);
}
@ -314,7 +314,7 @@ Index: linux-3.2/kernel/hrtimer.c
/*
* Called from timer softirq every jiffy, expire hrtimers:
*
@@ -1471,7 +1621,7 @@ void hrtimer_run_queues(void)
@@ -1473,7 +1623,7 @@ void hrtimer_run_queues(void)
struct timerqueue_node *node;
struct hrtimer_cpu_base *cpu_base = &__get_cpu_var(hrtimer_bases);
struct hrtimer_clock_base *base;
@ -323,7 +323,7 @@ Index: linux-3.2/kernel/hrtimer.c
if (hrtimer_hres_active())
return;
@@ -1496,12 +1646,16 @@ void hrtimer_run_queues(void)
@@ -1498,12 +1648,16 @@ void hrtimer_run_queues(void)
hrtimer_get_expires_tv64(timer))
break;
@ -342,7 +342,7 @@ Index: linux-3.2/kernel/hrtimer.c
}
/*
@@ -1523,6 +1677,7 @@ static enum hrtimer_restart hrtimer_wake
@@ -1525,6 +1679,7 @@ static enum hrtimer_restart hrtimer_wake
void hrtimer_init_sleeper(struct hrtimer_sleeper *sl, struct task_struct *task)
{
sl->timer.function = hrtimer_wakeup;
@ -350,7 +350,7 @@ Index: linux-3.2/kernel/hrtimer.c
sl->task = task;
}
EXPORT_SYMBOL_GPL(hrtimer_init_sleeper);
@@ -1661,6 +1816,7 @@ static void __cpuinit init_hrtimers_cpu(
@@ -1663,6 +1818,7 @@ static void __cpuinit init_hrtimers_cpu(
for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++) {
cpu_base->clock_base[i].cpu_base = cpu_base;
timerqueue_init_head(&cpu_base->clock_base[i].active);
@ -358,7 +358,7 @@ Index: linux-3.2/kernel/hrtimer.c
}
hrtimer_init_hres(cpu_base);
@@ -1779,9 +1935,7 @@ void __init hrtimers_init(void)
@@ -1781,9 +1937,7 @@ void __init hrtimers_init(void)
hrtimer_cpu_notify(&hrtimers_nb, (unsigned long)CPU_UP_PREPARE,
(void *)(long)smp_processor_id());
register_cpu_notifier(&hrtimers_nb);


@ -14,4 +14,4 @@ Index: linux-3.2/localversion-rt
--- /dev/null
+++ linux-3.2/localversion-rt
@@ -0,0 +1 @@
+-rt3
+-rt5


@ -16,15 +16,15 @@ Index: linux-3.2/mm/slab.c
===================================================================
--- linux-3.2.orig/mm/slab.c
+++ linux-3.2/mm/slab.c
@@ -719,6 +719,7 @@ static DEFINE_MUTEX(cache_chain_mutex);
static struct list_head cache_chain;
@@ -723,6 +723,7 @@ static void slab_set_debugobj_lock_class
#endif
static DEFINE_PER_CPU(struct delayed_work, slab_reap_work);
+static DEFINE_PER_CPU(struct list_head, slab_free_list);
static DEFINE_LOCAL_IRQ_LOCK(slab_lock);
#ifndef CONFIG_PREEMPT_RT_BASE
@@ -734,14 +735,39 @@ slab_on_each_cpu(void (*func)(void *arg,
@@ -738,14 +739,39 @@ slab_on_each_cpu(void (*func)(void *arg,
{
unsigned int i;
@ -68,7 +68,7 @@ Index: linux-3.2/mm/slab.c
static inline struct array_cache *cpu_cache_get(struct kmem_cache *cachep)
{
return cachep->array[smp_processor_id()];
@@ -1226,7 +1252,7 @@ static void __cpuinit cpuup_canceled(lon
@@ -1230,7 +1256,7 @@ static void __cpuinit cpuup_canceled(lon
free_block(cachep, nc->entry, nc->avail, node);
if (!cpumask_empty(mask)) {
@ -77,7 +77,7 @@ Index: linux-3.2/mm/slab.c
goto free_array_cache;
}
@@ -1240,7 +1266,7 @@ static void __cpuinit cpuup_canceled(lon
@@ -1244,7 +1270,7 @@ static void __cpuinit cpuup_canceled(lon
alien = l3->alien;
l3->alien = NULL;
@ -86,7 +86,7 @@ Index: linux-3.2/mm/slab.c
kfree(shared);
if (alien) {
@@ -1521,6 +1547,8 @@ void __init kmem_cache_init(void)
@@ -1525,6 +1551,8 @@ void __init kmem_cache_init(void)
use_alien_caches = 0;
local_irq_lock_init(slab_lock);
@ -95,7 +95,7 @@ Index: linux-3.2/mm/slab.c
for (i = 0; i < NUM_INIT_LISTS; i++) {
kmem_list3_init(&initkmem_list3[i]);
@@ -1798,12 +1826,14 @@ static void *kmem_getpages(struct kmem_c
@@ -1803,12 +1831,14 @@ static void *kmem_getpages(struct kmem_c
/*
* Interface to system's page release.
*/
@ -112,7 +112,7 @@ Index: linux-3.2/mm/slab.c
kmemcheck_free_shadow(page, cachep->gfporder);
if (cachep->flags & SLAB_RECLAIM_ACCOUNT)
@@ -1819,7 +1849,13 @@ static void kmem_freepages(struct kmem_c
@@ -1824,7 +1854,13 @@ static void kmem_freepages(struct kmem_c
}
if (current->reclaim_state)
current->reclaim_state->reclaimed_slab += nr_freed;
@ -127,7 +127,7 @@ Index: linux-3.2/mm/slab.c
}
static void kmem_rcu_free(struct rcu_head *head)
@@ -1827,7 +1863,7 @@ static void kmem_rcu_free(struct rcu_hea
@@ -1832,7 +1868,7 @@ static void kmem_rcu_free(struct rcu_hea
struct slab_rcu *slab_rcu = (struct slab_rcu *)head;
struct kmem_cache *cachep = slab_rcu->cachep;
@ -136,7 +136,7 @@ Index: linux-3.2/mm/slab.c
if (OFF_SLAB(cachep))
kmem_cache_free(cachep->slabp_cache, slab_rcu);
}
@@ -2046,7 +2082,8 @@ static void slab_destroy_debugcheck(stru
@@ -2051,7 +2087,8 @@ static void slab_destroy_debugcheck(stru
* Before calling the slab must have been unlinked from the cache. The
* cache-lock is not held/needed.
*/
@ -146,7 +146,7 @@ Index: linux-3.2/mm/slab.c
{
void *addr = slabp->s_mem - slabp->colouroff;
@@ -2059,7 +2096,7 @@ static void slab_destroy(struct kmem_cac
@@ -2064,7 +2101,7 @@ static void slab_destroy(struct kmem_cac
slab_rcu->addr = addr;
call_rcu(&slab_rcu->head, kmem_rcu_free);
} else {
@ -155,7 +155,7 @@ Index: linux-3.2/mm/slab.c
if (OFF_SLAB(cachep))
kmem_cache_free(cachep->slabp_cache, slabp);
}
@@ -2579,9 +2616,15 @@ static void do_drain(void *arg)
@@ -2586,9 +2623,15 @@ static void do_drain(void *arg)
__do_drain(arg, smp_processor_id());
}
#else
@ -173,7 +173,7 @@ Index: linux-3.2/mm/slab.c
}
#endif
@@ -2639,7 +2682,7 @@ static int drain_freelist(struct kmem_ca
@@ -2646,7 +2689,7 @@ static int drain_freelist(struct kmem_ca
*/
l3->free_objects -= cache->num;
local_spin_unlock_irq(slab_lock, &l3->list_lock);
@ -182,7 +182,7 @@ Index: linux-3.2/mm/slab.c
nr_freed++;
}
out:
@@ -2974,7 +3017,7 @@ static int cache_grow(struct kmem_cache
@@ -2981,7 +3024,7 @@ static int cache_grow(struct kmem_cache
spin_unlock(&l3->list_lock);
return 1;
opps1:
@ -191,7 +191,7 @@ Index: linux-3.2/mm/slab.c
failed:
if (local_flags & __GFP_WAIT)
local_lock_irq(slab_lock);
@@ -3624,7 +3667,7 @@ static void free_block(struct kmem_cache
@@ -3631,7 +3674,7 @@ static void free_block(struct kmem_cache
* a different cache, refer to comments before
* alloc_slabmgmt.
*/
@ -200,7 +200,7 @@ Index: linux-3.2/mm/slab.c
} else {
list_add(&slabp->list, &l3->slabs_free);
}
@@ -3892,7 +3935,7 @@ void kmem_cache_free(struct kmem_cache *
@@ -3899,7 +3942,7 @@ void kmem_cache_free(struct kmem_cache *
debug_check_no_obj_freed(objp, obj_size(cachep));
local_lock_irqsave(slab_lock, flags);
__cache_free(cachep, objp, __builtin_return_address(0));
@ -209,7 +209,7 @@ Index: linux-3.2/mm/slab.c
trace_kmem_cache_free(_RET_IP_, objp);
}
@@ -3922,7 +3965,7 @@ void kfree(const void *objp)
@@ -3929,7 +3972,7 @@ void kfree(const void *objp)
debug_check_no_obj_freed(objp, obj_size(c));
local_lock_irqsave(slab_lock, flags);
__cache_free(c, (void *)objp, __builtin_return_address(0));
@ -218,7 +218,7 @@ Index: linux-3.2/mm/slab.c
}
EXPORT_SYMBOL(kfree);
@@ -3978,7 +4021,8 @@ static int alloc_kmemlist(struct kmem_ca
@@ -3985,7 +4028,8 @@ static int alloc_kmemlist(struct kmem_ca
}
l3->free_limit = (1 + nr_cpus_node(node)) *
cachep->batchcount + cachep->num;
@ -228,7 +228,7 @@ Index: linux-3.2/mm/slab.c
kfree(shared);
free_alien_cache(new_alien);
continue;
@@ -4044,7 +4088,9 @@ static void do_ccupdate_local(void *info
@@ -4051,7 +4095,9 @@ static void do_ccupdate_local(void *info
#else
static void do_ccupdate_local(void *info, int cpu)
{
@ -238,7 +238,7 @@ Index: linux-3.2/mm/slab.c
}
#endif
@@ -4086,8 +4132,8 @@ static int do_tune_cpucache(struct kmem_
@@ -4093,8 +4139,8 @@ static int do_tune_cpucache(struct kmem_
local_spin_lock_irq(slab_lock,
&cachep->nodelists[cpu_to_mem(i)]->list_lock);
free_block(cachep, ccold->entry, ccold->avail, cpu_to_mem(i));


@ -19,8 +19,8 @@ Index: linux-3.2/mm/slab.c
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
@@ -718,12 +719,40 @@ static DEFINE_MUTEX(cache_chain_mutex);
static struct list_head cache_chain;
@@ -722,12 +723,40 @@ static void slab_set_debugobj_lock_class
#endif
static DEFINE_PER_CPU(struct delayed_work, slab_reap_work);
+static DEFINE_LOCAL_IRQ_LOCK(slab_lock);
@ -60,7 +60,7 @@ Index: linux-3.2/mm/slab.c
static inline struct kmem_cache *__find_general_cachep(size_t size,
gfp_t gfpflags)
{
@@ -1061,9 +1090,10 @@ static void reap_alien(struct kmem_cache
@@ -1065,9 +1094,10 @@ static void reap_alien(struct kmem_cache
if (l3->alien) {
struct array_cache *ac = l3->alien[node];
@ -73,7 +73,7 @@ Index: linux-3.2/mm/slab.c
}
}
}
@@ -1078,9 +1108,9 @@ static void drain_alien_cache(struct kme
@@ -1082,9 +1112,9 @@ static void drain_alien_cache(struct kme
for_each_online_node(i) {
ac = alien[i];
if (ac) {
@ -85,7 +85,7 @@ Index: linux-3.2/mm/slab.c
}
}
}
@@ -1159,11 +1189,11 @@ static int init_cache_nodelists_node(int
@@ -1163,11 +1193,11 @@ static int init_cache_nodelists_node(int
cachep->nodelists[node] = l3;
}
@ -99,7 +99,7 @@ Index: linux-3.2/mm/slab.c
}
return 0;
}
@@ -1188,7 +1218,7 @@ static void __cpuinit cpuup_canceled(lon
@@ -1192,7 +1222,7 @@ static void __cpuinit cpuup_canceled(lon
if (!l3)
goto free_array_cache;
@ -108,7 +108,7 @@ Index: linux-3.2/mm/slab.c
/* Free limit for this kmem_list3 */
l3->free_limit -= cachep->batchcount;
@@ -1196,7 +1226,7 @@ static void __cpuinit cpuup_canceled(lon
@@ -1200,7 +1230,7 @@ static void __cpuinit cpuup_canceled(lon
free_block(cachep, nc->entry, nc->avail, node);
if (!cpumask_empty(mask)) {
@ -117,7 +117,7 @@ Index: linux-3.2/mm/slab.c
goto free_array_cache;
}
@@ -1210,7 +1240,7 @@ static void __cpuinit cpuup_canceled(lon
@@ -1214,7 +1244,7 @@ static void __cpuinit cpuup_canceled(lon
alien = l3->alien;
l3->alien = NULL;
@ -126,7 +126,7 @@ Index: linux-3.2/mm/slab.c
kfree(shared);
if (alien) {
@@ -1284,7 +1314,7 @@ static int __cpuinit cpuup_prepare(long
@@ -1288,7 +1318,7 @@ static int __cpuinit cpuup_prepare(long
l3 = cachep->nodelists[node];
BUG_ON(!l3);
@ -135,7 +135,7 @@ Index: linux-3.2/mm/slab.c
if (!l3->shared) {
/*
* We are serialised from CPU_DEAD or
@@ -1299,7 +1329,7 @@ static int __cpuinit cpuup_prepare(long
@@ -1303,7 +1333,7 @@ static int __cpuinit cpuup_prepare(long
alien = NULL;
}
#endif
@ -144,7 +144,7 @@ Index: linux-3.2/mm/slab.c
kfree(shared);
free_alien_cache(alien);
if (cachep->flags & SLAB_DEBUG_OBJECTS)
@@ -1490,6 +1520,8 @@ void __init kmem_cache_init(void)
@@ -1494,6 +1524,8 @@ void __init kmem_cache_init(void)
if (num_possible_nodes() == 1)
use_alien_caches = 0;
@ -153,7 +153,7 @@ Index: linux-3.2/mm/slab.c
for (i = 0; i < NUM_INIT_LISTS; i++) {
kmem_list3_init(&initkmem_list3[i]);
if (i < MAX_NUMNODES)
@@ -2493,7 +2525,7 @@ EXPORT_SYMBOL(kmem_cache_create);
@@ -2500,7 +2532,7 @@ EXPORT_SYMBOL(kmem_cache_create);
#if DEBUG
static void check_irq_off(void)
{
@ -162,7 +162,7 @@ Index: linux-3.2/mm/slab.c
}
static void check_irq_on(void)
@@ -2528,13 +2560,12 @@ static void drain_array(struct kmem_cach
@@ -2535,13 +2567,12 @@ static void drain_array(struct kmem_cach
struct array_cache *ac,
int force, int node);
@ -178,7 +178,7 @@ Index: linux-3.2/mm/slab.c
ac = cpu_cache_get(cachep);
spin_lock(&cachep->nodelists[node]->list_lock);
free_block(cachep, ac->entry, ac->avail, node);
@@ -2542,12 +2573,24 @@ static void do_drain(void *arg)
@@ -2549,12 +2580,24 @@ static void do_drain(void *arg)
ac->avail = 0;
}
@ -204,7 +204,7 @@ Index: linux-3.2/mm/slab.c
check_irq_on();
for_each_online_node(node) {
l3 = cachep->nodelists[node];
@@ -2578,10 +2621,10 @@ static int drain_freelist(struct kmem_ca
@@ -2585,10 +2628,10 @@ static int drain_freelist(struct kmem_ca
nr_freed = 0;
while (nr_freed < tofree && !list_empty(&l3->slabs_free)) {
@ -217,7 +217,7 @@ Index: linux-3.2/mm/slab.c
goto out;
}
@@ -2595,7 +2638,7 @@ static int drain_freelist(struct kmem_ca
@@ -2602,7 +2645,7 @@ static int drain_freelist(struct kmem_ca
* to the cache.
*/
l3->free_objects -= cache->num;
@ -226,7 +226,7 @@ Index: linux-3.2/mm/slab.c
slab_destroy(cache, slabp);
nr_freed++;
}
@@ -2890,7 +2933,7 @@ static int cache_grow(struct kmem_cache
@@ -2897,7 +2940,7 @@ static int cache_grow(struct kmem_cache
offset *= cachep->colour_off;
if (local_flags & __GFP_WAIT)
@ -235,7 +235,7 @@ Index: linux-3.2/mm/slab.c
/*
* The test for missing atomic flag is performed here, rather than
@@ -2920,7 +2963,7 @@ static int cache_grow(struct kmem_cache
@@ -2927,7 +2970,7 @@ static int cache_grow(struct kmem_cache
cache_init_objs(cachep, slabp);
if (local_flags & __GFP_WAIT)
@ -244,7 +244,7 @@ Index: linux-3.2/mm/slab.c
check_irq_off();
spin_lock(&l3->list_lock);
@@ -2934,7 +2977,7 @@ opps1:
@@ -2941,7 +2984,7 @@ opps1:
kmem_freepages(cachep, objp);
failed:
if (local_flags & __GFP_WAIT)
@ -253,7 +253,7 @@ Index: linux-3.2/mm/slab.c
return 0;
}
@@ -3326,11 +3369,11 @@ retry:
@@ -3333,11 +3376,11 @@ retry:
* set and go into memory reserves if necessary.
*/
if (local_flags & __GFP_WAIT)
@ -267,7 +267,7 @@ Index: linux-3.2/mm/slab.c
if (obj) {
/*
* Insert into the appropriate per node queues
@@ -3446,7 +3489,7 @@ __cache_alloc_node(struct kmem_cache *ca
@@ -3453,7 +3496,7 @@ __cache_alloc_node(struct kmem_cache *ca
return NULL;
cache_alloc_debugcheck_before(cachep, flags);
@ -276,7 +276,7 @@ Index: linux-3.2/mm/slab.c
if (nodeid == NUMA_NO_NODE)
nodeid = slab_node;
@@ -3471,7 +3514,7 @@ __cache_alloc_node(struct kmem_cache *ca
@@ -3478,7 +3521,7 @@ __cache_alloc_node(struct kmem_cache *ca
/* ___cache_alloc_node can fall back to other nodes */
ptr = ____cache_alloc_node(cachep, flags, nodeid);
out:
@ -285,7 +285,7 @@ Index: linux-3.2/mm/slab.c
ptr = cache_alloc_debugcheck_after(cachep, flags, ptr, caller);
kmemleak_alloc_recursive(ptr, obj_size(cachep), 1, cachep->flags,
flags);
@@ -3531,9 +3574,9 @@ __cache_alloc(struct kmem_cache *cachep,
@@ -3538,9 +3581,9 @@ __cache_alloc(struct kmem_cache *cachep,
return NULL;
cache_alloc_debugcheck_before(cachep, flags);
@ -297,7 +297,7 @@ Index: linux-3.2/mm/slab.c
objp = cache_alloc_debugcheck_after(cachep, flags, objp, caller);
kmemleak_alloc_recursive(objp, obj_size(cachep), 1, cachep->flags,
flags);
@@ -3847,9 +3890,9 @@ void kmem_cache_free(struct kmem_cache *
@@ -3854,9 +3897,9 @@ void kmem_cache_free(struct kmem_cache *
debug_check_no_locks_freed(objp, obj_size(cachep));
if (!(cachep->flags & SLAB_DEBUG_OBJECTS))
debug_check_no_obj_freed(objp, obj_size(cachep));
@ -309,7 +309,7 @@ Index: linux-3.2/mm/slab.c
trace_kmem_cache_free(_RET_IP_, objp);
}
@@ -3877,9 +3920,9 @@ void kfree(const void *objp)
@@ -3884,9 +3927,9 @@ void kfree(const void *objp)
c = virt_to_cache(objp);
debug_check_no_locks_freed(objp, obj_size(c));
debug_check_no_obj_freed(objp, obj_size(c));
@ -321,7 +321,7 @@ Index: linux-3.2/mm/slab.c
}
EXPORT_SYMBOL(kfree);
@@ -3922,7 +3965,7 @@ static int alloc_kmemlist(struct kmem_ca
@@ -3929,7 +3972,7 @@ static int alloc_kmemlist(struct kmem_ca
if (l3) {
struct array_cache *shared = l3->shared;
@ -330,7 +330,7 @@ Index: linux-3.2/mm/slab.c
if (shared)
free_block(cachep, shared->entry,
@@ -3935,7 +3978,7 @@ static int alloc_kmemlist(struct kmem_ca
@@ -3942,7 +3985,7 @@ static int alloc_kmemlist(struct kmem_ca
}
l3->free_limit = (1 + nr_cpus_node(node)) *
cachep->batchcount + cachep->num;
@ -339,7 +339,7 @@ Index: linux-3.2/mm/slab.c
kfree(shared);
free_alien_cache(new_alien);
continue;
@@ -3982,17 +4025,28 @@ struct ccupdate_struct {
@@ -3989,17 +4032,28 @@ struct ccupdate_struct {
struct array_cache *new[0];
};
@ -373,7 +373,7 @@ Index: linux-3.2/mm/slab.c
/* Always called with the cache_chain_mutex held */
static int do_tune_cpucache(struct kmem_cache *cachep, int limit,
@@ -4018,7 +4072,7 @@ static int do_tune_cpucache(struct kmem_
@@ -4025,7 +4079,7 @@ static int do_tune_cpucache(struct kmem_
}
new->cachep = cachep;
@ -382,7 +382,7 @@ Index: linux-3.2/mm/slab.c
check_irq_on();
cachep->batchcount = batchcount;
@@ -4029,9 +4083,11 @@ static int do_tune_cpucache(struct kmem_
@@ -4036,9 +4090,11 @@ static int do_tune_cpucache(struct kmem_
struct array_cache *ccold = new->new[i];
if (!ccold)
continue;
@ -396,7 +396,7 @@ Index: linux-3.2/mm/slab.c
kfree(ccold);
}
kfree(new);
@@ -4107,7 +4163,7 @@ static void drain_array(struct kmem_cach
@@ -4114,7 +4170,7 @@ static void drain_array(struct kmem_cach
if (ac->touched && !force) {
ac->touched = 0;
} else {
@ -405,7 +405,7 @@ Index: linux-3.2/mm/slab.c
if (ac->avail) {
tofree = force ? ac->avail : (ac->limit + 4) / 5;
if (tofree > ac->avail)
@@ -4117,7 +4173,7 @@ static void drain_array(struct kmem_cach
@@ -4124,7 +4180,7 @@ static void drain_array(struct kmem_cach
memmove(ac->entry, &(ac->entry[tofree]),
sizeof(void *) * ac->avail);
}
@ -414,7 +414,7 @@ Index: linux-3.2/mm/slab.c
}
}
@@ -4256,7 +4312,7 @@ static int s_show(struct seq_file *m, vo
@@ -4263,7 +4319,7 @@ static int s_show(struct seq_file *m, vo
continue;
check_irq_on();
@ -423,7 +423,7 @@ Index: linux-3.2/mm/slab.c
list_for_each_entry(slabp, &l3->slabs_full, list) {
if (slabp->inuse != cachep->num && !error)
@@ -4281,7 +4337,7 @@ static int s_show(struct seq_file *m, vo
@@ -4288,7 +4344,7 @@ static int s_show(struct seq_file *m, vo
if (l3->shared)
shared_avail += l3->shared->avail;
@ -432,7 +432,7 @@ Index: linux-3.2/mm/slab.c
}
num_slabs += active_slabs;
num_objs = num_slabs * cachep->num;
@@ -4510,13 +4566,13 @@ static int leaks_show(struct seq_file *m
@@ -4517,13 +4573,13 @@ static int leaks_show(struct seq_file *m
continue;
check_irq_on();


@ -0,0 +1,23 @@
Subject: pci: Use __wake_up_all_locked pci_unblock_user_cfg_access()
From: Thomas Gleixner <tglx@linutronix.de>
Date: Thu, 01 Dec 2011 00:07:16 +0100
The waitqueue is protected by pci_lock, so we can simply avoid taking
the waitqueue lock itself. That prevents the
might_sleep()/scheduling-while-atomic problem on RT
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Cc: stable-rt@vger.kernel.org
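A minimal sketch of the resulting pattern, with hypothetical names (the
real code uses pci_lock and pci_ucfg_wait); wake_up_all_locked() is
introduced by the companion wait-provide-__wake_up_all_locked.patch in
this series:

#include <linux/spinlock.h>
#include <linux/wait.h>

static DEFINE_RAW_SPINLOCK(demo_lock);          /* plays the role of pci_lock */
static DECLARE_WAIT_QUEUE_HEAD(demo_wait);      /* plays the role of pci_ucfg_wait */
static int demo_blocked;

static void demo_unblock(void)
{
        unsigned long flags;

        raw_spin_lock_irqsave(&demo_lock, flags);
        demo_blocked = 0;
        /* The waitqueue is serialized by demo_lock, so skip its own lock */
        wake_up_all_locked(&demo_wait);
        raw_spin_unlock_irqrestore(&demo_lock, flags);
}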
Index: linux-rt/drivers/pci/access.c
===================================================================
--- linux-rt.orig/drivers/pci/access.c
+++ linux-rt/drivers/pci/access.c
@@ -441,7 +441,7 @@ void pci_unblock_user_cfg_access(struct
WARN_ON(!dev->block_ucfg_access);
dev->block_ucfg_access = 0;
- wake_up_all(&pci_ucfg_wait);
+ wake_up_all_locked(&pci_ucfg_wait);
raw_spin_unlock_irqrestore(&pci_lock, flags);
}
EXPORT_SYMBOL_GPL(pci_unblock_user_cfg_access);


@ -0,0 +1,132 @@
Subject: slab, lockdep: Annotate all slab caches
From: Peter Zijlstra <a.p.zijlstra@chello.nl>
Date: Mon Nov 28 19:51:51 CET 2011
Currently we only annotate the kmalloc caches, annotate all of them.
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Hans Schillstrom <hans@schillstrom.com>
Cc: Christoph Lameter <cl@gentwo.org>
Cc: Pekka Enberg <penberg@cs.helsinki.fi>
Cc: Matt Mackall <mpm@selenic.com>
Cc: Sitsofe Wheeler <sitsofe@yahoo.com>
Cc: linux-mm@kvack.org
Cc: David Rientjes <rientjes@google.com>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Link: http://lkml.kernel.org/n/tip-10bey2cgpcvtbdkgigaoab8w@git.kernel.org
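The lockdep mechanism being extended here, shown as an illustrative
sketch with hypothetical names (the patch itself drives it through
slab_set_lock_classes() for every cache on the chain, not just the
kmalloc caches):

#include <linux/lockdep.h>
#include <linux/spinlock.h>

static struct lock_class_key demo_l3_key;       /* hypothetical class key */
static spinlock_t demo_list_lock;

static void demo_annotate(void)
{
        spin_lock_init(&demo_list_lock);
        /* Give this lock instance its own lockdep class so nesting of
         * otherwise identical locks can be told apart. */
        lockdep_set_class(&demo_list_lock, &demo_l3_key);
}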
---
mm/slab.c | 52 ++++++++++++++++++++++++++++------------------------
1 file changed, 28 insertions(+), 24 deletions(-)
Index: linux-3.2/mm/slab.c
===================================================================
--- linux-3.2.orig/mm/slab.c
+++ linux-3.2/mm/slab.c
@@ -607,6 +607,12 @@ int slab_is_available(void)
return g_cpucache_up >= EARLY;
}
+/*
+ * Guard access to the cache-chain.
+ */
+static DEFINE_MUTEX(cache_chain_mutex);
+static struct list_head cache_chain;
+
#ifdef CONFIG_LOCKDEP
/*
@@ -668,38 +674,41 @@ static void slab_set_debugobj_lock_class
slab_set_debugobj_lock_classes_node(cachep, node);
}
-static void init_node_lock_keys(int q)
+static void init_lock_keys(struct kmem_cache *cachep, int node)
{
- struct cache_sizes *s = malloc_sizes;
+ struct kmem_list3 *l3;
if (g_cpucache_up < LATE)
return;
- for (s = malloc_sizes; s->cs_size != ULONG_MAX; s++) {
- struct kmem_list3 *l3;
+ l3 = cachep->nodelists[node];
+ if (!l3 || OFF_SLAB(cachep))
+ return;
- l3 = s->cs_cachep->nodelists[q];
- if (!l3 || OFF_SLAB(s->cs_cachep))
- continue;
+ slab_set_lock_classes(cachep, &on_slab_l3_key, &on_slab_alc_key, node);
+}
- slab_set_lock_classes(s->cs_cachep, &on_slab_l3_key,
- &on_slab_alc_key, q);
- }
+static void init_node_lock_keys(int node)
+{
+ struct kmem_cache *cachep;
+
+ list_for_each_entry(cachep, &cache_chain, next)
+ init_lock_keys(cachep, node);
}
-static inline void init_lock_keys(void)
+static inline void init_cachep_lock_keys(struct kmem_cache *cachep)
{
int node;
for_each_node(node)
- init_node_lock_keys(node);
+ init_lock_keys(cachep, node);
}
#else
-static void init_node_lock_keys(int q)
+static void init_node_lock_keys(int node)
{
}
-static inline void init_lock_keys(void)
+static void init_cachep_lock_keys(struct kmem_cache *cachep)
{
}
@@ -712,12 +721,6 @@ static void slab_set_debugobj_lock_class
}
#endif
-/*
- * Guard access to the cache-chain.
- */
-static DEFINE_MUTEX(cache_chain_mutex);
-static struct list_head cache_chain;
-
static DEFINE_PER_CPU(struct delayed_work, slab_reap_work);
static inline struct array_cache *cpu_cache_get(struct kmem_cache *cachep)
@@ -1669,14 +1672,13 @@ void __init kmem_cache_init_late(void)
g_cpucache_up = LATE;
- /* Annotate slab for lockdep -- annotate the malloc caches */
- init_lock_keys();
-
/* 6) resize the head arrays to their final sizes */
mutex_lock(&cache_chain_mutex);
- list_for_each_entry(cachep, &cache_chain, next)
+ list_for_each_entry(cachep, &cache_chain, next) {
+ init_cachep_lock_keys(cachep);
if (enable_cpucache(cachep, GFP_NOWAIT))
BUG();
+ }
mutex_unlock(&cache_chain_mutex);
/* Done! */
@@ -2479,6 +2481,8 @@ kmem_cache_create (const char *name, siz
slab_set_debugobj_lock_classes(cachep);
}
+ init_cachep_lock_keys(cachep);
+
/* cache setup completed, link it into the list */
list_add(&cachep->next, &cache_chain);
oops:


@ -0,0 +1,53 @@
Subject: slab, lockdep: Fix silly bug
From: Peter Zijlstra <a.p.zijlstra@chello.nl>
Date: Mon Nov 28 21:12:40 CET 2011
Commit 30765b92 ("slab, lockdep: Annotate the locks before using
them") moves the init_lock_keys() call from after g_cpucache_up =
FULL to before it, and overlooks the fact that init_node_lock_keys()
tests for it and ignores everything !FULL.
Introduce a LATE stage and change the lockdep test to be <LATE.
Cc: stable@kernel.org
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Hans Schillstrom <hans@schillstrom.com>
Cc: Christoph Lameter <cl@gentwo.org>
Cc: Pekka Enberg <penberg@cs.helsinki.fi>
Cc: Matt Mackall <mpm@selenic.com>
Cc: Sitsofe Wheeler <sitsofe@yahoo.com>
Cc: linux-mm@kvack.org
Cc: David Rientjes <rientjes@google.com>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Link: http://lkml.kernel.org/n/tip-gadqbdfxorhia1w5ewmoiodd@git.kernel.org
---
Index: linux-rt/mm/slab.c
===================================================================
--- linux-rt.orig/mm/slab.c
+++ linux-rt/mm/slab.c
@@ -594,6 +594,7 @@ static enum {
PARTIAL_AC,
PARTIAL_L3,
EARLY,
+ LATE,
FULL
} g_cpucache_up;
@@ -670,7 +671,7 @@ static void init_node_lock_keys(int q)
{
struct cache_sizes *s = malloc_sizes;
- if (g_cpucache_up != FULL)
+ if (g_cpucache_up < LATE)
return;
for (s = malloc_sizes; s->cs_size != ULONG_MAX; s++) {
@@ -1725,6 +1726,8 @@ void __init kmem_cache_init_late(void)
{
struct kmem_cache *cachep;
+ g_cpucache_up = LATE;
+
/* Annotate slab for lockdep -- annotate the malloc caches */
init_lock_keys();


@ -6,6 +6,8 @@
# UPSTREAM changes queued for 3.2 or in 3.1
############################################################
x86_64-patch-for-idle-notifiers.patch
re-possible-slab-deadlock-while-doing-ifenslave.patch
re-possible-slab-deadlock-while-doing-ifenslave-1.patch
# Should go to stable 3.0.x!
@ -22,8 +24,6 @@ x86_64-patch-for-idle-notifiers.patch
############################################################
# Stuff broken upstream, patches submitted
############################################################
drm-sigh.patch
drm-more-moronic-crap.patch
x86-kprobes-remove-bogus-preempt-enable.patch
############################################################
@ -47,7 +47,6 @@ sched-distangle-worker-accounting-from-rq-3elock.patch
# Submitted to mips ML
############################################################
mips-enable-interrupts-in-signal.patch
mips-octeon-mark-smp-ipi-interrupt-as-irqf_no_thread.patch
############################################################
# Submitted to ARM ML
@ -92,7 +91,6 @@ power-allow-irq-threading.patch
# Pending in peterz's scheduler queue
############################################################
sched-keep-period-timer-alive-when-throttled.patch
sched-prevent-boosting-from-throttling.patch
@ -200,6 +198,13 @@ locking-various-init-fixes.patch
seqlock-remove-unused-functions.patch
seqlock-use-seqcount.patch
# PCI
wait-provide-__wake_up_all_locked.patch
pci-access-use-__wake_up_all_locked.patch
# ACPI
acpi-make-gbl-hardware-lock-raw.patch
#####################################################
# Stuff which should go mainline, but wants some care
#####################################################
@ -417,6 +422,8 @@ softirq-fix-unplug-deadlock.patch
softirq-disable-softirq-stacks-for-rt.patch
softirq-make-fifo.patch
tasklet-rt-prevent-tasklets-from-going-into-infinite-spin-in-rt.patch
# LOCAL VARS and GETCPU STUFF
local-vars-migrate-disable.patch
@ -549,3 +556,4 @@ kconfig-preempt-rt-full.patch
# Needs some thought and testing
#softirq-preempt-fix.patch


@ -0,0 +1,406 @@
Subject: tasklet: Prevent tasklets from going into infinite spin in RT
From: Ingo Molnar <mingo@elte.hu>
Date: Tue Nov 29 20:18:22 2011 -0500
When CONFIG_PREEMPT_RT_FULL is enabled, tasklets run as threads,
and spinlocks turn into mutexes. But this can cause issues with
tasks disabling tasklets. A tasklet runs under ksoftirqd, and
if a tasklet is disabled with tasklet_disable(), the tasklet
count is increased. When a tasklet runs, it checks this counter
and if it is set, it adds itself back on the softirq queue and
returns.
The problem arises in RT because ksoftirqd will see that a softirq
is ready to run (the tasklet softirq just re-armed itself), and will
not sleep, but instead run the softirqs again. The tasklet softirq
will still see that the count is non-zero, so it will not execute
the tasklet but will requeue itself on the softirq again, which will
cause ksoftirqd to run it again and again and again.
It gets worse because ksoftirqd runs as a real-time thread.
If it preempted the task that disabled tasklets, and that task
has migration disabled, or can't run for other reasons, the tasklet
softirq will never run because the count will never be zero, and
ksoftirqd will go into an infinite loop. As an RT task, this
becomes a big problem.
This is a hack solution to have tasklet_disable stop tasklets, and
when a tasklet runs, instead of requeueing the tasklet on the softirq,
it delays it. When tasklet_enable() is called and tasklets are
waiting, tasklet_enable() will kick the tasklets to continue.
This prevents the lockup caused by ksoftirqd going into an infinite loop.
[ rostedt@goodmis.org: ported to 3.0-rt ]
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
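A hypothetical driver snippet illustrating the interaction this patch
changes (none of these names exist in the patch itself): if the tasklet
fires while it is disabled, it is now parked with TASKLET_STATE_PENDING
instead of endlessly re-raising the softirq, and tasklet_enable()
re-schedules it.

#include <linux/interrupt.h>

static void demo_func(unsigned long data)
{
        /* tasklet body */
}
static DECLARE_TASKLET(demo_tasklet, demo_func, 0);

static void demo_reconfigure(void)
{
        tasklet_disable(&demo_tasklet);  /* bump t->count, wait for a running instance */
        /* ... touch state that demo_func also uses ... */
        tasklet_enable(&demo_tasklet);   /* with this patch: kicks a pending tasklet */
}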
---
include/linux/interrupt.h | 39 ++++----
kernel/softirq.c | 208 +++++++++++++++++++++++++++++++++-------------
2 files changed, 170 insertions(+), 77 deletions(-)
Index: linux-3.2/include/linux/interrupt.h
===================================================================
--- linux-3.2.orig/include/linux/interrupt.h
+++ linux-3.2/include/linux/interrupt.h
@@ -517,8 +517,9 @@ extern void __send_remote_softirq(struct
to be executed on some cpu at least once after this.
* If the tasklet is already scheduled, but its execution is still not
started, it will be executed only once.
- * If this tasklet is already running on another CPU (or schedule is called
- from tasklet itself), it is rescheduled for later.
+ * If this tasklet is already running on another CPU, it is rescheduled
+ for later.
+ * Schedule must not be called from the tasklet itself (a lockup occurs)
* Tasklet is strictly serialized wrt itself, but not
wrt another tasklets. If client needs some intertask synchronization,
he makes it with spinlocks.
@@ -543,27 +544,36 @@ struct tasklet_struct name = { NULL, 0,
enum
{
TASKLET_STATE_SCHED, /* Tasklet is scheduled for execution */
- TASKLET_STATE_RUN /* Tasklet is running (SMP only) */
+ TASKLET_STATE_RUN, /* Tasklet is running (SMP only) */
+ TASKLET_STATE_PENDING /* Tasklet is pending */
};
-#ifdef CONFIG_SMP
+#define TASKLET_STATEF_SCHED (1 << TASKLET_STATE_SCHED)
+#define TASKLET_STATEF_RUN (1 << TASKLET_STATE_RUN)
+#define TASKLET_STATEF_PENDING (1 << TASKLET_STATE_PENDING)
+
+#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT_FULL)
static inline int tasklet_trylock(struct tasklet_struct *t)
{
return !test_and_set_bit(TASKLET_STATE_RUN, &(t)->state);
}
+static inline int tasklet_tryunlock(struct tasklet_struct *t)
+{
+ return cmpxchg(&t->state, TASKLET_STATEF_RUN, 0) == TASKLET_STATEF_RUN;
+}
+
static inline void tasklet_unlock(struct tasklet_struct *t)
{
smp_mb__before_clear_bit();
clear_bit(TASKLET_STATE_RUN, &(t)->state);
}
-static inline void tasklet_unlock_wait(struct tasklet_struct *t)
-{
- while (test_bit(TASKLET_STATE_RUN, &(t)->state)) { barrier(); }
-}
+extern void tasklet_unlock_wait(struct tasklet_struct *t);
+
#else
#define tasklet_trylock(t) 1
+#define tasklet_tryunlock(t) 1
#define tasklet_unlock_wait(t) do { } while (0)
#define tasklet_unlock(t) do { } while (0)
#endif
@@ -612,17 +622,8 @@ static inline void tasklet_disable(struc
smp_mb();
}
-static inline void tasklet_enable(struct tasklet_struct *t)
-{
- smp_mb__before_atomic_dec();
- atomic_dec(&t->count);
-}
-
-static inline void tasklet_hi_enable(struct tasklet_struct *t)
-{
- smp_mb__before_atomic_dec();
- atomic_dec(&t->count);
-}
+extern void tasklet_enable(struct tasklet_struct *t);
+extern void tasklet_hi_enable(struct tasklet_struct *t);
extern void tasklet_kill(struct tasklet_struct *t);
extern void tasklet_kill_immediate(struct tasklet_struct *t, unsigned int cpu);
Index: linux-3.2/kernel/softirq.c
===================================================================
--- linux-3.2.orig/kernel/softirq.c
+++ linux-3.2/kernel/softirq.c
@@ -21,6 +21,7 @@
#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/rcupdate.h>
+#include <linux/delay.h>
#include <linux/ftrace.h>
#include <linux/smp.h>
#include <linux/tick.h>
@@ -670,15 +671,45 @@ struct tasklet_head
static DEFINE_PER_CPU(struct tasklet_head, tasklet_vec);
static DEFINE_PER_CPU(struct tasklet_head, tasklet_hi_vec);
+static void inline
+__tasklet_common_schedule(struct tasklet_struct *t, struct tasklet_head *head, unsigned int nr)
+{
+ if (tasklet_trylock(t)) {
+again:
+ /* We may have been preempted before tasklet_trylock
+ * and __tasklet_action may have already run.
+ * So double check the sched bit while the takslet
+ * is locked before adding it to the list.
+ */
+ if (test_bit(TASKLET_STATE_SCHED, &t->state)) {
+ t->next = NULL;
+ *head->tail = t;
+ head->tail = &(t->next);
+ raise_softirq_irqoff(nr);
+ tasklet_unlock(t);
+ } else {
+ /* This is subtle. If we hit the corner case above
+ * It is possible that we get preempted right here,
+ * and another task has successfully called
+ * tasklet_schedule(), then this function, and
+ * failed on the trylock. Thus we must be sure
+ * before releasing the tasklet lock, that the
+ * SCHED_BIT is clear. Otherwise the tasklet
+ * may get its SCHED_BIT set, but not added to the
+ * list
+ */
+ if (!tasklet_tryunlock(t))
+ goto again;
+ }
+ }
+}
+
void __tasklet_schedule(struct tasklet_struct *t)
{
unsigned long flags;
local_irq_save(flags);
- t->next = NULL;
- *__this_cpu_read(tasklet_vec.tail) = t;
- __this_cpu_write(tasklet_vec.tail, &(t->next));
- raise_softirq_irqoff(TASKLET_SOFTIRQ);
+ __tasklet_common_schedule(t, &__get_cpu_var(tasklet_vec), TASKLET_SOFTIRQ);
local_irq_restore(flags);
}
@@ -689,10 +720,7 @@ void __tasklet_hi_schedule(struct taskle
unsigned long flags;
local_irq_save(flags);
- t->next = NULL;
- *__this_cpu_read(tasklet_hi_vec.tail) = t;
- __this_cpu_write(tasklet_hi_vec.tail, &(t->next));
- raise_softirq_irqoff(HI_SOFTIRQ);
+ __tasklet_common_schedule(t, &__get_cpu_var(tasklet_hi_vec), HI_SOFTIRQ);
local_irq_restore(flags);
}
@@ -700,50 +728,119 @@ EXPORT_SYMBOL(__tasklet_hi_schedule);
void __tasklet_hi_schedule_first(struct tasklet_struct *t)
{
- BUG_ON(!irqs_disabled());
-
- t->next = __this_cpu_read(tasklet_hi_vec.head);
- __this_cpu_write(tasklet_hi_vec.head, t);
- __raise_softirq_irqoff(HI_SOFTIRQ);
+ __tasklet_hi_schedule(t);
}
EXPORT_SYMBOL(__tasklet_hi_schedule_first);
-static void tasklet_action(struct softirq_action *a)
+void tasklet_enable(struct tasklet_struct *t)
{
- struct tasklet_struct *list;
+ if (!atomic_dec_and_test(&t->count))
+ return;
+ if (test_and_clear_bit(TASKLET_STATE_PENDING, &t->state))
+ tasklet_schedule(t);
+}
- local_irq_disable();
- list = __this_cpu_read(tasklet_vec.head);
- __this_cpu_write(tasklet_vec.head, NULL);
- __this_cpu_write(tasklet_vec.tail, &__get_cpu_var(tasklet_vec).head);
- local_irq_enable();
+EXPORT_SYMBOL(tasklet_enable);
+
+void tasklet_hi_enable(struct tasklet_struct *t)
+{
+ if (!atomic_dec_and_test(&t->count))
+ return;
+ if (test_and_clear_bit(TASKLET_STATE_PENDING, &t->state))
+ tasklet_hi_schedule(t);
+}
+
+EXPORT_SYMBOL(tasklet_hi_enable);
+
+static void
+__tasklet_action(struct softirq_action *a, struct tasklet_struct *list)
+{
+ int loops = 1000000;
while (list) {
struct tasklet_struct *t = list;
list = list->next;
- if (tasklet_trylock(t)) {
- if (!atomic_read(&t->count)) {
- if (!test_and_clear_bit(TASKLET_STATE_SCHED, &t->state))
- BUG();
- t->func(t->data);
- tasklet_unlock(t);
- continue;
- }
- tasklet_unlock(t);
+ /*
+ * Should always succeed - after a tasklist got on the
+ * list (after getting the SCHED bit set from 0 to 1),
+ * nothing but the tasklet softirq it got queued to can
+ * lock it:
+ */
+ if (!tasklet_trylock(t)) {
+ WARN_ON(1);
+ continue;
}
- local_irq_disable();
t->next = NULL;
- *__this_cpu_read(tasklet_vec.tail) = t;
- __this_cpu_write(tasklet_vec.tail, &(t->next));
- __raise_softirq_irqoff(TASKLET_SOFTIRQ);
- local_irq_enable();
+
+ /*
+ * If we cannot handle the tasklet because it's disabled,
+ * mark it as pending. tasklet_enable() will later
+ * re-schedule the tasklet.
+ */
+ if (unlikely(atomic_read(&t->count))) {
+out_disabled:
+ /* implicit unlock: */
+ wmb();
+ t->state = TASKLET_STATEF_PENDING;
+ continue;
+ }
+
+ /*
+ * After this point on the tasklet might be rescheduled
+ * on another CPU, but it can only be added to another
+ * CPU's tasklet list if we unlock the tasklet (which we
+ * dont do yet).
+ */
+ if (!test_and_clear_bit(TASKLET_STATE_SCHED, &t->state))
+ WARN_ON(1);
+
+again:
+ t->func(t->data);
+
+ /*
+ * Try to unlock the tasklet. We must use cmpxchg, because
+ * another CPU might have scheduled or disabled the tasklet.
+ * We only allow the STATE_RUN -> 0 transition here.
+ */
+ while (!tasklet_tryunlock(t)) {
+ /*
+ * If it got disabled meanwhile, bail out:
+ */
+ if (atomic_read(&t->count))
+ goto out_disabled;
+ /*
+ * If it got scheduled meanwhile, re-execute
+ * the tasklet function:
+ */
+ if (test_and_clear_bit(TASKLET_STATE_SCHED, &t->state))
+ goto again;
+ if (!--loops) {
+ printk("hm, tasklet state: %08lx\n", t->state);
+ WARN_ON(1);
+ tasklet_unlock(t);
+ break;
+ }
+ }
}
}
+static void tasklet_action(struct softirq_action *a)
+{
+ struct tasklet_struct *list;
+
+ local_irq_disable();
+ list = __get_cpu_var(tasklet_vec).head;
+ __get_cpu_var(tasklet_vec).head = NULL;
+ __get_cpu_var(tasklet_vec).tail = &__get_cpu_var(tasklet_vec).head;
+ local_irq_enable();
+
+ __tasklet_action(a, list);
+}
+
static void tasklet_hi_action(struct softirq_action *a)
{
struct tasklet_struct *list;
@@ -754,29 +851,7 @@ static void tasklet_hi_action(struct sof
__this_cpu_write(tasklet_hi_vec.tail, &__get_cpu_var(tasklet_hi_vec).head);
local_irq_enable();
- while (list) {
- struct tasklet_struct *t = list;
-
- list = list->next;
-
- if (tasklet_trylock(t)) {
- if (!atomic_read(&t->count)) {
- if (!test_and_clear_bit(TASKLET_STATE_SCHED, &t->state))
- BUG();
- t->func(t->data);
- tasklet_unlock(t);
- continue;
- }
- tasklet_unlock(t);
- }
-
- local_irq_disable();
- t->next = NULL;
- *__this_cpu_read(tasklet_hi_vec.tail) = t;
- __this_cpu_write(tasklet_hi_vec.tail, &(t->next));
- __raise_softirq_irqoff(HI_SOFTIRQ);
- local_irq_enable();
- }
+ __tasklet_action(a, list);
}
@@ -799,7 +874,7 @@ void tasklet_kill(struct tasklet_struct
while (test_and_set_bit(TASKLET_STATE_SCHED, &t->state)) {
do {
- yield();
+ msleep(1);
} while (test_bit(TASKLET_STATE_SCHED, &t->state));
}
tasklet_unlock_wait(t);
@@ -1005,6 +1080,23 @@ void __init softirq_init(void)
open_softirq(HI_SOFTIRQ, tasklet_hi_action);
}
+#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT_FULL)
+void tasklet_unlock_wait(struct tasklet_struct *t)
+{
+ while (test_bit(TASKLET_STATE_RUN, &(t)->state)) {
+ /*
+ * Hack for now to avoid this busy-loop:
+ */
+#ifdef CONFIG_PREEMPT_RT_FULL
+ msleep(1);
+#else
+ barrier();
+#endif
+ }
+}
+EXPORT_SYMBOL(tasklet_unlock_wait);
+#endif
+
static int run_ksoftirqd(void * __bind_cpu)
{
ksoftirqd_set_sched_params();


@ -0,0 +1,49 @@
Subject: wait: Provide __wake_up_all_locked
From: Thomas Gleixner <tglx@linutronix.de>
Date: Thu, 01 Dec 2011 00:04:00 +0100
For code which protects the waitqueue itself with another lock it
makes no sense to acquire the waitqueue lock for wakeup all. Provide
__wake_up_all_locked.
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Cc: stable-rt@vger.kernel.org
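A minimal sketch of how the two locked wakeup macros behave after this
patch (the caller below is hypothetical): nr_exclusive == 1 wakes one
exclusive waiter, 0 wakes everybody, and neither variant takes the
waitqueue lock itself.

#include <linux/wait.h>

static DECLARE_WAIT_QUEUE_HEAD(demo_wq);

static void demo_wake(int everybody)
{
        unsigned long flags;

        spin_lock_irqsave(&demo_wq.lock, flags);
        if (everybody)
                wake_up_all_locked(&demo_wq);   /* __wake_up_locked(q, TASK_NORMAL, 0) */
        else
                wake_up_locked(&demo_wq);       /* __wake_up_locked(q, TASK_NORMAL, 1) */
        spin_unlock_irqrestore(&demo_wq.lock, flags);
}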
Index: linux-rt/include/linux/wait.h
===================================================================
--- linux-rt.orig/include/linux/wait.h
+++ linux-rt/include/linux/wait.h
@@ -157,7 +157,7 @@ void __wake_up(wait_queue_head_t *q, uns
void __wake_up_locked_key(wait_queue_head_t *q, unsigned int mode, void *key);
void __wake_up_sync_key(wait_queue_head_t *q, unsigned int mode, int nr,
void *key);
-void __wake_up_locked(wait_queue_head_t *q, unsigned int mode);
+void __wake_up_locked(wait_queue_head_t *q, unsigned int mode, int nr);
void __wake_up_sync(wait_queue_head_t *q, unsigned int mode, int nr);
void __wake_up_bit(wait_queue_head_t *, void *, int);
int __wait_on_bit(wait_queue_head_t *, struct wait_bit_queue *, int (*)(void *), unsigned);
@@ -170,7 +170,8 @@ wait_queue_head_t *bit_waitqueue(void *,
#define wake_up(x) __wake_up(x, TASK_NORMAL, 1, NULL)
#define wake_up_nr(x, nr) __wake_up(x, TASK_NORMAL, nr, NULL)
#define wake_up_all(x) __wake_up(x, TASK_NORMAL, 0, NULL)
-#define wake_up_locked(x) __wake_up_locked((x), TASK_NORMAL)
+#define wake_up_locked(x) __wake_up_locked((x), TASK_NORMAL, 1)
+#define wake_up_all_locked(x) __wake_up_locked((x), TASK_NORMAL, 0)
#define wake_up_interruptible(x) __wake_up(x, TASK_INTERRUPTIBLE, 1, NULL)
#define wake_up_interruptible_nr(x, nr) __wake_up(x, TASK_INTERRUPTIBLE, nr, NULL)
Index: linux-rt/kernel/sched.c
===================================================================
--- linux-rt.orig/kernel/sched.c
+++ linux-rt/kernel/sched.c
@@ -4643,9 +4643,9 @@ EXPORT_SYMBOL(__wake_up);
/*
* Same as __wake_up but called with the spinlock in wait_queue_head_t held.
*/
-void __wake_up_locked(wait_queue_head_t *q, unsigned int mode)
+void __wake_up_locked(wait_queue_head_t *q, unsigned int mode, int nr)
{
- __wake_up_common(q, mode, 1, 0, NULL);
+ __wake_up_common(q, mode, nr, 0, NULL);
}
EXPORT_SYMBOL_GPL(__wake_up_locked);


@ -1,4 +1,6 @@
+ features/all/rt/x86_64-patch-for-idle-notifiers.patch featureset=rt
+ features/all/rt/re-possible-slab-deadlock-while-doing-ifenslave.patch featureset=rt
+ features/all/rt/re-possible-slab-deadlock-while-doing-ifenslave-1.patch featureset=rt
+ features/all/rt/x86-kprobes-remove-bogus-preempt-enable.patch featureset=rt
+ features/all/rt/x86-hpet-disable-msi-on-lenovo-w510.patch featureset=rt
+ features/all/rt/block-shorten-interrupt-disabled-regions.patch featureset=rt
@ -57,6 +59,9 @@
+ features/all/rt/locking-various-init-fixes.patch featureset=rt
+ features/all/rt/seqlock-remove-unused-functions.patch featureset=rt
+ features/all/rt/seqlock-use-seqcount.patch featureset=rt
+ features/all/rt/wait-provide-__wake_up_all_locked.patch featureset=rt
+ features/all/rt/pci-access-use-__wake_up_all_locked.patch featureset=rt
+ features/all/rt/acpi-make-gbl-hardware-lock-raw.patch featureset=rt
+ features/all/rt/seqlock-raw-seqlock.patch featureset=rt
+ features/all/rt/timekeeping-covert-xtimelock.patch featureset=rt
+ features/all/rt/latency-hist.patch featureset=rt
@ -173,6 +178,7 @@
+ features/all/rt/softirq-fix-unplug-deadlock.patch featureset=rt
+ features/all/rt/softirq-disable-softirq-stacks-for-rt.patch featureset=rt
+ features/all/rt/softirq-make-fifo.patch featureset=rt
+ features/all/rt/tasklet-rt-prevent-tasklets-from-going-into-infinite-spin-in-rt.patch featureset=rt
+ features/all/rt/local-vars-migrate-disable.patch featureset=rt
+ features/all/rt/md-raid5-percpu-handling-rt-aware.patch featureset=rt
+ features/all/rt/rtmutex-lock-killable.patch featureset=rt