diff --git a/debian/changelog b/debian/changelog index 80b025fc9..750688c73 100644 --- a/debian/changelog +++ b/debian/changelog @@ -25,6 +25,9 @@ linux-2.6 (3.0.0-4) UNRELEASED; urgency=low * Update Spanish debconf template translations (Omar Campagne) (Closes: #636242) + [ Uwe Kleine-König ] + * [amd64] Update rt featureset to 3.0.4-rt13 + -- Ben Hutchings Sun, 28 Aug 2011 17:07:47 +0100 linux-2.6 (3.0.0-3) unstable; urgency=low diff --git a/debian/patches/features/all/rt/patch-3.0.1-rt11.patch b/debian/patches/features/all/rt/patch-3.0.4-rt13.patch similarity index 97% rename from debian/patches/features/all/rt/patch-3.0.1-rt11.patch rename to debian/patches/features/all/rt/patch-3.0.4-rt13.patch index 547822f25..fc40dc5a2 100644 --- a/debian/patches/features/all/rt/patch-3.0.1-rt11.patch +++ b/debian/patches/features/all/rt/patch-3.0.4-rt13.patch @@ -1,5 +1,970 @@ -bwh: Update context for 3.0.2 - +Index: linux-2.6/mm/memory.c +=================================================================== +--- linux-2.6.orig/mm/memory.c ++++ linux-2.6/mm/memory.c +@@ -1290,13 +1290,6 @@ static unsigned long unmap_page_range(st + return addr; + } + +-#ifdef CONFIG_PREEMPT +-# define ZAP_BLOCK_SIZE (8 * PAGE_SIZE) +-#else +-/* No preempt: go for improved straight-line efficiency */ +-# define ZAP_BLOCK_SIZE (1024 * PAGE_SIZE) +-#endif +- + /** + * unmap_vmas - unmap a range of memory covered by a list of vma's + * @tlb: address of the caller's struct mmu_gather +@@ -3435,6 +3428,32 @@ unlock: + return 0; + } + ++#ifdef CONFIG_PREEMPT_RT_FULL ++void pagefault_disable(void) ++{ ++ migrate_disable(); ++ current->pagefault_disabled++; ++ /* ++ * make sure to have issued the store before a pagefault ++ * can hit. ++ */ ++ barrier(); ++} ++EXPORT_SYMBOL_GPL(pagefault_disable); ++ ++void pagefault_enable(void) ++{ ++ /* ++ * make sure to issue those last loads/stores before enabling ++ * the pagefault handler again. ++ */ ++ barrier(); ++ current->pagefault_disabled--; ++ migrate_enable(); ++} ++EXPORT_SYMBOL_GPL(pagefault_enable); ++#endif ++ + /* + * By the time we get here, we already hold the mm semaphore + */ +@@ -3983,3 +4002,35 @@ void copy_user_huge_page(struct page *ds + } + } + #endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_HUGETLBFS */ ++ ++#if defined(CONFIG_PREEMPT_RT_FULL) && (USE_SPLIT_PTLOCKS > 0) ++/* ++ * Heinous hack, relies on the caller doing something like: ++ * ++ * pte = alloc_pages(PGALLOC_GFP, 0); ++ * if (pte) ++ * pgtable_page_ctor(pte); ++ * return pte; ++ * ++ * This ensures we release the page and return NULL when the ++ * lock allocation fails. 
++ */ ++struct page *pte_lock_init(struct page *page) ++{ ++ page->ptl = kmalloc(sizeof(spinlock_t), GFP_KERNEL); ++ if (page->ptl) { ++ spin_lock_init(__pte_lockptr(page)); ++ } else { ++ __free_page(page); ++ page = NULL; ++ } ++ return page; ++} ++ ++void pte_lock_deinit(struct page *page) ++{ ++ kfree(page->ptl); ++ page->mapping = NULL; ++} ++ ++#endif +Index: linux-2.6/kernel/sched_cpupri.c +=================================================================== +--- linux-2.6.orig/kernel/sched_cpupri.c ++++ linux-2.6/kernel/sched_cpupri.c +@@ -47,9 +47,6 @@ static int convert_prio(int prio) + return cpupri; + } + +-#define for_each_cpupri_active(array, idx) \ +- for_each_set_bit(idx, array, CPUPRI_NR_PRIORITIES) +- + /** + * cpupri_find - find the best (lowest-pri) CPU in the system + * @cp: The cpupri context +@@ -71,11 +68,38 @@ int cpupri_find(struct cpupri *cp, struc + int idx = 0; + int task_pri = convert_prio(p->prio); + +- for_each_cpupri_active(cp->pri_active, idx) { ++ if (task_pri >= MAX_RT_PRIO) ++ return 0; ++ ++ for (idx = 0; idx < task_pri; idx++) { + struct cpupri_vec *vec = &cp->pri_to_cpu[idx]; ++ int skip = 0; + +- if (idx >= task_pri) +- break; ++ if (!atomic_read(&(vec)->count)) ++ skip = 1; ++ /* ++ * When looking at the vector, we need to read the counter, ++ * do a memory barrier, then read the mask. ++ * ++ * Note: This is still all racey, but we can deal with it. ++ * Ideally, we only want to look at masks that are set. ++ * ++ * If a mask is not set, then the only thing wrong is that we ++ * did a little more work than necessary. ++ * ++ * If we read a zero count but the mask is set, because of the ++ * memory barriers, that can only happen when the highest prio ++ * task for a run queue has left the run queue, in which case, ++ * it will be followed by a pull. If the task we are processing ++ * fails to find a proper place to go, that pull request will ++ * pull this task if the run queue is running at a lower ++ * priority. ++ */ ++ smp_rmb(); ++ ++ /* Need to do the rmb for every iteration */ ++ if (skip) ++ continue; + + if (cpumask_any_and(&p->cpus_allowed, vec->mask) >= nr_cpu_ids) + continue; +@@ -115,7 +139,7 @@ void cpupri_set(struct cpupri *cp, int c + { + int *currpri = &cp->cpu_to_pri[cpu]; + int oldpri = *currpri; +- unsigned long flags; ++ int do_mb = 0; + + newpri = convert_prio(newpri); + +@@ -134,26 +158,41 @@ void cpupri_set(struct cpupri *cp, int c + if (likely(newpri != CPUPRI_INVALID)) { + struct cpupri_vec *vec = &cp->pri_to_cpu[newpri]; + +- raw_spin_lock_irqsave(&vec->lock, flags); +- + cpumask_set_cpu(cpu, vec->mask); +- vec->count++; +- if (vec->count == 1) +- set_bit(newpri, cp->pri_active); +- +- raw_spin_unlock_irqrestore(&vec->lock, flags); ++ /* ++ * When adding a new vector, we update the mask first, ++ * do a write memory barrier, and then update the count, to ++ * make sure the vector is visible when count is set. ++ */ ++ smp_mb__before_atomic_inc(); ++ atomic_inc(&(vec)->count); ++ do_mb = 1; + } + if (likely(oldpri != CPUPRI_INVALID)) { + struct cpupri_vec *vec = &cp->pri_to_cpu[oldpri]; + +- raw_spin_lock_irqsave(&vec->lock, flags); +- +- vec->count--; +- if (!vec->count) +- clear_bit(oldpri, cp->pri_active); ++ /* ++ * Because the order of modification of the vec->count ++ * is important, we must make sure that the update ++ * of the new prio is seen before we decrement the ++ * old prio. This makes sure that the loop sees ++ * one or the other when we raise the priority of ++ * the run queue. 
We don't care about when we lower the ++ * priority, as that will trigger an rt pull anyway. ++ * ++ * We only need to do a memory barrier if we updated ++ * the new priority vec. ++ */ ++ if (do_mb) ++ smp_mb__after_atomic_inc(); ++ ++ /* ++ * When removing from the vector, we decrement the counter first ++ * do a memory barrier and then clear the mask. ++ */ ++ atomic_dec(&(vec)->count); ++ smp_mb__after_atomic_inc(); + cpumask_clear_cpu(cpu, vec->mask); +- +- raw_spin_unlock_irqrestore(&vec->lock, flags); + } + + *currpri = newpri; +@@ -175,8 +214,7 @@ int cpupri_init(struct cpupri *cp) + for (i = 0; i < CPUPRI_NR_PRIORITIES; i++) { + struct cpupri_vec *vec = &cp->pri_to_cpu[i]; + +- raw_spin_lock_init(&vec->lock); +- vec->count = 0; ++ atomic_set(&vec->count, 0); + if (!zalloc_cpumask_var(&vec->mask, GFP_KERNEL)) + goto cleanup; + } +Index: linux-2.6/kernel/sched_cpupri.h +=================================================================== +--- linux-2.6.orig/kernel/sched_cpupri.h ++++ linux-2.6/kernel/sched_cpupri.h +@@ -12,9 +12,8 @@ + /* values 2-101 are RT priorities 0-99 */ + + struct cpupri_vec { +- raw_spinlock_t lock; +- int count; +- cpumask_var_t mask; ++ atomic_t count; ++ cpumask_var_t mask; + }; + + struct cpupri { +Index: linux-2.6/mm/slab.c +=================================================================== +--- linux-2.6.orig/mm/slab.c ++++ linux-2.6/mm/slab.c +@@ -116,6 +116,7 @@ + #include + #include + #include ++#include + + #include + #include +@@ -620,6 +621,51 @@ int slab_is_available(void) + static struct lock_class_key on_slab_l3_key; + static struct lock_class_key on_slab_alc_key; + ++static struct lock_class_key debugobj_l3_key; ++static struct lock_class_key debugobj_alc_key; ++ ++static void slab_set_lock_classes(struct kmem_cache *cachep, ++ struct lock_class_key *l3_key, struct lock_class_key *alc_key, ++ int q) ++{ ++ struct array_cache **alc; ++ struct kmem_list3 *l3; ++ int r; ++ ++ l3 = cachep->nodelists[q]; ++ if (!l3) ++ return; ++ ++ lockdep_set_class(&l3->list_lock, l3_key); ++ alc = l3->alien; ++ /* ++ * FIXME: This check for BAD_ALIEN_MAGIC ++ * should go away when common slab code is taught to ++ * work even without alien caches. ++ * Currently, non NUMA code returns BAD_ALIEN_MAGIC ++ * for alloc_alien_cache, ++ */ ++ if (!alc || (unsigned long)alc == BAD_ALIEN_MAGIC) ++ return; ++ for_each_node(r) { ++ if (alc[r]) ++ lockdep_set_class(&alc[r]->lock, alc_key); ++ } ++} ++ ++static void slab_set_debugobj_lock_classes_node(struct kmem_cache *cachep, int node) ++{ ++ slab_set_lock_classes(cachep, &debugobj_l3_key, &debugobj_alc_key, node); ++} ++ ++static void slab_set_debugobj_lock_classes(struct kmem_cache *cachep) ++{ ++ int node; ++ ++ for_each_online_node(node) ++ slab_set_debugobj_lock_classes_node(cachep, node); ++} ++ + static void init_node_lock_keys(int q) + { + struct cache_sizes *s = malloc_sizes; +@@ -628,29 +674,14 @@ static void init_node_lock_keys(int q) + return; + + for (s = malloc_sizes; s->cs_size != ULONG_MAX; s++) { +- struct array_cache **alc; + struct kmem_list3 *l3; +- int r; + + l3 = s->cs_cachep->nodelists[q]; + if (!l3 || OFF_SLAB(s->cs_cachep)) + continue; +- lockdep_set_class(&l3->list_lock, &on_slab_l3_key); +- alc = l3->alien; +- /* +- * FIXME: This check for BAD_ALIEN_MAGIC +- * should go away when common slab code is taught to +- * work even without alien caches. 
+- * Currently, non NUMA code returns BAD_ALIEN_MAGIC +- * for alloc_alien_cache, +- */ +- if (!alc || (unsigned long)alc == BAD_ALIEN_MAGIC) +- continue; +- for_each_node(r) { +- if (alc[r]) +- lockdep_set_class(&alc[r]->lock, +- &on_slab_alc_key); +- } ++ ++ slab_set_lock_classes(s->cs_cachep, &on_slab_l3_key, ++ &on_slab_alc_key, q); + } + } + +@@ -669,6 +700,14 @@ static void init_node_lock_keys(int q) + static inline void init_lock_keys(void) + { + } ++ ++static void slab_set_debugobj_lock_classes_node(struct kmem_cache *cachep, int node) ++{ ++} ++ ++static void slab_set_debugobj_lock_classes(struct kmem_cache *cachep) ++{ ++} + #endif + + /* +@@ -678,12 +717,66 @@ static DEFINE_MUTEX(cache_chain_mutex); + static struct list_head cache_chain; + + static DEFINE_PER_CPU(struct delayed_work, slab_reap_work); ++static DEFINE_PER_CPU(struct list_head, slab_free_list); ++static DEFINE_LOCAL_IRQ_LOCK(slab_lock); ++ ++#ifndef CONFIG_PREEMPT_RT_BASE ++# define slab_on_each_cpu(func, cp) on_each_cpu(func, cp, 1) ++#else ++/* ++ * execute func() for all CPUs. On PREEMPT_RT we dont actually have ++ * to run on the remote CPUs - we only have to take their CPU-locks. ++ * (This is a rare operation, so cacheline bouncing is not an issue.) ++ */ ++static void ++slab_on_each_cpu(void (*func)(void *arg, int this_cpu), void *arg) ++{ ++ unsigned int i; ++ ++ for_each_online_cpu(i) ++ func(arg, i); ++} ++#endif ++ ++static void free_delayed(struct list_head *h) ++{ ++ while(!list_empty(h)) { ++ struct page *page = list_first_entry(h, struct page, lru); ++ ++ list_del(&page->lru); ++ __free_pages(page, page->index); ++ } ++} ++ ++static void unlock_l3_and_free_delayed(spinlock_t *list_lock) ++{ ++ LIST_HEAD(tmp); ++ ++ list_splice_init(&__get_cpu_var(slab_free_list), &tmp); ++ local_spin_unlock_irq(slab_lock, list_lock); ++ free_delayed(&tmp); ++} ++ ++static void unlock_slab_and_free_delayed(unsigned long flags) ++{ ++ LIST_HEAD(tmp); ++ ++ list_splice_init(&__get_cpu_var(slab_free_list), &tmp); ++ local_unlock_irqrestore(slab_lock, flags); ++ free_delayed(&tmp); ++} + + static inline struct array_cache *cpu_cache_get(struct kmem_cache *cachep) + { + return cachep->array[smp_processor_id()]; + } + ++static inline struct array_cache *cpu_cache_get_on_cpu(struct kmem_cache *cachep, ++ int cpu) ++{ ++ return cachep->array[cpu]; ++} ++ + static inline struct kmem_cache *__find_general_cachep(size_t size, + gfp_t gfpflags) + { +@@ -1021,9 +1114,10 @@ static void reap_alien(struct kmem_cache + if (l3->alien) { + struct array_cache *ac = l3->alien[node]; + +- if (ac && ac->avail && spin_trylock_irq(&ac->lock)) { ++ if (ac && ac->avail && ++ local_spin_trylock_irq(slab_lock, &ac->lock)) { + __drain_alien_cache(cachep, ac, node); +- spin_unlock_irq(&ac->lock); ++ local_spin_unlock_irq(slab_lock, &ac->lock); + } + } + } +@@ -1038,9 +1132,9 @@ static void drain_alien_cache(struct kme + for_each_online_node(i) { + ac = alien[i]; + if (ac) { +- spin_lock_irqsave(&ac->lock, flags); ++ local_spin_lock_irqsave(slab_lock, &ac->lock, flags); + __drain_alien_cache(cachep, ac, i); +- spin_unlock_irqrestore(&ac->lock, flags); ++ local_spin_unlock_irqrestore(slab_lock, &ac->lock, flags); + } + } + } +@@ -1119,11 +1213,11 @@ static int init_cache_nodelists_node(int + cachep->nodelists[node] = l3; + } + +- spin_lock_irq(&cachep->nodelists[node]->list_lock); ++ local_spin_lock_irq(slab_lock, &cachep->nodelists[node]->list_lock); + cachep->nodelists[node]->free_limit = + (1 + nr_cpus_node(node)) * + cachep->batchcount + 
cachep->num; +- spin_unlock_irq(&cachep->nodelists[node]->list_lock); ++ local_spin_unlock_irq(slab_lock, &cachep->nodelists[node]->list_lock); + } + return 0; + } +@@ -1148,7 +1242,7 @@ static void __cpuinit cpuup_canceled(lon + if (!l3) + goto free_array_cache; + +- spin_lock_irq(&l3->list_lock); ++ local_spin_lock_irq(slab_lock, &l3->list_lock); + + /* Free limit for this kmem_list3 */ + l3->free_limit -= cachep->batchcount; +@@ -1156,7 +1250,7 @@ static void __cpuinit cpuup_canceled(lon + free_block(cachep, nc->entry, nc->avail, node); + + if (!cpumask_empty(mask)) { +- spin_unlock_irq(&l3->list_lock); ++ unlock_l3_and_free_delayed(&l3->list_lock); + goto free_array_cache; + } + +@@ -1170,7 +1264,7 @@ static void __cpuinit cpuup_canceled(lon + alien = l3->alien; + l3->alien = NULL; + +- spin_unlock_irq(&l3->list_lock); ++ unlock_l3_and_free_delayed(&l3->list_lock); + + kfree(shared); + if (alien) { +@@ -1244,7 +1338,7 @@ static int __cpuinit cpuup_prepare(long + l3 = cachep->nodelists[node]; + BUG_ON(!l3); + +- spin_lock_irq(&l3->list_lock); ++ local_spin_lock_irq(slab_lock, &l3->list_lock); + if (!l3->shared) { + /* + * We are serialised from CPU_DEAD or +@@ -1259,9 +1353,11 @@ static int __cpuinit cpuup_prepare(long + alien = NULL; + } + #endif +- spin_unlock_irq(&l3->list_lock); ++ local_spin_unlock_irq(slab_lock, &l3->list_lock); + kfree(shared); + free_alien_cache(alien); ++ if (cachep->flags & SLAB_DEBUG_OBJECTS) ++ slab_set_debugobj_lock_classes_node(cachep, node); + } + init_node_lock_keys(node); + +@@ -1448,6 +1544,10 @@ void __init kmem_cache_init(void) + if (num_possible_nodes() == 1) + use_alien_caches = 0; + ++ local_irq_lock_init(slab_lock); ++ for_each_possible_cpu(i) ++ INIT_LIST_HEAD(&per_cpu(slab_free_list, i)); ++ + for (i = 0; i < NUM_INIT_LISTS; i++) { + kmem_list3_init(&initkmem_list3[i]); + if (i < MAX_NUMNODES) +@@ -1625,6 +1725,9 @@ void __init kmem_cache_init_late(void) + { + struct kmem_cache *cachep; + ++ /* Annotate slab for lockdep -- annotate the malloc caches */ ++ init_lock_keys(); ++ + /* 6) resize the head arrays to their final sizes */ + mutex_lock(&cache_chain_mutex); + list_for_each_entry(cachep, &cache_chain, next) +@@ -1635,9 +1738,6 @@ void __init kmem_cache_init_late(void) + /* Done! */ + g_cpucache_up = FULL; + +- /* Annotate slab for lockdep -- annotate the malloc caches */ +- init_lock_keys(); +- + /* + * Register a cpu startup notifier callback that initializes + * cpu_cache_get for all new cpus +@@ -1725,12 +1825,14 @@ static void *kmem_getpages(struct kmem_c + /* + * Interface to system's page release. 
+ */ +-static void kmem_freepages(struct kmem_cache *cachep, void *addr) ++static void kmem_freepages(struct kmem_cache *cachep, void *addr, bool delayed) + { + unsigned long i = (1 << cachep->gfporder); +- struct page *page = virt_to_page(addr); ++ struct page *page, *basepage = virt_to_page(addr); + const unsigned long nr_freed = i; + ++ page = basepage; ++ + kmemcheck_free_shadow(page, cachep->gfporder); + + if (cachep->flags & SLAB_RECLAIM_ACCOUNT) +@@ -1746,7 +1848,13 @@ static void kmem_freepages(struct kmem_c + } + if (current->reclaim_state) + current->reclaim_state->reclaimed_slab += nr_freed; +- free_pages((unsigned long)addr, cachep->gfporder); ++ ++ if (!delayed) { ++ free_pages((unsigned long)addr, cachep->gfporder); ++ } else { ++ basepage->index = cachep->gfporder; ++ list_add(&basepage->lru, &__get_cpu_var(slab_free_list)); ++ } + } + + static void kmem_rcu_free(struct rcu_head *head) +@@ -1754,7 +1862,7 @@ static void kmem_rcu_free(struct rcu_hea + struct slab_rcu *slab_rcu = (struct slab_rcu *)head; + struct kmem_cache *cachep = slab_rcu->cachep; + +- kmem_freepages(cachep, slab_rcu->addr); ++ kmem_freepages(cachep, slab_rcu->addr, false); + if (OFF_SLAB(cachep)) + kmem_cache_free(cachep->slabp_cache, slab_rcu); + } +@@ -1973,7 +2081,8 @@ static void slab_destroy_debugcheck(stru + * Before calling the slab must have been unlinked from the cache. The + * cache-lock is not held/needed. + */ +-static void slab_destroy(struct kmem_cache *cachep, struct slab *slabp) ++static void slab_destroy(struct kmem_cache *cachep, struct slab *slabp, ++ bool delayed) + { + void *addr = slabp->s_mem - slabp->colouroff; + +@@ -1986,7 +2095,7 @@ static void slab_destroy(struct kmem_cac + slab_rcu->addr = addr; + call_rcu(&slab_rcu->head, kmem_rcu_free); + } else { +- kmem_freepages(cachep, addr); ++ kmem_freepages(cachep, addr, delayed); + if (OFF_SLAB(cachep)) + kmem_cache_free(cachep->slabp_cache, slabp); + } +@@ -2424,6 +2533,16 @@ kmem_cache_create (const char *name, siz + goto oops; + } + ++ if (flags & SLAB_DEBUG_OBJECTS) { ++ /* ++ * Would deadlock through slab_destroy()->call_rcu()-> ++ * debug_object_activate()->kmem_cache_alloc(). 
++ */ ++ WARN_ON_ONCE(flags & SLAB_DESTROY_BY_RCU); ++ ++ slab_set_debugobj_lock_classes(cachep); ++ } ++ + /* cache setup completed, link it into the list */ + list_add(&cachep->next, &cache_chain); + oops: +@@ -2441,7 +2560,7 @@ EXPORT_SYMBOL(kmem_cache_create); + #if DEBUG + static void check_irq_off(void) + { +- BUG_ON(!irqs_disabled()); ++ BUG_ON_NONRT(!irqs_disabled()); + } + + static void check_irq_on(void) +@@ -2476,13 +2595,12 @@ static void drain_array(struct kmem_cach + struct array_cache *ac, + int force, int node); + +-static void do_drain(void *arg) ++static void __do_drain(void *arg, unsigned int cpu) + { + struct kmem_cache *cachep = arg; + struct array_cache *ac; +- int node = numa_mem_id(); ++ int node = cpu_to_mem(cpu); + +- check_irq_off(); + ac = cpu_cache_get(cachep); + spin_lock(&cachep->nodelists[node]->list_lock); + free_block(cachep, ac->entry, ac->avail, node); +@@ -2490,12 +2608,30 @@ static void do_drain(void *arg) + ac->avail = 0; + } + ++#ifndef CONFIG_PREEMPT_RT_BASE ++static void do_drain(void *arg) ++{ ++ __do_drain(arg, smp_processor_id()); ++} ++#else ++static void do_drain(void *arg, int cpu) ++{ ++ LIST_HEAD(tmp); ++ ++ spin_lock_irq(&per_cpu(slab_lock, cpu).lock); ++ __do_drain(arg, cpu); ++ list_splice_init(&per_cpu(slab_free_list, cpu), &tmp); ++ spin_unlock_irq(&per_cpu(slab_lock, cpu).lock); ++ free_delayed(&tmp); ++} ++#endif ++ + static void drain_cpu_caches(struct kmem_cache *cachep) + { + struct kmem_list3 *l3; + int node; + +- on_each_cpu(do_drain, cachep, 1); ++ slab_on_each_cpu(do_drain, cachep); + check_irq_on(); + for_each_online_node(node) { + l3 = cachep->nodelists[node]; +@@ -2526,10 +2662,10 @@ static int drain_freelist(struct kmem_ca + nr_freed = 0; + while (nr_freed < tofree && !list_empty(&l3->slabs_free)) { + +- spin_lock_irq(&l3->list_lock); ++ local_spin_lock_irq(slab_lock, &l3->list_lock); + p = l3->slabs_free.prev; + if (p == &l3->slabs_free) { +- spin_unlock_irq(&l3->list_lock); ++ local_spin_unlock_irq(slab_lock, &l3->list_lock); + goto out; + } + +@@ -2543,8 +2679,8 @@ static int drain_freelist(struct kmem_ca + * to the cache. + */ + l3->free_objects -= cache->num; +- spin_unlock_irq(&l3->list_lock); +- slab_destroy(cache, slabp); ++ local_spin_unlock_irq(slab_lock, &l3->list_lock); ++ slab_destroy(cache, slabp, false); + nr_freed++; + } + out: +@@ -2838,7 +2974,7 @@ static int cache_grow(struct kmem_cache + offset *= cachep->colour_off; + + if (local_flags & __GFP_WAIT) +- local_irq_enable(); ++ local_unlock_irq(slab_lock); + + /* + * The test for missing atomic flag is performed here, rather than +@@ -2868,7 +3004,7 @@ static int cache_grow(struct kmem_cache + cache_init_objs(cachep, slabp); + + if (local_flags & __GFP_WAIT) +- local_irq_disable(); ++ local_lock_irq(slab_lock); + check_irq_off(); + spin_lock(&l3->list_lock); + +@@ -2879,10 +3015,10 @@ static int cache_grow(struct kmem_cache + spin_unlock(&l3->list_lock); + return 1; + opps1: +- kmem_freepages(cachep, objp); ++ kmem_freepages(cachep, objp, false); + failed: + if (local_flags & __GFP_WAIT) +- local_irq_disable(); ++ local_lock_irq(slab_lock); + return 0; + } + +@@ -3280,11 +3416,11 @@ retry: + * set and go into memory reserves if necessary. 
+ */ + if (local_flags & __GFP_WAIT) +- local_irq_enable(); ++ local_unlock_irq(slab_lock); + kmem_flagcheck(cache, flags); + obj = kmem_getpages(cache, local_flags, numa_mem_id()); + if (local_flags & __GFP_WAIT) +- local_irq_disable(); ++ local_lock_irq(slab_lock); + if (obj) { + /* + * Insert into the appropriate per node queues +@@ -3400,7 +3536,7 @@ __cache_alloc_node(struct kmem_cache *ca + return NULL; + + cache_alloc_debugcheck_before(cachep, flags); +- local_irq_save(save_flags); ++ local_lock_irqsave(slab_lock, save_flags); + + if (nodeid == -1) + nodeid = slab_node; +@@ -3425,7 +3561,7 @@ __cache_alloc_node(struct kmem_cache *ca + /* ___cache_alloc_node can fall back to other nodes */ + ptr = ____cache_alloc_node(cachep, flags, nodeid); + out: +- local_irq_restore(save_flags); ++ local_unlock_irqrestore(slab_lock, save_flags); + ptr = cache_alloc_debugcheck_after(cachep, flags, ptr, caller); + kmemleak_alloc_recursive(ptr, obj_size(cachep), 1, cachep->flags, + flags); +@@ -3485,9 +3621,9 @@ __cache_alloc(struct kmem_cache *cachep, + return NULL; + + cache_alloc_debugcheck_before(cachep, flags); +- local_irq_save(save_flags); ++ local_lock_irqsave(slab_lock, save_flags); + objp = __do_cache_alloc(cachep, flags); +- local_irq_restore(save_flags); ++ local_unlock_irqrestore(slab_lock, save_flags); + objp = cache_alloc_debugcheck_after(cachep, flags, objp, caller); + kmemleak_alloc_recursive(objp, obj_size(cachep), 1, cachep->flags, + flags); +@@ -3535,7 +3671,7 @@ static void free_block(struct kmem_cache + * a different cache, refer to comments before + * alloc_slabmgmt. + */ +- slab_destroy(cachep, slabp); ++ slab_destroy(cachep, slabp, true); + } else { + list_add(&slabp->list, &l3->slabs_free); + } +@@ -3798,12 +3934,12 @@ void kmem_cache_free(struct kmem_cache * + { + unsigned long flags; + +- local_irq_save(flags); + debug_check_no_locks_freed(objp, obj_size(cachep)); + if (!(cachep->flags & SLAB_DEBUG_OBJECTS)) + debug_check_no_obj_freed(objp, obj_size(cachep)); ++ local_lock_irqsave(slab_lock, flags); + __cache_free(cachep, objp, __builtin_return_address(0)); +- local_irq_restore(flags); ++ unlock_slab_and_free_delayed(flags); + + trace_kmem_cache_free(_RET_IP_, objp); + } +@@ -3827,13 +3963,13 @@ void kfree(const void *objp) + + if (unlikely(ZERO_OR_NULL_PTR(objp))) + return; +- local_irq_save(flags); + kfree_debugcheck(objp); + c = virt_to_cache(objp); + debug_check_no_locks_freed(objp, obj_size(c)); + debug_check_no_obj_freed(objp, obj_size(c)); ++ local_lock_irqsave(slab_lock, flags); + __cache_free(c, (void *)objp, __builtin_return_address(0)); +- local_irq_restore(flags); ++ unlock_slab_and_free_delayed(flags); + } + EXPORT_SYMBOL(kfree); + +@@ -3876,7 +4012,7 @@ static int alloc_kmemlist(struct kmem_ca + if (l3) { + struct array_cache *shared = l3->shared; + +- spin_lock_irq(&l3->list_lock); ++ local_spin_lock_irq(slab_lock, &l3->list_lock); + + if (shared) + free_block(cachep, shared->entry, +@@ -3889,7 +4025,8 @@ static int alloc_kmemlist(struct kmem_ca + } + l3->free_limit = (1 + nr_cpus_node(node)) * + cachep->batchcount + cachep->num; +- spin_unlock_irq(&l3->list_lock); ++ unlock_l3_and_free_delayed(&l3->list_lock); ++ + kfree(shared); + free_alien_cache(new_alien); + continue; +@@ -3936,17 +4073,30 @@ struct ccupdate_struct { + struct array_cache *new[NR_CPUS]; + }; + +-static void do_ccupdate_local(void *info) ++static void __do_ccupdate_local(void *info, int cpu) + { + struct ccupdate_struct *new = info; + struct array_cache *old; + +- check_irq_off(); +- old 
= cpu_cache_get(new->cachep); ++ old = cpu_cache_get_on_cpu(new->cachep, cpu); + +- new->cachep->array[smp_processor_id()] = new->new[smp_processor_id()]; +- new->new[smp_processor_id()] = old; ++ new->cachep->array[cpu] = new->new[cpu]; ++ new->new[cpu] = old; ++} ++ ++#ifndef CONFIG_PREEMPT_RT_BASE ++static void do_ccupdate_local(void *info) ++{ ++ __do_ccupdate_local(info, smp_processor_id()); ++} ++#else ++static void do_ccupdate_local(void *info, int cpu) ++{ ++ spin_lock_irq(&per_cpu(slab_lock, cpu).lock); ++ __do_ccupdate_local(info, cpu); ++ spin_unlock_irq(&per_cpu(slab_lock, cpu).lock); + } ++#endif + + /* Always called with the cache_chain_mutex held */ + static int do_tune_cpucache(struct kmem_cache *cachep, int limit, +@@ -3971,7 +4121,7 @@ static int do_tune_cpucache(struct kmem_ + } + new->cachep = cachep; + +- on_each_cpu(do_ccupdate_local, (void *)new, 1); ++ slab_on_each_cpu(do_ccupdate_local, (void *)new); + + check_irq_on(); + cachep->batchcount = batchcount; +@@ -3982,9 +4132,11 @@ static int do_tune_cpucache(struct kmem_ + struct array_cache *ccold = new->new[i]; + if (!ccold) + continue; +- spin_lock_irq(&cachep->nodelists[cpu_to_mem(i)]->list_lock); ++ local_spin_lock_irq(slab_lock, ++ &cachep->nodelists[cpu_to_mem(i)]->list_lock); + free_block(cachep, ccold->entry, ccold->avail, cpu_to_mem(i)); +- spin_unlock_irq(&cachep->nodelists[cpu_to_mem(i)]->list_lock); ++ ++ unlock_l3_and_free_delayed(&cachep->nodelists[cpu_to_mem(i)]->list_lock); + kfree(ccold); + } + kfree(new); +@@ -4060,7 +4212,7 @@ static void drain_array(struct kmem_cach + if (ac->touched && !force) { + ac->touched = 0; + } else { +- spin_lock_irq(&l3->list_lock); ++ local_spin_lock_irq(slab_lock, &l3->list_lock); + if (ac->avail) { + tofree = force ? ac->avail : (ac->limit + 4) / 5; + if (tofree > ac->avail) +@@ -4070,7 +4222,7 @@ static void drain_array(struct kmem_cach + memmove(ac->entry, &(ac->entry[tofree]), + sizeof(void *) * ac->avail); + } +- spin_unlock_irq(&l3->list_lock); ++ local_spin_unlock_irq(slab_lock, &l3->list_lock); + } + } + +@@ -4209,7 +4361,7 @@ static int s_show(struct seq_file *m, vo + continue; + + check_irq_on(); +- spin_lock_irq(&l3->list_lock); ++ local_spin_lock_irq(slab_lock, &l3->list_lock); + + list_for_each_entry(slabp, &l3->slabs_full, list) { + if (slabp->inuse != cachep->num && !error) +@@ -4234,7 +4386,7 @@ static int s_show(struct seq_file *m, vo + if (l3->shared) + shared_avail += l3->shared->avail; + +- spin_unlock_irq(&l3->list_lock); ++ local_spin_unlock_irq(slab_lock, &l3->list_lock); + } + num_slabs += active_slabs; + num_objs = num_slabs * cachep->num; +@@ -4463,13 +4615,13 @@ static int leaks_show(struct seq_file *m + continue; + + check_irq_on(); +- spin_lock_irq(&l3->list_lock); ++ local_spin_lock_irq(slab_lock, &l3->list_lock); + + list_for_each_entry(slabp, &l3->slabs_full, list) + handle_slab(n, cachep, slabp); + list_for_each_entry(slabp, &l3->slabs_partial, list) + handle_slab(n, cachep, slabp); +- spin_unlock_irq(&l3->list_lock); ++ local_spin_unlock_irq(slab_lock, &l3->list_lock); + } + name = cachep->name; + if (n[0] == n[1]) { +Index: linux-2.6/kernel/lockdep.c +=================================================================== +--- linux-2.6.orig/kernel/lockdep.c ++++ linux-2.6/kernel/lockdep.c +@@ -2859,10 +2859,7 @@ static int mark_lock(struct task_struct + void lockdep_init_map(struct lockdep_map *lock, const char *name, + struct lock_class_key *key, int subclass) + { +- int i; +- +- for (i = 0; i < NR_LOCKDEP_CACHING_CLASSES; i++) +- 
lock->class_cache[i] = NULL; ++ memset(lock, 0, sizeof(*lock)); + + #ifdef CONFIG_LOCK_STAT + lock->cpu = raw_smp_processor_id(); +@@ -3341,6 +3338,7 @@ static void check_flags(unsigned long fl + } + } + ++#ifndef CONFIG_PREEMPT_RT_FULL + /* + * We dont accurately track softirq state in e.g. + * hardirq contexts (such as on 4KSTACKS), so only +@@ -3352,6 +3350,7 @@ static void check_flags(unsigned long fl + else + DEBUG_LOCKS_WARN_ON(!current->softirqs_enabled); + } ++#endif + + if (!debug_locks) + print_irqtrace_events(current); Index: linux-2.6/kernel/trace/ftrace.c =================================================================== --- linux-2.6.orig/kernel/trace/ftrace.c @@ -132,7 +1097,7 @@ Index: linux-2.6/drivers/block/floppy.c current_drive = 0; initialized = true; if (have_no_fdc) { -@@ -4368,7 +4368,7 @@ static int __init floppy_init(void) +@@ -4368,7 +4368,7 @@ out_unreg_blkdev: unregister_blkdev(FLOPPY_MAJOR, "fd"); out_put_disk: while (dr--) { @@ -301,7 +1266,7 @@ Index: linux-2.6/kernel/sched.c success = 1; /* we're going to change ->state */ cpu = task_cpu(p); -@@ -2735,40 +2754,6 @@ try_to_wake_up(struct task_struct *p, un +@@ -2735,40 +2754,6 @@ out: } /** @@ -424,7 +1389,7 @@ Index: linux-2.6/kernel/sched.c { struct task_struct *prev, *next; unsigned long *switch_count; -@@ -4272,29 +4273,6 @@ asmlinkage void __sched schedule(void) +@@ -4272,29 +4273,6 @@ need_resched: } else { deactivate_task(rq, prev, DEQUEUE_SLEEP); prev->on_rq = 0; @@ -454,7 +1419,7 @@ Index: linux-2.6/kernel/sched.c } switch_count = &prev->nvcsw; } -@@ -4328,12 +4306,62 @@ asmlinkage void __sched schedule(void) +@@ -4328,12 +4306,62 @@ need_resched: post_schedule(rq); @@ -682,7 +1647,7 @@ Index: linux-2.6/kernel/sched.c goto out; dest_cpu = cpumask_any_and(cpu_active_mask, new_mask); -@@ -6072,6 +6127,101 @@ int set_cpus_allowed_ptr(struct task_str +@@ -6072,6 +6127,124 @@ out: } EXPORT_SYMBOL_GPL(set_cpus_allowed_ptr); @@ -694,6 +1659,17 @@ Index: linux-2.6/kernel/sched.c + unsigned long flags; + struct rq *rq; + ++ if (in_atomic()) { ++#ifdef CONFIG_SCHED_DEBUG ++ p->migrate_disable_atomic++; ++#endif ++ return; ++ } ++ ++#ifdef CONFIG_SCHED_DEBUG ++ WARN_ON_ONCE(p->migrate_disable_atomic); ++#endif ++ + preempt_disable(); + if (p->migrate_disable) { + p->migrate_disable++; @@ -742,6 +1718,16 @@ Index: linux-2.6/kernel/sched.c + unsigned long flags; + struct rq *rq; + ++ if (in_atomic()) { ++#ifdef CONFIG_SCHED_DEBUG ++ p->migrate_disable_atomic--; ++#endif ++ return; ++ } ++ ++#ifdef CONFIG_SCHED_DEBUG ++ WARN_ON_ONCE(p->migrate_disable_atomic); ++#endif + WARN_ON_ONCE(p->migrate_disable <= 0); + + preempt_disable(); @@ -763,12 +1749,14 @@ Index: linux-2.6/kernel/sched.c + */ + rq = this_rq(); + raw_spin_lock_irqsave(&rq->lock, flags); -+ p->migrate_disable = 0; + mask = tsk_cpus_allowed(p); ++ p->migrate_disable = 0; + + WARN_ON(!cpumask_test_cpu(smp_processor_id(), mask)); + + if (!cpumask_equal(&p->cpus_allowed, mask)) { ++ /* Get the mask now that migration is enabled */ ++ mask = tsk_cpus_allowed(p); + if (p->sched_class->set_cpus_allowed) + p->sched_class->set_cpus_allowed(p, mask); + p->rt.nr_cpus_allowed = cpumask_weight(mask); @@ -784,7 +1772,7 @@ Index: linux-2.6/kernel/sched.c /* * Move (not current) task off this cpu, onto dest cpu. 
We're doing * this because either it can't run here any more (set_cpus_allowed() -@@ -6100,7 +6250,7 @@ static int __migrate_task(struct task_st +@@ -6100,7 +6273,7 @@ static int __migrate_task(struct task_st if (task_cpu(p) != src_cpu) goto done; /* Affinity changed (again). */ @@ -793,7 +1781,7 @@ Index: linux-2.6/kernel/sched.c goto fail; /* -@@ -6142,6 +6292,8 @@ static int migration_cpu_stop(void *data +@@ -6142,6 +6315,8 @@ static int migration_cpu_stop(void *data #ifdef CONFIG_HOTPLUG_CPU @@ -802,7 +1790,7 @@ Index: linux-2.6/kernel/sched.c /* * Ensures that the idle task is using init_mm right before its cpu goes * offline. -@@ -6154,7 +6306,12 @@ void idle_task_exit(void) +@@ -6154,7 +6329,12 @@ void idle_task_exit(void) if (mm != &init_mm) switch_mm(mm, &init_mm, current); @@ -816,7 +1804,7 @@ Index: linux-2.6/kernel/sched.c } /* -@@ -6472,6 +6629,12 @@ migration_call(struct notifier_block *nf +@@ -6472,6 +6652,12 @@ migration_call(struct notifier_block *nf migrate_nr_uninterruptible(rq); calc_global_load_remove(rq); break; @@ -829,7 +1817,7 @@ Index: linux-2.6/kernel/sched.c #endif } -@@ -8188,7 +8351,8 @@ void __init sched_init(void) +@@ -8188,7 +8374,8 @@ void __init sched_init(void) #ifdef CONFIG_DEBUG_SPINLOCK_SLEEP static inline int preempt_count_equals(int preempt_offset) { @@ -879,7 +1867,7 @@ Index: linux-2.6/block/blk-core.c } } -@@ -2700,7 +2704,6 @@ static void flush_plug_callbacks(struct +@@ -2700,7 +2704,6 @@ static void flush_plug_callbacks(struct void blk_flush_plug_list(struct blk_plug *plug, bool from_schedule) { struct request_queue *q; @@ -1230,7 +2218,7 @@ Index: linux-2.6/arch/x86/kernel/cpu/intel_cacheinfo.c unsigned slot, unsigned long idx) { int i; -@@ -421,10 +393,10 @@ static void amd_l3_disable_index(struct +@@ -421,10 +393,10 @@ static void amd_l3_disable_index(struct for (i = 0; i < 4; i++) { u32 reg = idx | (i << 20); @@ -1243,7 +2231,7 @@ Index: linux-2.6/arch/x86/kernel/cpu/intel_cacheinfo.c /* * We need to WBINVD on a core on the node containing the L3 -@@ -434,7 +406,7 @@ static void amd_l3_disable_index(struct +@@ -434,7 +406,7 @@ static void amd_l3_disable_index(struct wbinvd_on_cpu(cpu); reg |= BIT(31); @@ -1252,7 +2240,7 @@ Index: linux-2.6/arch/x86/kernel/cpu/intel_cacheinfo.c } } -@@ -448,24 +420,24 @@ static void amd_l3_disable_index(struct +@@ -448,24 +420,24 @@ static void amd_l3_disable_index(struct * * @return: 0 on success, error status on failure */ @@ -1534,7 +2522,7 @@ Index: linux-2.6/arch/mips/ar7/irq.c =================================================================== --- linux-2.6.orig/arch/mips/ar7/irq.c +++ linux-2.6/arch/mips/ar7/irq.c -@@ -98,7 +98,8 @@ static struct irq_chip ar7_sec_irq_type +@@ -98,7 +98,8 @@ static struct irq_chip ar7_sec_irq_type static struct irqaction ar7_cascade_action = { .handler = no_action, @@ -1665,7 +2653,7 @@ Index: linux-2.6/arch/mips/pnx8550/common/int.c =================================================================== --- linux-2.6.orig/arch/mips/pnx8550/common/int.c +++ linux-2.6/arch/mips/pnx8550/common/int.c -@@ -167,7 +167,7 @@ static struct irq_chip level_irq_type = +@@ -167,7 +167,7 @@ static struct irq_chip level_irq_type = static struct irqaction gic_action = { .handler = no_action, @@ -1820,6 +2808,79 @@ Index: linux-2.6/arch/arm/kernel/signal.c /* * If we were from a system call, check for system call restarting... 
*/ +Index: linux-2.6/arch/arm/kernel/smp.c +=================================================================== +--- linux-2.6.orig/arch/arm/kernel/smp.c ++++ linux-2.6/arch/arm/kernel/smp.c +@@ -305,6 +305,18 @@ asmlinkage void __cpuinit secondary_star + * Enable local interrupts. + */ + notify_cpu_starting(cpu); ++ ++ /* ++ * OK, now it's safe to let the boot CPU continue. Wait for ++ * the CPU migration code to notice that the CPU is online ++ * before we continue. We need to do that before we enable ++ * interrupts otherwise a wakeup of a kernel thread affine to ++ * this CPU might break the affinity and let hell break lose. ++ */ ++ set_cpu_online(cpu, true); ++ while (!cpu_active(cpu)) ++ cpu_relax(); ++ + local_irq_enable(); + local_fiq_enable(); + +@@ -318,15 +330,6 @@ asmlinkage void __cpuinit secondary_star + smp_store_cpu_info(cpu); + + /* +- * OK, now it's safe to let the boot CPU continue. Wait for +- * the CPU migration code to notice that the CPU is online +- * before we continue. +- */ +- set_cpu_online(cpu, true); +- while (!cpu_active(cpu)) +- cpu_relax(); +- +- /* + * OK, it's off to the idle thread for us + */ + cpu_idle(); +@@ -531,7 +534,7 @@ static void percpu_timer_stop(void) + } + #endif + +-static DEFINE_SPINLOCK(stop_lock); ++static DEFINE_RAW_SPINLOCK(stop_lock); + + /* + * ipi_cpu_stop - handle IPI from smp_send_stop() +@@ -540,10 +543,10 @@ static void ipi_cpu_stop(unsigned int cp + { + if (system_state == SYSTEM_BOOTING || + system_state == SYSTEM_RUNNING) { +- spin_lock(&stop_lock); ++ raw_spin_lock(&stop_lock); + printk(KERN_CRIT "CPU%u: stopping\n", cpu); + dump_stack(); +- spin_unlock(&stop_lock); ++ raw_spin_unlock(&stop_lock); + } + + set_cpu_online(cpu, false); +Index: linux-2.6/arch/mips/kernel/i8259.c +=================================================================== +--- linux-2.6.orig/arch/mips/kernel/i8259.c ++++ linux-2.6/arch/mips/kernel/i8259.c +@@ -295,6 +295,7 @@ static void init_8259A(int auto_eoi) + static struct irqaction irq2 = { + .handler = no_action, + .name = "cascade", ++ .flags = IRQF_NO_THREAD, + }; + + static struct resource pic1_io_resource = { Index: linux-2.6/kernel/time/clocksource.c =================================================================== --- linux-2.6.orig/kernel/time/clocksource.c @@ -2038,7 +3099,7 @@ Index: linux-2.6/kernel/rtmutex-debug.c static void printk_task(struct task_struct *p) { if (p) -@@ -111,8 +56,8 @@ static void printk_lock(struct rt_mutex +@@ -111,8 +56,8 @@ static void printk_lock(struct rt_mutex void rt_mutex_debug_task_free(struct task_struct *task) { @@ -2248,12 +3309,15 @@ Index: linux-2.6/include/linux/sched.h void *stack; atomic_t usage; unsigned int flags; /* per process flags, defined below */ -@@ -1255,14 +1260,14 @@ struct task_struct { +@@ -1255,14 +1260,17 @@ struct task_struct { #endif unsigned int policy; +#ifdef CONFIG_PREEMPT_RT_FULL + int migrate_disable; ++#ifdef CONFIG_SCHED_DEBUG ++ int migrate_disable_atomic; ++#endif +#endif cpumask_t cpus_allowed; @@ -2266,7 +3330,7 @@ Index: linux-2.6/include/linux/sched.h struct list_head rcu_node_entry; #endif /* #ifdef CONFIG_PREEMPT_RCU */ #ifdef CONFIG_TREE_PREEMPT_RCU -@@ -1356,6 +1361,9 @@ struct task_struct { +@@ -1356,6 +1364,9 @@ struct task_struct { struct task_cputime cputime_expires; struct list_head cpu_timers[3]; @@ -2276,7 +3340,7 @@ Index: linux-2.6/include/linux/sched.h /* process credentials */ const struct cred __rcu *real_cred; /* objective and real subjective task -@@ -1389,6 +1397,7 @@ struct task_struct { +@@ 
-1389,6 +1400,7 @@ struct task_struct { /* signal handlers */ struct signal_struct *signal; struct sighand_struct *sighand; @@ -2284,7 +3348,7 @@ Index: linux-2.6/include/linux/sched.h sigset_t blocked, real_blocked; sigset_t saved_sigmask; /* restored if set_restore_sigmask() was used */ -@@ -1432,6 +1441,9 @@ struct task_struct { +@@ -1432,6 +1444,9 @@ struct task_struct { /* mutex deadlock detection */ struct mutex_waiter *blocked_on; #endif @@ -2294,7 +3358,7 @@ Index: linux-2.6/include/linux/sched.h #ifdef CONFIG_TRACE_IRQFLAGS unsigned int irq_events; unsigned long hardirq_enable_ip; -@@ -1558,6 +1570,12 @@ struct task_struct { +@@ -1558,6 +1573,12 @@ struct task_struct { unsigned long trace; /* bitmask and counter of trace recursion */ unsigned long trace_recursion; @@ -2307,7 +3371,7 @@ Index: linux-2.6/include/linux/sched.h #endif /* CONFIG_TRACING */ #ifdef CONFIG_CGROUP_MEM_RES_CTLR /* memcg uses this to do batch job */ struct memcg_batch_info { -@@ -1570,10 +1588,24 @@ struct task_struct { +@@ -1570,10 +1591,26 @@ struct task_struct { #ifdef CONFIG_HAVE_HW_BREAKPOINT atomic_t ptrace_bp_refcnt; #endif @@ -2323,18 +3387,20 @@ Index: linux-2.6/include/linux/sched.h -/* Future-safe accessor for struct task_struct's cpus_allowed. */ -#define tsk_cpus_allowed(tsk) (&(tsk)->cpus_allowed) ++#ifdef CONFIG_PREEMPT_RT_FULL ++static inline bool cur_pf_disabled(void) { return current->pagefault_disabled; } ++#else ++static inline bool cur_pf_disabled(void) { return false; } ++#endif ++ +static inline bool pagefault_disabled(void) +{ -+ return in_atomic() -+#ifdef CONFIG_PREEMPT_RT_FULL -+ || current->pagefault_disabled -+#endif -+ ; ++ return in_atomic() || cur_pf_disabled(); +} /* * Priority of a process goes from 0..MAX_PRIO-1, valid RT -@@ -1743,6 +1775,15 @@ extern struct pid *cad_pid; +@@ -1743,6 +1780,15 @@ extern struct pid *cad_pid; extern void free_task(struct task_struct *tsk); #define get_task_struct(tsk) do { atomic_inc(&(tsk)->usage); } while(0) @@ -2350,7 +3416,7 @@ Index: linux-2.6/include/linux/sched.h extern void __put_task_struct(struct task_struct *t); static inline void put_task_struct(struct task_struct *t) -@@ -1750,6 +1791,7 @@ static inline void put_task_struct(struc +@@ -1750,6 +1796,7 @@ static inline void put_task_struct(struc if (atomic_dec_and_test(&t->usage)) __put_task_struct(t); } @@ -2358,7 +3424,7 @@ Index: linux-2.6/include/linux/sched.h extern void task_times(struct task_struct *p, cputime_t *ut, cputime_t *st); extern void thread_group_times(struct task_struct *p, cputime_t *ut, cputime_t *st); -@@ -1774,6 +1816,7 @@ extern void thread_group_times(struct ta +@@ -1774,6 +1821,7 @@ extern void thread_group_times(struct ta #define PF_FROZEN 0x00010000 /* frozen for system suspend */ #define PF_FSTRANS 0x00020000 /* inside a filesystem transaction */ #define PF_KSWAPD 0x00040000 /* I am kswapd */ @@ -2366,7 +3432,7 @@ Index: linux-2.6/include/linux/sched.h #define PF_LESS_THROTTLE 0x00100000 /* Throttle me less: I clean memory */ #define PF_KTHREAD 0x00200000 /* I am a kernel thread */ #define PF_RANDOMIZE 0x00400000 /* randomize virtual address space */ -@@ -2022,15 +2065,27 @@ static inline void sched_autogroup_exit( +@@ -2022,15 +2070,27 @@ static inline void sched_autogroup_exit( #endif #ifdef CONFIG_RT_MUTEXES @@ -2395,7 +3461,7 @@ Index: linux-2.6/include/linux/sched.h #endif extern bool yield_to(struct task_struct *p, bool preempt); -@@ -2110,6 +2165,7 @@ extern void xtime_update(unsigned long t +@@ -2110,6 +2170,7 @@ extern void xtime_update(unsigned 
long t extern int wake_up_state(struct task_struct *tsk, unsigned int state); extern int wake_up_process(struct task_struct *tsk); @@ -2403,7 +3469,7 @@ Index: linux-2.6/include/linux/sched.h extern void wake_up_new_task(struct task_struct *tsk); #ifdef CONFIG_SMP extern void kick_process(struct task_struct *tsk); -@@ -2199,12 +2255,24 @@ extern struct mm_struct * mm_alloc(void) +@@ -2199,12 +2260,24 @@ extern struct mm_struct * mm_alloc(void) /* mmdrop drops the mm and the page tables */ extern void __mmdrop(struct mm_struct *); @@ -2428,7 +3494,7 @@ Index: linux-2.6/include/linux/sched.h /* mmput gets rid of the mappings and all user-space */ extern void mmput(struct mm_struct *); /* Grab a reference to a task's mm, if it is not already going away */ -@@ -2510,7 +2578,7 @@ extern int _cond_resched(void); +@@ -2510,7 +2583,7 @@ extern int _cond_resched(void); extern int __cond_resched_lock(spinlock_t *lock); @@ -2437,7 +3503,7 @@ Index: linux-2.6/include/linux/sched.h #define PREEMPT_LOCK_OFFSET PREEMPT_OFFSET #else #define PREEMPT_LOCK_OFFSET 0 -@@ -2521,12 +2589,16 @@ extern int __cond_resched_lock(spinlock_ +@@ -2521,12 +2594,16 @@ extern int __cond_resched_lock(spinlock_ __cond_resched_lock(lock); \ }) @@ -2454,7 +3520,7 @@ Index: linux-2.6/include/linux/sched.h /* * Does a critical section need to be broken due to another -@@ -2550,7 +2622,7 @@ void thread_group_cputimer(struct task_s +@@ -2550,7 +2627,7 @@ void thread_group_cputimer(struct task_s static inline void thread_group_cputime_init(struct signal_struct *sig) { @@ -2463,7 +3529,7 @@ Index: linux-2.6/include/linux/sched.h } /* -@@ -2589,6 +2661,26 @@ static inline void set_task_cpu(struct t +@@ -2589,6 +2666,26 @@ static inline void set_task_cpu(struct t #endif /* CONFIG_SMP */ @@ -2879,7 +3945,7 @@ Index: linux-2.6/arch/x86/kernel/process_32.c } } -@@ -348,6 +347,41 @@ __switch_to(struct task_struct *prev_p, +@@ -348,6 +347,41 @@ __switch_to(struct task_struct *prev_p, task_thread_info(next_p)->flags & _TIF_WORK_CTXSW_NEXT)) __switch_to_xtra(prev_p, next_p, tss); @@ -2986,7 +4052,7 @@ Index: linux-2.6/kernel/mutex.c =================================================================== --- linux-2.6.orig/kernel/mutex.c +++ linux-2.6/kernel/mutex.c -@@ -240,9 +240,7 @@ __mutex_lock_common(struct mutex *lock, +@@ -240,9 +240,7 @@ __mutex_lock_common(struct mutex *lock, /* didn't get the lock, go to sleep: */ spin_unlock_mutex(&lock->wait_lock, flags); @@ -3130,7 +4196,7 @@ Index: linux-2.6/kernel/softirq.c lockdep_softirq_enter(); cpu = smp_processor_id(); -@@ -223,36 +313,7 @@ asmlinkage void __do_softirq(void) +@@ -223,36 +313,7 @@ restart: /* Reset the pending bitmask before enabling irqs */ set_softirq_pending(0); @@ -3168,7 +4234,7 @@ Index: linux-2.6/kernel/softirq.c pending = local_softirq_pending(); if (pending && --max_restart) -@@ -267,6 +328,26 @@ asmlinkage void __do_softirq(void) +@@ -267,6 +328,26 @@ restart: __local_bh_enable(SOFTIRQ_OFFSET); } @@ -3585,7 +4651,7 @@ Index: linux-2.6/kernel/kprobes.c } return 0; } -@@ -1708,7 +1708,7 @@ int __kprobes register_kretprobe(struct +@@ -1708,7 +1708,7 @@ int __kprobes register_kretprobe(struct rp->maxactive = num_possible_cpus(); #endif } @@ -3701,7 +4767,7 @@ Index: linux-2.6/kernel/cgroup.c static void cgroup_release_agent(struct work_struct *work); static DECLARE_WORK(release_agent_work, cgroup_release_agent); static void check_for_release(struct cgroup *cgrp); -@@ -4010,11 +4010,11 @@ static int cgroup_rmdir(struct inode *un +@@ -4010,11 +4010,11 @@ again: 
finish_wait(&cgroup_rmdir_waitq, &wait); clear_bit(CGRP_WAIT_ON_RMDIR, &cgrp->flags); @@ -3731,7 +4797,7 @@ Index: linux-2.6/kernel/cgroup.c if (need_schedule_work) schedule_work(&release_agent_work); } -@@ -4725,7 +4725,7 @@ static void cgroup_release_agent(struct +@@ -4725,7 +4725,7 @@ static void cgroup_release_agent(struct { BUG_ON(work != &release_agent_work); mutex_lock(&cgroup_mutex); @@ -3740,7 +4806,7 @@ Index: linux-2.6/kernel/cgroup.c while (!list_empty(&release_list)) { char *argv[3], *envp[3]; int i; -@@ -4734,7 +4734,7 @@ static void cgroup_release_agent(struct +@@ -4734,7 +4734,7 @@ static void cgroup_release_agent(struct struct cgroup, release_list); list_del_init(&cgrp->release_list); @@ -3749,7 +4815,7 @@ Index: linux-2.6/kernel/cgroup.c pathbuf = kmalloc(PAGE_SIZE, GFP_KERNEL); if (!pathbuf) goto continue_free; -@@ -4764,9 +4764,9 @@ static void cgroup_release_agent(struct +@@ -4764,9 +4764,9 @@ static void cgroup_release_agent(struct continue_free: kfree(pathbuf); kfree(agentbuf); @@ -4068,7 +5134,36 @@ Index: linux-2.6/kernel/trace/trace.c /** * trace_wake_up - wake up tasks waiting for trace input -@@ -958,7 +958,7 @@ void tracing_start(void) +@@ -351,6 +351,7 @@ static DEFINE_SPINLOCK(tracing_start_loc + */ + void trace_wake_up(void) + { ++#ifndef CONFIG_PREEMPT_RT_FULL + int cpu; + + if (trace_flags & TRACE_ITER_BLOCK) +@@ -363,6 +364,7 @@ void trace_wake_up(void) + if (!runqueue_is_locked(cpu)) + wake_up(&trace_wait); + put_cpu(); ++#endif + } + + static int __init set_buf_size(char *str) +@@ -716,6 +718,12 @@ update_max_tr_single(struct trace_array + } + #endif /* CONFIG_TRACER_MAX_TRACE */ + ++#ifndef CONFIG_PREEMPT_RT_FULL ++static void default_wait_pipe(struct trace_iterator *iter); ++#else ++#define default_wait_pipe poll_wait_pipe ++#endif ++ + /** + * register_tracer - register a tracer with the ftrace system. + * @type - the plugin for the tracer +@@ -958,7 +966,7 @@ void tracing_start(void) if (tracing_disabled) return; @@ -4077,7 +5172,7 @@ Index: linux-2.6/kernel/trace/trace.c if (--trace_stop_count) { if (trace_stop_count < 0) { /* Someone screwed up their debugging */ -@@ -983,7 +983,7 @@ void tracing_start(void) +@@ -983,7 +991,7 @@ void tracing_start(void) ftrace_start(); out: @@ -4086,7 +5181,7 @@ Index: linux-2.6/kernel/trace/trace.c } /** -@@ -998,7 +998,7 @@ void tracing_stop(void) +@@ -998,7 +1006,7 @@ void tracing_stop(void) unsigned long flags; ftrace_stop(); @@ -4095,7 +5190,7 @@ Index: linux-2.6/kernel/trace/trace.c if (trace_stop_count++) goto out; -@@ -1016,7 +1016,7 @@ void tracing_stop(void) +@@ -1016,7 +1024,7 @@ void tracing_stop(void) arch_spin_unlock(&ftrace_max_lock); out: @@ -4104,7 +5199,7 @@ Index: linux-2.6/kernel/trace/trace.c } void trace_stop_cmdline_recording(void); -@@ -1120,6 +1120,8 @@ tracing_generic_entry_update(struct trac +@@ -1120,6 +1128,8 @@ tracing_generic_entry_update(struct trac ((pc & HARDIRQ_MASK) ? TRACE_FLAG_HARDIRQ : 0) | ((pc & SOFTIRQ_MASK) ? TRACE_FLAG_SOFTIRQ : 0) | (need_resched() ? 
TRACE_FLAG_NEED_RESCHED : 0); @@ -4113,7 +5208,7 @@ Index: linux-2.6/kernel/trace/trace.c } EXPORT_SYMBOL_GPL(tracing_generic_entry_update); -@@ -1757,9 +1759,10 @@ static void print_lat_help_header(struct +@@ -1757,9 +1767,10 @@ static void print_lat_help_header(struct seq_puts(m, "# | / _----=> need-resched \n"); seq_puts(m, "# || / _---=> hardirq/softirq \n"); seq_puts(m, "# ||| / _--=> preempt-depth \n"); @@ -4127,6 +5222,45 @@ Index: linux-2.6/kernel/trace/trace.c } static void print_func_help_header(struct seq_file *m) +@@ -3067,6 +3078,7 @@ static int tracing_release_pipe(struct i + return 0; + } + ++#ifndef CONFIG_PREEMPT_RT_FULL + static unsigned int + tracing_poll_pipe(struct file *filp, poll_table *poll_table) + { +@@ -3088,8 +3100,7 @@ tracing_poll_pipe(struct file *filp, pol + } + } + +- +-void default_wait_pipe(struct trace_iterator *iter) ++static void default_wait_pipe(struct trace_iterator *iter) + { + DEFINE_WAIT(wait); + +@@ -3100,6 +3111,20 @@ void default_wait_pipe(struct trace_iter + + finish_wait(&trace_wait, &wait); + } ++#else ++static unsigned int ++tracing_poll_pipe(struct file *filp, poll_table *poll_table) ++{ ++ struct trace_iterator *iter = filp->private_data; ++ ++ if ((trace_flags & TRACE_ITER_BLOCK) || !trace_empty(iter)) ++ return POLLIN | POLLRDNORM; ++ poll_wait_pipe(iter); ++ if (!trace_empty(iter)) ++ return POLLIN | POLLRDNORM; ++ return 0; ++} ++#endif + + /* + * This is a make-shift waitqueue. Index: linux-2.6/kernel/trace/trace_irqsoff.c =================================================================== --- linux-2.6.orig/kernel/trace/trace_irqsoff.c @@ -4217,7 +5351,7 @@ Index: linux-2.6/kernel/trace/trace_irqsoff.c if (!preempt_trace() && irq_trace()) stop_critical_timing(CALLER_ADDR0, caller_addr); } -@@ -494,6 +502,7 @@ void trace_hardirqs_off_caller(unsigned +@@ -494,6 +502,7 @@ void trace_hardirqs_off_caller(unsigned { if (!preempt_trace() && irq_trace()) start_critical_timing(CALLER_ADDR0, caller_addr); @@ -4372,7 +5506,7 @@ Index: linux-2.6/kernel/printk.c for_each_console(con) { if (exclusive_console && con != exclusive_console) continue; -@@ -517,7 +511,54 @@ static void __call_console_drivers(unsig +@@ -517,8 +511,62 @@ static void __call_console_drivers(unsig (con->flags & CON_ANYTIME))) con->write(con, &LOG_BUF(start), end - start); } @@ -4406,28 +5540,36 @@ Index: linux-2.6/kernel/printk.c + */ +static int __read_mostly printk_killswitch; + ++static int __init force_early_printk_setup(char *str) ++{ ++ printk_killswitch = 1; ++ return 0; ++} ++early_param("force_early_printk", force_early_printk_setup); ++ +void printk_kill(void) +{ + printk_killswitch = 1; -+} -+ + } + +static int forced_early_printk(const char *fmt, va_list ap) +{ + if (!printk_killswitch) + return 0; + early_vprintk(fmt, ap); + return 1; - } ++} +#else +static inline int forced_early_printk(const char *fmt, va_list ap) +{ + return 0; +} +#endif - ++ static int __read_mostly ignore_loglevel; -@@ -687,7 +728,7 @@ static void zap_locks(void) + static int __init ignore_loglevel_setup(char *str) +@@ -687,7 +735,7 @@ static void zap_locks(void) oops_timestamp = jiffies; /* If a crash is occurring, make sure we can't deadlock */ @@ -4436,7 +5578,7 @@ Index: linux-2.6/kernel/printk.c /* And make sure that we print immediately */ sema_init(&console_sem, 1); } -@@ -779,12 +820,18 @@ static inline int can_use_console(unsign +@@ -779,12 +827,18 @@ static inline int can_use_console(unsign * interrupts disabled. 
It should return with 'lockbuf_lock' * released but interrupts still disabled. */ @@ -4457,7 +5599,7 @@ Index: linux-2.6/kernel/printk.c retval = 1; /* -@@ -800,7 +847,7 @@ static int console_trylock_for_printk(un +@@ -800,7 +854,7 @@ static int console_trylock_for_printk(un } } printk_cpu = UINT_MAX; @@ -4466,7 +5608,7 @@ Index: linux-2.6/kernel/printk.c return retval; } static const char recursion_bug_msg [] = -@@ -833,6 +880,13 @@ asmlinkage int vprintk(const char *fmt, +@@ -833,6 +887,13 @@ asmlinkage int vprintk(const char *fmt, size_t plen; char special; @@ -4480,7 +5622,7 @@ Index: linux-2.6/kernel/printk.c boot_delay_msec(); printk_delay(); -@@ -860,7 +914,7 @@ asmlinkage int vprintk(const char *fmt, +@@ -860,7 +921,7 @@ asmlinkage int vprintk(const char *fmt, } lockdep_off(); @@ -4489,7 +5631,7 @@ Index: linux-2.6/kernel/printk.c printk_cpu = this_cpu; if (recursion_bug) { -@@ -953,8 +1007,15 @@ asmlinkage int vprintk(const char *fmt, +@@ -953,8 +1014,15 @@ asmlinkage int vprintk(const char *fmt, * will release 'logbuf_lock' regardless of whether it * actually gets the semaphore or not. */ @@ -4506,7 +5648,7 @@ Index: linux-2.6/kernel/printk.c lockdep_on(); out_restore_irqs: -@@ -1252,18 +1313,23 @@ void console_unlock(void) +@@ -1252,18 +1320,23 @@ void console_unlock(void) console_may_schedule = 0; for ( ; ; ) { @@ -4532,7 +5674,7 @@ Index: linux-2.6/kernel/printk.c } console_locked = 0; -@@ -1272,7 +1338,7 @@ void console_unlock(void) +@@ -1272,7 +1345,7 @@ void console_unlock(void) exclusive_console = NULL; up(&console_sem); @@ -4541,7 +5683,7 @@ Index: linux-2.6/kernel/printk.c if (wake_klogd) wake_up_klogd(); } -@@ -1502,9 +1568,9 @@ void register_console(struct console *ne +@@ -1502,9 +1575,9 @@ void register_console(struct console *ne * console_unlock(); will print out the buffered messages * for us. */ @@ -4553,7 +5695,7 @@ Index: linux-2.6/kernel/printk.c /* * We're about to replay the log buffer. Only do this to the * just-registered console to avoid excessive message spam to -@@ -1711,10 +1777,10 @@ void kmsg_dump(enum kmsg_dump_reason rea +@@ -1711,10 +1784,10 @@ void kmsg_dump(enum kmsg_dump_reason rea /* Theoretically, the log could move on after we do this, but there's not a lot we can do about that. The new messages will overwrite the start of what we dump. 
*/ @@ -4570,7 +5712,7 @@ Index: linux-2.6/lib/ratelimit.c =================================================================== --- linux-2.6.orig/lib/ratelimit.c +++ linux-2.6/lib/ratelimit.c -@@ -39,7 +39,7 @@ int ___ratelimit(struct ratelimit_state +@@ -39,7 +39,7 @@ int ___ratelimit(struct ratelimit_state * in addition to the one that will be printed by * the entity that is holding the lock already: */ @@ -4579,7 +5721,7 @@ Index: linux-2.6/lib/ratelimit.c return 0; if (!rs->begin) -@@ -60,7 +60,7 @@ int ___ratelimit(struct ratelimit_state +@@ -60,7 +60,7 @@ int ___ratelimit(struct ratelimit_state rs->missed++; ret = 0; } @@ -4975,7 +6117,7 @@ Index: linux-2.6/kernel/semaphore.c } EXPORT_SYMBOL(down); -@@ -77,12 +77,12 @@ int down_interruptible(struct semaphore +@@ -77,12 +77,12 @@ int down_interruptible(struct semaphore unsigned long flags; int result = 0; @@ -5019,7 +6161,7 @@ Index: linux-2.6/kernel/semaphore.c return (count < 0); } -@@ -157,12 +157,12 @@ int down_timeout(struct semaphore *sem, +@@ -157,12 +157,12 @@ int down_timeout(struct semaphore *sem, unsigned long flags; int result = 0; @@ -5515,7 +6657,7 @@ Index: linux-2.6/lib/rwsem-spinlock.c goto out; } -@@ -232,7 +232,7 @@ void __sched __down_write_nested(struct +@@ -232,7 +232,7 @@ void __sched __down_write_nested(struct list_add_tail(&waiter.list, &sem->wait_list); /* we don't need to touch the semaphore struct anymore */ @@ -5524,7 +6666,7 @@ Index: linux-2.6/lib/rwsem-spinlock.c /* wait to be given the lock */ for (;;) { -@@ -247,7 +247,7 @@ void __sched __down_write_nested(struct +@@ -247,7 +247,7 @@ void __sched __down_write_nested(struct ; } @@ -6077,7 +7219,7 @@ Index: linux-2.6/drivers/oprofile/oprofilefs.c static struct inode *oprofilefs_get_inode(struct super_block *sb, int mode) { -@@ -76,9 +76,9 @@ int oprofilefs_ulong_from_user(unsigned +@@ -76,9 +76,9 @@ int oprofilefs_ulong_from_user(unsigned if (copy_from_user(tmpbuf, buf, count)) return -EFAULT; @@ -6236,7 +7378,7 @@ Index: linux-2.6/arch/powerpc/sysdev/uic.c } static void uic_ack_irq(struct irq_data *d) -@@ -91,9 +91,9 @@ static void uic_ack_irq(struct irq_data +@@ -91,9 +91,9 @@ static void uic_ack_irq(struct irq_data unsigned int src = irqd_to_hwirq(d); unsigned long flags; @@ -6555,7 +7697,7 @@ Index: linux-2.6/arch/arm/common/gic.c } writel_relaxed(gic_irq(d), gic_cpu_base(d) + GIC_CPU_EOI); -@@ -135,7 +135,7 @@ static int gic_set_type(struct irq_data +@@ -135,7 +135,7 @@ static int gic_set_type(struct irq_data if (type != IRQ_TYPE_LEVEL_HIGH && type != IRQ_TYPE_EDGE_RISING) return -EINVAL; @@ -6564,7 +7706,7 @@ Index: linux-2.6/arch/arm/common/gic.c if (gic_arch_extn.irq_set_type) gic_arch_extn.irq_set_type(d, type); -@@ -160,7 +160,7 @@ static int gic_set_type(struct irq_data +@@ -160,7 +160,7 @@ static int gic_set_type(struct irq_data if (enabled) writel_relaxed(enablemask, base + GIC_DIST_ENABLE_SET + enableoff); @@ -6660,32 +7802,6 @@ Index: linux-2.6/arch/arm/kernel/dma.c EXPORT_SYMBOL(dma_spin_lock); static dma_t *dma_chan[MAX_DMA_CHANNELS]; -Index: linux-2.6/arch/arm/kernel/smp.c -=================================================================== ---- linux-2.6.orig/arch/arm/kernel/smp.c -+++ linux-2.6/arch/arm/kernel/smp.c -@@ -531,7 +531,7 @@ static void percpu_timer_stop(void) - } - #endif - --static DEFINE_SPINLOCK(stop_lock); -+static DEFINE_RAW_SPINLOCK(stop_lock); - - /* - * ipi_cpu_stop - handle IPI from smp_send_stop() -@@ -540,10 +540,10 @@ static void ipi_cpu_stop(unsigned int cp - { - if (system_state == SYSTEM_BOOTING || - 
system_state == SYSTEM_RUNNING) { -- spin_lock(&stop_lock); -+ raw_spin_lock(&stop_lock); - printk(KERN_CRIT "CPU%u: stopping\n", cpu); - dump_stack(); -- spin_unlock(&stop_lock); -+ raw_spin_unlock(&stop_lock); - } - - set_cpu_online(cpu, false); Index: linux-2.6/arch/arm/kernel/traps.c =================================================================== --- linux-2.6.orig/arch/arm/kernel/traps.c @@ -7010,7 +8126,7 @@ Index: linux-2.6/arch/arm/mach-ixp4xx/common-pci.c return retval; } -@@ -127,7 +127,7 @@ int ixp4xx_pci_read_no_errata(u32 addr, +@@ -127,7 +127,7 @@ int ixp4xx_pci_read_no_errata(u32 addr, unsigned long flags; int retval = 0; @@ -7019,7 +8135,7 @@ Index: linux-2.6/arch/arm/mach-ixp4xx/common-pci.c *PCI_NP_AD = addr; -@@ -140,7 +140,7 @@ int ixp4xx_pci_read_no_errata(u32 addr, +@@ -140,7 +140,7 @@ int ixp4xx_pci_read_no_errata(u32 addr, if(check_master_abort()) retval = 1; @@ -7028,7 +8144,7 @@ Index: linux-2.6/arch/arm/mach-ixp4xx/common-pci.c return retval; } -@@ -149,7 +149,7 @@ int ixp4xx_pci_write(u32 addr, u32 cmd, +@@ -149,7 +149,7 @@ int ixp4xx_pci_write(u32 addr, u32 cmd, unsigned long flags; int retval = 0; @@ -7037,7 +8153,7 @@ Index: linux-2.6/arch/arm/mach-ixp4xx/common-pci.c *PCI_NP_AD = addr; -@@ -162,7 +162,7 @@ int ixp4xx_pci_write(u32 addr, u32 cmd, +@@ -162,7 +162,7 @@ int ixp4xx_pci_write(u32 addr, u32 cmd, if(check_master_abort()) retval = 1; @@ -7466,7 +8582,7 @@ Index: linux-2.6/drivers/dma/ipu/ipu_irq.c } static void ipu_irq_ack(struct irq_data *d) -@@ -146,17 +146,17 @@ static void ipu_irq_ack(struct irq_data +@@ -146,17 +146,17 @@ static void ipu_irq_ack(struct irq_data struct ipu_irq_bank *bank; unsigned long lock_flags; @@ -7613,7 +8729,7 @@ Index: linux-2.6/drivers/pci/dmar.c } index = qi->free_head; -@@ -965,15 +965,15 @@ int qi_submit_sync(struct qi_desc *desc, +@@ -965,15 +965,15 @@ restart: if (rc) break; @@ -7632,7 +8748,7 @@ Index: linux-2.6/drivers/pci/dmar.c if (rc == -EAGAIN) goto restart; -@@ -1062,7 +1062,7 @@ void dmar_disable_qi(struct intel_iommu +@@ -1062,7 +1062,7 @@ void dmar_disable_qi(struct intel_iommu if (!ecap_qis(iommu->ecap)) return; @@ -7641,7 +8757,7 @@ Index: linux-2.6/drivers/pci/dmar.c sts = dmar_readq(iommu->reg + DMAR_GSTS_REG); if (!(sts & DMA_GSTS_QIES)) -@@ -1082,7 +1082,7 @@ void dmar_disable_qi(struct intel_iommu +@@ -1082,7 +1082,7 @@ void dmar_disable_qi(struct intel_iommu IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, readl, !(sts & DMA_GSTS_QIES), sts); end: @@ -7781,7 +8897,7 @@ Index: linux-2.6/drivers/pci/intel-iommu.c =================================================================== --- linux-2.6.orig/drivers/pci/intel-iommu.c +++ linux-2.6/drivers/pci/intel-iommu.c -@@ -933,7 +933,7 @@ static void iommu_set_root_entry(struct +@@ -933,7 +933,7 @@ static void iommu_set_root_entry(struct addr = iommu->root_entry; @@ -7790,7 +8906,7 @@ Index: linux-2.6/drivers/pci/intel-iommu.c dmar_writeq(iommu->reg + DMAR_RTADDR_REG, virt_to_phys(addr)); writel(iommu->gcmd | DMA_GCMD_SRTP, iommu->reg + DMAR_GCMD_REG); -@@ -942,7 +942,7 @@ static void iommu_set_root_entry(struct +@@ -942,7 +942,7 @@ static void iommu_set_root_entry(struct IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, readl, (sts & DMA_GSTS_RTPS), sts); @@ -7996,7 +9112,7 @@ Index: linux-2.6/drivers/pci/intr_remapping.c return index; } -@@ -153,10 +153,10 @@ int map_irq_to_irte_handle(int irq, u16 +@@ -153,10 +153,10 @@ int map_irq_to_irte_handle(int irq, u16 if (!irq_iommu) return -1; @@ -8138,6 +9254,156 @@ Index: linux-2.6/include/linux/intel-iommu.h int seq_id; /* 
sequence id of the iommu */ int agaw; /* agaw of this iommu */ int msagaw; /* max sagaw of this iommu */ +Index: linux-2.6/lib/atomic64.c +=================================================================== +--- linux-2.6.orig/lib/atomic64.c ++++ linux-2.6/lib/atomic64.c +@@ -29,7 +29,7 @@ + * Ensure each lock is in a separate cacheline. + */ + static union { +- spinlock_t lock; ++ raw_spinlock_t lock; + char pad[L1_CACHE_BYTES]; + } atomic64_lock[NR_LOCKS] __cacheline_aligned_in_smp; + +@@ -48,9 +48,9 @@ long long atomic64_read(const atomic64_t + spinlock_t *lock = lock_addr(v); + long long val; + +- spin_lock_irqsave(lock, flags); ++ raw_spin_lock_irqsave(lock, flags); + val = v->counter; +- spin_unlock_irqrestore(lock, flags); ++ raw_spin_unlock_irqrestore(lock, flags); + return val; + } + EXPORT_SYMBOL(atomic64_read); +@@ -60,9 +60,9 @@ void atomic64_set(atomic64_t *v, long lo + unsigned long flags; + spinlock_t *lock = lock_addr(v); + +- spin_lock_irqsave(lock, flags); ++ raw_spin_lock_irqsave(lock, flags); + v->counter = i; +- spin_unlock_irqrestore(lock, flags); ++ raw_spin_unlock_irqrestore(lock, flags); + } + EXPORT_SYMBOL(atomic64_set); + +@@ -71,9 +71,9 @@ void atomic64_add(long long a, atomic64_ + unsigned long flags; + spinlock_t *lock = lock_addr(v); + +- spin_lock_irqsave(lock, flags); ++ raw_spin_lock_irqsave(lock, flags); + v->counter += a; +- spin_unlock_irqrestore(lock, flags); ++ raw_spin_unlock_irqrestore(lock, flags); + } + EXPORT_SYMBOL(atomic64_add); + +@@ -83,9 +83,9 @@ long long atomic64_add_return(long long + spinlock_t *lock = lock_addr(v); + long long val; + +- spin_lock_irqsave(lock, flags); ++ raw_spin_lock_irqsave(lock, flags); + val = v->counter += a; +- spin_unlock_irqrestore(lock, flags); ++ raw_spin_unlock_irqrestore(lock, flags); + return val; + } + EXPORT_SYMBOL(atomic64_add_return); +@@ -95,9 +95,9 @@ void atomic64_sub(long long a, atomic64_ + unsigned long flags; + spinlock_t *lock = lock_addr(v); + +- spin_lock_irqsave(lock, flags); ++ raw_spin_lock_irqsave(lock, flags); + v->counter -= a; +- spin_unlock_irqrestore(lock, flags); ++ raw_spin_unlock_irqrestore(lock, flags); + } + EXPORT_SYMBOL(atomic64_sub); + +@@ -107,9 +107,9 @@ long long atomic64_sub_return(long long + spinlock_t *lock = lock_addr(v); + long long val; + +- spin_lock_irqsave(lock, flags); ++ raw_spin_lock_irqsave(lock, flags); + val = v->counter -= a; +- spin_unlock_irqrestore(lock, flags); ++ raw_spin_unlock_irqrestore(lock, flags); + return val; + } + EXPORT_SYMBOL(atomic64_sub_return); +@@ -120,11 +120,11 @@ long long atomic64_dec_if_positive(atomi + spinlock_t *lock = lock_addr(v); + long long val; + +- spin_lock_irqsave(lock, flags); ++ raw_spin_lock_irqsave(lock, flags); + val = v->counter - 1; + if (val >= 0) + v->counter = val; +- spin_unlock_irqrestore(lock, flags); ++ raw_spin_unlock_irqrestore(lock, flags); + return val; + } + EXPORT_SYMBOL(atomic64_dec_if_positive); +@@ -135,11 +135,11 @@ long long atomic64_cmpxchg(atomic64_t *v + spinlock_t *lock = lock_addr(v); + long long val; + +- spin_lock_irqsave(lock, flags); ++ raw_spin_lock_irqsave(lock, flags); + val = v->counter; + if (val == o) + v->counter = n; +- spin_unlock_irqrestore(lock, flags); ++ raw_spin_unlock_irqrestore(lock, flags); + return val; + } + EXPORT_SYMBOL(atomic64_cmpxchg); +@@ -150,10 +150,10 @@ long long atomic64_xchg(atomic64_t *v, l + spinlock_t *lock = lock_addr(v); + long long val; + +- spin_lock_irqsave(lock, flags); ++ raw_spin_lock_irqsave(lock, flags); + val = v->counter; + v->counter = new; 
+- spin_unlock_irqrestore(lock, flags); ++ raw_spin_unlock_irqrestore(lock, flags); + return val; + } + EXPORT_SYMBOL(atomic64_xchg); +@@ -164,12 +164,12 @@ int atomic64_add_unless(atomic64_t *v, l + spinlock_t *lock = lock_addr(v); + int ret = 0; + +- spin_lock_irqsave(lock, flags); ++ raw_spin_lock_irqsave(lock, flags); + if (v->counter != u) { + v->counter += a; + ret = 1; + } +- spin_unlock_irqrestore(lock, flags); ++ raw_spin_unlock_irqrestore(lock, flags); + return ret; + } + EXPORT_SYMBOL(atomic64_add_unless); +@@ -179,7 +179,7 @@ static int init_atomic64_lock(void) + int i; + + for (i = 0; i < NR_LOCKS; ++i) +- spin_lock_init(&atomic64_lock[i].lock); ++ raw_spin_lock_init(&atomic64_lock[i].lock); + return 0; + } + Index: linux-2.6/kernel/signal.c =================================================================== --- linux-2.6.orig/kernel/signal.c @@ -8373,7 +9639,7 @@ Index: linux-2.6/kernel/posix-timers.c /* Set a POSIX.1b interval timer. */ /* timr->it_lock is taken. */ static int -@@ -841,6 +857,7 @@ SYSCALL_DEFINE4(timer_settime, timer_t, +@@ -841,6 +857,7 @@ retry: if (!timr) return -EINVAL; @@ -8381,7 +9647,7 @@ Index: linux-2.6/kernel/posix-timers.c kc = clockid_to_kclock(timr->it_clock); if (WARN_ON_ONCE(!kc || !kc->timer_set)) error = -EINVAL; -@@ -849,9 +866,12 @@ SYSCALL_DEFINE4(timer_settime, timer_t, +@@ -849,9 +866,12 @@ retry: unlock_timer(timr, flag); if (error == TIMER_RETRY) { @@ -8394,7 +9660,7 @@ Index: linux-2.6/kernel/posix-timers.c if (old_setting && !error && copy_to_user(old_setting, &old_spec, sizeof (old_spec))) -@@ -889,10 +909,15 @@ SYSCALL_DEFINE1(timer_delete, timer_t, t +@@ -889,10 +909,15 @@ retry_delete: if (!timer) return -EINVAL; @@ -8660,7 +9926,7 @@ Index: linux-2.6/drivers/char/random.c .pool = nonblocking_pool_data }; -@@ -633,8 +633,11 @@ static void add_timer_randomness(struct +@@ -633,8 +633,11 @@ static void add_timer_randomness(struct preempt_disable(); /* if over the trickle threshold, use only 1 in 4096 samples */ if (input_pool.entropy_count > trickle_thresh && @@ -8674,7 +9940,7 @@ Index: linux-2.6/drivers/char/random.c sample.jiffies = jiffies; sample.cycles = get_cycles(); -@@ -676,8 +679,6 @@ static void add_timer_randomness(struct +@@ -676,8 +679,6 @@ static void add_timer_randomness(struct credit_entropy_bits(&input_pool, min_t(int, fls(delta>>1), 11)); } @@ -8687,7 +9953,7 @@ Index: linux-2.6/fs/ioprio.c =================================================================== --- linux-2.6.orig/fs/ioprio.c +++ linux-2.6/fs/ioprio.c -@@ -226,6 +226,7 @@ SYSCALL_DEFINE2(ioprio_get, int, which, +@@ -226,6 +226,7 @@ SYSCALL_DEFINE2(ioprio_get, int, which, if (!user) break; @@ -8695,7 +9961,7 @@ Index: linux-2.6/fs/ioprio.c do_each_thread(g, p) { if (__task_cred(p)->uid != user->uid) continue; -@@ -237,6 +238,7 @@ SYSCALL_DEFINE2(ioprio_get, int, which, +@@ -237,6 +238,7 @@ SYSCALL_DEFINE2(ioprio_get, int, which, else ret = ioprio_best(ret, tmpio); } while_each_thread(g, p); @@ -8814,7 +10080,7 @@ Index: linux-2.6/drivers/clocksource/tcb_clksrc.c __raw_writel(timer_clock | ATMEL_TC_CPCSTOP | ATMEL_TC_WAVE | ATMEL_TC_WAVESEL_UP_AUTO, regs + ATMEL_TC_REG(2, CMR)); -@@ -152,8 +146,12 @@ static struct tc_clkevt_device clkevt = +@@ -152,8 +146,12 @@ static struct tc_clkevt_device clkevt = .features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT, .shift = 32, @@ -9072,13 +10338,13 @@ Index: linux-2.6/include/linux/preempt.h +# define preempt_enable_rt() preempt_enable() +# define preempt_disable_nort() do { } while (0) +# define 
preempt_enable_nort() do { } while (0) -+#ifdef CONFIG_SMP -+extern void migrate_disable(void); -+extern void migrate_enable(void); -+#else /* CONFIG_SMP */ -+# define migrate_disable() do { } while (0) -+# define migrate_enable() do { } while (0) -+#endif /* CONFIG_SMP */ ++# ifdef CONFIG_SMP ++ extern void migrate_disable(void); ++ extern void migrate_enable(void); ++# else /* CONFIG_SMP */ ++# define migrate_disable() do { } while (0) ++# define migrate_enable() do { } while (0) ++# endif /* CONFIG_SMP */ +#else +# define preempt_disable_rt() do { } while (0) +# define preempt_enable_rt() do { } while (0) @@ -9164,95 +10430,6 @@ Index: linux-2.6/include/linux/uaccess.h set_fs(old_fs); \ ret; \ }) -Index: linux-2.6/mm/memory.c -=================================================================== ---- linux-2.6.orig/mm/memory.c -+++ linux-2.6/mm/memory.c -@@ -1290,10 +1290,13 @@ static unsigned long unmap_page_range(st - return addr; - } - --#ifdef CONFIG_PREEMPT -+#if defined(CONFIG_PREEMPT) && !defined(CONFIG_PREEMPT_RT_FULL) - # define ZAP_BLOCK_SIZE (8 * PAGE_SIZE) - #else --/* No preempt: go for improved straight-line efficiency */ -+/* -+ * No preempt: go for improved straight-line efficiency -+ * on PREEMPT_RT this is not a critical latency-path. -+ */ - # define ZAP_BLOCK_SIZE (1024 * PAGE_SIZE) - #endif - -@@ -3435,6 +3438,32 @@ int handle_pte_fault(struct mm_struct *m - return 0; - } - -+#ifdef CONFIG_PREEMPT_RT_FULL -+void pagefault_disable(void) -+{ -+ migrate_disable(); -+ current->pagefault_disabled++; -+ /* -+ * make sure to have issued the store before a pagefault -+ * can hit. -+ */ -+ barrier(); -+} -+EXPORT_SYMBOL_GPL(pagefault_disable); -+ -+void pagefault_enable(void) -+{ -+ /* -+ * make sure to issue those last loads/stores before enabling -+ * the pagefault handler again. -+ */ -+ barrier(); -+ current->pagefault_disabled--; -+ migrate_enable(); -+} -+EXPORT_SYMBOL_GPL(pagefault_enable); -+#endif -+ - /* - * By the time we get here, we already hold the mm semaphore - */ -@@ -3983,3 +4012,35 @@ void copy_user_huge_page(struct page *ds - } - } - #endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_HUGETLBFS */ -+ -+#if defined(CONFIG_PREEMPT_RT_FULL) && (USE_SPLIT_PTLOCKS > 0) -+/* -+ * Heinous hack, relies on the caller doing something like: -+ * -+ * pte = alloc_pages(PGALLOC_GFP, 0); -+ * if (pte) -+ * pgtable_page_ctor(pte); -+ * return pte; -+ * -+ * This ensures we release the page and return NULL when the -+ * lock allocation fails. -+ */ -+struct page *pte_lock_init(struct page *page) -+{ -+ page->ptl = kmalloc(sizeof(spinlock_t), GFP_KERNEL); -+ if (page->ptl) { -+ spin_lock_init(__pte_lockptr(page)); -+ } else { -+ __free_page(page); -+ page = NULL; -+ } -+ return page; -+} -+ -+void pte_lock_deinit(struct page *page) -+{ -+ kfree(page->ptl); -+ page->mapping = NULL; -+} -+ -+#endif Index: linux-2.6/arch/alpha/mm/fault.c =================================================================== --- linux-2.6.orig/arch/alpha/mm/fault.c @@ -9348,7 +10525,7 @@ Index: linux-2.6/arch/m68k/mm/fault.c =================================================================== --- linux-2.6.orig/arch/m68k/mm/fault.c +++ linux-2.6/arch/m68k/mm/fault.c -@@ -85,7 +85,7 @@ int do_page_fault(struct pt_regs *regs, +@@ -85,7 +85,7 @@ int do_page_fault(struct pt_regs *regs, * If we're in an interrupt or have no user * context, we must not take the fault.. 
*/ @@ -9436,7 +10613,7 @@ Index: linux-2.6/arch/s390/mm/fault.c goto out; address = trans_exc_code & __FAIL_ADDR_MASK; -@@ -410,7 +411,8 @@ void __kprobes do_asce_exception(struct +@@ -410,7 +411,8 @@ void __kprobes do_asce_exception(struct struct mm_struct *mm = current->mm; struct vm_area_struct *vma; @@ -9698,7 +10875,7 @@ Index: linux-2.6/drivers/of/base.c for (pp = np->properties; pp != 0; pp = pp->next) { if (of_prop_cmp(pp->name, name) == 0) { if (lenp != 0) -@@ -155,7 +153,20 @@ struct property *of_find_property(const +@@ -155,7 +153,20 @@ struct property *of_find_property(const break; } } @@ -10317,7 +11494,7 @@ Index: linux-2.6/mm/page_alloc.c } static bool free_pages_prepare(struct page *page, unsigned int order) -@@ -682,13 +718,13 @@ static void __free_pages_ok(struct page +@@ -682,13 +718,13 @@ static void __free_pages_ok(struct page if (!free_pages_prepare(page, order)) return; @@ -10427,7 +11604,7 @@ Index: linux-2.6/mm/page_alloc.c } /* -@@ -1301,7 +1358,7 @@ struct page *buffered_rmqueue(struct zon +@@ -1301,7 +1358,7 @@ again: struct per_cpu_pages *pcp; struct list_head *list; @@ -10436,7 +11613,7 @@ Index: linux-2.6/mm/page_alloc.c pcp = &this_cpu_ptr(zone->pageset)->pcp; list = &pcp->lists[migratetype]; if (list_empty(list)) { -@@ -1333,17 +1390,19 @@ struct page *buffered_rmqueue(struct zon +@@ -1333,17 +1390,19 @@ again: */ WARN_ON_ONCE(order > 1); } @@ -10460,7 +11637,7 @@ Index: linux-2.6/mm/page_alloc.c VM_BUG_ON(bad_range(zone, page)); if (prep_new_page(page, order, gfp_flags)) -@@ -1351,7 +1410,7 @@ struct page *buffered_rmqueue(struct zon +@@ -1351,7 +1410,7 @@ again: return page; failed: @@ -10500,7 +11677,7 @@ Index: linux-2.6/mm/page_alloc.c } return 0; } -@@ -4972,6 +5033,7 @@ static int page_alloc_cpu_notify(struct +@@ -4972,6 +5033,7 @@ static int page_alloc_cpu_notify(struct void __init page_alloc_init(void) { hotcpu_notifier(page_alloc_cpu_notify, 0); @@ -10508,707 +11685,6 @@ Index: linux-2.6/mm/page_alloc.c } /* -Index: linux-2.6/mm/slab.c -=================================================================== ---- linux-2.6.orig/mm/slab.c -+++ linux-2.6/mm/slab.c -@@ -116,6 +116,7 @@ - #include - #include - #include -+#include - - #include - #include -@@ -620,6 +621,51 @@ int slab_is_available(void) - static struct lock_class_key on_slab_l3_key; - static struct lock_class_key on_slab_alc_key; - -+static struct lock_class_key debugobj_l3_key; -+static struct lock_class_key debugobj_alc_key; -+ -+static void slab_set_lock_classes(struct kmem_cache *cachep, -+ struct lock_class_key *l3_key, struct lock_class_key *alc_key, -+ int q) -+{ -+ struct array_cache **alc; -+ struct kmem_list3 *l3; -+ int r; -+ -+ l3 = cachep->nodelists[q]; -+ if (!l3) -+ return; -+ -+ lockdep_set_class(&l3->list_lock, l3_key); -+ alc = l3->alien; -+ /* -+ * FIXME: This check for BAD_ALIEN_MAGIC -+ * should go away when common slab code is taught to -+ * work even without alien caches. 
-+ * Currently, non NUMA code returns BAD_ALIEN_MAGIC -+ * for alloc_alien_cache, -+ */ -+ if (!alc || (unsigned long)alc == BAD_ALIEN_MAGIC) -+ return; -+ for_each_node(r) { -+ if (alc[r]) -+ lockdep_set_class(&alc[r]->lock, alc_key); -+ } -+} -+ -+static void slab_set_debugobj_lock_classes_node(struct kmem_cache *cachep, int node) -+{ -+ slab_set_lock_classes(cachep, &debugobj_l3_key, &debugobj_alc_key, node); -+} -+ -+static void slab_set_debugobj_lock_classes(struct kmem_cache *cachep) -+{ -+ int node; -+ -+ for_each_online_node(node) -+ slab_set_debugobj_lock_classes_node(cachep, node); -+} -+ - static void init_node_lock_keys(int q) - { - struct cache_sizes *s = malloc_sizes; -@@ -628,29 +674,14 @@ static void init_node_lock_keys(int q) - return; - - for (s = malloc_sizes; s->cs_size != ULONG_MAX; s++) { -- struct array_cache **alc; - struct kmem_list3 *l3; -- int r; - - l3 = s->cs_cachep->nodelists[q]; - if (!l3 || OFF_SLAB(s->cs_cachep)) - continue; -- lockdep_set_class(&l3->list_lock, &on_slab_l3_key); -- alc = l3->alien; -- /* -- * FIXME: This check for BAD_ALIEN_MAGIC -- * should go away when common slab code is taught to -- * work even without alien caches. -- * Currently, non NUMA code returns BAD_ALIEN_MAGIC -- * for alloc_alien_cache, -- */ -- if (!alc || (unsigned long)alc == BAD_ALIEN_MAGIC) -- continue; -- for_each_node(r) { -- if (alc[r]) -- lockdep_set_class(&alc[r]->lock, -- &on_slab_alc_key); -- } -+ -+ slab_set_lock_classes(s->cs_cachep, &on_slab_l3_key, -+ &on_slab_alc_key, q); - } - } - -@@ -669,6 +700,14 @@ static void init_node_lock_keys(int q) - static inline void init_lock_keys(void) - { - } -+ -+static void slab_set_debugobj_lock_classes_node(struct kmem_cache *cachep, int node) -+{ -+} -+ -+static void slab_set_debugobj_lock_classes(struct kmem_cache *cachep) -+{ -+} - #endif - - /* -@@ -678,12 +717,66 @@ static DEFINE_MUTEX(cache_chain_mutex); - static struct list_head cache_chain; - - static DEFINE_PER_CPU(struct delayed_work, slab_reap_work); -+static DEFINE_PER_CPU(struct list_head, slab_free_list); -+static DEFINE_LOCAL_IRQ_LOCK(slab_lock); -+ -+#ifndef CONFIG_PREEMPT_RT_BASE -+# define slab_on_each_cpu(func, cp) on_each_cpu(func, cp, 1) -+#else -+/* -+ * execute func() for all CPUs. On PREEMPT_RT we dont actually have -+ * to run on the remote CPUs - we only have to take their CPU-locks. -+ * (This is a rare operation, so cacheline bouncing is not an issue.) 
-+ */ -+static void -+slab_on_each_cpu(void (*func)(void *arg, int this_cpu), void *arg) -+{ -+ unsigned int i; -+ -+ for_each_online_cpu(i) -+ func(arg, i); -+} -+#endif -+ -+static void free_delayed(struct list_head *h) -+{ -+ while(!list_empty(h)) { -+ struct page *page = list_first_entry(h, struct page, lru); -+ -+ list_del(&page->lru); -+ __free_pages(page, page->index); -+ } -+} -+ -+static void unlock_l3_and_free_delayed(spinlock_t *list_lock) -+{ -+ LIST_HEAD(tmp); -+ -+ list_splice_init(&__get_cpu_var(slab_free_list), &tmp); -+ local_spin_unlock_irq(slab_lock, list_lock); -+ free_delayed(&tmp); -+} -+ -+static void unlock_slab_and_free_delayed(unsigned long flags) -+{ -+ LIST_HEAD(tmp); -+ -+ list_splice_init(&__get_cpu_var(slab_free_list), &tmp); -+ local_unlock_irqrestore(slab_lock, flags); -+ free_delayed(&tmp); -+} - - static inline struct array_cache *cpu_cache_get(struct kmem_cache *cachep) - { - return cachep->array[smp_processor_id()]; - } - -+static inline struct array_cache *cpu_cache_get_on_cpu(struct kmem_cache *cachep, -+ int cpu) -+{ -+ return cachep->array[cpu]; -+} -+ - static inline struct kmem_cache *__find_general_cachep(size_t size, - gfp_t gfpflags) - { -@@ -1021,9 +1114,10 @@ static void reap_alien(struct kmem_cache - if (l3->alien) { - struct array_cache *ac = l3->alien[node]; - -- if (ac && ac->avail && spin_trylock_irq(&ac->lock)) { -+ if (ac && ac->avail && -+ local_spin_trylock_irq(slab_lock, &ac->lock)) { - __drain_alien_cache(cachep, ac, node); -- spin_unlock_irq(&ac->lock); -+ local_spin_unlock_irq(slab_lock, &ac->lock); - } - } - } -@@ -1038,9 +1132,9 @@ static void drain_alien_cache(struct kme - for_each_online_node(i) { - ac = alien[i]; - if (ac) { -- spin_lock_irqsave(&ac->lock, flags); -+ local_spin_lock_irqsave(slab_lock, &ac->lock, flags); - __drain_alien_cache(cachep, ac, i); -- spin_unlock_irqrestore(&ac->lock, flags); -+ local_spin_unlock_irqrestore(slab_lock, &ac->lock, flags); - } - } - } -@@ -1119,11 +1213,11 @@ static int init_cache_nodelists_node(int - cachep->nodelists[node] = l3; - } - -- spin_lock_irq(&cachep->nodelists[node]->list_lock); -+ local_spin_lock_irq(slab_lock, &cachep->nodelists[node]->list_lock); - cachep->nodelists[node]->free_limit = - (1 + nr_cpus_node(node)) * - cachep->batchcount + cachep->num; -- spin_unlock_irq(&cachep->nodelists[node]->list_lock); -+ local_spin_unlock_irq(slab_lock, &cachep->nodelists[node]->list_lock); - } - return 0; - } -@@ -1148,7 +1242,7 @@ static void __cpuinit cpuup_canceled(lon - if (!l3) - goto free_array_cache; - -- spin_lock_irq(&l3->list_lock); -+ local_spin_lock_irq(slab_lock, &l3->list_lock); - - /* Free limit for this kmem_list3 */ - l3->free_limit -= cachep->batchcount; -@@ -1156,7 +1250,7 @@ static void __cpuinit cpuup_canceled(lon - free_block(cachep, nc->entry, nc->avail, node); - - if (!cpumask_empty(mask)) { -- spin_unlock_irq(&l3->list_lock); -+ unlock_l3_and_free_delayed(&l3->list_lock); - goto free_array_cache; - } - -@@ -1170,7 +1264,7 @@ static void __cpuinit cpuup_canceled(lon - alien = l3->alien; - l3->alien = NULL; - -- spin_unlock_irq(&l3->list_lock); -+ unlock_l3_and_free_delayed(&l3->list_lock); - - kfree(shared); - if (alien) { -@@ -1244,7 +1338,7 @@ static int __cpuinit cpuup_prepare(long - l3 = cachep->nodelists[node]; - BUG_ON(!l3); - -- spin_lock_irq(&l3->list_lock); -+ local_spin_lock_irq(slab_lock, &l3->list_lock); - if (!l3->shared) { - /* - * We are serialised from CPU_DEAD or -@@ -1259,9 +1353,11 @@ static int __cpuinit cpuup_prepare(long - alien = NULL; 
- } - #endif -- spin_unlock_irq(&l3->list_lock); -+ local_spin_unlock_irq(slab_lock, &l3->list_lock); - kfree(shared); - free_alien_cache(alien); -+ if (cachep->flags & SLAB_DEBUG_OBJECTS) -+ slab_set_debugobj_lock_classes_node(cachep, node); - } - init_node_lock_keys(node); - -@@ -1448,6 +1544,10 @@ void __init kmem_cache_init(void) - if (num_possible_nodes() == 1) - use_alien_caches = 0; - -+ local_irq_lock_init(slab_lock); -+ for_each_possible_cpu(i) -+ INIT_LIST_HEAD(&per_cpu(slab_free_list, i)); -+ - for (i = 0; i < NUM_INIT_LISTS; i++) { - kmem_list3_init(&initkmem_list3[i]); - if (i < MAX_NUMNODES) -@@ -1625,6 +1725,9 @@ void __init kmem_cache_init_late(void) - { - struct kmem_cache *cachep; - -+ /* Annotate slab for lockdep -- annotate the malloc caches */ -+ init_lock_keys(); -+ - /* 6) resize the head arrays to their final sizes */ - mutex_lock(&cache_chain_mutex); - list_for_each_entry(cachep, &cache_chain, next) -@@ -1635,9 +1738,6 @@ void __init kmem_cache_init_late(void) - /* Done! */ - g_cpucache_up = FULL; - -- /* Annotate slab for lockdep -- annotate the malloc caches */ -- init_lock_keys(); -- - /* - * Register a cpu startup notifier callback that initializes - * cpu_cache_get for all new cpus -@@ -1725,12 +1825,14 @@ static void *kmem_getpages(struct kmem_c - /* - * Interface to system's page release. - */ --static void kmem_freepages(struct kmem_cache *cachep, void *addr) -+static void kmem_freepages(struct kmem_cache *cachep, void *addr, bool delayed) - { - unsigned long i = (1 << cachep->gfporder); -- struct page *page = virt_to_page(addr); -+ struct page *page, *basepage = virt_to_page(addr); - const unsigned long nr_freed = i; - -+ page = basepage; -+ - kmemcheck_free_shadow(page, cachep->gfporder); - - if (cachep->flags & SLAB_RECLAIM_ACCOUNT) -@@ -1746,7 +1848,13 @@ static void kmem_freepages(struct kmem_c - } - if (current->reclaim_state) - current->reclaim_state->reclaimed_slab += nr_freed; -- free_pages((unsigned long)addr, cachep->gfporder); -+ -+ if (!delayed) { -+ free_pages((unsigned long)addr, cachep->gfporder); -+ } else { -+ basepage->index = cachep->gfporder; -+ list_add(&basepage->lru, &__get_cpu_var(slab_free_list)); -+ } - } - - static void kmem_rcu_free(struct rcu_head *head) -@@ -1754,7 +1862,7 @@ static void kmem_rcu_free(struct rcu_hea - struct slab_rcu *slab_rcu = (struct slab_rcu *)head; - struct kmem_cache *cachep = slab_rcu->cachep; - -- kmem_freepages(cachep, slab_rcu->addr); -+ kmem_freepages(cachep, slab_rcu->addr, false); - if (OFF_SLAB(cachep)) - kmem_cache_free(cachep->slabp_cache, slab_rcu); - } -@@ -1973,7 +2081,8 @@ static void slab_destroy_debugcheck(stru - * Before calling the slab must have been unlinked from the cache. The - * cache-lock is not held/needed. - */ --static void slab_destroy(struct kmem_cache *cachep, struct slab *slabp) -+static void slab_destroy(struct kmem_cache *cachep, struct slab *slabp, -+ bool delayed) - { - void *addr = slabp->s_mem - slabp->colouroff; - -@@ -1986,7 +2095,7 @@ static void slab_destroy(struct kmem_cac - slab_rcu->addr = addr; - call_rcu(&slab_rcu->head, kmem_rcu_free); - } else { -- kmem_freepages(cachep, addr); -+ kmem_freepages(cachep, addr, delayed); - if (OFF_SLAB(cachep)) - kmem_cache_free(cachep->slabp_cache, slabp); - } -@@ -2424,6 +2533,16 @@ kmem_cache_create (const char *name, siz - goto oops; - } - -+ if (flags & SLAB_DEBUG_OBJECTS) { -+ /* -+ * Would deadlock through slab_destroy()->call_rcu()-> -+ * debug_object_activate()->kmem_cache_alloc(). 
-+ */ -+ WARN_ON_ONCE(flags & SLAB_DESTROY_BY_RCU); -+ -+ slab_set_debugobj_lock_classes(cachep); -+ } -+ - /* cache setup completed, link it into the list */ - list_add(&cachep->next, &cache_chain); - oops: -@@ -2441,7 +2560,7 @@ EXPORT_SYMBOL(kmem_cache_create); - #if DEBUG - static void check_irq_off(void) - { -- BUG_ON(!irqs_disabled()); -+ BUG_ON_NONRT(!irqs_disabled()); - } - - static void check_irq_on(void) -@@ -2476,13 +2595,12 @@ static void drain_array(struct kmem_cach - struct array_cache *ac, - int force, int node); - --static void do_drain(void *arg) -+static void __do_drain(void *arg, unsigned int cpu) - { - struct kmem_cache *cachep = arg; - struct array_cache *ac; -- int node = numa_mem_id(); -+ int node = cpu_to_mem(cpu); - -- check_irq_off(); - ac = cpu_cache_get(cachep); - spin_lock(&cachep->nodelists[node]->list_lock); - free_block(cachep, ac->entry, ac->avail, node); -@@ -2490,12 +2608,30 @@ static void do_drain(void *arg) - ac->avail = 0; - } - -+#ifndef CONFIG_PREEMPT_RT_BASE -+static void do_drain(void *arg) -+{ -+ __do_drain(arg, smp_processor_id()); -+} -+#else -+static void do_drain(void *arg, int cpu) -+{ -+ LIST_HEAD(tmp); -+ -+ spin_lock_irq(&per_cpu(slab_lock, cpu).lock); -+ __do_drain(arg, cpu); -+ list_splice_init(&per_cpu(slab_free_list, cpu), &tmp); -+ spin_unlock_irq(&per_cpu(slab_lock, cpu).lock); -+ free_delayed(&tmp); -+} -+#endif -+ - static void drain_cpu_caches(struct kmem_cache *cachep) - { - struct kmem_list3 *l3; - int node; - -- on_each_cpu(do_drain, cachep, 1); -+ slab_on_each_cpu(do_drain, cachep); - check_irq_on(); - for_each_online_node(node) { - l3 = cachep->nodelists[node]; -@@ -2526,10 +2662,10 @@ static int drain_freelist(struct kmem_ca - nr_freed = 0; - while (nr_freed < tofree && !list_empty(&l3->slabs_free)) { - -- spin_lock_irq(&l3->list_lock); -+ local_spin_lock_irq(slab_lock, &l3->list_lock); - p = l3->slabs_free.prev; - if (p == &l3->slabs_free) { -- spin_unlock_irq(&l3->list_lock); -+ local_spin_unlock_irq(slab_lock, &l3->list_lock); - goto out; - } - -@@ -2543,8 +2679,8 @@ static int drain_freelist(struct kmem_ca - * to the cache. - */ - l3->free_objects -= cache->num; -- spin_unlock_irq(&l3->list_lock); -- slab_destroy(cache, slabp); -+ local_spin_unlock_irq(slab_lock, &l3->list_lock); -+ slab_destroy(cache, slabp, false); - nr_freed++; - } - out: -@@ -2838,7 +2974,7 @@ static int cache_grow(struct kmem_cache - offset *= cachep->colour_off; - - if (local_flags & __GFP_WAIT) -- local_irq_enable(); -+ local_unlock_irq(slab_lock); - - /* - * The test for missing atomic flag is performed here, rather than -@@ -2868,7 +3004,7 @@ static int cache_grow(struct kmem_cache - cache_init_objs(cachep, slabp); - - if (local_flags & __GFP_WAIT) -- local_irq_disable(); -+ local_lock_irq(slab_lock); - check_irq_off(); - spin_lock(&l3->list_lock); - -@@ -2879,10 +3015,10 @@ static int cache_grow(struct kmem_cache - spin_unlock(&l3->list_lock); - return 1; - opps1: -- kmem_freepages(cachep, objp); -+ kmem_freepages(cachep, objp, false); - failed: - if (local_flags & __GFP_WAIT) -- local_irq_disable(); -+ local_lock_irq(slab_lock); - return 0; - } - -@@ -3280,11 +3416,11 @@ static void *fallback_alloc(struct kmem_ - * set and go into memory reserves if necessary. 
- */ - if (local_flags & __GFP_WAIT) -- local_irq_enable(); -+ local_unlock_irq(slab_lock); - kmem_flagcheck(cache, flags); - obj = kmem_getpages(cache, local_flags, numa_mem_id()); - if (local_flags & __GFP_WAIT) -- local_irq_disable(); -+ local_lock_irq(slab_lock); - if (obj) { - /* - * Insert into the appropriate per node queues -@@ -3400,7 +3536,7 @@ __cache_alloc_node(struct kmem_cache *ca - return NULL; - - cache_alloc_debugcheck_before(cachep, flags); -- local_irq_save(save_flags); -+ local_lock_irqsave(slab_lock, save_flags); - - if (nodeid == -1) - nodeid = slab_node; -@@ -3425,7 +3561,7 @@ __cache_alloc_node(struct kmem_cache *ca - /* ___cache_alloc_node can fall back to other nodes */ - ptr = ____cache_alloc_node(cachep, flags, nodeid); - out: -- local_irq_restore(save_flags); -+ local_unlock_irqrestore(slab_lock, save_flags); - ptr = cache_alloc_debugcheck_after(cachep, flags, ptr, caller); - kmemleak_alloc_recursive(ptr, obj_size(cachep), 1, cachep->flags, - flags); -@@ -3485,9 +3621,9 @@ __cache_alloc(struct kmem_cache *cachep, - return NULL; - - cache_alloc_debugcheck_before(cachep, flags); -- local_irq_save(save_flags); -+ local_lock_irqsave(slab_lock, save_flags); - objp = __do_cache_alloc(cachep, flags); -- local_irq_restore(save_flags); -+ local_unlock_irqrestore(slab_lock, save_flags); - objp = cache_alloc_debugcheck_after(cachep, flags, objp, caller); - kmemleak_alloc_recursive(objp, obj_size(cachep), 1, cachep->flags, - flags); -@@ -3535,7 +3671,7 @@ static void free_block(struct kmem_cache - * a different cache, refer to comments before - * alloc_slabmgmt. - */ -- slab_destroy(cachep, slabp); -+ slab_destroy(cachep, slabp, true); - } else { - list_add(&slabp->list, &l3->slabs_free); - } -@@ -3798,12 +3934,12 @@ void kmem_cache_free(struct kmem_cache * - { - unsigned long flags; - -- local_irq_save(flags); - debug_check_no_locks_freed(objp, obj_size(cachep)); - if (!(cachep->flags & SLAB_DEBUG_OBJECTS)) - debug_check_no_obj_freed(objp, obj_size(cachep)); -+ local_lock_irqsave(slab_lock, flags); - __cache_free(cachep, objp, __builtin_return_address(0)); -- local_irq_restore(flags); -+ unlock_slab_and_free_delayed(flags); - - trace_kmem_cache_free(_RET_IP_, objp); - } -@@ -3827,13 +3963,13 @@ void kfree(const void *objp) - - if (unlikely(ZERO_OR_NULL_PTR(objp))) - return; -- local_irq_save(flags); - kfree_debugcheck(objp); - c = virt_to_cache(objp); - debug_check_no_locks_freed(objp, obj_size(c)); - debug_check_no_obj_freed(objp, obj_size(c)); -+ local_lock_irqsave(slab_lock, flags); - __cache_free(c, (void *)objp, __builtin_return_address(0)); -- local_irq_restore(flags); -+ unlock_slab_and_free_delayed(flags); - } - EXPORT_SYMBOL(kfree); - -@@ -3876,7 +4012,7 @@ static int alloc_kmemlist(struct kmem_ca - if (l3) { - struct array_cache *shared = l3->shared; - -- spin_lock_irq(&l3->list_lock); -+ local_spin_lock_irq(slab_lock, &l3->list_lock); - - if (shared) - free_block(cachep, shared->entry, -@@ -3889,7 +4025,8 @@ static int alloc_kmemlist(struct kmem_ca - } - l3->free_limit = (1 + nr_cpus_node(node)) * - cachep->batchcount + cachep->num; -- spin_unlock_irq(&l3->list_lock); -+ unlock_l3_and_free_delayed(&l3->list_lock); -+ - kfree(shared); - free_alien_cache(new_alien); - continue; -@@ -3936,17 +4073,30 @@ struct ccupdate_struct { - struct array_cache *new[NR_CPUS]; - }; - --static void do_ccupdate_local(void *info) -+static void __do_ccupdate_local(void *info, int cpu) - { - struct ccupdate_struct *new = info; - struct array_cache *old; - -- check_irq_off(); -- old 
= cpu_cache_get(new->cachep); -+ old = cpu_cache_get_on_cpu(new->cachep, cpu); - -- new->cachep->array[smp_processor_id()] = new->new[smp_processor_id()]; -- new->new[smp_processor_id()] = old; -+ new->cachep->array[cpu] = new->new[cpu]; -+ new->new[cpu] = old; -+} -+ -+#ifndef CONFIG_PREEMPT_RT_BASE -+static void do_ccupdate_local(void *info) -+{ -+ __do_ccupdate_local(info, smp_processor_id()); -+} -+#else -+static void do_ccupdate_local(void *info, int cpu) -+{ -+ spin_lock_irq(&per_cpu(slab_lock, cpu).lock); -+ __do_ccupdate_local(info, cpu); -+ spin_unlock_irq(&per_cpu(slab_lock, cpu).lock); - } -+#endif - - /* Always called with the cache_chain_mutex held */ - static int do_tune_cpucache(struct kmem_cache *cachep, int limit, -@@ -3971,7 +4121,7 @@ static int do_tune_cpucache(struct kmem_ - } - new->cachep = cachep; - -- on_each_cpu(do_ccupdate_local, (void *)new, 1); -+ slab_on_each_cpu(do_ccupdate_local, (void *)new); - - check_irq_on(); - cachep->batchcount = batchcount; -@@ -3982,9 +4132,11 @@ static int do_tune_cpucache(struct kmem_ - struct array_cache *ccold = new->new[i]; - if (!ccold) - continue; -- spin_lock_irq(&cachep->nodelists[cpu_to_mem(i)]->list_lock); -+ local_spin_lock_irq(slab_lock, -+ &cachep->nodelists[cpu_to_mem(i)]->list_lock); - free_block(cachep, ccold->entry, ccold->avail, cpu_to_mem(i)); -- spin_unlock_irq(&cachep->nodelists[cpu_to_mem(i)]->list_lock); -+ -+ unlock_l3_and_free_delayed(&cachep->nodelists[cpu_to_mem(i)]->list_lock); - kfree(ccold); - } - kfree(new); -@@ -4060,7 +4212,7 @@ static void drain_array(struct kmem_cach - if (ac->touched && !force) { - ac->touched = 0; - } else { -- spin_lock_irq(&l3->list_lock); -+ local_spin_lock_irq(slab_lock, &l3->list_lock); - if (ac->avail) { - tofree = force ? ac->avail : (ac->limit + 4) / 5; - if (tofree > ac->avail) -@@ -4070,7 +4222,7 @@ static void drain_array(struct kmem_cach - memmove(ac->entry, &(ac->entry[tofree]), - sizeof(void *) * ac->avail); - } -- spin_unlock_irq(&l3->list_lock); -+ local_spin_unlock_irq(slab_lock, &l3->list_lock); - } - } - -@@ -4209,7 +4361,7 @@ static int s_show(struct seq_file *m, vo - continue; - - check_irq_on(); -- spin_lock_irq(&l3->list_lock); -+ local_spin_lock_irq(slab_lock, &l3->list_lock); - - list_for_each_entry(slabp, &l3->slabs_full, list) { - if (slabp->inuse != cachep->num && !error) -@@ -4234,7 +4386,7 @@ static int s_show(struct seq_file *m, vo - if (l3->shared) - shared_avail += l3->shared->avail; - -- spin_unlock_irq(&l3->list_lock); -+ local_spin_unlock_irq(slab_lock, &l3->list_lock); - } - num_slabs += active_slabs; - num_objs = num_slabs * cachep->num; -@@ -4463,13 +4615,13 @@ static int leaks_show(struct seq_file *m - continue; - - check_irq_on(); -- spin_lock_irq(&l3->list_lock); -+ local_spin_lock_irq(slab_lock, &l3->list_lock); - - list_for_each_entry(slabp, &l3->slabs_full, list) - handle_slab(n, cachep, slabp); - list_for_each_entry(slabp, &l3->slabs_partial, list) - handle_slab(n, cachep, slabp); -- spin_unlock_irq(&l3->list_lock); -+ local_spin_unlock_irq(slab_lock, &l3->list_lock); - } - name = cachep->name; - if (n[0] == n[1]) { Index: linux-2.6/kernel/sched_fair.c =================================================================== --- linux-2.6.orig/kernel/sched_fair.c @@ -11249,7 +11725,7 @@ Index: linux-2.6/kernel/sched_fair.c want_affine = 1; new_cpu = prev_cpu; } -@@ -2067,7 +2067,7 @@ int can_migrate_task(struct task_struct +@@ -2067,7 +2067,7 @@ int can_migrate_task(struct task_struct * 2) cannot be migrated to this CPU due to cpus_allowed, 
or * 3) are cache-hot on their current CPU. */ @@ -11290,7 +11766,7 @@ Index: linux-2.6/kernel/sched_fair.c } rcu_read_unlock(); -@@ -3418,7 +3436,7 @@ static int load_balance(int this_cpu, st +@@ -3418,7 +3436,7 @@ redo: * moved to this_cpu */ if (!cpumask_test_cpu(this_cpu, @@ -11634,7 +12110,7 @@ Index: linux-2.6/include/linux/interrupt.h =================================================================== --- linux-2.6.orig/include/linux/interrupt.h +++ linux-2.6/include/linux/interrupt.h -@@ -202,7 +202,7 @@ extern void devm_free_irq(struct device +@@ -202,7 +202,7 @@ extern void devm_free_irq(struct device #ifdef CONFIG_LOCKDEP # define local_irq_enable_in_hardirq() do { } while (0) #else @@ -11826,7 +12302,7 @@ Index: linux-2.6/arch/x86/kernel/entry_32.S jmp resume_userspace_sig ALIGN -@@ -638,7 +642,11 @@ work_notifysig: # deal with pending s +@@ -638,7 +642,11 @@ work_notifysig_v86: movl %esp, %eax #endif xorl %edx, %edx @@ -11891,7 +12367,7 @@ Index: linux-2.6/kernel/rcutree.c /* * Queue an RCU for invocation after a quicker grace period. */ -@@ -1587,6 +1592,7 @@ void call_rcu_bh(struct rcu_head *head, +@@ -1587,6 +1592,7 @@ void call_rcu_bh(struct rcu_head *head, __call_rcu(head, func, &rcu_bh_state); } EXPORT_SYMBOL_GPL(call_rcu_bh); @@ -12417,6 +12893,194 @@ Index: linux-2.6/include/linux/seqlock.h +} #endif /* __LINUX_SEQLOCK_H */ +Index: linux-2.6/drivers/cpufreq/cpufreq.c +=================================================================== +--- linux-2.6.orig/drivers/cpufreq/cpufreq.c ++++ linux-2.6/drivers/cpufreq/cpufreq.c +@@ -43,7 +43,7 @@ static DEFINE_PER_CPU(struct cpufreq_pol + /* This one keeps track of the previously set governor of a removed CPU */ + static DEFINE_PER_CPU(char[CPUFREQ_NAME_LEN], cpufreq_cpu_governor); + #endif +-static DEFINE_SPINLOCK(cpufreq_driver_lock); ++static DEFINE_RAW_SPINLOCK(cpufreq_driver_lock); + + /* + * cpu_policy_rwsem is a per CPU reader-writer semaphore designed to cure +@@ -138,7 +138,7 @@ struct cpufreq_policy *cpufreq_cpu_get(u + goto err_out; + + /* get the cpufreq driver */ +- spin_lock_irqsave(&cpufreq_driver_lock, flags); ++ raw_spin_lock_irqsave(&cpufreq_driver_lock, flags); + + if (!cpufreq_driver) + goto err_out_unlock; +@@ -156,13 +156,13 @@ struct cpufreq_policy *cpufreq_cpu_get(u + if (!kobject_get(&data->kobj)) + goto err_out_put_module; + +- spin_unlock_irqrestore(&cpufreq_driver_lock, flags); ++ raw_spin_unlock_irqrestore(&cpufreq_driver_lock, flags); + return data; + + err_out_put_module: + module_put(cpufreq_driver->owner); + err_out_unlock: +- spin_unlock_irqrestore(&cpufreq_driver_lock, flags); ++ raw_spin_unlock_irqrestore(&cpufreq_driver_lock, flags); + err_out: + return NULL; + } +@@ -722,10 +722,10 @@ static int cpufreq_add_dev_policy(unsign + return -EBUSY; + } + +- spin_lock_irqsave(&cpufreq_driver_lock, flags); ++ raw_spin_lock_irqsave(&cpufreq_driver_lock, flags); + cpumask_copy(managed_policy->cpus, policy->cpus); + per_cpu(cpufreq_cpu_data, cpu) = managed_policy; +- spin_unlock_irqrestore(&cpufreq_driver_lock, flags); ++ raw_spin_unlock_irqrestore(&cpufreq_driver_lock, flags); + + pr_debug("CPU already managed, adding link\n"); + ret = sysfs_create_link(&sys_dev->kobj, +@@ -821,14 +821,16 @@ static int cpufreq_add_dev_interface(uns + goto err_out_kobj_put; + } + +- spin_lock_irqsave(&cpufreq_driver_lock, flags); ++ get_online_cpus(); + for_each_cpu(j, policy->cpus) { + if (!cpu_online(j)) + continue; ++ raw_spin_lock_irqsave(&cpufreq_driver_lock, flags); + per_cpu(cpufreq_cpu_data, j) = policy; + 
per_cpu(cpufreq_policy_cpu, j) = policy->cpu; ++ raw_spin_unlock_irqrestore(&cpufreq_driver_lock, flags); + } +- spin_unlock_irqrestore(&cpufreq_driver_lock, flags); ++ put_online_cpus(); + + ret = cpufreq_add_dev_symlink(cpu, policy); + if (ret) +@@ -970,10 +972,13 @@ static int cpufreq_add_dev(struct sys_de + + + err_out_unregister: +- spin_lock_irqsave(&cpufreq_driver_lock, flags); +- for_each_cpu(j, policy->cpus) ++ get_online_cpus(); ++ for_each_cpu(j, policy->cpus) { ++ raw_spin_lock_irqsave(&cpufreq_driver_lock, flags); + per_cpu(cpufreq_cpu_data, j) = NULL; +- spin_unlock_irqrestore(&cpufreq_driver_lock, flags); ++ raw_spin_unlock_irqrestore(&cpufreq_driver_lock, flags); ++ } ++ put_online_cpus(); + + kobject_put(&policy->kobj); + wait_for_completion(&policy->kobj_unregister); +@@ -1013,11 +1018,11 @@ static int __cpufreq_remove_dev(struct s + + pr_debug("unregistering CPU %u\n", cpu); + +- spin_lock_irqsave(&cpufreq_driver_lock, flags); ++ raw_spin_lock_irqsave(&cpufreq_driver_lock, flags); + data = per_cpu(cpufreq_cpu_data, cpu); + + if (!data) { +- spin_unlock_irqrestore(&cpufreq_driver_lock, flags); ++ raw_spin_unlock_irqrestore(&cpufreq_driver_lock, flags); + unlock_policy_rwsem_write(cpu); + return -EINVAL; + } +@@ -1031,7 +1036,7 @@ static int __cpufreq_remove_dev(struct s + if (unlikely(cpu != data->cpu)) { + pr_debug("removing link\n"); + cpumask_clear_cpu(cpu, data->cpus); +- spin_unlock_irqrestore(&cpufreq_driver_lock, flags); ++ raw_spin_unlock_irqrestore(&cpufreq_driver_lock, flags); + kobj = &sys_dev->kobj; + cpufreq_cpu_put(data); + unlock_policy_rwsem_write(cpu); +@@ -1040,6 +1045,7 @@ static int __cpufreq_remove_dev(struct s + } + #endif + ++ raw_spin_unlock_irqrestore(&cpufreq_driver_lock, flags); + #ifdef CONFIG_SMP + + #ifdef CONFIG_HOTPLUG_CPU +@@ -1052,15 +1058,17 @@ static int __cpufreq_remove_dev(struct s + * per_cpu(cpufreq_cpu_data) while holding the lock, and remove + * the sysfs links afterwards. 
+ */ ++ get_online_cpus(); + if (unlikely(cpumask_weight(data->cpus) > 1)) { + for_each_cpu(j, data->cpus) { + if (j == cpu) + continue; ++ raw_spin_lock_irqsave(&cpufreq_driver_lock, flags); + per_cpu(cpufreq_cpu_data, j) = NULL; ++ raw_spin_unlock_irqrestore(&cpufreq_driver_lock, flags); + } + } +- +- spin_unlock_irqrestore(&cpufreq_driver_lock, flags); ++ put_online_cpus(); + + if (unlikely(cpumask_weight(data->cpus) > 1)) { + for_each_cpu(j, data->cpus) { +@@ -1079,8 +1087,6 @@ static int __cpufreq_remove_dev(struct s + cpufreq_cpu_put(data); + } + } +-#else +- spin_unlock_irqrestore(&cpufreq_driver_lock, flags); + #endif + + if (cpufreq_driver->target) +@@ -1802,13 +1808,13 @@ int cpufreq_register_driver(struct cpufr + if (driver_data->setpolicy) + driver_data->flags |= CPUFREQ_CONST_LOOPS; + +- spin_lock_irqsave(&cpufreq_driver_lock, flags); ++ raw_spin_lock_irqsave(&cpufreq_driver_lock, flags); + if (cpufreq_driver) { +- spin_unlock_irqrestore(&cpufreq_driver_lock, flags); ++ raw_spin_unlock_irqrestore(&cpufreq_driver_lock, flags); + return -EBUSY; + } + cpufreq_driver = driver_data; +- spin_unlock_irqrestore(&cpufreq_driver_lock, flags); ++ raw_spin_unlock_irqrestore(&cpufreq_driver_lock, flags); + + ret = sysdev_driver_register(&cpu_sysdev_class, + &cpufreq_sysdev_driver); +@@ -1842,9 +1848,9 @@ err_sysdev_unreg: + sysdev_driver_unregister(&cpu_sysdev_class, + &cpufreq_sysdev_driver); + err_null_driver: +- spin_lock_irqsave(&cpufreq_driver_lock, flags); ++ raw_spin_lock_irqsave(&cpufreq_driver_lock, flags); + cpufreq_driver = NULL; +- spin_unlock_irqrestore(&cpufreq_driver_lock, flags); ++ raw_spin_unlock_irqrestore(&cpufreq_driver_lock, flags); + return ret; + } + EXPORT_SYMBOL_GPL(cpufreq_register_driver); +@@ -1870,9 +1876,9 @@ int cpufreq_unregister_driver(struct cpu + sysdev_driver_unregister(&cpu_sysdev_class, &cpufreq_sysdev_driver); + unregister_hotcpu_notifier(&cpufreq_cpu_notifier); + +- spin_lock_irqsave(&cpufreq_driver_lock, flags); ++ raw_spin_lock_irqsave(&cpufreq_driver_lock, flags); + cpufreq_driver = NULL; +- spin_unlock_irqrestore(&cpufreq_driver_lock, flags); ++ raw_spin_unlock_irqrestore(&cpufreq_driver_lock, flags); + + return 0; + } Index: linux-2.6/arch/ia64/kernel/time.c =================================================================== --- linux-2.6.orig/arch/ia64/kernel/time.c @@ -13201,7 +13865,7 @@ Index: linux-2.6/arch/sh/include/asm/rwsem.h { atomic_add(delta, (atomic_t *)(&sem->count)); } -@@ -104,7 +104,7 @@ static inline void rwsem_atomic_add(int +@@ -104,7 +104,7 @@ static inline void rwsem_atomic_add(int /* * downgrade write lock to read lock */ @@ -13479,7 +14143,7 @@ Index: linux-2.6/arch/xtensa/include/asm/rwsem.h { atomic_add(delta, (atomic_t *)(&sem->count)); } -@@ -109,7 +109,7 @@ static inline void rwsem_atomic_add(int +@@ -109,7 +109,7 @@ static inline void rwsem_atomic_add(int /* * downgrade write lock to read lock */ @@ -13704,7 +14368,7 @@ Index: linux-2.6/fs/ext4/inode.c =================================================================== --- linux-2.6.orig/fs/ext4/inode.c +++ linux-2.6/fs/ext4/inode.c -@@ -5853,7 +5853,7 @@ int ext4_page_mkwrite(struct vm_area_str +@@ -5875,7 +5875,7 @@ int ext4_page_mkwrite(struct vm_area_str * Get i_alloc_sem to stop truncates messing with the inode. We cannot * get i_mutex because we are already holding mmap_sem. 
*/ @@ -13713,7 +14377,7 @@ Index: linux-2.6/fs/ext4/inode.c size = i_size_read(inode); if (page->mapping != mapping || size <= page_offset(page) || !PageUptodate(page)) { -@@ -5865,7 +5865,7 @@ int ext4_page_mkwrite(struct vm_area_str +@@ -5887,7 +5887,7 @@ int ext4_page_mkwrite(struct vm_area_str lock_page(page); wait_on_page_writeback(page); if (PageMappedToDisk(page)) { @@ -13722,7 +14386,7 @@ Index: linux-2.6/fs/ext4/inode.c return VM_FAULT_LOCKED; } -@@ -5883,7 +5883,7 @@ int ext4_page_mkwrite(struct vm_area_str +@@ -5905,7 +5905,7 @@ int ext4_page_mkwrite(struct vm_area_str if (page_has_buffers(page)) { if (!walk_page_buffers(NULL, page_buffers(page), 0, len, NULL, ext4_bh_unmapped)) { @@ -13731,7 +14395,7 @@ Index: linux-2.6/fs/ext4/inode.c return VM_FAULT_LOCKED; } } -@@ -5912,11 +5912,11 @@ int ext4_page_mkwrite(struct vm_area_str +@@ -5934,11 +5934,11 @@ int ext4_page_mkwrite(struct vm_area_str */ lock_page(page); wait_on_page_writeback(page); @@ -13816,7 +14480,7 @@ Index: linux-2.6/fs/ocfs2/file.c have_alloc_sem = 1; /* communicate with ocfs2_dio_end_io */ ocfs2_iocb_set_sem_locked(iocb); -@@ -2290,7 +2290,7 @@ static ssize_t ocfs2_file_aio_write(stru +@@ -2290,7 +2290,7 @@ relock: */ if (direct_io && !can_do_direct) { ocfs2_rw_unlock(inode, rw_level); @@ -13825,7 +14489,7 @@ Index: linux-2.6/fs/ocfs2/file.c have_alloc_sem = 0; rw_level = -1; -@@ -2379,7 +2379,7 @@ static ssize_t ocfs2_file_aio_write(stru +@@ -2379,7 +2379,7 @@ out: out_sems: if (have_alloc_sem) { @@ -14610,7 +15274,7 @@ Index: linux-2.6/kernel/hrtimer.c BUG_ON(!cpu_base->hres_active); cpu_base->nr_events++; -@@ -1278,6 +1448,14 @@ void hrtimer_interrupt(struct clock_even +@@ -1278,6 +1448,14 @@ retry: timer = container_of(node, struct hrtimer, node); @@ -14625,7 +15289,7 @@ Index: linux-2.6/kernel/hrtimer.c /* * The immediate goal for using the softexpires is * minimizing wakeups, not running timers at the -@@ -1301,7 +1479,10 @@ void hrtimer_interrupt(struct clock_even +@@ -1301,7 +1479,10 @@ retry: break; } @@ -14637,7 +15301,7 @@ Index: linux-2.6/kernel/hrtimer.c } } -@@ -1316,6 +1497,10 @@ void hrtimer_interrupt(struct clock_even +@@ -1316,6 +1497,10 @@ retry: if (expires_next.tv64 == KTIME_MAX || !tick_program_event(expires_next, 0)) { cpu_base->hang_detected = 0; @@ -17364,6 +18028,12 @@ Index: linux-2.6/drivers/misc/hwlat_detector.c + +module_init(detector_init); +module_exit(detector_exit); +Index: linux-2.6/localversion-rt +=================================================================== +--- /dev/null ++++ linux-2.6/localversion-rt +@@ -0,0 +1 @@ ++-rt13 Index: linux-2.6/arch/arm/kernel/early_printk.c =================================================================== --- linux-2.6.orig/arch/arm/kernel/early_printk.c @@ -17566,7 +18236,7 @@ Index: linux-2.6/arch/sparc/kernel/setup_64.c =================================================================== --- linux-2.6.orig/arch/sparc/kernel/setup_64.c +++ linux-2.6/arch/sparc/kernel/setup_64.c -@@ -278,6 +278,12 @@ void __init boot_cpu_id_too_large(int cp +@@ -463,6 +463,12 @@ static void __init init_sparc64_elf_hwca popc_patch(); } @@ -17579,7 +18249,7 @@ Index: linux-2.6/arch/sparc/kernel/setup_64.c void __init setup_arch(char **cmdline_p) { /* Initialize PROM console and command line. 
*/ -@@ -289,7 +295,7 @@ void __init setup_arch(char **cmdline_p) +@@ -474,7 +480,7 @@ void __init setup_arch(char **cmdline_p) #ifdef CONFIG_EARLYFB if (btext_find_display()) #endif @@ -17688,7 +18358,7 @@ Index: linux-2.6/arch/unicore32/kernel/early_printk.c =================================================================== --- linux-2.6.orig/arch/unicore32/kernel/early_printk.c +++ linux-2.6/arch/unicore32/kernel/early_printk.c -@@ -33,21 +33,17 @@ static struct console early_ocd_console +@@ -33,21 +33,17 @@ static struct console early_ocd_console .index = -1, }; @@ -17796,8 +18466,6 @@ Index: linux-2.6/include/linux/printk.h extern int printk_delay_msec; extern int dmesg_restrict; extern int kptr_restrict; -Index: linux-2.6/localversion-rt -=================================================================== Index: linux-2.6/kernel/Kconfig.preempt =================================================================== --- linux-2.6.orig/kernel/Kconfig.preempt @@ -17966,7 +18634,7 @@ Index: linux-2.6/drivers/ata/libata-sff.c } else { buf = page_address(page); ap->ops->sff_data_xfer(qc->dev, buf + offset, qc->sect_size, -@@ -863,7 +863,7 @@ static int __atapi_pio_bytes(struct ata_ +@@ -863,7 +863,7 @@ next_sg: unsigned long flags; /* FIXME: use bounce buffer */ @@ -17975,7 +18643,7 @@ Index: linux-2.6/drivers/ata/libata-sff.c buf = kmap_atomic(page, KM_IRQ0); /* do the actual data transfer */ -@@ -871,7 +871,7 @@ static int __atapi_pio_bytes(struct ata_ +@@ -871,7 +871,7 @@ next_sg: count, rw); kunmap_atomic(buf, KM_IRQ0); @@ -17997,7 +18665,7 @@ Index: linux-2.6/drivers/ide/alim15x3.c if (m5229_revision < 0xC2) { /* -@@ -325,7 +325,7 @@ static int init_chipset_ali15x3(struct p +@@ -325,7 +325,7 @@ out: } pci_dev_put(north); pci_dev_put(isa_dev); @@ -18032,7 +18700,7 @@ Index: linux-2.6/drivers/ide/ide-io-std.c =================================================================== --- linux-2.6.orig/drivers/ide/ide-io-std.c +++ linux-2.6/drivers/ide/ide-io-std.c -@@ -174,7 +174,7 @@ void ide_input_data(ide_drive_t *drive, +@@ -174,7 +174,7 @@ void ide_input_data(ide_drive_t *drive, unsigned long uninitialized_var(flags); if ((io_32bit & 2) && !mmio) { @@ -18041,7 +18709,7 @@ Index: linux-2.6/drivers/ide/ide-io-std.c ata_vlb_sync(io_ports->nsect_addr); } -@@ -185,7 +185,7 @@ void ide_input_data(ide_drive_t *drive, +@@ -185,7 +185,7 @@ void ide_input_data(ide_drive_t *drive, insl(data_addr, buf, words); if ((io_32bit & 2) && !mmio) @@ -18085,7 +18753,7 @@ Index: linux-2.6/drivers/ide/ide-iops.c =================================================================== --- linux-2.6.orig/drivers/ide/ide-iops.c +++ linux-2.6/drivers/ide/ide-iops.c -@@ -129,12 +129,12 @@ int __ide_wait_stat(ide_drive_t *drive, +@@ -129,12 +129,12 @@ int __ide_wait_stat(ide_drive_t *drive, if ((stat & ATA_BUSY) == 0) break; @@ -18249,7 +18917,7 @@ Index: linux-2.6/kernel/res_counter.c for (c = counter; c != NULL; c = c->parent) { spin_lock(&c->lock); ret = res_counter_charge_locked(c, val); -@@ -62,7 +62,7 @@ int res_counter_charge(struct res_counte +@@ -62,7 +62,7 @@ undo: spin_unlock(&u->lock); } done: @@ -18640,7 +19308,7 @@ Index: linux-2.6/kernel/irq/handle.c =================================================================== --- linux-2.6.orig/kernel/irq/handle.c +++ linux-2.6/kernel/irq/handle.c -@@ -156,8 +156,11 @@ handle_irq_event_percpu(struct irq_desc +@@ -156,8 +156,11 @@ handle_irq_event_percpu(struct irq_desc action = action->next; } while (action); @@ -19439,7 +20107,7 @@ Index: linux-2.6/include/linux/mm.h 
=================================================================== --- linux-2.6.orig/include/linux/mm.h +++ linux-2.6/include/linux/mm.h -@@ -1222,27 +1222,59 @@ static inline pmd_t *pmd_alloc(struct mm +@@ -1230,27 +1230,59 @@ static inline pmd_t *pmd_alloc(struct mm * overflow into the next struct page (as it might with DEBUG_SPINLOCK). * When freeing, reset page->mapping so free_pages_check won't complain. */ @@ -19632,7 +20300,7 @@ Index: linux-2.6/lib/radix-tree.c /* * Load up this CPU's radix_tree_node buffer with sufficient objects to * ensure that the addition of a single element in the tree cannot fail. On -@@ -240,6 +242,7 @@ int radix_tree_preload(gfp_t gfp_mask) +@@ -240,6 +242,7 @@ out: return ret; } EXPORT_SYMBOL(radix_tree_preload); @@ -19725,7 +20393,7 @@ Index: linux-2.6/ipc/msg.c } } -@@ -611,6 +619,12 @@ static inline int pipelined_send(struct +@@ -611,6 +619,12 @@ static inline int pipelined_send(struct !security_msg_queue_msgrcv(msq, msg, msr->r_tsk, msr->r_msgtype, msr->r_mode)) { @@ -19738,7 +20406,7 @@ Index: linux-2.6/ipc/msg.c list_del(&msr->r_list); if (msr->r_maxsize < msg->m_ts) { msr->r_msg = NULL; -@@ -624,9 +638,11 @@ static inline int pipelined_send(struct +@@ -624,9 +638,11 @@ static inline int pipelined_send(struct wake_up_process(msr->r_tsk); smp_mb(); msr->r_msg = msg; @@ -19793,7 +20461,7 @@ Index: linux-2.6/net/ipv4/route.c =================================================================== --- linux-2.6.orig/net/ipv4/route.c +++ linux-2.6/net/ipv4/route.c -@@ -241,7 +241,7 @@ struct rt_hash_bucket { +@@ -242,7 +242,7 @@ struct rt_hash_bucket { }; #if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK) || \ @@ -19806,7 +20474,7 @@ Index: linux-2.6/include/linux/timer.h =================================================================== --- linux-2.6.orig/include/linux/timer.h +++ linux-2.6/include/linux/timer.h -@@ -276,7 +276,7 @@ extern void add_timer(struct timer_list +@@ -276,7 +276,7 @@ extern void add_timer(struct timer_list extern int try_to_del_timer_sync(struct timer_list *timer); @@ -19927,7 +20595,7 @@ Index: linux-2.6/kernel/timer.c /** * del_timer - deactive a timer. * @timer: the timer to be deactivated -@@ -953,7 +1004,7 @@ int try_to_del_timer_sync(struct timer_l +@@ -953,7 +1004,7 @@ out: } EXPORT_SYMBOL(try_to_del_timer_sync); @@ -20110,7 +20778,7 @@ Index: linux-2.6/kernel/itimer.c =================================================================== --- linux-2.6.orig/kernel/itimer.c +++ linux-2.6/kernel/itimer.c -@@ -214,6 +214,7 @@ int do_setitimer(int which, struct itime +@@ -214,6 +214,7 @@ again: /* We are sharing ->siglock with it_real_fn() */ if (hrtimer_try_to_cancel(timer) < 0) { spin_unlock_irq(&tsk->sighand->siglock); @@ -20256,7 +20924,7 @@ Index: linux-2.6/kernel/stop_machine.c } /* signal completion unless @done is NULL */ -@@ -55,8 +55,10 @@ static void cpu_stop_signal_done(struct +@@ -55,8 +55,10 @@ static void cpu_stop_signal_done(struct if (done) { if (executed) done->executed = true; @@ -20312,14 +20980,14 @@ Index: linux-2.6/kernel/stop_machine.c return done.executed ? 
done.ret : -ENOENT; } -@@ -134,6 +152,7 @@ void stop_one_cpu_nowait(unsigned int cp +@@ -133,6 +151,7 @@ void stop_one_cpu_nowait(unsigned int cp + } - /* static data for stop_cpus */ - static DEFINE_MUTEX(stop_cpus_mutex); + DEFINE_MUTEX(stop_cpus_mutex); +static DEFINE_MUTEX(stopper_lock); + /* static data for stop_cpus */ static DEFINE_PER_CPU(struct cpu_stop_work, stop_cpus_work); - int __stop_cpus(const struct cpumask *cpumask, cpu_stop_fn_t fn, void *arg) @@ -152,17 +171,16 @@ int __stop_cpus(const struct cpumask *cp cpu_stop_init_done(&done, cpumask_weight(cpumask)); @@ -20343,7 +21011,7 @@ Index: linux-2.6/kernel/stop_machine.c return done.executed ? done.ret : -ENOENT; } -@@ -250,13 +268,13 @@ static int cpu_stopper_thread(void *data +@@ -250,13 +268,13 @@ repeat: } work = NULL; @@ -20359,7 +21027,7 @@ Index: linux-2.6/kernel/stop_machine.c if (work) { cpu_stop_fn_t fn = work->fn; -@@ -266,6 +284,16 @@ static int cpu_stopper_thread(void *data +@@ -266,6 +284,16 @@ repeat: __set_current_state(TASK_RUNNING); @@ -20376,7 +21044,7 @@ Index: linux-2.6/kernel/stop_machine.c /* cpu stop callbacks are not allowed to sleep */ preempt_disable(); -@@ -280,7 +308,13 @@ static int cpu_stopper_thread(void *data +@@ -280,7 +308,13 @@ repeat: kallsyms_lookup((unsigned long)fn, NULL, NULL, NULL, ksym_buf), arg); @@ -20580,7 +21248,7 @@ Index: linux-2.6/kernel/cpu.c if (num_online_cpus() == 1) return -EBUSY; -@@ -224,7 +323,19 @@ static int __ref _cpu_down(unsigned int +@@ -224,7 +323,19 @@ static int __ref _cpu_down(unsigned int if (!cpu_online(cpu)) return -EINVAL; @@ -20601,7 +21269,7 @@ Index: linux-2.6/kernel/cpu.c err = __cpu_notify(CPU_DOWN_PREPARE | mod, hcpu, -1, &nr_calls); if (err) { -@@ -232,7 +343,16 @@ static int __ref _cpu_down(unsigned int +@@ -232,7 +343,16 @@ static int __ref _cpu_down(unsigned int __cpu_notify(CPU_DOWN_FAILED | mod, hcpu, nr_calls, NULL); printk("%s: attempt to take down CPU %u failed\n", __func__, cpu); @@ -20619,7 +21287,7 @@ Index: linux-2.6/kernel/cpu.c } err = __stop_machine(take_cpu_down, &tcd_param, cpumask_of(cpu)); -@@ -263,6 +383,9 @@ static int __ref _cpu_down(unsigned int +@@ -263,6 +383,9 @@ static int __ref _cpu_down(unsigned int check_for_tasks(cpu); out_release: @@ -20671,38 +21339,41 @@ Index: linux-2.6/kernel/trace/trace_output.c return ret; } -Index: linux-2.6/kernel/lockdep.c +Index: linux-2.6/kernel/sched_debug.c =================================================================== ---- linux-2.6.orig/kernel/lockdep.c -+++ linux-2.6/kernel/lockdep.c -@@ -2859,10 +2859,7 @@ static int mark_lock(struct task_struct - void lockdep_init_map(struct lockdep_map *lock, const char *name, - struct lock_class_key *key, int subclass) - { -- int i; -- -- for (i = 0; i < NR_LOCKDEP_CACHING_CLASSES; i++) -- lock->class_cache[i] = NULL; -+ memset(lock, 0, sizeof(*lock)); +--- linux-2.6.orig/kernel/sched_debug.c ++++ linux-2.6/kernel/sched_debug.c +@@ -235,6 +235,7 @@ void print_rt_rq(struct seq_file *m, int + P(rt_throttled); + PN(rt_time); + PN(rt_runtime); ++ P(rt_nr_migratory); - #ifdef CONFIG_LOCK_STAT - lock->cpu = raw_smp_processor_id(); -@@ -3341,6 +3338,7 @@ static void check_flags(unsigned long fl - } - } - -+#ifndef CONFIG_PREEMPT_RT_FULL - /* - * We dont accurately track softirq state in e.g. 
- * hardirq contexts (such as on 4KSTACKS), so only -@@ -3352,6 +3350,7 @@ static void check_flags(unsigned long fl - else - DEBUG_LOCKS_WARN_ON(!current->softirqs_enabled); - } + #undef PN + #undef P +@@ -484,6 +485,10 @@ void proc_sched_show_task(struct task_st + P(se.load.weight); + P(policy); + P(prio); ++#ifdef CONFIG_PREEMPT_RT_FULL ++ P(migrate_disable); +#endif ++ P(rt.nr_cpus_allowed); + #undef PN + #undef __PN + #undef P +Index: linux-2.6/kernel/trace/trace.h +=================================================================== +--- linux-2.6.orig/kernel/trace/trace.h ++++ linux-2.6/kernel/trace/trace.h +@@ -322,7 +322,6 @@ void trace_init_global_iter(struct trace - if (!debug_locks) - print_irqtrace_events(current); + void tracing_iter_reset(struct trace_iterator *iter, int cpu); + +-void default_wait_pipe(struct trace_iterator *iter); + void poll_wait_pipe(struct trace_iterator *iter); + + void ftrace(struct trace_array *tr, Index: linux-2.6/kernel/Kconfig.locks =================================================================== --- linux-2.6.orig/kernel/Kconfig.locks @@ -21082,7 +21753,7 @@ Index: linux-2.6/kernel/rtmutex.c /* * Calculate task priority from the waiter list priority * -@@ -136,6 +153,14 @@ static void rt_mutex_adjust_prio(struct +@@ -136,6 +153,14 @@ static void rt_mutex_adjust_prio(struct raw_spin_unlock_irqrestore(&task->pi_lock, flags); } @@ -21675,7 +22346,7 @@ Index: linux-2.6/kernel/futex.c =================================================================== --- linux-2.6.orig/kernel/futex.c +++ linux-2.6/kernel/futex.c -@@ -1380,6 +1380,16 @@ static int futex_requeue(u32 __user *uad +@@ -1410,6 +1410,16 @@ retry_private: requeue_pi_wake_futex(this, &key2, hb2); drop_count++; continue; @@ -21692,7 +22363,7 @@ Index: linux-2.6/kernel/futex.c } else if (ret) { /* -EDEADLK */ this->pi_state = NULL; -@@ -2224,7 +2234,7 @@ static int futex_wait_requeue_pi(u32 __u +@@ -2254,7 +2264,7 @@ static int futex_wait_requeue_pi(u32 __u struct hrtimer_sleeper timeout, *to = NULL; struct rt_mutex_waiter rt_waiter; struct rt_mutex *pi_mutex = NULL; @@ -21701,7 +22372,7 @@ Index: linux-2.6/kernel/futex.c union futex_key key2 = FUTEX_KEY_INIT; struct futex_q q = futex_q_init; int res, ret; -@@ -2246,8 +2256,7 @@ static int futex_wait_requeue_pi(u32 __u +@@ -2276,8 +2286,7 @@ static int futex_wait_requeue_pi(u32 __u * The waiter is allocated on our stack, manipulated by the requeue * code while we sleep on uaddr. */ @@ -21711,7 +22382,7 @@ Index: linux-2.6/kernel/futex.c ret = get_futex_key(uaddr2, flags & FLAGS_SHARED, &key2, VERIFY_WRITE); if (unlikely(ret != 0)) -@@ -2268,20 +2277,55 @@ static int futex_wait_requeue_pi(u32 __u +@@ -2298,20 +2307,55 @@ static int futex_wait_requeue_pi(u32 __u /* Queue the futex_q, drop the hb lock, wait for wakeup. */ futex_wait_queue_me(hb, &q, to); @@ -21778,7 +22449,7 @@ Index: linux-2.6/kernel/futex.c /* Check if the requeue code acquired the second futex for us. */ if (!q.rt_waiter) { -@@ -2290,9 +2334,10 @@ static int futex_wait_requeue_pi(u32 __u +@@ -2320,9 +2364,10 @@ static int futex_wait_requeue_pi(u32 __u * did a lock-steal - fix up the PI-state in that case. 
*/ if (q.pi_state && (q.pi_state->owner != current)) { @@ -21791,7 +22462,7 @@ Index: linux-2.6/kernel/futex.c } } else { /* -@@ -2305,7 +2350,8 @@ static int futex_wait_requeue_pi(u32 __u +@@ -2335,7 +2380,8 @@ static int futex_wait_requeue_pi(u32 __u ret = rt_mutex_finish_proxy_lock(pi_mutex, to, &rt_waiter, 1); debug_rt_mutex_free_waiter(&rt_waiter); @@ -23517,7 +24188,7 @@ Index: linux-2.6/drivers/tty/serial/8250.c } while (l != end); spin_unlock(&i->lock); -@@ -2892,14 +2895,14 @@ serial8250_console_write(struct console +@@ -2892,14 +2895,14 @@ serial8250_console_write(struct console touch_nmi_watchdog(); @@ -23539,7 +24210,7 @@ Index: linux-2.6/drivers/tty/serial/8250.c /* * First save the IER then disable the interrupts -@@ -2931,8 +2934,7 @@ serial8250_console_write(struct console +@@ -2931,8 +2934,7 @@ serial8250_console_write(struct console check_modem_status(up); if (locked) @@ -23911,7 +24582,7 @@ Index: linux-2.6/arch/x86/kernel/cpu/common.c =================================================================== --- linux-2.6.orig/arch/x86/kernel/cpu/common.c +++ linux-2.6/arch/x86/kernel/cpu/common.c -@@ -1050,7 +1050,9 @@ DEFINE_PER_CPU(unsigned int, irq_count) +@@ -1050,7 +1050,9 @@ DEFINE_PER_CPU(unsigned int, irq_count) */ static const unsigned int exception_stack_sizes[N_EXCEPTION_STACKS] = { [0 ... N_EXCEPTION_STACKS - 1] = EXCEPTION_STKSZ, @@ -24026,7 +24697,7 @@ Index: linux-2.6/mm/vmalloc.c =================================================================== --- linux-2.6.orig/mm/vmalloc.c +++ linux-2.6/mm/vmalloc.c -@@ -788,7 +788,7 @@ static struct vmap_block *new_vmap_block +@@ -789,7 +789,7 @@ static struct vmap_block *new_vmap_block struct vmap_block *vb; struct vmap_area *va; unsigned long vb_idx; @@ -24035,7 +24706,7 @@ Index: linux-2.6/mm/vmalloc.c node = numa_node_id(); -@@ -827,12 +827,13 @@ static struct vmap_block *new_vmap_block +@@ -828,12 +828,13 @@ static struct vmap_block *new_vmap_block BUG_ON(err); radix_tree_preload_end(); @@ -24051,7 +24722,7 @@ Index: linux-2.6/mm/vmalloc.c return vb; } -@@ -913,7 +914,7 @@ static void *vb_alloc(unsigned long size +@@ -914,7 +915,7 @@ static void *vb_alloc(unsigned long size struct vmap_block *vb; unsigned long addr = 0; unsigned int order; @@ -24060,7 +24731,7 @@ Index: linux-2.6/mm/vmalloc.c BUG_ON(size & ~PAGE_MASK); BUG_ON(size > PAGE_SIZE*VMAP_MAX_ALLOC); -@@ -921,7 +922,8 @@ static void *vb_alloc(unsigned long size +@@ -922,7 +923,8 @@ static void *vb_alloc(unsigned long size again: rcu_read_lock(); @@ -24070,7 +24741,7 @@ Index: linux-2.6/mm/vmalloc.c list_for_each_entry_rcu(vb, &vbq->free, free_list) { int i; -@@ -958,7 +960,7 @@ static void *vb_alloc(unsigned long size +@@ -959,7 +961,7 @@ next: if (purge) purge_fragmented_blocks_thiscpu(); @@ -24257,7 +24928,7 @@ Index: linux-2.6/net/ipv4/icmp.c #include #include #include -@@ -799,6 +800,30 @@ static void icmp_redirect(struct sk_buff +@@ -801,6 +802,30 @@ out_err: } /* @@ -24288,7 +24959,7 @@ Index: linux-2.6/net/ipv4/icmp.c * Handle ICMP_ECHO ("ping") requests. * * RFC 1122: 3.2.2.6 MUST have an echo server that answers ICMP echo -@@ -825,6 +850,11 @@ static void icmp_echo(struct sk_buff *sk +@@ -827,6 +852,11 @@ static void icmp_echo(struct sk_buff *sk icmp_param.data_len = skb->len; icmp_param.head_len = sizeof(struct icmphdr); icmp_reply(&icmp_param, skb); @@ -24358,7 +25029,7 @@ Index: linux-2.6/kernel/debug/kdb/kdb_io.c /* Serialize kdb_printf if multiple cpus try to write at once. 
* But if any cpu goes recursive in kdb, just print the output, -@@ -807,7 +804,6 @@ int vkdb_printf(const char *fmt, va_list +@@ -807,7 +804,6 @@ kdb_print_out: } else { __release(kdb_printf_lock); } @@ -24378,6 +25049,36 @@ Index: linux-2.6/kernel/debug/kdb/kdb_io.c return r; } +Index: linux-2.6/kernel/ksysfs.c +=================================================================== +--- linux-2.6.orig/kernel/ksysfs.c ++++ linux-2.6/kernel/ksysfs.c +@@ -132,6 +132,15 @@ KERNEL_ATTR_RO(vmcoreinfo); + + #endif /* CONFIG_KEXEC */ + ++#if defined(CONFIG_PREEMPT_RT_FULL) ++static ssize_t realtime_show(struct kobject *kobj, ++ struct kobj_attribute *attr, char *buf) ++{ ++ return sprintf(buf, "%d\n", 1); ++} ++KERNEL_ATTR_RO(realtime); ++#endif ++ + /* whether file capabilities are enabled */ + static ssize_t fscaps_show(struct kobject *kobj, + struct kobj_attribute *attr, char *buf) +@@ -181,6 +190,9 @@ static struct attribute * kernel_attrs[] + &kexec_crash_size_attr.attr, + &vmcoreinfo_attr.attr, + #endif ++#ifdef CONFIG_PREEMPT_RT_FULL ++ &realtime_attr.attr, ++#endif + NULL + }; + Index: linux-2.6/arch/Kconfig =================================================================== --- linux-2.6.orig/arch/Kconfig @@ -24431,7 +25132,7 @@ Index: linux-2.6/init/Makefile =================================================================== --- linux-2.6.orig/init/Makefile +++ linux-2.6/init/Makefile -@@ -29,4 +29,4 @@ $(obj)/version.o: include/generated/comp +@@ -29,4 +29,4 @@ silent_chk_compile.h = : include/generated/compile.h: FORCE @$($(quiet)chk_compile.h) $(Q)$(CONFIG_SHELL) $(srctree)/scripts/mkcompile_h $@ \ @@ -24459,368 +25160,3 @@ Index: linux-2.6/scripts/mkcompile_h UTS_VERSION="$UTS_VERSION $CONFIG_FLAGS $TIMESTAMP" # Truncate to maximum length -Index: linux-2.6/kernel/sched_cpupri.c -=================================================================== ---- linux-2.6.orig/kernel/sched_cpupri.c -+++ linux-2.6/kernel/sched_cpupri.c -@@ -47,9 +47,6 @@ static int convert_prio(int prio) - return cpupri; - } - --#define for_each_cpupri_active(array, idx) \ -- for_each_set_bit(idx, array, CPUPRI_NR_PRIORITIES) -- - /** - * cpupri_find - find the best (lowest-pri) CPU in the system - * @cp: The cpupri context -@@ -71,11 +68,38 @@ int cpupri_find(struct cpupri *cp, struc - int idx = 0; - int task_pri = convert_prio(p->prio); - -- for_each_cpupri_active(cp->pri_active, idx) { -+ if (task_pri >= MAX_RT_PRIO) -+ return 0; -+ -+ for (idx = 0; idx < task_pri; idx++) { - struct cpupri_vec *vec = &cp->pri_to_cpu[idx]; -+ int skip = 0; - -- if (idx >= task_pri) -- break; -+ if (!atomic_read(&(vec)->count)) -+ skip = 1; -+ /* -+ * When looking at the vector, we need to read the counter, -+ * do a memory barrier, then read the mask. -+ * -+ * Note: This is still all racey, but we can deal with it. -+ * Ideally, we only want to look at masks that are set. -+ * -+ * If a mask is not set, then the only thing wrong is that we -+ * did a little more work than necessary. -+ * -+ * If we read a zero count but the mask is set, because of the -+ * memory barriers, that can only happen when the highest prio -+ * task for a run queue has left the run queue, in which case, -+ * it will be followed by a pull. If the task we are processing -+ * fails to find a proper place to go, that pull request will -+ * pull this task if the run queue is running at a lower -+ * priority. 
-+ */ -+ smp_rmb(); -+ -+ /* Need to do the rmb for every iteration */ -+ if (skip) -+ continue; - - if (cpumask_any_and(&p->cpus_allowed, vec->mask) >= nr_cpu_ids) - continue; -@@ -115,7 +139,7 @@ void cpupri_set(struct cpupri *cp, int c - { - int *currpri = &cp->cpu_to_pri[cpu]; - int oldpri = *currpri; -- unsigned long flags; -+ int do_mb = 0; - - newpri = convert_prio(newpri); - -@@ -134,26 +158,41 @@ void cpupri_set(struct cpupri *cp, int c - if (likely(newpri != CPUPRI_INVALID)) { - struct cpupri_vec *vec = &cp->pri_to_cpu[newpri]; - -- raw_spin_lock_irqsave(&vec->lock, flags); -- - cpumask_set_cpu(cpu, vec->mask); -- vec->count++; -- if (vec->count == 1) -- set_bit(newpri, cp->pri_active); -- -- raw_spin_unlock_irqrestore(&vec->lock, flags); -+ /* -+ * When adding a new vector, we update the mask first, -+ * do a write memory barrier, and then update the count, to -+ * make sure the vector is visible when count is set. -+ */ -+ smp_mb__before_atomic_inc(); -+ atomic_inc(&(vec)->count); -+ do_mb = 1; - } - if (likely(oldpri != CPUPRI_INVALID)) { - struct cpupri_vec *vec = &cp->pri_to_cpu[oldpri]; - -- raw_spin_lock_irqsave(&vec->lock, flags); -- -- vec->count--; -- if (!vec->count) -- clear_bit(oldpri, cp->pri_active); -+ /* -+ * Because the order of modification of the vec->count -+ * is important, we must make sure that the update -+ * of the new prio is seen before we decrement the -+ * old prio. This makes sure that the loop sees -+ * one or the other when we raise the priority of -+ * the run queue. We don't care about when we lower the -+ * priority, as that will trigger an rt pull anyway. -+ * -+ * We only need to do a memory barrier if we updated -+ * the new priority vec. -+ */ -+ if (do_mb) -+ smp_mb__after_atomic_inc(); -+ -+ /* -+ * When removing from the vector, we decrement the counter first -+ * do a memory barrier and then clear the mask. 
-+ */ -+ atomic_dec(&(vec)->count); -+ smp_mb__after_atomic_inc(); - cpumask_clear_cpu(cpu, vec->mask); -- -- raw_spin_unlock_irqrestore(&vec->lock, flags); - } - - *currpri = newpri; -@@ -175,8 +214,7 @@ int cpupri_init(struct cpupri *cp) - for (i = 0; i < CPUPRI_NR_PRIORITIES; i++) { - struct cpupri_vec *vec = &cp->pri_to_cpu[i]; - -- raw_spin_lock_init(&vec->lock); -- vec->count = 0; -+ atomic_set(&vec->count, 0); - if (!zalloc_cpumask_var(&vec->mask, GFP_KERNEL)) - goto cleanup; - } -Index: linux-2.6/kernel/sched_cpupri.h -=================================================================== ---- linux-2.6.orig/kernel/sched_cpupri.h -+++ linux-2.6/kernel/sched_cpupri.h -@@ -12,9 +12,8 @@ - /* values 2-101 are RT priorities 0-99 */ - - struct cpupri_vec { -- raw_spinlock_t lock; -- int count; -- cpumask_var_t mask; -+ atomic_t count; -+ cpumask_var_t mask; - }; - - struct cpupri { -Index: linux-2.6/kernel/ksysfs.c -=================================================================== ---- linux-2.6.orig/kernel/ksysfs.c -+++ linux-2.6/kernel/ksysfs.c -@@ -132,6 +132,15 @@ KERNEL_ATTR_RO(vmcoreinfo); - - #endif /* CONFIG_KEXEC */ - -+#if defined(CONFIG_PREEMPT_RT_FULL) -+static ssize_t realtime_show(struct kobject *kobj, -+ struct kobj_attribute *attr, char *buf) -+{ -+ return sprintf(buf, "%d\n", 1); -+} -+KERNEL_ATTR_RO(realtime); -+#endif -+ - /* whether file capabilities are enabled */ - static ssize_t fscaps_show(struct kobject *kobj, - struct kobj_attribute *attr, char *buf) -@@ -181,6 +190,9 @@ static struct attribute * kernel_attrs[] - &kexec_crash_size_attr.attr, - &vmcoreinfo_attr.attr, - #endif -+#ifdef CONFIG_PREEMPT_RT_FULL -+ &realtime_attr.attr, -+#endif - NULL - }; - -Index: linux-2.6/drivers/cpufreq/cpufreq.c -=================================================================== ---- linux-2.6.orig/drivers/cpufreq/cpufreq.c -+++ linux-2.6/drivers/cpufreq/cpufreq.c -@@ -43,7 +43,7 @@ static DEFINE_PER_CPU(struct cpufreq_pol - /* This one keeps track of the previously set governor of a removed CPU */ - static DEFINE_PER_CPU(char[CPUFREQ_NAME_LEN], cpufreq_cpu_governor); - #endif --static DEFINE_SPINLOCK(cpufreq_driver_lock); -+static DEFINE_RAW_SPINLOCK(cpufreq_driver_lock); - - /* - * cpu_policy_rwsem is a per CPU reader-writer semaphore designed to cure -@@ -138,7 +138,7 @@ struct cpufreq_policy *cpufreq_cpu_get(u - goto err_out; - - /* get the cpufreq driver */ -- spin_lock_irqsave(&cpufreq_driver_lock, flags); -+ raw_spin_lock_irqsave(&cpufreq_driver_lock, flags); - - if (!cpufreq_driver) - goto err_out_unlock; -@@ -156,13 +156,13 @@ struct cpufreq_policy *cpufreq_cpu_get(u - if (!kobject_get(&data->kobj)) - goto err_out_put_module; - -- spin_unlock_irqrestore(&cpufreq_driver_lock, flags); -+ raw_spin_unlock_irqrestore(&cpufreq_driver_lock, flags); - return data; - - err_out_put_module: - module_put(cpufreq_driver->owner); - err_out_unlock: -- spin_unlock_irqrestore(&cpufreq_driver_lock, flags); -+ raw_spin_unlock_irqrestore(&cpufreq_driver_lock, flags); - err_out: - return NULL; - } -@@ -722,10 +722,10 @@ static int cpufreq_add_dev_policy(unsign - return -EBUSY; - } - -- spin_lock_irqsave(&cpufreq_driver_lock, flags); -+ raw_spin_lock_irqsave(&cpufreq_driver_lock, flags); - cpumask_copy(managed_policy->cpus, policy->cpus); - per_cpu(cpufreq_cpu_data, cpu) = managed_policy; -- spin_unlock_irqrestore(&cpufreq_driver_lock, flags); -+ raw_spin_unlock_irqrestore(&cpufreq_driver_lock, flags); - - pr_debug("CPU already managed, adding link\n"); - ret = 
sysfs_create_link(&sys_dev->kobj, -@@ -821,14 +821,16 @@ static int cpufreq_add_dev_interface(uns - goto err_out_kobj_put; - } - -- spin_lock_irqsave(&cpufreq_driver_lock, flags); -+ get_online_cpus(); - for_each_cpu(j, policy->cpus) { - if (!cpu_online(j)) - continue; -+ raw_spin_lock_irqsave(&cpufreq_driver_lock, flags); - per_cpu(cpufreq_cpu_data, j) = policy; - per_cpu(cpufreq_policy_cpu, j) = policy->cpu; -+ raw_spin_unlock_irqrestore(&cpufreq_driver_lock, flags); - } -- spin_unlock_irqrestore(&cpufreq_driver_lock, flags); -+ put_online_cpus(); - - ret = cpufreq_add_dev_symlink(cpu, policy); - if (ret) -@@ -970,10 +972,13 @@ static int cpufreq_add_dev(struct sys_de - - - err_out_unregister: -- spin_lock_irqsave(&cpufreq_driver_lock, flags); -- for_each_cpu(j, policy->cpus) -+ get_online_cpus(); -+ for_each_cpu(j, policy->cpus) { -+ raw_spin_lock_irqsave(&cpufreq_driver_lock, flags); - per_cpu(cpufreq_cpu_data, j) = NULL; -- spin_unlock_irqrestore(&cpufreq_driver_lock, flags); -+ raw_spin_unlock_irqrestore(&cpufreq_driver_lock, flags); -+ } -+ put_online_cpus(); - - kobject_put(&policy->kobj); - wait_for_completion(&policy->kobj_unregister); -@@ -1013,11 +1018,11 @@ static int __cpufreq_remove_dev(struct s - - pr_debug("unregistering CPU %u\n", cpu); - -- spin_lock_irqsave(&cpufreq_driver_lock, flags); -+ raw_spin_lock_irqsave(&cpufreq_driver_lock, flags); - data = per_cpu(cpufreq_cpu_data, cpu); - - if (!data) { -- spin_unlock_irqrestore(&cpufreq_driver_lock, flags); -+ raw_spin_unlock_irqrestore(&cpufreq_driver_lock, flags); - unlock_policy_rwsem_write(cpu); - return -EINVAL; - } -@@ -1031,7 +1036,7 @@ static int __cpufreq_remove_dev(struct s - if (unlikely(cpu != data->cpu)) { - pr_debug("removing link\n"); - cpumask_clear_cpu(cpu, data->cpus); -- spin_unlock_irqrestore(&cpufreq_driver_lock, flags); -+ raw_spin_unlock_irqrestore(&cpufreq_driver_lock, flags); - kobj = &sys_dev->kobj; - cpufreq_cpu_put(data); - unlock_policy_rwsem_write(cpu); -@@ -1040,6 +1045,7 @@ static int __cpufreq_remove_dev(struct s - } - #endif - -+ raw_spin_unlock_irqrestore(&cpufreq_driver_lock, flags); - #ifdef CONFIG_SMP - - #ifdef CONFIG_HOTPLUG_CPU -@@ -1052,15 +1058,17 @@ static int __cpufreq_remove_dev(struct s - * per_cpu(cpufreq_cpu_data) while holding the lock, and remove - * the sysfs links afterwards. 
- */ -+ get_online_cpus(); - if (unlikely(cpumask_weight(data->cpus) > 1)) { - for_each_cpu(j, data->cpus) { - if (j == cpu) - continue; -+ raw_spin_lock_irqsave(&cpufreq_driver_lock, flags); - per_cpu(cpufreq_cpu_data, j) = NULL; -+ raw_spin_unlock_irqrestore(&cpufreq_driver_lock, flags); - } - } -- -- spin_unlock_irqrestore(&cpufreq_driver_lock, flags); -+ put_online_cpus(); - - if (unlikely(cpumask_weight(data->cpus) > 1)) { - for_each_cpu(j, data->cpus) { -@@ -1079,8 +1087,6 @@ static int __cpufreq_remove_dev(struct s - cpufreq_cpu_put(data); - } - } --#else -- spin_unlock_irqrestore(&cpufreq_driver_lock, flags); - #endif - - if (cpufreq_driver->target) -@@ -1802,13 +1808,13 @@ int cpufreq_register_driver(struct cpufr - if (driver_data->setpolicy) - driver_data->flags |= CPUFREQ_CONST_LOOPS; - -- spin_lock_irqsave(&cpufreq_driver_lock, flags); -+ raw_spin_lock_irqsave(&cpufreq_driver_lock, flags); - if (cpufreq_driver) { -- spin_unlock_irqrestore(&cpufreq_driver_lock, flags); -+ raw_spin_unlock_irqrestore(&cpufreq_driver_lock, flags); - return -EBUSY; - } - cpufreq_driver = driver_data; -- spin_unlock_irqrestore(&cpufreq_driver_lock, flags); -+ raw_spin_unlock_irqrestore(&cpufreq_driver_lock, flags); - - ret = sysdev_driver_register(&cpu_sysdev_class, - &cpufreq_sysdev_driver); -@@ -1842,9 +1848,9 @@ int cpufreq_register_driver(struct cpufr - sysdev_driver_unregister(&cpu_sysdev_class, - &cpufreq_sysdev_driver); - err_null_driver: -- spin_lock_irqsave(&cpufreq_driver_lock, flags); -+ raw_spin_lock_irqsave(&cpufreq_driver_lock, flags); - cpufreq_driver = NULL; -- spin_unlock_irqrestore(&cpufreq_driver_lock, flags); -+ raw_spin_unlock_irqrestore(&cpufreq_driver_lock, flags); - return ret; - } - EXPORT_SYMBOL_GPL(cpufreq_register_driver); -@@ -1870,9 +1876,9 @@ int cpufreq_unregister_driver(struct cpu - sysdev_driver_unregister(&cpu_sysdev_class, &cpufreq_sysdev_driver); - unregister_hotcpu_notifier(&cpufreq_cpu_notifier); - -- spin_lock_irqsave(&cpufreq_driver_lock, flags); -+ raw_spin_lock_irqsave(&cpufreq_driver_lock, flags); - cpufreq_driver = NULL; -- spin_unlock_irqrestore(&cpufreq_driver_lock, flags); -+ raw_spin_unlock_irqrestore(&cpufreq_driver_lock, flags); - - return 0; - } diff --git a/debian/patches/series/2-extra b/debian/patches/series/2-extra deleted file mode 100644 index 1c2e92650..000000000 --- a/debian/patches/series/2-extra +++ /dev/null @@ -1 +0,0 @@ -+ features/all/rt/patch-3.0.1-rt11.patch featureset=rt diff --git a/debian/patches/series/4-extra b/debian/patches/series/4-extra new file mode 100644 index 000000000..a50d2dd76 --- /dev/null +++ b/debian/patches/series/4-extra @@ -0,0 +1 @@ ++ features/all/rt/patch-3.0.4-rt13.patch featureset=rt