From d4b3d2991e6a0e45cc82a72c12ac7acbb27929ce Mon Sep 17 00:00:00 2001
From: Allen Pais
Date: Fri, 13 Dec 2013 09:44:42 +0530
Subject: [PATCH 2/3] sparc64: convert spinlock_t to raw_spinlock_t in
 mmu_context_t
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.12/patches-3.12.5-rt7.tar.xz

On PREEMPT_RT a spinlock_t is backed by an rtmutex and may sleep, but
mm->context.lock is taken in contexts that must not sleep, such as
switch_mm() during a context switch and the smp_new_mmu_context_version()
IPI handler. Convert the lock to a raw_spinlock_t, which keeps true
spinning semantics. Also select IRQ_FORCED_THREADING, since forced
interrupt threading requires an opt-in from the architecture.

Issue debugged by Thomas Gleixner

Signed-off-by: Allen Pais
Signed-off-by: Sebastian Andrzej Siewior
---
 arch/sparc/Kconfig                      |  1 +
 arch/sparc/include/asm/mmu_64.h         |  2 +-
 arch/sparc/include/asm/mmu_context_64.h |  8 ++++----
 arch/sparc/kernel/smp_64.c              |  4 ++--
 arch/sparc/mm/init_64.c                 |  4 ++--
 arch/sparc/mm/tsb.c                     | 16 ++++++++--------
 6 files changed, 18 insertions(+), 17 deletions(-)

--- a/arch/sparc/Kconfig
+++ b/arch/sparc/Kconfig
@@ -26,6 +26,7 @@ config SPARC
 	select HAVE_DMA_ATTRS
 	select HAVE_DMA_API_DEBUG
 	select HAVE_ARCH_JUMP_LABEL
+	select IRQ_FORCED_THREADING
 	select GENERIC_IRQ_SHOW
 	select ARCH_WANT_IPC_PARSE_VERSION
 	select USE_GENERIC_SMP_HELPERS if SMP
--- a/arch/sparc/include/asm/mmu_64.h
+++ b/arch/sparc/include/asm/mmu_64.h
@@ -90,7 +90,7 @@ struct tsb_config {
 #endif
 
 typedef struct {
-	spinlock_t lock;
+	raw_spinlock_t lock;
 	unsigned long sparc64_ctx_val;
 	unsigned long huge_pte_count;
 	struct page *pgtable_page;
--- a/arch/sparc/include/asm/mmu_context_64.h
+++ b/arch/sparc/include/asm/mmu_context_64.h
@@ -77,7 +77,7 @@ static inline void switch_mm(struct mm_s
 	if (unlikely(mm == &init_mm))
 		return;
 
-	spin_lock_irqsave(&mm->context.lock, flags);
+	raw_spin_lock_irqsave(&mm->context.lock, flags);
 	ctx_valid = CTX_VALID(mm->context);
 	if (!ctx_valid)
 		get_new_mmu_context(mm);
@@ -125,7 +125,7 @@ static inline void switch_mm(struct mm_s
 		__flush_tlb_mm(CTX_HWBITS(mm->context), SECONDARY_CONTEXT);
 	}
 
-	spin_unlock_irqrestore(&mm->context.lock, flags);
+	raw_spin_unlock_irqrestore(&mm->context.lock, flags);
 }
 
 #define deactivate_mm(tsk,mm)	do { } while (0)
@@ -136,7 +136,7 @@ static inline void activate_mm(struct mm
 	unsigned long flags;
 	int cpu;
 
-	spin_lock_irqsave(&mm->context.lock, flags);
+	raw_spin_lock_irqsave(&mm->context.lock, flags);
 	if (!CTX_VALID(mm->context))
 		get_new_mmu_context(mm);
 	cpu = smp_processor_id();
@@ -146,7 +146,7 @@ static inline void activate_mm(struct mm
 	load_secondary_context(mm);
 	__flush_tlb_mm(CTX_HWBITS(mm->context), SECONDARY_CONTEXT);
 	tsb_context_switch(mm);
-	spin_unlock_irqrestore(&mm->context.lock, flags);
+	raw_spin_unlock_irqrestore(&mm->context.lock, flags);
 }
 
 #endif /* !(__ASSEMBLY__) */
--- a/arch/sparc/kernel/smp_64.c
+++ b/arch/sparc/kernel/smp_64.c
@@ -976,12 +976,12 @@ void __irq_entry smp_new_mmu_context_ver
 	if (unlikely(!mm || (mm == &init_mm)))
 		return;
 
-	spin_lock_irqsave(&mm->context.lock, flags);
+	raw_spin_lock_irqsave(&mm->context.lock, flags);
 
 	if (unlikely(!CTX_VALID(mm->context)))
 		get_new_mmu_context(mm);
 
-	spin_unlock_irqrestore(&mm->context.lock, flags);
+	raw_spin_unlock_irqrestore(&mm->context.lock, flags);
 
 	load_secondary_context(mm);
 	__flush_tlb_mm(CTX_HWBITS(mm->context),
--- a/arch/sparc/mm/init_64.c
+++ b/arch/sparc/mm/init_64.c
@@ -350,7 +350,7 @@ void update_mmu_cache(struct vm_area_str
 
 	mm = vma->vm_mm;
 
-	spin_lock_irqsave(&mm->context.lock, flags);
+	raw_spin_lock_irqsave(&mm->context.lock, flags);
 
 #if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
 	if (mm->context.huge_pte_count && is_hugetlb_pte(pte))
@@ -361,7 +361,7 @@ void update_mmu_cache(struct vm_area_str
 		__update_mmu_tsb_insert(mm, MM_TSB_BASE, PAGE_SHIFT,
 					address, pte_val(pte));
 
-	spin_unlock_irqrestore(&mm->context.lock, flags);
+	raw_spin_unlock_irqrestore(&mm->context.lock, flags);
 }
 
 void flush_dcache_page(struct page *page)
--- a/arch/sparc/mm/tsb.c
+++ b/arch/sparc/mm/tsb.c
@@ -73,7 +73,7 @@ void flush_tsb_user(struct tlb_batch *tb
 	struct mm_struct *mm = tb->mm;
 	unsigned long nentries, base, flags;
 
-	spin_lock_irqsave(&mm->context.lock, flags);
+	raw_spin_lock_irqsave(&mm->context.lock, flags);
 
 	base = (unsigned long) mm->context.tsb_block[MM_TSB_BASE].tsb;
 	nentries = mm->context.tsb_block[MM_TSB_BASE].tsb_nentries;
@@ -90,14 +90,14 @@ void flush_tsb_user(struct tlb_batch *tb
 		__flush_tsb_one(tb, HPAGE_SHIFT, base, nentries);
 	}
 #endif
-	spin_unlock_irqrestore(&mm->context.lock, flags);
+	raw_spin_unlock_irqrestore(&mm->context.lock, flags);
 }
 
 void flush_tsb_user_page(struct mm_struct *mm, unsigned long vaddr)
 {
 	unsigned long nentries, base, flags;
 
-	spin_lock_irqsave(&mm->context.lock, flags);
+	raw_spin_lock_irqsave(&mm->context.lock, flags);
 
 	base = (unsigned long) mm->context.tsb_block[MM_TSB_BASE].tsb;
 	nentries = mm->context.tsb_block[MM_TSB_BASE].tsb_nentries;
@@ -114,7 +114,7 @@ void flush_tsb_user_page(struct mm_struc
 		__flush_tsb_one_entry(base, vaddr, HPAGE_SHIFT, nentries);
 	}
 #endif
-	spin_unlock_irqrestore(&mm->context.lock, flags);
+	raw_spin_unlock_irqrestore(&mm->context.lock, flags);
 }
 
 #define HV_PGSZ_IDX_BASE	HV_PGSZ_IDX_8K
@@ -392,7 +392,7 @@ void tsb_grow(struct mm_struct *mm, unsi
 	 * the lock and ask all other cpus running this address space
 	 * to run tsb_context_switch() to see the new TSB table.
 	 */
-	spin_lock_irqsave(&mm->context.lock, flags);
+	raw_spin_lock_irqsave(&mm->context.lock, flags);
 
 	old_tsb = mm->context.tsb_block[tsb_index].tsb;
 	old_cache_index =
@@ -407,7 +407,7 @@ void tsb_grow(struct mm_struct *mm, unsi
 	 */
 	if (unlikely(old_tsb &&
		     (rss < mm->context.tsb_block[tsb_index].tsb_rss_limit))) {
-		spin_unlock_irqrestore(&mm->context.lock, flags);
+		raw_spin_unlock_irqrestore(&mm->context.lock, flags);
 
 		kmem_cache_free(tsb_caches[new_cache_index], new_tsb);
 		return;
@@ -433,7 +433,7 @@ void tsb_grow(struct mm_struct *mm, unsi
 	mm->context.tsb_block[tsb_index].tsb = new_tsb;
 	setup_tsb_params(mm, tsb_index, new_size);
 
-	spin_unlock_irqrestore(&mm->context.lock, flags);
+	raw_spin_unlock_irqrestore(&mm->context.lock, flags);
 
 	/* If old_tsb is NULL, we're being invoked for the first time
 	 * from init_new_context().
@@ -459,7 +459,7 @@ int init_new_context(struct task_struct
 #endif
 	unsigned int i;
 
-	spin_lock_init(&mm->context.lock);
+	raw_spin_lock_init(&mm->context.lock);
 
 	mm->context.sparc64_ctx_val = 0UL;
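
For illustration, a minimal sketch of the locking pattern this patch applies
(the example_ctx structure and function names are hypothetical, not taken from
the kernel tree): on PREEMPT_RT a plain spinlock_t is substituted by a sleeping
rtmutex-based lock, so any lock acquired where sleeping is forbidden must be
declared raw_spinlock_t, which keeps busy-wait semantics in every kernel
configuration.

#include <linux/spinlock.h>

/* Hypothetical structure, mirroring the mmu_context_t conversion above. */
struct example_ctx {
	raw_spinlock_t lock;	/* spins even on PREEMPT_RT */
	unsigned long ctx_val;
};

static void example_init(struct example_ctx *ctx)
{
	raw_spin_lock_init(&ctx->lock);
	ctx->ctx_val = 0UL;
}

/* Safe to call from atomic context, e.g. during a context switch. */
static void example_update(struct example_ctx *ctx)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&ctx->lock, flags);
	ctx->ctx_val++;
	raw_spin_unlock_irqrestore(&ctx->lock, flags);
}

The trade-off is that raw spinlock critical sections contribute directly to
worst-case latency on RT, so they should stay short, as the sections guarded
by mm->context.lock in this patch do.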