From: Thomas Gleixner <tglx@linutronix.de>
Date: Mon, 13 Dec 2010 16:33:39 +0100
Subject: x86: Convert mce timer to hrtimer

mce_timer is started in atomic contexts of cpu bringup. This results
in might_sleep() warnings on RT. Convert mce_timer to a hrtimer to
avoid this.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
 arch/x86/kernel/cpu/mcheck/mce.c |   36 ++++++++++++++++--------------------
 1 file changed, 16 insertions(+), 20 deletions(-)

Index: linux-stable/arch/x86/kernel/cpu/mcheck/mce.c
===================================================================
--- linux-stable.orig/arch/x86/kernel/cpu/mcheck/mce.c
+++ linux-stable/arch/x86/kernel/cpu/mcheck/mce.c
@@ -41,6 +41,7 @@
 #include <linux/debugfs.h>
 #include <linux/irq_work.h>
 #include <linux/export.h>
+#include <linux/jiffies.h>
 
 #include <asm/processor.h>
 #include <asm/mce.h>
@@ -1264,15 +1265,12 @@ void mce_log_therm_throt_event(__u64 sta
 static unsigned long check_interval = 5 * 60; /* 5 minutes */
 
 static DEFINE_PER_CPU(unsigned long, mce_next_interval); /* in jiffies */
-static DEFINE_PER_CPU(struct timer_list, mce_timer);
+static DEFINE_PER_CPU(struct hrtimer, mce_timer);
 
-static void mce_timer_fn(unsigned long data)
+static enum hrtimer_restart mce_timer_fn(struct hrtimer *timer)
 {
-	struct timer_list *t = &__get_cpu_var(mce_timer);
 	unsigned long iv;
 
-	WARN_ON(smp_processor_id() != data);
-
 	if (mce_available(__this_cpu_ptr(&cpu_info))) {
 		machine_check_poll(MCP_TIMESTAMP,
 				&__get_cpu_var(mce_poll_banks));
@@ -1289,17 +1287,18 @@ static void mce_timer_fn(unsigned long d
 	iv = min(iv * 2, round_jiffies_relative(check_interval * HZ));
 	__this_cpu_write(mce_next_interval, iv);
 
-	t->expires = jiffies + iv;
-	add_timer_on(t, smp_processor_id());
+	hrtimer_forward(timer, timer->base->get_time(),
+			ns_to_ktime(jiffies_to_usecs(iv) * 1000ULL));
+	return HRTIMER_RESTART;
 }
 
-/* Must not be called in IRQ context where del_timer_sync() can deadlock */
+/* Must not be called in IRQ context where hrtimer_cancel() can deadlock */
 static void mce_timer_delete_all(void)
 {
 	int cpu;
 
 	for_each_online_cpu(cpu)
-		del_timer_sync(&per_cpu(mce_timer, cpu));
+		hrtimer_cancel(&per_cpu(mce_timer, cpu));
 }
 
 static void mce_do_trigger(struct work_struct *work)
@@ -1596,10 +1595,11 @@ static void __mcheck_cpu_init_vendor(str
 
 static void __mcheck_cpu_init_timer(void)
 {
-	struct timer_list *t = &__get_cpu_var(mce_timer);
+	struct hrtimer *t = &__get_cpu_var(mce_timer);
 	unsigned long iv = check_interval * HZ;
 
-	setup_timer(t, mce_timer_fn, smp_processor_id());
+	hrtimer_init(t, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+	t->function = mce_timer_fn;
 
 	if (mce_ignore_ce)
 		return;
@@ -1607,8 +1607,8 @@ static void __mcheck_cpu_init_timer(void
 	__this_cpu_write(mce_next_interval, iv);
 	if (!iv)
 		return;
-	t->expires = round_jiffies(jiffies + iv);
-	add_timer_on(t, smp_processor_id());
+	hrtimer_start_range_ns(t, ns_to_ktime(jiffies_to_usecs(iv) * 1000ULL),
+			       0, HRTIMER_MODE_REL_PINNED);
 }
 
 /* Handle unconfigured int18 (should never happen) */
@@ -2259,6 +2259,8 @@ static void __cpuinit mce_disable_cpu(vo
 	if (!mce_available(__this_cpu_ptr(&cpu_info)))
 		return;
 
+	hrtimer_cancel(&__get_cpu_var(mce_timer));
+
 	if (!(action & CPU_TASKS_FROZEN))
 		cmci_clear();
 	for (i = 0; i < banks; i++) {
@@ -2285,6 +2287,7 @@ static void __cpuinit mce_reenable_cpu(v
 		if (b->init)
 			wrmsrl(MSR_IA32_MCx_CTL(i), b->ctl);
 	}
+	__mcheck_cpu_init_timer();
 }
 
 /* Get notified when a cpu comes on/off. Be hotplug friendly. */
@@ -2292,7 +2295,6 @@ static int __cpuinit
 mce_cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu)
 {
 	unsigned int cpu = (unsigned long)hcpu;
-	struct timer_list *t = &per_cpu(mce_timer, cpu);
 
 	switch (action) {
 	case CPU_ONLINE:
@@ -2309,16 +2311,10 @@ mce_cpu_callback(struct notifier_block *
 		break;
 	case CPU_DOWN_PREPARE:
 	case CPU_DOWN_PREPARE_FROZEN:
-		del_timer_sync(t);
 		smp_call_function_single(cpu, mce_disable_cpu, &action, 1);
 		break;
 	case CPU_DOWN_FAILED:
 	case CPU_DOWN_FAILED_FROZEN:
-		if (!mce_ignore_ce && check_interval) {
-			t->expires = round_jiffies(jiffies +
-					per_cpu(mce_next_interval, cpu));
-			add_timer_on(t, cpu);
-		}
 		smp_call_function_single(cpu, mce_reenable_cpu, &action, 1);
 		break;
 	case CPU_POST_DEAD:
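
For reference, the core of the conversion is the self-rearming callback: instead
of re-queueing itself with add_timer_on(), an hrtimer callback advances its own
expiry with hrtimer_forward() and returns HRTIMER_RESTART. The stand-alone
sketch below shows the same pattern outside of mce.c; it is illustrative only,
and all identifiers in it (poll_timer, poll_fn, poll_interval, the 5*HZ period)
are invented, not part of this patch:

	/* Illustrative only: a self-rearming pinned hrtimer, mirroring
	 * the pattern applied to mce_timer above. Names are invented. */
	#include <linux/module.h>
	#include <linux/hrtimer.h>
	#include <linux/jiffies.h>
	#include <linux/ktime.h>

	static struct hrtimer poll_timer;
	static unsigned long poll_interval = 5 * HZ;	/* in jiffies */

	static enum hrtimer_restart poll_fn(struct hrtimer *timer)
	{
		/* periodic work goes here; runs in hrtimer context */

		/* re-arm: jiffies -> usecs -> ns, as in mce_timer_fn() */
		hrtimer_forward(timer, timer->base->get_time(),
				ns_to_ktime(jiffies_to_usecs(poll_interval) * 1000ULL));
		return HRTIMER_RESTART;
	}

	static int __init poll_example_init(void)
	{
		hrtimer_init(&poll_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
		poll_timer.function = poll_fn;
		/* REL_PINNED keeps the timer on the starting CPU, the
		 * hrtimer analog of add_timer_on(t, smp_processor_id()) */
		hrtimer_start_range_ns(&poll_timer,
				ns_to_ktime(jiffies_to_usecs(poll_interval) * 1000ULL),
				0, HRTIMER_MODE_REL_PINNED);
		return 0;
	}

	static void __exit poll_example_exit(void)
	{
		/* like del_timer_sync(), must not be called from IRQ context */
		hrtimer_cancel(&poll_timer);
	}

	module_init(poll_example_init);
	module_exit(poll_example_exit);
	MODULE_LICENSE("GPL");

Note that hrtimer_cancel(), like del_timer_sync(), can deadlock when called
from IRQ context, which is why mce_timer_delete_all() keeps its context
restriction in the patch.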