From: Thomas Gleixner <tglx@linutronix.de>
Date: Mon, 13 Dec 2010 16:33:39 +0100
Subject: x86: Convert mce timer to hrtimer
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.12/patches-3.12.6-rt9.tar.xz

mce_timer is started in atomic contexts of cpu bringup. This results
in might_sleep() warnings on RT. Convert mce_timer to an hrtimer to
avoid this.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
 arch/x86/kernel/cpu/mcheck/mce.c |   57 ++++++++++++++++++++++-----------------
 1 file changed, 33 insertions(+), 24 deletions(-)

--- a/arch/x86/kernel/cpu/mcheck/mce.c
+++ b/arch/x86/kernel/cpu/mcheck/mce.c
@@ -41,6 +41,7 @@
 #include <linux/debugfs.h>
 #include <linux/irq_work.h>
 #include <linux/export.h>
+#include <linux/jiffies.h>
 
 #include <asm/processor.h>
 #include <asm/mce.h>
@@ -1268,7 +1269,7 @@ void mce_log_therm_throt_event(__u64 sta
 static unsigned long check_interval = 5 * 60; /* 5 minutes */
 
 static DEFINE_PER_CPU(unsigned long, mce_next_interval); /* in jiffies */
-static DEFINE_PER_CPU(struct timer_list, mce_timer);
+static DEFINE_PER_CPU(struct hrtimer, mce_timer);
 
 static unsigned long mce_adjust_timer_default(unsigned long interval)
 {
@@ -1278,13 +1279,10 @@ static unsigned long mce_adjust_timer_de
 static unsigned long (*mce_adjust_timer)(unsigned long interval) =
 	mce_adjust_timer_default;
 
-static void mce_timer_fn(unsigned long data)
+static enum hrtimer_restart mce_timer_fn(struct hrtimer *timer)
 {
-	struct timer_list *t = &__get_cpu_var(mce_timer);
 	unsigned long iv;
 
-	WARN_ON(smp_processor_id() != data);
-
 	if (mce_available(__this_cpu_ptr(&cpu_info))) {
 		machine_check_poll(MCP_TIMESTAMP,
 				&__get_cpu_var(mce_poll_banks));
@@ -1305,9 +1303,10 @@ static void mce_timer_fn(unsigned long d
 	__this_cpu_write(mce_next_interval, iv);
 	/* Might have become 0 after CMCI storm subsided */
 	if (iv) {
+		hrtimer_forward_now(timer, ns_to_ktime(jiffies_to_usecs(iv) * 1000ULL));
+		return HRTIMER_RESTART;
 	}
+	return HRTIMER_NORESTART;
 }
 
 /*
@@ -1315,28 +1314,37 @@ static void mce_timer_fn(unsigned long d
  */
 void mce_timer_kick(unsigned long interval)
 {
-	struct timer_list *t = &__get_cpu_var(mce_timer);
-	unsigned long when = jiffies + interval;
+	struct hrtimer *t = &__get_cpu_var(mce_timer);
 	unsigned long iv = __this_cpu_read(mce_next_interval);
 
-	if (timer_pending(t)) {
-		if (time_before(when, t->expires))
-			mod_timer_pinned(t, when);
+	if (hrtimer_active(t)) {
+		s64 exp;
+		s64 intv_us;
+
+		intv_us = jiffies_to_usecs(interval);
+		exp = ktime_to_us(hrtimer_expires_remaining(t));
+		if (intv_us < exp) {
+			hrtimer_cancel(t);
+			hrtimer_start_range_ns(t,
+					ns_to_ktime(intv_us * 1000),
+					0, HRTIMER_MODE_REL_PINNED);
+		}
 	} else {
-		t->expires = round_jiffies(when);
-		add_timer_on(t, smp_processor_id());
+		hrtimer_start_range_ns(t,
+				ns_to_ktime(jiffies_to_usecs(interval) * 1000),
+				0, HRTIMER_MODE_REL_PINNED);
 	}
 	if (interval < iv)
 		__this_cpu_write(mce_next_interval, interval);
 }
 
-/* Must not be called in IRQ context where del_timer_sync() can deadlock */
+/* Must not be called in IRQ context where hrtimer_cancel() can deadlock */
 static void mce_timer_delete_all(void)
 {
 	int cpu;
 
 	for_each_online_cpu(cpu)
-		del_timer_sync(&per_cpu(mce_timer, cpu));
+		hrtimer_cancel(&per_cpu(mce_timer, cpu));
 }
 
 static void mce_do_trigger(struct work_struct *work)
@@ -1636,7 +1644,7 @@ static void __mcheck_cpu_init_vendor(str
 	}
 }
 
-static void mce_start_timer(unsigned int cpu, struct timer_list *t)
+static void mce_start_timer(unsigned int cpu, struct hrtimer *t)
 {
 	unsigned long iv = mce_adjust_timer(check_interval * HZ);
 
@@ -1645,16 +1653,17 @@ static void mce_start_timer(unsigned int
 	if (mca_cfg.ignore_ce || !iv)
 		return;
 
-	t->expires = round_jiffies(jiffies + iv);
-	add_timer_on(t, smp_processor_id());
+	hrtimer_start_range_ns(t, ns_to_ktime(jiffies_to_usecs(iv) * 1000),
+			0, HRTIMER_MODE_REL_PINNED);
 }
 
 static void __mcheck_cpu_init_timer(void)
 {
-	struct timer_list *t = &__get_cpu_var(mce_timer);
+	struct hrtimer *t = &__get_cpu_var(mce_timer);
 	unsigned int cpu = smp_processor_id();
 
-	setup_timer(t, mce_timer_fn, cpu);
+	hrtimer_init(t, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+	t->function = mce_timer_fn;
 	mce_start_timer(cpu, t);
 }
 
@@ -2329,6 +2338,8 @@ static void mce_disable_cpu(void *h)
 	if (!mce_available(__this_cpu_ptr(&cpu_info)))
 		return;
 
+	hrtimer_cancel(&__get_cpu_var(mce_timer));
+
 	if (!(action & CPU_TASKS_FROZEN))
 		cmci_clear();
 	for (i = 0; i < mca_cfg.banks; i++) {
@@ -2355,6 +2366,7 @@ static void mce_reenable_cpu(void *h)
 		if (b->init)
 			wrmsrl(MSR_IA32_MCx_CTL(i), b->ctl);
 	}
+	__mcheck_cpu_init_timer();
 }
 
 /* Get notified when a cpu comes on/off. Be hotplug friendly. */
@@ -2362,7 +2374,6 @@ static int
 mce_cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu)
 {
 	unsigned int cpu = (unsigned long)hcpu;
-	struct timer_list *t = &per_cpu(mce_timer, cpu);
 
 	switch (action & ~CPU_TASKS_FROZEN) {
 	case CPU_ONLINE:
@@ -2378,11 +2389,9 @@ mce_cpu_callback(struct notifier_block *
 		break;
 	case CPU_DOWN_PREPARE:
 		smp_call_function_single(cpu, mce_disable_cpu, &action, 1);
-		del_timer_sync(t);
 		break;
 	case CPU_DOWN_FAILED:
 		smp_call_function_single(cpu, mce_reenable_cpu, &action, 1);
-		mce_start_timer(cpu, t);
 		break;
 	}
 
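
For readers unfamiliar with the pattern, the conversion above follows the
standard periodic-hrtimer idiom: initialize with hrtimer_init(), assign
->function, re-arm from the callback via hrtimer_forward_now() plus a
HRTIMER_RESTART return, and tear down with hrtimer_cancel(). The standalone
test module below is a minimal sketch of that idiom only; it is not part of
the patch to be applied, and all hrt_demo_* names and the 1-second interval
are illustrative.

/* Sketch only: a self-contained module exercising the hrtimer idiom above */
#include <linux/module.h>
#include <linux/hrtimer.h>
#include <linux/ktime.h>

static struct hrtimer hrt_demo_timer;
static ktime_t hrt_demo_interval;

/* Like mce_timer_fn(): re-arm by forwarding, request restart via return */
static enum hrtimer_restart hrt_demo_fn(struct hrtimer *timer)
{
	pr_info("hrt_demo: tick\n");
	hrtimer_forward_now(timer, hrt_demo_interval);
	return HRTIMER_RESTART;
}

static int __init hrt_demo_init(void)
{
	hrt_demo_interval = ktime_set(1, 0);	/* 1 second, illustrative */

	/* Like __mcheck_cpu_init_timer(): init, set callback, then start */
	hrtimer_init(&hrt_demo_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	hrt_demo_timer.function = hrt_demo_fn;
	hrtimer_start(&hrt_demo_timer, hrt_demo_interval, HRTIMER_MODE_REL);
	return 0;
}

static void __exit hrt_demo_exit(void)
{
	/* Like mce_timer_delete_all(): waits for a running callback */
	hrtimer_cancel(&hrt_demo_timer);
}

module_init(hrt_demo_init);
module_exit(hrt_demo_exit);
MODULE_LICENSE("GPL");

Note that hrtimer_cancel() sleeps until a running callback has finished,
which is exactly why the comment above mce_timer_delete_all() warns against
calling it from IRQ context.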