94 lines
3.1 KiB
Diff
From: Thomas Gleixner <tglx@linutronix.de>
Date: Tue, 6 Apr 2010 16:51:31 +0200
Subject: md: raid5: Make raid5_percpu handling RT aware
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.10/patches-3.10.4-rt1.tar.xz

__raid_run_ops() disables preemption with get_cpu() around the access
to the raid5_percpu variables. That causes scheduling while atomic
spews on RT.

Serialize the access to the percpu data with a lock and keep the code
preemptible.

Reported-by: Udo van den Heuvel <udovdh@xs4all.nl>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Tested-by: Udo van den Heuvel <udovdh@xs4all.nl>
---
 drivers/md/raid5.c | 13 ++++++++-----
 drivers/md/raid5.h |  1 +
 2 files changed, 9 insertions(+), 5 deletions(-)
Index: linux-stable/drivers/md/raid5.c
===================================================================
--- linux-stable.orig/drivers/md/raid5.c
+++ linux-stable/drivers/md/raid5.c
@@ -1418,8 +1418,9 @@ static void raid_run_ops(struct stripe_h
 	struct raid5_percpu *percpu;
 	unsigned long cpu;
 
-	cpu = get_cpu();
+	cpu = get_cpu_light();
 	percpu = per_cpu_ptr(conf->percpu, cpu);
+	spin_lock(&percpu->lock);
 	if (test_bit(STRIPE_OP_BIOFILL, &ops_request)) {
 		ops_run_biofill(sh);
 		overlap_clear++;
@@ -1471,7 +1472,8 @@ static void raid_run_ops(struct stripe_h
 		if (test_and_clear_bit(R5_Overlap, &dev->flags))
 			wake_up(&sh->raid_conf->wait_for_overlap);
 	}
-	put_cpu();
+	spin_unlock(&percpu->lock);
+	put_cpu_light();
 }
 
 static int grow_one_stripe(struct r5conf *conf)
@@ -1907,7 +1909,7 @@ static void raid5_end_write_request(stru
 }
 
 static sector_t compute_blocknr(struct stripe_head *sh, int i, int previous);
- 
+
 static void raid5_build_block(struct stripe_head *sh, int i, int previous)
 {
 	struct r5dev *dev = &sh->dev[i];
@@ -4315,7 +4317,7 @@ static void make_request(struct mddev *m
 						  previous,
 						  &dd_idx, NULL);
 		pr_debug("raid456: make_request, sector %llu logical %llu\n",
-			(unsigned long long)new_sector, 
+			(unsigned long long)new_sector,
 			(unsigned long long)logical_sector);
 
 		sh = get_active_stripe(conf, new_sector, previous,
@@ -5111,6 +5113,7 @@ static int raid5_alloc_percpu(struct r5c
 			break;
 		}
 		per_cpu_ptr(conf->percpu, cpu)->scribble = scribble;
+		spin_lock_init(&per_cpu_ptr(conf->percpu, cpu)->lock);
 	}
 #ifdef CONFIG_HOTPLUG_CPU
 	conf->cpu_notify.notifier_call = raid456_cpu_notify;
@@ -5285,7 +5288,7 @@ static int only_parity(int raid_disk, in
 			return 1;
 		break;
 	case ALGORITHM_PARITY_0_6:
-		if (raid_disk == 0 || 
+		if (raid_disk == 0 ||
 		    raid_disk == raid_disks - 1)
 			return 1;
 		break;
Index: linux-stable/drivers/md/raid5.h
===================================================================
--- linux-stable.orig/drivers/md/raid5.h
+++ linux-stable/drivers/md/raid5.h
@@ -425,6 +425,7 @@ struct r5conf {
 	int			recovery_disabled;
 	/* per cpu variables */
 	struct raid5_percpu {
+		spinlock_t	lock;	     /* Protection for -RT */
 		struct page	*spare_page; /* Used when checking P/Q in raid6 */
 		void		*scribble;   /* space for constructing buffer
 					      * lists and performing address