Subject: mm: memcg: shorten preempt-disabled section around event checks
From: Johannes Weiner <hannes@cmpxchg.org>
Date: Thu, 17 Nov 2011 07:49:25 +0100

Only the ratelimit checks themselves have to run with preemption
disabled, the resulting actions - checking for usage thresholds,
updating the soft limit tree - can and should run with preemption
enabled.

Signed-off-by: Johannes Weiner <jweiner@redhat.com>
Tested-by: Luis Henriques <henrix@camandro.org>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
 mm/memcontrol.c | 73 ++++++++++++++++++++++++++------------------------------
 1 file changed, 35 insertions(+), 38 deletions(-)

Thomas, HTH and it is probably interesting for upstream as well.
Unfortunately, I'm in the middle of moving right now, so this is
untested except for compiling.

Index: linux-3.2/mm/memcontrol.c
===================================================================
--- linux-3.2.orig/mm/memcontrol.c
+++ linux-3.2/mm/memcontrol.c
@@ -683,37 +683,32 @@ static unsigned long mem_cgroup_nr_lru_p
 	return total;
 }
 
-static bool __memcg_event_check(struct mem_cgroup *memcg, int target)
+static bool mem_cgroup_event_ratelimit(struct mem_cgroup *memcg,
+				       enum mem_cgroup_events_target target)
 {
 	unsigned long val, next;
 
 	val = __this_cpu_read(memcg->stat->events[MEM_CGROUP_EVENTS_COUNT]);
 	next = __this_cpu_read(memcg->stat->targets[target]);
 	/* from time_after() in jiffies.h */
-	return ((long)next - (long)val < 0);
-}
-
-static void __mem_cgroup_target_update(struct mem_cgroup *memcg, int target)
-{
-	unsigned long val, next;
-
-	val = __this_cpu_read(memcg->stat->events[MEM_CGROUP_EVENTS_COUNT]);
-
-	switch (target) {
-	case MEM_CGROUP_TARGET_THRESH:
-		next = val + THRESHOLDS_EVENTS_TARGET;
-		break;
-	case MEM_CGROUP_TARGET_SOFTLIMIT:
-		next = val + SOFTLIMIT_EVENTS_TARGET;
-		break;
-	case MEM_CGROUP_TARGET_NUMAINFO:
-		next = val + NUMAINFO_EVENTS_TARGET;
-		break;
-	default:
-		return;
+	if ((long)next - (long)val < 0) {
+		switch (target) {
+		case MEM_CGROUP_TARGET_THRESH:
+			next = val + THRESHOLDS_EVENTS_TARGET;
+			break;
+		case MEM_CGROUP_TARGET_SOFTLIMIT:
+			next = val + SOFTLIMIT_EVENTS_TARGET;
+			break;
+		case MEM_CGROUP_TARGET_NUMAINFO:
+			next = val + NUMAINFO_EVENTS_TARGET;
+			break;
+		default:
+			break;
+		}
+		__this_cpu_write(memcg->stat->targets[target], next);
+		return true;
 	}
-
-	__this_cpu_write(memcg->stat->targets[target], next);
+	return false;
 }
 
 /*
@@ -724,25 +719,27 @@ static void memcg_check_events(struct me
|
|
{
|
|
preempt_disable();
|
|
/* threshold event is triggered in finer grain than soft limit */
|
|
- if (unlikely(__memcg_event_check(memcg, MEM_CGROUP_TARGET_THRESH))) {
|
|
+ if (unlikely(mem_cgroup_event_ratelimit(memcg,
|
|
+ MEM_CGROUP_TARGET_THRESH))) {
|
|
+ bool do_softlimit, do_numainfo;
|
|
+
|
|
+ do_softlimit = mem_cgroup_event_ratelimit(memcg,
|
|
+ MEM_CGROUP_TARGET_SOFTLIMIT);
|
|
+#if MAX_NUMNODES > 1
|
|
+ do_numainfo = mem_cgroup_event_ratelimit(memcg,
|
|
+ MEM_CGROUP_TARGET_NUMAINFO);
|
|
+#endif
|
|
+ preempt_enable();
|
|
+
|
|
mem_cgroup_threshold(memcg);
|
|
- __mem_cgroup_target_update(memcg, MEM_CGROUP_TARGET_THRESH);
|
|
- if (unlikely(__memcg_event_check(memcg,
|
|
- MEM_CGROUP_TARGET_SOFTLIMIT))) {
|
|
+ if (unlikely(do_softlimit))
|
|
mem_cgroup_update_tree(memcg, page);
|
|
- __mem_cgroup_target_update(memcg,
|
|
- MEM_CGROUP_TARGET_SOFTLIMIT);
|
|
- }
|
|
#if MAX_NUMNODES > 1
|
|
- if (unlikely(__memcg_event_check(memcg,
|
|
- MEM_CGROUP_TARGET_NUMAINFO))) {
|
|
+ if (unlikely(do_numainfo))
|
|
atomic_inc(&memcg->numainfo_events);
|
|
- __mem_cgroup_target_update(memcg,
|
|
- MEM_CGROUP_TARGET_NUMAINFO);
|
|
- }
|
|
#endif
|
|
- }
|
|
- preempt_enable();
|
|
+ } else
|
|
+ preempt_enable();
|
|
}
|
|
|
|
static struct mem_cgroup *mem_cgroup_from_cont(struct cgroup *cont)
|