From 8550a07330a14f76ace74d423da03e02855055d3 Mon Sep 17 00:00:00 2001
Message-Id: <8550a07330a14f76ace74d423da03e02855055d3.1599166690.git.zanussi@kernel.org>
In-Reply-To: <56457dc415803c8abc5acb513ada877a79596f05.1599166690.git.zanussi@kernel.org>
References: <56457dc415803c8abc5acb513ada877a79596f05.1599166690.git.zanussi@kernel.org>
From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Date: Wed, 28 Jan 2015 17:14:16 +0100
Subject: [PATCH 085/333] mm/memcontrol: Replace local_irq_disable with local
 locks
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.142-rt63.tar.xz

There are a few local_irq_disable() which then take sleeping locks. This
patch converts them local locks.

Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
 mm/memcontrol.c | 24 ++++++++++++++++--------
 1 file changed, 16 insertions(+), 8 deletions(-)

diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index cc920c15e5d7..a54cfafba8ad 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -69,6 +69,7 @@
 #include <net/sock.h>
 #include <net/ip.h>
 #include "slab.h"
+#include <linux/locallock.h>
 
 #include <linux/uaccess.h>
 
@@ -94,6 +95,8 @@ int do_swap_account __read_mostly;
 #define do_swap_account		0
 #endif
 
+static DEFINE_LOCAL_IRQ_LOCK(event_lock);
+
 /* Whether legacy memory+swap accounting is active */
 static bool do_memsw_account(void)
 {
@@ -4930,12 +4933,12 @@ static int mem_cgroup_move_account(struct page *page,
 
 	ret = 0;
 
-	local_irq_disable();
+	local_lock_irq(event_lock);
 	mem_cgroup_charge_statistics(to, page, compound, nr_pages);
 	memcg_check_events(to, page);
 	mem_cgroup_charge_statistics(from, page, compound, -nr_pages);
 	memcg_check_events(from, page);
-	local_irq_enable();
+	local_unlock_irq(event_lock);
 out_unlock:
 	unlock_page(page);
 out:
@@ -6054,10 +6057,10 @@ void mem_cgroup_commit_charge(struct page *page, struct mem_cgroup *memcg,
 
 	commit_charge(page, memcg, lrucare);
 
-	local_irq_disable();
+	local_lock_irq(event_lock);
 	mem_cgroup_charge_statistics(memcg, page, compound, nr_pages);
 	memcg_check_events(memcg, page);
-	local_irq_enable();
+	local_unlock_irq(event_lock);
 
 	if (do_memsw_account() && PageSwapCache(page)) {
 		swp_entry_t entry = { .val = page_private(page) };
@@ -6126,7 +6129,7 @@ static void uncharge_batch(const struct uncharge_gather *ug)
 		memcg_oom_recover(ug->memcg);
 	}
 
-	local_irq_save(flags);
+	local_lock_irqsave(event_lock, flags);
 	__mod_memcg_state(ug->memcg, MEMCG_RSS, -ug->nr_anon);
 	__mod_memcg_state(ug->memcg, MEMCG_CACHE, -ug->nr_file);
 	__mod_memcg_state(ug->memcg, MEMCG_RSS_HUGE, -ug->nr_huge);
@@ -6134,7 +6137,7 @@ static void uncharge_batch(const struct uncharge_gather *ug)
 	__count_memcg_events(ug->memcg, PGPGOUT, ug->pgpgout);
 	__this_cpu_add(ug->memcg->stat_cpu->nr_page_events, nr_pages);
 	memcg_check_events(ug->memcg, ug->dummy_page);
-	local_irq_restore(flags);
+	local_unlock_irqrestore(event_lock, flags);
 
 	if (!mem_cgroup_is_root(ug->memcg))
 		css_put_many(&ug->memcg->css, nr_pages);
@@ -6297,10 +6300,10 @@ void mem_cgroup_migrate(struct page *oldpage, struct page *newpage)
 
 	commit_charge(newpage, memcg, false);
 
-	local_irq_save(flags);
+	local_lock_irqsave(event_lock, flags);
 	mem_cgroup_charge_statistics(memcg, newpage, compound, nr_pages);
 	memcg_check_events(memcg, newpage);
-	local_irq_restore(flags);
+	local_unlock_irqrestore(event_lock, flags);
 }
 
 DEFINE_STATIC_KEY_FALSE(memcg_sockets_enabled_key);
@@ -6482,6 +6485,7 @@ void mem_cgroup_swapout(struct page *page, swp_entry_t entry)
 	struct mem_cgroup *memcg, *swap_memcg;
 	unsigned int nr_entries;
 	unsigned short oldid;
+	unsigned long flags;
 
 	VM_BUG_ON_PAGE(PageLRU(page), page);
 	VM_BUG_ON_PAGE(page_count(page), page);
@@ -6527,13 +6531,17 @@ void mem_cgroup_swapout(struct page *page, swp_entry_t entry)
 	 * important here to have the interrupts disabled because it is the
 	 * only synchronisation we have for updating the per-CPU variables.
 	 */
+	local_lock_irqsave(event_lock, flags);
+#ifndef CONFIG_PREEMPT_RT_BASE
 	VM_BUG_ON(!irqs_disabled());
+#endif
 	mem_cgroup_charge_statistics(memcg, page, PageTransHuge(page),
 				     -nr_entries);
 	memcg_check_events(memcg, page);
 
 	if (!mem_cgroup_is_root(memcg))
 		css_put_many(&memcg->css, nr_entries);
+	local_unlock_irqrestore(event_lock, flags);
 }
 
 /**
-- 
2.17.1