From af110cc4b24250faafd4f3b9879cf51e350d7799 Mon Sep 17 00:00:00 2001
From: Hugh Dickins <hughd@google.com>
Date: Fri, 15 Jul 2016 15:08:19 -0400
Subject: mm: migrate dirty page without clear_page_dirty_for_io etc

commit 42cb14b110a5698ccf26ce59c4441722605a3743 upstream.

clear_page_dirty_for_io() has accumulated writeback and memcg subtleties
since v2.6.16 first introduced page migration; and the set_page_dirty()
which completed its migration of PageDirty, later had to be moderated to
__set_page_dirty_nobuffers(); then PageSwapBacked had to skip that too.

No actual problems seen with this procedure recently, but if you look into
what the clear_page_dirty_for_io(page)+set_page_dirty(newpage) is actually
achieving, it turns out to be nothing more than moving the PageDirty flag,
and its NR_FILE_DIRTY stat from one zone to another.
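
Spelled out, that round trip amounts to no more than the following
sketch (an editor's illustration, not code from this patch: it uses the
v3.10 page-flag and vmstat helpers, and leaves out the writeback/memcg
subtleties and the PAGECACHE_TAG_DIRTY tagging mentioned above):

	/* Move the dirty bit, and its per-zone dirty count, from the
	 * old page to the new one: the net effect of the old
	 * clear_page_dirty_for_io(page) + set_page_dirty(newpage) pair.
	 */
	if (TestClearPageDirty(page)) {
		__dec_zone_page_state(page, NR_FILE_DIRTY);
		SetPageDirty(newpage);
		__inc_zone_page_state(newpage, NR_FILE_DIRTY);
	}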

It would be good to avoid a pile of irrelevant decrementations and
incrementations, and improper event counting, and unnecessary descent of
the radix_tree under tree_lock (to set the PAGECACHE_TAG_DIRTY which
radix_tree_replace_slot() left in place anyway).

Do the NR_FILE_DIRTY movement, like the other stats movements, while
interrupts still disabled in migrate_page_move_mapping(); and don't even
bother if the zone is the same. Do the PageDirty movement there under
tree_lock too, where old page is frozen and newpage not yet visible:
bearing in mind that as soon as newpage becomes visible in radix_tree, an
un-page-locked set_page_dirty() might interfere (or perhaps that's just
not possible: anything doing so should already hold an additional
reference to the old page, preventing its migration; but play safe).
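
Condensed, the ordering this establishes in migrate_page_move_mapping()
looks as follows (a simplified excerpt of the diff below, with the
unrelated steps elided):

	spin_lock_irq(&mapping->tree_lock);
	...
	/* old page's refs are frozen; newpage not yet in the radix_tree */
	dirty = PageDirty(page);
	if (dirty) {
		ClearPageDirty(page);
		SetPageDirty(newpage);
	}
	radix_tree_replace_slot(pslot, newpage);
	page_unfreeze_refs(page, expected_count - 1);
	spin_unlock(&mapping->tree_lock);
	/* irqs stay disabled while the per-zone stats are updated */
	if (newzone != oldzone) {
		/* move NR_FILE_PAGES, NR_SHMEM and, if dirty, NR_FILE_DIRTY */
	}
	local_irq_enable();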

But we do still need to transfer PageDirty in migrate_page_copy(), for
those who don't go the mapping route through migrate_page_move_mapping().

CVE-2016-3070

Signed-off-by: Hugh Dickins <hughd@google.com>
Cc: Christoph Lameter <cl@linux.com>
Cc: "Kirill A. Shutemov" <kirill.shutemov@linux.intel.com>
Cc: Rik van Riel <riel@redhat.com>
Cc: Vlastimil Babka <vbabka@suse.cz>
Cc: Davidlohr Bueso <dave@stgolabs.net>
Cc: Oleg Nesterov <oleg@redhat.com>
Cc: Sasha Levin <sasha.levin@oracle.com>
Cc: Dmitry Vyukov <dvyukov@google.com>
Cc: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
[ciwillia@brocade.com: backported to 3.10: adjusted context]
Signed-off-by: Charles (Chas) Williams <ciwillia@brocade.com>
Signed-off-by: Willy Tarreau <w@1wt.eu>
---
 mm/migrate.c | 51 +++++++++++++++++++++++++++++++--------------------
 1 file changed, 31 insertions(+), 20 deletions(-)

diff --git a/mm/migrate.c b/mm/migrate.c
index a88c12f..a61500f 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -30,6 +30,7 @@
 #include <linux/mempolicy.h>
 #include <linux/vmalloc.h>
 #include <linux/security.h>
+#include <linux/backing-dev.h>
 #include <linux/memcontrol.h>
 #include <linux/syscalls.h>
 #include <linux/hugetlb.h>
@@ -311,6 +312,8 @@ static int migrate_page_move_mapping(struct address_space *mapping,
 		struct page *newpage, struct page *page,
 		struct buffer_head *head, enum migrate_mode mode)
 {
+	struct zone *oldzone, *newzone;
+	int dirty;
 	int expected_count = 0;
 	void **pslot;
 
@@ -321,6 +324,9 @@ static int migrate_page_move_mapping(struct address_space *mapping,
 		return MIGRATEPAGE_SUCCESS;
 	}
 
+	oldzone = page_zone(page);
+	newzone = page_zone(newpage);
+
 	spin_lock_irq(&mapping->tree_lock);
 
 	pslot = radix_tree_lookup_slot(&mapping->page_tree,
@@ -361,6 +367,13 @@ static int migrate_page_move_mapping(struct address_space *mapping,
 		set_page_private(newpage, page_private(page));
 	}
 
+	/* Move dirty while page refs frozen and newpage not yet exposed */
+	dirty = PageDirty(page);
+	if (dirty) {
+		ClearPageDirty(page);
+		SetPageDirty(newpage);
+	}
+
 	radix_tree_replace_slot(pslot, newpage);
 
 	/*
@@ -370,6 +383,9 @@ static int migrate_page_move_mapping(struct address_space *mapping,
 	 */
 	page_unfreeze_refs(page, expected_count - 1);
 
+	spin_unlock(&mapping->tree_lock);
+	/* Leave irq disabled to prevent preemption while updating stats */
+
 	/*
 	 * If moved to a different zone then also account
 	 * the page for that zone. Other VM counters will be
@@ -380,13 +396,19 @@ static int migrate_page_move_mapping(struct address_space *mapping,
 	 * via NR_FILE_PAGES and NR_ANON_PAGES if they
 	 * are mapped to swap space.
 	 */
-	__dec_zone_page_state(page, NR_FILE_PAGES);
-	__inc_zone_page_state(newpage, NR_FILE_PAGES);
-	if (!PageSwapCache(page) && PageSwapBacked(page)) {
-		__dec_zone_page_state(page, NR_SHMEM);
-		__inc_zone_page_state(newpage, NR_SHMEM);
+	if (newzone != oldzone) {
+		__dec_zone_state(oldzone, NR_FILE_PAGES);
+		__inc_zone_state(newzone, NR_FILE_PAGES);
+		if (PageSwapBacked(page) && !PageSwapCache(page)) {
+			__dec_zone_state(oldzone, NR_SHMEM);
+			__inc_zone_state(newzone, NR_SHMEM);
+		}
+		if (dirty && mapping_cap_account_dirty(mapping)) {
+			__dec_zone_state(oldzone, NR_FILE_DIRTY);
+			__inc_zone_state(newzone, NR_FILE_DIRTY);
+		}
 	}
-	spin_unlock_irq(&mapping->tree_lock);
+	local_irq_enable();
 
 	return MIGRATEPAGE_SUCCESS;
 }
@@ -460,20 +482,9 @@ void migrate_page_copy(struct page *newpage, struct page *page)
 	if (PageMappedToDisk(page))
 		SetPageMappedToDisk(newpage);
 
-	if (PageDirty(page)) {
-		clear_page_dirty_for_io(page);
-		/*
-		 * Want to mark the page and the radix tree as dirty, and
-		 * redo the accounting that clear_page_dirty_for_io undid,
-		 * but we can't use set_page_dirty because that function
-		 * is actually a signal that all of the page has become dirty.
-		 * Whereas only part of our page may be dirty.
-		 */
-		if (PageSwapBacked(page))
-			SetPageDirty(newpage);
-		else
-			__set_page_dirty_nobuffers(newpage);
-	}
+	/* Move dirty on pages not done by migrate_page_move_mapping() */
+	if (PageDirty(page))
+		SetPageDirty(newpage);
 
 	mlock_migrate_page(newpage, page);
 	ksm_migrate_page(newpage, page);
--