author     Minchan Kim <minchan@kernel.org>          2013-05-09 16:21:27 +0900
committer  Vinayak Menon <vinmenon@codeaurora.org>   2015-04-14 15:11:59 -0700
commit     bcdef59b598ea572c4b866483b3c666bec13ce1b (patch)
tree       c482202dc52dd0027ed0fbf67a13dd2009f5726c /include/linux
parent     e1e65c80d14abe0f70946f00f497f1ef26a071d5 (diff)
download   qcom-msm-v3.10-bcdef59b598ea572c4b866483b3c666bec13ce1b.tar.gz
mm: Enhance per process reclaim to consider shared pages
Some pages can be shared by several processes (for example, libc). In that case it is wasteful to reclaim them from the beginning, so this patch makes the VM keep such pages in memory until the last mapping task tries to reclaim them; a shared page is swapped out only after every task mapping it has attempted reclaim. This feature does not handle non-linear mappings on ramfs because that would be very time-consuming, would not guarantee reclaim, and is not a common case.

Change-Id: I7e5f34f2e947f5db6d405867fe2ad34863ca40f7
Signed-off-by: Sangseok Lee <sangseok.lee@lge.com>
Signed-off-by: Minchan Kim <minchan@kernel.org>
Patch-mainline: linux-mm @ 9 May 2013 16:21:27
[vinmenon@codeaurora.org: trivial merge conflict fixes]
Signed-off-by: Vinayak Menon <vinmenon@codeaurora.org>
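From the caller's side, the interface change below means try_to_unmap() and reclaim_pages_from_list() now take the vm_area_struct of the process being reclaimed. The following is a minimal illustrative sketch, not part of this patch: the helper name reclaim_vma_pages(), the assumption that the page list was already isolated from the LRU, and the assumption that existing callers pass NULL to keep the old unmap-everywhere behaviour are all for illustration only.

#include <linux/mm.h>
#include <linux/rmap.h>
#include <linux/list.h>

/*
 * Illustrative sketch only -- not part of this patch.  It shows how a
 * per-process reclaim path could hand the target vma down to
 * reclaim_pages_from_list() so that a page shared with other processes
 * is unmapped from this vma only, and is actually written to swap once
 * the last mapping task has reclaimed it.
 */
static unsigned long reclaim_vma_pages(struct vm_area_struct *vma,
				       struct list_head *page_list)
{
	/*
	 * vma != NULL: unmap only the mappings belonging to this vma.
	 * Global reclaim would presumably pass NULL to keep the old
	 * "unmap from every process" behaviour.
	 */
	return reclaim_pages_from_list(page_list, vma);
}

The same pattern applies to try_to_unmap(page, flags, vma): with a non-NULL vma only that mapping is removed, and the page goes to swap only when no other process still maps it.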
Diffstat (limited to 'include/linux')
-rw-r--r--  include/linux/ksm.h   | 6
-rw-r--r--  include/linux/rmap.h  | 8
2 files changed, 9 insertions(+), 5 deletions(-)
diff --git a/include/linux/ksm.h b/include/linux/ksm.h
index 45c9b6a17bc..d8e556b3fc6 100644
--- a/include/linux/ksm.h
+++ b/include/linux/ksm.h
@@ -75,7 +75,8 @@ struct page *ksm_might_need_to_copy(struct page *page,
int page_referenced_ksm(struct page *page,
struct mem_cgroup *memcg, unsigned long *vm_flags);
-int try_to_unmap_ksm(struct page *page, enum ttu_flags flags);
+int try_to_unmap_ksm(struct page *page,
+ enum ttu_flags flags, struct vm_area_struct *vma);
int rmap_walk_ksm(struct page *page, int (*rmap_one)(struct page *,
struct vm_area_struct *, unsigned long, void *), void *arg);
void ksm_migrate_page(struct page *newpage, struct page *oldpage);
@@ -115,7 +116,8 @@ static inline int page_referenced_ksm(struct page *page,
return 0;
}
-static inline int try_to_unmap_ksm(struct page *page, enum ttu_flags flags)
+static inline int try_to_unmap_ksm(struct page *page,
+ enum ttu_flags flags, struct vm_area_struct *target_vma)
{
return 0;
}
diff --git a/include/linux/rmap.h b/include/linux/rmap.h
index a24e34efd0e..6c7d030bfa9 100644
--- a/include/linux/rmap.h
+++ b/include/linux/rmap.h
@@ -12,7 +12,8 @@
extern int isolate_lru_page(struct page *page);
extern void putback_lru_page(struct page *page);
-extern unsigned long reclaim_pages_from_list(struct list_head *page_list);
+extern unsigned long reclaim_pages_from_list(struct list_head *page_list,
+ struct vm_area_struct *vma);
/*
* The anon_vma heads a list of private "related" vmas, to scan if
@@ -192,7 +193,8 @@ int page_referenced_one(struct page *, struct vm_area_struct *,
#define TTU_ACTION(x) ((x) & TTU_ACTION_MASK)
-int try_to_unmap(struct page *, enum ttu_flags flags);
+int try_to_unmap(struct page *, enum ttu_flags flags,
+ struct vm_area_struct *vma);
int try_to_unmap_one(struct page *, struct vm_area_struct *,
unsigned long address, enum ttu_flags flags);
@@ -259,7 +261,7 @@ static inline int page_referenced(struct page *page, int is_locked,
return 0;
}
-#define try_to_unmap(page, refs) SWAP_FAIL
+#define try_to_unmap(page, refs, vma) SWAP_FAIL
static inline int page_mkclean(struct page *page)
{