mm: make shrink_page_list work with pages from multiple zones

shrink_page_list() expects all pages on the list to come from the
same zone, which is too restrictive for other would-be callers.

This patch removes that dependency so the next patch can use
shrink_page_list() with pages from multiple zones.

Change-Id: I34469b7f0a79f2b79e30e40033ba8b3e1dd5f2d0
Signed-off-by: Minchan Kim <minchan@kernel.org>
Patch-mainline: linux-mm @ 9 May 2013 16:21:25
[vinmenon@codeaurora.org: changes for node based lrus]
Signed-off-by: Vinayak Menon <vinmenon@codeaurora.org>
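
For context (not part of the patch itself), here is a minimal sketch of the
kind of caller this change enables. The shrink_page_list() argument list used
below (page_list, pgdat, sc, ttu_flags, stat, force_reclaim), the
TTU_IGNORE_ACCESS flag and the scan_control settings are assumptions inferred
from the hunks that follow, not quotes from the modified tree, and the helper
name is made up.

/*
 * Illustrative only -- not part of this patch.  A hypothetical helper
 * (it would have to live in mm/vmscan.c, since shrink_page_list() is
 * static) that reclaims an already-isolated list of pages which may
 * span several nodes.
 */
static unsigned long demo_reclaim_mixed_node_pages(struct list_head *page_list)
{
	struct scan_control sc = {
		.gfp_mask = GFP_KERNEL,
		.priority = DEF_PRIORITY,
		.may_writepage = 1,
		.may_unmap = 1,
		.may_swap = 1,
	};
	struct reclaim_stat stat;
	unsigned long nr_reclaimed;

	/*
	 * pgdat == NULL now means "pages may come from any node":
	 * the per-page pgdat VM_BUG_ON and the PGDAT_WRITEBACK /
	 * PGDAT_DIRTY heuristics are skipped.
	 */
	nr_reclaimed = shrink_page_list(page_list, NULL, &sc,
					TTU_IGNORE_ACCESS, &stat, true);

	/* Pages that could not be freed are still on page_list. */
	return nr_reclaimed;
}

The real caller added by the next patch may look different; the point is only
that a NULL pgdat becomes a valid way to say the list is not confined to one
node.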

@@ -1152,6 +1152,8 @@ static unsigned long shrink_page_list(struct list_head *page_list,
 			goto keep;
 
 		VM_BUG_ON_PAGE(PageActive(page), page);
+		if (pgdat)
+			VM_BUG_ON_PAGE(page_pgdat(page) != pgdat, page);
 
 		nr_pages = compound_nr(page);
@@ -1238,7 +1240,8 @@ static unsigned long shrink_page_list(struct list_head *page_list,
 			/* Case 1 above */
 			if (current_is_kswapd() &&
 			    PageReclaim(page) &&
-			    test_bit(PGDAT_WRITEBACK, &pgdat->flags)) {
+			    (pgdat &&
+			     test_bit(PGDAT_WRITEBACK, &pgdat->flags))) {
 				stat->nr_immediate++;
 				goto activate_locked;
@@ -1372,7 +1375,8 @@ static unsigned long shrink_page_list(struct list_head *page_list,
 			 */
 			if (page_is_file_cache(page) &&
 			    (!current_is_kswapd() || !PageReclaim(page) ||
-			     !test_bit(PGDAT_DIRTY, &pgdat->flags))) {
+			     (pgdat &&
+			      !test_bit(PGDAT_DIRTY, &pgdat->flags)))) {
 				/*
 				 * Immediately reclaim when written back.
 				 * Similar in principal to deactivate_page()
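
All three hunks apply the same guard. A stand-alone illustration (plain
userspace C with a simplified stand-in for test_bit(), not kernel code) of the
resulting behaviour: the && short-circuits, so &pgdat->flags is never
dereferenced when pgdat is NULL, and the node-congestion heuristics simply
report "not set" for a multi-node page list.

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct fake_pgdat {
	unsigned long flags;
};

/* Simplified stand-in for the kernel's test_bit(). */
static bool fake_test_bit(int nr, const unsigned long *addr)
{
	return (*addr >> nr) & 1UL;
}

/* Mirrors the patched condition: false whenever the node is unknown. */
static bool node_flag_set(const struct fake_pgdat *pgdat, int bit)
{
	return pgdat && fake_test_bit(bit, &pgdat->flags);
}

int main(void)
{
	struct fake_pgdat node = { .flags = 1UL << 0 };

	printf("known node, bit set: %d\n", node_flag_set(&node, 0)); /* 1 */
	printf("unknown node (NULL): %d\n", node_flag_set(NULL, 0));  /* 0 */
	return 0;
}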