| author | Mark Brown <broonie@linaro.org> | 2014-05-07 09:50:19 +0100 |
|---|---|---|
| committer | Mark Brown <broonie@linaro.org> | 2014-05-07 09:50:19 +0100 |
| commit | 6ba9615888ad3c49af92bc307f6ff231bc0bc0d7 (patch) | |
| tree | 41332b2277e87bfdce04b66a624a9cbe5c1a8c67 /mm | |
| parent | 22d2266cff16067c0142938062d22a89b8dfa348 (diff) | |
| parent | 03b120027537c2ed889393e4c4e4e2f87c868027 (diff) | |
Merge branch 'linux-linaro-lsk' into linux-linaro-lsk-android
Diffstat (limited to 'mm')
| mode | path | lines changed |
|---|---|---|
| -rw-r--r-- | mm/backing-dev.c | 16 |
| -rw-r--r-- | mm/hugetlb.c | 1 |
| -rw-r--r-- | mm/mlock.c | 2 |
| -rw-r--r-- | mm/rmap.c | 14 |
4 files changed, 27 insertions, 6 deletions
```diff
diff --git a/mm/backing-dev.c b/mm/backing-dev.c
index 502517492258..eea1a9dfac38 100644
--- a/mm/backing-dev.c
+++ b/mm/backing-dev.c
@@ -287,13 +287,19 @@ int bdi_has_dirty_io(struct backing_dev_info *bdi)
  * Note, we wouldn't bother setting up the timer, but this function is on the
  * fast-path (used by '__mark_inode_dirty()'), so we save few context switches
  * by delaying the wake-up.
+ *
+ * We have to be careful not to postpone flush work if it is scheduled for
+ * earlier. Thus we use queue_delayed_work().
  */
 void bdi_wakeup_thread_delayed(struct backing_dev_info *bdi)
 {
         unsigned long timeout;
 
         timeout = msecs_to_jiffies(dirty_writeback_interval * 10);
-        mod_delayed_work(bdi_wq, &bdi->wb.dwork, timeout);
+        spin_lock_bh(&bdi->wb_lock);
+        if (test_bit(BDI_registered, &bdi->state))
+                queue_delayed_work(bdi_wq, &bdi->wb.dwork, timeout);
+        spin_unlock_bh(&bdi->wb_lock);
 }
 
 /*
@@ -306,9 +312,6 @@ static void bdi_remove_from_list(struct backing_dev_info *bdi)
         spin_unlock_bh(&bdi_lock);
 
         synchronize_rcu_expedited();
-
-        /* bdi_list is now unused, clear it to mark @bdi dying */
-        INIT_LIST_HEAD(&bdi->bdi_list);
 }
 
 int bdi_register(struct backing_dev_info *bdi, struct device *parent,
@@ -359,6 +362,11 @@ static void bdi_wb_shutdown(struct backing_dev_info *bdi)
          */
         bdi_remove_from_list(bdi);
 
+        /* Make sure nobody queues further work */
+        spin_lock_bh(&bdi->wb_lock);
+        clear_bit(BDI_registered, &bdi->state);
+        spin_unlock_bh(&bdi->wb_lock);
+
         /*
          * Drain work list and shutdown the delayed_work. At this point,
          * @bdi->bdi_list is empty telling bdi_writeback_workfn() that @bdi
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 96cfebd0d67a..5a51ea060dab 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -1487,6 +1487,7 @@ static unsigned long set_max_huge_pages(struct hstate *h, unsigned long count,
         while (min_count < persistent_huge_pages(h)) {
                 if (!free_pool_huge_page(h, nodes_allowed, 0))
                         break;
+                cond_resched_lock(&hugetlb_lock);
         }
         while (count < persistent_huge_pages(h)) {
                 if (!adjust_pool_surplus(h, nodes_allowed, 1))
diff --git a/mm/mlock.c b/mm/mlock.c
index 33861c780070..3dcea72277be 100644
--- a/mm/mlock.c
+++ b/mm/mlock.c
@@ -76,6 +76,7 @@ void clear_page_mlock(struct page *page)
  */
 void mlock_vma_page(struct page *page)
 {
+        /* Serialize with page migration */
         BUG_ON(!PageLocked(page));
 
         if (!TestSetPageMlocked(page)) {
@@ -106,6 +107,7 @@ unsigned int munlock_vma_page(struct page *page)
 {
         unsigned int page_mask = 0;
 
+        /* For try_to_munlock() and to serialize with page migration */
         BUG_ON(!PageLocked(page));
 
         if (TestClearPageMlocked(page)) {
diff --git a/mm/rmap.c b/mm/rmap.c
index 3f6077461aea..fbf0040a7342 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -1390,9 +1390,19 @@ static int try_to_unmap_cluster(unsigned long cursor, unsigned int *mapcount,
                 BUG_ON(!page || PageAnon(page));
 
                 if (locked_vma) {
-                        mlock_vma_page(page);   /* no-op if already mlocked */
-                        if (page == check_page)
+                        if (page == check_page) {
+                                /* we know we have check_page locked */
+                                mlock_vma_page(page);
                                 ret = SWAP_MLOCK;
+                        } else if (trylock_page(page)) {
+                                /*
+                                 * If we can lock the page, perform mlock.
+                                 * Otherwise leave the page alone, it will be
+                                 * eventually encountered again later.
+                                 */
+                                mlock_vma_page(page);
+                                unlock_page(page);
+                        }
                         continue;       /* don't unmap */
                 }
 
```
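The core of the mm/backing-dev.c change is the switch from mod_delayed_work() to queue_delayed_work(). mod_delayed_work() re-arms the timer on every call, so a steady stream of wakeup requests can keep pushing the flush deadline further into the future; queue_delayed_work() does nothing while the work is still pending, so an already-scheduled earlier expiry is preserved. Below is a minimal userspace sketch of the two arming policies (the struct and helper names are hypothetical stand-ins, not the kernel workqueue API):

```c
#include <stdbool.h>
#include <stdio.h>

/* Toy model of a delayed work item: a pending flag plus a deadline. */
struct delayed_work_sim {
        bool pending;
        long expires;   /* simulated jiffies deadline */
};

/* mod_delayed_work()-like policy: always (re)set the deadline. */
static void mod_delayed(struct delayed_work_sim *dw, long now, long timeout)
{
        dw->pending = true;
        dw->expires = now + timeout;
}

/* queue_delayed_work()-like policy: no-op while the work is pending. */
static void queue_delayed(struct delayed_work_sim *dw, long now, long timeout)
{
        if (!dw->pending) {
                dw->pending = true;
                dw->expires = now + timeout;
        }
}

int main(void)
{
        struct delayed_work_sim a = { 0 }, b = { 0 };
        long timeout = 500;     /* e.g. dirty_writeback_interval * 10 */

        /* Arm at t=0, then wakeup requests keep arriving every 100 ticks. */
        mod_delayed(&a, 0, timeout);
        queue_delayed(&b, 0, timeout);
        for (long t = 100; t <= 400; t += 100) {
                mod_delayed(&a, t, timeout);    /* deadline keeps sliding */
                queue_delayed(&b, t, timeout);  /* original deadline kept */
        }
        printf("mod policy fires at t=%ld\n", a.expires);       /* 900 */
        printf("queue policy fires at t=%ld\n", b.expires);     /* 500 */
        return 0;
}
```

The test_bit(BDI_registered, ...) check under wb_lock pairs with the clear_bit() added in bdi_wb_shutdown(): once the flag is cleared, no new work can be queued, so the subsequent drain of the delayed work cannot race with a late re-arm.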
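The mm/rmap.c hunk applies the classic trylock pattern: the page the caller already holds locked (check_page) is mlocked directly, while any other page is locked opportunistically and simply skipped on contention, since a later pass will encounter it again. Blocking in lock_page() here could stall or deadlock while check_page is held, so trylock_page() trades completeness for progress. A rough userspace analogue using pthreads (mark_mlocked() and the single page_lock are illustrative stand-ins, not kernel interfaces):

```c
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t page_lock = PTHREAD_MUTEX_INITIALIZER;

/* Stand-in for mlock_vma_page(); the caller must hold the page's lock. */
static void mark_mlocked(int page_id)
{
        printf("page %d mlocked\n", page_id);
}

static void process_page(int page_id, int already_locked)
{
        if (already_locked) {
                /* The page == check_page case: lock is already held. */
                mark_mlocked(page_id);
        } else if (pthread_mutex_trylock(&page_lock) == 0) {
                /* Got the lock without blocking: do the work, release. */
                mark_mlocked(page_id);
                pthread_mutex_unlock(&page_lock);
        }
        /* else: contended; leave the page for a later encounter. */
}

int main(void)
{
        process_page(1, 1);     /* check_page: caller holds its lock */
        process_page(2, 0);     /* other page: trylock succeeds here */
        return 0;
}
```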