mm: make read_cache_page synchronous
Ensure pages are uptodate after returning from read_cache_page, which allows
us to cut out most of the filesystem-internal PageUptodate calls.
I didn't take a close look down the call chains, but this appears to fix 7
possible use-before-uptodate bugs in hfs, 2 in hfsplus, 1 in jfs, a few in
ecryptfs, 1 in jffs2, and a possible case in block2mtd where cleared data
could be overwritten by readpage. All of these depend on whether the filler
is asynchronous and/or can return with a !uptodate page.
Signed-off-by: Nick Piggin <npiggin@suse.de>
Cc: Hugh Dickins <hugh@veritas.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
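
For illustration, here is the caller pattern this change removes versus the
one it enables. This is a minimal sketch in the style of the ntfs callers
below, not code taken from the patch; the helpers (read_mapping_page,
wait_on_page_locked, PageUptodate, page_cache_release) are the page-cache
APIs of this era, and error handling is trimmed to the essentials.

        /*
         * Before: read_mapping_page() could hand back a locked,
         * not-yet-uptodate page, so every caller had to wait for the
         * read to complete and re-check the uptodate bit itself.
         */
        page = read_mapping_page(mapping, index, NULL);
        if (IS_ERR(page))
                return PTR_ERR(page);           /* synchronous error */
        wait_on_page_locked(page);
        if (unlikely(!PageUptodate(page))) {    /* asynchronous error */
                page_cache_release(page);
                return -EIO;
        }

        /*
         * After: read_cache_page() (and thus read_mapping_page())
         * returns only once the page is uptodate, or an ERR_PTR on
         * failure, so the wait and the PageUptodate() check fall away.
         */
        page = read_mapping_page(mapping, index, NULL);
        if (IS_ERR(page))
                return PTR_ERR(page);

The ntfs hunks below apply exactly this transformation, keeping only the
PageError() checks.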
diff --git a/fs/ntfs/aops.h b/fs/ntfs/aops.h
index 9393f4b..caecc58 100644
--- a/fs/ntfs/aops.h
+++ b/fs/ntfs/aops.h
@@ -89,9 +89,8 @@
struct page *page = read_mapping_page(mapping, index, NULL);
if (!IS_ERR(page)) {
- wait_on_page_locked(page);
kmap(page);
- if (PageUptodate(page) && !PageError(page))
+ if (!PageError(page))
return page;
ntfs_unmap_page(page);
return ERR_PTR(-EIO);
diff --git a/fs/ntfs/attrib.c b/fs/ntfs/attrib.c
index 7659cc1..1c08fef 100644
--- a/fs/ntfs/attrib.c
+++ b/fs/ntfs/attrib.c
@@ -2532,14 +2532,7 @@
page = read_mapping_page(mapping, idx, NULL);
if (IS_ERR(page)) {
ntfs_error(vol->sb, "Failed to read first partial "
- "page (sync error, index 0x%lx).", idx);
- return PTR_ERR(page);
- }
- wait_on_page_locked(page);
- if (unlikely(!PageUptodate(page))) {
- ntfs_error(vol->sb, "Failed to read first partial page "
- "(async error, index 0x%lx).", idx);
- page_cache_release(page);
+ "page (error, index 0x%lx).", idx);
return PTR_ERR(page);
}
/*
@@ -2602,14 +2595,7 @@
page = read_mapping_page(mapping, idx, NULL);
if (IS_ERR(page)) {
ntfs_error(vol->sb, "Failed to read last partial page "
- "(sync error, index 0x%lx).", idx);
- return PTR_ERR(page);
- }
- wait_on_page_locked(page);
- if (unlikely(!PageUptodate(page))) {
- ntfs_error(vol->sb, "Failed to read last partial page "
- "(async error, index 0x%lx).", idx);
- page_cache_release(page);
+ "(error, index 0x%lx).", idx);
return PTR_ERR(page);
}
kaddr = kmap_atomic(page, KM_USER0);
diff --git a/fs/ntfs/file.c b/fs/ntfs/file.c
index d69c459..dbbac55 100644
--- a/fs/ntfs/file.c
+++ b/fs/ntfs/file.c
@@ -236,8 +236,7 @@
err = PTR_ERR(page);
goto init_err_out;
}
- wait_on_page_locked(page);
- if (unlikely(!PageUptodate(page) || PageError(page))) {
+ if (unlikely(PageError(page))) {
page_cache_release(page);
err = -EIO;
goto init_err_out;
diff --git a/fs/ntfs/super.c b/fs/ntfs/super.c
index 1594c90..2ddde53 100644
--- a/fs/ntfs/super.c
+++ b/fs/ntfs/super.c
@@ -2471,7 +2471,6 @@
s64 nr_free = vol->nr_clusters;
u32 *kaddr;
struct address_space *mapping = vol->lcnbmp_ino->i_mapping;
- filler_t *readpage = (filler_t*)mapping->a_ops->readpage;
struct page *page;
pgoff_t index, max_index;
@@ -2494,24 +2493,14 @@
* Read the page from page cache, getting it from backing store
* if necessary, and increment the use count.
*/
- page = read_cache_page(mapping, index, (filler_t*)readpage,
- NULL);
+ page = read_mapping_page(mapping, index, NULL);
/* Ignore pages which errored synchronously. */
if (IS_ERR(page)) {
- ntfs_debug("Sync read_cache_page() error. Skipping "
+ ntfs_debug("read_mapping_page() error. Skipping "
"page (index 0x%lx).", index);
nr_free -= PAGE_CACHE_SIZE * 8;
continue;
}
- wait_on_page_locked(page);
- /* Ignore pages which errored asynchronously. */
- if (!PageUptodate(page)) {
- ntfs_debug("Async read_cache_page() error. Skipping "
- "page (index 0x%lx).", index);
- page_cache_release(page);
- nr_free -= PAGE_CACHE_SIZE * 8;
- continue;
- }
kaddr = (u32*)kmap_atomic(page, KM_USER0);
/*
* For each 4 bytes, subtract the number of set bits. If this
@@ -2562,7 +2551,6 @@
{
u32 *kaddr;
struct address_space *mapping = vol->mftbmp_ino->i_mapping;
- filler_t *readpage = (filler_t*)mapping->a_ops->readpage;
struct page *page;
pgoff_t index;
@@ -2576,24 +2564,14 @@
* Read the page from page cache, getting it from backing store
* if necessary, and increment the use count.
*/
- page = read_cache_page(mapping, index, (filler_t*)readpage,
- NULL);
+ page = read_mapping_page(mapping, index, NULL);
/* Ignore pages which errored synchronously. */
if (IS_ERR(page)) {
- ntfs_debug("Sync read_cache_page() error. Skipping "
+ ntfs_debug("read_mapping_page() error. Skipping "
"page (index 0x%lx).", index);
nr_free -= PAGE_CACHE_SIZE * 8;
continue;
}
- wait_on_page_locked(page);
- /* Ignore pages which errored asynchronously. */
- if (!PageUptodate(page)) {
- ntfs_debug("Async read_cache_page() error. Skipping "
- "page (index 0x%lx).", index);
- page_cache_release(page);
- nr_free -= PAGE_CACHE_SIZE * 8;
- continue;
- }
kaddr = (u32*)kmap_atomic(page, KM_USER0);
/*
* For each 4 bytes, subtract the number of set bits. If this