author     Fengguang Wu <wfg@mail.ustc.edu.cn>                    2007-10-16 01:24:33 -0700
committer  Linus Torvalds <torvalds@woody.linux-foundation.org>   2007-10-16 09:42:52 -0700
commit     f4e6b498d6e06742d72706ef50593a9c4dd72214 (patch)
tree       74a573302b2ea086c0d21907175be604f110f5b1
parent     readahead: mmap read-around simplification (diff)
download   linux-dev-f4e6b498d6e06742d72706ef50593a9c4dd72214.tar.xz
           linux-dev-f4e6b498d6e06742d72706ef50593a9c4dd72214.zip
readahead: combine file_ra_state.prev_index/prev_offset into prev_pos
Combine the file_ra_state members
		unsigned long prev_index
		unsigned int prev_offset
into
		loff_t prev_pos

It is more consistent and better supports huge files.

Thanks to Peter for the nice proposal!

[akpm@linux-foundation.org: fix shift overflow]
Cc: Peter Zijlstra <peterz@infradead.org>
Signed-off-by: Fengguang Wu <wfg@mail.ustc.edu.cn>
Cc: Rusty Russell <rusty@rustcorp.com.au>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
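The encoding is simple enough to show in isolation. Below is a minimal userspace C sketch of the packing this patch adopts; the PAGE_SHIFT value and the helper names are assumptions for illustration, not part of the patch. A single 64-bit byte position carries both the page index and the offset within that page, and widening before the shift is exactly the 32-bit overflow fix noted in the [akpm@linux-foundation.org: fix shift overflow] tag.

    #include <stdio.h>
    #include <stdint.h>

    #define PAGE_SHIFT 12                    /* assumes 4 KiB pages */
    #define PAGE_SIZE  (1UL << PAGE_SHIFT)

    /* Pack a page index and an in-page offset into one byte position.
     * The cast to 64 bits must happen before the shift: on a 32-bit
     * machine, (index << PAGE_SHIFT) in unsigned long would overflow
     * for any position past 4 GiB. */
    static int64_t pack_pos(unsigned long index, unsigned int offset)
    {
            return ((int64_t)index << PAGE_SHIFT) | offset;
    }

    static unsigned long pos_to_index(int64_t pos)
    {
            return pos >> PAGE_SHIFT;
    }

    static unsigned int pos_to_offset(int64_t pos)
    {
            return pos & (PAGE_SIZE - 1);
    }

    int main(void)
    {
            int64_t pos = pack_pos(0x123456UL, 0x9ab);

            /* prints: index=0x123456 offset=0x9ab */
            printf("index=%#lx offset=%#x\n",
                   pos_to_index(pos), pos_to_offset(pos));
            return 0;
    }

The two decode helpers mirror the expressions the patch introduces in do_generic_mapping_read(): prev_pos >> PAGE_CACHE_SHIFT recovers the page index, and prev_pos & (PAGE_CACHE_SIZE-1) the offset within that page.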
-rw-r--r--  fs/ext3/dir.c        2
-rw-r--r--  fs/ext4/dir.c        2
-rw-r--r--  fs/splice.c          2
-rw-r--r--  include/linux/fs.h   3
-rw-r--r--  mm/filemap.c        13
-rw-r--r--  mm/readahead.c      15
6 files changed, 19 insertions, 18 deletions
diff --git a/fs/ext3/dir.c b/fs/ext3/dir.c
index c00723a99f44..c2c3491b18cf 100644
--- a/fs/ext3/dir.c
+++ b/fs/ext3/dir.c
@@ -143,7 +143,7 @@ static int ext3_readdir(struct file * filp,
sb->s_bdev->bd_inode->i_mapping,
&filp->f_ra, filp,
index, 1);
- filp->f_ra.prev_index = index;
+ filp->f_ra.prev_pos = (loff_t)index << PAGE_CACHE_SHIFT;
bh = ext3_bread(NULL, inode, blk, 0, &err);
}
diff --git a/fs/ext4/dir.c b/fs/ext4/dir.c
index 3ab01c04e00c..e11890acfa21 100644
--- a/fs/ext4/dir.c
+++ b/fs/ext4/dir.c
@@ -142,7 +142,7 @@ static int ext4_readdir(struct file * filp,
sb->s_bdev->bd_inode->i_mapping,
&filp->f_ra, filp,
index, 1);
- filp->f_ra.prev_index = index;
+ filp->f_ra.prev_pos = (loff_t)index << PAGE_CACHE_SHIFT;
bh = ext4_bread(NULL, inode, blk, 0, &err);
}
diff --git a/fs/splice.c b/fs/splice.c
index e95a36228863..2df6be43c667 100644
--- a/fs/splice.c
+++ b/fs/splice.c
@@ -447,7 +447,7 @@ fill_it:
*/
while (page_nr < nr_pages)
page_cache_release(pages[page_nr++]);
- in->f_ra.prev_index = index;
+ in->f_ra.prev_pos = (loff_t)index << PAGE_CACHE_SHIFT;
if (spd.nr_pages)
return splice_to_pipe(pipe, &spd);
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 8250811081ff..500ffc0e4ac7 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -704,8 +704,7 @@ struct file_ra_state {
unsigned int ra_pages; /* Maximum readahead window */
int mmap_miss; /* Cache miss stat for mmap accesses */
- unsigned long prev_index; /* Cache last read() position */
- unsigned int prev_offset; /* Offset where last read() ended in a page */
+ loff_t prev_pos; /* Cache last read() position */
};
/*
diff --git a/mm/filemap.c b/mm/filemap.c
index 5dc18d76e703..bbcca456d8a6 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -879,8 +879,8 @@ void do_generic_mapping_read(struct address_space *mapping,
cached_page = NULL;
index = *ppos >> PAGE_CACHE_SHIFT;
next_index = index;
- prev_index = ra.prev_index;
- prev_offset = ra.prev_offset;
+ prev_index = ra.prev_pos >> PAGE_CACHE_SHIFT;
+ prev_offset = ra.prev_pos & (PAGE_CACHE_SIZE-1);
last_index = (*ppos + desc->count + PAGE_CACHE_SIZE-1) >> PAGE_CACHE_SHIFT;
offset = *ppos & ~PAGE_CACHE_MASK;
@@ -966,7 +966,6 @@ page_ok:
index += offset >> PAGE_CACHE_SHIFT;
offset &= ~PAGE_CACHE_MASK;
prev_offset = offset;
- ra.prev_offset = offset;
page_cache_release(page);
if (ret == nr && desc->count)
@@ -1056,9 +1055,11 @@ no_cached_page:
out:
*_ra = ra;
- _ra->prev_index = prev_index;
+ _ra->prev_pos = prev_index;
+ _ra->prev_pos <<= PAGE_CACHE_SHIFT;
+ _ra->prev_pos |= prev_offset;
- *ppos = ((loff_t) index << PAGE_CACHE_SHIFT) + offset;
+ *ppos = ((loff_t)index << PAGE_CACHE_SHIFT) + offset;
if (cached_page)
page_cache_release(cached_page);
if (filp)
@@ -1396,7 +1397,7 @@ retry_find:
* Found the page and have a reference on it.
*/
mark_page_accessed(page);
- ra->prev_index = page->index;
+ ra->prev_pos = (loff_t)page->index << PAGE_CACHE_SHIFT;
vmf->page = page;
return ret | VM_FAULT_LOCKED;
diff --git a/mm/readahead.c b/mm/readahead.c
index d2504877b269..4a58befbde4a 100644
--- a/mm/readahead.c
+++ b/mm/readahead.c
@@ -46,7 +46,7 @@ void
file_ra_state_init(struct file_ra_state *ra, struct address_space *mapping)
{
ra->ra_pages = mapping->backing_dev_info->ra_pages;
- ra->prev_index = -1;
+ ra->prev_pos = -1;
}
EXPORT_SYMBOL_GPL(file_ra_state_init);
@@ -327,7 +327,7 @@ static unsigned long get_next_ra_size(struct file_ra_state *ra,
* indicator. The flag won't be set on already cached pages, to avoid the
* readahead-for-nothing fuss, saving pointless page cache lookups.
*
- * prev_index tracks the last visited page in the _previous_ read request.
+ * prev_pos tracks the last visited byte in the _previous_ read request.
* It should be maintained by the caller, and will be used for detecting
* small random reads. Note that the readahead algorithm checks loosely
* for sequential patterns. Hence interleaved reads might be served as
@@ -351,11 +351,9 @@ ondemand_readahead(struct address_space *mapping,
bool hit_readahead_marker, pgoff_t offset,
unsigned long req_size)
{
- int max; /* max readahead pages */
- int sequential;
-
- max = ra->ra_pages;
- sequential = (offset - ra->prev_index <= 1UL) || (req_size > max);
+ int max = ra->ra_pages; /* max readahead pages */
+ pgoff_t prev_offset;
+ int sequential;
/*
* It's the expected callback offset, assume sequential access.
@@ -369,6 +367,9 @@ ondemand_readahead(struct address_space *mapping,
goto readit;
}
+ prev_offset = ra->prev_pos >> PAGE_CACHE_SHIFT;
+ sequential = offset - prev_offset <= 1UL || req_size > max;
+
/*
* Standalone, small read.
* Read as is, and do not pollute the readahead state.
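To make the relocated test concrete, here is a standalone C sketch of the sequential-access check that the last hunk derives from prev_pos; the function name, the userspace types, and the constants in main() are assumptions for illustration only, not kernel code.

    #include <stdio.h>
    #include <stdint.h>

    #define PAGE_SHIFT 12                    /* assumes 4 KiB pages */

    /* Sketch of the prev_pos-based test in ondemand_readahead():
     * a read counts as sequential when it begins on the page where
     * the previous read ended, or the page right after it, or when
     * the request is too large to be a small random read. */
    static int is_sequential(int64_t prev_pos, uint64_t offset,
                             unsigned long req_size, unsigned long max_pages)
    {
            uint64_t prev_offset = (uint64_t)prev_pos >> PAGE_SHIFT;

            /* Unsigned wrap-around makes backward jumps look huge,
             * so they are classified as random, as intended. */
            return offset - prev_offset <= 1UL || req_size > max_pages;
    }

    int main(void)
    {
            /* Previous read ended at byte 8192, i.e. in page 2. */
            printf("%d\n", is_sequential(8192, 2, 4, 32));   /* 1: resume */
            printf("%d\n", is_sequential(8192, 100, 4, 32)); /* 0: random */
            return 0;
    }

In the patched kernel code the same comparison reads offset - prev_offset <= 1UL || req_size > max, and it is now evaluated only after the cheaper expected-offset fast path has been tried.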