Diffstat (limited to 'fs/buffer.c')
 fs/buffer.c | 200
 1 file changed, 72 insertions(+), 128 deletions(-)
diff --git a/fs/buffer.c b/fs/buffer.c
index 62cfd17dc5fe..23f1f3a68077 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -160,12 +160,7 @@ int sync_blockdev(struct block_device *bdev)
}
EXPORT_SYMBOL(sync_blockdev);
-/*
- * Write out and wait upon all dirty data associated with this
- * superblock. Filesystem data as well as the underlying block
- * device. Takes the superblock lock.
- */
-int fsync_super(struct super_block *sb)
+static void __fsync_super(struct super_block *sb)
{
sync_inodes_sb(sb, 0);
DQUOT_SYNC(sb);
@@ -177,7 +172,16 @@ int fsync_super(struct super_block *sb)
sb->s_op->sync_fs(sb, 1);
sync_blockdev(sb->s_bdev);
sync_inodes_sb(sb, 1);
+}
+/*
+ * Write out and wait upon all dirty data associated with this
+ * superblock. Filesystem data as well as the underlying block
+ * device. Takes the superblock lock.
+ */
+int fsync_super(struct super_block *sb)
+{
+ __fsync_super(sb);
return sync_blockdev(sb->s_bdev);
}
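
The hunk above factors the write-out sequence into __fsync_super() so that freeze_bdev() below can reuse it instead of open-coding the same steps. Condensed from the patch itself (a sketch, not an applyable hunk), the resulting shape is:

	/* __fsync_super() flushes inodes, quota, the superblock, the
	 * filesystem and the block device without the final wait;
	 * fsync_super() adds that wait on top. */
	int fsync_super(struct super_block *sb)
	{
		__fsync_super(sb);		  /* push all dirty state out */
		return sync_blockdev(sb->s_bdev); /* wait for it to complete */
	}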
@@ -201,7 +205,7 @@ int fsync_bdev(struct block_device *bdev)
* freeze_bdev -- lock a filesystem and force it into a consistent state
* @bdev: blockdevice to lock
*
- * This takes the block device bd_mount_sem to make sure no new mounts
+ * This takes the block device bd_mount_mutex to make sure no new mounts
* happen on bdev until thaw_bdev() is called.
* If a superblock is found on this device, we take the s_umount semaphore
* on it to make sure nobody unmounts until the snapshot creation is done.
@@ -210,25 +214,13 @@ struct super_block *freeze_bdev(struct block_device *bdev)
{
struct super_block *sb;
- down(&bdev->bd_mount_sem);
+ mutex_lock(&bdev->bd_mount_mutex);
sb = get_super(bdev);
if (sb && !(sb->s_flags & MS_RDONLY)) {
sb->s_frozen = SB_FREEZE_WRITE;
smp_wmb();
- sync_inodes_sb(sb, 0);
- DQUOT_SYNC(sb);
-
- lock_super(sb);
- if (sb->s_dirt && sb->s_op->write_super)
- sb->s_op->write_super(sb);
- unlock_super(sb);
-
- if (sb->s_op->sync_fs)
- sb->s_op->sync_fs(sb, 1);
-
- sync_blockdev(sb->s_bdev);
- sync_inodes_sb(sb, 1);
+ __fsync_super(sb);
sb->s_frozen = SB_FREEZE_TRANS;
smp_wmb();
@@ -264,7 +256,7 @@ void thaw_bdev(struct block_device *bdev, struct super_block *sb)
drop_super(sb);
}
- up(&bdev->bd_mount_sem);
+ mutex_unlock(&bdev->bd_mount_mutex);
}
EXPORT_SYMBOL(thaw_bdev);
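
bd_mount_sem becomes bd_mount_mutex as part of the semaphore-to-mutex conversion; the lock still brackets the whole freeze/thaw window so no new mounts can appear while the device is frozen. A sketch of a caller of the pair (snapshot_device() is a hypothetical helper, not from this patch):

	struct super_block *sb;

	sb = freeze_bdev(bdev);	/* takes bd_mount_mutex, syncs the fs */
	snapshot_device(bdev);	/* hypothetical: device is consistent here */
	thaw_bdev(bdev, sb);	/* unfreezes and drops bd_mount_mutex */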
@@ -327,31 +319,24 @@ int file_fsync(struct file *filp, struct dentry *dentry, int datasync)
return ret;
}
-static long do_fsync(unsigned int fd, int datasync)
+long do_fsync(struct file *file, int datasync)
{
- struct file * file;
- struct address_space *mapping;
- int ret, err;
-
- ret = -EBADF;
- file = fget(fd);
- if (!file)
- goto out;
+ int ret;
+ int err;
+ struct address_space *mapping = file->f_mapping;
- ret = -EINVAL;
if (!file->f_op || !file->f_op->fsync) {
/* Why? We can still call filemap_fdatawrite */
- goto out_putf;
+ ret = -EINVAL;
+ goto out;
}
- mapping = file->f_mapping;
-
current->flags |= PF_SYNCWRITE;
ret = filemap_fdatawrite(mapping);
/*
- * We need to protect against concurrent writers,
- * which could cause livelocks in fsync_buffers_list
+ * We need to protect against concurrent writers, which could cause
+ * livelocks in fsync_buffers_list().
*/
mutex_lock(&mapping->host->i_mutex);
err = file->f_op->fsync(file, file->f_dentry, datasync);
@@ -362,21 +347,31 @@ static long do_fsync(unsigned int fd, int datasync)
if (!ret)
ret = err;
current->flags &= ~PF_SYNCWRITE;
-
-out_putf:
- fput(file);
out:
return ret;
}
+static long __do_fsync(unsigned int fd, int datasync)
+{
+ struct file *file;
+ int ret = -EBADF;
+
+ file = fget(fd);
+ if (file) {
+ ret = do_fsync(file, datasync);
+ fput(file);
+ }
+ return ret;
+}
+
asmlinkage long sys_fsync(unsigned int fd)
{
- return do_fsync(fd, 0);
+ return __do_fsync(fd, 0);
}
asmlinkage long sys_fdatasync(unsigned int fd)
{
- return do_fsync(fd, 1);
+ return __do_fsync(fd, 1);
}
/*
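
With this split, the fd lookup lives only in the __do_fsync() wrapper, and do_fsync() takes a struct file directly, so in-kernel callers that already hold a file can sync it without going through fget()/fput(). A sketch of such a caller (flush_log_file() is illustrative, not part of the patch):

	static long flush_log_file(struct file *log)
	{
		return do_fsync(log, 1);	/* datasync semantics, no fd lookup */
	}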
@@ -431,8 +426,10 @@ __find_get_block_slow(struct block_device *bdev, sector_t block)
if (all_mapped) {
printk("__find_get_block_slow() failed. "
"block=%llu, b_blocknr=%llu\n",
- (unsigned long long)block, (unsigned long long)bh->b_blocknr);
- printk("b_state=0x%08lx, b_size=%u\n", bh->b_state, bh->b_size);
+ (unsigned long long)block,
+ (unsigned long long)bh->b_blocknr);
+ printk("b_state=0x%08lx, b_size=%zu\n",
+ bh->b_state, bh->b_size);
printk("device blocksize: %d\n", 1 << bd_inode->i_blkbits);
}
out_unlock:
@@ -496,7 +493,7 @@ static void free_more_memory(void)
wakeup_pdflush(1024);
yield();
- for_each_pgdat(pgdat) {
+ for_each_online_pgdat(pgdat) {
zones = pgdat->node_zonelists[gfp_zone(GFP_NOFS)].zones;
if (*zones)
try_to_free_pages(zones, GFP_NOFS);
@@ -801,8 +798,7 @@ void mark_buffer_dirty_inode(struct buffer_head *bh, struct inode *inode)
if (!mapping->assoc_mapping) {
mapping->assoc_mapping = buffer_mapping;
} else {
- if (mapping->assoc_mapping != buffer_mapping)
- BUG();
+ BUG_ON(mapping->assoc_mapping != buffer_mapping);
}
if (list_empty(&bh->b_assoc_buffers)) {
spin_lock(&buffer_mapping->private_lock);
@@ -865,8 +861,8 @@ int __set_page_dirty_buffers(struct page *page)
}
write_unlock_irq(&mapping->tree_lock);
__mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
+ return 1;
}
-
return 0;
}
EXPORT_SYMBOL(__set_page_dirty_buffers);
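
The relocated return makes __set_page_dirty_buffers() report whether it actually dirtied the page rather than unconditionally returning 0. A sketch of the post-patch control flow (the radix-tree tagging between the lock operations is elided):

	if (!TestSetPageDirty(page)) {
		/* ... tag the page dirty in the mapping's radix tree ... */
		__mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
		return 1;	/* we transitioned the page to dirty */
	}
	return 0;		/* page was already dirty */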
@@ -1119,8 +1115,7 @@ grow_dev_page(struct block_device *bdev, sector_t block,
if (!page)
return NULL;
- if (!PageLocked(page))
- BUG();
+ BUG_ON(!PageLocked(page));
if (page_has_buffers(page)) {
bh = page_buffers(page);
@@ -1527,8 +1522,7 @@ void set_bh_page(struct buffer_head *bh,
struct page *page, unsigned long offset)
{
bh->b_page = page;
- if (offset >= PAGE_SIZE)
- BUG();
+ BUG_ON(offset >= PAGE_SIZE);
if (PageHighMem(page))
/*
* This catches illegal uses and preserves the offset:
@@ -1598,11 +1592,10 @@ EXPORT_SYMBOL(try_to_release_page);
* point. Because the caller is about to free (and possibly reuse) those
* blocks on-disk.
*/
-int block_invalidatepage(struct page *page, unsigned long offset)
+void block_invalidatepage(struct page *page, unsigned long offset)
{
struct buffer_head *head, *bh, *next;
unsigned int curr_off = 0;
- int ret = 1;
BUG_ON(!PageLocked(page));
if (!page_has_buffers(page))
@@ -1629,19 +1622,18 @@ int block_invalidatepage(struct page *page, unsigned long offset)
* so real IO is not possible anymore.
*/
if (offset == 0)
- ret = try_to_release_page(page, 0);
+ try_to_release_page(page, 0);
out:
- return ret;
+ return;
}
EXPORT_SYMBOL(block_invalidatepage);
-int do_invalidatepage(struct page *page, unsigned long offset)
+void do_invalidatepage(struct page *page, unsigned long offset)
{
- int (*invalidatepage)(struct page *, unsigned long);
- invalidatepage = page->mapping->a_ops->invalidatepage;
- if (invalidatepage == NULL)
- invalidatepage = block_invalidatepage;
- return (*invalidatepage)(page, offset);
+ void (*invalidatepage)(struct page *, unsigned long);
+ invalidatepage = page->mapping->a_ops->invalidatepage ? :
+ block_invalidatepage;
+ (*invalidatepage)(page, offset);
}
/*
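
The "? :" in do_invalidatepage() is the GNU C conditional with the middle operand omitted: a ? : b yields a when a is non-zero and b otherwise, evaluating a only once. The same idiom in isolation:

	/* GNU C extension: ptr ? : fallback  is  ptr ? ptr : fallback,
	 * with ptr evaluated a single time. */
	void (*invalidatepage)(struct page *, unsigned long) =
		page->mapping->a_ops->invalidatepage ? : block_invalidatepage;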
@@ -1743,6 +1735,7 @@ static int __block_write_full_page(struct inode *inode, struct page *page,
sector_t block;
sector_t last_block;
struct buffer_head *bh, *head;
+ const unsigned blocksize = 1 << inode->i_blkbits;
int nr_underway = 0;
BUG_ON(!PageLocked(page));
@@ -1750,7 +1743,7 @@ static int __block_write_full_page(struct inode *inode, struct page *page,
last_block = (i_size_read(inode) - 1) >> inode->i_blkbits;
if (!page_has_buffers(page)) {
- create_empty_buffers(page, 1 << inode->i_blkbits,
+ create_empty_buffers(page, blocksize,
(1 << BH_Dirty)|(1 << BH_Uptodate));
}
@@ -1785,6 +1778,7 @@ static int __block_write_full_page(struct inode *inode, struct page *page,
clear_buffer_dirty(bh);
set_buffer_uptodate(bh);
} else if (!buffer_mapped(bh) && buffer_dirty(bh)) {
+ WARN_ON(bh->b_size != blocksize);
err = get_block(inode, block, bh, 1);
if (err)
goto recover;
@@ -1938,6 +1932,7 @@ static int __block_prepare_write(struct inode *inode, struct page *page,
if (buffer_new(bh))
clear_buffer_new(bh);
if (!buffer_mapped(bh)) {
+ WARN_ON(bh->b_size != blocksize);
err = get_block(inode, block, bh, 1);
if (err)
break;
@@ -2093,6 +2088,7 @@ int block_read_full_page(struct page *page, get_block_t *get_block)
fully_mapped = 0;
if (iblock < lblock) {
+ WARN_ON(bh->b_size != blocksize);
err = get_block(inode, iblock, bh, 0);
if (err)
SetPageError(page);
@@ -2414,6 +2410,7 @@ int nobh_prepare_write(struct page *page, unsigned from, unsigned to,
create = 1;
if (block_start >= to)
create = 0;
+ map_bh.b_size = blocksize;
ret = get_block(inode, block_in_file + block_in_page,
&map_bh, create);
if (ret)
@@ -2674,6 +2671,7 @@ int block_truncate_page(struct address_space *mapping,
err = 0;
if (!buffer_mapped(bh)) {
+ WARN_ON(bh->b_size != blocksize);
err = get_block(inode, iblock, bh, 0);
if (err)
goto unlock;
@@ -2760,6 +2758,7 @@ sector_t generic_block_bmap(struct address_space *mapping, sector_t block,
struct inode *inode = mapping->host;
tmp.b_state = 0;
tmp.b_blocknr = 0;
+ tmp.b_size = 1 << inode->i_blkbits;
get_block(inode, block, &tmp, 0);
return tmp.b_blocknr;
}
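
The WARN_ON(bh->b_size != blocksize) checks and the new b_size assignments reflect a calling-convention change: get_block() implementations may now consult bh->b_size to map more than one block per call, so callers must initialize it first. generic_block_bmap() above shows the minimal conforming pattern:

	struct buffer_head tmp;

	tmp.b_state = 0;
	tmp.b_blocknr = 0;
	tmp.b_size = 1 << inode->i_blkbits;	/* request one block's worth */
	get_block(inode, block, &tmp, 0);	/* may map up to tmp.b_size bytes */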
@@ -3012,7 +3011,7 @@ out:
}
EXPORT_SYMBOL(try_to_free_buffers);
-int block_sync_page(struct page *page)
+void block_sync_page(struct page *page)
{
struct address_space *mapping;
@@ -3020,7 +3019,6 @@ int block_sync_page(struct page *page)
mapping = page_mapping(page);
if (mapping)
blk_run_backing_dev(mapping->backing_dev_info, page);
- return 0;
}
/*
@@ -3051,66 +3049,6 @@ asmlinkage long sys_bdflush(int func, long data)
}
/*
- * Migration function for pages with buffers. This function can only be used
- * if the underlying filesystem guarantees that no other references to "page"
- * exist.
- */
-#ifdef CONFIG_MIGRATION
-int buffer_migrate_page(struct page *newpage, struct page *page)
-{
- struct address_space *mapping = page->mapping;
- struct buffer_head *bh, *head;
-
- if (!mapping)
- return -EAGAIN;
-
- if (!page_has_buffers(page))
- return migrate_page(newpage, page);
-
- head = page_buffers(page);
-
- if (migrate_page_remove_references(newpage, page, 3))
- return -EAGAIN;
-
- bh = head;
- do {
- get_bh(bh);
- lock_buffer(bh);
- bh = bh->b_this_page;
-
- } while (bh != head);
-
- ClearPagePrivate(page);
- set_page_private(newpage, page_private(page));
- set_page_private(page, 0);
- put_page(page);
- get_page(newpage);
-
- bh = head;
- do {
- set_bh_page(bh, newpage, bh_offset(bh));
- bh = bh->b_this_page;
-
- } while (bh != head);
-
- SetPagePrivate(newpage);
-
- migrate_page_copy(newpage, page);
-
- bh = head;
- do {
- unlock_buffer(bh);
- put_bh(bh);
- bh = bh->b_this_page;
-
- } while (bh != head);
-
- return 0;
-}
-EXPORT_SYMBOL(buffer_migrate_page);
-#endif
-
-/*
* Buffer-head allocation
*/
static kmem_cache_t *bh_cachep;
@@ -3138,7 +3076,7 @@ static void recalc_bh_state(void)
if (__get_cpu_var(bh_accounting).ratelimit++ < 4096)
return;
__get_cpu_var(bh_accounting).ratelimit = 0;
- for_each_cpu(i)
+ for_each_online_cpu(i)
tot += per_cpu(bh_accounting, i).nr;
buffer_heads_over_limit = (tot > max_buffer_heads);
}
@@ -3187,6 +3125,9 @@ static void buffer_exit_cpu(int cpu)
brelse(b->bhs[i]);
b->bhs[i] = NULL;
}
+ get_cpu_var(bh_accounting).nr += per_cpu(bh_accounting, cpu).nr;
+ per_cpu(bh_accounting, cpu).nr = 0;
+ put_cpu_var(bh_accounting);
}
static int buffer_cpu_notify(struct notifier_block *self,
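
Together with the for_each_online_cpu() change above, the three lines added to buffer_exit_cpu() keep the buffer_head accounting exact across CPU hotplug: a dead CPU's per-cpu count would otherwise be stranded and never summed again. The consumer side, as it reads after the patch:

	/* recalc_bh_state() now sums only online CPUs, which is why the
	 * departing CPU's count is folded into a live counter above. */
	for_each_online_cpu(i)
		tot += per_cpu(bh_accounting, i).nr;
	buffer_heads_over_limit = (tot > max_buffer_heads);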
@@ -3203,8 +3144,11 @@ void __init buffer_init(void)
int nrpages;
bh_cachep = kmem_cache_create("buffer_head",
- sizeof(struct buffer_head), 0,
- SLAB_RECLAIM_ACCOUNT|SLAB_PANIC, init_buffer_head, NULL);
+ sizeof(struct buffer_head), 0,
+ (SLAB_RECLAIM_ACCOUNT|SLAB_PANIC|
+ SLAB_MEM_SPREAD),
+ init_buffer_head,
+ NULL);
/*
* Limit the bh occupancy to 10% of ZONE_NORMAL