Diffstat (limited to 'fs')
-rw-r--r--  fs/afs/file.c  6
-rw-r--r--  fs/bio.c  25
-rw-r--r--  fs/block_dev.c  7
-rw-r--r--  fs/buffer.c  36
-rw-r--r--  fs/cifs/cifsfs.c  18
-rw-r--r--  fs/cifs/file.c  6
-rw-r--r--  fs/cifs/inode.c  6
-rw-r--r--  fs/cifs/readdir.c  8
-rw-r--r--  fs/cramfs/inode.c  31
-rw-r--r--  fs/dcache.c  14
-rw-r--r--  fs/dcookies.c  25
-rw-r--r--  fs/direct-io.c  27
-rw-r--r--  fs/dnotify.c  4
-rw-r--r--  fs/eventpoll.c  6
-rw-r--r--  fs/exec.c  2
-rw-r--r--  fs/ext2/inode.c  14
-rw-r--r--  fs/ext3/balloc.c  109
-rw-r--r--  fs/ext3/dir.c  5
-rw-r--r--  fs/ext3/inode.c  582
-rw-r--r--  fs/ext3/super.c  6
-rw-r--r--  fs/fat/inode.c  2
-rw-r--r--  fs/fcntl.c  4
-rw-r--r--  fs/hfs/inode.c  13
-rw-r--r--  fs/hfsplus/inode.c  13
-rw-r--r--  fs/inode.c  8
-rw-r--r--  fs/inotify.c  12
-rw-r--r--  fs/jbd/transaction.c  13
-rw-r--r--  fs/jffs2/compr_zlib.c  19
-rw-r--r--  fs/jfs/inode.c  5
-rw-r--r--  fs/jfs/jfs_logmgr.c  27
-rw-r--r--  fs/jfs/jfs_metapage.c  11
-rw-r--r--  fs/lockd/host.c  19
-rw-r--r--  fs/lockd/svc.c  17
-rw-r--r--  fs/lockd/svcsubs.c  17
-rw-r--r--  fs/locks.c  41
-rw-r--r--  fs/mpage.c  104
-rw-r--r--  fs/namespace.c  4
-rw-r--r--  fs/nfs/callback.c  11
-rw-r--r--  fs/nfs/file.c  3
-rw-r--r--  fs/nfs/read.c  6
-rw-r--r--  fs/nfs/write.c  12
-rw-r--r--  fs/nfsd/nfs4state.c  47
-rw-r--r--  fs/ntfs/logfile.c  4
-rw-r--r--  fs/ntfs/mft.c  2
-rw-r--r--  fs/ntfs/ntfs.h  29
-rw-r--r--  fs/ocfs2/aops.c  2
-rw-r--r--  fs/ocfs2/journal.c  10
-rw-r--r--  fs/ocfs2/namei.c  5
-rw-r--r--  fs/partitions/devfs.c  12
-rw-r--r--  fs/pipe.c  2
-rw-r--r--  fs/proc/array.c  5
-rw-r--r--  fs/proc/generic.c  32
-rw-r--r--  fs/proc/proc_devtree.c  2
-rw-r--r--  fs/reiserfs/inode.c  9
-rw-r--r--  fs/reiserfs/prints.c  2
-rw-r--r--  fs/super.c  7
-rw-r--r--  fs/xfs/linux-2.6/xfs_aops.c  15
-rw-r--r--  fs/xfs/linux-2.6/xfs_super.c  5
58 files changed, 885 insertions, 603 deletions
diff --git a/fs/afs/file.c b/fs/afs/file.c
index 150b1922792..7bb716887e2 100644
--- a/fs/afs/file.c
+++ b/fs/afs/file.c
@@ -28,7 +28,7 @@ static int afs_file_release(struct inode *inode, struct file *file);
#endif
static int afs_file_readpage(struct file *file, struct page *page);
-static int afs_file_invalidatepage(struct page *page, unsigned long offset);
+static void afs_file_invalidatepage(struct page *page, unsigned long offset);
static int afs_file_releasepage(struct page *page, gfp_t gfp_flags);
struct inode_operations afs_file_inode_operations = {
@@ -212,7 +212,7 @@ int afs_cache_get_page_cookie(struct page *page,
/*
* invalidate part or all of a page
*/
-static int afs_file_invalidatepage(struct page *page, unsigned long offset)
+static void afs_file_invalidatepage(struct page *page, unsigned long offset)
{
int ret = 1;
@@ -238,11 +238,11 @@ static int afs_file_invalidatepage(struct page *page, unsigned long offset)
if (!PageWriteback(page))
ret = page->mapping->a_ops->releasepage(page,
0);
+ /* possibly should BUG_ON(!ret); - neilb */
}
}
_leave(" = %d", ret);
- return ret;
} /* end afs_file_invalidatepage() */
/*****************************************************************************/
diff --git a/fs/bio.c b/fs/bio.c
index 73e664c01d3..eb8fbc53f2c 100644
--- a/fs/bio.c
+++ b/fs/bio.c
@@ -30,7 +30,7 @@
#define BIO_POOL_SIZE 256
-static kmem_cache_t *bio_slab;
+static kmem_cache_t *bio_slab __read_mostly;
#define BIOVEC_NR_POOLS 6
@@ -39,7 +39,7 @@ static kmem_cache_t *bio_slab;
* basically we just need to survive
*/
#define BIO_SPLIT_ENTRIES 8
-mempool_t *bio_split_pool;
+mempool_t *bio_split_pool __read_mostly;
struct biovec_slab {
int nr_vecs;
@@ -1125,16 +1125,6 @@ struct bio_pair *bio_split(struct bio *bi, mempool_t *pool, int first_sectors)
return bp;
}
-static void *bio_pair_alloc(gfp_t gfp_flags, void *data)
-{
- return kmalloc(sizeof(struct bio_pair), gfp_flags);
-}
-
-static void bio_pair_free(void *bp, void *data)
-{
- kfree(bp);
-}
-
/*
* create memory pools for biovec's in a bio_set.
@@ -1151,8 +1141,7 @@ static int biovec_create_pools(struct bio_set *bs, int pool_entries, int scale)
if (i >= scale)
pool_entries >>= 1;
- *bvp = mempool_create(pool_entries, mempool_alloc_slab,
- mempool_free_slab, bp->slab);
+ *bvp = mempool_create_slab_pool(pool_entries, bp->slab);
if (!*bvp)
return -ENOMEM;
}
@@ -1189,9 +1178,7 @@ struct bio_set *bioset_create(int bio_pool_size, int bvec_pool_size, int scale)
if (!bs)
return NULL;
- bs->bio_pool = mempool_create(bio_pool_size, mempool_alloc_slab,
- mempool_free_slab, bio_slab);
-
+ bs->bio_pool = mempool_create_slab_pool(bio_pool_size, bio_slab);
if (!bs->bio_pool)
goto bad;
@@ -1254,8 +1241,8 @@ static int __init init_bio(void)
if (!fs_bio_set)
panic("bio: can't allocate bios\n");
- bio_split_pool = mempool_create(BIO_SPLIT_ENTRIES,
- bio_pair_alloc, bio_pair_free, NULL);
+ bio_split_pool = mempool_create_kmalloc_pool(BIO_SPLIT_ENTRIES,
+ sizeof(struct bio_pair));
if (!bio_split_pool)
panic("bio: can't create split pool\n");
diff --git a/fs/block_dev.c b/fs/block_dev.c
index 573fc8e0b67..5983d42df01 100644
--- a/fs/block_dev.c
+++ b/fs/block_dev.c
@@ -131,9 +131,10 @@ blkdev_get_block(struct inode *inode, sector_t iblock,
static int
blkdev_get_blocks(struct inode *inode, sector_t iblock,
- unsigned long max_blocks, struct buffer_head *bh, int create)
+ struct buffer_head *bh, int create)
{
sector_t end_block = max_block(I_BDEV(inode));
+ unsigned long max_blocks = bh->b_size >> inode->i_blkbits;
if ((iblock + max_blocks) > end_block) {
max_blocks = end_block - iblock;
@@ -234,7 +235,7 @@ static int block_fsync(struct file *filp, struct dentry *dentry, int datasync)
*/
static __cacheline_aligned_in_smp DEFINE_SPINLOCK(bdev_lock);
-static kmem_cache_t * bdev_cachep;
+static kmem_cache_t * bdev_cachep __read_mostly;
static struct inode *bdev_alloc_inode(struct super_block *sb)
{
@@ -308,7 +309,7 @@ static struct file_system_type bd_type = {
.kill_sb = kill_anon_super,
};
-static struct vfsmount *bd_mnt;
+static struct vfsmount *bd_mnt __read_mostly;
struct super_block *blockdev_superblock;
void __init bdev_cache_init(void)
diff --git a/fs/buffer.c b/fs/buffer.c
index 4342ab0ad99..d597758dd12 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -426,8 +426,10 @@ __find_get_block_slow(struct block_device *bdev, sector_t block)
if (all_mapped) {
printk("__find_get_block_slow() failed. "
"block=%llu, b_blocknr=%llu\n",
- (unsigned long long)block, (unsigned long long)bh->b_blocknr);
- printk("b_state=0x%08lx, b_size=%u\n", bh->b_state, bh->b_size);
+ (unsigned long long)block,
+ (unsigned long long)bh->b_blocknr);
+ printk("b_state=0x%08lx, b_size=%zu\n",
+ bh->b_state, bh->b_size);
printk("device blocksize: %d\n", 1 << bd_inode->i_blkbits);
}
out_unlock:
@@ -1590,11 +1592,10 @@ EXPORT_SYMBOL(try_to_release_page);
* point. Because the caller is about to free (and possibly reuse) those
* blocks on-disk.
*/
-int block_invalidatepage(struct page *page, unsigned long offset)
+void block_invalidatepage(struct page *page, unsigned long offset)
{
struct buffer_head *head, *bh, *next;
unsigned int curr_off = 0;
- int ret = 1;
BUG_ON(!PageLocked(page));
if (!page_has_buffers(page))
@@ -1621,19 +1622,18 @@ int block_invalidatepage(struct page *page, unsigned long offset)
* so real IO is not possible anymore.
*/
if (offset == 0)
- ret = try_to_release_page(page, 0);
+ try_to_release_page(page, 0);
out:
- return ret;
+ return;
}
EXPORT_SYMBOL(block_invalidatepage);
-int do_invalidatepage(struct page *page, unsigned long offset)
+void do_invalidatepage(struct page *page, unsigned long offset)
{
- int (*invalidatepage)(struct page *, unsigned long);
- invalidatepage = page->mapping->a_ops->invalidatepage;
- if (invalidatepage == NULL)
- invalidatepage = block_invalidatepage;
- return (*invalidatepage)(page, offset);
+ void (*invalidatepage)(struct page *, unsigned long);
+ invalidatepage = page->mapping->a_ops->invalidatepage ? :
+ block_invalidatepage;
+ (*invalidatepage)(page, offset);
}
/*
@@ -1735,6 +1735,7 @@ static int __block_write_full_page(struct inode *inode, struct page *page,
sector_t block;
sector_t last_block;
struct buffer_head *bh, *head;
+ const unsigned blocksize = 1 << inode->i_blkbits;
int nr_underway = 0;
BUG_ON(!PageLocked(page));
@@ -1742,7 +1743,7 @@ static int __block_write_full_page(struct inode *inode, struct page *page,
last_block = (i_size_read(inode) - 1) >> inode->i_blkbits;
if (!page_has_buffers(page)) {
- create_empty_buffers(page, 1 << inode->i_blkbits,
+ create_empty_buffers(page, blocksize,
(1 << BH_Dirty)|(1 << BH_Uptodate));
}
@@ -1777,6 +1778,7 @@ static int __block_write_full_page(struct inode *inode, struct page *page,
clear_buffer_dirty(bh);
set_buffer_uptodate(bh);
} else if (!buffer_mapped(bh) && buffer_dirty(bh)) {
+ WARN_ON(bh->b_size != blocksize);
err = get_block(inode, block, bh, 1);
if (err)
goto recover;
@@ -1930,6 +1932,7 @@ static int __block_prepare_write(struct inode *inode, struct page *page,
if (buffer_new(bh))
clear_buffer_new(bh);
if (!buffer_mapped(bh)) {
+ WARN_ON(bh->b_size != blocksize);
err = get_block(inode, block, bh, 1);
if (err)
break;
@@ -2085,6 +2088,7 @@ int block_read_full_page(struct page *page, get_block_t *get_block)
fully_mapped = 0;
if (iblock < lblock) {
+ WARN_ON(bh->b_size != blocksize);
err = get_block(inode, iblock, bh, 0);
if (err)
SetPageError(page);
@@ -2406,6 +2410,7 @@ int nobh_prepare_write(struct page *page, unsigned from, unsigned to,
create = 1;
if (block_start >= to)
create = 0;
+ map_bh.b_size = blocksize;
ret = get_block(inode, block_in_file + block_in_page,
&map_bh, create);
if (ret)
@@ -2666,6 +2671,7 @@ int block_truncate_page(struct address_space *mapping,
err = 0;
if (!buffer_mapped(bh)) {
+ WARN_ON(bh->b_size != blocksize);
err = get_block(inode, iblock, bh, 0);
if (err)
goto unlock;
@@ -2752,6 +2758,7 @@ sector_t generic_block_bmap(struct address_space *mapping, sector_t block,
struct inode *inode = mapping->host;
tmp.b_state = 0;
tmp.b_blocknr = 0;
+ tmp.b_size = 1 << inode->i_blkbits;
get_block(inode, block, &tmp, 0);
return tmp.b_blocknr;
}
@@ -3004,7 +3011,7 @@ out:
}
EXPORT_SYMBOL(try_to_free_buffers);
-int block_sync_page(struct page *page)
+void block_sync_page(struct page *page)
{
struct address_space *mapping;
@@ -3012,7 +3019,6 @@ int block_sync_page(struct page *page)
mapping = page_mapping(page);
if (mapping)
blk_run_backing_dev(mapping->backing_dev_info, page);
- return 0;
}
/*
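The buffer.c hunks show the caller side of the new get_block() convention: any code that hands a private buffer_head to get_block() must now initialize b_size to the amount of mapping it wants (the added WARN_ONs catch callers that forget). A minimal caller-side sketch, mirroring the generic_block_bmap() pattern in the hunk above:

	struct buffer_head tmp;

	tmp.b_state = 0;
	tmp.b_blocknr = 0;
	tmp.b_size = 1 << inode->i_blkbits;	/* ask for one block */
	get_block(inode, block, &tmp, 0);
	/* tmp.b_blocknr now holds the mapped block, if any */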
diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
index 221b3334b73..6b99b51d669 100644
--- a/fs/cifs/cifsfs.c
+++ b/fs/cifs/cifsfs.c
@@ -738,10 +738,8 @@ cifs_init_request_bufs(void)
cERROR(1,("cifs_min_rcv set to maximum (64)"));
}
- cifs_req_poolp = mempool_create(cifs_min_rcv,
- mempool_alloc_slab,
- mempool_free_slab,
- cifs_req_cachep);
+ cifs_req_poolp = mempool_create_slab_pool(cifs_min_rcv,
+ cifs_req_cachep);
if(cifs_req_poolp == NULL) {
kmem_cache_destroy(cifs_req_cachep);
@@ -771,10 +769,8 @@ cifs_init_request_bufs(void)
cFYI(1,("cifs_min_small set to maximum (256)"));
}
- cifs_sm_req_poolp = mempool_create(cifs_min_small,
- mempool_alloc_slab,
- mempool_free_slab,
- cifs_sm_req_cachep);
+ cifs_sm_req_poolp = mempool_create_slab_pool(cifs_min_small,
+ cifs_sm_req_cachep);
if(cifs_sm_req_poolp == NULL) {
mempool_destroy(cifs_req_poolp);
@@ -808,10 +804,8 @@ cifs_init_mids(void)
if (cifs_mid_cachep == NULL)
return -ENOMEM;
- cifs_mid_poolp = mempool_create(3 /* a reasonable min simultan opers */,
- mempool_alloc_slab,
- mempool_free_slab,
- cifs_mid_cachep);
+ /* 3 is a reasonable minimum number of simultaneous operations */
+ cifs_mid_poolp = mempool_create_slab_pool(3, cifs_mid_cachep);
if(cifs_mid_poolp == NULL) {
kmem_cache_destroy(cifs_mid_cachep);
return -ENOMEM;
diff --git a/fs/cifs/file.c b/fs/cifs/file.c
index 165d6742638..fb49aef1f2e 100644
--- a/fs/cifs/file.c
+++ b/fs/cifs/file.c
@@ -1339,7 +1339,7 @@ int cifs_fsync(struct file *file, struct dentry *dentry, int datasync)
return rc;
}
-/* static int cifs_sync_page(struct page *page)
+/* static void cifs_sync_page(struct page *page)
{
struct address_space *mapping;
struct inode *inode;
@@ -1353,16 +1353,18 @@ int cifs_fsync(struct file *file, struct dentry *dentry, int datasync)
return 0;
inode = mapping->host;
if (!inode)
- return 0; */
+ return; */
/* fill in rpages then
result = cifs_pagein_inode(inode, index, rpages); */ /* BB finish */
/* cFYI(1, ("rpages is %d for sync page of Index %ld ", rpages, index));
+#if 0
if (rc < 0)
return rc;
return 0;
+#endif
} */
/*
diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c
index ff93a9f81d1..598eec9778f 100644
--- a/fs/cifs/inode.c
+++ b/fs/cifs/inode.c
@@ -163,9 +163,9 @@ int cifs_get_inode_info_unix(struct inode **pinode,
if (num_of_bytes < end_of_file)
cFYI(1, ("allocation size less than end of file"));
- cFYI(1,
- ("Size %ld and blocks %ld",
- (unsigned long) inode->i_size, inode->i_blocks));
+ cFYI(1, ("Size %ld and blocks %llu",
+ (unsigned long) inode->i_size,
+ (unsigned long long)inode->i_blocks));
if (S_ISREG(inode->i_mode)) {
cFYI(1, ("File inode"));
inode->i_op = &cifs_file_inode_ops;
diff --git a/fs/cifs/readdir.c b/fs/cifs/readdir.c
index edb3b6eb34b..488bd0d81dc 100644
--- a/fs/cifs/readdir.c
+++ b/fs/cifs/readdir.c
@@ -197,10 +197,10 @@ static void fill_in_inode(struct inode *tmp_inode,
if (allocation_size < end_of_file)
cFYI(1, ("May be sparse file, allocation less than file size"));
- cFYI(1,
- ("File Size %ld and blocks %ld and blocksize %ld",
- (unsigned long)tmp_inode->i_size, tmp_inode->i_blocks,
- tmp_inode->i_blksize));
+ cFYI(1, ("File Size %ld and blocks %llu and blocksize %ld",
+ (unsigned long)tmp_inode->i_size,
+ (unsigned long long)tmp_inode->i_blocks,
+ tmp_inode->i_blksize));
if (S_ISREG(tmp_inode->i_mode)) {
cFYI(1, ("File inode"));
tmp_inode->i_op = &cifs_file_inode_ops;
diff --git a/fs/cramfs/inode.c b/fs/cramfs/inode.c
index 8ad52f5bf25..acc1b2c10a8 100644
--- a/fs/cramfs/inode.c
+++ b/fs/cramfs/inode.c
@@ -22,6 +22,7 @@
#include <linux/cramfs_fs_sb.h>
#include <linux/buffer_head.h>
#include <linux/vfs.h>
+#include <linux/mutex.h>
#include <asm/semaphore.h>
#include <asm/uaccess.h>
@@ -31,7 +32,7 @@ static struct inode_operations cramfs_dir_inode_operations;
static struct file_operations cramfs_directory_operations;
static struct address_space_operations cramfs_aops;
-static DECLARE_MUTEX(read_mutex);
+static DEFINE_MUTEX(read_mutex);
/* These two macros may change in future, to provide better st_ino
@@ -250,20 +251,20 @@ static int cramfs_fill_super(struct super_block *sb, void *data, int silent)
memset(sbi, 0, sizeof(struct cramfs_sb_info));
/* Invalidate the read buffers on mount: think disk change.. */
- down(&read_mutex);
+ mutex_lock(&read_mutex);
for (i = 0; i < READ_BUFFERS; i++)
buffer_blocknr[i] = -1;
/* Read the first block and get the superblock from it */
memcpy(&super, cramfs_read(sb, 0, sizeof(super)), sizeof(super));
- up(&read_mutex);
+ mutex_unlock(&read_mutex);
/* Do sanity checks on the superblock */
if (super.magic != CRAMFS_MAGIC) {
/* check at 512 byte offset */
- down(&read_mutex);
+ mutex_lock(&read_mutex);
memcpy(&super, cramfs_read(sb, 512, sizeof(super)), sizeof(super));
- up(&read_mutex);
+ mutex_unlock(&read_mutex);
if (super.magic != CRAMFS_MAGIC) {
if (!silent)
printk(KERN_ERR "cramfs: wrong magic\n");
@@ -366,7 +367,7 @@ static int cramfs_readdir(struct file *filp, void *dirent, filldir_t filldir)
mode_t mode;
int namelen, error;
- down(&read_mutex);
+ mutex_lock(&read_mutex);
de = cramfs_read(sb, OFFSET(inode) + offset, sizeof(*de)+256);
name = (char *)(de+1);
@@ -379,7 +380,7 @@ static int cramfs_readdir(struct file *filp, void *dirent, filldir_t filldir)
memcpy(buf, name, namelen);
ino = CRAMINO(de);
mode = de->mode;
- up(&read_mutex);
+ mutex_unlock(&read_mutex);
nextoffset = offset + sizeof(*de) + namelen;
for (;;) {
if (!namelen) {
@@ -410,7 +411,7 @@ static struct dentry * cramfs_lookup(struct inode *dir, struct dentry *dentry, s
unsigned int offset = 0;
int sorted;
- down(&read_mutex);
+ mutex_lock(&read_mutex);
sorted = CRAMFS_SB(dir->i_sb)->flags & CRAMFS_FLAG_SORTED_DIRS;
while (offset < dir->i_size) {
struct cramfs_inode *de;
@@ -433,7 +434,7 @@ static struct dentry * cramfs_lookup(struct inode *dir, struct dentry *dentry, s
for (;;) {
if (!namelen) {
- up(&read_mutex);
+ mutex_unlock(&read_mutex);
return ERR_PTR(-EIO);
}
if (name[namelen-1])
@@ -447,7 +448,7 @@ static struct dentry * cramfs_lookup(struct inode *dir, struct dentry *dentry, s
continue;
if (!retval) {
struct cramfs_inode entry = *de;
- up(&read_mutex);
+ mutex_unlock(&read_mutex);
d_add(dentry, get_cramfs_inode(dir->i_sb, &entry));
return NULL;
}
@@ -455,7 +456,7 @@ static struct dentry * cramfs_lookup(struct inode *dir, struct dentry *dentry, s
if (sorted)
break;
}
- up(&read_mutex);
+ mutex_unlock(&read_mutex);
d_add(dentry, NULL);
return NULL;
}
@@ -474,21 +475,21 @@ static int cramfs_readpage(struct file *file, struct page * page)
u32 start_offset, compr_len;
start_offset = OFFSET(inode) + maxblock*4;
- down(&read_mutex);
+ mutex_lock(&read_mutex);
if (page->index)
start_offset = *(u32 *) cramfs_read(sb, blkptr_offset-4, 4);
compr_len = (*(u32 *) cramfs_read(sb, blkptr_offset, 4) - start_offset);
- up(&read_mutex);
+ mutex_unlock(&read_mutex);
pgdata = kmap(page);
if (compr_len == 0)
; /* hole */
else {
- down(&read_mutex);
+ mutex_lock(&read_mutex);
bytes_filled = cramfs_uncompress_block(pgdata,
PAGE_CACHE_SIZE,
cramfs_read(sb, start_offset, compr_len),
compr_len);
- up(&read_mutex);
+ mutex_unlock(&read_mutex);
}
} else
pgdata = kmap(page);
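The cramfs changes are a plain semaphore-to-mutex conversion: DECLARE_MUTEX() declared a counting semaphore initialized to 1, and the new API replaces it with DEFINE_MUTEX() plus mutex_lock()/mutex_unlock(). The pattern, sketched with a hypothetical lock name:

	#include <linux/mutex.h>

	static DEFINE_MUTEX(example_mutex);

	static void example_critical_section(void)
	{
		mutex_lock(&example_mutex);
		/* ... touch the shared read buffers ... */
		mutex_unlock(&example_mutex);
	}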
diff --git a/fs/dcache.c b/fs/dcache.c
index 0778f49f993..19458d39950 100644
--- a/fs/dcache.c
+++ b/fs/dcache.c
@@ -35,7 +35,7 @@
#include <linux/bootmem.h>
-int sysctl_vfs_cache_pressure = 100;
+int sysctl_vfs_cache_pressure __read_mostly = 100;
EXPORT_SYMBOL_GPL(sysctl_vfs_cache_pressure);
__cacheline_aligned_in_smp DEFINE_SPINLOCK(dcache_lock);
@@ -43,7 +43,7 @@ static seqlock_t rename_lock __cacheline_aligned_in_smp = SEQLOCK_UNLOCKED;
EXPORT_SYMBOL(dcache_lock);
-static kmem_cache_t *dentry_cache;
+static kmem_cache_t *dentry_cache __read_mostly;
#define DNAME_INLINE_LEN (sizeof(struct dentry)-offsetof(struct dentry,d_iname))
@@ -58,9 +58,9 @@ static kmem_cache_t *dentry_cache;
#define D_HASHBITS d_hash_shift
#define D_HASHMASK d_hash_mask
-static unsigned int d_hash_mask;
-static unsigned int d_hash_shift;
-static struct hlist_head *dentry_hashtable;
+static unsigned int d_hash_mask __read_mostly;
+static unsigned int d_hash_shift __read_mostly;
+static struct hlist_head *dentry_hashtable __read_mostly;
static LIST_HEAD(dentry_unused);
/* Statistics gathering. */
@@ -1710,10 +1710,10 @@ static void __init dcache_init(unsigned long mempages)
}
/* SLAB cache for __getname() consumers */
-kmem_cache_t *names_cachep;
+kmem_cache_t *names_cachep __read_mostly;
/* SLAB cache for file structures */
-kmem_cache_t *filp_cachep;
+kmem_cache_t *filp_cachep __read_mostly;
EXPORT_SYMBOL(d_genocide);
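Several files in this series (bio.c, block_dev.c, dcache.c, dcookies.c, dnotify.c, eventpoll.c) only tag set-once globals with __read_mostly, which places them in the .data.read_mostly section so they do not share cache lines with frequently written data. The annotation is just an attribute on the declaration, for example (hypothetical variable name):

	static kmem_cache_t *example_cachep __read_mostly;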
diff --git a/fs/dcookies.c b/fs/dcookies.c
index f8274a8f83b..8749339bf4f 100644
--- a/fs/dcookies.c
+++ b/fs/dcookies.c
@@ -23,6 +23,7 @@
#include <linux/mm.h>
#include <linux/errno.h>
#include <linux/dcookies.h>
+#include <linux/mutex.h>
#include <asm/uaccess.h>
/* The dcookies are allocated from a kmem_cache and
@@ -36,10 +37,10 @@ struct dcookie_struct {
};
static LIST_HEAD(dcookie_users);
-static DECLARE_MUTEX(dcookie_sem);
-static kmem_cache_t * dcookie_cache;
-static struct list_head * dcookie_hashtable;
-static size_t hash_size;
+static DEFINE_MUTEX(dcookie_mutex);
+static kmem_cache_t *dcookie_cache __read_mostly;
+static struct list_head *dcookie_hashtable __read_mostly;
+static size_t hash_size __read_mostly;
static inline int is_live(void)
{
@@ -114,7 +115,7 @@ int get_dcookie(struct dentry * dentry, struct vfsmount * vfsmnt,
int err = 0;
struct dcookie_struct * dcs;
- down(&dcookie_sem);
+ mutex_lock(&dcookie_mutex);
if (!is_live()) {
err = -EINVAL;
@@ -134,7 +135,7 @@ int get_dcookie(struct dentry * dentry, struct vfsmount * vfsmnt,
*cookie = dcookie_value(dcs);
out:
- up(&dcookie_sem);
+ mutex_unlock(&dcookie_mutex);
return err;
}
@@ -157,7 +158,7 @@ asmlinkage long sys_lookup_dcookie(u64 cookie64, char __user * buf, size_t len)
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
- down(&dcookie_sem);
+ mutex_lock(&dcookie_mutex);
if (!is_live()) {
err = -EINVAL;
@@ -192,7 +193,7 @@ asmlinkage long sys_lookup_dcookie(u64 cookie64, char __user * buf, size_t len)
out_free:
kfree(kbuf);
out:
- up(&dcookie_sem);
+ mutex_unlock(&dcookie_mutex);
return err;
}
@@ -290,7 +291,7 @@ struct dcookie_user * dcookie_register(void)
{
struct dcookie_user * user;
- down(&dcookie_sem);
+ mutex_lock(&dcookie_mutex);
user = kmalloc(sizeof(struct dcookie_user), GFP_KERNEL);
if (!user)
@@ -302,7 +303,7 @@ struct dcookie_user * dcookie_register(void)
list_add(&user->next, &dcookie_users);
out:
- up(&dcookie_sem);
+ mutex_unlock(&dcookie_mutex);
return user;
out_free:
kfree(user);
@@ -313,7 +314,7 @@ out_free:
void dcookie_unregister(struct dcookie_user * user)
{
- down(&dcookie_sem);
+ mutex_lock(&dcookie_mutex);
list_del(&user->next);
kfree(user);
@@ -321,7 +322,7 @@ void dcookie_unregister(struct dcookie_user * user)
if (!is_live())
dcookie_exit();
- up(&dcookie_sem);
+ mutex_unlock(&dcookie_mutex);
}
EXPORT_SYMBOL_GPL(dcookie_register);
diff --git a/fs/direct-io.c b/fs/direct-io.c
index 235ed8d1f11..9d1d2aa73e4 100644
--- a/fs/direct-io.c
+++ b/fs/direct-io.c
@@ -86,12 +86,12 @@ struct dio {
unsigned first_block_in_page; /* doesn't change, Used only once */
int boundary; /* prev block is at a boundary */
int reap_counter; /* rate limit reaping */
- get_blocks_t *get_blocks; /* block mapping function */
+ get_block_t *get_block; /* block mapping function */
dio_iodone_t *end_io; /* IO completion function */
sector_t final_block_in_bio; /* current final block in bio + 1 */
sector_t next_block_for_io; /* next block to be put under IO,
in dio_blocks units */
- struct buffer_head map_bh; /* last get_blocks() result */
+ struct buffer_head map_bh; /* last get_block() result */
/*
* Deferred addition of a page to the dio. These variables are
@@ -211,9 +211,9 @@ static struct page *dio_get_page(struct dio *dio)
/*
* Called when all DIO BIO I/O has been completed - let the filesystem
- * know, if it registered an interest earlier via get_blocks. Pass the
+ * know, if it registered an interest earlier via get_block. Pass the
* private field of the map buffer_head so that filesystems can use it
- * to hold additional state between get_blocks calls and dio_complete.
+ * to hold additional state between get_block calls and dio_complete.
*/
static void dio_complete(struct dio *dio, loff_t offset, ssize_t bytes)
{
@@ -493,7 +493,7 @@ static int dio_bio_reap(struct dio *dio)
* The fs is allowed to map lots of blocks at once. If it wants to do that,
* it uses the passed inode-relative block number as the file offset, as usual.
*
- * get_blocks() is passed the number of i_blkbits-sized blocks which direct_io
+ * get_block() is passed the number of i_blkbits-sized blocks which direct_io
* has remaining to do. The fs should not map more than this number of blocks.
*
* If the fs has mapped a lot of blocks, it should populate bh->b_size to
@@ -506,7 +506,7 @@ static int dio_bio_reap(struct dio *dio)
* In the case of filesystem holes: the fs may return an arbitrarily-large
* hole by returning an appropriate value in b_size and by clearing
* buffer_mapped(). However the direct-io code will only process holes one
- * block at a time - it will repeatedly call get_blocks() as it walks the hole.
+ * block at a time - it will repeatedly call get_block() as it walks the hole.
*/
static int get_more_blocks(struct dio *dio)
{
@@ -548,7 +548,8 @@ static int get_more_blocks(struct dio *dio)
* at a higher level for inside-i_size block-instantiating
* writes.
*/
- ret = (*dio->get_blocks)(dio->inode, fs_startblk, fs_count,
+ map_bh->b_size = fs_count << dio->blkbits;
+ ret = (*dio->get_block)(dio->inode, fs_startblk,
map_bh, create);
}
return ret;
@@ -783,11 +784,11 @@ static void dio_zero_block(struct dio *dio, int end)
* happily perform page-sized but 512-byte aligned IOs. It is important that
* blockdev IO be able to have fine alignment and large sizes.
*
- * So what we do is to permit the ->get_blocks function to populate bh.b_size
+ * So what we do is to permit the ->get_block function to populate bh.b_size
* with the size of IO which is permitted at this offset and this i_blkbits.
*
* For best results, the blockdev should be set up with 512-byte i_blkbits and
- * it should set b_size to PAGE_SIZE or more inside get_blocks(). This gives
+ * it should set b_size to PAGE_SIZE or more inside get_block(). This gives
* fine alignment but still allows this function to work in PAGE_SIZE units.
*/
static int do_direct_IO(struct dio *dio)
@@ -947,7 +948,7 @@ out:
static ssize_t
direct_io_worker(int rw, struct kiocb *iocb, struct inode *inode,
const struct iovec *iov, loff_t offset, unsigned long nr_segs,
- unsigned blkbits, get_blocks_t get_blocks, dio_iodone_t end_io,
+ unsigned blkbits, get_block_t get_block, dio_iodone_t end_io,
struct dio *dio)
{
unsigned long user_addr;
@@ -969,7 +970,7 @@ direct_io_worker(int rw, struct kiocb *iocb, struct inode *inode,
dio->boundary = 0;
dio->reap_counter = 0;
- dio->get_blocks = get_blocks;
+ dio->get_block = get_block;
dio->end_io = end_io;
dio->map_bh.b_private = NULL;
dio->final_block_in_bio = -1;
@@ -1177,7 +1178,7 @@ direct_io_worker(int rw, struct kiocb *iocb, struct inode *inode,
ssize_t
__blockdev_direct_IO(int rw, struct kiocb *iocb, struct inode *inode,
struct block_device *bdev, const struct iovec *iov, loff_t offset,
- unsigned long nr_segs, get_blocks_t get_blocks, dio_iodone_t end_io,
+ unsigned long nr_segs, get_block_t get_block, dio_iodone_t end_io,
int dio_lock_type)
{
int seg;
@@ -1273,7 +1274,7 @@ __blockdev_direct_IO(int rw, struct kiocb *iocb, struct inode *inode,
(end > i_size_read(inode)));
retval = direct_io_worker(rw, iocb, inode, iov, offset,
- nr_segs, blkbits, get_blocks, end_io, dio);
+ nr_segs, blkbits, get_block, end_io, dio);
if (rw == READ && dio_lock_type == DIO_LOCKING)
release_i_mutex = 0;
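The direct-io changes are the heart of the series: the separate get_blocks_t callback (which took an explicit max_blocks argument) is folded into the ordinary get_block_t, with the request size passed in through map_bh->b_size and the mapped size returned by the filesystem in b_size. A sketch of a conforming callback under that convention; fs_map_blocks() is a stand-in for a filesystem's real multi-block mapping routine, not something from the patch:

	static int example_get_block(struct inode *inode, sector_t iblock,
				     struct buffer_head *bh_result, int create)
	{
		/* how much the caller is asking for, in fs-sized blocks */
		unsigned max_blocks = bh_result->b_size >> inode->i_blkbits;
		int mapped;

		mapped = fs_map_blocks(inode, iblock, max_blocks, bh_result, create);
		if (mapped <= 0)
			return mapped;

		/* report how much was actually mapped back through b_size */
		bh_result->b_size = mapped << inode->i_blkbits;
		return 0;
	}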
diff --git a/fs/dnotify.c b/fs/dnotify.c
index f3b540dd5d1..f932591df5a 100644
--- a/fs/dnotify.c
+++ b/fs/dnotify.c
@@ -21,9 +21,9 @@
#include <linux/spinlock.h>
#include <linux/slab.h>
-int dir_notify_enable = 1;
+int dir_notify_enable __read_mostly = 1;
-static kmem_cache_t *dn_cache;
+static kmem_cache_t *dn_cache __read_mostly;
static void redo_inode_mask(struct inode *inode)
{
diff --git a/fs/eventpoll.c b/fs/eventpoll.c
index a0f682cdd03..e067a06c646 100644
--- a/fs/eventpoll.c
+++ b/fs/eventpoll.c
@@ -281,13 +281,13 @@ static struct mutex epmutex;
static struct poll_safewake psw;
/* Slab cache used to allocate "struct epitem" */
-static kmem_cache_t *epi_cache;
+static kmem_cache_t *epi_cache __read_mostly;
/* Slab cache used to allocate "struct eppoll_entry" */
-static kmem_cache_t *pwq_cache;
+static kmem_cache_t *pwq_cache __read_mostly;
/* Virtual fs used to allocate inodes for eventpoll files */
-static struct vfsmount *eventpoll_mnt;
+static struct vfsmount *eventpoll_mnt __read_mostly;
/* File callbacks that implement the eventpoll file behaviour */
static struct file_operations eventpoll_fops = {
diff --git a/fs/exec.c b/fs/exec.c
index 995cba3c62b..c7397c46ad6 100644
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -632,7 +632,7 @@ static int de_thread(struct task_struct *tsk)
* synchronize with any firing (by calling del_timer_sync)
* before we can safely let the old group leader die.
*/
- sig->real_timer.data = current;
+ sig->tsk = current;
spin_unlock_irq(lock);
if (hrtimer_cancel(&sig->real_timer))
hrtimer_restart(&sig->real_timer);
diff --git a/fs/ext2/inode.c b/fs/ext2/inode.c
index a717837f272..04af9c45dce 100644
--- a/fs/ext2/inode.c
+++ b/fs/ext2/inode.c
@@ -667,18 +667,6 @@ static sector_t ext2_bmap(struct address_space *mapping, sector_t block)
return generic_block_bmap(mapping,block,ext2_get_block);
}
-static int
-ext2_get_blocks(struct inode *inode, sector_t iblock, unsigned long max_blocks,
- struct buffer_head *bh_result, int create)
-{
- int ret;
-
- ret = ext2_get_block(inode, iblock, bh_result, create);
- if (ret == 0)
- bh_result->b_size = (1 << inode->i_blkbits);
- return ret;
-}
-
static ssize_t
ext2_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov,
loff_t offset, unsigned long nr_segs)
@@ -687,7 +675,7 @@ ext2_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov,
struct inode *inode = file->f_mapping->host;
return blockdev_direct_IO(rw, iocb, inode, inode->i_sb->s_bdev, iov,
- offset, nr_segs, ext2_get_blocks, NULL);
+ offset, nr_segs, ext2_get_block, NULL);
}
static int
diff --git a/fs/ext3/balloc.c b/fs/ext3/balloc.c
index 46623f77666..77927d6938f 100644
--- a/fs/ext3/balloc.c
+++ b/fs/ext3/balloc.c
@@ -653,9 +653,11 @@ claim_block(spinlock_t *lock, int block, struct buffer_head *bh)
*/
static int
ext3_try_to_allocate(struct super_block *sb, handle_t *handle, int group,
- struct buffer_head *bitmap_bh, int goal, struct ext3_reserve_window *my_rsv)
+ struct buffer_head *bitmap_bh, int goal,
+ unsigned long *count, struct ext3_reserve_window *my_rsv)
{
int group_first_block, start, end;
+ unsigned long num = 0;
/* we do allocation within the reservation window if we have a window */
if (my_rsv) {
@@ -713,8 +715,18 @@ repeat:
goto fail_access;
goto repeat;
}
- return goal;
+ num++;
+ goal++;
+ while (num < *count && goal < end
+ && ext3_test_allocatable(goal, bitmap_bh)
+ && claim_block(sb_bgl_lock(EXT3_SB(sb), group), goal, bitmap_bh)) {
+ num++;
+ goal++;
+ }
+ *count = num;
+ return goal - num;
fail_access:
+ *count = num;
return -1;
}
@@ -999,6 +1011,31 @@ retry:
goto retry;
}
+static void try_to_extend_reservation(struct ext3_reserve_window_node *my_rsv,
+ struct super_block *sb, int size)
+{
+ struct ext3_reserve_window_node *next_rsv;
+ struct rb_node *next;
+ spinlock_t *rsv_lock = &EXT3_SB(sb)->s_rsv_window_lock;
+
+ if (!spin_trylock(rsv_lock))
+ return;
+
+ next = rb_next(&my_rsv->rsv_node);
+
+ if (!next)
+ my_rsv->rsv_end += size;
+ else {
+ next_rsv = list_entry(next, struct ext3_reserve_window_node, rsv_node);
+
+ if ((next_rsv->rsv_start - my_rsv->rsv_end - 1) >= size)
+ my_rsv->rsv_end += size;
+ else
+ my_rsv->rsv_end = next_rsv->rsv_start - 1;
+ }
+ spin_unlock(rsv_lock);
+}
+
/*
* This is the main function used to allocate a new block and its reservation
* window.
@@ -1024,11 +1061,12 @@ static int
ext3_try_to_allocate_with_rsv(struct super_block *sb, handle_t *handle,
unsigned int group, struct buffer_head *bitmap_bh,
int goal, struct ext3_reserve_window_node * my_rsv,
- int *errp)
+ unsigned long *count, int *errp)
{
unsigned long group_first_block;
int ret = 0;
int fatal;
+ unsigned long num = *count;
*errp = 0;
@@ -1051,7 +1089,8 @@ ext3_try_to_allocate_with_rsv(struct super_block *sb, handle_t *handle,
* or last attempt to allocate a block with reservation turned on failed
*/
if (my_rsv == NULL ) {
- ret = ext3_try_to_allocate(sb, handle, group, bitmap_bh, goal, NULL);
+ ret = ext3_try_to_allocate(sb, handle, group, bitmap_bh,
+ goal, count, NULL);
goto out;
}
/*
@@ -1081,6 +1120,8 @@ ext3_try_to_allocate_with_rsv(struct super_block *sb, handle_t *handle,
while (1) {
if (rsv_is_empty(&my_rsv->rsv_window) || (ret < 0) ||
!goal_in_my_reservation(&my_rsv->rsv_window, goal, group, sb)) {
+ if (my_rsv->rsv_goal_size < *count)
+ my_rsv->rsv_goal_size = *count;
ret = alloc_new_reservation(my_rsv, goal, sb,
group, bitmap_bh);
if (ret < 0)
@@ -1088,16 +1129,21 @@ ext3_try_to_allocate_with_rsv(struct super_block *sb, handle_t *handle,
if (!goal_in_my_reservation(&my_rsv->rsv_window, goal, group, sb))
goal = -1;
- }
+ } else if (goal > 0 && (my_rsv->rsv_end-goal+1) < *count)
+ try_to_extend_reservation(my_rsv, sb,
+ *count-my_rsv->rsv_end + goal - 1);
+
if ((my_rsv->rsv_start >= group_first_block + EXT3_BLOCKS_PER_GROUP(sb))
|| (my_rsv->rsv_end < group_first_block))
BUG();
ret = ext3_try_to_allocate(sb, handle, group, bitmap_bh, goal,
- &my_rsv->rsv_window);
+ &num, &my_rsv->rsv_window);
if (ret >= 0) {
- my_rsv->rsv_alloc_hit++;
+ my_rsv->rsv_alloc_hit += num;
+ *count = num;
break; /* succeed */
}
+ num = *count;
}
out:
if (ret >= 0) {
@@ -1154,8 +1200,8 @@ int ext3_should_retry_alloc(struct super_block *sb, int *retries)
* bitmap, and then for any free bit if that fails.
* This function also updates quota and i_blocks field.
*/
-int ext3_new_block(handle_t *handle, struct inode *inode,
- unsigned long goal, int *errp)
+int ext3_new_blocks(handle_t *handle, struct inode *inode,
+ unsigned long goal, unsigned long *count, int *errp)
{
struct buffer_head *bitmap_bh = NULL;
struct buffer_head *gdp_bh;
@@ -1178,6 +1224,7 @@ int ext3_new_block(handle_t *handle, struct inode *inode,
static int goal_hits, goal_attempts;
#endif
unsigned long ngroups;
+ unsigned long num = *count;
*errp = -ENOSPC;
sb = inode->i_sb;
@@ -1189,7 +1236,7 @@ int ext3_new_block(handle_t *handle, struct inode *inode,
/*
* Check quota for allocation of this block.
*/
- if (DQUOT_ALLOC_BLOCK(inode, 1)) {
+ if (DQUOT_ALLOC_BLOCK(inode, num)) {
*errp = -EDQUOT;
return 0;
}
@@ -1244,7 +1291,7 @@ retry:
if (!bitmap_bh)
goto io_error;
ret_block = ext3_try_to_allocate_with_rsv(sb, handle, group_no,
- bitmap_bh, ret_block, my_rsv, &fatal);
+ bitmap_bh, ret_block, my_rsv, &num, &fatal);
if (fatal)
goto out;
if (ret_block >= 0)
@@ -1281,7 +1328,7 @@ retry:
if (!bitmap_bh)
goto io_error;
ret_block = ext3_try_to_allocate_with_rsv(sb, handle, group_no,
- bitmap_bh, -1, my_rsv, &fatal);
+ bitmap_bh, -1, my_rsv, &num, &fatal);
if (fatal)
goto out;
if (ret_block >= 0)
@@ -1316,13 +1363,15 @@ allocated:
target_block = ret_block + group_no * EXT3_BLOCKS_PER_GROUP(sb)
+ le32_to_cpu(es->s_first_data_block);
- if (target_block == le32_to_cpu(gdp->bg_block_bitmap) ||
- target_block == le32_to_cpu(gdp->bg_inode_bitmap) ||
+ if (in_range(le32_to_cpu(gdp->bg_block_bitmap), target_block, num) ||
+ in_range(le32_to_cpu(gdp->bg_inode_bitmap), target_block, num) ||
in_range(target_block, le32_to_cpu(gdp->bg_inode_table),
+ EXT3_SB(sb)->s_itb_per_group) ||
+ in_range(target_block + num - 1, le32_to_cpu(gdp->bg_inode_table),
EXT3_SB(sb)->s_itb_per_group))
ext3_error(sb, "ext3_new_block",
"Allocating block in system zone - "
- "block = %u", target_block);
+ "blocks from %u, length %lu", target_block, num);
performed_allocation = 1;
@@ -1341,10 +1390,14 @@ allocated:
jbd_lock_bh_state(bitmap_bh);
spin_lock(sb_bgl_lock(sbi, group_no));
if (buffer_jbd(bitmap_bh) && bh2jh(bitmap_bh)->b_committed_data) {
- if (ext3_test_bit(ret_block,
- bh2jh(bitmap_bh)->b_committed_data)) {
- printk("%s: block was unexpectedly set in "
- "b_committed_data\n", __FUNCTION__);
+ int i;
+
+ for (i = 0; i < num; i++) {
+ if (ext3_test_bit(ret_block,
+ bh2jh(bitmap_bh)->b_committed_data)) {
+ printk("%s: block was unexpectedly set in "
+ "b_committed_data\n", __FUNCTION__);
+ }
}
}
ext3_debug("found bit %d\n", ret_block);
@@ -1355,7 +1408,7 @@ allocated:
/* ret_block was blockgroup-relative. Now it becomes fs-relative */
ret_block = target_block;
- if (ret_block >= le32_to_cpu(es->s_blocks_count)) {
+ if (ret_block + num - 1 >= le32_to_cpu(es->s_blocks_count)) {
ext3_error(sb, "ext3_new_block",
"block(%d) >= blocks count(%d) - "
"block_group = %d, es == %p ", ret_block,
@@ -1373,9 +1426,9 @@ allocated:
spin_lock(sb_bgl_lock(sbi, group_no));
gdp->bg_free_blocks_count =
- cpu_to_le16(le16_to_cpu(gdp->bg_free_blocks_count) - 1);
+ cpu_to_le16(le16_to_cpu(gdp->bg_free_blocks_count) - num);
spin_unlock(sb_bgl_lock(sbi, group_no));
- percpu_counter_mod(&sbi->s_freeblocks_counter, -1);
+ percpu_counter_mod(&sbi->s_freeblocks_counter, -num);
BUFFER_TRACE(gdp_bh, "journal_dirty_metadata for group descriptor");
err = ext3_journal_dirty_metadata(handle, gdp_bh);
@@ -1388,6 +1441,8 @@ allocated:
*errp = 0;
brelse(bitmap_bh);
+ DQUOT_FREE_BLOCK(inode, *count-num);
+ *count = num;
return ret_block;
io_error:
@@ -1401,11 +1456,19 @@ out:
* Undo the block allocation
*/
if (!performed_allocation)
- DQUOT_FREE_BLOCK(inode, 1);
+ DQUOT_FREE_BLOCK(inode, *count);
brelse(bitmap_bh);
return 0;
}
+int ext3_new_block(handle_t *handle, struct inode *inode,
+ unsigned long goal, int *errp)
+{
+ unsigned long count = 1;
+
+ return ext3_new_blocks(handle, inode, goal, &count, errp);
+}
+
unsigned long ext3_count_free_blocks(struct super_block *sb)
{
unsigned long desc_count;
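With these balloc.c changes, ext3_new_blocks() tries to claim up to *count contiguous blocks starting from the goal, reports how many it actually got back through *count, and the old single-block ext3_new_block() survives as a thin wrapper around it. A hedged caller-side sketch (variable names are illustrative):

	unsigned long count = 8;	/* want up to 8 contiguous blocks */
	int err, first;

	first = ext3_new_blocks(handle, inode, goal, &count, &err);
	if (!first)
		return err;		/* nothing allocated */
	/* blocks first .. first + count - 1 now belong to this inode */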
diff --git a/fs/ext3/dir.c b/fs/ext3/dir.c
index 773459164bb..38bd3f6ec14 100644
--- a/fs/ext3/dir.c
+++ b/fs/ext3/dir.c
@@ -131,8 +131,9 @@ static int ext3_readdir(struct file * filp,
struct buffer_head *bh = NULL;
map_bh.b_state = 0;
- err = ext3_get_block_handle(NULL, inode, blk, &map_bh, 0, 0);
- if (!err) {
+ err = ext3_get_blocks_handle(NULL, inode, blk, 1,
+ &map_bh, 0, 0);
+ if (err > 0) {
page_cache_readahead(sb->s_bdev->bd_inode->i_mapping,
&filp->f_ra,
filp,
diff --git a/fs/ext3/inode.c b/fs/ext3/inode.c
index 2c361377e0a..48ae0339af1 100644
--- a/fs/ext3/inode.c
+++ b/fs/ext3/inode.c
@@ -44,16 +44,16 @@ static int ext3_writepage_trans_blocks(struct inode *inode);
/*
* Test whether an inode is a fast symlink.
*/
-static inline int ext3_inode_is_fast_symlink(struct inode *inode)
+static int ext3_inode_is_fast_symlink(struct inode *inode)
{
int ea_blocks = EXT3_I(inode)->i_file_acl ?
(inode->i_sb->s_blocksize >> 9) : 0;
- return (S_ISLNK(inode->i_mode) &&
- inode->i_blocks - ea_blocks == 0);
+ return (S_ISLNK(inode->i_mode) && inode->i_blocks - ea_blocks == 0);
}
-/* The ext3 forget function must perform a revoke if we are freeing data
+/*
+ * The ext3 forget function must perform a revoke if we are freeing data
* which has been journaled. Metadata (eg. indirect blocks) must be
* revoked in all cases.
*
@@ -61,10 +61,8 @@ static inline int ext3_inode_is_fast_symlink(struct inode *inode)
* but there may still be a record of it in the journal, and that record
* still needs to be revoked.
*/
-
-int ext3_forget(handle_t *handle, int is_metadata,
- struct inode *inode, struct buffer_head *bh,
- int blocknr)
+int ext3_forget(handle_t *handle, int is_metadata, struct inode *inode,
+ struct buffer_head *bh, int blocknr)
{
int err;
@@ -104,10 +102,9 @@ int ext3_forget(handle_t *handle, int is_metadata,
}
/*
- * Work out how many blocks we need to progress with the next chunk of a
+ * Work out how many blocks we need to proceed with the next chunk of a
* truncate transaction.
*/
-
static unsigned long blocks_for_truncate(struct inode *inode)
{
unsigned long needed;
@@ -141,7 +138,6 @@ static unsigned long blocks_for_truncate(struct inode *inode)
* extend fails, we need to propagate the failure up and restart the
* transaction in the top-level truncate loop. --sct
*/
-
static handle_t *start_transaction(struct inode *inode)
{
handle_t *result;
@@ -194,9 +190,11 @@ void ext3_delete_inode (struct inode * inode)
handle = start_transaction(inode);
if (IS_ERR(handle)) {
- /* If we're going to skip the normal cleanup, we still
- * need to make sure that the in-core orphan linked list
- * is properly cleaned up. */
+ /*
+ * If we're going to skip the normal cleanup, we still need to
+ * make sure that the in-core orphan linked list is properly
+ * cleaned up.
+ */
ext3_orphan_del(NULL, inode);
goto no_delete;
}
@@ -235,16 +233,6 @@ no_delete:
clear_inode(inode); /* We must guarantee clearing of inode... */
}
-static int ext3_alloc_block (handle_t *handle,
- struct inode * inode, unsigned long goal, int *err)
-{
- unsigned long result;
-
- result = ext3_new_block(handle, inode, goal, err);
- return result;
-}
-
-
typedef struct {
__le32 *p;
__le32 key;
@@ -257,7 +245,7 @@ static inline void add_chain(Indirect *p, struct buffer_head *bh, __le32 *v)
p->bh = bh;
}
-static inline int verify_chain(Indirect *from, Indirect *to)
+static int verify_chain(Indirect *from, Indirect *to)
{
while (from <= to && from->key == *from->p)
from++;
@@ -327,10 +315,10 @@ static int ext3_block_to_path(struct inode *inode,
offsets[n++] = i_block & (ptrs - 1);
final = ptrs;
} else {
- ext3_warning (inode->i_sb, "ext3_block_to_path", "block > big");
+ ext3_warning(inode->i_sb, "ext3_block_to_path", "block > big");
}
if (boundary)
- *boundary = (i_block & (ptrs - 1)) == (final - 1);
+ *boundary = final - 1 - (i_block & (ptrs - 1));
return n;
}
@@ -419,7 +407,6 @@ no_block:
*
* Caller must make sure that @ind is valid and will stay that way.
*/
-
static unsigned long ext3_find_near(struct inode *inode, Indirect *ind)
{
struct ext3_inode_info *ei = EXT3_I(inode);
@@ -429,17 +416,18 @@ static unsigned long ext3_find_near(struct inode *inode, Indirect *ind)
unsigned long colour;
/* Try to find previous block */
- for (p = ind->p - 1; p >= start; p--)
+ for (p = ind->p - 1; p >= start; p--) {
if (*p)
return le32_to_cpu(*p);
+ }
/* No such thing, so let's try location of indirect block */
if (ind->bh)
return ind->bh->b_blocknr;
/*
- * It is going to be refered from inode itself? OK, just put it into
- * the same cylinder group then.
+ * It is going to be referred to from the inode itself? OK, just put it
+ * into the same cylinder group then.
*/
bg_start = (ei->i_block_group * EXT3_BLOCKS_PER_GROUP(inode->i_sb)) +
le32_to_cpu(EXT3_SB(inode->i_sb)->s_es->s_first_data_block);
@@ -463,7 +451,9 @@ static unsigned long ext3_find_near(struct inode *inode, Indirect *ind)
static unsigned long ext3_find_goal(struct inode *inode, long block,
Indirect chain[4], Indirect *partial)
{
- struct ext3_block_alloc_info *block_i = EXT3_I(inode)->i_block_alloc_info;
+ struct ext3_block_alloc_info *block_i;
+
+ block_i = EXT3_I(inode)->i_block_alloc_info;
/*
* try the heuristic for sequential allocation,
@@ -478,13 +468,113 @@ static unsigned long ext3_find_goal(struct inode *inode, long block,
}
/**
+ * ext3_blks_to_allocate: Look up the block map and count the number
+ * of direct blocks need to be allocated for the given branch.
+ *
+ * @branch: chain of indirect blocks
+ * @k: number of blocks need for indirect blocks
+ * @blks: number of data blocks to be mapped.
+ * @blocks_to_boundary: the offset in the indirect block
+ *
+ * return the total number of blocks to be allocate, including the
+ * direct and indirect blocks.
+ */
+static int ext3_blks_to_allocate(Indirect *branch, int k, unsigned long blks,
+ int blocks_to_boundary)
+{
+ unsigned long count = 0;
+
+ /*
+ * Simple case, [t,d]Indirect block(s) has not allocated yet
+ * then it's clear blocks on that path have not allocated
+ */
+ if (k > 0) {
+ /* right now we don't handle cross boundary allocation */
+ if (blks < blocks_to_boundary + 1)
+ count += blks;
+ else
+ count += blocks_to_boundary + 1;
+ return count;
+ }
+
+ count++;
+ while (count < blks && count <= blocks_to_boundary &&
+ le32_to_cpu(*(branch[0].p + count)) == 0) {
+ count++;
+ }
+ return count;
+}
+
+/**
+ * ext3_alloc_blocks: multiple allocate blocks needed for a branch
+ * @indirect_blks: the number of blocks need to allocate for indirect
+ * blocks
+ *
+ * @new_blocks: on return it will store the new block numbers for
+ * the indirect blocks(if needed) and the first direct block,
+ * @blks: on return it will store the total number of allocated
+ * direct blocks
+ */
+static int ext3_alloc_blocks(handle_t *handle, struct inode *inode,
+ unsigned long goal, int indirect_blks, int blks,
+ unsigned long long new_blocks[4], int *err)
+{
+ int target, i;
+ unsigned long count = 0;
+ int index = 0;
+ unsigned long current_block = 0;
+ int ret = 0;
+
+ /*
+ * Here we try to allocate the requested multiple blocks at once,
+ * on a best-effort basis.
+ * To build a branch, we should allocate blocks for
+ * the indirect blocks(if not allocated yet), and at least
+ * the first direct block of this branch. That's the
+ * minimum number of blocks need to allocate(required)
+ */
+ target = blks + indirect_blks;
+
+ while (1) {
+ count = target;
+ /* allocating blocks for indirect blocks and direct blocks */
+ current_block = ext3_new_blocks(handle,inode,goal,&count,err);
+ if (*err)
+ goto failed_out;
+
+ target -= count;
+ /* allocate blocks for indirect blocks */
+ while (index < indirect_blks && count) {
+ new_blocks[index++] = current_block++;
+ count--;
+ }
+
+ if (count > 0)
+ break;
+ }
+
+ /* save the new block number for the first direct block */
+ new_blocks[index] = current_block;
+
+ /* total number of blocks allocated for direct blocks */
+ ret = count;
+ *err = 0;
+ return ret;
+failed_out:
+ for (i = 0; i <index; i++)
+ ext3_free_blocks(handle, inode, new_blocks[i], 1);
+ return ret;
+}
+
+/**
* ext3_alloc_branch - allocate and set up a chain of blocks.
* @inode: owner
- * @num: depth of the chain (number of blocks to allocate)
+ * @indirect_blks: number of allocated indirect blocks
+ * @blks: number of allocated direct blocks
* @offsets: offsets (in the blocks) to store the pointers to next.
* @branch: place to store the chain in.
*
- * This function allocates @num blocks, zeroes out all but the last one,
+ * This function allocates blocks, zeroes out all but the last one,
* links them into chain and (if we are synchronous) writes them to disk.
* In other words, it prepares a branch that can be spliced onto the
* inode. It stores the information about that chain in the branch[], in
@@ -501,97 +591,106 @@ static unsigned long ext3_find_goal(struct inode *inode, long block,
* ext3_alloc_block() (normally -ENOSPC). Otherwise we set the chain
* as described above and return 0.
*/
-
static int ext3_alloc_branch(handle_t *handle, struct inode *inode,
- int num,
- unsigned long goal,
- int *offsets,
- Indirect *branch)
+ int indirect_blks, int *blks, unsigned long goal,
+ int *offsets, Indirect *branch)
{
int blocksize = inode->i_sb->s_blocksize;
- int n = 0, keys = 0;
+ int i, n = 0;
int err = 0;
- int i;
- int parent = ext3_alloc_block(handle, inode, goal, &err);
-
- branch[0].key = cpu_to_le32(parent);
- if (parent) {
- for (n = 1; n < num; n++) {
- struct buffer_head *bh;
- /* Allocate the next block */
- int nr = ext3_alloc_block(handle, inode, parent, &err);
- if (!nr)
- break;
- branch[n].key = cpu_to_le32(nr);
+ struct buffer_head *bh;
+ int num;
+ unsigned long long new_blocks[4];
+ unsigned long long current_block;
- /*
- * Get buffer_head for parent block, zero it out
- * and set the pointer to new one, then send
- * parent to disk.
- */
- bh = sb_getblk(inode->i_sb, parent);
- if (!bh)
- break;
- keys = n+1;
- branch[n].bh = bh;
- lock_buffer(bh);
- BUFFER_TRACE(bh, "call get_create_access");
- err = ext3_journal_get_create_access(handle, bh);
- if (err) {
- unlock_buffer(bh);
- brelse(bh);
- break;
- }
+ num = ext3_alloc_blocks(handle, inode, goal, indirect_blks,
+ *blks, new_blocks, &err);
+ if (err)
+ return err;
- memset(bh->b_data, 0, blocksize);
- branch[n].p = (__le32*) bh->b_data + offsets[n];
- *branch[n].p = branch[n].key;
- BUFFER_TRACE(bh, "marking uptodate");
- set_buffer_uptodate(bh);
+ branch[0].key = cpu_to_le32(new_blocks[0]);
+ /*
+ * metadata blocks and data blocks are allocated.
+ */
+ for (n = 1; n <= indirect_blks; n++) {
+ /*
+ * Get buffer_head for parent block, zero it out
+ * and set the pointer to new one, then send
+ * parent to disk.
+ */
+ bh = sb_getblk(inode->i_sb, new_blocks[n-1]);
+ branch[n].bh = bh;
+ lock_buffer(bh);
+ BUFFER_TRACE(bh, "call get_create_access");
+ err = ext3_journal_get_create_access(handle, bh);
+ if (err) {
unlock_buffer(bh);
+ brelse(bh);
+ goto failed;
+ }
- BUFFER_TRACE(bh, "call ext3_journal_dirty_metadata");
- err = ext3_journal_dirty_metadata(handle, bh);
- if (err)
- break;
-
- parent = nr;
+ memset(bh->b_data, 0, blocksize);
+ branch[n].p = (__le32 *) bh->b_data + offsets[n];
+ branch[n].key = cpu_to_le32(new_blocks[n]);
+ *branch[n].p = branch[n].key;
+ if ( n == indirect_blks) {
+ current_block = new_blocks[n];
+ /*
+ * End of chain, update the last new metablock of
+ * the chain to point to the new allocated
+ * data blocks numbers
+ */
+ for (i=1; i < num; i++)
+ *(branch[n].p + i) = cpu_to_le32(++current_block);
}
- }
- if (n == num)
- return 0;
+ BUFFER_TRACE(bh, "marking uptodate");
+ set_buffer_uptodate(bh);
+ unlock_buffer(bh);
+ BUFFER_TRACE(bh, "call ext3_journal_dirty_metadata");
+ err = ext3_journal_dirty_metadata(handle, bh);
+ if (err)
+ goto failed;
+ }
+ *blks = num;
+ return err;
+failed:
/* Allocation failed, free what we already allocated */
- for (i = 1; i < keys; i++) {
+ for (i = 1; i <= n ; i++) {
BUFFER_TRACE(branch[i].bh, "call journal_forget");
ext3_journal_forget(handle, branch[i].bh);
}
- for (i = 0; i < keys; i++)
- ext3_free_blocks(handle, inode, le32_to_cpu(branch[i].key), 1);
+ for (i = 0; i <indirect_blks; i++)
+ ext3_free_blocks(handle, inode, new_blocks[i], 1);
+
+ ext3_free_blocks(handle, inode, new_blocks[i], num);
+
return err;
}
/**
- * ext3_splice_branch - splice the allocated branch onto inode.
- * @inode: owner
- * @block: (logical) number of block we are adding
- * @chain: chain of indirect blocks (with a missing link - see
- * ext3_alloc_branch)
- * @where: location of missing link
- * @num: number of blocks we are adding
- *
- * This function fills the missing link and does all housekeeping needed in
- * inode (->i_blocks, etc.). In case of success we end up with the full
- * chain to new block and return 0.
+ * ext3_splice_branch - splice the allocated branch onto inode.
+ * @inode: owner
+ * @block: (logical) number of block we are adding
+ * @chain: chain of indirect blocks (with a missing link - see
+ * ext3_alloc_branch)
+ * @where: location of missing link
+ * @num: number of indirect blocks we are adding
+ * @blks: number of direct blocks we are adding
+ *
+ * This function fills the missing link and does all housekeeping needed in
+ * inode (->i_blocks, etc.). In case of success we end up with the full
+ * chain to new block and return 0.
*/
-
-static int ext3_splice_branch(handle_t *handle, struct inode *inode, long block,
- Indirect chain[4], Indirect *where, int num)
+static int ext3_splice_branch(handle_t *handle, struct inode *inode,
+ long block, Indirect *where, int num, int blks)
{
int i;
int err = 0;
- struct ext3_block_alloc_info *block_i = EXT3_I(inode)->i_block_alloc_info;
+ struct ext3_block_alloc_info *block_i;
+ unsigned long current_block;
+ block_i = EXT3_I(inode)->i_block_alloc_info;
/*
* If we're splicing into a [td]indirect block (as opposed to the
* inode) then we need to get write access to the [td]indirect block
@@ -608,13 +707,24 @@ static int ext3_splice_branch(handle_t *handle, struct inode *inode, long block,
*where->p = where->key;
/*
+ * Update the host buffer_head or inode to point to more just allocated
+ * direct blocks
+ */
+ if (num == 0 && blks > 1) {
+ current_block = le32_to_cpu(where->key + 1);
+ for (i = 1; i < blks; i++)
+ *(where->p + i ) = cpu_to_le32(current_block++);
+ }
+
+ /*
* update the most recently allocated logical & physical block
* in i_block_alloc_info, to assist find the proper goal block for next
* allocation
*/
if (block_i) {
- block_i->last_alloc_logical_block = block;
- block_i->last_alloc_physical_block = le32_to_cpu(where[num-1].key);
+ block_i->last_alloc_logical_block = block + blks - 1;
+ block_i->last_alloc_physical_block =
+ le32_to_cpu(where[num].key + blks - 1);
}
/* We are done with atomic stuff, now do the rest of housekeeping */
@@ -625,7 +735,7 @@ static int ext3_splice_branch(handle_t *handle, struct inode *inode, long block,
/* had we spliced it onto indirect block? */
if (where->bh) {
/*
- * akpm: If we spliced it onto an indirect block, we haven't
+ * If we spliced it onto an indirect block, we haven't
* altered the inode. Note however that if it is being spliced
* onto an indirect block at the very end of the file (the
* file is growing) then we *will* alter the inode to reflect
@@ -647,10 +757,13 @@ static int ext3_splice_branch(handle_t *handle, struct inode *inode, long block,
return err;
err_out:
- for (i = 1; i < num; i++) {
+ for (i = 1; i <= num; i++) {
BUFFER_TRACE(where[i].bh, "call journal_forget");
ext3_journal_forget(handle, where[i].bh);
+ ext3_free_blocks(handle,inode,le32_to_cpu(where[i-1].key),1);
}
+ ext3_free_blocks(handle, inode, le32_to_cpu(where[num].key), blks);
+
return err;
}
@@ -666,26 +779,33 @@ err_out:
* allocations is needed - we simply release blocks and do not touch anything
* reachable from inode.
*
- * akpm: `handle' can be NULL if create == 0.
+ * `handle' can be NULL if create == 0.
*
* The BKL may not be held on entry here. Be sure to take it early.
+ * return > 0, # of blocks mapped or allocated.
+ * return = 0, if plain lookup failed.
+ * return < 0, error case.
*/
-
-int
-ext3_get_block_handle(handle_t *handle, struct inode *inode, sector_t iblock,
- struct buffer_head *bh_result, int create, int extend_disksize)
+int ext3_get_blocks_handle(handle_t *handle, struct inode *inode,
+ sector_t iblock, unsigned long maxblocks,
+ struct buffer_head *bh_result,
+ int create, int extend_disksize)
{
int err = -EIO;
int offsets[4];
Indirect chain[4];
Indirect *partial;
unsigned long goal;
- int left;
- int boundary = 0;
- const int depth = ext3_block_to_path(inode, iblock, offsets, &boundary);
+ int indirect_blks;
+ int blocks_to_boundary = 0;
+ int depth;
struct ext3_inode_info *ei = EXT3_I(inode);
+ int count = 0;
+ unsigned long first_block = 0;
+
J_ASSERT(handle != NULL || create == 0);
+ depth = ext3_block_to_path(inode,iblock,offsets,&blocks_to_boundary);
if (depth == 0)
goto out;
@@ -694,8 +814,31 @@ ext3_get_block_handle(handle_t *handle, struct inode *inode, sector_t iblock,
/* Simplest case - block found, no allocation needed */
if (!partial) {
+ first_block = chain[depth - 1].key;
clear_buffer_new(bh_result);
- goto got_it;
+ count++;
+ /*map more blocks*/
+ while (count < maxblocks && count <= blocks_to_boundary) {
+ if (!verify_chain(chain, partial)) {
+ /*
+ * Indirect block might be removed by
+ * truncate while we were reading it.
+ * Handling of that case: forget what we've
+ * got now. Flag the err as EAGAIN, so it
+ * will reread.
+ */
+ err = -EAGAIN;
+ count = 0;
+ break;
+ }
+ if (le32_to_cpu(*(chain[depth-1].p+count) ==
+ (first_block + count)))
+ count++;
+ else
+ break;
+ }
+ if (err != -EAGAIN)
+ goto got_it;
}
/* Next simple case - plain lookup or failed read of indirect block */
@@ -723,6 +866,7 @@ ext3_get_block_handle(handle_t *handle, struct inode *inode, sector_t iblock,
}
partial = ext3_get_branch(inode, depth, offsets, chain, &err);
if (!partial) {
+ count++;
mutex_unlock(&ei->truncate_mutex);
if (err)
goto cleanup;
@@ -740,12 +884,19 @@ ext3_get_block_handle(handle_t *handle, struct inode *inode, sector_t iblock,
goal = ext3_find_goal(inode, iblock, chain, partial);
- left = (chain + depth) - partial;
+ /* the number of blocks need to allocate for [d,t]indirect blocks */
+ indirect_blks = (chain + depth) - partial - 1;
/*
+ * Next look up the indirect map to count the total number of
+ * direct blocks to allocate for this branch.
+ */
+ count = ext3_blks_to_allocate(partial, indirect_blks,
+ maxblocks, blocks_to_boundary);
+ /*
* Block out ext3_truncate while we alter the tree
*/
- err = ext3_alloc_branch(handle, inode, left, goal,
+ err = ext3_alloc_branch(handle, inode, indirect_blks, &count, goal,
offsets + (partial - chain), partial);
/*
@@ -756,8 +907,8 @@ ext3_get_block_handle(handle_t *handle, struct inode *inode, sector_t iblock,
* may need to return -EAGAIN upwards in the worst case. --sct
*/
if (!err)
- err = ext3_splice_branch(handle, inode, iblock, chain,
- partial, left);
+ err = ext3_splice_branch(handle, inode, iblock,
+ partial, indirect_blks, count);
/*
* i_disksize growing is protected by truncate_mutex. Don't forget to
* protect it if you're about to implement concurrent
@@ -772,8 +923,9 @@ ext3_get_block_handle(handle_t *handle, struct inode *inode, sector_t iblock,
set_buffer_new(bh_result);
got_it:
map_bh(bh_result, inode->i_sb, le32_to_cpu(chain[depth-1].key));
- if (boundary)
+ if (blocks_to_boundary == 0)
set_buffer_boundary(bh_result);
+ err = count;
/* Clean up and exit */
partial = chain + depth - 1; /* the whole chain */
cleanup:
@@ -787,34 +939,21 @@ out:
return err;
}
-static int ext3_get_block(struct inode *inode, sector_t iblock,
- struct buffer_head *bh_result, int create)
-{
- handle_t *handle = NULL;
- int ret;
-
- if (create) {
- handle = ext3_journal_current_handle();
- J_ASSERT(handle != 0);
- }
- ret = ext3_get_block_handle(handle, inode, iblock,
- bh_result, create, 1);
- return ret;
-}
-
#define DIO_CREDITS (EXT3_RESERVE_TRANS_BLOCKS + 32)
-static int
-ext3_direct_io_get_blocks(struct inode *inode, sector_t iblock,
- unsigned long max_blocks, struct buffer_head *bh_result,
- int create)
+static int ext3_get_block(struct inode *inode, sector_t iblock,
+ struct buffer_head *bh_result, int create)
{
handle_t *handle = journal_current_handle();
int ret = 0;
+ unsigned max_blocks = bh_result->b_size >> inode->i_blkbits;
- if (!handle)
+ if (!create)
goto get_block; /* A read */
+ if (max_blocks == 1)
+ goto get_block; /* A single block get */
+
if (handle->h_transaction->t_state == T_LOCKED) {
/*
* Huge direct-io writes can hold off commits for long
@@ -841,18 +980,22 @@ ext3_direct_io_get_blocks(struct inode *inode, sector_t iblock,
}
get_block:
- if (ret == 0)
- ret = ext3_get_block_handle(handle, inode, iblock,
- bh_result, create, 0);
- bh_result->b_size = (1 << inode->i_blkbits);
+ if (ret == 0) {
+ ret = ext3_get_blocks_handle(handle, inode, iblock,
+ max_blocks, bh_result, create, 0);
+ if (ret > 0) {
+ bh_result->b_size = (ret << inode->i_blkbits);
+ ret = 0;
+ }
+ }
return ret;
}
/*
* `handle' can be NULL if create is zero
*/
-struct buffer_head *ext3_getblk(handle_t *handle, struct inode * inode,
- long block, int create, int * errp)
+struct buffer_head *ext3_getblk(handle_t *handle, struct inode *inode,
+ long block, int create, int *errp)
{
struct buffer_head dummy;
int fatal = 0, err;
@@ -862,8 +1005,16 @@ struct buffer_head *ext3_getblk(handle_t *handle, struct inode * inode,
dummy.b_state = 0;
dummy.b_blocknr = -1000;
buffer_trace_init(&dummy.b_history);
- *errp = ext3_get_block_handle(handle, inode, block, &dummy, create, 1);
- if (!*errp && buffer_mapped(&dummy)) {
+ err = ext3_get_blocks_handle(handle, inode, block, 1,
+ &dummy, create, 1);
+ if (err == 1) {
+ err = 0;
+ } else if (err >= 0) {
+ WARN_ON(1);
+ err = -EIO;
+ }
+ *errp = err;
+ if (!err && buffer_mapped(&dummy)) {
struct buffer_head *bh;
bh = sb_getblk(inode->i_sb, dummy.b_blocknr);
if (!bh) {
@@ -874,17 +1025,18 @@ struct buffer_head *ext3_getblk(handle_t *handle, struct inode * inode,
J_ASSERT(create != 0);
J_ASSERT(handle != 0);
- /* Now that we do not always journal data, we
- should keep in mind whether this should
- always journal the new buffer as metadata.
- For now, regular file writes use
- ext3_get_block instead, so it's not a
- problem. */
+ /*
+ * Now that we do not always journal data, we should
+ * keep in mind whether this should always journal the
+ * new buffer as metadata. For now, regular file
+ * writes use ext3_get_block instead, so it's not a
+ * problem.
+ */
lock_buffer(bh);
BUFFER_TRACE(bh, "call get_create_access");
fatal = ext3_journal_get_create_access(handle, bh);
if (!fatal && !buffer_uptodate(bh)) {
- memset(bh->b_data, 0, inode->i_sb->s_blocksize);
+ memset(bh->b_data,0,inode->i_sb->s_blocksize);
set_buffer_uptodate(bh);
}
unlock_buffer(bh);
@@ -906,7 +1058,7 @@ err:
return NULL;
}
-struct buffer_head *ext3_bread(handle_t *handle, struct inode * inode,
+struct buffer_head *ext3_bread(handle_t *handle, struct inode *inode,
int block, int create, int *err)
{
struct buffer_head * bh;
@@ -982,9 +1134,8 @@ static int walk_page_buffers( handle_t *handle,
* is elevated. We'll still have enough credits for the tiny quotafile
* write.
*/
-
-static int do_journal_get_write_access(handle_t *handle,
- struct buffer_head *bh)
+static int do_journal_get_write_access(handle_t *handle,
+ struct buffer_head *bh)
{
if (!buffer_mapped(bh) || buffer_freed(bh))
return 0;
@@ -1025,8 +1176,7 @@ out:
return ret;
}
-int
-ext3_journal_dirty_data(handle_t *handle, struct buffer_head *bh)
+int ext3_journal_dirty_data(handle_t *handle, struct buffer_head *bh)
{
int err = journal_dirty_data(handle, bh);
if (err)
@@ -1051,7 +1201,6 @@ static int commit_write_fn(handle_t *handle, struct buffer_head *bh)
* ext3 never places buffers on inode->i_mapping->private_list. metadata
* buffers are managed internally.
*/
-
static int ext3_ordered_commit_write(struct file *file, struct page *page,
unsigned from, unsigned to)
{
@@ -1261,7 +1410,7 @@ static int journal_dirty_data_fn(handle_t *handle, struct buffer_head *bh)
* we don't need to open a transaction here.
*/
static int ext3_ordered_writepage(struct page *page,
- struct writeback_control *wbc)
+ struct writeback_control *wbc)
{
struct inode *inode = page->mapping->host;
struct buffer_head *page_bufs;
@@ -1430,7 +1579,7 @@ ext3_readpages(struct file *file, struct address_space *mapping,
return mpage_readpages(mapping, pages, nr_pages, ext3_get_block);
}
-static int ext3_invalidatepage(struct page *page, unsigned long offset)
+static void ext3_invalidatepage(struct page *page, unsigned long offset)
{
journal_t *journal = EXT3_JOURNAL(page->mapping->host);
@@ -1440,7 +1589,7 @@ static int ext3_invalidatepage(struct page *page, unsigned long offset)
if (offset == 0)
ClearPageChecked(page);
- return journal_invalidatepage(journal, page, offset);
+ journal_invalidatepage(journal, page, offset);
}
static int ext3_releasepage(struct page *page, gfp_t wait)
@@ -1492,11 +1641,10 @@ static ssize_t ext3_direct_IO(int rw, struct kiocb *iocb,
ret = blockdev_direct_IO(rw, iocb, inode, inode->i_sb->s_bdev, iov,
offset, nr_segs,
- ext3_direct_io_get_blocks, NULL);
+ ext3_get_block, NULL);
/*
- * Reacquire the handle: ext3_direct_io_get_block() can restart the
- * transaction
+ * Reacquire the handle: ext3_get_block() can restart the transaction
*/
handle = journal_current_handle();
@@ -1752,11 +1900,8 @@ static inline int all_zeroes(__le32 *p, __le32 *q)
* c) free the subtrees growing from the inode past the @chain[0].
* (no partially truncated stuff there). */
-static Indirect *ext3_find_shared(struct inode *inode,
- int depth,
- int offsets[4],
- Indirect chain[4],
- __le32 *top)
+static Indirect *ext3_find_shared(struct inode *inode, int depth,
+ int offsets[4], Indirect chain[4], __le32 *top)
{
Indirect *partial, *p;
int k, err;
@@ -1795,8 +1940,7 @@ static Indirect *ext3_find_shared(struct inode *inode,
}
/* Writer: end */
- while(partial > p)
- {
+ while(partial > p) {
brelse(partial->bh);
partial--;
}
@@ -1812,10 +1956,9 @@ no_top:
* We release `count' blocks on disk, but (last - first) may be greater
* than `count' because there can be holes in there.
*/
-static void
-ext3_clear_blocks(handle_t *handle, struct inode *inode, struct buffer_head *bh,
- unsigned long block_to_free, unsigned long count,
- __le32 *first, __le32 *last)
+static void ext3_clear_blocks(handle_t *handle, struct inode *inode,
+ struct buffer_head *bh, unsigned long block_to_free,
+ unsigned long count, __le32 *first, __le32 *last)
{
__le32 *p;
if (try_to_extend_transaction(handle, inode)) {
@@ -2076,8 +2219,7 @@ static void ext3_free_branches(handle_t *handle, struct inode *inode,
* that's fine - as long as they are linked from the inode, the post-crash
* ext3_truncate() run will find them and release them.
*/
-
-void ext3_truncate(struct inode * inode)
+void ext3_truncate(struct inode *inode)
{
handle_t *handle;
struct ext3_inode_info *ei = EXT3_I(inode);
@@ -2201,29 +2343,26 @@ void ext3_truncate(struct inode * inode)
do_indirects:
/* Kill the remaining (whole) subtrees */
switch (offsets[0]) {
- default:
- nr = i_data[EXT3_IND_BLOCK];
- if (nr) {
- ext3_free_branches(handle, inode, NULL,
- &nr, &nr+1, 1);
- i_data[EXT3_IND_BLOCK] = 0;
- }
- case EXT3_IND_BLOCK:
- nr = i_data[EXT3_DIND_BLOCK];
- if (nr) {
- ext3_free_branches(handle, inode, NULL,
- &nr, &nr+1, 2);
- i_data[EXT3_DIND_BLOCK] = 0;
- }
- case EXT3_DIND_BLOCK:
- nr = i_data[EXT3_TIND_BLOCK];
- if (nr) {
- ext3_free_branches(handle, inode, NULL,
- &nr, &nr+1, 3);
- i_data[EXT3_TIND_BLOCK] = 0;
- }
- case EXT3_TIND_BLOCK:
- ;
+ default:
+ nr = i_data[EXT3_IND_BLOCK];
+ if (nr) {
+ ext3_free_branches(handle, inode, NULL, &nr, &nr+1, 1);
+ i_data[EXT3_IND_BLOCK] = 0;
+ }
+ case EXT3_IND_BLOCK:
+ nr = i_data[EXT3_DIND_BLOCK];
+ if (nr) {
+ ext3_free_branches(handle, inode, NULL, &nr, &nr+1, 2);
+ i_data[EXT3_DIND_BLOCK] = 0;
+ }
+ case EXT3_DIND_BLOCK:
+ nr = i_data[EXT3_TIND_BLOCK];
+ if (nr) {
+ ext3_free_branches(handle, inode, NULL, &nr, &nr+1, 3);
+ i_data[EXT3_TIND_BLOCK] = 0;
+ }
+ case EXT3_TIND_BLOCK:
+ ;
}
ext3_discard_reservation(inode);
@@ -2232,8 +2371,10 @@ do_indirects:
inode->i_mtime = inode->i_ctime = CURRENT_TIME_SEC;
ext3_mark_inode_dirty(handle, inode);
- /* In a multi-transaction truncate, we only make the final
- * transaction synchronous */
+ /*
+ * In a multi-transaction truncate, we only make the final transaction
+ * synchronous
+ */
if (IS_SYNC(inode))
handle->h_sync = 1;
out_stop:
@@ -2259,20 +2400,16 @@ static unsigned long ext3_get_inode_block(struct super_block *sb,
struct ext3_group_desc * gdp;
- if ((ino != EXT3_ROOT_INO &&
- ino != EXT3_JOURNAL_INO &&
- ino != EXT3_RESIZE_INO &&
- ino < EXT3_FIRST_INO(sb)) ||
- ino > le32_to_cpu(
- EXT3_SB(sb)->s_es->s_inodes_count)) {
- ext3_error (sb, "ext3_get_inode_block",
+ if ((ino != EXT3_ROOT_INO && ino != EXT3_JOURNAL_INO &&
+ ino != EXT3_RESIZE_INO && ino < EXT3_FIRST_INO(sb)) ||
+ ino > le32_to_cpu(EXT3_SB(sb)->s_es->s_inodes_count)) {
+ ext3_error(sb, "ext3_get_inode_block",
"bad inode number: %lu", ino);
return 0;
}
block_group = (ino - 1) / EXT3_INODES_PER_GROUP(sb);
if (block_group >= EXT3_SB(sb)->s_groups_count) {
- ext3_error (sb, "ext3_get_inode_block",
- "group >= groups count");
+ ext3_error(sb, "ext3_get_inode_block", "group >= groups count");
return 0;
}
smp_rmb();
@@ -2285,7 +2422,7 @@ static unsigned long ext3_get_inode_block(struct super_block *sb,
return 0;
}
- gdp = (struct ext3_group_desc *) bh->b_data;
+ gdp = (struct ext3_group_desc *)bh->b_data;
/*
* Figure out the offset within the block group inode table
*/
@@ -2834,7 +2971,7 @@ err_out:
/*
- * akpm: how many blocks doth make a writepage()?
+ * How many blocks doth make a writepage()?
*
* With N blocks per page, it may be:
* N data blocks
@@ -2924,8 +3061,8 @@ ext3_reserve_inode_write(handle_t *handle, struct inode *inode,
}
/*
- * akpm: What we do here is to mark the in-core inode as clean
- * with respect to inode dirtiness (it may still be data-dirty).
+ * What we do here is to mark the in-core inode as clean with respect to inode
+ * dirtiness (it may still be data-dirty).
* This means that the in-core inode may be reaped by prune_icache
* without having to perform any I/O. This is a very good thing,
* because *any* task may call prune_icache - even ones which
@@ -2957,7 +3094,7 @@ int ext3_mark_inode_dirty(handle_t *handle, struct inode *inode)
}
/*
- * akpm: ext3_dirty_inode() is called from __mark_inode_dirty()
+ * ext3_dirty_inode() is called from __mark_inode_dirty()
*
* We're really interested in the case where a file is being extended.
* i_size has been changed by generic_commit_write() and we thus need
@@ -2993,7 +3130,7 @@ out:
return;
}
-#ifdef AKPM
+#if 0
/*
* Bind an inode's backing buffer_head into this transaction, to prevent
* it from being flushed to disk early. Unlike
@@ -3001,8 +3138,7 @@ out:
* returns no iloc structure, so the caller needs to repeat the iloc
* lookup to mark the inode dirty later.
*/
-static inline int
-ext3_pin_inode(handle_t *handle, struct inode *inode)
+static int ext3_pin_inode(handle_t *handle, struct inode *inode)
{
struct ext3_iloc iloc;
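A minimal sketch of how the reworked ext3 interface is meant to be driven (example_map_extent() is hypothetical and not part of this patch): the caller advertises how much it wants mapped through bh_result->b_size, and on success the same field carries back the length of the extent that ext3_get_block() actually mapped.

/* Illustrative only; assumes it runs where ext3_get_block() is visible. */
static int example_map_extent(struct inode *inode, sector_t iblock,
			      unsigned want_blocks)
{
	struct buffer_head bh;
	int err;

	memset(&bh, 0, sizeof(bh));
	bh.b_size = want_blocks << inode->i_blkbits;	/* requested length */
	err = ext3_get_block(inode, iblock, &bh, 0);	/* lookup, no create */
	if (err)
		return err;
	if (!buffer_mapped(&bh))
		return 0;				/* hole */
	return bh.b_size >> inode->i_blkbits;		/* blocks mapped */
}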
diff --git a/fs/ext3/super.c b/fs/ext3/super.c
index 86e443182de..f8a5266ea1f 100644
--- a/fs/ext3/super.c
+++ b/fs/ext3/super.c
@@ -1678,12 +1678,6 @@ static int ext3_fill_super (struct super_block *sb, void *data, int silent)
}
if (test_opt(sb, NOBH)) {
- if (sb->s_blocksize_bits != PAGE_CACHE_SHIFT) {
- printk(KERN_WARNING "EXT3-fs: Ignoring nobh option "
- "since filesystem blocksize doesn't match "
- "pagesize\n");
- clear_opt(sbi->s_mount_opt, NOBH);
- }
if (!(test_opt(sb, DATA_FLAGS) == EXT3_MOUNT_WRITEBACK_DATA)) {
printk(KERN_WARNING "EXT3-fs: Ignoring nobh option - "
"its supported only with writeback mode\n");
diff --git a/fs/fat/inode.c b/fs/fat/inode.c
index 297300fe81c..404bfc9f738 100644
--- a/fs/fat/inode.c
+++ b/fs/fat/inode.c
@@ -101,11 +101,11 @@ static int __fat_get_blocks(struct inode *inode, sector_t iblock,
}
static int fat_get_blocks(struct inode *inode, sector_t iblock,
- unsigned long max_blocks,
struct buffer_head *bh_result, int create)
{
struct super_block *sb = inode->i_sb;
int err;
+ unsigned long max_blocks = bh_result->b_size >> inode->i_blkbits;
err = __fat_get_blocks(inode, iblock, &max_blocks, bh_result, create);
if (err)
diff --git a/fs/fcntl.c b/fs/fcntl.c
index 03c789560fb..2a2479196f9 100644
--- a/fs/fcntl.c
+++ b/fs/fcntl.c
@@ -412,7 +412,7 @@ out:
/* Table to convert sigio signal codes into poll band bitmaps */
-static long band_table[NSIGPOLL] = {
+static const long band_table[NSIGPOLL] = {
POLLIN | POLLRDNORM, /* POLL_IN */
POLLOUT | POLLWRNORM | POLLWRBAND, /* POLL_OUT */
POLLIN | POLLRDNORM | POLLMSG, /* POLL_MSG */
@@ -531,7 +531,7 @@ int send_sigurg(struct fown_struct *fown)
}
static DEFINE_RWLOCK(fasync_lock);
-static kmem_cache_t *fasync_cache;
+static kmem_cache_t *fasync_cache __read_mostly;
/*
* fasync_helper() is used by some character device drivers (mainly mice)
diff --git a/fs/hfs/inode.c b/fs/hfs/inode.c
index 39fd85b9b91..2c564701724 100644
--- a/fs/hfs/inode.c
+++ b/fs/hfs/inode.c
@@ -98,17 +98,6 @@ static int hfs_releasepage(struct page *page, gfp_t mask)
return res ? try_to_free_buffers(page) : 0;
}
-static int hfs_get_blocks(struct inode *inode, sector_t iblock, unsigned long max_blocks,
- struct buffer_head *bh_result, int create)
-{
- int ret;
-
- ret = hfs_get_block(inode, iblock, bh_result, create);
- if (!ret)
- bh_result->b_size = (1 << inode->i_blkbits);
- return ret;
-}
-
static ssize_t hfs_direct_IO(int rw, struct kiocb *iocb,
const struct iovec *iov, loff_t offset, unsigned long nr_segs)
{
@@ -116,7 +105,7 @@ static ssize_t hfs_direct_IO(int rw, struct kiocb *iocb,
struct inode *inode = file->f_dentry->d_inode->i_mapping->host;
return blockdev_direct_IO(rw, iocb, inode, inode->i_sb->s_bdev, iov,
- offset, nr_segs, hfs_get_blocks, NULL);
+ offset, nr_segs, hfs_get_block, NULL);
}
static int hfs_writepages(struct address_space *mapping,
diff --git a/fs/hfsplus/inode.c b/fs/hfsplus/inode.c
index 12ed2b7d046..9fbe4d2aeec 100644
--- a/fs/hfsplus/inode.c
+++ b/fs/hfsplus/inode.c
@@ -93,17 +93,6 @@ static int hfsplus_releasepage(struct page *page, gfp_t mask)
return res ? try_to_free_buffers(page) : 0;
}
-static int hfsplus_get_blocks(struct inode *inode, sector_t iblock, unsigned long max_blocks,
- struct buffer_head *bh_result, int create)
-{
- int ret;
-
- ret = hfsplus_get_block(inode, iblock, bh_result, create);
- if (!ret)
- bh_result->b_size = (1 << inode->i_blkbits);
- return ret;
-}
-
static ssize_t hfsplus_direct_IO(int rw, struct kiocb *iocb,
const struct iovec *iov, loff_t offset, unsigned long nr_segs)
{
@@ -111,7 +100,7 @@ static ssize_t hfsplus_direct_IO(int rw, struct kiocb *iocb,
struct inode *inode = file->f_dentry->d_inode->i_mapping->host;
return blockdev_direct_IO(rw, iocb, inode, inode->i_sb->s_bdev, iov,
- offset, nr_segs, hfsplus_get_blocks, NULL);
+ offset, nr_segs, hfsplus_get_block, NULL);
}
static int hfsplus_writepages(struct address_space *mapping,
diff --git a/fs/inode.c b/fs/inode.c
index 85da11044ad..1fddf2803af 100644
--- a/fs/inode.c
+++ b/fs/inode.c
@@ -56,8 +56,8 @@
#define I_HASHBITS i_hash_shift
#define I_HASHMASK i_hash_mask
-static unsigned int i_hash_mask;
-static unsigned int i_hash_shift;
+static unsigned int i_hash_mask __read_mostly;
+static unsigned int i_hash_shift __read_mostly;
/*
* Each inode can be on two separate lists. One is
@@ -73,7 +73,7 @@ static unsigned int i_hash_shift;
LIST_HEAD(inode_in_use);
LIST_HEAD(inode_unused);
-static struct hlist_head *inode_hashtable;
+static struct hlist_head *inode_hashtable __read_mostly;
/*
* A simple spinlock to protect the list manipulations.
@@ -98,7 +98,7 @@ static DEFINE_MUTEX(iprune_mutex);
*/
struct inodes_stat_t inodes_stat;
-static kmem_cache_t * inode_cachep;
+static kmem_cache_t * inode_cachep __read_mostly;
static struct inode *alloc_inode(struct super_block *sb)
{
diff --git a/fs/inotify.c b/fs/inotify.c
index a61e93e1785..f48a3dae071 100644
--- a/fs/inotify.c
+++ b/fs/inotify.c
@@ -39,15 +39,15 @@
static atomic_t inotify_cookie;
-static kmem_cache_t *watch_cachep;
-static kmem_cache_t *event_cachep;
+static kmem_cache_t *watch_cachep __read_mostly;
+static kmem_cache_t *event_cachep __read_mostly;
-static struct vfsmount *inotify_mnt;
+static struct vfsmount *inotify_mnt __read_mostly;
/* these are configurable via /proc/sys/fs/inotify/ */
-int inotify_max_user_instances;
-int inotify_max_user_watches;
-int inotify_max_queued_events;
+int inotify_max_user_instances __read_mostly;
+int inotify_max_user_watches __read_mostly;
+int inotify_max_queued_events __read_mostly;
/*
* Lock ordering:
diff --git a/fs/jbd/transaction.c b/fs/jbd/transaction.c
index ada31fa272e..c609f5034fc 100644
--- a/fs/jbd/transaction.c
+++ b/fs/jbd/transaction.c
@@ -1873,16 +1873,15 @@ zap_buffer_unlocked:
}
/**
- * int journal_invalidatepage()
+ * void journal_invalidatepage()
* @journal: journal to use for flush...
* @page: page to flush
* @offset: length of page to invalidate.
*
* Reap page buffers containing data after offset in page.
*
- * Return non-zero if the page's buffers were successfully reaped.
*/
-int journal_invalidatepage(journal_t *journal,
+void journal_invalidatepage(journal_t *journal,
struct page *page,
unsigned long offset)
{
@@ -1893,7 +1892,7 @@ int journal_invalidatepage(journal_t *journal,
if (!PageLocked(page))
BUG();
if (!page_has_buffers(page))
- return 1;
+ return;
/* We will potentially be playing with lists other than just the
* data lists (especially for journaled data mode), so be
@@ -1916,11 +1915,9 @@ int journal_invalidatepage(journal_t *journal,
} while (bh != head);
if (!offset) {
- if (!may_free || !try_to_free_buffers(page))
- return 0;
- J_ASSERT(!page_has_buffers(page));
+ if (may_free && try_to_free_buffers(page))
+ J_ASSERT(!page_has_buffers(page));
}
- return 1;
}
/*
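journal_invalidatepage() losing its return value matches the ->invalidatepage() signature change applied across this patch: the operation now returns void and simply performs the invalidation. A minimal sketch of the new shape for a plain filesystem (example_invalidatepage() is hypothetical; it assumes block_invalidatepage() is the appropriate teardown helper):

static void example_invalidatepage(struct page *page, unsigned long offset)
{
	/* Drop buffers from 'offset' onwards; no status is reported back. */
	if (page_has_buffers(page))
		block_invalidatepage(page, offset);
}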
diff --git a/fs/jffs2/compr_zlib.c b/fs/jffs2/compr_zlib.c
index 4db8be8e90c..5c63e0cdcf4 100644
--- a/fs/jffs2/compr_zlib.c
+++ b/fs/jffs2/compr_zlib.c
@@ -33,13 +33,14 @@
*/
#define STREAM_END_SPACE 12
-static DECLARE_MUTEX(deflate_sem);
-static DECLARE_MUTEX(inflate_sem);
+static DEFINE_MUTEX(deflate_mutex);
+static DEFINE_MUTEX(inflate_mutex);
static z_stream inf_strm, def_strm;
#ifdef __KERNEL__ /* Linux-only */
#include <linux/vmalloc.h>
#include <linux/init.h>
+#include <linux/mutex.h>
static int __init alloc_workspaces(void)
{
@@ -79,11 +80,11 @@ static int jffs2_zlib_compress(unsigned char *data_in,
if (*dstlen <= STREAM_END_SPACE)
return -1;
- down(&deflate_sem);
+ mutex_lock(&deflate_mutex);
if (Z_OK != zlib_deflateInit(&def_strm, 3)) {
printk(KERN_WARNING "deflateInit failed\n");
- up(&deflate_sem);
+ mutex_unlock(&deflate_mutex);
return -1;
}
@@ -104,7 +105,7 @@ static int jffs2_zlib_compress(unsigned char *data_in,
if (ret != Z_OK) {
D1(printk(KERN_DEBUG "deflate in loop returned %d\n", ret));
zlib_deflateEnd(&def_strm);
- up(&deflate_sem);
+ mutex_unlock(&deflate_mutex);
return -1;
}
}
@@ -133,7 +134,7 @@ static int jffs2_zlib_compress(unsigned char *data_in,
*sourcelen = def_strm.total_in;
ret = 0;
out:
- up(&deflate_sem);
+ mutex_unlock(&deflate_mutex);
return ret;
}
@@ -145,7 +146,7 @@ static int jffs2_zlib_decompress(unsigned char *data_in,
int ret;
int wbits = MAX_WBITS;
- down(&inflate_sem);
+ mutex_lock(&inflate_mutex);
inf_strm.next_in = data_in;
inf_strm.avail_in = srclen;
@@ -173,7 +174,7 @@ static int jffs2_zlib_decompress(unsigned char *data_in,
if (Z_OK != zlib_inflateInit2(&inf_strm, wbits)) {
printk(KERN_WARNING "inflateInit failed\n");
- up(&inflate_sem);
+ mutex_unlock(&inflate_mutex);
return 1;
}
@@ -183,7 +184,7 @@ static int jffs2_zlib_decompress(unsigned char *data_in,
printk(KERN_NOTICE "inflate returned %d\n", ret);
}
zlib_inflateEnd(&inf_strm);
- up(&inflate_sem);
+ mutex_unlock(&inflate_mutex);
return 0;
}
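The jffs2 hunks above are one instance of a conversion repeated throughout this patch: semaphores used purely as sleeping locks become mutexes. In isolation the pattern looks like this (the foo_* names are illustrative):

#include <linux/mutex.h>

static DEFINE_MUTEX(foo_mutex);		/* was: static DECLARE_MUTEX(foo_sem); */

static void foo_update(void)
{
	mutex_lock(&foo_mutex);		/* was: down(&foo_sem); */
	/* ... touch foo's shared state ... */
	mutex_unlock(&foo_mutex);	/* was: up(&foo_sem); */
}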
diff --git a/fs/jfs/inode.c b/fs/jfs/inode.c
index 51a5fed90cc..04eb78f1252 100644
--- a/fs/jfs/inode.c
+++ b/fs/jfs/inode.c
@@ -258,7 +258,8 @@ jfs_get_blocks(struct inode *ip, sector_t lblock, unsigned long max_blocks,
static int jfs_get_block(struct inode *ip, sector_t lblock,
struct buffer_head *bh_result, int create)
{
- return jfs_get_blocks(ip, lblock, 1, bh_result, create);
+ return jfs_get_blocks(ip, lblock, bh_result->b_size >> ip->i_blkbits,
+ bh_result, create);
}
static int jfs_writepage(struct page *page, struct writeback_control *wbc)
@@ -301,7 +302,7 @@ static ssize_t jfs_direct_IO(int rw, struct kiocb *iocb,
struct inode *inode = file->f_mapping->host;
return blockdev_direct_IO(rw, iocb, inode, inode->i_sb->s_bdev, iov,
- offset, nr_segs, jfs_get_blocks, NULL);
+ offset, nr_segs, jfs_get_block, NULL);
}
struct address_space_operations jfs_aops = {
diff --git a/fs/jfs/jfs_logmgr.c b/fs/jfs/jfs_logmgr.c
index 0b348b13b55..3315f0b1fbc 100644
--- a/fs/jfs/jfs_logmgr.c
+++ b/fs/jfs/jfs_logmgr.c
@@ -69,6 +69,7 @@
#include <linux/bio.h>
#include <linux/suspend.h>
#include <linux/delay.h>
+#include <linux/mutex.h>
#include "jfs_incore.h"
#include "jfs_filsys.h"
#include "jfs_metapage.h"
@@ -165,7 +166,7 @@ do { \
*/
static LIST_HEAD(jfs_external_logs);
static struct jfs_log *dummy_log = NULL;
-static DECLARE_MUTEX(jfs_log_sem);
+static DEFINE_MUTEX(jfs_log_mutex);
/*
* forward references
@@ -1085,20 +1086,20 @@ int lmLogOpen(struct super_block *sb)
if (sbi->mntflag & JFS_INLINELOG)
return open_inline_log(sb);
- down(&jfs_log_sem);
+ mutex_lock(&jfs_log_mutex);
list_for_each_entry(log, &jfs_external_logs, journal_list) {
if (log->bdev->bd_dev == sbi->logdev) {
if (memcmp(log->uuid, sbi->loguuid,
sizeof(log->uuid))) {
jfs_warn("wrong uuid on JFS journal\n");
- up(&jfs_log_sem);
+ mutex_unlock(&jfs_log_mutex);
return -EINVAL;
}
/*
* add file system to log active file system list
*/
if ((rc = lmLogFileSystem(log, sbi, 1))) {
- up(&jfs_log_sem);
+ mutex_unlock(&jfs_log_mutex);
return rc;
}
goto journal_found;
@@ -1106,7 +1107,7 @@ int lmLogOpen(struct super_block *sb)
}
if (!(log = kzalloc(sizeof(struct jfs_log), GFP_KERNEL))) {
- up(&jfs_log_sem);
+ mutex_unlock(&jfs_log_mutex);
return -ENOMEM;
}
INIT_LIST_HEAD(&log->sb_list);
@@ -1151,7 +1152,7 @@ journal_found:
sbi->log = log;
LOG_UNLOCK(log);
- up(&jfs_log_sem);
+ mutex_unlock(&jfs_log_mutex);
return 0;
/*
@@ -1168,7 +1169,7 @@ journal_found:
blkdev_put(bdev);
free: /* free log descriptor */
- up(&jfs_log_sem);
+ mutex_unlock(&jfs_log_mutex);
kfree(log);
jfs_warn("lmLogOpen: exit(%d)", rc);
@@ -1212,11 +1213,11 @@ static int open_dummy_log(struct super_block *sb)
{
int rc;
- down(&jfs_log_sem);
+ mutex_lock(&jfs_log_mutex);
if (!dummy_log) {
dummy_log = kzalloc(sizeof(struct jfs_log), GFP_KERNEL);
if (!dummy_log) {
- up(&jfs_log_sem);
+ mutex_unlock(&jfs_log_mutex);
return -ENOMEM;
}
INIT_LIST_HEAD(&dummy_log->sb_list);
@@ -1229,7 +1230,7 @@ static int open_dummy_log(struct super_block *sb)
if (rc) {
kfree(dummy_log);
dummy_log = NULL;
- up(&jfs_log_sem);
+ mutex_unlock(&jfs_log_mutex);
return rc;
}
}
@@ -1238,7 +1239,7 @@ static int open_dummy_log(struct super_block *sb)
list_add(&JFS_SBI(sb)->log_list, &dummy_log->sb_list);
JFS_SBI(sb)->log = dummy_log;
LOG_UNLOCK(dummy_log);
- up(&jfs_log_sem);
+ mutex_unlock(&jfs_log_mutex);
return 0;
}
@@ -1466,7 +1467,7 @@ int lmLogClose(struct super_block *sb)
jfs_info("lmLogClose: log:0x%p", log);
- down(&jfs_log_sem);
+ mutex_lock(&jfs_log_mutex);
LOG_LOCK(log);
list_del(&sbi->log_list);
LOG_UNLOCK(log);
@@ -1516,7 +1517,7 @@ int lmLogClose(struct super_block *sb)
kfree(log);
out:
- up(&jfs_log_sem);
+ mutex_unlock(&jfs_log_mutex);
jfs_info("lmLogClose: exit(%d)", rc);
return rc;
}
diff --git a/fs/jfs/jfs_metapage.c b/fs/jfs/jfs_metapage.c
index 5fbaeaadccd..f28696f235c 100644
--- a/fs/jfs/jfs_metapage.c
+++ b/fs/jfs/jfs_metapage.c
@@ -220,8 +220,8 @@ int __init metapage_init(void)
if (metapage_cache == NULL)
return -ENOMEM;
- metapage_mempool = mempool_create(METAPOOL_MIN_PAGES, mempool_alloc_slab,
- mempool_free_slab, metapage_cache);
+ metapage_mempool = mempool_create_slab_pool(METAPOOL_MIN_PAGES,
+ metapage_cache);
if (metapage_mempool == NULL) {
kmem_cache_destroy(metapage_cache);
@@ -578,14 +578,13 @@ static int metapage_releasepage(struct page *page, gfp_t gfp_mask)
return 0;
}
-static int metapage_invalidatepage(struct page *page, unsigned long offset)
+static void metapage_invalidatepage(struct page *page, unsigned long offset)
{
BUG_ON(offset);
- if (PageWriteback(page))
- return 0;
+ BUG_ON(PageWriteback(page));
- return metapage_releasepage(page, 0);
+ metapage_releasepage(page, 0);
}
struct address_space_operations jfs_metapage_aops = {
diff --git a/fs/lockd/host.c b/fs/lockd/host.c
index 112ebf8b8df..729ac427d35 100644
--- a/fs/lockd/host.c
+++ b/fs/lockd/host.c
@@ -16,6 +16,7 @@
#include <linux/sunrpc/svc.h>
#include <linux/lockd/lockd.h>
#include <linux/lockd/sm_inter.h>
+#include <linux/mutex.h>
#define NLMDBG_FACILITY NLMDBG_HOSTCACHE
@@ -30,7 +31,7 @@
static struct nlm_host * nlm_hosts[NLM_HOST_NRHASH];
static unsigned long next_gc;
static int nrhosts;
-static DECLARE_MUTEX(nlm_host_sema);
+static DEFINE_MUTEX(nlm_host_mutex);
static void nlm_gc_hosts(void);
@@ -71,7 +72,7 @@ nlm_lookup_host(int server, struct sockaddr_in *sin,
hash = NLM_ADDRHASH(sin->sin_addr.s_addr);
/* Lock hash table */
- down(&nlm_host_sema);
+ mutex_lock(&nlm_host_mutex);
if (time_after_eq(jiffies, next_gc))
nlm_gc_hosts();
@@ -91,7 +92,7 @@ nlm_lookup_host(int server, struct sockaddr_in *sin,
nlm_hosts[hash] = host;
}
nlm_get_host(host);
- up(&nlm_host_sema);
+ mutex_unlock(&nlm_host_mutex);
return host;
}
}
@@ -130,7 +131,7 @@ nlm_lookup_host(int server, struct sockaddr_in *sin,
next_gc = 0;
nohost:
- up(&nlm_host_sema);
+ mutex_unlock(&nlm_host_mutex);
return host;
}
@@ -141,19 +142,19 @@ nlm_find_client(void)
* and return it
*/
int hash;
- down(&nlm_host_sema);
+ mutex_lock(&nlm_host_mutex);
for (hash = 0 ; hash < NLM_HOST_NRHASH; hash++) {
struct nlm_host *host, **hp;
for (hp = &nlm_hosts[hash]; (host = *hp) != 0; hp = &host->h_next) {
if (host->h_server &&
host->h_killed == 0) {
nlm_get_host(host);
- up(&nlm_host_sema);
+ mutex_unlock(&nlm_host_mutex);
return host;
}
}
}
- up(&nlm_host_sema);
+ mutex_unlock(&nlm_host_mutex);
return NULL;
}
@@ -265,7 +266,7 @@ nlm_shutdown_hosts(void)
int i;
dprintk("lockd: shutting down host module\n");
- down(&nlm_host_sema);
+ mutex_lock(&nlm_host_mutex);
/* First, make all hosts eligible for gc */
dprintk("lockd: nuking all hosts...\n");
@@ -276,7 +277,7 @@ nlm_shutdown_hosts(void)
/* Then, perform a garbage collection pass */
nlm_gc_hosts();
- up(&nlm_host_sema);
+ mutex_unlock(&nlm_host_mutex);
/* complain if any hosts are left */
if (nrhosts) {
diff --git a/fs/lockd/svc.c b/fs/lockd/svc.c
index 5e85bde6c12..fd56c8872f3 100644
--- a/fs/lockd/svc.c
+++ b/fs/lockd/svc.c
@@ -25,6 +25,7 @@
#include <linux/slab.h>
#include <linux/smp.h>
#include <linux/smp_lock.h>
+#include <linux/mutex.h>
#include <linux/sunrpc/types.h>
#include <linux/sunrpc/stats.h>
@@ -43,13 +44,13 @@ static struct svc_program nlmsvc_program;
struct nlmsvc_binding * nlmsvc_ops;
EXPORT_SYMBOL(nlmsvc_ops);
-static DECLARE_MUTEX(nlmsvc_sema);
+static DEFINE_MUTEX(nlmsvc_mutex);
static unsigned int nlmsvc_users;
static pid_t nlmsvc_pid;
int nlmsvc_grace_period;
unsigned long nlmsvc_timeout;
-static DECLARE_MUTEX_LOCKED(lockd_start);
+static DECLARE_COMPLETION(lockd_start_done);
static DECLARE_WAIT_QUEUE_HEAD(lockd_exit);
/*
@@ -112,7 +113,7 @@ lockd(struct svc_rqst *rqstp)
* Let our maker know we're running.
*/
nlmsvc_pid = current->pid;
- up(&lockd_start);
+ complete(&lockd_start_done);
daemonize("lockd");
@@ -215,7 +216,7 @@ lockd_up(void)
struct svc_serv * serv;
int error = 0;
- down(&nlmsvc_sema);
+ mutex_lock(&nlmsvc_mutex);
/*
* Unconditionally increment the user count ... this is
* the number of clients who _want_ a lockd process.
@@ -263,7 +264,7 @@ lockd_up(void)
"lockd_up: create thread failed, error=%d\n", error);
goto destroy_and_out;
}
- down(&lockd_start);
+ wait_for_completion(&lockd_start_done);
/*
* Note: svc_serv structures have an initial use count of 1,
@@ -272,7 +273,7 @@ lockd_up(void)
destroy_and_out:
svc_destroy(serv);
out:
- up(&nlmsvc_sema);
+ mutex_unlock(&nlmsvc_mutex);
return error;
}
EXPORT_SYMBOL(lockd_up);
@@ -285,7 +286,7 @@ lockd_down(void)
{
static int warned;
- down(&nlmsvc_sema);
+ mutex_lock(&nlmsvc_mutex);
if (nlmsvc_users) {
if (--nlmsvc_users)
goto out;
@@ -315,7 +316,7 @@ lockd_down(void)
recalc_sigpending();
spin_unlock_irq(&current->sighand->siglock);
out:
- up(&nlmsvc_sema);
+ mutex_unlock(&nlmsvc_mutex);
}
EXPORT_SYMBOL(lockd_down);
diff --git a/fs/lockd/svcsubs.c b/fs/lockd/svcsubs.c
index c7a6e3ae44d..a570e5c8a93 100644
--- a/fs/lockd/svcsubs.c
+++ b/fs/lockd/svcsubs.c
@@ -11,6 +11,7 @@
#include <linux/string.h>
#include <linux/time.h>
#include <linux/in.h>
+#include <linux/mutex.h>
#include <linux/sunrpc/svc.h>
#include <linux/sunrpc/clnt.h>
#include <linux/nfsd/nfsfh.h>
@@ -28,7 +29,7 @@
#define FILE_HASH_BITS 5
#define FILE_NRHASH (1<<FILE_HASH_BITS)
static struct nlm_file * nlm_files[FILE_NRHASH];
-static DECLARE_MUTEX(nlm_file_sema);
+static DEFINE_MUTEX(nlm_file_mutex);
#ifdef NFSD_DEBUG
static inline void nlm_debug_print_fh(char *msg, struct nfs_fh *f)
@@ -91,7 +92,7 @@ nlm_lookup_file(struct svc_rqst *rqstp, struct nlm_file **result,
hash = file_hash(f);
/* Lock file table */
- down(&nlm_file_sema);
+ mutex_lock(&nlm_file_mutex);
for (file = nlm_files[hash]; file; file = file->f_next)
if (!nfs_compare_fh(&file->f_handle, f))
@@ -130,7 +131,7 @@ found:
nfserr = 0;
out_unlock:
- up(&nlm_file_sema);
+ mutex_unlock(&nlm_file_mutex);
return nfserr;
out_free:
@@ -239,14 +240,14 @@ nlm_traverse_files(struct nlm_host *host, int action)
struct nlm_file *file, **fp;
int i;
- down(&nlm_file_sema);
+ mutex_lock(&nlm_file_mutex);
for (i = 0; i < FILE_NRHASH; i++) {
fp = nlm_files + i;
while ((file = *fp) != NULL) {
/* Traverse locks, blocks and shares of this file
* and update file->f_locks count */
if (nlm_inspect_file(host, file, action)) {
- up(&nlm_file_sema);
+ mutex_unlock(&nlm_file_mutex);
return 1;
}
@@ -261,7 +262,7 @@ nlm_traverse_files(struct nlm_host *host, int action)
}
}
}
- up(&nlm_file_sema);
+ mutex_unlock(&nlm_file_mutex);
return 0;
}
@@ -281,7 +282,7 @@ nlm_release_file(struct nlm_file *file)
file, file->f_count);
/* Lock file table */
- down(&nlm_file_sema);
+ mutex_lock(&nlm_file_mutex);
/* If there are no more locks etc, delete the file */
if(--file->f_count == 0) {
@@ -289,7 +290,7 @@ nlm_release_file(struct nlm_file *file)
nlm_delete_file(file);
}
- up(&nlm_file_sema);
+ mutex_unlock(&nlm_file_mutex);
}
/*
diff --git a/fs/locks.c b/fs/locks.c
index 56f996e98bb..4d9e71d43e7 100644
--- a/fs/locks.c
+++ b/fs/locks.c
@@ -142,7 +142,7 @@ int lease_break_time = 45;
static LIST_HEAD(file_lock_list);
static LIST_HEAD(blocked_list);
-static kmem_cache_t *filelock_cache;
+static kmem_cache_t *filelock_cache __read_mostly;
/* Allocate an empty lock structure. */
static struct file_lock *locks_alloc_lock(void)
@@ -533,12 +533,7 @@ static void locks_delete_block(struct file_lock *waiter)
static void locks_insert_block(struct file_lock *blocker,
struct file_lock *waiter)
{
- if (!list_empty(&waiter->fl_block)) {
- printk(KERN_ERR "locks_insert_block: removing duplicated lock "
- "(pid=%d %Ld-%Ld type=%d)\n", waiter->fl_pid,
- waiter->fl_start, waiter->fl_end, waiter->fl_type);
- __locks_delete_block(waiter);
- }
+ BUG_ON(!list_empty(&waiter->fl_block));
list_add_tail(&waiter->fl_block, &blocker->fl_block);
waiter->fl_next = blocker;
if (IS_POSIX(blocker))
@@ -797,9 +792,7 @@ out:
return error;
}
-EXPORT_SYMBOL(posix_lock_file);
-
-static int __posix_lock_file(struct inode *inode, struct file_lock *request)
+static int __posix_lock_file_conf(struct inode *inode, struct file_lock *request, struct file_lock *conflock)
{
struct file_lock *fl;
struct file_lock *new_fl, *new_fl2;
@@ -823,6 +816,8 @@ static int __posix_lock_file(struct inode *inode, struct file_lock *request)
continue;
if (!posix_locks_conflict(request, fl))
continue;
+ if (conflock)
+ locks_copy_lock(conflock, fl);
error = -EAGAIN;
if (!(request->fl_flags & FL_SLEEP))
goto out;
@@ -992,8 +987,24 @@ static int __posix_lock_file(struct inode *inode, struct file_lock *request)
*/
int posix_lock_file(struct file *filp, struct file_lock *fl)
{
- return __posix_lock_file(filp->f_dentry->d_inode, fl);
+ return __posix_lock_file_conf(filp->f_dentry->d_inode, fl, NULL);
+}
+EXPORT_SYMBOL(posix_lock_file);
+
+/**
+ * posix_lock_file_conf - Apply a POSIX-style lock to a file
+ * @filp: The file to apply the lock to
+ * @fl: The lock to be applied
+ * @conflock: Place to return a copy of the conflicting lock, if found.
+ *
+ * Except for the conflock parameter, acts just like posix_lock_file.
+ */
+int posix_lock_file_conf(struct file *filp, struct file_lock *fl,
+ struct file_lock *conflock)
+{
+ return __posix_lock_file_conf(filp->f_dentry->d_inode, fl, conflock);
}
+EXPORT_SYMBOL(posix_lock_file_conf);
/**
* posix_lock_file_wait - Apply a POSIX-style lock to a file
@@ -1009,7 +1020,7 @@ int posix_lock_file_wait(struct file *filp, struct file_lock *fl)
int error;
might_sleep ();
for (;;) {
- error = __posix_lock_file(filp->f_dentry->d_inode, fl);
+ error = posix_lock_file(filp, fl);
if ((error != -EAGAIN) || !(fl->fl_flags & FL_SLEEP))
break;
error = wait_event_interruptible(fl->fl_wait, !fl->fl_next);
@@ -1081,7 +1092,7 @@ int locks_mandatory_area(int read_write, struct inode *inode,
fl.fl_end = offset + count - 1;
for (;;) {
- error = __posix_lock_file(inode, &fl);
+ error = __posix_lock_file_conf(inode, &fl, NULL);
if (error != -EAGAIN)
break;
if (!(fl.fl_flags & FL_SLEEP))
@@ -1694,7 +1705,7 @@ again:
error = filp->f_op->lock(filp, cmd, file_lock);
else {
for (;;) {
- error = __posix_lock_file(inode, file_lock);
+ error = posix_lock_file(filp, file_lock);
if ((error != -EAGAIN) || (cmd == F_SETLK))
break;
error = wait_event_interruptible(file_lock->fl_wait,
@@ -1837,7 +1848,7 @@ again:
error = filp->f_op->lock(filp, cmd, file_lock);
else {
for (;;) {
- error = __posix_lock_file(inode, file_lock);
+ error = posix_lock_file(filp, file_lock);
if ((error != -EAGAIN) || (cmd == F_SETLK64))
break;
error = wait_event_interruptible(file_lock->fl_wait,
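The new posix_lock_file_conf() export lets a caller obtain a copy of the conflicting lock in the same call, instead of racing a separate posix_test_lock() as nfsd did before (see the nfs4state.c hunks below). A hedged usage sketch (example_setlk() is hypothetical):

static int example_setlk(struct file *filp, struct file_lock *request)
{
	struct file_lock conf;
	int err;

	locks_init_lock(&conf);		/* receives a copy on conflict */
	err = posix_lock_file_conf(filp, request, &conf);
	if (err == -EAGAIN)
		printk(KERN_DEBUG "blocked by pid %d: %Ld-%Ld\n",
		       conf.fl_pid, conf.fl_start, conf.fl_end);
	return err;
}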
diff --git a/fs/mpage.c b/fs/mpage.c
index e431cb3878d..9bf2eb30e6f 100644
--- a/fs/mpage.c
+++ b/fs/mpage.c
@@ -163,9 +163,19 @@ map_buffer_to_page(struct page *page, struct buffer_head *bh, int page_block)
} while (page_bh != head);
}
+/*
+ * This is the worker routine which does all the work of mapping the disk
+ * blocks and constructing the largest possible bios, submitting them for IO
+ * when the blocks are not contiguous on disk.
+ *
+ * We pass a buffer_head back and forth and use its buffer_mapped() flag to
+ * represent the validity of its disk mapping and to decide when to do the next
+ * get_block() call.
+ */
static struct bio *
do_mpage_readpage(struct bio *bio, struct page *page, unsigned nr_pages,
- sector_t *last_block_in_bio, get_block_t get_block)
+ sector_t *last_block_in_bio, struct buffer_head *map_bh,
+ unsigned long *first_logical_block, get_block_t get_block)
{
struct inode *inode = page->mapping->host;
const unsigned blkbits = inode->i_blkbits;
@@ -173,33 +183,72 @@ do_mpage_readpage(struct bio *bio, struct page *page, unsigned nr_pages,
const unsigned blocksize = 1 << blkbits;
sector_t block_in_file;
sector_t last_block;
+ sector_t last_block_in_file;
sector_t blocks[MAX_BUF_PER_PAGE];
unsigned page_block;
unsigned first_hole = blocks_per_page;
struct block_device *bdev = NULL;
- struct buffer_head bh;
int length;
int fully_mapped = 1;
+ unsigned nblocks;
+ unsigned relative_block;
if (page_has_buffers(page))
goto confused;
block_in_file = (sector_t)page->index << (PAGE_CACHE_SHIFT - blkbits);
- last_block = (i_size_read(inode) + blocksize - 1) >> blkbits;
+ last_block = block_in_file + nr_pages * blocks_per_page;
+ last_block_in_file = (i_size_read(inode) + blocksize - 1) >> blkbits;
+ if (last_block > last_block_in_file)
+ last_block = last_block_in_file;
+ page_block = 0;
+
+ /*
+ * Map blocks using the result from the previous get_blocks call first.
+ */
+ nblocks = map_bh->b_size >> blkbits;
+ if (buffer_mapped(map_bh) && block_in_file > *first_logical_block &&
+ block_in_file < (*first_logical_block + nblocks)) {
+ unsigned map_offset = block_in_file - *first_logical_block;
+ unsigned last = nblocks - map_offset;
+
+ for (relative_block = 0; ; relative_block++) {
+ if (relative_block == last) {
+ clear_buffer_mapped(map_bh);
+ break;
+ }
+ if (page_block == blocks_per_page)
+ break;
+ blocks[page_block] = map_bh->b_blocknr + map_offset +
+ relative_block;
+ page_block++;
+ block_in_file++;
+ }
+ bdev = map_bh->b_bdev;
+ }
+
+ /*
+ * Then do more get_blocks calls until we are done with this page.
+ */
+ map_bh->b_page = page;
+ while (page_block < blocks_per_page) {
+ map_bh->b_state = 0;
+ map_bh->b_size = 0;
- bh.b_page = page;
- for (page_block = 0; page_block < blocks_per_page;
- page_block++, block_in_file++) {
- bh.b_state = 0;
if (block_in_file < last_block) {
- if (get_block(inode, block_in_file, &bh, 0))
+ map_bh->b_size = (last_block-block_in_file) << blkbits;
+ if (get_block(inode, block_in_file, map_bh, 0))
goto confused;
+ *first_logical_block = block_in_file;
}
- if (!buffer_mapped(&bh)) {
+ if (!buffer_mapped(map_bh)) {
fully_mapped = 0;
if (first_hole == blocks_per_page)
first_hole = page_block;
+ page_block++;
+ block_in_file++;
+ clear_buffer_mapped(map_bh);
continue;
}
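The gain from threading map_bh and first_logical_block through do_mpage_readpage() is that one get_block() call can map an extent spanning many pages, and later pages in the same readahead batch reuse it. A worked example of the reuse test above (numbers are illustrative): suppose get_block() mapped 16 blocks starting at logical block 128, so *first_logical_block == 128 and map_bh->b_size == 16 << blkbits; a page whose first block is 132 satisfies buffer_mapped(map_bh) && 132 > 128 && 132 < 128 + 16, and is filled from map_bh->b_blocknr + (132 - 128) without another get_block() call.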
@@ -209,8 +258,8 @@ do_mpage_readpage(struct bio *bio, struct page *page, unsigned nr_pages,
* we just collected from get_block into the page's buffers
* so readpage doesn't have to repeat the get_block call
*/
- if (buffer_uptodate(&bh)) {
- map_buffer_to_page(page, &bh, page_block);
+ if (buffer_uptodate(map_bh)) {
+ map_buffer_to_page(page, map_bh, page_block);
goto confused;
}
@@ -218,10 +267,20 @@ do_mpage_readpage(struct bio *bio, struct page *page, unsigned nr_pages,
goto confused; /* hole -> non-hole */
/* Contiguous blocks? */
- if (page_block && blocks[page_block-1] != bh.b_blocknr-1)
+ if (page_block && blocks[page_block-1] != map_bh->b_blocknr-1)
goto confused;
- blocks[page_block] = bh.b_blocknr;
- bdev = bh.b_bdev;
+ nblocks = map_bh->b_size >> blkbits;
+ for (relative_block = 0; ; relative_block++) {
+ if (relative_block == nblocks) {
+ clear_buffer_mapped(map_bh);
+ break;
+ } else if (page_block == blocks_per_page)
+ break;
+ blocks[page_block] = map_bh->b_blocknr+relative_block;
+ page_block++;
+ block_in_file++;
+ }
+ bdev = map_bh->b_bdev;
}
if (first_hole != blocks_per_page) {
@@ -260,7 +319,7 @@ alloc_new:
goto alloc_new;
}
- if (buffer_boundary(&bh) || (first_hole != blocks_per_page))
+ if (buffer_boundary(map_bh) || (first_hole != blocks_per_page))
bio = mpage_bio_submit(READ, bio);
else
*last_block_in_bio = blocks[blocks_per_page - 1];
@@ -331,7 +390,10 @@ mpage_readpages(struct address_space *mapping, struct list_head *pages,
unsigned page_idx;
sector_t last_block_in_bio = 0;
struct pagevec lru_pvec;
+ struct buffer_head map_bh;
+ unsigned long first_logical_block = 0;
+ clear_buffer_mapped(&map_bh);
pagevec_init(&lru_pvec, 0);
for (page_idx = 0; page_idx < nr_pages; page_idx++) {
struct page *page = list_entry(pages->prev, struct page, lru);
@@ -342,7 +404,9 @@ mpage_readpages(struct address_space *mapping, struct list_head *pages,
page->index, GFP_KERNEL)) {
bio = do_mpage_readpage(bio, page,
nr_pages - page_idx,
- &last_block_in_bio, get_block);
+ &last_block_in_bio, &map_bh,
+ &first_logical_block,
+ get_block);
if (!pagevec_add(&lru_pvec, page))
__pagevec_lru_add(&lru_pvec);
} else {
@@ -364,9 +428,12 @@ int mpage_readpage(struct page *page, get_block_t get_block)
{
struct bio *bio = NULL;
sector_t last_block_in_bio = 0;
+ struct buffer_head map_bh;
+ unsigned long first_logical_block = 0;
- bio = do_mpage_readpage(bio, page, 1,
- &last_block_in_bio, get_block);
+ clear_buffer_mapped(&map_bh);
+ bio = do_mpage_readpage(bio, page, 1, &last_block_in_bio,
+ &map_bh, &first_logical_block, get_block);
if (bio)
mpage_bio_submit(READ, bio);
return 0;
@@ -472,6 +539,7 @@ __mpage_writepage(struct bio *bio, struct page *page, get_block_t get_block,
for (page_block = 0; page_block < blocks_per_page; ) {
map_bh.b_state = 0;
+ map_bh.b_size = 1 << blkbits;
if (get_block(inode, block_in_file, &map_bh, 1))
goto confused;
if (buffer_new(&map_bh))
diff --git a/fs/namespace.c b/fs/namespace.c
index 71e75bcf4d2..e069a4c5e38 100644
--- a/fs/namespace.c
+++ b/fs/namespace.c
@@ -43,9 +43,9 @@ __cacheline_aligned_in_smp DEFINE_SPINLOCK(vfsmount_lock);
static int event;
-static struct list_head *mount_hashtable;
+static struct list_head *mount_hashtable __read_mostly;
static int hash_mask __read_mostly, hash_bits __read_mostly;
-static kmem_cache_t *mnt_cache;
+static kmem_cache_t *mnt_cache __read_mostly;
static struct rw_semaphore namespace_sem;
/* /sys/fs */
diff --git a/fs/nfs/callback.c b/fs/nfs/callback.c
index 99d2cfbce86..90c95adc8c1 100644
--- a/fs/nfs/callback.c
+++ b/fs/nfs/callback.c
@@ -14,6 +14,7 @@
#include <linux/sunrpc/svc.h>
#include <linux/sunrpc/svcsock.h>
#include <linux/nfs_fs.h>
+#include <linux/mutex.h>
#include <net/inet_sock.h>
@@ -31,7 +32,7 @@ struct nfs_callback_data {
};
static struct nfs_callback_data nfs_callback_info;
-static DECLARE_MUTEX(nfs_callback_sema);
+static DEFINE_MUTEX(nfs_callback_mutex);
static struct svc_program nfs4_callback_program;
unsigned int nfs_callback_set_tcpport;
@@ -95,7 +96,7 @@ int nfs_callback_up(void)
int ret = 0;
lock_kernel();
- down(&nfs_callback_sema);
+ mutex_lock(&nfs_callback_mutex);
if (nfs_callback_info.users++ || nfs_callback_info.pid != 0)
goto out;
init_completion(&nfs_callback_info.started);
@@ -121,7 +122,7 @@ int nfs_callback_up(void)
nfs_callback_info.serv = serv;
wait_for_completion(&nfs_callback_info.started);
out:
- up(&nfs_callback_sema);
+ mutex_unlock(&nfs_callback_mutex);
unlock_kernel();
return ret;
out_destroy:
@@ -139,7 +140,7 @@ int nfs_callback_down(void)
int ret = 0;
lock_kernel();
- down(&nfs_callback_sema);
+ mutex_lock(&nfs_callback_mutex);
nfs_callback_info.users--;
do {
if (nfs_callback_info.users != 0 || nfs_callback_info.pid == 0)
@@ -147,7 +148,7 @@ int nfs_callback_down(void)
if (kill_proc(nfs_callback_info.pid, SIGKILL, 1) < 0)
break;
} while (wait_for_completion_timeout(&nfs_callback_info.stopped, 5*HZ) == 0);
- up(&nfs_callback_sema);
+ mutex_unlock(&nfs_callback_mutex);
unlock_kernel();
return ret;
}
diff --git a/fs/nfs/file.c b/fs/nfs/file.c
index 5263b2864a4..dee49a0cb99 100644
--- a/fs/nfs/file.c
+++ b/fs/nfs/file.c
@@ -318,10 +318,9 @@ static int nfs_commit_write(struct file *file, struct page *page, unsigned offse
return status;
}
-static int nfs_invalidate_page(struct page *page, unsigned long offset)
+static void nfs_invalidate_page(struct page *page, unsigned long offset)
{
/* FIXME: we really should cancel any unstarted writes on this page */
- return 1;
}
static int nfs_release_page(struct page *page, gfp_t gfp)
diff --git a/fs/nfs/read.c b/fs/nfs/read.c
index 3961524fd4a..624ca7146b6 100644
--- a/fs/nfs/read.c
+++ b/fs/nfs/read.c
@@ -663,10 +663,8 @@ int nfs_init_readpagecache(void)
if (nfs_rdata_cachep == NULL)
return -ENOMEM;
- nfs_rdata_mempool = mempool_create(MIN_POOL_READ,
- mempool_alloc_slab,
- mempool_free_slab,
- nfs_rdata_cachep);
+ nfs_rdata_mempool = mempool_create_slab_pool(MIN_POOL_READ,
+ nfs_rdata_cachep);
if (nfs_rdata_mempool == NULL)
return -ENOMEM;
diff --git a/fs/nfs/write.c b/fs/nfs/write.c
index 3f5225404c9..4cfada2cc09 100644
--- a/fs/nfs/write.c
+++ b/fs/nfs/write.c
@@ -1521,17 +1521,13 @@ int nfs_init_writepagecache(void)
if (nfs_wdata_cachep == NULL)
return -ENOMEM;
- nfs_wdata_mempool = mempool_create(MIN_POOL_WRITE,
- mempool_alloc_slab,
- mempool_free_slab,
- nfs_wdata_cachep);
+ nfs_wdata_mempool = mempool_create_slab_pool(MIN_POOL_WRITE,
+ nfs_wdata_cachep);
if (nfs_wdata_mempool == NULL)
return -ENOMEM;
- nfs_commit_mempool = mempool_create(MIN_POOL_COMMIT,
- mempool_alloc_slab,
- mempool_free_slab,
- nfs_wdata_cachep);
+ nfs_commit_mempool = mempool_create_slab_pool(MIN_POOL_COMMIT,
+ nfs_wdata_cachep);
if (nfs_commit_mempool == NULL)
return -ENOMEM;
diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
index f6ab762bea9..47ec112b266 100644
--- a/fs/nfsd/nfs4state.c
+++ b/fs/nfsd/nfs4state.c
@@ -49,6 +49,7 @@
#include <linux/nfsd/state.h>
#include <linux/nfsd/xdr4.h>
#include <linux/namei.h>
+#include <linux/mutex.h>
#define NFSDDBG_FACILITY NFSDDBG_PROC
@@ -77,11 +78,11 @@ static void nfs4_set_recdir(char *recdir);
/* Locking:
*
- * client_sema:
+ * client_mutex:
* protects clientid_hashtbl[], clientstr_hashtbl[],
* unconfstr_hashtbl[], uncofid_hashtbl[].
*/
-static DECLARE_MUTEX(client_sema);
+static DEFINE_MUTEX(client_mutex);
static kmem_cache_t *stateowner_slab = NULL;
static kmem_cache_t *file_slab = NULL;
@@ -91,13 +92,13 @@ static kmem_cache_t *deleg_slab = NULL;
void
nfs4_lock_state(void)
{
- down(&client_sema);
+ mutex_lock(&client_mutex);
}
void
nfs4_unlock_state(void)
{
- up(&client_sema);
+ mutex_unlock(&client_mutex);
}
static inline u32
@@ -2749,37 +2750,31 @@ nfsd4_lock(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nfsd4_lock
* Note: locks.c uses the BKL to protect the inode's lock list.
*/
- status = posix_lock_file(filp, &file_lock);
- dprintk("NFSD: nfsd4_lock: posix_lock_file status %d\n",status);
+ /* XXX?: Just to divert the locks_release_private at the start of
+ * locks_copy_lock: */
+ conflock.fl_ops = NULL;
+ conflock.fl_lmops = NULL;
+ status = posix_lock_file_conf(filp, &file_lock, &conflock);
+ dprintk("NFSD: nfsd4_lock: posix_lock_file_conf status %d\n",status);
switch (-status) {
case 0: /* success! */
update_stateid(&lock_stp->st_stateid);
memcpy(&lock->lk_resp_stateid, &lock_stp->st_stateid,
sizeof(stateid_t));
- goto out;
- case (EAGAIN):
- goto conflicting_lock;
+ break;
+ case (EAGAIN): /* conflock holds conflicting lock */
+ status = nfserr_denied;
+ dprintk("NFSD: nfsd4_lock: conflicting lock found!\n");
+ nfs4_set_lock_denied(&conflock, &lock->lk_denied);
+ break;
case (EDEADLK):
status = nfserr_deadlock;
- dprintk("NFSD: nfsd4_lock: posix_lock_file() failed! status %d\n",status);
- goto out;
+ break;
default:
- status = nfserrno(status);
- dprintk("NFSD: nfsd4_lock: posix_lock_file() failed! status %d\n",status);
- goto out;
- }
-
-conflicting_lock:
- dprintk("NFSD: nfsd4_lock: conflicting lock found!\n");
- status = nfserr_denied;
- /* XXX There is a race here. Future patch needed to provide
- * an atomic posix_lock_and_test_file
- */
- if (!posix_test_lock(filp, &file_lock, &conflock)) {
- status = nfserr_serverfault;
- goto out;
+ dprintk("NFSD: nfsd4_lock: posix_lock_file_conf() failed! status %d\n",status);
+ status = nfserr_resource;
+ break;
}
- nfs4_set_lock_denied(&conflock, &lock->lk_denied);
out:
if (status && lock->lk_is_new && lock_sop)
release_stateowner(lock_sop);
diff --git a/fs/ntfs/logfile.c b/fs/ntfs/logfile.c
index 0fd70295cca..4af2ad1193e 100644
--- a/fs/ntfs/logfile.c
+++ b/fs/ntfs/logfile.c
@@ -515,10 +515,10 @@ BOOL ntfs_check_logfile(struct inode *log_vi, RESTART_PAGE_HEADER **rp)
log_page_size = PAGE_CACHE_SIZE;
log_page_mask = log_page_size - 1;
/*
- * Use generic_ffs() instead of ffs() to enable the compiler to
+ * Use ntfs_ffs() instead of ffs() to enable the compiler to
* optimize log_page_size and log_page_bits into constants.
*/
- log_page_bits = generic_ffs(log_page_size) - 1;
+ log_page_bits = ntfs_ffs(log_page_size) - 1;
size &= ~(s64)(log_page_size - 1);
/*
* Ensure the log file is big enough to store at least the two restart
diff --git a/fs/ntfs/mft.c b/fs/ntfs/mft.c
index 4e72bc7afdf..2438c00ec0c 100644
--- a/fs/ntfs/mft.c
+++ b/fs/ntfs/mft.c
@@ -2670,7 +2670,7 @@ mft_rec_already_initialized:
ni->name_len = 4;
ni->itype.index.block_size = 4096;
- ni->itype.index.block_size_bits = generic_ffs(4096) - 1;
+ ni->itype.index.block_size_bits = ntfs_ffs(4096) - 1;
ni->itype.index.collation_rule = COLLATION_FILE_NAME;
if (vol->cluster_size <= ni->itype.index.block_size) {
ni->itype.index.vcn_size = vol->cluster_size;
diff --git a/fs/ntfs/ntfs.h b/fs/ntfs/ntfs.h
index 0624c8ef4d9..166142960b5 100644
--- a/fs/ntfs/ntfs.h
+++ b/fs/ntfs/ntfs.h
@@ -132,4 +132,33 @@ extern int ntfs_ucstonls(const ntfs_volume *vol, const ntfschar *ins,
/* From fs/ntfs/upcase.c */
extern ntfschar *generate_default_upcase(void);
+static inline int ntfs_ffs(int x)
+{
+ int r = 1;
+
+ if (!x)
+ return 0;
+ if (!(x & 0xffff)) {
+ x >>= 16;
+ r += 16;
+ }
+ if (!(x & 0xff)) {
+ x >>= 8;
+ r += 8;
+ }
+ if (!(x & 0xf)) {
+ x >>= 4;
+ r += 4;
+ }
+ if (!(x & 3)) {
+ x >>= 2;
+ r += 2;
+ }
+ if (!(x & 1)) {
+ x >>= 1;
+ r += 1;
+ }
+ return r;
+}
+
#endif /* _LINUX_NTFS_H */
diff --git a/fs/ocfs2/aops.c b/fs/ocfs2/aops.c
index bf931ba1d36..0d858d0b25b 100644
--- a/fs/ocfs2/aops.c
+++ b/fs/ocfs2/aops.c
@@ -540,7 +540,6 @@ bail:
* fs_count, map_bh, dio->rw == WRITE);
*/
static int ocfs2_direct_IO_get_blocks(struct inode *inode, sector_t iblock,
- unsigned long max_blocks,
struct buffer_head *bh_result, int create)
{
int ret;
@@ -548,6 +547,7 @@ static int ocfs2_direct_IO_get_blocks(struct inode *inode, sector_t iblock,
u64 p_blkno;
int contig_blocks;
unsigned char blocksize_bits;
+ unsigned long max_blocks = bh_result->b_size >> inode->i_blkbits;
if (!inode || !bh_result) {
mlog(ML_ERROR, "inode or bh_result is null\n");
diff --git a/fs/ocfs2/journal.c b/fs/ocfs2/journal.c
index ae3440ca083..6a610ae5358 100644
--- a/fs/ocfs2/journal.c
+++ b/fs/ocfs2/journal.c
@@ -377,7 +377,7 @@ int ocfs2_journal_access(struct ocfs2_journal_handle *handle,
BUG_ON(!bh);
BUG_ON(!(handle->flags & OCFS2_HANDLE_STARTED));
- mlog_entry("bh->b_blocknr=%llu, type=%d (\"%s\"), bh->b_size = %hu\n",
+ mlog_entry("bh->b_blocknr=%llu, type=%d (\"%s\"), bh->b_size = %zu\n",
(unsigned long long)bh->b_blocknr, type,
(type == OCFS2_JOURNAL_ACCESS_CREATE) ?
"OCFS2_JOURNAL_ACCESS_CREATE" :
@@ -582,7 +582,8 @@ int ocfs2_journal_init(struct ocfs2_journal *journal, int *dirty)
}
mlog(0, "inode->i_size = %lld\n", inode->i_size);
- mlog(0, "inode->i_blocks = %lu\n", inode->i_blocks);
+ mlog(0, "inode->i_blocks = %llu\n",
+ (unsigned long long)inode->i_blocks);
mlog(0, "inode->ip_clusters = %u\n", OCFS2_I(inode)->ip_clusters);
/* call the kernels journal init function now */
@@ -850,8 +851,9 @@ static int ocfs2_force_read_journal(struct inode *inode)
memset(bhs, 0, sizeof(struct buffer_head *) * CONCURRENT_JOURNAL_FILL);
- mlog(0, "Force reading %lu blocks\n",
- (inode->i_blocks >> (inode->i_sb->s_blocksize_bits - 9)));
+ mlog(0, "Force reading %llu blocks\n",
+ (unsigned long long)(inode->i_blocks >>
+ (inode->i_sb->s_blocksize_bits - 9)));
v_blkno = 0;
while (v_blkno <
diff --git a/fs/ocfs2/namei.c b/fs/ocfs2/namei.c
index 274f61d0cda..0673862c8bd 100644
--- a/fs/ocfs2/namei.c
+++ b/fs/ocfs2/namei.c
@@ -1444,8 +1444,9 @@ static int ocfs2_create_symlink_data(struct ocfs2_super *osb,
* write i_size + 1 bytes. */
blocks = (bytes_left + sb->s_blocksize - 1) >> sb->s_blocksize_bits;
- mlog_entry("i_blocks = %lu, i_size = %llu, blocks = %d\n",
- inode->i_blocks, i_size_read(inode), blocks);
+ mlog_entry("i_blocks = %llu, i_size = %llu, blocks = %d\n",
+ (unsigned long long)inode->i_blocks,
+ i_size_read(inode), blocks);
/* Sanity check -- make sure we're going to fit. */
if (bytes_left >
diff --git a/fs/partitions/devfs.c b/fs/partitions/devfs.c
index 87f50444fd3..3f0a780c9ce 100644
--- a/fs/partitions/devfs.c
+++ b/fs/partitions/devfs.c
@@ -6,7 +6,7 @@
#include <linux/vmalloc.h>
#include <linux/genhd.h>
#include <linux/bitops.h>
-#include <asm/semaphore.h>
+#include <linux/mutex.h>
struct unique_numspace {
@@ -16,7 +16,7 @@ struct unique_numspace {
struct semaphore mutex;
};
-static DECLARE_MUTEX(numspace_mutex);
+static DEFINE_MUTEX(numspace_mutex);
static int expand_numspace(struct unique_numspace *s)
{
@@ -48,7 +48,7 @@ static int alloc_unique_number(struct unique_numspace *s)
{
int rval = 0;
- down(&numspace_mutex);
+ mutex_lock(&numspace_mutex);
if (s->num_free < 1)
rval = expand_numspace(s);
if (!rval) {
@@ -56,7 +56,7 @@ static int alloc_unique_number(struct unique_numspace *s)
--s->num_free;
__set_bit(rval, s->bits);
}
- up(&numspace_mutex);
+ mutex_unlock(&numspace_mutex);
return rval;
}
@@ -66,11 +66,11 @@ static void dealloc_unique_number(struct unique_numspace *s, int number)
int old_val;
if (number >= 0) {
- down(&numspace_mutex);
+ mutex_lock(&numspace_mutex);
old_val = __test_and_clear_bit(number, s->bits);
if (old_val)
++s->num_free;
- up(&numspace_mutex);
+ mutex_unlock(&numspace_mutex);
}
}
diff --git a/fs/pipe.c b/fs/pipe.c
index d976866a115..4384c929094 100644
--- a/fs/pipe.c
+++ b/fs/pipe.c
@@ -675,7 +675,7 @@ fail_page:
return NULL;
}
-static struct vfsmount *pipe_mnt;
+static struct vfsmount *pipe_mnt __read_mostly;
static int pipefs_delete_dentry(struct dentry *dentry)
{
return 1;
diff --git a/fs/proc/array.c b/fs/proc/array.c
index 7eb1bd7f800..7a76ad57023 100644
--- a/fs/proc/array.c
+++ b/fs/proc/array.c
@@ -330,7 +330,6 @@ static int do_task_stat(struct task_struct *task, char * buffer, int whole)
unsigned long min_flt = 0, maj_flt = 0;
cputime_t cutime, cstime, utime, stime;
unsigned long rsslim = 0;
- DEFINE_KTIME(it_real_value);
struct task_struct *t;
char tcomm[sizeof(task->comm)];
@@ -386,7 +385,6 @@ static int do_task_stat(struct task_struct *task, char * buffer, int whole)
utime = cputime_add(utime, task->signal->utime);
stime = cputime_add(stime, task->signal->stime);
}
- it_real_value = task->signal->real_timer.expires;
}
ppid = pid_alive(task) ? task->group_leader->real_parent->tgid : 0;
read_unlock(&tasklist_lock);
@@ -413,7 +411,7 @@ static int do_task_stat(struct task_struct *task, char * buffer, int whole)
start_time = nsec_to_clock_t(start_time);
res = sprintf(buffer,"%d (%s) %c %d %d %d %d %d %lu %lu \
-%lu %lu %lu %lu %lu %ld %ld %ld %ld %d %ld %llu %lu %ld %lu %lu %lu %lu %lu \
+%lu %lu %lu %lu %lu %ld %ld %ld %ld %d 0 %llu %lu %ld %lu %lu %lu %lu %lu \
%lu %lu %lu %lu %lu %lu %lu %lu %d %d %lu %lu\n",
task->pid,
tcomm,
@@ -435,7 +433,6 @@ static int do_task_stat(struct task_struct *task, char * buffer, int whole)
priority,
nice,
num_threads,
- (long) ktime_to_clock_t(it_real_value),
start_time,
vsize,
mm ? get_mm_rss(mm) : 0,
diff --git a/fs/proc/generic.c b/fs/proc/generic.c
index 20e5c4509a4..47b7a20d45e 100644
--- a/fs/proc/generic.c
+++ b/fs/proc/generic.c
@@ -19,6 +19,7 @@
#include <linux/idr.h>
#include <linux/namei.h>
#include <linux/bitops.h>
+#include <linux/spinlock.h>
#include <asm/uaccess.h>
#include "internal.h"
@@ -29,6 +30,8 @@ static ssize_t proc_file_write(struct file *file, const char __user *buffer,
size_t count, loff_t *ppos);
static loff_t proc_file_lseek(struct file *, loff_t, int);
+DEFINE_SPINLOCK(proc_subdir_lock);
+
int proc_match(int len, const char *name, struct proc_dir_entry *de)
{
if (de->namelen != len)
@@ -277,7 +280,9 @@ static int xlate_proc_name(const char *name,
const char *cp = name, *next;
struct proc_dir_entry *de;
int len;
+ int rtn = 0;
+ spin_lock(&proc_subdir_lock);
de = &proc_root;
while (1) {
next = strchr(cp, '/');
@@ -289,13 +294,17 @@ static int xlate_proc_name(const char *name,
if (proc_match(len, cp, de))
break;
}
- if (!de)
- return -ENOENT;
+ if (!de) {
+ rtn = -ENOENT;
+ goto out;
+ }
cp += len + 1;
}
*residual = cp;
*ret = de;
- return 0;
+out:
+ spin_unlock(&proc_subdir_lock);
+ return rtn;
}
static DEFINE_IDR(proc_inum_idr);
@@ -380,6 +389,7 @@ struct dentry *proc_lookup(struct inode * dir, struct dentry *dentry, struct nam
int error = -ENOENT;
lock_kernel();
+ spin_lock(&proc_subdir_lock);
de = PDE(dir);
if (de) {
for (de = de->subdir; de ; de = de->next) {
@@ -388,12 +398,15 @@ struct dentry *proc_lookup(struct inode * dir, struct dentry *dentry, struct nam
if (!memcmp(dentry->d_name.name, de->name, de->namelen)) {
unsigned int ino = de->low_ino;
+ spin_unlock(&proc_subdir_lock);
error = -EINVAL;
inode = proc_get_inode(dir->i_sb, ino, de);
+ spin_lock(&proc_subdir_lock);
break;
}
}
}
+ spin_unlock(&proc_subdir_lock);
unlock_kernel();
if (inode) {
@@ -447,11 +460,13 @@ int proc_readdir(struct file * filp,
filp->f_pos++;
/* fall through */
default:
+ spin_lock(&proc_subdir_lock);
de = de->subdir;
i -= 2;
for (;;) {
if (!de) {
ret = 1;
+ spin_unlock(&proc_subdir_lock);
goto out;
}
if (!i)
@@ -461,12 +476,16 @@ int proc_readdir(struct file * filp,
}
do {
+ /* filldir passes info to user space */
+ spin_unlock(&proc_subdir_lock);
if (filldir(dirent, de->name, de->namelen, filp->f_pos,
de->low_ino, de->mode >> 12) < 0)
goto out;
+ spin_lock(&proc_subdir_lock);
filp->f_pos++;
de = de->next;
} while (de);
+ spin_unlock(&proc_subdir_lock);
}
ret = 1;
out: unlock_kernel();
@@ -500,9 +519,13 @@ static int proc_register(struct proc_dir_entry * dir, struct proc_dir_entry * dp
if (i == 0)
return -EAGAIN;
dp->low_ino = i;
+
+ spin_lock(&proc_subdir_lock);
dp->next = dir->subdir;
dp->parent = dir;
dir->subdir = dp;
+ spin_unlock(&proc_subdir_lock);
+
if (S_ISDIR(dp->mode)) {
if (dp->proc_iops == NULL) {
dp->proc_fops = &proc_dir_operations;
@@ -694,6 +717,8 @@ void remove_proc_entry(const char *name, struct proc_dir_entry *parent)
if (!parent && xlate_proc_name(name, &parent, &fn) != 0)
goto out;
len = strlen(fn);
+
+ spin_lock(&proc_subdir_lock);
for (p = &parent->subdir; *p; p=&(*p)->next ) {
if (!proc_match(len, fn, *p))
continue;
@@ -714,6 +739,7 @@ void remove_proc_entry(const char *name, struct proc_dir_entry *parent)
}
break;
}
+ spin_unlock(&proc_subdir_lock);
out:
return;
}
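Note: the fs/proc/generic.c hunks above introduce proc_subdir_lock to serialize every walk and update of the ->subdir sibling lists. Because filldir() copies data to user space and proc_get_inode() can sleep, proc_readdir() and proc_lookup() drop the spinlock around those calls and re-take it before continuing. A minimal sketch of that drop-relock pattern with hypothetical names, assuming (as the proc code does) that the current element is not freed while the lock is dropped:

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(pde_list_lock);

struct pde {
	struct pde *next;
	const char *name;
};

/* Sketch: walk a spinlock-protected singly linked list and invoke a
 * callback that may sleep for each element, mirroring how proc_readdir()
 * drops proc_subdir_lock around filldir(). */
static int walk_pdes(struct pde *head, int (*visit)(struct pde *))
{
	struct pde *p;
	int err = 0;

	spin_lock(&pde_list_lock);
	for (p = head; p; p = p->next) {
		spin_unlock(&pde_list_lock);	/* visit() may sleep */
		err = visit(p);
		spin_lock(&pde_list_lock);
		if (err)
			break;
	}
	spin_unlock(&pde_list_lock);
	return err;
}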
diff --git a/fs/proc/proc_devtree.c b/fs/proc/proc_devtree.c
index 9bdd077d6f5..596b4b4f1cc 100644
--- a/fs/proc/proc_devtree.c
+++ b/fs/proc/proc_devtree.c
@@ -136,9 +136,11 @@ void proc_device_tree_add_node(struct device_node *np,
* properties are quite unimportant for us though, thus we
* simply "skip" them here, but we do have to check.
*/
+ spin_lock(&proc_subdir_lock);
for (ent = de->subdir; ent != NULL; ent = ent->next)
if (!strcmp(ent->name, pp->name))
break;
+ spin_unlock(&proc_subdir_lock);
if (ent != NULL) {
printk(KERN_WARNING "device-tree: property \"%s\" name"
" conflicts with node in %s\n", pp->name,
diff --git a/fs/reiserfs/inode.c b/fs/reiserfs/inode.c
index d60f6238c66..9857e50f85e 100644
--- a/fs/reiserfs/inode.c
+++ b/fs/reiserfs/inode.c
@@ -466,7 +466,6 @@ static int reiserfs_get_block_create_0(struct inode *inode, sector_t block,
direct_IO request. */
static int reiserfs_get_blocks_direct_io(struct inode *inode,
sector_t iblock,
- unsigned long max_blocks,
struct buffer_head *bh_result,
int create)
{
@@ -2793,7 +2792,7 @@ static int invalidatepage_can_drop(struct inode *inode, struct buffer_head *bh)
}
/* clm -- taken from fs/buffer.c:block_invalidate_page */
-static int reiserfs_invalidatepage(struct page *page, unsigned long offset)
+static void reiserfs_invalidatepage(struct page *page, unsigned long offset)
{
struct buffer_head *head, *bh, *next;
struct inode *inode = page->mapping->host;
@@ -2832,10 +2831,12 @@ static int reiserfs_invalidatepage(struct page *page, unsigned long offset)
* The get_block cached value has been unconditionally invalidated,
* so real IO is not possible anymore.
*/
- if (!offset && ret)
+ if (!offset && ret) {
ret = try_to_release_page(page, 0);
+ /* maybe should BUG_ON(!ret); - neilb */
+ }
out:
- return ret;
+ return;
}
static int reiserfs_set_page_dirty(struct page *page)
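Note: the reiserfs hunk follows the tree-wide change of ->invalidatepage() from int to void. Callers never used the return value, so the method now just invalidates buffers and makes a best-effort attempt to release the page when the whole page is invalidated (offset == 0), without reporting failure. For a filesystem with no per-buffer private state the method can collapse to a simple wrapper; a minimal sketch, assuming the post-change block_invalidatepage() from this series:

#include <linux/buffer_head.h>

/* Sketch: post-change shape of a trivial ->invalidatepage() method.
 * block_invalidatepage() discards buffers in the invalidated range and,
 * for offset == 0, tries to release the page; any failure to do so is
 * no longer propagated to the caller. */
static void example_invalidatepage(struct page *page, unsigned long offset)
{
	block_invalidatepage(page, offset);
}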
diff --git a/fs/reiserfs/prints.c b/fs/reiserfs/prints.c
index 78b40621b88..27bd3a1df2a 100644
--- a/fs/reiserfs/prints.c
+++ b/fs/reiserfs/prints.c
@@ -143,7 +143,7 @@ static void sprintf_buffer_head(char *buf, struct buffer_head *bh)
char b[BDEVNAME_SIZE];
sprintf(buf,
- "dev %s, size %d, blocknr %llu, count %d, state 0x%lx, page %p, (%s, %s, %s)",
+ "dev %s, size %zd, blocknr %llu, count %d, state 0x%lx, page %p, (%s, %s, %s)",
bdevname(bh->b_bdev, b), bh->b_size,
(unsigned long long)bh->b_blocknr, atomic_read(&(bh->b_count)),
bh->b_state, bh->b_page,
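Note: the prints.c hunk is a printf-format fix: bh->b_size is a size_t in this tree, and size_t arguments take the 'z' length modifier (%zd/%zu); plain %d is wrong on 64-bit builds where size_t is wider than int. A one-function sketch of the convention:

#include <linux/kernel.h>
#include <linux/buffer_head.h>

/* Sketch: always pair size_t/ssize_t arguments with the 'z' modifier so
 * the format string is correct on both 32- and 64-bit architectures. */
static void example_print_size(struct buffer_head *bh)
{
	printk(KERN_DEBUG "bh %p size %zd\n", bh, bh->b_size);
}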
diff --git a/fs/super.c b/fs/super.c
index 8743e9bbb29..a66f66bb804 100644
--- a/fs/super.c
+++ b/fs/super.c
@@ -37,6 +37,7 @@
#include <linux/writeback.h> /* for the emergency remount stuff */
#include <linux/idr.h>
#include <linux/kobject.h>
+#include <linux/mutex.h>
#include <asm/uaccess.h>
@@ -380,9 +381,9 @@ restart:
void sync_filesystems(int wait)
{
struct super_block *sb;
- static DECLARE_MUTEX(mutex);
+ static DEFINE_MUTEX(mutex);
- down(&mutex); /* Could be down_interruptible */
+ mutex_lock(&mutex); /* Could be mutex_lock_interruptible */
spin_lock(&sb_lock);
list_for_each_entry(sb, &super_blocks, s_list) {
if (!sb->s_op->sync_fs)
@@ -411,7 +412,7 @@ restart:
goto restart;
}
spin_unlock(&sb_lock);
- up(&mutex);
+ mutex_unlock(&mutex);
}
/**
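Note: the sync_filesystems() hunk above is part of the semaphore-to-mutex conversion: a statically declared, single-holder semaphore becomes a struct mutex, DECLARE_MUTEX() becomes DEFINE_MUTEX(), and down()/up() become mutex_lock()/mutex_unlock(). A minimal sketch of the converted pattern with hypothetical names:

#include <linux/mutex.h>

static DEFINE_MUTEX(example_mutex);

/* Sketch: serialize an operation so that at most one task runs the
 * critical section at a time, using the mutex API instead of a
 * semaphore used as a mutex. */
static void example_serialized_op(void)
{
	mutex_lock(&example_mutex);
	/* ... critical section ... */
	mutex_unlock(&example_mutex);
}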
diff --git a/fs/xfs/linux-2.6/xfs_aops.c b/fs/xfs/linux-2.6/xfs_aops.c
index 97fc056130e..c02f7c5b746 100644
--- a/fs/xfs/linux-2.6/xfs_aops.c
+++ b/fs/xfs/linux-2.6/xfs_aops.c
@@ -1310,20 +1310,21 @@ xfs_get_block(
struct buffer_head *bh_result,
int create)
{
- return __xfs_get_block(inode, iblock, 0, bh_result,
- create, 0, BMAPI_WRITE);
+ return __xfs_get_block(inode, iblock,
+ bh_result->b_size >> inode->i_blkbits,
+ bh_result, create, 0, BMAPI_WRITE);
}
STATIC int
xfs_get_blocks_direct(
struct inode *inode,
sector_t iblock,
- unsigned long max_blocks,
struct buffer_head *bh_result,
int create)
{
- return __xfs_get_block(inode, iblock, max_blocks, bh_result,
- create, 1, BMAPI_WRITE|BMAPI_DIRECT);
+ return __xfs_get_block(inode, iblock,
+ bh_result->b_size >> inode->i_blkbits,
+ bh_result, create, 1, BMAPI_WRITE|BMAPI_DIRECT);
}
STATIC void
@@ -1442,14 +1443,14 @@ xfs_vm_readpages(
return mpage_readpages(mapping, pages, nr_pages, xfs_get_block);
}
-STATIC int
+STATIC void
xfs_vm_invalidatepage(
struct page *page,
unsigned long offset)
{
xfs_page_trace(XFS_INVALIDPAGE_ENTER,
page->mapping->host, page, offset);
- return block_invalidatepage(page, offset);
+ block_invalidatepage(page, offset);
}
struct address_space_operations xfs_address_space_operations = {
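Note: the xfs_aops.c hunk reflects the removal of the max_blocks argument from get_blocks-style callbacks. The requested mapping length now arrives in bh_result->b_size (set by the direct-I/O and mpage code), and the callback reports how much it actually mapped by updating b_size before returning. A minimal sketch of that contract; example_map_extent() is a hypothetical helper standing in for the filesystem's block mapper:

#include <linux/buffer_head.h>

/* Sketch: derive the request size from bh_result->b_size instead of a
 * max_blocks argument, map what we can, and report the mapped length
 * back through b_size. */
static int example_get_blocks(struct inode *inode, sector_t iblock,
			      struct buffer_head *bh_result, int create)
{
	unsigned long max_blocks = bh_result->b_size >> inode->i_blkbits;
	unsigned long mapped;
	sector_t phys;
	int err;

	err = example_map_extent(inode, iblock, max_blocks, &phys, &mapped, create);
	if (err)
		return err;

	map_bh(bh_result, inode->i_sb, phys);
	bh_result->b_size = mapped << inode->i_blkbits;	/* tell the caller how much */
	return 0;
}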
diff --git a/fs/xfs/linux-2.6/xfs_super.c b/fs/xfs/linux-2.6/xfs_super.c
index 8355faf8ffd..1884300417e 100644
--- a/fs/xfs/linux-2.6/xfs_super.c
+++ b/fs/xfs/linux-2.6/xfs_super.c
@@ -375,9 +375,8 @@ xfs_init_zones(void)
if (!xfs_ioend_zone)
goto out_destroy_vnode_zone;
- xfs_ioend_pool = mempool_create(4 * MAX_BUF_PER_PAGE,
- mempool_alloc_slab, mempool_free_slab,
- xfs_ioend_zone);
+ xfs_ioend_pool = mempool_create_slab_pool(4 * MAX_BUF_PER_PAGE,
+ xfs_ioend_zone);
if (!xfs_ioend_pool)
goto out_free_ioend_zone;
return 0;
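Note: the xfs_super.c hunk switches to mempool_create_slab_pool(), shorthand for mempool_create() with the stock mempool_alloc_slab/mempool_free_slab callbacks over an existing kmem cache. A minimal sketch of creating such a pool with cleanup on failure; the cache, pool, and struct names are hypothetical:

#include <linux/slab.h>
#include <linux/mempool.h>

struct example_item {
	int payload;
};

static kmem_cache_t *example_zone;
static mempool_t *example_pool;

/* Sketch: back a mempool by a slab cache so allocations come from the
 * cache but a minimum reserve of 16 objects is always available. */
static int example_init_pool(void)
{
	example_zone = kmem_cache_create("example_zone",
					 sizeof(struct example_item),
					 0, 0, NULL, NULL);
	if (!example_zone)
		return -ENOMEM;

	example_pool = mempool_create_slab_pool(16, example_zone);
	if (!example_pool) {
		kmem_cache_destroy(example_zone);
		return -ENOMEM;
	}
	return 0;
}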