Diffstat (limited to 'fs')
-rw-r--r--  fs/9p/v9fs_vfs.h | 4
-rw-r--r--  fs/9p/vfs_dentry.c | 4
-rw-r--r--  fs/9p/vfs_super.c | 5
-rw-r--r--  fs/Kconfig | 56
-rw-r--r--  fs/Makefile | 6
-rw-r--r--  fs/adfs/adfs.h | 2
-rw-r--r--  fs/adfs/dir.c | 2
-rw-r--r--  fs/affs/affs.h | 3
-rw-r--r--  fs/affs/amigaffs.c | 8
-rw-r--r--  fs/affs/namei.c | 4
-rw-r--r--  fs/afs/dir.c | 2
-rw-r--r--  fs/anon_inodes.c | 2
-rw-r--r--  fs/attr.c | 3
-rw-r--r--  fs/autofs/root.c | 2
-rw-r--r--  fs/autofs4/inode.c | 2
-rw-r--r--  fs/autofs4/root.c | 4
-rw-r--r--  fs/bio-integrity.c | 85
-rw-r--r--  fs/bio.c | 87
-rw-r--r--  fs/block_dev.c | 146
-rw-r--r--  fs/btrfs/disk-io.c | 2
-rw-r--r--  fs/buffer.c | 145
-rw-r--r--  fs/cifs/CHANGES | 11
-rw-r--r--  fs/cifs/Kconfig | 21
-rw-r--r--  fs/cifs/README | 22
-rw-r--r--  fs/cifs/cifs_debug.c | 2
-rw-r--r--  fs/cifs/cifs_dfs_ref.c | 36
-rw-r--r--  fs/cifs/cifs_fs_sb.h | 1
-rw-r--r--  fs/cifs/cifsfs.c | 3
-rw-r--r--  fs/cifs/cifsfs.h | 4
-rw-r--r--  fs/cifs/cifsglob.h | 2
-rw-r--r--  fs/cifs/cifspdu.h | 76
-rw-r--r--  fs/cifs/cifsproto.h | 9
-rw-r--r--  fs/cifs/cifssmb.c | 27
-rw-r--r--  fs/cifs/connect.c | 9
-rw-r--r--  fs/cifs/dir.c | 10
-rw-r--r--  fs/cifs/file.c | 199
-rw-r--r--  fs/cifs/inode.c | 3
-rw-r--r--  fs/cifs/smbfsctl.h | 84
-rw-r--r--  fs/coda/dir.c | 2
-rw-r--r--  fs/compat.c | 28
-rw-r--r--  fs/compat_ioctl.c | 8
-rw-r--r--  fs/configfs/dir.c | 2
-rw-r--r--  fs/dcache.c | 48
-rw-r--r--  fs/devpts/inode.c | 188
-rw-r--r--  fs/dlm/dir.c | 18
-rw-r--r--  fs/dlm/dlm_internal.h | 2
-rw-r--r--  fs/dlm/lock.c | 60
-rw-r--r--  fs/dlm/lockspace.c | 2
-rw-r--r--  fs/dlm/lowcomms.c | 181
-rw-r--r--  fs/dlm/user.c | 24
-rw-r--r--  fs/drop_caches.c | 2
-rw-r--r--  fs/ecryptfs/dentry.c | 2
-rw-r--r--  fs/ecryptfs/ecryptfs_kernel.h | 2
-rw-r--r--  fs/eventpoll.c | 12
-rw-r--r--  fs/ext2/balloc.c | 8
-rw-r--r--  fs/ext2/ialloc.c | 10
-rw-r--r--  fs/ext2/inode.c | 2
-rw-r--r--  fs/ext2/super.c | 1
-rw-r--r--  fs/ext2/xattr.c | 8
-rw-r--r--  fs/ext3/balloc.c | 8
-rw-r--r--  fs/ext3/ialloc.c | 12
-rw-r--r--  fs/ext3/inode.c | 24
-rw-r--r--  fs/ext3/namei.c | 6
-rw-r--r--  fs/ext3/super.c | 48
-rw-r--r--  fs/ext3/xattr.c | 6
-rw-r--r--  fs/ext4/balloc.c | 2
-rw-r--r--  fs/ext4/ext4.h | 2
-rw-r--r--  fs/ext4/ialloc.c | 12
-rw-r--r--  fs/ext4/inode.c | 40
-rw-r--r--  fs/ext4/mballoc.c | 46
-rw-r--r--  fs/ext4/namei.c | 6
-rw-r--r--  fs/ext4/super.c | 54
-rw-r--r--  fs/ext4/xattr.c | 6
-rw-r--r--  fs/fat/namei_msdos.c | 2
-rw-r--r--  fs/fat/namei_vfat.c | 4
-rw-r--r--  fs/fcntl.c | 33
-rw-r--r--  fs/file_table.c | 1
-rw-r--r--  fs/fuse/dir.c | 2
-rw-r--r--  fs/fuse/fuse_i.h | 2
-rw-r--r--  fs/gfs2/ops_dentry.c | 2
-rw-r--r--  fs/gfs2/super.h | 2
-rw-r--r--  fs/hfs/hfs_fs.h | 2
-rw-r--r--  fs/hfs/sysdep.c | 2
-rw-r--r--  fs/hfsplus/hfsplus_fs.h | 2
-rw-r--r--  fs/hfsplus/inode.c | 2
-rw-r--r--  fs/hostfs/hostfs_kern.c | 4
-rw-r--r--  fs/hpfs/dentry.c | 2
-rw-r--r--  fs/inode.c | 6
-rw-r--r--  fs/ioctl.c | 18
-rw-r--r--  fs/isofs/inode.c | 2
-rw-r--r--  fs/jfs/acl.c | 2
-rw-r--r--  fs/jfs/inode.c | 6
-rw-r--r--  fs/jfs/jfs_dtree.c | 18
-rw-r--r--  fs/jfs/jfs_extent.c | 10
-rw-r--r--  fs/jfs/jfs_inode.c | 4
-rw-r--r--  fs/jfs/jfs_inode.h | 2
-rw-r--r--  fs/jfs/jfs_xtree.c | 14
-rw-r--r--  fs/jfs/namei.c | 10
-rw-r--r--  fs/jfs/xattr.c | 12
-rw-r--r--  fs/libfs.c | 5
-rw-r--r--  fs/namei.c | 48
-rw-r--r--  fs/namespace.c | 3
-rw-r--r--  fs/ncpfs/dir.c | 4
-rw-r--r--  fs/nfs/dir.c | 4
-rw-r--r--  fs/nfs/nfs4_fs.h | 2
-rw-r--r--  fs/nfsd/vfs.c | 9
-rw-r--r--  fs/notify/inotify/inotify.c | 16
-rw-r--r--  fs/ocfs2/dcache.c | 2
-rw-r--r--  fs/ocfs2/dcache.h | 2
-rw-r--r--  fs/open.c | 2
-rw-r--r--  fs/partitions/ibm.c | 101
-rw-r--r--  fs/pipe.c | 23
-rw-r--r--  fs/proc/base.c | 6
-rw-r--r--  fs/proc/generic.c | 2
-rw-r--r--  fs/proc/proc_sysctl.c | 4
-rw-r--r--  fs/proc/root.c | 3
-rw-r--r--  fs/quota/Kconfig | 59
-rw-r--r--  fs/quota/Makefile | 14
-rw-r--r--  fs/quota/dquot.c (renamed from fs/dquot.c) | 577
-rw-r--r--  fs/quota/quota.c (renamed from fs/quota.c) | 37
-rw-r--r--  fs/quota/quota_tree.c (renamed from fs/quota_tree.c) | 132
-rw-r--r--  fs/quota/quota_tree.h (renamed from fs/quota_tree.h) | 0
-rw-r--r--  fs/quota/quota_v1.c (renamed from fs/quota_v1.c) | 48
-rw-r--r--  fs/quota/quota_v2.c (renamed from fs/quota_v2.c) | 3
-rw-r--r--  fs/quota/quotaio_v1.h (renamed from fs/quotaio_v1.h) | 0
-rw-r--r--  fs/quota/quotaio_v2.h (renamed from fs/quotaio_v2.h) | 0
-rw-r--r--  fs/ramfs/file-nommu.c | 6
-rw-r--r--  fs/reiserfs/bitmap.c | 14
-rw-r--r--  fs/reiserfs/inode.c | 10
-rw-r--r--  fs/reiserfs/namei.c | 6
-rw-r--r--  fs/reiserfs/stree.c | 14
-rw-r--r--  fs/reiserfs/super.c | 60
-rw-r--r--  fs/reiserfs/xattr.c | 2
-rw-r--r--  fs/smbfs/dir.c | 4
-rw-r--r--  fs/super.c | 28
-rw-r--r--  fs/sync.c | 16
-rw-r--r--  fs/sysfs/dir.c | 2
-rw-r--r--  fs/sysv/namei.c | 2
-rw-r--r--  fs/sysv/sysv.h | 2
-rw-r--r--  fs/ubifs/super.c | 3
-rw-r--r--  fs/udf/balloc.c | 14
-rw-r--r--  fs/udf/ialloc.c | 8
-rw-r--r--  fs/ufs/balloc.c | 12
-rw-r--r--  fs/ufs/ialloc.c | 8
-rw-r--r--  fs/ufs/inode.c | 39
-rw-r--r--  fs/ufs/namei.c | 2
-rw-r--r--  fs/ufs/super.c | 11
-rw-r--r--  fs/ufs/ufs.h | 2
148 files changed, 2000 insertions, 1512 deletions
diff --git a/fs/9p/v9fs_vfs.h b/fs/9p/v9fs_vfs.h
index c295ba786ed..f0c7de78e20 100644
--- a/fs/9p/v9fs_vfs.h
+++ b/fs/9p/v9fs_vfs.h
@@ -41,8 +41,8 @@ extern struct file_system_type v9fs_fs_type;
extern const struct address_space_operations v9fs_addr_operations;
extern const struct file_operations v9fs_file_operations;
extern const struct file_operations v9fs_dir_operations;
-extern struct dentry_operations v9fs_dentry_operations;
-extern struct dentry_operations v9fs_cached_dentry_operations;
+extern const struct dentry_operations v9fs_dentry_operations;
+extern const struct dentry_operations v9fs_cached_dentry_operations;
struct inode *v9fs_get_inode(struct super_block *sb, int mode);
ino_t v9fs_qid2ino(struct p9_qid *qid);
diff --git a/fs/9p/vfs_dentry.c b/fs/9p/vfs_dentry.c
index 06dcc7c4f23..d74325295b1 100644
--- a/fs/9p/vfs_dentry.c
+++ b/fs/9p/vfs_dentry.c
@@ -104,12 +104,12 @@ void v9fs_dentry_release(struct dentry *dentry)
}
}
-struct dentry_operations v9fs_cached_dentry_operations = {
+const struct dentry_operations v9fs_cached_dentry_operations = {
.d_delete = v9fs_cached_dentry_delete,
.d_release = v9fs_dentry_release,
};
-struct dentry_operations v9fs_dentry_operations = {
+const struct dentry_operations v9fs_dentry_operations = {
.d_delete = v9fs_dentry_delete,
.d_release = v9fs_dentry_release,
};
diff --git a/fs/9p/vfs_super.c b/fs/9p/vfs_super.c
index 93212e40221..5f8ab8adb5f 100644
--- a/fs/9p/vfs_super.c
+++ b/fs/9p/vfs_super.c
@@ -168,8 +168,9 @@ static int v9fs_get_sb(struct file_system_type *fs_type, int flags,
p9stat_free(st);
kfree(st);
-P9_DPRINTK(P9_DEBUG_VFS, " return simple set mount\n");
- return simple_set_mnt(mnt, sb);
+P9_DPRINTK(P9_DEBUG_VFS, " simple set mount, return 0\n");
+ simple_set_mnt(mnt, sb);
+ return 0;
release_sb:
if (sb) {
diff --git a/fs/Kconfig b/fs/Kconfig
index 93945dd0b1a..cef8b18ceaa 100644
--- a/fs/Kconfig
+++ b/fs/Kconfig
@@ -56,61 +56,7 @@ endif # BLOCK
source "fs/notify/Kconfig"
-config QUOTA
- bool "Quota support"
- help
- If you say Y here, you will be able to set per user limits for disk
- usage (also called disk quotas). Currently, it works for the
- ext2, ext3, and reiserfs file system. ext3 also supports journalled
- quotas for which you don't need to run quotacheck(8) after an unclean
- shutdown.
- For further details, read the Quota mini-HOWTO, available from
- <http://www.tldp.org/docs.html#howto>, or the documentation provided
- with the quota tools. Probably the quota support is only useful for
- multi user systems. If unsure, say N.
-
-config QUOTA_NETLINK_INTERFACE
- bool "Report quota messages through netlink interface"
- depends on QUOTA && NET
- help
- If you say Y here, quota warnings (about exceeding softlimit, reaching
- hardlimit, etc.) will be reported through netlink interface. If unsure,
- say Y.
-
-config PRINT_QUOTA_WARNING
- bool "Print quota warnings to console (OBSOLETE)"
- depends on QUOTA
- default y
- help
- If you say Y here, quota warnings (about exceeding softlimit, reaching
- hardlimit, etc.) will be printed to the process' controlling terminal.
- Note that this behavior is currently deprecated and may go away in
- future. Please use notification via netlink socket instead.
-
-# Generic support for tree structured quota files. Seleted when needed.
-config QUOTA_TREE
- tristate
-
-config QFMT_V1
- tristate "Old quota format support"
- depends on QUOTA
- help
- This quota format was (is) used by kernels earlier than 2.4.22. If
- you have quota working and you don't want to convert to new quota
- format say Y here.
-
-config QFMT_V2
- tristate "Quota format v2 support"
- depends on QUOTA
- select QUOTA_TREE
- help
- This quota format allows using quotas with 32-bit UIDs/GIDs. If you
- need this functionality say Y here.
-
-config QUOTACTL
- bool
- depends on XFS_QUOTA || QUOTA
- default y
+source "fs/quota/Kconfig"
source "fs/autofs/Kconfig"
source "fs/autofs4/Kconfig"
diff --git a/fs/Makefile b/fs/Makefile
index dc20db34867..6e82a307bcd 100644
--- a/fs/Makefile
+++ b/fs/Makefile
@@ -51,11 +51,7 @@ obj-$(CONFIG_FS_POSIX_ACL) += posix_acl.o xattr_acl.o
obj-$(CONFIG_NFS_COMMON) += nfs_common/
obj-$(CONFIG_GENERIC_ACL) += generic_acl.o
-obj-$(CONFIG_QUOTA) += dquot.o
-obj-$(CONFIG_QFMT_V1) += quota_v1.o
-obj-$(CONFIG_QFMT_V2) += quota_v2.o
-obj-$(CONFIG_QUOTA_TREE) += quota_tree.o
-obj-$(CONFIG_QUOTACTL) += quota.o
+obj-y += quota/
obj-$(CONFIG_PROC_FS) += proc/
obj-y += partitions/
diff --git a/fs/adfs/adfs.h b/fs/adfs/adfs.h
index 831157502d5..e0a85dbeeb8 100644
--- a/fs/adfs/adfs.h
+++ b/fs/adfs/adfs.h
@@ -86,7 +86,7 @@ void __adfs_error(struct super_block *sb, const char *function,
/* dir_*.c */
extern const struct inode_operations adfs_dir_inode_operations;
extern const struct file_operations adfs_dir_operations;
-extern struct dentry_operations adfs_dentry_operations;
+extern const struct dentry_operations adfs_dentry_operations;
extern struct adfs_dir_ops adfs_f_dir_ops;
extern struct adfs_dir_ops adfs_fplus_dir_ops;
diff --git a/fs/adfs/dir.c b/fs/adfs/dir.c
index 85a30e92980..e867ccf3724 100644
--- a/fs/adfs/dir.c
+++ b/fs/adfs/dir.c
@@ -263,7 +263,7 @@ adfs_compare(struct dentry *parent, struct qstr *entry, struct qstr *name)
return 0;
}
-struct dentry_operations adfs_dentry_operations = {
+const struct dentry_operations adfs_dentry_operations = {
.d_hash = adfs_hash,
.d_compare = adfs_compare,
};
diff --git a/fs/affs/affs.h b/fs/affs/affs.h
index e9ec915f755..1a2d5e3c7f4 100644
--- a/fs/affs/affs.h
+++ b/fs/affs/affs.h
@@ -199,8 +199,7 @@ extern const struct address_space_operations affs_symlink_aops;
extern const struct address_space_operations affs_aops;
extern const struct address_space_operations affs_aops_ofs;
-extern struct dentry_operations affs_dentry_operations;
-extern struct dentry_operations affs_dentry_operations_intl;
+extern const struct dentry_operations affs_dentry_operations;
static inline void
affs_set_blocksize(struct super_block *sb, int size)
diff --git a/fs/affs/amigaffs.c b/fs/affs/amigaffs.c
index 805573005de..7d0f0a30f7a 100644
--- a/fs/affs/amigaffs.c
+++ b/fs/affs/amigaffs.c
@@ -179,14 +179,18 @@ affs_remove_link(struct dentry *dentry)
affs_lock_dir(dir);
affs_fix_dcache(dentry, link_ino);
retval = affs_remove_hash(dir, link_bh);
- if (retval)
+ if (retval) {
+ affs_unlock_dir(dir);
goto done;
+ }
mark_buffer_dirty_inode(link_bh, inode);
memcpy(AFFS_TAIL(sb, bh)->name, AFFS_TAIL(sb, link_bh)->name, 32);
retval = affs_insert_hash(dir, bh);
- if (retval)
+ if (retval) {
+ affs_unlock_dir(dir);
goto done;
+ }
mark_buffer_dirty_inode(bh, inode);
affs_unlock_dir(dir);
diff --git a/fs/affs/namei.c b/fs/affs/namei.c
index cfcf1b6cf82..960d336ec69 100644
--- a/fs/affs/namei.c
+++ b/fs/affs/namei.c
@@ -19,12 +19,12 @@ static int affs_intl_toupper(int ch);
static int affs_intl_hash_dentry(struct dentry *, struct qstr *);
static int affs_intl_compare_dentry(struct dentry *, struct qstr *, struct qstr *);
-struct dentry_operations affs_dentry_operations = {
+const struct dentry_operations affs_dentry_operations = {
.d_hash = affs_hash_dentry,
.d_compare = affs_compare_dentry,
};
-static struct dentry_operations affs_intl_dentry_operations = {
+static const struct dentry_operations affs_intl_dentry_operations = {
.d_hash = affs_intl_hash_dentry,
.d_compare = affs_intl_compare_dentry,
};
diff --git a/fs/afs/dir.c b/fs/afs/dir.c
index 99cf390641f..9bd757774c9 100644
--- a/fs/afs/dir.c
+++ b/fs/afs/dir.c
@@ -62,7 +62,7 @@ const struct inode_operations afs_dir_inode_operations = {
.setattr = afs_setattr,
};
-static struct dentry_operations afs_fs_dentry_operations = {
+static const struct dentry_operations afs_fs_dentry_operations = {
.d_revalidate = afs_d_revalidate,
.d_delete = afs_d_delete,
.d_release = afs_d_release,
diff --git a/fs/anon_inodes.c b/fs/anon_inodes.c
index 3bbdb9d0237..1dd96d4406c 100644
--- a/fs/anon_inodes.c
+++ b/fs/anon_inodes.c
@@ -48,7 +48,7 @@ static struct file_system_type anon_inode_fs_type = {
.get_sb = anon_inodefs_get_sb,
.kill_sb = kill_anon_super,
};
-static struct dentry_operations anon_inodefs_dentry_operations = {
+static const struct dentry_operations anon_inodefs_dentry_operations = {
.d_delete = anon_inodefs_delete_dentry,
};
diff --git a/fs/attr.c b/fs/attr.c
index f4360192a93..9fe1b1bd30a 100644
--- a/fs/attr.c
+++ b/fs/attr.c
@@ -173,7 +173,8 @@ int notify_change(struct dentry * dentry, struct iattr * attr)
if (!error) {
if ((ia_valid & ATTR_UID && attr->ia_uid != inode->i_uid) ||
(ia_valid & ATTR_GID && attr->ia_gid != inode->i_gid))
- error = DQUOT_TRANSFER(inode, attr) ? -EDQUOT : 0;
+ error = vfs_dq_transfer(inode, attr) ?
+ -EDQUOT : 0;
if (!error)
error = inode_setattr(inode, attr);
}
diff --git a/fs/autofs/root.c b/fs/autofs/root.c
index 8aacade5695..4a1401cea0a 100644
--- a/fs/autofs/root.c
+++ b/fs/autofs/root.c
@@ -192,7 +192,7 @@ static int autofs_revalidate(struct dentry * dentry, struct nameidata *nd)
return 1;
}
-static struct dentry_operations autofs_dentry_operations = {
+static const struct dentry_operations autofs_dentry_operations = {
.d_revalidate = autofs_revalidate,
};
diff --git a/fs/autofs4/inode.c b/fs/autofs4/inode.c
index 716e12b627b..69c8142da83 100644
--- a/fs/autofs4/inode.c
+++ b/fs/autofs4/inode.c
@@ -310,7 +310,7 @@ static struct autofs_info *autofs4_mkroot(struct autofs_sb_info *sbi)
return ino;
}
-static struct dentry_operations autofs4_sb_dentry_operations = {
+static const struct dentry_operations autofs4_sb_dentry_operations = {
.d_release = autofs4_dentry_release,
};
diff --git a/fs/autofs4/root.c b/fs/autofs4/root.c
index 2a41c2a7fc5..74b1469a950 100644
--- a/fs/autofs4/root.c
+++ b/fs/autofs4/root.c
@@ -349,13 +349,13 @@ void autofs4_dentry_release(struct dentry *de)
}
/* For dentries of directories in the root dir */
-static struct dentry_operations autofs4_root_dentry_operations = {
+static const struct dentry_operations autofs4_root_dentry_operations = {
.d_revalidate = autofs4_revalidate,
.d_release = autofs4_dentry_release,
};
/* For other dentries */
-static struct dentry_operations autofs4_dentry_operations = {
+static const struct dentry_operations autofs4_dentry_operations = {
.d_revalidate = autofs4_revalidate,
.d_release = autofs4_dentry_release,
};
diff --git a/fs/bio-integrity.c b/fs/bio-integrity.c
index fe2b1aa2464..31c46a241ba 100644
--- a/fs/bio-integrity.c
+++ b/fs/bio-integrity.c
@@ -26,23 +26,23 @@
#include <linux/workqueue.h>
static struct kmem_cache *bio_integrity_slab __read_mostly;
+static mempool_t *bio_integrity_pool;
+static struct bio_set *integrity_bio_set;
static struct workqueue_struct *kintegrityd_wq;
/**
- * bio_integrity_alloc_bioset - Allocate integrity payload and attach it to bio
+ * bio_integrity_alloc - Allocate integrity payload and attach it to bio
* @bio: bio to attach integrity metadata to
* @gfp_mask: Memory allocation mask
* @nr_vecs: Number of integrity metadata scatter-gather elements
- * @bs: bio_set to allocate from
*
* Description: This function prepares a bio for attaching integrity
* metadata. nr_vecs specifies the maximum number of pages containing
* integrity metadata that can be attached.
*/
-struct bio_integrity_payload *bio_integrity_alloc_bioset(struct bio *bio,
- gfp_t gfp_mask,
- unsigned int nr_vecs,
- struct bio_set *bs)
+struct bio_integrity_payload *bio_integrity_alloc(struct bio *bio,
+ gfp_t gfp_mask,
+ unsigned int nr_vecs)
{
struct bio_integrity_payload *bip;
struct bio_vec *iv;
@@ -50,7 +50,7 @@ struct bio_integrity_payload *bio_integrity_alloc_bioset(struct bio *bio,
BUG_ON(bio == NULL);
- bip = mempool_alloc(bs->bio_integrity_pool, gfp_mask);
+ bip = mempool_alloc(bio_integrity_pool, gfp_mask);
if (unlikely(bip == NULL)) {
printk(KERN_ERR "%s: could not alloc bip\n", __func__);
return NULL;
@@ -58,10 +58,10 @@ struct bio_integrity_payload *bio_integrity_alloc_bioset(struct bio *bio,
memset(bip, 0, sizeof(*bip));
- iv = bvec_alloc_bs(gfp_mask, nr_vecs, &idx, bs);
+ iv = bvec_alloc_bs(gfp_mask, nr_vecs, &idx, integrity_bio_set);
if (unlikely(iv == NULL)) {
printk(KERN_ERR "%s: could not alloc bip_vec\n", __func__);
- mempool_free(bip, bs->bio_integrity_pool);
+ mempool_free(bip, bio_integrity_pool);
return NULL;
}
@@ -72,35 +72,16 @@ struct bio_integrity_payload *bio_integrity_alloc_bioset(struct bio *bio,
return bip;
}
-EXPORT_SYMBOL(bio_integrity_alloc_bioset);
-
-/**
- * bio_integrity_alloc - Allocate integrity payload and attach it to bio
- * @bio: bio to attach integrity metadata to
- * @gfp_mask: Memory allocation mask
- * @nr_vecs: Number of integrity metadata scatter-gather elements
- *
- * Description: This function prepares a bio for attaching integrity
- * metadata. nr_vecs specifies the maximum number of pages containing
- * integrity metadata that can be attached.
- */
-struct bio_integrity_payload *bio_integrity_alloc(struct bio *bio,
- gfp_t gfp_mask,
- unsigned int nr_vecs)
-{
- return bio_integrity_alloc_bioset(bio, gfp_mask, nr_vecs, fs_bio_set);
-}
EXPORT_SYMBOL(bio_integrity_alloc);
/**
* bio_integrity_free - Free bio integrity payload
* @bio: bio containing bip to be freed
- * @bs: bio_set this bio was allocated from
*
* Description: Used to free the integrity portion of a bio. Usually
* called from bio_free().
*/
-void bio_integrity_free(struct bio *bio, struct bio_set *bs)
+void bio_integrity_free(struct bio *bio)
{
struct bio_integrity_payload *bip = bio->bi_integrity;
@@ -111,8 +92,8 @@ void bio_integrity_free(struct bio *bio, struct bio_set *bs)
&& bip->bip_buf != NULL)
kfree(bip->bip_buf);
- bvec_free_bs(bs, bip->bip_vec, bip->bip_pool);
- mempool_free(bip, bs->bio_integrity_pool);
+ bvec_free_bs(integrity_bio_set, bip->bip_vec, bip->bip_pool);
+ mempool_free(bip, bio_integrity_pool);
bio->bi_integrity = NULL;
}
@@ -686,19 +667,17 @@ EXPORT_SYMBOL(bio_integrity_split);
* @bio: New bio
* @bio_src: Original bio
* @gfp_mask: Memory allocation mask
- * @bs: bio_set to allocate bip from
*
* Description: Called to allocate a bip when cloning a bio
*/
-int bio_integrity_clone(struct bio *bio, struct bio *bio_src,
- gfp_t gfp_mask, struct bio_set *bs)
+int bio_integrity_clone(struct bio *bio, struct bio *bio_src, gfp_t gfp_mask)
{
struct bio_integrity_payload *bip_src = bio_src->bi_integrity;
struct bio_integrity_payload *bip;
BUG_ON(bip_src == NULL);
- bip = bio_integrity_alloc_bioset(bio, gfp_mask, bip_src->bip_vcnt, bs);
+ bip = bio_integrity_alloc(bio, gfp_mask, bip_src->bip_vcnt);
if (bip == NULL)
return -EIO;
@@ -714,37 +693,25 @@ int bio_integrity_clone(struct bio *bio, struct bio *bio_src,
}
EXPORT_SYMBOL(bio_integrity_clone);
-int bioset_integrity_create(struct bio_set *bs, int pool_size)
+static int __init bio_integrity_init(void)
{
- bs->bio_integrity_pool = mempool_create_slab_pool(pool_size,
- bio_integrity_slab);
- if (!bs->bio_integrity_pool)
- return -1;
-
- return 0;
-}
-EXPORT_SYMBOL(bioset_integrity_create);
+ kintegrityd_wq = create_workqueue("kintegrityd");
-void bioset_integrity_free(struct bio_set *bs)
-{
- if (bs->bio_integrity_pool)
- mempool_destroy(bs->bio_integrity_pool);
-}
-EXPORT_SYMBOL(bioset_integrity_free);
+ if (!kintegrityd_wq)
+ panic("Failed to create kintegrityd\n");
-void __init bio_integrity_init_slab(void)
-{
bio_integrity_slab = KMEM_CACHE(bio_integrity_payload,
SLAB_HWCACHE_ALIGN|SLAB_PANIC);
-}
-static int __init integrity_init(void)
-{
- kintegrityd_wq = create_workqueue("kintegrityd");
+ bio_integrity_pool = mempool_create_slab_pool(BIO_POOL_SIZE,
+ bio_integrity_slab);
+ if (!bio_integrity_pool)
+ panic("bio_integrity: can't allocate bip pool\n");
- if (!kintegrityd_wq)
- panic("Failed to create kintegrityd\n");
+ integrity_bio_set = bioset_create(BIO_POOL_SIZE, 0);
+ if (!integrity_bio_set)
+ panic("bio_integrity: can't allocate bio_set\n");
return 0;
}
-subsys_initcall(integrity_init);
+subsys_initcall(bio_integrity_init);
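
The hunks above move the integrity payload to a private mempool and bio_set, so the allocate, free and clone entry points lose their bio_set argument. A minimal caller sketch under the new interface (nr_vecs, clone and the surrounding error handling are assumed caller context, not part of this patch):

	struct bio_integrity_payload *bip;
	int ret;

	/* old call was: bio_integrity_alloc_bioset(bio, GFP_NOIO, nr_vecs, fs_bio_set) */
	bip = bio_integrity_alloc(bio, GFP_NOIO, nr_vecs);
	if (!bip)
		return -EIO;

	/* cloning likewise drops the bio_set argument */
	ret = bio_integrity_clone(clone, bio, GFP_NOIO);

	/* bio_integrity_free(bio) is now invoked from bio_free() with no bio_set */
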
diff --git a/fs/bio.c b/fs/bio.c
index d4f06327c81..a040cde7f6f 100644
--- a/fs/bio.c
+++ b/fs/bio.c
@@ -248,7 +248,7 @@ void bio_free(struct bio *bio, struct bio_set *bs)
bvec_free_bs(bs, bio->bi_io_vec, BIO_POOL_IDX(bio));
if (bio_integrity(bio))
- bio_integrity_free(bio, bs);
+ bio_integrity_free(bio);
/*
* If we have front padding, adjust the bio pointer before freeing
@@ -301,48 +301,51 @@ void bio_init(struct bio *bio)
**/
struct bio *bio_alloc_bioset(gfp_t gfp_mask, int nr_iovecs, struct bio_set *bs)
{
+ struct bio_vec *bvl = NULL;
struct bio *bio = NULL;
- void *uninitialized_var(p);
+ unsigned long idx = 0;
+ void *p = NULL;
if (bs) {
p = mempool_alloc(bs->bio_pool, gfp_mask);
-
- if (p)
- bio = p + bs->front_pad;
- } else
+ if (!p)
+ goto err;
+ bio = p + bs->front_pad;
+ } else {
bio = kmalloc(sizeof(*bio), gfp_mask);
+ if (!bio)
+ goto err;
+ }
- if (likely(bio)) {
- struct bio_vec *bvl = NULL;
-
- bio_init(bio);
- if (likely(nr_iovecs)) {
- unsigned long uninitialized_var(idx);
-
- if (nr_iovecs <= BIO_INLINE_VECS) {
- idx = 0;
- bvl = bio->bi_inline_vecs;
- nr_iovecs = BIO_INLINE_VECS;
- } else {
- bvl = bvec_alloc_bs(gfp_mask, nr_iovecs, &idx,
- bs);
- nr_iovecs = bvec_nr_vecs(idx);
- }
- if (unlikely(!bvl)) {
- if (bs)
- mempool_free(p, bs->bio_pool);
- else
- kfree(bio);
- bio = NULL;
- goto out;
- }
- bio->bi_flags |= idx << BIO_POOL_OFFSET;
- bio->bi_max_vecs = nr_iovecs;
- }
- bio->bi_io_vec = bvl;
+ bio_init(bio);
+
+ if (unlikely(!nr_iovecs))
+ goto out_set;
+
+ if (nr_iovecs <= BIO_INLINE_VECS) {
+ bvl = bio->bi_inline_vecs;
+ nr_iovecs = BIO_INLINE_VECS;
+ } else {
+ bvl = bvec_alloc_bs(gfp_mask, nr_iovecs, &idx, bs);
+ if (unlikely(!bvl))
+ goto err_free;
+
+ nr_iovecs = bvec_nr_vecs(idx);
}
-out:
+ bio->bi_flags |= idx << BIO_POOL_OFFSET;
+ bio->bi_max_vecs = nr_iovecs;
+out_set:
+ bio->bi_io_vec = bvl;
+
return bio;
+
+err_free:
+ if (bs)
+ mempool_free(p, bs->bio_pool);
+ else
+ kfree(bio);
+err:
+ return NULL;
}
struct bio *bio_alloc(gfp_t gfp_mask, int nr_iovecs)
@@ -463,7 +466,7 @@ struct bio *bio_clone(struct bio *bio, gfp_t gfp_mask)
if (bio_integrity(bio)) {
int ret;
- ret = bio_integrity_clone(b, bio, gfp_mask, fs_bio_set);
+ ret = bio_integrity_clone(b, bio, gfp_mask);
if (ret < 0) {
bio_put(b);
@@ -1526,7 +1529,6 @@ void bioset_free(struct bio_set *bs)
if (bs->bio_pool)
mempool_destroy(bs->bio_pool);
- bioset_integrity_free(bs);
biovec_free_pools(bs);
bio_put_slab(bs);
@@ -1567,9 +1569,6 @@ struct bio_set *bioset_create(unsigned int pool_size, unsigned int front_pad)
if (!bs->bio_pool)
goto bad;
- if (bioset_integrity_create(bs, pool_size))
- goto bad;
-
if (!biovec_create_pools(bs, pool_size))
return bs;
@@ -1586,6 +1585,13 @@ static void __init biovec_init_slabs(void)
int size;
struct biovec_slab *bvs = bvec_slabs + i;
+#ifndef CONFIG_BLK_DEV_INTEGRITY
+ if (bvs->nr_vecs <= BIO_INLINE_VECS) {
+ bvs->slab = NULL;
+ continue;
+ }
+#endif
+
size = bvs->nr_vecs * sizeof(struct bio_vec);
bvs->slab = kmem_cache_create(bvs->name, size, 0,
SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
@@ -1600,7 +1606,6 @@ static int __init init_bio(void)
if (!bio_slabs)
panic("bio: can't allocate bios\n");
- bio_integrity_init_slab();
biovec_init_slabs();
fs_bio_set = bioset_create(BIO_POOL_SIZE, 0);
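
With the allocator restructured around explicit error labels, a NULL return from bio_alloc_bioset() uniformly means the bio or its bio_vec allocation failed, whether or not a bio_set was supplied; successful allocations behave as before. A minimal caller sketch (nr_pages is assumed context):

	struct bio *bio;

	bio = bio_alloc_bioset(GFP_NOIO, nr_pages, fs_bio_set);
	if (!bio)
		return -ENOMEM;	/* bio or bvec allocation failed */

	/* ... add pages with bio_add_page() and submit as usual ... */
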
diff --git a/fs/block_dev.c b/fs/block_dev.c
index b3c1efff5e1..8c3c6899ccf 100644
--- a/fs/block_dev.c
+++ b/fs/block_dev.c
@@ -18,6 +18,7 @@
#include <linux/module.h>
#include <linux/blkpg.h>
#include <linux/buffer_head.h>
+#include <linux/pagevec.h>
#include <linux/writeback.h>
#include <linux/mpage.h>
#include <linux/mount.h>
@@ -174,6 +175,151 @@ blkdev_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov,
iov, offset, nr_segs, blkdev_get_blocks, NULL);
}
+/*
+ * Write out and wait upon all the dirty data associated with a block
+ * device via its mapping. Does not take the superblock lock.
+ */
+int sync_blockdev(struct block_device *bdev)
+{
+ int ret = 0;
+
+ if (bdev)
+ ret = filemap_write_and_wait(bdev->bd_inode->i_mapping);
+ return ret;
+}
+EXPORT_SYMBOL(sync_blockdev);
+
+/*
+ * Write out and wait upon all dirty data associated with this
+ * device. Filesystem data as well as the underlying block
+ * device. Takes the superblock lock.
+ */
+int fsync_bdev(struct block_device *bdev)
+{
+ struct super_block *sb = get_super(bdev);
+ if (sb) {
+ int res = fsync_super(sb);
+ drop_super(sb);
+ return res;
+ }
+ return sync_blockdev(bdev);
+}
+
+/**
+ * freeze_bdev -- lock a filesystem and force it into a consistent state
+ * @bdev: blockdevice to lock
+ *
+ * This takes the block device bd_mount_sem to make sure no new mounts
+ * happen on bdev until thaw_bdev() is called.
+ * If a superblock is found on this device, we take the s_umount semaphore
+ * on it to make sure nobody unmounts until the snapshot creation is done.
+ * The reference counter (bd_fsfreeze_count) guarantees that only the last
+ * unfreeze process can unfreeze the frozen filesystem actually when multiple
+ * freeze requests arrive simultaneously. It counts up in freeze_bdev() and
+ * count down in thaw_bdev(). When it becomes 0, thaw_bdev() will unfreeze
+ * actually.
+ */
+struct super_block *freeze_bdev(struct block_device *bdev)
+{
+ struct super_block *sb;
+ int error = 0;
+
+ mutex_lock(&bdev->bd_fsfreeze_mutex);
+ if (bdev->bd_fsfreeze_count > 0) {
+ bdev->bd_fsfreeze_count++;
+ sb = get_super(bdev);
+ mutex_unlock(&bdev->bd_fsfreeze_mutex);
+ return sb;
+ }
+ bdev->bd_fsfreeze_count++;
+
+ down(&bdev->bd_mount_sem);
+ sb = get_super(bdev);
+ if (sb && !(sb->s_flags & MS_RDONLY)) {
+ sb->s_frozen = SB_FREEZE_WRITE;
+ smp_wmb();
+
+ __fsync_super(sb);
+
+ sb->s_frozen = SB_FREEZE_TRANS;
+ smp_wmb();
+
+ sync_blockdev(sb->s_bdev);
+
+ if (sb->s_op->freeze_fs) {
+ error = sb->s_op->freeze_fs(sb);
+ if (error) {
+ printk(KERN_ERR
+ "VFS:Filesystem freeze failed\n");
+ sb->s_frozen = SB_UNFROZEN;
+ drop_super(sb);
+ up(&bdev->bd_mount_sem);
+ bdev->bd_fsfreeze_count--;
+ mutex_unlock(&bdev->bd_fsfreeze_mutex);
+ return ERR_PTR(error);
+ }
+ }
+ }
+
+ sync_blockdev(bdev);
+ mutex_unlock(&bdev->bd_fsfreeze_mutex);
+
+ return sb; /* thaw_bdev releases s->s_umount and bd_mount_sem */
+}
+EXPORT_SYMBOL(freeze_bdev);
+
+/**
+ * thaw_bdev -- unlock filesystem
+ * @bdev: blockdevice to unlock
+ * @sb: associated superblock
+ *
+ * Unlocks the filesystem and marks it writeable again after freeze_bdev().
+ */
+int thaw_bdev(struct block_device *bdev, struct super_block *sb)
+{
+ int error = 0;
+
+ mutex_lock(&bdev->bd_fsfreeze_mutex);
+ if (!bdev->bd_fsfreeze_count) {
+ mutex_unlock(&bdev->bd_fsfreeze_mutex);
+ return -EINVAL;
+ }
+
+ bdev->bd_fsfreeze_count--;
+ if (bdev->bd_fsfreeze_count > 0) {
+ if (sb)
+ drop_super(sb);
+ mutex_unlock(&bdev->bd_fsfreeze_mutex);
+ return 0;
+ }
+
+ if (sb) {
+ BUG_ON(sb->s_bdev != bdev);
+ if (!(sb->s_flags & MS_RDONLY)) {
+ if (sb->s_op->unfreeze_fs) {
+ error = sb->s_op->unfreeze_fs(sb);
+ if (error) {
+ printk(KERN_ERR
+ "VFS:Filesystem thaw failed\n");
+ sb->s_frozen = SB_FREEZE_TRANS;
+ bdev->bd_fsfreeze_count++;
+ mutex_unlock(&bdev->bd_fsfreeze_mutex);
+ return error;
+ }
+ }
+ sb->s_frozen = SB_UNFROZEN;
+ smp_wmb();
+ wake_up(&sb->s_wait_unfrozen);
+ }
+ drop_super(sb);
+ }
+
+ up(&bdev->bd_mount_sem);
+ mutex_unlock(&bdev->bd_fsfreeze_mutex);
+ return 0;
+}
+EXPORT_SYMBOL(thaw_bdev);
+
static int blkdev_writepage(struct page *page, struct writeback_control *wbc)
{
return block_write_full_page(page, blkdev_get_block, wbc);
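
sync_blockdev(), fsync_bdev(), freeze_bdev() and thaw_bdev() move here from fs/buffer.c (removed below) without changing their calling convention. A minimal sketch of the usual snapshot-style caller, assuming bdev is an already-opened struct block_device:

	struct super_block *sb;

	sb = freeze_bdev(bdev);		/* syncs and blocks writers; NULL if no fs mounted */
	if (IS_ERR(sb))
		return PTR_ERR(sb);

	/* ... capture the consistent on-disk state here ... */

	return thaw_bdev(bdev, sb);	/* releases bd_mount_sem and unfreezes */
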
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index 3e18175248e..6ec80c0fc86 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -2385,7 +2385,7 @@ void btrfs_btree_balance_dirty(struct btrfs_root *root, unsigned long nr)
unsigned long thresh = 32 * 1024 * 1024;
tree = &BTRFS_I(root->fs_info->btree_inode)->io_tree;
- if (current_is_pdflush() || current->flags & PF_MEMALLOC)
+ if (current->flags & PF_MEMALLOC)
return;
num_dirty = count_range_bits(tree, &start, (u64)-1,
diff --git a/fs/buffer.c b/fs/buffer.c
index 891e1c78e4f..a2fd743d97c 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -166,151 +166,6 @@ void end_buffer_write_sync(struct buffer_head *bh, int uptodate)
}
/*
- * Write out and wait upon all the dirty data associated with a block
- * device via its mapping. Does not take the superblock lock.
- */
-int sync_blockdev(struct block_device *bdev)
-{
- int ret = 0;
-
- if (bdev)
- ret = filemap_write_and_wait(bdev->bd_inode->i_mapping);
- return ret;
-}
-EXPORT_SYMBOL(sync_blockdev);
-
-/*
- * Write out and wait upon all dirty data associated with this
- * device. Filesystem data as well as the underlying block
- * device. Takes the superblock lock.
- */
-int fsync_bdev(struct block_device *bdev)
-{
- struct super_block *sb = get_super(bdev);
- if (sb) {
- int res = fsync_super(sb);
- drop_super(sb);
- return res;
- }
- return sync_blockdev(bdev);
-}
-
-/**
- * freeze_bdev -- lock a filesystem and force it into a consistent state
- * @bdev: blockdevice to lock
- *
- * This takes the block device bd_mount_sem to make sure no new mounts
- * happen on bdev until thaw_bdev() is called.
- * If a superblock is found on this device, we take the s_umount semaphore
- * on it to make sure nobody unmounts until the snapshot creation is done.
- * The reference counter (bd_fsfreeze_count) guarantees that only the last
- * unfreeze process can unfreeze the frozen filesystem actually when multiple
- * freeze requests arrive simultaneously. It counts up in freeze_bdev() and
- * count down in thaw_bdev(). When it becomes 0, thaw_bdev() will unfreeze
- * actually.
- */
-struct super_block *freeze_bdev(struct block_device *bdev)
-{
- struct super_block *sb;
- int error = 0;
-
- mutex_lock(&bdev->bd_fsfreeze_mutex);
- if (bdev->bd_fsfreeze_count > 0) {
- bdev->bd_fsfreeze_count++;
- sb = get_super(bdev);
- mutex_unlock(&bdev->bd_fsfreeze_mutex);
- return sb;
- }
- bdev->bd_fsfreeze_count++;
-
- down(&bdev->bd_mount_sem);
- sb = get_super(bdev);
- if (sb && !(sb->s_flags & MS_RDONLY)) {
- sb->s_frozen = SB_FREEZE_WRITE;
- smp_wmb();
-
- __fsync_super(sb);
-
- sb->s_frozen = SB_FREEZE_TRANS;
- smp_wmb();
-
- sync_blockdev(sb->s_bdev);
-
- if (sb->s_op->freeze_fs) {
- error = sb->s_op->freeze_fs(sb);
- if (error) {
- printk(KERN_ERR
- "VFS:Filesystem freeze failed\n");
- sb->s_frozen = SB_UNFROZEN;
- drop_super(sb);
- up(&bdev->bd_mount_sem);
- bdev->bd_fsfreeze_count--;
- mutex_unlock(&bdev->bd_fsfreeze_mutex);
- return ERR_PTR(error);
- }
- }
- }
-
- sync_blockdev(bdev);
- mutex_unlock(&bdev->bd_fsfreeze_mutex);
-
- return sb; /* thaw_bdev releases s->s_umount and bd_mount_sem */
-}
-EXPORT_SYMBOL(freeze_bdev);
-
-/**
- * thaw_bdev -- unlock filesystem
- * @bdev: blockdevice to unlock
- * @sb: associated superblock
- *
- * Unlocks the filesystem and marks it writeable again after freeze_bdev().
- */
-int thaw_bdev(struct block_device *bdev, struct super_block *sb)
-{
- int error = 0;
-
- mutex_lock(&bdev->bd_fsfreeze_mutex);
- if (!bdev->bd_fsfreeze_count) {
- mutex_unlock(&bdev->bd_fsfreeze_mutex);
- return -EINVAL;
- }
-
- bdev->bd_fsfreeze_count--;
- if (bdev->bd_fsfreeze_count > 0) {
- if (sb)
- drop_super(sb);
- mutex_unlock(&bdev->bd_fsfreeze_mutex);
- return 0;
- }
-
- if (sb) {
- BUG_ON(sb->s_bdev != bdev);
- if (!(sb->s_flags & MS_RDONLY)) {
- if (sb->s_op->unfreeze_fs) {
- error = sb->s_op->unfreeze_fs(sb);
- if (error) {
- printk(KERN_ERR
- "VFS:Filesystem thaw failed\n");
- sb->s_frozen = SB_FREEZE_TRANS;
- bdev->bd_fsfreeze_count++;
- mutex_unlock(&bdev->bd_fsfreeze_mutex);
- return error;
- }
- }
- sb->s_frozen = SB_UNFROZEN;
- smp_wmb();
- wake_up(&sb->s_wait_unfrozen);
- }
- drop_super(sb);
- }
-
- up(&bdev->bd_mount_sem);
- mutex_unlock(&bdev->bd_fsfreeze_mutex);
- return 0;
-}
-EXPORT_SYMBOL(thaw_bdev);
-
-/*
* Various filesystems appear to want __find_get_block to be non-blocking.
* But it's the page lock which protects the buffers. To get around this,
* we get exclusion from try_to_free_buffers with the blockdev mapping's
diff --git a/fs/cifs/CHANGES b/fs/cifs/CHANGES
index 851388fafc7..65984006192 100644
--- a/fs/cifs/CHANGES
+++ b/fs/cifs/CHANGES
@@ -6,7 +6,16 @@ the server to treat subsequent connections, especially those that
are authenticated as guest, as reconnections, invalidating the earlier
user's smb session. This fix allows cifs to mount multiple times to the
same server with different userids without risking invalidating earlier
-established security contexts.
+established security contexts. fsync now sends SMB Flush operation
+to better ensure that we wait for server to write all of the data to
+server disk (not just write it over the network). Add new mount
+parameter to allow user to disable sending the (slow) SMB flush on
+fsync if desired (fsync still flushes all cached write data to the server).
+Posix file open support added (turned off after one attempt if server
+fails to support it properly, as with Samba server versions prior to 3.3.2)
+Fix "redzone overwritten" bug in cifs_put_tcon (CIFSTcon may allocate too
+little memory for the "nativeFileSystem" field returned by the server
+during mount).
Version 1.56
------------
diff --git a/fs/cifs/Kconfig b/fs/cifs/Kconfig
index 341a98965bd..6994a0f54f0 100644
--- a/fs/cifs/Kconfig
+++ b/fs/cifs/Kconfig
@@ -118,6 +118,18 @@ config CIFS_DEBUG2
option can be turned off unless you are debugging
cifs problems. If unsure, say N.
+config CIFS_DFS_UPCALL
+ bool "DFS feature support"
+ depends on CIFS && KEYS
+ help
+ Distributed File System (DFS) support is used to access shares
+ transparently in an enterprise name space, even if the share
+ moves to a different server. This feature also enables
+ an upcall mechanism for CIFS which contacts userspace helper
+ utilities to provide server name resolution (host names to
+ IP addresses) which is needed for implicit mounts of DFS junction
+ points. If unsure, say N.
+
config CIFS_EXPERIMENTAL
bool "CIFS Experimental Features (EXPERIMENTAL)"
depends on CIFS && EXPERIMENTAL
@@ -131,12 +143,3 @@ config CIFS_EXPERIMENTAL
(which is disabled by default). See the file fs/cifs/README
for more details. If unsure, say N.
-config CIFS_DFS_UPCALL
- bool "DFS feature support (EXPERIMENTAL)"
- depends on CIFS_EXPERIMENTAL
- depends on KEYS
- help
- Enables an upcall mechanism for CIFS which contacts userspace
- helper utilities to provide server name resolution (host names to
- IP addresses) which is needed for implicit mounts of DFS junction
- points. If unsure, say N.
diff --git a/fs/cifs/README b/fs/cifs/README
index da4515e3be2..07434181623 100644
--- a/fs/cifs/README
+++ b/fs/cifs/README
@@ -472,6 +472,19 @@ A partial list of the supported mount options follows:
even if the cifs server would support posix advisory locks.
"forcemand" is accepted as a shorter form of this mount
option.
+ nostrictsync If this mount option is set, when an application does an
+ fsync call then the cifs client does not send an SMB Flush
+ to the server (to force the server to write all dirty data
+ for this file immediately to disk), although cifs still sends
+ all dirty (cached) file data to the server and waits for the
+ server to respond to the write. Since SMB Flush can be
+ very slow, and some servers may be reliable enough (to risk
+ delaying slightly flushing the data to disk on the server),
+ turning on this option may be useful to improve performance for
+ applications that fsync too much, at a small risk of server
+ crash. If this mount option is not set, by default cifs will
+ send an SMB flush request (and wait for a response) on every
+ fsync call.
nodfs Disable DFS (global name space support) even if the
server claims to support it. This can help work around
a problem with parsing of DFS paths with Samba server
@@ -692,13 +705,14 @@ require this helper. Note that NTLMv2 security (which does not require the
cifs.upcall helper program), instead of using Kerberos, is sufficient for
some use cases.
-Enabling DFS support (used to access shares transparently in an MS-DFS
-global name space) requires that CONFIG_CIFS_EXPERIMENTAL be enabled. In
-addition, DFS support for target shares which are specified as UNC
+DFS support allows transparent redirection to shares in an MS-DFS name space.
+In addition, DFS support for target shares which are specified as UNC
names which begin with host names (rather than IP addresses) requires
a user space helper (such as cifs.upcall) to be present in order to
translate host names to ip address, and the user space helper must also
-be configured in the file /etc/request-key.conf
+be configured in the file /etc/request-key.conf. Samba, Windows servers and
+many NAS appliances support DFS as a way of constructing a global name
+space to ease network configuration and improve reliability.
To use cifs Kerberos and DFS support, the Linux keyutils package should be
installed and something like the following lines should be added to the
diff --git a/fs/cifs/cifs_debug.c b/fs/cifs/cifs_debug.c
index 490e34bbf27..877e4d9a115 100644
--- a/fs/cifs/cifs_debug.c
+++ b/fs/cifs/cifs_debug.c
@@ -340,6 +340,8 @@ static int cifs_stats_proc_show(struct seq_file *m, void *v)
seq_printf(m, "\nWrites: %d Bytes: %lld",
atomic_read(&tcon->num_writes),
(long long)(tcon->bytes_written));
+ seq_printf(m, "\nFlushes: %d",
+ atomic_read(&tcon->num_flushes));
seq_printf(m, "\nLocks: %d HardLinks: %d "
"Symlinks: %d",
atomic_read(&tcon->num_locks),
diff --git a/fs/cifs/cifs_dfs_ref.c b/fs/cifs/cifs_dfs_ref.c
index 85c0a74d034..5fdbf8a1447 100644
--- a/fs/cifs/cifs_dfs_ref.c
+++ b/fs/cifs/cifs_dfs_ref.c
@@ -104,9 +104,9 @@ static char *cifs_get_share_name(const char *node_name)
/**
- * compose_mount_options - creates mount options for refferral
+ * cifs_compose_mount_options - creates mount options for refferral
* @sb_mountdata: parent/root DFS mount options (template)
- * @dentry: point where we are going to mount
+ * @fullpath: full path in UNC format
* @ref: server's referral
* @devname: pointer for saving device name
*
@@ -116,8 +116,8 @@ static char *cifs_get_share_name(const char *node_name)
* Returns: pointer to new mount options or ERR_PTR.
* Caller is responcible for freeing retunrned value if it is not error.
*/
-static char *compose_mount_options(const char *sb_mountdata,
- struct dentry *dentry,
+char *cifs_compose_mount_options(const char *sb_mountdata,
+ const char *fullpath,
const struct dfs_info3_param *ref,
char **devname)
{
@@ -128,7 +128,6 @@ static char *compose_mount_options(const char *sb_mountdata,
char *srvIP = NULL;
char sep = ',';
int off, noff;
- char *fullpath;
if (sb_mountdata == NULL)
return ERR_PTR(-EINVAL);
@@ -202,17 +201,6 @@ static char *compose_mount_options(const char *sb_mountdata,
goto compose_mount_options_err;
}
- /*
- * this function gives us a path with a double backslash prefix. We
- * require a single backslash for DFS. Temporarily increment fullpath
- * to put it in the proper form and decrement before freeing it.
- */
- fullpath = build_path_from_dentry(dentry);
- if (!fullpath) {
- rc = -ENOMEM;
- goto compose_mount_options_err;
- }
- ++fullpath;
tkn_e = strchr(tkn_e + 1, '\\');
if (tkn_e || (strlen(fullpath) - ref->path_consumed)) {
strncat(mountdata, &sep, 1);
@@ -221,8 +209,6 @@ static char *compose_mount_options(const char *sb_mountdata,
strcat(mountdata, tkn_e + 1);
strcat(mountdata, fullpath + ref->path_consumed);
}
- --fullpath;
- kfree(fullpath);
/*cFYI(1,("%s: parent mountdata: %s", __func__,sb_mountdata));*/
/*cFYI(1, ("%s: submount mountdata: %s", __func__, mountdata ));*/
@@ -245,10 +231,20 @@ static struct vfsmount *cifs_dfs_do_refmount(const struct vfsmount *mnt_parent,
struct vfsmount *mnt;
char *mountdata;
char *devname = NULL;
+ char *fullpath;
cifs_sb = CIFS_SB(dentry->d_inode->i_sb);
- mountdata = compose_mount_options(cifs_sb->mountdata,
- dentry, ref, &devname);
+ /*
+ * this function gives us a path with a double backslash prefix. We
+ * require a single backslash for DFS.
+ */
+ fullpath = build_path_from_dentry(dentry);
+ if (!fullpath)
+ return ERR_PTR(-ENOMEM);
+
+ mountdata = cifs_compose_mount_options(cifs_sb->mountdata,
+ fullpath + 1, ref, &devname);
+ kfree(fullpath);
if (IS_ERR(mountdata))
return (struct vfsmount *)mountdata;
diff --git a/fs/cifs/cifs_fs_sb.h b/fs/cifs/cifs_fs_sb.h
index c4c306f7b06..4797787c6a4 100644
--- a/fs/cifs/cifs_fs_sb.h
+++ b/fs/cifs/cifs_fs_sb.h
@@ -32,6 +32,7 @@
#define CIFS_MOUNT_OVERR_GID 0x800 /* override gid returned from server */
#define CIFS_MOUNT_DYNPERM 0x1000 /* allow in-memory only mode setting */
#define CIFS_MOUNT_NOPOSIXBRL 0x2000 /* mandatory not posix byte range lock */
+#define CIFS_MOUNT_NOSSYNC 0x4000 /* don't do slow SMBflush on every sync*/
struct cifs_sb_info {
struct cifsTconInfo *tcon; /* primary mount */
diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
index 13ea53251dc..38491fd3871 100644
--- a/fs/cifs/cifsfs.c
+++ b/fs/cifs/cifsfs.c
@@ -606,7 +606,8 @@ cifs_get_sb(struct file_system_type *fs_type,
return rc;
}
sb->s_flags |= MS_ACTIVE;
- return simple_set_mnt(mnt, sb);
+ simple_set_mnt(mnt, sb);
+ return 0;
}
static ssize_t cifs_file_aio_write(struct kiocb *iocb, const struct iovec *iov,
diff --git a/fs/cifs/cifsfs.h b/fs/cifs/cifsfs.h
index 2b1d28a9ee2..77e190dc288 100644
--- a/fs/cifs/cifsfs.h
+++ b/fs/cifs/cifsfs.h
@@ -78,8 +78,8 @@ extern int cifs_dir_open(struct inode *inode, struct file *file);
extern int cifs_readdir(struct file *file, void *direntry, filldir_t filldir);
/* Functions related to dir entries */
-extern struct dentry_operations cifs_dentry_ops;
-extern struct dentry_operations cifs_ci_dentry_ops;
+extern const struct dentry_operations cifs_dentry_ops;
+extern const struct dentry_operations cifs_ci_dentry_ops;
/* Functions related to symlinks */
extern void *cifs_follow_link(struct dentry *direntry, struct nameidata *nd);
diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
index e004f6db5fc..9fbf4dff5da 100644
--- a/fs/cifs/cifsglob.h
+++ b/fs/cifs/cifsglob.h
@@ -254,6 +254,7 @@ struct cifsTconInfo {
atomic_t num_smbs_sent;
atomic_t num_writes;
atomic_t num_reads;
+ atomic_t num_flushes;
atomic_t num_oplock_brks;
atomic_t num_opens;
atomic_t num_closes;
@@ -298,6 +299,7 @@ struct cifsTconInfo {
bool unix_ext:1; /* if false disable Linux extensions to CIFS protocol
for this mount even if server would support */
bool local_lease:1; /* check leases (only) on local system not remote */
+ bool broken_posix_open; /* e.g. Samba server versions < 3.3.2, 3.2.9 */
bool need_reconnect:1; /* connection reset, tid now invalid */
/* BB add field for back pointer to sb struct(s)? */
};
diff --git a/fs/cifs/cifspdu.h b/fs/cifs/cifspdu.h
index b4e2e9f0ee3..b370489c8da 100644
--- a/fs/cifs/cifspdu.h
+++ b/fs/cifs/cifspdu.h
@@ -1,7 +1,7 @@
/*
* fs/cifs/cifspdu.h
*
- * Copyright (c) International Business Machines Corp., 2002,2008
+ * Copyright (c) International Business Machines Corp., 2002,2009
* Author(s): Steve French (sfrench@us.ibm.com)
*
* This library is free software; you can redistribute it and/or modify
@@ -23,6 +23,7 @@
#define _CIFSPDU_H
#include <net/sock.h>
+#include "smbfsctl.h"
#ifdef CONFIG_CIFS_WEAK_PW_HASH
#define LANMAN_PROT 0
@@ -34,15 +35,15 @@
#define POSIX_PROT (CIFS_PROT+1)
#define BAD_PROT 0xFFFF
-/* SMB command codes */
-/*
- * Some commands have minimal (wct=0,bcc=0), or uninteresting, responses
+/* SMB command codes:
+ * Note some commands have minimal (wct=0,bcc=0), or uninteresting, responses
* (ie which include no useful data other than the SMB error code itself).
- * Knowing this helps avoid response buffer allocations and copy in some cases
+ * This can allow us to avoid response buffer allocations and copy in some cases
*/
#define SMB_COM_CREATE_DIRECTORY 0x00 /* trivial response */
#define SMB_COM_DELETE_DIRECTORY 0x01 /* trivial response */
#define SMB_COM_CLOSE 0x04 /* triv req/rsp, timestamp ignored */
+#define SMB_COM_FLUSH 0x05 /* triv req/rsp */
#define SMB_COM_DELETE 0x06 /* trivial response */
#define SMB_COM_RENAME 0x07 /* trivial response */
#define SMB_COM_QUERY_INFORMATION 0x08 /* aka getattr */
@@ -790,6 +791,12 @@ typedef struct smb_com_close_rsp {
__u16 ByteCount; /* bct = 0 */
} __attribute__((packed)) CLOSE_RSP;
+typedef struct smb_com_flush_req {
+ struct smb_hdr hdr; /* wct = 1 */
+ __u16 FileID;
+ __u16 ByteCount; /* 0 */
+} __attribute__((packed)) FLUSH_REQ;
+
typedef struct smb_com_findclose_req {
struct smb_hdr hdr; /* wct = 1 */
__u16 FileID;
@@ -1924,19 +1931,19 @@ typedef struct smb_com_transaction2_get_dfs_refer_req {
#define DFS_TYPE_ROOT 0x0001
/* Referral Entry Flags */
-#define DFS_NAME_LIST_REF 0x0200
+#define DFS_NAME_LIST_REF 0x0200 /* set for domain or DC referral responses */
+#define DFS_TARGET_SET_BOUNDARY 0x0400 /* only valid with version 4 dfs req */
-typedef struct dfs_referral_level_3 {
- __le16 VersionNumber;
+typedef struct dfs_referral_level_3 { /* version 4 is same, + one flag bit */
+ __le16 VersionNumber; /* must be 3 or 4 */
__le16 Size;
__le16 ServerType; /* 0x0001 = root targets; 0x0000 = link targets */
- __le16 ReferralEntryFlags; /* 0x0200 bit set only for domain
- or DC referral responce */
+ __le16 ReferralEntryFlags;
__le32 TimeToLive;
__le16 DfsPathOffset;
__le16 DfsAlternatePathOffset;
__le16 NetworkAddressOffset; /* offset of the link target */
- __le16 ServiceSiteGuid;
+ __u8 ServiceSiteGuid[16]; /* MBZ, ignored */
} __attribute__((packed)) REFERRAL3;
typedef struct smb_com_transaction_get_dfs_refer_rsp {
@@ -1946,48 +1953,15 @@ typedef struct smb_com_transaction_get_dfs_refer_rsp {
__u8 Pad;
__le16 PathConsumed;
__le16 NumberOfReferrals;
- __le16 DFSFlags;
- __u16 Pad2;
+ __le32 DFSFlags;
REFERRAL3 referrals[1]; /* array of level 3 dfs_referral structures */
/* followed by the strings pointed to by the referral structures */
} __attribute__((packed)) TRANSACTION2_GET_DFS_REFER_RSP;
/* DFS Flags */
-#define DFSREF_REFERRAL_SERVER 0x0001
-#define DFSREF_STORAGE_SERVER 0x0002
-
-/* IOCTL information */
-/*
- * List of ioctl function codes that look to be of interest to remote clients
- * like this one. Need to do some experimentation to make sure they all work
- * remotely. Some of the following, such as the encryption/compression ones
- * would be invoked from tools via a specialized hook into the VFS rather
- * than via the standard vfs entry points
- */
-#define FSCTL_REQUEST_OPLOCK_LEVEL_1 0x00090000
-#define FSCTL_REQUEST_OPLOCK_LEVEL_2 0x00090004
-#define FSCTL_REQUEST_BATCH_OPLOCK 0x00090008
-#define FSCTL_LOCK_VOLUME 0x00090018
-#define FSCTL_UNLOCK_VOLUME 0x0009001C
-#define FSCTL_GET_COMPRESSION 0x0009003C
-#define FSCTL_SET_COMPRESSION 0x0009C040
-#define FSCTL_REQUEST_FILTER_OPLOCK 0x0009008C
-#define FSCTL_FILESYS_GET_STATISTICS 0x00090090
-#define FSCTL_SET_REPARSE_POINT 0x000900A4
-#define FSCTL_GET_REPARSE_POINT 0x000900A8
-#define FSCTL_DELETE_REPARSE_POINT 0x000900AC
-#define FSCTL_SET_SPARSE 0x000900C4
-#define FSCTL_SET_ZERO_DATA 0x000900C8
-#define FSCTL_SET_ENCRYPTION 0x000900D7
-#define FSCTL_ENCRYPTION_FSCTL_IO 0x000900DB
-#define FSCTL_WRITE_RAW_ENCRYPTED 0x000900DF
-#define FSCTL_READ_RAW_ENCRYPTED 0x000900E3
-#define FSCTL_SIS_COPYFILE 0x00090100
-#define FSCTL_SIS_LINK_FILES 0x0009C104
-
-#define IO_REPARSE_TAG_MOUNT_POINT 0xA0000003
-#define IO_REPARSE_TAG_HSM 0xC0000004
-#define IO_REPARSE_TAG_SIS 0x80000007
+#define DFSREF_REFERRAL_SERVER 0x00000001 /* all targets are DFS roots */
+#define DFSREF_STORAGE_SERVER 0x00000002 /* no further ref requests needed */
+#define DFSREF_TARGET_FAILBACK 0x00000004 /* only for DFS referral version 4 */
/*
************************************************************************
@@ -2508,8 +2482,6 @@ struct data_blob {
6) Use nanosecond timestamps throughout all time fields if
corresponding attribute flag is set
7) sendfile - handle based copy
- 8) Direct i/o
- 9) Misc fcntls?
what about fixing 64 bit alignment
@@ -2628,7 +2600,5 @@ typedef struct file_chattr_info {
__le64 mode; /* list of actual attribute bits on this inode */
} __attribute__((packed)) FILE_CHATTR_INFO; /* ext attributes
(chattr, chflags) level 0x206 */
-
-#endif
-
+#endif /* POSIX */
#endif /* _CIFSPDU_H */
diff --git a/fs/cifs/cifsproto.h b/fs/cifs/cifsproto.h
index 083dfc57c7a..4167716d32f 100644
--- a/fs/cifs/cifsproto.h
+++ b/fs/cifs/cifsproto.h
@@ -44,6 +44,9 @@ extern void _FreeXid(unsigned int);
extern char *build_path_from_dentry(struct dentry *);
extern char *cifs_build_path_to_root(struct cifs_sb_info *cifs_sb);
extern char *build_wildcard_path_from_dentry(struct dentry *direntry);
+extern char *cifs_compose_mount_options(const char *sb_mountdata,
+ const char *fullpath, const struct dfs_info3_param *ref,
+ char **devname);
/* extern void renew_parental_timestamps(struct dentry *direntry);*/
extern int SendReceive(const unsigned int /* xid */ , struct cifsSesInfo *,
struct smb_hdr * /* input */ ,
@@ -92,6 +95,9 @@ extern u64 cifs_UnixTimeToNT(struct timespec);
extern __le64 cnvrtDosCifsTm(__u16 date, __u16 time);
extern struct timespec cnvrtDosUnixTm(__u16 date, __u16 time);
+extern int cifs_posix_open(char *full_path, struct inode **pinode,
+ struct super_block *sb, int mode, int oflags,
+ int *poplock, __u16 *pnetfid, int xid);
extern void posix_fill_in_inode(struct inode *tmp_inode,
FILE_UNIX_BASIC_INFO *pData, int isNewInode);
extern struct inode *cifs_new_inode(struct super_block *sb, __u64 *inum);
@@ -281,6 +287,9 @@ extern int CIFSPOSIXCreate(const int xid, struct cifsTconInfo *tcon,
extern int CIFSSMBClose(const int xid, struct cifsTconInfo *tcon,
const int smb_file_id);
+extern int CIFSSMBFlush(const int xid, struct cifsTconInfo *tcon,
+ const int smb_file_id);
+
extern int CIFSSMBRead(const int xid, struct cifsTconInfo *tcon,
const int netfid, unsigned int count,
const __u64 lseek, unsigned int *nbytes, char **buf,
diff --git a/fs/cifs/cifssmb.c b/fs/cifs/cifssmb.c
index 939e2f76b95..bc09c998631 100644
--- a/fs/cifs/cifssmb.c
+++ b/fs/cifs/cifssmb.c
@@ -1934,6 +1934,27 @@ CIFSSMBClose(const int xid, struct cifsTconInfo *tcon, int smb_file_id)
}
int
+CIFSSMBFlush(const int xid, struct cifsTconInfo *tcon, int smb_file_id)
+{
+ int rc = 0;
+ FLUSH_REQ *pSMB = NULL;
+ cFYI(1, ("In CIFSSMBFlush"));
+
+ rc = small_smb_init(SMB_COM_FLUSH, 1, tcon, (void **) &pSMB);
+ if (rc)
+ return rc;
+
+ pSMB->FileID = (__u16) smb_file_id;
+ pSMB->ByteCount = 0;
+ rc = SendReceiveNoRsp(xid, tcon->ses, (struct smb_hdr *) pSMB, 0);
+ cifs_stats_inc(&tcon->num_flushes);
+ if (rc)
+ cERROR(1, ("Send error in Flush = %d", rc));
+
+ return rc;
+}
+
+int
CIFSSMBRename(const int xid, struct cifsTconInfo *tcon,
const char *fromName, const char *toName,
const struct nls_table *nls_codepage, int remap)
@@ -2356,8 +2377,10 @@ winCreateHardLinkRetry:
PATH_MAX, nls_codepage, remap);
name_len++; /* trailing null */
name_len *= 2;
- pSMB->OldFileName[name_len] = 0; /* pad */
- pSMB->OldFileName[name_len + 1] = 0x04;
+
+ /* protocol specifies ASCII buffer format (0x04) for unicode */
+ pSMB->OldFileName[name_len] = 0x04;
+ pSMB->OldFileName[name_len + 1] = 0x00; /* pad */
name_len2 =
cifsConvertToUCS((__le16 *)&pSMB->OldFileName[name_len + 2],
toName, PATH_MAX, nls_codepage, remap);
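
The new CIFSSMBFlush() gives fsync a way to ask the server to commit cached data to disk, and the CIFS_MOUNT_NOSSYNC flag (mount option nostrictsync, see the README and cifs_fs_sb.h hunks above) lets users skip that slow round trip. A minimal sketch of how an fsync path is expected to combine the two; the xid, tcon and smbfile names are assumed caller context, not part of this hunk:

	/* push cached (write-behind) data to the server first */
	rc = filemap_fdatawrite(inode->i_mapping);

	/* then request a server-side flush to disk unless nostrictsync was set */
	if (!rc && !(CIFS_SB(inode->i_sb)->mnt_cifs_flags & CIFS_MOUNT_NOSSYNC))
		rc = CIFSSMBFlush(xid, tcon, smbfile->netfid);
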
diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
index da0f4ffa061..0de3b5615a2 100644
--- a/fs/cifs/connect.c
+++ b/fs/cifs/connect.c
@@ -95,6 +95,7 @@ struct smb_vol {
bool local_lease:1; /* check leases only on local system, not remote */
bool noblocksnd:1;
bool noautotune:1;
+ bool nostrictsync:1; /* do not force expensive SMBflush on every sync */
unsigned int rsize;
unsigned int wsize;
unsigned int sockopt;
@@ -1274,6 +1275,10 @@ cifs_parse_mount_options(char *options, const char *devname,
vol->intr = 0;
} else if (strnicmp(data, "intr", 4) == 0) {
vol->intr = 1;
+ } else if (strnicmp(data, "nostrictsync", 12) == 0) {
+ vol->nostrictsync = 1;
+ } else if (strnicmp(data, "strictsync", 10) == 0) {
+ vol->nostrictsync = 0;
} else if (strnicmp(data, "serverino", 7) == 0) {
vol->server_ino = 1;
} else if (strnicmp(data, "noserverino", 9) == 0) {
@@ -2160,6 +2165,8 @@ static void setup_cifs_sb(struct smb_vol *pvolume_info,
cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_UNX_EMUL;
if (pvolume_info->nobrl)
cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_NO_BRL;
+ if (pvolume_info->nostrictsync)
+ cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_NOSSYNC;
if (pvolume_info->mand_lock)
cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_NOPOSIXBRL;
if (pvolume_info->cifs_acl)
@@ -3667,7 +3674,7 @@ CIFSTCon(unsigned int xid, struct cifsSesInfo *ses,
BCC(smb_buffer_response)) {
kfree(tcon->nativeFileSystem);
tcon->nativeFileSystem =
- kzalloc(length + 2, GFP_KERNEL);
+ kzalloc(2*(length + 1), GFP_KERNEL);
if (tcon->nativeFileSystem)
cifs_strfromUCS_le(
tcon->nativeFileSystem,
diff --git a/fs/cifs/dir.c b/fs/cifs/dir.c
index 89fb7283265..2f35cccfcd8 100644
--- a/fs/cifs/dir.c
+++ b/fs/cifs/dir.c
@@ -129,7 +129,7 @@ cifs_bp_rename_retry:
return full_path;
}
-static int cifs_posix_open(char *full_path, struct inode **pinode,
+int cifs_posix_open(char *full_path, struct inode **pinode,
struct super_block *sb, int mode, int oflags,
int *poplock, __u16 *pnetfid, int xid)
{
@@ -187,7 +187,9 @@ static int cifs_posix_open(char *full_path, struct inode **pinode,
if (!pinode)
goto posix_open_ret; /* caller does not need info */
- *pinode = cifs_new_inode(sb, &presp_data->UniqueId);
+ if (*pinode == NULL)
+ *pinode = cifs_new_inode(sb, &presp_data->UniqueId);
+ /* else an inode was passed in. Update its info, don't create one */
/* We do not need to close the file if new_inode fails since
the caller will retry qpathinfo as long as inode is null */
@@ -699,7 +701,7 @@ cifs_d_revalidate(struct dentry *direntry, struct nameidata *nd)
return rc;
} */
-struct dentry_operations cifs_dentry_ops = {
+const struct dentry_operations cifs_dentry_ops = {
.d_revalidate = cifs_d_revalidate,
/* d_delete: cifs_d_delete, */ /* not needed except for debugging */
};
@@ -737,7 +739,7 @@ static int cifs_ci_compare(struct dentry *dentry, struct qstr *a,
return 1;
}
-struct dentry_operations cifs_ci_dentry_ops = {
+const struct dentry_operations cifs_ci_dentry_ops = {
.d_revalidate = cifs_d_revalidate,
.d_hash = cifs_ci_hash,
.d_compare = cifs_ci_compare,
diff --git a/fs/cifs/file.c b/fs/cifs/file.c
index 12bb656fbe7..81747acca4c 100644
--- a/fs/cifs/file.c
+++ b/fs/cifs/file.c
@@ -78,8 +78,36 @@ static inline int cifs_convert_flags(unsigned int flags)
return (READ_CONTROL | FILE_WRITE_ATTRIBUTES | FILE_READ_ATTRIBUTES |
FILE_WRITE_EA | FILE_APPEND_DATA | FILE_WRITE_DATA |
FILE_READ_DATA);
+}
+static inline fmode_t cifs_posix_convert_flags(unsigned int flags)
+{
+ fmode_t posix_flags = 0;
+ if ((flags & O_ACCMODE) == O_RDONLY)
+ posix_flags = FMODE_READ;
+ else if ((flags & O_ACCMODE) == O_WRONLY)
+ posix_flags = FMODE_WRITE;
+ else if ((flags & O_ACCMODE) == O_RDWR) {
+ /* GENERIC_ALL is too much permission to request
+ can cause unnecessary access denied on create */
+ /* return GENERIC_ALL; */
+ posix_flags = FMODE_READ | FMODE_WRITE;
+ }
+ /* can not map O_CREAT or O_EXCL or O_TRUNC flags when
+ reopening a file. They had their effect on the original open */
+ if (flags & O_APPEND)
+ posix_flags |= (fmode_t)O_APPEND;
+ if (flags & O_SYNC)
+ posix_flags |= (fmode_t)O_SYNC;
+ if (flags & O_DIRECTORY)
+ posix_flags |= (fmode_t)O_DIRECTORY;
+ if (flags & O_NOFOLLOW)
+ posix_flags |= (fmode_t)O_NOFOLLOW;
+ if (flags & O_DIRECT)
+ posix_flags |= (fmode_t)O_DIRECT;
+
+ return posix_flags;
}
static inline int cifs_get_disposition(unsigned int flags)
@@ -97,6 +125,80 @@ static inline int cifs_get_disposition(unsigned int flags)
}
/* all arguments to this function must be checked for validity in caller */
+static inline int cifs_posix_open_inode_helper(struct inode *inode,
+ struct file *file, struct cifsInodeInfo *pCifsInode,
+ struct cifsFileInfo *pCifsFile, int oplock, u16 netfid)
+{
+ struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
+/* struct timespec temp; */ /* BB REMOVEME BB */
+
+ file->private_data = kmalloc(sizeof(struct cifsFileInfo), GFP_KERNEL);
+ if (file->private_data == NULL)
+ return -ENOMEM;
+ pCifsFile = cifs_init_private(file->private_data, inode, file, netfid);
+ write_lock(&GlobalSMBSeslock);
+ list_add(&pCifsFile->tlist, &cifs_sb->tcon->openFileList);
+
+ pCifsInode = CIFS_I(file->f_path.dentry->d_inode);
+ if (pCifsInode == NULL) {
+ write_unlock(&GlobalSMBSeslock);
+ return -EINVAL;
+ }
+
+ /* want handles we can use to read with first
+ in the list so we do not have to walk the
+ list to search for one in write_begin */
+ if ((file->f_flags & O_ACCMODE) == O_WRONLY) {
+ list_add_tail(&pCifsFile->flist,
+ &pCifsInode->openFileList);
+ } else {
+ list_add(&pCifsFile->flist,
+ &pCifsInode->openFileList);
+ }
+
+ if (pCifsInode->clientCanCacheRead) {
+ /* we have the inode open somewhere else
+ no need to discard cache data */
+ goto psx_client_can_cache;
+ }
+
+ /* BB FIXME need to fix this check to move it earlier into posix_open
+ BB fIX following section BB FIXME */
+
+ /* if not oplocked, invalidate inode pages if mtime or file
+ size changed */
+/* temp = cifs_NTtimeToUnix(le64_to_cpu(buf->LastWriteTime));
+ if (timespec_equal(&file->f_path.dentry->d_inode->i_mtime, &temp) &&
+ (file->f_path.dentry->d_inode->i_size ==
+ (loff_t)le64_to_cpu(buf->EndOfFile))) {
+ cFYI(1, ("inode unchanged on server"));
+ } else {
+ if (file->f_path.dentry->d_inode->i_mapping) {
+ rc = filemap_write_and_wait(file->f_path.dentry->d_inode->i_mapping);
+ if (rc != 0)
+ CIFS_I(file->f_path.dentry->d_inode)->write_behind_rc = rc;
+ }
+ cFYI(1, ("invalidating remote inode since open detected it "
+ "changed"));
+ invalidate_remote_inode(file->f_path.dentry->d_inode);
+ } */
+
+psx_client_can_cache:
+ if ((oplock & 0xF) == OPLOCK_EXCLUSIVE) {
+ pCifsInode->clientCanCacheAll = true;
+ pCifsInode->clientCanCacheRead = true;
+ cFYI(1, ("Exclusive Oplock granted on inode %p",
+ file->f_path.dentry->d_inode));
+ } else if ((oplock & 0xF) == OPLOCK_READ)
+ pCifsInode->clientCanCacheRead = true;
+
+ /* will have to change the unlock if we reenable the
+ filemap_fdatawrite (which does not seem necessary) */
+ write_unlock(&GlobalSMBSeslock);
+ return 0;
+}
+
+/* all arguments to this function must be checked for validity in caller */
static inline int cifs_open_inode_helper(struct inode *inode, struct file *file,
struct cifsInodeInfo *pCifsInode, struct cifsFileInfo *pCifsFile,
struct cifsTconInfo *pTcon, int *oplock, FILE_ALL_INFO *buf,
@@ -167,7 +269,7 @@ int cifs_open(struct inode *inode, struct file *file)
int rc = -EACCES;
int xid, oplock;
struct cifs_sb_info *cifs_sb;
- struct cifsTconInfo *pTcon;
+ struct cifsTconInfo *tcon;
struct cifsFileInfo *pCifsFile;
struct cifsInodeInfo *pCifsInode;
struct list_head *tmp;
@@ -180,7 +282,7 @@ int cifs_open(struct inode *inode, struct file *file)
xid = GetXid();
cifs_sb = CIFS_SB(inode->i_sb);
- pTcon = cifs_sb->tcon;
+ tcon = cifs_sb->tcon;
if (file->f_flags & O_CREAT) {
/* search inode for this file and fill in file->private_data */
@@ -220,6 +322,45 @@ int cifs_open(struct inode *inode, struct file *file)
cFYI(1, ("inode = 0x%p file flags are 0x%x for %s",
inode, file->f_flags, full_path));
+
+ if (oplockEnabled)
+ oplock = REQ_OPLOCK;
+ else
+ oplock = 0;
+
+ if (!tcon->broken_posix_open && tcon->unix_ext &&
+ (tcon->ses->capabilities & CAP_UNIX) &&
+ (CIFS_UNIX_POSIX_PATH_OPS_CAP &
+ le64_to_cpu(tcon->fsUnixInfo.Capability))) {
+ int oflags = (int) cifs_posix_convert_flags(file->f_flags);
+ /* can not refresh inode info since size could be stale */
+ rc = cifs_posix_open(full_path, &inode, inode->i_sb,
+ cifs_sb->mnt_file_mode /* ignored */,
+ oflags, &oplock, &netfid, xid);
+ if (rc == 0) {
+ cFYI(1, ("posix open succeeded"));
+ /* no need for special case handling of setting mode
+ on read only files needed here */
+
+ cifs_posix_open_inode_helper(inode, file, pCifsInode,
+ pCifsFile, oplock, netfid);
+ goto out;
+ } else if ((rc == -EINVAL) || (rc == -EOPNOTSUPP)) {
+ if (tcon->ses->serverNOS)
+ cERROR(1, ("server %s of type %s returned"
+ " unexpected error on SMB posix open"
+ ", disabling posix open support."
+ " Check if server update available.",
+ tcon->ses->serverName,
+ tcon->ses->serverNOS));
+ tcon->broken_posix_open = true;
+ } else if ((rc != -EIO) && (rc != -EREMOTE) &&
+ (rc != -EOPNOTSUPP)) /* path not found or net err */
+ goto out;
+ /* else fallthrough to retry open the old way on network i/o
+ or DFS errors */
+ }
+
desiredAccess = cifs_convert_flags(file->f_flags);
/*********************************************************************
@@ -248,11 +389,6 @@ int cifs_open(struct inode *inode, struct file *file)
disposition = cifs_get_disposition(file->f_flags);
- if (oplockEnabled)
- oplock = REQ_OPLOCK;
- else
- oplock = 0;
-
/* BB pass O_SYNC flag through on file attributes .. BB */
/* Also refresh inode by passing in file_info buf returned by SMBOpen
@@ -269,7 +405,7 @@ int cifs_open(struct inode *inode, struct file *file)
}
if (cifs_sb->tcon->ses->capabilities & CAP_NT_SMBS)
- rc = CIFSSMBOpen(xid, pTcon, full_path, disposition,
+ rc = CIFSSMBOpen(xid, tcon, full_path, disposition,
desiredAccess, CREATE_NOT_DIR, &netfid, &oplock, buf,
cifs_sb->local_nls, cifs_sb->mnt_cifs_flags
& CIFS_MOUNT_MAP_SPECIAL_CHR);
@@ -278,7 +414,7 @@ int cifs_open(struct inode *inode, struct file *file)
if (rc == -EIO) {
/* Old server, try legacy style OpenX */
- rc = SMBLegacyOpen(xid, pTcon, full_path, disposition,
+ rc = SMBLegacyOpen(xid, tcon, full_path, disposition,
desiredAccess, CREATE_NOT_DIR, &netfid, &oplock, buf,
cifs_sb->local_nls, cifs_sb->mnt_cifs_flags
& CIFS_MOUNT_MAP_SPECIAL_CHR);
@@ -295,12 +431,12 @@ int cifs_open(struct inode *inode, struct file *file)
}
pCifsFile = cifs_init_private(file->private_data, inode, file, netfid);
write_lock(&GlobalSMBSeslock);
- list_add(&pCifsFile->tlist, &pTcon->openFileList);
+ list_add(&pCifsFile->tlist, &tcon->openFileList);
pCifsInode = CIFS_I(file->f_path.dentry->d_inode);
if (pCifsInode) {
rc = cifs_open_inode_helper(inode, file, pCifsInode,
- pCifsFile, pTcon,
+ pCifsFile, tcon,
&oplock, buf, full_path, xid);
} else {
write_unlock(&GlobalSMBSeslock);
@@ -309,7 +445,7 @@ int cifs_open(struct inode *inode, struct file *file)
if (oplock & CIFS_CREATE_ACTION) {
/* time to set mode which we can not set earlier due to
problems creating new read-only files */
- if (pTcon->unix_ext) {
+ if (tcon->unix_ext) {
struct cifs_unix_set_info_args args = {
.mode = inode->i_mode,
.uid = NO_CHANGE_64,
@@ -319,7 +455,7 @@ int cifs_open(struct inode *inode, struct file *file)
.mtime = NO_CHANGE_64,
.device = 0,
};
- CIFSSMBUnixSetInfo(xid, pTcon, full_path, &args,
+ CIFSSMBUnixSetInfo(xid, tcon, full_path, &args,
cifs_sb->local_nls,
cifs_sb->mnt_cifs_flags &
CIFS_MOUNT_MAP_SPECIAL_CHR);
@@ -349,7 +485,7 @@ static int cifs_reopen_file(struct file *file, bool can_flush)
int rc = -EACCES;
int xid, oplock;
struct cifs_sb_info *cifs_sb;
- struct cifsTconInfo *pTcon;
+ struct cifsTconInfo *tcon;
struct cifsFileInfo *pCifsFile;
struct cifsInodeInfo *pCifsInode;
struct inode *inode;
@@ -387,7 +523,7 @@ static int cifs_reopen_file(struct file *file, bool can_flush)
}
cifs_sb = CIFS_SB(inode->i_sb);
- pTcon = cifs_sb->tcon;
+ tcon = cifs_sb->tcon;
/* can not grab rename sem here because various ops, including
those that already have the rename sem can end up causing writepage
@@ -404,20 +540,37 @@ reopen_error_exit:
cFYI(1, ("inode = 0x%p file flags 0x%x for %s",
inode, file->f_flags, full_path));
- desiredAccess = cifs_convert_flags(file->f_flags);
if (oplockEnabled)
oplock = REQ_OPLOCK;
else
oplock = 0;
+ if (tcon->unix_ext && (tcon->ses->capabilities & CAP_UNIX) &&
+ (CIFS_UNIX_POSIX_PATH_OPS_CAP &
+ le64_to_cpu(tcon->fsUnixInfo.Capability))) {
+ int oflags = (int) cifs_posix_convert_flags(file->f_flags);
+ /* can not refresh inode info since size could be stale */
+ rc = cifs_posix_open(full_path, NULL, inode->i_sb,
+ cifs_sb->mnt_file_mode /* ignored */,
+ oflags, &oplock, &netfid, xid);
+ if (rc == 0) {
+ cFYI(1, ("posix reopen succeeded"));
+ goto reopen_success;
+ }
+ /* fallthrough to retry open the old way on errors, especially
+ in the reconnect path it is important to retry hard */
+ }
+
+ desiredAccess = cifs_convert_flags(file->f_flags);
+
/* Can not refresh inode by passing in file_info buf to be returned
by SMBOpen and then calling get_inode_info with returned buf
since file might have write behind data that needs to be flushed
and server version of file size can be stale. If we knew for sure
that inode was not dirty locally we could do this */
- rc = CIFSSMBOpen(xid, pTcon, full_path, disposition, desiredAccess,
+ rc = CIFSSMBOpen(xid, tcon, full_path, disposition, desiredAccess,
CREATE_NOT_DIR, &netfid, &oplock, NULL,
cifs_sb->local_nls, cifs_sb->mnt_cifs_flags &
CIFS_MOUNT_MAP_SPECIAL_CHR);
@@ -426,6 +579,7 @@ reopen_error_exit:
cFYI(1, ("cifs_open returned 0x%x", rc));
cFYI(1, ("oplock: %d", oplock));
} else {
+reopen_success:
pCifsFile->netfid = netfid;
pCifsFile->invalidHandle = false;
up(&pCifsFile->fh_sem);
@@ -439,7 +593,7 @@ reopen_error_exit:
go to server to get inode info */
pCifsInode->clientCanCacheAll = false;
pCifsInode->clientCanCacheRead = false;
- if (pTcon->unix_ext)
+ if (tcon->unix_ext)
rc = cifs_get_inode_info_unix(&inode,
full_path, inode->i_sb, xid);
else
@@ -467,7 +621,6 @@ reopen_error_exit:
cifs_relock_file(pCifsFile);
}
}
-
kfree(full_path);
FreeXid(xid);
return rc;
@@ -1523,6 +1676,9 @@ int cifs_fsync(struct file *file, struct dentry *dentry, int datasync)
{
int xid;
int rc = 0;
+ struct cifsTconInfo *tcon;
+ struct cifsFileInfo *smbfile =
+ (struct cifsFileInfo *)file->private_data;
struct inode *inode = file->f_path.dentry->d_inode;
xid = GetXid();
@@ -1534,7 +1690,12 @@ int cifs_fsync(struct file *file, struct dentry *dentry, int datasync)
if (rc == 0) {
rc = CIFS_I(inode)->write_behind_rc;
CIFS_I(inode)->write_behind_rc = 0;
+ tcon = CIFS_SB(inode->i_sb)->tcon;
+ if (!rc && tcon && smbfile &&
+ !(CIFS_SB(inode->i_sb)->mnt_cifs_flags & CIFS_MOUNT_NOSSYNC))
+ rc = CIFSSMBFlush(xid, tcon, smbfile->netfid);
}
+
FreeXid(xid);
return rc;
}
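
The new cifs_posix_convert_flags() helper above is a straight table of open(2) flag translations. The stand-alone sketch below mirrors that mapping so it can be compiled and inspected in userspace; FMODE_READ/FMODE_WRITE are local stand-ins for illustration, not the kernel definitions from <linux/fs.h>:

/* Illustrative userspace mirror of the open-flag mapping done by the new
 * cifs_posix_convert_flags() helper. FMODE_* values are local stand-ins. */
#include <fcntl.h>
#include <stdio.h>

#define FMODE_READ  0x1u
#define FMODE_WRITE 0x2u

static unsigned int posix_convert_flags(unsigned int flags)
{
    unsigned int posix_flags = 0;

    if ((flags & O_ACCMODE) == O_RDONLY)
        posix_flags = FMODE_READ;
    else if ((flags & O_ACCMODE) == O_WRONLY)
        posix_flags = FMODE_WRITE;
    else if ((flags & O_ACCMODE) == O_RDWR)
        posix_flags = FMODE_READ | FMODE_WRITE;

    /* O_CREAT/O_EXCL/O_TRUNC are deliberately not mapped: they only
     * matter on the original open, not on a reopen. */
    if (flags & O_APPEND)
        posix_flags |= O_APPEND;

    return posix_flags;
}

int main(void)
{
    printf("O_RDWR|O_APPEND -> 0x%x\n",
           posix_convert_flags(O_RDWR | O_APPEND));
    return 0;
}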
diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c
index 4690a360c85..a8797cc6080 100644
--- a/fs/cifs/inode.c
+++ b/fs/cifs/inode.c
@@ -763,6 +763,9 @@ cifs_set_file_info(struct inode *inode, struct iattr *attrs, int xid,
struct cifsTconInfo *pTcon = cifs_sb->tcon;
FILE_BASIC_INFO info_buf;
+ if (attrs == NULL)
+ return -EINVAL;
+
if (attrs->ia_valid & ATTR_ATIME) {
set_time = true;
info_buf.LastAccessTime =
diff --git a/fs/cifs/smbfsctl.h b/fs/cifs/smbfsctl.h
new file mode 100644
index 00000000000..7056b891e08
--- /dev/null
+++ b/fs/cifs/smbfsctl.h
@@ -0,0 +1,84 @@
+/*
+ * fs/cifs/smbfsctl.h: SMB, CIFS, SMB2 FSCTL definitions
+ *
+ * Copyright (c) International Business Machines Corp., 2002,2009
+ * Author(s): Steve French (sfrench@us.ibm.com)
+ *
+ * This library is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License as published
+ * by the Free Software Foundation; either version 2.1 of the License, or
+ * (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
+ * the GNU Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public License
+ * along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+/* IOCTL information */
+/*
+ * List of ioctl/fsctl function codes that are or could be useful in the
+ * future to remote clients like cifs or SMB2 client. There is probably
+ * a slightly larger set of fsctls that NTFS local filesystem could handle,
+ * including the seven below that we do not have struct definitions for.
+ * Even with protocol definitions for most of these now available, we still
+ * need to do some experimentation to identify which are practical to do
+ * remotely. Some of the following, such as the encryption/compression ones
+ * could be invoked from tools via a specialized hook into the VFS rather
+ * than via the standard vfs entry points
+ */
+#define FSCTL_REQUEST_OPLOCK_LEVEL_1 0x00090000
+#define FSCTL_REQUEST_OPLOCK_LEVEL_2 0x00090004
+#define FSCTL_REQUEST_BATCH_OPLOCK 0x00090008
+#define FSCTL_LOCK_VOLUME 0x00090018
+#define FSCTL_UNLOCK_VOLUME 0x0009001C
+#define FSCTL_IS_PATHNAME_VALID 0x0009002C /* BB add struct */
+#define FSCTL_GET_COMPRESSION 0x0009003C /* BB add struct */
+#define FSCTL_SET_COMPRESSION 0x0009C040 /* BB add struct */
+#define FSCTL_QUERY_FAT_BPB 0x00090058 /* BB add struct */
+/* Verify the next FSCTL number, we had it as 0x00090090 before */
+#define FSCTL_FILESYSTEM_GET_STATS 0x00090060 /* BB add struct */
+#define FSCTL_GET_NTFS_VOLUME_DATA 0x00090064 /* BB add struct */
+#define FSCTL_GET_RETRIEVAL_POINTERS 0x00090073 /* BB add struct */
+#define FSCTL_IS_VOLUME_DIRTY 0x00090078 /* BB add struct */
+#define FSCTL_ALLOW_EXTENDED_DASD_IO 0x00090083 /* BB add struct */
+#define FSCTL_REQUEST_FILTER_OPLOCK 0x0009008C
+#define FSCTL_FIND_FILES_BY_SID 0x0009008F /* BB add struct */
+#define FSCTL_SET_OBJECT_ID 0x00090098 /* BB add struct */
+#define FSCTL_GET_OBJECT_ID 0x0009009C /* BB add struct */
+#define FSCTL_DELETE_OBJECT_ID 0x000900A0 /* BB add struct */
+#define FSCTL_SET_REPARSE_POINT 0x000900A4 /* BB add struct */
+#define FSCTL_GET_REPARSE_POINT 0x000900A8 /* BB add struct */
+#define FSCTL_DELETE_REPARSE_POINT 0x000900AC /* BB add struct */
+#define FSCTL_SET_OBJECT_ID_EXTENDED 0x000900BC /* BB add struct */
+#define FSCTL_CREATE_OR_GET_OBJECT_ID 0x000900C0 /* BB add struct */
+#define FSCTL_SET_SPARSE 0x000900C4 /* BB add struct */
+#define FSCTL_SET_ZERO_DATA 0x000900C8 /* BB add struct */
+#define FSCTL_SET_ENCRYPTION 0x000900D7 /* BB add struct */
+#define FSCTL_ENCRYPTION_FSCTL_IO 0x000900DB /* BB add struct */
+#define FSCTL_WRITE_RAW_ENCRYPTED 0x000900DF /* BB add struct */
+#define FSCTL_READ_RAW_ENCRYPTED 0x000900E3 /* BB add struct */
+#define FSCTL_READ_FILE_USN_DATA 0x000900EB /* BB add struct */
+#define FSCTL_WRITE_USN_CLOSE_RECORD 0x000900EF /* BB add struct */
+#define FSCTL_SIS_COPYFILE 0x00090100 /* BB add struct */
+#define FSCTL_RECALL_FILE 0x00090117 /* BB add struct */
+#define FSCTL_QUERY_SPARING_INFO 0x00090138 /* BB add struct */
+#define FSCTL_SET_ZERO_ON_DEALLOC 0x00090194 /* BB add struct */
+#define FSCTL_SET_SHORT_NAME_BEHAVIOR 0x000901B4 /* BB add struct */
+#define FSCTL_QUERY_ALLOCATED_RANGES 0x000940CF /* BB add struct */
+#define FSCTL_SET_DEFECT_MANAGEMENT 0x00098134 /* BB add struct */
+#define FSCTL_SIS_LINK_FILES 0x0009C104
+#define FSCTL_PIPE_PEEK 0x0011400C /* BB add struct */
+#define FSCTL_PIPE_TRANSCEIVE 0x0011C017 /* BB add struct */
+/* strange that the number for this op is not sequential with previous op */
+#define FSCTL_PIPE_WAIT 0x00110018 /* BB add struct */
+#define FSCTL_LMR_GET_LINK_TRACK_INF 0x001400E8 /* BB add struct */
+#define FSCTL_LMR_SET_LINK_TRACK_INF 0x001400EC /* BB add struct */
+
+#define IO_REPARSE_TAG_MOUNT_POINT 0xA0000003
+#define IO_REPARSE_TAG_HSM 0xC0000004
+#define IO_REPARSE_TAG_SIS 0x80000007
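
The FSCTL values above appear to follow the standard Windows CTL_CODE packing (device type in bits 31-16, required access in bits 15-14, function number in bits 13-2, transfer method in bits 1-0), which is handy when cross-checking the numbers against protocol documentation. A small decoder written under that assumption:

/* Decode an FSCTL value assuming the Windows CTL_CODE bit layout:
 * device type <<16 | access <<14 | function <<2 | method. */
#include <stdio.h>

static void decode_fsctl(unsigned int code)
{
    printf("0x%08X: device=0x%X access=%u function=%u method=%u\n",
           code,
           code >> 16,              /* 0x9 == FILE_DEVICE_FILE_SYSTEM */
           (code >> 14) & 0x3,
           (code >> 2) & 0xFFF,
           code & 0x3);
}

int main(void)
{
    decode_fsctl(0x00090000);       /* FSCTL_REQUEST_OPLOCK_LEVEL_1 */
    decode_fsctl(0x0009C040);       /* FSCTL_SET_COMPRESSION */
    return 0;
}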
diff --git a/fs/coda/dir.c b/fs/coda/dir.c
index 75b1fa90b2c..4bb9d0a5dec 100644
--- a/fs/coda/dir.c
+++ b/fs/coda/dir.c
@@ -59,7 +59,7 @@ static int coda_return_EIO(void)
}
#define CODA_EIO_ERROR ((void *) (coda_return_EIO))
-static struct dentry_operations coda_dentry_operations =
+static const struct dentry_operations coda_dentry_operations =
{
.d_revalidate = coda_dentry_revalidate,
.d_delete = coda_dentry_delete,
diff --git a/fs/compat.c b/fs/compat.c
index 0949b43794a..5e374aad33f 100644
--- a/fs/compat.c
+++ b/fs/compat.c
@@ -378,6 +378,34 @@ out:
return error;
}
+/*
+ * This is a copy of sys_ustat, just dealing with a structure layout.
+ * Given how simple this syscall is, that approach is more maintainable
+ * than the various conversion hacks.
+ */
+asmlinkage long compat_sys_ustat(unsigned dev, struct compat_ustat __user *u)
+{
+ struct super_block *sb;
+ struct compat_ustat tmp;
+ struct kstatfs sbuf;
+ int err;
+
+ sb = user_get_super(new_decode_dev(dev));
+ if (!sb)
+ return -EINVAL;
+ err = vfs_statfs(sb->s_root, &sbuf);
+ drop_super(sb);
+ if (err)
+ return err;
+
+ memset(&tmp, 0, sizeof(struct compat_ustat));
+ tmp.f_tfree = sbuf.f_bfree;
+ tmp.f_tinode = sbuf.f_ffree;
+ if (copy_to_user(u, &tmp, sizeof(struct compat_ustat)))
+ return -EFAULT;
+ return 0;
+}
+
static int get_compat_flock(struct flock *kfl, struct compat_flock __user *ufl)
{
if (!access_ok(VERIFY_READ, ufl, sizeof(*ufl)) ||
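
compat_sys_ustat() above boils down to a narrowing copy: fetch the wide in-kernel counters, zero a fixed 32-bit layout, fill in the two fields userspace cares about, and copy it out. A rough userspace analogue with assumed, purely illustrative struct layouts:

/* Userspace sketch of the narrowing copy performed by compat_sys_ustat().
 * The struct layouts are assumptions for illustration only. */
#include <stdint.h>
#include <string.h>
#include <stdio.h>

struct kstatfs_like {               /* wide, native counters */
    uint64_t f_bfree;
    uint64_t f_ffree;
};

struct compat_ustat_like {          /* fixed 32-bit layout handed to userspace */
    uint32_t f_tfree;
    uint32_t f_tinode;
    char     f_fname[6];
    char     f_fpack[6];
};

int main(void)
{
    struct kstatfs_like sbuf = { .f_bfree = 123456, .f_ffree = 7890 };
    struct compat_ustat_like tmp;

    /* memset() first so padding and the unused name fields are zeroed,
     * as the kernel version does before copy_to_user(). */
    memset(&tmp, 0, sizeof(tmp));
    tmp.f_tfree = (uint32_t)sbuf.f_bfree;
    tmp.f_tinode = (uint32_t)sbuf.f_ffree;

    printf("tfree=%u tinode=%u\n",
           (unsigned)tmp.f_tfree, (unsigned)tmp.f_tinode);
    return 0;
}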
diff --git a/fs/compat_ioctl.c b/fs/compat_ioctl.c
index 45e59d3c7f1..ff786687e93 100644
--- a/fs/compat_ioctl.c
+++ b/fs/compat_ioctl.c
@@ -522,6 +522,11 @@ static int dev_ifsioc(unsigned int fd, unsigned int cmd, unsigned long arg)
if (err)
return -EFAULT;
break;
+ case SIOCSHWTSTAMP:
+ if (copy_from_user(&ifr, uifr32, sizeof(*uifr32)))
+ return -EFAULT;
+ ifr.ifr_data = compat_ptr(uifr32->ifr_ifru.ifru_data);
+ break;
default:
if (copy_from_user(&ifr, uifr32, sizeof(*uifr32)))
return -EFAULT;
@@ -1993,6 +1998,8 @@ COMPATIBLE_IOCTL(TUNSETGROUP)
COMPATIBLE_IOCTL(TUNGETFEATURES)
COMPATIBLE_IOCTL(TUNSETOFFLOAD)
COMPATIBLE_IOCTL(TUNSETTXFILTER)
+COMPATIBLE_IOCTL(TUNGETSNDBUF)
+COMPATIBLE_IOCTL(TUNSETSNDBUF)
/* Big V */
COMPATIBLE_IOCTL(VT_SETMODE)
COMPATIBLE_IOCTL(VT_GETMODE)
@@ -2566,6 +2573,7 @@ HANDLE_IOCTL(SIOCSIFMAP, dev_ifsioc)
HANDLE_IOCTL(SIOCGIFADDR, dev_ifsioc)
HANDLE_IOCTL(SIOCSIFADDR, dev_ifsioc)
HANDLE_IOCTL(SIOCSIFHWBROADCAST, dev_ifsioc)
+HANDLE_IOCTL(SIOCSHWTSTAMP, dev_ifsioc)
/* ioctls used by appletalk ddp.c */
HANDLE_IOCTL(SIOCATALKDIFADDR, dev_ifsioc)
diff --git a/fs/configfs/dir.c b/fs/configfs/dir.c
index 8e93341f3e8..05373db21a4 100644
--- a/fs/configfs/dir.c
+++ b/fs/configfs/dir.c
@@ -72,7 +72,7 @@ static int configfs_d_delete(struct dentry *dentry)
return 1;
}
-static struct dentry_operations configfs_dentry_ops = {
+static const struct dentry_operations configfs_dentry_ops = {
.d_iput = configfs_d_iput,
/* simple_delete_dentry() isn't exported */
.d_delete = configfs_d_delete,
diff --git a/fs/dcache.c b/fs/dcache.c
index 07e2d4a44bd..90bbd7e1b11 100644
--- a/fs/dcache.c
+++ b/fs/dcache.c
@@ -1247,15 +1247,18 @@ struct dentry *d_add_ci(struct dentry *dentry, struct inode *inode,
struct dentry *found;
struct dentry *new;
- /* Does a dentry matching the name exist already? */
+ /*
+ * First check if a dentry matching the name already exists,
+ * if not go ahead and create it now.
+ */
found = d_hash_and_lookup(dentry->d_parent, name);
- /* If not, create it now and return */
if (!found) {
new = d_alloc(dentry->d_parent, name);
if (!new) {
error = -ENOMEM;
goto err_out;
}
+
found = d_splice_alias(inode, new);
if (found) {
dput(new);
@@ -1263,61 +1266,46 @@ struct dentry *d_add_ci(struct dentry *dentry, struct inode *inode,
}
return new;
}
- /* Matching dentry exists, check if it is negative. */
+
+ /*
+ * If a matching dentry exists and it's not negative, use it.
+ *
+ * Decrement the reference count to balance the iget() done
+ * earlier on.
+ */
if (found->d_inode) {
if (unlikely(found->d_inode != inode)) {
/* This can't happen because bad inodes are unhashed. */
BUG_ON(!is_bad_inode(inode));
BUG_ON(!is_bad_inode(found->d_inode));
}
- /*
- * Already have the inode and the dentry attached, decrement
- * the reference count to balance the iget() done
- * earlier on. We found the dentry using d_lookup() so it
- * cannot be disconnected and thus we do not need to worry
- * about any NFS/disconnectedness issues here.
- */
iput(inode);
return found;
}
+
/*
* Negative dentry: instantiate it unless the inode is a directory and
- * has a 'disconnected' dentry (i.e. IS_ROOT and DCACHE_DISCONNECTED),
- * in which case d_move() that in place of the found dentry.
+ * already has a dentry.
*/
- if (!S_ISDIR(inode->i_mode)) {
- /* Not a directory; everything is easy. */
- d_instantiate(found, inode);
- return found;
- }
spin_lock(&dcache_lock);
- if (list_empty(&inode->i_dentry)) {
- /*
- * Directory without a 'disconnected' dentry; we need to do
- * d_instantiate() by hand because it takes dcache_lock which
- * we already hold.
- */
+ if (!S_ISDIR(inode->i_mode) || list_empty(&inode->i_dentry)) {
__d_instantiate(found, inode);
spin_unlock(&dcache_lock);
security_d_instantiate(found, inode);
return found;
}
+
/*
- * Directory with a 'disconnected' dentry; get a reference to the
- * 'disconnected' dentry.
+ * In case a directory already has a (disconnected) entry grab a
+ * reference to it, move it in place and use it.
*/
new = list_entry(inode->i_dentry.next, struct dentry, d_alias);
dget_locked(new);
spin_unlock(&dcache_lock);
- /* Do security vodoo. */
security_d_instantiate(found, inode);
- /* Move new in place of found. */
d_move(new, found);
- /* Balance the iget() we did above. */
iput(inode);
- /* Throw away found. */
dput(found);
- /* Use new as the actual dentry. */
return new;
err_out:
diff --git a/fs/devpts/inode.c b/fs/devpts/inode.c
index bff4052b05e..63a4a59e414 100644
--- a/fs/devpts/inode.c
+++ b/fs/devpts/inode.c
@@ -322,177 +322,81 @@ static int compare_init_pts_sb(struct super_block *s, void *p)
}
/*
- * Safely parse the mount options in @data and update @opts.
+ * devpts_get_sb()
*
- * devpts ends up parsing options two times during mount, due to the
- * two modes of operation it supports. The first parse occurs in
- * devpts_get_sb() when determining the mode (single-instance or
- * multi-instance mode). The second parse happens in devpts_remount()
- * or new_pts_mount() depending on the mode.
+ * If the '-o newinstance' mount option was specified, mount a new
+ * (private) instance of devpts. PTYs created in this instance are
+ * independent of the PTYs in other devpts instances.
*
- * Parsing of options modifies the @data making subsequent parsing
- * incorrect. So make a local copy of @data and parse it.
+ * If the '-o newinstance' option was not specified, mount/remount the
+ * initial kernel mount of devpts. This type of mount gives the
+ * legacy, single-instance semantics.
*
- * Return: 0 On success, -errno on error
- */
-static int safe_parse_mount_options(void *data, struct pts_mount_opts *opts)
-{
- int rc;
- void *datacp;
-
- if (!data)
- return 0;
-
- /* Use kstrdup() ? */
- datacp = kmalloc(PAGE_SIZE, GFP_KERNEL);
- if (!datacp)
- return -ENOMEM;
-
- memcpy(datacp, data, PAGE_SIZE);
- rc = parse_mount_options((char *)datacp, PARSE_MOUNT, opts);
- kfree(datacp);
-
- return rc;
-}
-
-/*
- * Mount a new (private) instance of devpts. PTYs created in this
- * instance are independent of the PTYs in other devpts instances.
- */
-static int new_pts_mount(struct file_system_type *fs_type, int flags,
- void *data, struct vfsmount *mnt)
-{
- int err;
- struct pts_fs_info *fsi;
- struct pts_mount_opts *opts;
-
- err = get_sb_nodev(fs_type, flags, data, devpts_fill_super, mnt);
- if (err)
- return err;
-
- fsi = DEVPTS_SB(mnt->mnt_sb);
- opts = &fsi->mount_opts;
-
- err = parse_mount_options(data, PARSE_MOUNT, opts);
- if (err)
- goto fail;
-
- err = mknod_ptmx(mnt->mnt_sb);
- if (err)
- goto fail;
-
- return 0;
-
-fail:
- dput(mnt->mnt_sb->s_root);
- deactivate_super(mnt->mnt_sb);
- return err;
-}
-
-/*
- * Check if 'newinstance' mount option was specified in @data.
+ * The 'newinstance' option is needed to support multiple namespace
+ * semantics in devpts while preserving backward compatibility of the
+ * current 'single-namespace' semantics, i.e. all mounts of devpts
+ * without the 'newinstance' mount option should bind to the initial
+ * kernel mount, like get_sb_single().
*
- * Return: -errno on error (eg: invalid mount options specified)
- * : 1 if 'newinstance' mount option was specified
- * : 0 if 'newinstance' mount option was NOT specified
- */
-static int is_new_instance_mount(void *data)
-{
- int rc;
- struct pts_mount_opts opts;
-
- if (!data)
- return 0;
-
- rc = safe_parse_mount_options(data, &opts);
- if (!rc)
- rc = opts.newinstance;
-
- return rc;
-}
-
-/*
- * get_init_pts_sb()
- *
- * This interface is needed to support multiple namespace semantics in
- * devpts while preserving backward compatibility of the current 'single-
- * namespace' semantics. i.e all mounts of devpts without the 'newinstance'
- * mount option should bind to the initial kernel mount, like
- * get_sb_single().
+ * Mounts with 'newinstance' option create a new, private namespace.
*
- * Mounts with 'newinstance' option create a new private namespace.
+ * NOTE:
*
- * But for single-mount semantics, devpts cannot use get_sb_single(),
+ * For single-mount semantics, devpts cannot use get_sb_single(),
* because get_sb_single()/sget() find and use the super-block from
* the most recent mount of devpts. But that recent mount may be a
* 'newinstance' mount and get_sb_single() would pick the newinstance
* super-block instead of the initial super-block.
- *
- * This interface is identical to get_sb_single() except that it
- * consistently selects the 'single-namespace' superblock even in the
- * presence of the private namespace (i.e 'newinstance') super-blocks.
*/
-static int get_init_pts_sb(struct file_system_type *fs_type, int flags,
- void *data, struct vfsmount *mnt)
+static int devpts_get_sb(struct file_system_type *fs_type,
+ int flags, const char *dev_name, void *data, struct vfsmount *mnt)
{
- struct super_block *s;
int error;
+ struct pts_mount_opts opts;
+ struct super_block *s;
+
+ memset(&opts, 0, sizeof(opts));
+ if (data) {
+ error = parse_mount_options(data, PARSE_MOUNT, &opts);
+ if (error)
+ return error;
+ }
+
+ if (opts.newinstance)
+ s = sget(fs_type, NULL, set_anon_super, NULL);
+ else
+ s = sget(fs_type, compare_init_pts_sb, set_anon_super, NULL);
- s = sget(fs_type, compare_init_pts_sb, set_anon_super, NULL);
if (IS_ERR(s))
return PTR_ERR(s);
if (!s->s_root) {
s->s_flags = flags;
error = devpts_fill_super(s, data, flags & MS_SILENT ? 1 : 0);
- if (error) {
- up_write(&s->s_umount);
- deactivate_super(s);
- return error;
- }
+ if (error)
+ goto out_undo_sget;
s->s_flags |= MS_ACTIVE;
}
- do_remount_sb(s, flags, data, 0);
- return simple_set_mnt(mnt, s);
-}
-/*
- * Mount or remount the initial kernel mount of devpts. This type of
- * mount maintains the legacy, single-instance semantics, while the
- * kernel still allows multiple-instances.
- */
-static int init_pts_mount(struct file_system_type *fs_type, int flags,
- void *data, struct vfsmount *mnt)
-{
- int err;
+ simple_set_mnt(mnt, s);
- err = get_init_pts_sb(fs_type, flags, data, mnt);
- if (err)
- return err;
+ memcpy(&(DEVPTS_SB(s))->mount_opts, &opts, sizeof(opts));
- err = mknod_ptmx(mnt->mnt_sb);
- if (err) {
- dput(mnt->mnt_sb->s_root);
- deactivate_super(mnt->mnt_sb);
- }
+ error = mknod_ptmx(s);
+ if (error)
+ goto out_dput;
- return err;
-}
-
-static int devpts_get_sb(struct file_system_type *fs_type,
- int flags, const char *dev_name, void *data, struct vfsmount *mnt)
-{
- int new;
-
- new = is_new_instance_mount(data);
- if (new < 0)
- return new;
+ return 0;
- if (new)
- return new_pts_mount(fs_type, flags, data, mnt);
+out_dput:
+ dput(s->s_root);
- return init_pts_mount(fs_type, flags, data, mnt);
+out_undo_sget:
+ up_write(&s->s_umount);
+ deactivate_super(s);
+ return error;
}
+
#else
/*
* This supports only the legacy single-instance semantics (no
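
The rewritten devpts_get_sb() folds the old two-pass option parsing into a single parse followed by a choice of sget() comparator. A stand-alone sketch of that decision flow, with deliberately simplified option parsing and string labels standing in for the two comparators:

/* Sketch of the decision flow in the rewritten devpts_get_sb():
 * parse options once, then pick which superblock (if any) to reuse.
 * Parsing and comparator handling are simplified stand-ins. */
#include <stdio.h>
#include <string.h>

struct pts_opts { int newinstance; };

static void parse_opts(const char *data, struct pts_opts *opts)
{
    memset(opts, 0, sizeof(*opts));
    if (data && strstr(data, "newinstance"))
        opts->newinstance = 1;
}

/* stand-ins for the two sget() comparators */
static const char *pick_comparator(const struct pts_opts *opts)
{
    return opts->newinstance ? "always-new superblock"
                             : "initial kernel-mount superblock";
}

int main(void)
{
    struct pts_opts opts;

    parse_opts("mode=620,newinstance", &opts);
    printf("-o newinstance -> %s\n", pick_comparator(&opts));

    parse_opts("mode=620", &opts);
    printf("default        -> %s\n", pick_comparator(&opts));
    return 0;
}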
diff --git a/fs/dlm/dir.c b/fs/dlm/dir.c
index 92969f879a1..858fba14aaa 100644
--- a/fs/dlm/dir.c
+++ b/fs/dlm/dir.c
@@ -156,7 +156,7 @@ void dlm_dir_remove_entry(struct dlm_ls *ls, int nodeid, char *name, int namelen
bucket = dir_hash(ls, name, namelen);
- write_lock(&ls->ls_dirtbl[bucket].lock);
+ spin_lock(&ls->ls_dirtbl[bucket].lock);
de = search_bucket(ls, name, namelen, bucket);
@@ -173,7 +173,7 @@ void dlm_dir_remove_entry(struct dlm_ls *ls, int nodeid, char *name, int namelen
list_del(&de->list);
kfree(de);
out:
- write_unlock(&ls->ls_dirtbl[bucket].lock);
+ spin_unlock(&ls->ls_dirtbl[bucket].lock);
}
void dlm_dir_clear(struct dlm_ls *ls)
@@ -185,14 +185,14 @@ void dlm_dir_clear(struct dlm_ls *ls)
DLM_ASSERT(list_empty(&ls->ls_recover_list), );
for (i = 0; i < ls->ls_dirtbl_size; i++) {
- write_lock(&ls->ls_dirtbl[i].lock);
+ spin_lock(&ls->ls_dirtbl[i].lock);
head = &ls->ls_dirtbl[i].list;
while (!list_empty(head)) {
de = list_entry(head->next, struct dlm_direntry, list);
list_del(&de->list);
put_free_de(ls, de);
}
- write_unlock(&ls->ls_dirtbl[i].lock);
+ spin_unlock(&ls->ls_dirtbl[i].lock);
}
}
@@ -307,17 +307,17 @@ static int get_entry(struct dlm_ls *ls, int nodeid, char *name,
bucket = dir_hash(ls, name, namelen);
- write_lock(&ls->ls_dirtbl[bucket].lock);
+ spin_lock(&ls->ls_dirtbl[bucket].lock);
de = search_bucket(ls, name, namelen, bucket);
if (de) {
*r_nodeid = de->master_nodeid;
- write_unlock(&ls->ls_dirtbl[bucket].lock);
+ spin_unlock(&ls->ls_dirtbl[bucket].lock);
if (*r_nodeid == nodeid)
return -EEXIST;
return 0;
}
- write_unlock(&ls->ls_dirtbl[bucket].lock);
+ spin_unlock(&ls->ls_dirtbl[bucket].lock);
if (namelen > DLM_RESNAME_MAXLEN)
return -EINVAL;
@@ -330,7 +330,7 @@ static int get_entry(struct dlm_ls *ls, int nodeid, char *name,
de->length = namelen;
memcpy(de->name, name, namelen);
- write_lock(&ls->ls_dirtbl[bucket].lock);
+ spin_lock(&ls->ls_dirtbl[bucket].lock);
tmp = search_bucket(ls, name, namelen, bucket);
if (tmp) {
kfree(de);
@@ -339,7 +339,7 @@ static int get_entry(struct dlm_ls *ls, int nodeid, char *name,
list_add_tail(&de->list, &ls->ls_dirtbl[bucket].list);
}
*r_nodeid = de->master_nodeid;
- write_unlock(&ls->ls_dirtbl[bucket].lock);
+ spin_unlock(&ls->ls_dirtbl[bucket].lock);
return 0;
}
diff --git a/fs/dlm/dlm_internal.h b/fs/dlm/dlm_internal.h
index 076e86f38bc..d01ca0a711d 100644
--- a/fs/dlm/dlm_internal.h
+++ b/fs/dlm/dlm_internal.h
@@ -99,7 +99,7 @@ struct dlm_direntry {
struct dlm_dirtable {
struct list_head list;
- rwlock_t lock;
+ spinlock_t lock;
};
struct dlm_rsbtable {
diff --git a/fs/dlm/lock.c b/fs/dlm/lock.c
index 01e7d39c5fb..205ec95b347 100644
--- a/fs/dlm/lock.c
+++ b/fs/dlm/lock.c
@@ -835,7 +835,7 @@ static int add_to_waiters(struct dlm_lkb *lkb, int mstype)
lkb->lkb_wait_count++;
hold_lkb(lkb);
- log_debug(ls, "add overlap %x cur %d new %d count %d flags %x",
+ log_debug(ls, "addwait %x cur %d overlap %d count %d f %x",
lkb->lkb_id, lkb->lkb_wait_type, mstype,
lkb->lkb_wait_count, lkb->lkb_flags);
goto out;
@@ -851,7 +851,7 @@ static int add_to_waiters(struct dlm_lkb *lkb, int mstype)
list_add(&lkb->lkb_wait_reply, &ls->ls_waiters);
out:
if (error)
- log_error(ls, "add_to_waiters %x error %d flags %x %d %d %s",
+ log_error(ls, "addwait error %x %d flags %x %d %d %s",
lkb->lkb_id, error, lkb->lkb_flags, mstype,
lkb->lkb_wait_type, lkb->lkb_resource->res_name);
mutex_unlock(&ls->ls_waiters_mutex);
@@ -863,23 +863,55 @@ static int add_to_waiters(struct dlm_lkb *lkb, int mstype)
request reply on the requestqueue) between dlm_recover_waiters_pre() which
set RESEND and dlm_recover_waiters_post() */
-static int _remove_from_waiters(struct dlm_lkb *lkb, int mstype)
+static int _remove_from_waiters(struct dlm_lkb *lkb, int mstype,
+ struct dlm_message *ms)
{
struct dlm_ls *ls = lkb->lkb_resource->res_ls;
int overlap_done = 0;
if (is_overlap_unlock(lkb) && (mstype == DLM_MSG_UNLOCK_REPLY)) {
+ log_debug(ls, "remwait %x unlock_reply overlap", lkb->lkb_id);
lkb->lkb_flags &= ~DLM_IFL_OVERLAP_UNLOCK;
overlap_done = 1;
goto out_del;
}
if (is_overlap_cancel(lkb) && (mstype == DLM_MSG_CANCEL_REPLY)) {
+ log_debug(ls, "remwait %x cancel_reply overlap", lkb->lkb_id);
lkb->lkb_flags &= ~DLM_IFL_OVERLAP_CANCEL;
overlap_done = 1;
goto out_del;
}
+ /* Cancel state was preemptively cleared by a successful convert,
+ see next comment, nothing to do. */
+
+ if ((mstype == DLM_MSG_CANCEL_REPLY) &&
+ (lkb->lkb_wait_type != DLM_MSG_CANCEL)) {
+ log_debug(ls, "remwait %x cancel_reply wait_type %d",
+ lkb->lkb_id, lkb->lkb_wait_type);
+ return -1;
+ }
+
+ /* Remove for the convert reply, and preemptively remove for the
+ cancel reply. A convert has been granted while there's still
+ an outstanding cancel on it (the cancel is moot and the result
+ in the cancel reply should be 0). We preempt the cancel reply
+ because the app gets the convert result and then can follow up
+ with another op, like convert. This subsequent op would see the
+ lingering state of the cancel and fail with -EBUSY. */
+
+ if ((mstype == DLM_MSG_CONVERT_REPLY) &&
+ (lkb->lkb_wait_type == DLM_MSG_CONVERT) &&
+ is_overlap_cancel(lkb) && ms && !ms->m_result) {
+ log_debug(ls, "remwait %x convert_reply zap overlap_cancel",
+ lkb->lkb_id);
+ lkb->lkb_wait_type = 0;
+ lkb->lkb_flags &= ~DLM_IFL_OVERLAP_CANCEL;
+ lkb->lkb_wait_count--;
+ goto out_del;
+ }
+
/* N.B. type of reply may not always correspond to type of original
msg due to lookup->request optimization, verify others? */
@@ -888,8 +920,8 @@ static int _remove_from_waiters(struct dlm_lkb *lkb, int mstype)
goto out_del;
}
- log_error(ls, "remove_from_waiters lkid %x flags %x types %d %d",
- lkb->lkb_id, lkb->lkb_flags, mstype, lkb->lkb_wait_type);
+ log_error(ls, "remwait error %x reply %d flags %x no wait_type",
+ lkb->lkb_id, mstype, lkb->lkb_flags);
return -1;
out_del:
@@ -899,7 +931,7 @@ static int _remove_from_waiters(struct dlm_lkb *lkb, int mstype)
this would happen */
if (overlap_done && lkb->lkb_wait_type) {
- log_error(ls, "remove_from_waiters %x reply %d give up on %d",
+ log_error(ls, "remwait error %x reply %d wait_type %d overlap",
lkb->lkb_id, mstype, lkb->lkb_wait_type);
lkb->lkb_wait_count--;
lkb->lkb_wait_type = 0;
@@ -921,7 +953,7 @@ static int remove_from_waiters(struct dlm_lkb *lkb, int mstype)
int error;
mutex_lock(&ls->ls_waiters_mutex);
- error = _remove_from_waiters(lkb, mstype);
+ error = _remove_from_waiters(lkb, mstype, NULL);
mutex_unlock(&ls->ls_waiters_mutex);
return error;
}
@@ -936,7 +968,7 @@ static int remove_from_waiters_ms(struct dlm_lkb *lkb, struct dlm_message *ms)
if (ms != &ls->ls_stub_ms)
mutex_lock(&ls->ls_waiters_mutex);
- error = _remove_from_waiters(lkb, ms->m_type);
+ error = _remove_from_waiters(lkb, ms->m_type, ms);
if (ms != &ls->ls_stub_ms)
mutex_unlock(&ls->ls_waiters_mutex);
return error;
@@ -2083,6 +2115,11 @@ static int validate_lock_args(struct dlm_ls *ls, struct dlm_lkb *lkb,
lkb->lkb_timeout_cs = args->timeout;
rv = 0;
out:
+ if (rv)
+ log_debug(ls, "validate_lock_args %d %x %x %x %d %d %s",
+ rv, lkb->lkb_id, lkb->lkb_flags, args->flags,
+ lkb->lkb_status, lkb->lkb_wait_type,
+ lkb->lkb_resource->res_name);
return rv;
}
@@ -2149,6 +2186,13 @@ static int validate_unlock_args(struct dlm_lkb *lkb, struct dlm_args *args)
goto out;
}
+ /* there's nothing to cancel */
+ if (lkb->lkb_status == DLM_LKSTS_GRANTED &&
+ !lkb->lkb_wait_type) {
+ rv = -EBUSY;
+ goto out;
+ }
+
switch (lkb->lkb_wait_type) {
case DLM_MSG_LOOKUP:
case DLM_MSG_REQUEST:
diff --git a/fs/dlm/lockspace.c b/fs/dlm/lockspace.c
index aa32e5f0249..cd8e2df3c29 100644
--- a/fs/dlm/lockspace.c
+++ b/fs/dlm/lockspace.c
@@ -487,7 +487,7 @@ static int new_lockspace(char *name, int namelen, void **lockspace,
goto out_lkbfree;
for (i = 0; i < size; i++) {
INIT_LIST_HEAD(&ls->ls_dirtbl[i].list);
- rwlock_init(&ls->ls_dirtbl[i].lock);
+ spin_lock_init(&ls->ls_dirtbl[i].lock);
}
INIT_LIST_HEAD(&ls->ls_waiters);
diff --git a/fs/dlm/lowcomms.c b/fs/dlm/lowcomms.c
index 103a5ebd137..609108a8326 100644
--- a/fs/dlm/lowcomms.c
+++ b/fs/dlm/lowcomms.c
@@ -2,7 +2,7 @@
*******************************************************************************
**
** Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
-** Copyright (C) 2004-2007 Red Hat, Inc. All rights reserved.
+** Copyright (C) 2004-2009 Red Hat, Inc. All rights reserved.
**
** This copyrighted material is made available to anyone wishing to use,
** modify, copy, or redistribute it subject to the terms and conditions
@@ -21,7 +21,7 @@
*
* Cluster nodes are referred to by their nodeids. nodeids are
* simply 32 bit numbers to the locking module - if they need to
- * be expanded for the cluster infrastructure then that is it's
+ * be expanded for the cluster infrastructure then that is its
* responsibility. It is this layer's
* responsibility to resolve these into IP address or
* whatever it needs for inter-node communication.
@@ -36,9 +36,9 @@
* of high load. Also, this way, the sending thread can collect together
* messages bound for one node and send them in one block.
*
- * lowcomms will choose to use wither TCP or SCTP as its transport layer
+ * lowcomms will choose to use either TCP or SCTP as its transport layer
* depending on the configuration variable 'protocol'. This should be set
- * to 0 (default) for TCP or 1 for SCTP. It shouldbe configured using a
+ * to 0 (default) for TCP or 1 for SCTP. It should be configured using a
* cluster-wide mechanism as it must be the same on all nodes of the cluster
* for the DLM to function.
*
@@ -48,11 +48,11 @@
#include <net/sock.h>
#include <net/tcp.h>
#include <linux/pagemap.h>
-#include <linux/idr.h>
#include <linux/file.h>
#include <linux/mutex.h>
#include <linux/sctp.h>
#include <net/sctp/user.h>
+#include <net/ipv6.h>
#include "dlm_internal.h"
#include "lowcomms.h"
@@ -60,6 +60,7 @@
#include "config.h"
#define NEEDED_RMEM (4*1024*1024)
+#define CONN_HASH_SIZE 32
struct cbuf {
unsigned int base;
@@ -114,6 +115,7 @@ struct connection {
int retries;
#define MAX_CONNECT_RETRIES 3
int sctp_assoc;
+ struct hlist_node list;
struct connection *othercon;
struct work_struct rwork; /* Receive workqueue */
struct work_struct swork; /* Send workqueue */
@@ -138,14 +140,37 @@ static int dlm_local_count;
static struct workqueue_struct *recv_workqueue;
static struct workqueue_struct *send_workqueue;
-static DEFINE_IDR(connections_idr);
+static struct hlist_head connection_hash[CONN_HASH_SIZE];
static DEFINE_MUTEX(connections_lock);
-static int max_nodeid;
static struct kmem_cache *con_cache;
static void process_recv_sockets(struct work_struct *work);
static void process_send_sockets(struct work_struct *work);
+
+/* This is deliberately very simple because most clusters have simple
+ sequential nodeids, so we should be able to go straight to a connection
+ struct in the array */
+static inline int nodeid_hash(int nodeid)
+{
+ return nodeid & (CONN_HASH_SIZE-1);
+}
+
+static struct connection *__find_con(int nodeid)
+{
+ int r;
+ struct hlist_node *h;
+ struct connection *con;
+
+ r = nodeid_hash(nodeid);
+
+ hlist_for_each_entry(con, h, &connection_hash[r], list) {
+ if (con->nodeid == nodeid)
+ return con;
+ }
+ return NULL;
+}
+
/*
* If 'allocation' is zero then we don't attempt to create a new
* connection structure for this node.
@@ -154,31 +179,17 @@ static struct connection *__nodeid2con(int nodeid, gfp_t alloc)
{
struct connection *con = NULL;
int r;
- int n;
- con = idr_find(&connections_idr, nodeid);
+ con = __find_con(nodeid);
if (con || !alloc)
return con;
- r = idr_pre_get(&connections_idr, alloc);
- if (!r)
- return NULL;
-
con = kmem_cache_zalloc(con_cache, alloc);
if (!con)
return NULL;
- r = idr_get_new_above(&connections_idr, con, nodeid, &n);
- if (r) {
- kmem_cache_free(con_cache, con);
- return NULL;
- }
-
- if (n != nodeid) {
- idr_remove(&connections_idr, n);
- kmem_cache_free(con_cache, con);
- return NULL;
- }
+ r = nodeid_hash(nodeid);
+ hlist_add_head(&con->list, &connection_hash[r]);
con->nodeid = nodeid;
mutex_init(&con->sock_mutex);
@@ -189,19 +200,30 @@ static struct connection *__nodeid2con(int nodeid, gfp_t alloc)
/* Setup action pointers for child sockets */
if (con->nodeid) {
- struct connection *zerocon = idr_find(&connections_idr, 0);
+ struct connection *zerocon = __find_con(0);
con->connect_action = zerocon->connect_action;
if (!con->rx_action)
con->rx_action = zerocon->rx_action;
}
- if (nodeid > max_nodeid)
- max_nodeid = nodeid;
-
return con;
}
+/* Loop round all connections */
+static void foreach_conn(void (*conn_func)(struct connection *c))
+{
+ int i;
+ struct hlist_node *h, *n;
+ struct connection *con;
+
+ for (i = 0; i < CONN_HASH_SIZE; i++) {
+ hlist_for_each_entry_safe(con, h, n, &connection_hash[i], list){
+ conn_func(con);
+ }
+ }
+}
+
static struct connection *nodeid2con(int nodeid, gfp_t allocation)
{
struct connection *con;
@@ -217,14 +239,17 @@ static struct connection *nodeid2con(int nodeid, gfp_t allocation)
static struct connection *assoc2con(int assoc_id)
{
int i;
+ struct hlist_node *h;
struct connection *con;
mutex_lock(&connections_lock);
- for (i=0; i<=max_nodeid; i++) {
- con = __nodeid2con(i, 0);
- if (con && con->sctp_assoc == assoc_id) {
- mutex_unlock(&connections_lock);
- return con;
+
+ for (i = 0 ; i < CONN_HASH_SIZE; i++) {
+ hlist_for_each_entry(con, h, &connection_hash[i], list) {
+ if (con && con->sctp_assoc == assoc_id) {
+ mutex_unlock(&connections_lock);
+ return con;
+ }
}
}
mutex_unlock(&connections_lock);
@@ -250,8 +275,7 @@ static int nodeid_to_addr(int nodeid, struct sockaddr *retaddr)
} else {
struct sockaddr_in6 *in6 = (struct sockaddr_in6 *) &addr;
struct sockaddr_in6 *ret6 = (struct sockaddr_in6 *) retaddr;
- memcpy(&ret6->sin6_addr, &in6->sin6_addr,
- sizeof(in6->sin6_addr));
+ ipv6_addr_copy(&ret6->sin6_addr, &in6->sin6_addr);
}
return 0;
@@ -376,25 +400,23 @@ static void sctp_send_shutdown(sctp_assoc_t associd)
log_print("send EOF to node failed: %d", ret);
}
+static void sctp_init_failed_foreach(struct connection *con)
+{
+ con->sctp_assoc = 0;
+ if (test_and_clear_bit(CF_CONNECT_PENDING, &con->flags)) {
+ if (!test_and_set_bit(CF_WRITE_PENDING, &con->flags))
+ queue_work(send_workqueue, &con->swork);
+ }
+}
+
/* INIT failed but we don't know which node...
restart INIT on all pending nodes */
static void sctp_init_failed(void)
{
- int i;
- struct connection *con;
-
mutex_lock(&connections_lock);
- for (i=1; i<=max_nodeid; i++) {
- con = __nodeid2con(i, 0);
- if (!con)
- continue;
- con->sctp_assoc = 0;
- if (test_and_clear_bit(CF_CONNECT_PENDING, &con->flags)) {
- if (!test_and_set_bit(CF_WRITE_PENDING, &con->flags)) {
- queue_work(send_workqueue, &con->swork);
- }
- }
- }
+
+ foreach_conn(sctp_init_failed_foreach);
+
mutex_unlock(&connections_lock);
}
@@ -1313,13 +1335,10 @@ out_connect:
static void clean_one_writequeue(struct connection *con)
{
- struct list_head *list;
- struct list_head *temp;
+ struct writequeue_entry *e, *safe;
spin_lock(&con->writequeue_lock);
- list_for_each_safe(list, temp, &con->writequeue) {
- struct writequeue_entry *e =
- list_entry(list, struct writequeue_entry, list);
+ list_for_each_entry_safe(e, safe, &con->writequeue, list) {
list_del(&e->list);
free_entry(e);
}
@@ -1369,14 +1388,7 @@ static void process_send_sockets(struct work_struct *work)
/* Discard all entries on the write queues */
static void clean_writequeues(void)
{
- int nodeid;
-
- for (nodeid = 1; nodeid <= max_nodeid; nodeid++) {
- struct connection *con = __nodeid2con(nodeid, 0);
-
- if (con)
- clean_one_writequeue(con);
- }
+ foreach_conn(clean_one_writequeue);
}
static void work_stop(void)
@@ -1406,23 +1418,29 @@ static int work_start(void)
return 0;
}
-void dlm_lowcomms_stop(void)
+static void stop_conn(struct connection *con)
{
- int i;
- struct connection *con;
+ con->flags |= 0x0F;
+ if (con->sock)
+ con->sock->sk->sk_user_data = NULL;
+}
+static void free_conn(struct connection *con)
+{
+ close_connection(con, true);
+ if (con->othercon)
+ kmem_cache_free(con_cache, con->othercon);
+ hlist_del(&con->list);
+ kmem_cache_free(con_cache, con);
+}
+
+void dlm_lowcomms_stop(void)
+{
/* Set all the flags to prevent any
socket activity.
*/
mutex_lock(&connections_lock);
- for (i = 0; i <= max_nodeid; i++) {
- con = __nodeid2con(i, 0);
- if (con) {
- con->flags |= 0x0F;
- if (con->sock)
- con->sock->sk->sk_user_data = NULL;
- }
- }
+ foreach_conn(stop_conn);
mutex_unlock(&connections_lock);
work_stop();
@@ -1430,25 +1448,20 @@ void dlm_lowcomms_stop(void)
mutex_lock(&connections_lock);
clean_writequeues();
- for (i = 0; i <= max_nodeid; i++) {
- con = __nodeid2con(i, 0);
- if (con) {
- close_connection(con, true);
- if (con->othercon)
- kmem_cache_free(con_cache, con->othercon);
- kmem_cache_free(con_cache, con);
- }
- }
- max_nodeid = 0;
+ foreach_conn(free_conn);
+
mutex_unlock(&connections_lock);
kmem_cache_destroy(con_cache);
- idr_init(&connections_idr);
}
int dlm_lowcomms_start(void)
{
int error = -EINVAL;
struct connection *con;
+ int i;
+
+ for (i = 0; i < CONN_HASH_SIZE; i++)
+ INIT_HLIST_HEAD(&connection_hash[i]);
init_local();
if (!dlm_local_count) {
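
The connection lookup in lowcomms now goes through a small fixed-size hash table keyed by nodeid instead of an IDR. The sketch below reproduces the bucket math and lookup in userspace; plain singly linked lists stand in for the kernel's hlists and there is no locking, so it only shows the shape of the data structure:

/* Userspace sketch of the nodeid -> bucket mapping and lookup that
 * replaces the IDR in lowcomms. Singly linked lists stand in for hlists. */
#include <stdio.h>
#include <stdlib.h>

#define CONN_HASH_SIZE 32           /* power of two, so the mask works */

struct connection {
    int nodeid;
    struct connection *next;
};

static struct connection *connection_hash[CONN_HASH_SIZE];

static int nodeid_hash(int nodeid)
{
    return nodeid & (CONN_HASH_SIZE - 1);
}

static struct connection *find_con(int nodeid)
{
    struct connection *con;

    for (con = connection_hash[nodeid_hash(nodeid)]; con; con = con->next)
        if (con->nodeid == nodeid)
            return con;
    return NULL;
}

static struct connection *add_con(int nodeid)
{
    int r = nodeid_hash(nodeid);
    struct connection *con = calloc(1, sizeof(*con));

    con->nodeid = nodeid;
    con->next = connection_hash[r];
    connection_hash[r] = con;
    return con;
}

int main(void)
{
    add_con(1);
    add_con(33);                    /* hashes to the same bucket as nodeid 1 */
    printf("found %d and %d in bucket %d\n",
           find_con(1)->nodeid, find_con(33)->nodeid, nodeid_hash(33));
    return 0;
}

Because CONN_HASH_SIZE is a power of two, nodeid & (CONN_HASH_SIZE - 1) is a cheap modulo, and clusters with small sequential nodeids mostly land in distinct buckets, which is what the comment in the patch is counting on.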
diff --git a/fs/dlm/user.c b/fs/dlm/user.c
index 065149e84f4..ebce994ab0b 100644
--- a/fs/dlm/user.c
+++ b/fs/dlm/user.c
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2006-2008 Red Hat, Inc. All rights reserved.
+ * Copyright (C) 2006-2009 Red Hat, Inc. All rights reserved.
*
* This copyrighted material is made available to anyone wishing to use,
* modify, copy, or redistribute it subject to the terms and conditions
@@ -84,7 +84,7 @@ struct dlm_lock_result32 {
static void compat_input(struct dlm_write_request *kb,
struct dlm_write_request32 *kb32,
- size_t count)
+ int namelen)
{
kb->version[0] = kb32->version[0];
kb->version[1] = kb32->version[1];
@@ -96,8 +96,7 @@ static void compat_input(struct dlm_write_request *kb,
kb->cmd == DLM_USER_REMOVE_LOCKSPACE) {
kb->i.lspace.flags = kb32->i.lspace.flags;
kb->i.lspace.minor = kb32->i.lspace.minor;
- memcpy(kb->i.lspace.name, kb32->i.lspace.name, count -
- offsetof(struct dlm_write_request32, i.lspace.name));
+ memcpy(kb->i.lspace.name, kb32->i.lspace.name, namelen);
} else if (kb->cmd == DLM_USER_PURGE) {
kb->i.purge.nodeid = kb32->i.purge.nodeid;
kb->i.purge.pid = kb32->i.purge.pid;
@@ -115,8 +114,7 @@ static void compat_input(struct dlm_write_request *kb,
kb->i.lock.bastaddr = (void *)(long)kb32->i.lock.bastaddr;
kb->i.lock.lksb = (void *)(long)kb32->i.lock.lksb;
memcpy(kb->i.lock.lvb, kb32->i.lock.lvb, DLM_USER_LVB_LEN);
- memcpy(kb->i.lock.name, kb32->i.lock.name, count -
- offsetof(struct dlm_write_request32, i.lock.name));
+ memcpy(kb->i.lock.name, kb32->i.lock.name, namelen);
}
}
@@ -539,9 +537,16 @@ static ssize_t device_write(struct file *file, const char __user *buf,
#ifdef CONFIG_COMPAT
if (!kbuf->is64bit) {
struct dlm_write_request32 *k32buf;
+ int namelen = 0;
+
+ if (count > sizeof(struct dlm_write_request32))
+ namelen = count - sizeof(struct dlm_write_request32);
+
k32buf = (struct dlm_write_request32 *)kbuf;
- kbuf = kmalloc(count + 1 + (sizeof(struct dlm_write_request) -
- sizeof(struct dlm_write_request32)), GFP_KERNEL);
+
+ /* add 1 after namelen so that the name string is terminated */
+ kbuf = kzalloc(sizeof(struct dlm_write_request) + namelen + 1,
+ GFP_KERNEL);
if (!kbuf) {
kfree(k32buf);
return -ENOMEM;
@@ -549,7 +554,8 @@ static ssize_t device_write(struct file *file, const char __user *buf,
if (proc)
set_bit(DLM_PROC_FLAGS_COMPAT, &proc->flags);
- compat_input(kbuf, k32buf, count + 1);
+
+ compat_input(kbuf, k32buf, namelen);
kfree(k32buf);
}
#endif
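
The compat path in device_write() now computes the name length explicitly (anything in the write beyond the fixed 32-bit request header) and allocates the native header plus the name plus one extra byte, so the copied name is always NUL terminated. A sketch of the arithmetic, with made-up struct sizes:

/* Sketch of the buffer sizing now done in device_write()'s compat path.
 * The struct sizes are illustrative; only the arithmetic matters. */
#include <stdio.h>
#include <stddef.h>

#define SIZEOF_REQ32 76     /* pretend sizeof(struct dlm_write_request32) */
#define SIZEOF_REQ   104    /* pretend sizeof(struct dlm_write_request)   */

int main(void)
{
    size_t count = 76 + 11; /* 32-bit header plus an 11-byte lock name */
    size_t namelen = 0;

    if (count > SIZEOF_REQ32)
        namelen = count - SIZEOF_REQ32;

    /* +1 so the copied name is always NUL terminated; zero-filling the
     * buffer (kzalloc in the kernel) keeps short names terminated too. */
    printf("namelen=%zu, allocate %zu bytes\n",
           namelen, (size_t)SIZEOF_REQ + namelen + 1);
    return 0;
}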
diff --git a/fs/drop_caches.c b/fs/drop_caches.c
index 3e5637fc377..44d725f612c 100644
--- a/fs/drop_caches.c
+++ b/fs/drop_caches.c
@@ -18,7 +18,7 @@ static void drop_pagecache_sb(struct super_block *sb)
spin_lock(&inode_lock);
list_for_each_entry(inode, &sb->s_inodes, i_sb_list) {
- if (inode->i_state & (I_FREEING|I_WILL_FREE))
+ if (inode->i_state & (I_FREEING|I_WILL_FREE|I_NEW))
continue;
if (inode->i_mapping->nrpages == 0)
continue;
diff --git a/fs/ecryptfs/dentry.c b/fs/ecryptfs/dentry.c
index 5e596583946..2dda5ade75b 100644
--- a/fs/ecryptfs/dentry.c
+++ b/fs/ecryptfs/dentry.c
@@ -89,7 +89,7 @@ static void ecryptfs_d_release(struct dentry *dentry)
return;
}
-struct dentry_operations ecryptfs_dops = {
+const struct dentry_operations ecryptfs_dops = {
.d_revalidate = ecryptfs_d_revalidate,
.d_release = ecryptfs_d_release,
};
diff --git a/fs/ecryptfs/ecryptfs_kernel.h b/fs/ecryptfs/ecryptfs_kernel.h
index ac749d4d644..064c5820e4e 100644
--- a/fs/ecryptfs/ecryptfs_kernel.h
+++ b/fs/ecryptfs/ecryptfs_kernel.h
@@ -580,7 +580,7 @@ extern const struct inode_operations ecryptfs_main_iops;
extern const struct inode_operations ecryptfs_dir_iops;
extern const struct inode_operations ecryptfs_symlink_iops;
extern const struct super_operations ecryptfs_sops;
-extern struct dentry_operations ecryptfs_dops;
+extern const struct dentry_operations ecryptfs_dops;
extern struct address_space_operations ecryptfs_aops;
extern int ecryptfs_verbosity;
extern unsigned int ecryptfs_message_buf_len;
diff --git a/fs/eventpoll.c b/fs/eventpoll.c
index 011b9b8c90c..c5c424f23fd 100644
--- a/fs/eventpoll.c
+++ b/fs/eventpoll.c
@@ -417,10 +417,10 @@ static int ep_remove(struct eventpoll *ep, struct epitem *epi)
ep_unregister_pollwait(ep, epi);
/* Remove the current item from the list of epoll hooks */
- spin_lock(&file->f_ep_lock);
+ spin_lock(&file->f_lock);
if (ep_is_linked(&epi->fllink))
list_del_init(&epi->fllink);
- spin_unlock(&file->f_ep_lock);
+ spin_unlock(&file->f_lock);
rb_erase(&epi->rbn, &ep->rbr);
@@ -538,7 +538,7 @@ void eventpoll_release_file(struct file *file)
struct epitem *epi;
/*
- * We don't want to get "file->f_ep_lock" because it is not
+ * We don't want to get "file->f_lock" because it is not
* necessary. It is not necessary because we're in the "struct file"
* cleanup path, and this means that noone is using this file anymore.
* So, for example, epoll_ctl() cannot hit here sicne if we reach this
@@ -547,6 +547,8 @@ void eventpoll_release_file(struct file *file)
* will correctly serialize the operation. We do need to acquire
* "ep->mtx" after "epmutex" because ep_remove() requires it when called
* from anywhere but ep_free().
+ *
+ * Besides, ep_remove() acquires the lock, so we can't hold it here.
*/
mutex_lock(&epmutex);
@@ -785,9 +787,9 @@ static int ep_insert(struct eventpoll *ep, struct epoll_event *event,
goto error_unregister;
/* Add the current item to the list of active epoll hook for this file */
- spin_lock(&tfile->f_ep_lock);
+ spin_lock(&tfile->f_lock);
list_add_tail(&epi->fllink, &tfile->f_ep_links);
- spin_unlock(&tfile->f_ep_lock);
+ spin_unlock(&tfile->f_lock);
/*
* Add the current item to the RB tree. All RB tree operations are
diff --git a/fs/ext2/balloc.c b/fs/ext2/balloc.c
index 4a29d637608..7f8d2e5a7ea 100644
--- a/fs/ext2/balloc.c
+++ b/fs/ext2/balloc.c
@@ -570,7 +570,7 @@ do_more:
error_return:
brelse(bitmap_bh);
release_blocks(sb, freed);
- DQUOT_FREE_BLOCK(inode, freed);
+ vfs_dq_free_block(inode, freed);
}
/**
@@ -1247,7 +1247,7 @@ ext2_fsblk_t ext2_new_blocks(struct inode *inode, ext2_fsblk_t goal,
/*
* Check quota for allocation of this block.
*/
- if (DQUOT_ALLOC_BLOCK(inode, num)) {
+ if (vfs_dq_alloc_block(inode, num)) {
*errp = -EDQUOT;
return 0;
}
@@ -1409,7 +1409,7 @@ allocated:
*errp = 0;
brelse(bitmap_bh);
- DQUOT_FREE_BLOCK(inode, *count-num);
+ vfs_dq_free_block(inode, *count-num);
*count = num;
return ret_block;
@@ -1420,7 +1420,7 @@ out:
* Undo the block allocation
*/
if (!performed_allocation)
- DQUOT_FREE_BLOCK(inode, *count);
+ vfs_dq_free_block(inode, *count);
brelse(bitmap_bh);
return 0;
}
diff --git a/fs/ext2/ialloc.c b/fs/ext2/ialloc.c
index 66321a877e7..15387c9c17d 100644
--- a/fs/ext2/ialloc.c
+++ b/fs/ext2/ialloc.c
@@ -121,8 +121,8 @@ void ext2_free_inode (struct inode * inode)
if (!is_bad_inode(inode)) {
/* Quota is already initialized in iput() */
ext2_xattr_delete_inode(inode);
- DQUOT_FREE_INODE(inode);
- DQUOT_DROP(inode);
+ vfs_dq_free_inode(inode);
+ vfs_dq_drop(inode);
}
es = EXT2_SB(sb)->s_es;
@@ -586,7 +586,7 @@ got:
goto fail_drop;
}
- if (DQUOT_ALLOC_INODE(inode)) {
+ if (vfs_dq_alloc_inode(inode)) {
err = -EDQUOT;
goto fail_drop;
}
@@ -605,10 +605,10 @@ got:
return inode;
fail_free_drop:
- DQUOT_FREE_INODE(inode);
+ vfs_dq_free_inode(inode);
fail_drop:
- DQUOT_DROP(inode);
+ vfs_dq_drop(inode);
inode->i_flags |= S_NOQUOTA;
inode->i_nlink = 0;
unlock_new_inode(inode);
diff --git a/fs/ext2/inode.c b/fs/ext2/inode.c
index 23fff2f8778..b43b9556366 100644
--- a/fs/ext2/inode.c
+++ b/fs/ext2/inode.c
@@ -1444,7 +1444,7 @@ int ext2_setattr(struct dentry *dentry, struct iattr *iattr)
return error;
if ((iattr->ia_valid & ATTR_UID && iattr->ia_uid != inode->i_uid) ||
(iattr->ia_valid & ATTR_GID && iattr->ia_gid != inode->i_gid)) {
- error = DQUOT_TRANSFER(inode, iattr) ? -EDQUOT : 0;
+ error = vfs_dq_transfer(inode, iattr) ? -EDQUOT : 0;
if (error)
return error;
}
diff --git a/fs/ext2/super.c b/fs/ext2/super.c
index 7c6e3606f0e..f983225266d 100644
--- a/fs/ext2/super.c
+++ b/fs/ext2/super.c
@@ -1331,6 +1331,7 @@ static ssize_t ext2_quota_read(struct super_block *sb, int type, char *data,
sb->s_blocksize - offset : toread;
tmp_bh.b_state = 0;
+ tmp_bh.b_size = sb->s_blocksize;
err = ext2_get_block(inode, blk, &tmp_bh, 0);
if (err < 0)
return err;
diff --git a/fs/ext2/xattr.c b/fs/ext2/xattr.c
index 987a5261cc2..7913531ec6d 100644
--- a/fs/ext2/xattr.c
+++ b/fs/ext2/xattr.c
@@ -642,7 +642,7 @@ ext2_xattr_set2(struct inode *inode, struct buffer_head *old_bh,
ea_bdebug(new_bh, "reusing block");
error = -EDQUOT;
- if (DQUOT_ALLOC_BLOCK(inode, 1)) {
+ if (vfs_dq_alloc_block(inode, 1)) {
unlock_buffer(new_bh);
goto cleanup;
}
@@ -699,7 +699,7 @@ ext2_xattr_set2(struct inode *inode, struct buffer_head *old_bh,
* as if nothing happened and cleanup the unused block */
if (error && error != -ENOSPC) {
if (new_bh && new_bh != old_bh)
- DQUOT_FREE_BLOCK(inode, 1);
+ vfs_dq_free_block(inode, 1);
goto cleanup;
}
} else
@@ -731,7 +731,7 @@ ext2_xattr_set2(struct inode *inode, struct buffer_head *old_bh,
le32_add_cpu(&HDR(old_bh)->h_refcount, -1);
if (ce)
mb_cache_entry_release(ce);
- DQUOT_FREE_BLOCK(inode, 1);
+ vfs_dq_free_block(inode, 1);
mark_buffer_dirty(old_bh);
ea_bdebug(old_bh, "refcount now=%d",
le32_to_cpu(HDR(old_bh)->h_refcount));
@@ -794,7 +794,7 @@ ext2_xattr_delete_inode(struct inode *inode)
mark_buffer_dirty(bh);
if (IS_SYNC(inode))
sync_dirty_buffer(bh);
- DQUOT_FREE_BLOCK(inode, 1);
+ vfs_dq_free_block(inode, 1);
}
EXT2_I(inode)->i_file_acl = 0;
diff --git a/fs/ext3/balloc.c b/fs/ext3/balloc.c
index 0dbf1c04847..225202db897 100644
--- a/fs/ext3/balloc.c
+++ b/fs/ext3/balloc.c
@@ -676,7 +676,7 @@ void ext3_free_blocks(handle_t *handle, struct inode *inode,
}
ext3_free_blocks_sb(handle, sb, block, count, &dquot_freed_blocks);
if (dquot_freed_blocks)
- DQUOT_FREE_BLOCK(inode, dquot_freed_blocks);
+ vfs_dq_free_block(inode, dquot_freed_blocks);
return;
}
@@ -1502,7 +1502,7 @@ ext3_fsblk_t ext3_new_blocks(handle_t *handle, struct inode *inode,
/*
* Check quota for allocation of this block.
*/
- if (DQUOT_ALLOC_BLOCK(inode, num)) {
+ if (vfs_dq_alloc_block(inode, num)) {
*errp = -EDQUOT;
return 0;
}
@@ -1714,7 +1714,7 @@ allocated:
*errp = 0;
brelse(bitmap_bh);
- DQUOT_FREE_BLOCK(inode, *count-num);
+ vfs_dq_free_block(inode, *count-num);
*count = num;
return ret_block;
@@ -1729,7 +1729,7 @@ out:
* Undo the block allocation
*/
if (!performed_allocation)
- DQUOT_FREE_BLOCK(inode, *count);
+ vfs_dq_free_block(inode, *count);
brelse(bitmap_bh);
return 0;
}
diff --git a/fs/ext3/ialloc.c b/fs/ext3/ialloc.c
index 8de6c720e51..dd13d60d524 100644
--- a/fs/ext3/ialloc.c
+++ b/fs/ext3/ialloc.c
@@ -123,10 +123,10 @@ void ext3_free_inode (handle_t *handle, struct inode * inode)
* Note: we must free any quota before locking the superblock,
* as writing the quota to disk may need the lock as well.
*/
- DQUOT_INIT(inode);
+ vfs_dq_init(inode);
ext3_xattr_delete_inode(handle, inode);
- DQUOT_FREE_INODE(inode);
- DQUOT_DROP(inode);
+ vfs_dq_free_inode(inode);
+ vfs_dq_drop(inode);
is_directory = S_ISDIR(inode->i_mode);
@@ -589,7 +589,7 @@ got:
sizeof(struct ext3_inode) - EXT3_GOOD_OLD_INODE_SIZE : 0;
ret = inode;
- if(DQUOT_ALLOC_INODE(inode)) {
+ if (vfs_dq_alloc_inode(inode)) {
err = -EDQUOT;
goto fail_drop;
}
@@ -620,10 +620,10 @@ really_out:
return ret;
fail_free_drop:
- DQUOT_FREE_INODE(inode);
+ vfs_dq_free_inode(inode);
fail_drop:
- DQUOT_DROP(inode);
+ vfs_dq_drop(inode);
inode->i_flags |= S_NOQUOTA;
inode->i_nlink = 0;
unlock_new_inode(inode);
diff --git a/fs/ext3/inode.c b/fs/ext3/inode.c
index 5fa453b49a6..4a09ff16987 100644
--- a/fs/ext3/inode.c
+++ b/fs/ext3/inode.c
@@ -1435,6 +1435,10 @@ static int journal_dirty_data_fn(handle_t *handle, struct buffer_head *bh)
return 0;
}
+static int buffer_unmapped(handle_t *handle, struct buffer_head *bh)
+{
+ return !buffer_mapped(bh);
+}
/*
* Note that we always start a transaction even if we're not journalling
* data. This is to preserve ordering: any hole instantiation within
@@ -1505,6 +1509,15 @@ static int ext3_ordered_writepage(struct page *page,
if (ext3_journal_current_handle())
goto out_fail;
+ if (!page_has_buffers(page)) {
+ create_empty_buffers(page, inode->i_sb->s_blocksize,
+ (1 << BH_Dirty)|(1 << BH_Uptodate));
+ } else if (!walk_page_buffers(NULL, page_buffers(page), 0,
+ PAGE_CACHE_SIZE, NULL, buffer_unmapped)) {
+ /* Provide NULL instead of get_block so that we catch bugs
+ if buffers weren't really mapped */
+ return block_write_full_page(page, NULL, wbc);
+ }
+ page_bufs = page_buffers(page);
+
handle = ext3_journal_start(inode, ext3_writepage_trans_blocks(inode));
if (IS_ERR(handle)) {
@@ -1512,11 +1525,6 @@ static int ext3_ordered_writepage(struct page *page,
goto out_fail;
}
- if (!page_has_buffers(page)) {
- create_empty_buffers(page, inode->i_sb->s_blocksize,
- (1 << BH_Dirty)|(1 << BH_Uptodate));
- }
- page_bufs = page_buffers(page);
walk_page_buffers(handle, page_bufs, 0,
PAGE_CACHE_SIZE, NULL, bget_one);
@@ -3055,7 +3063,7 @@ int ext3_setattr(struct dentry *dentry, struct iattr *attr)
error = PTR_ERR(handle);
goto err_out;
}
- error = DQUOT_TRANSFER(inode, attr) ? -EDQUOT : 0;
+ error = vfs_dq_transfer(inode, attr) ? -EDQUOT : 0;
if (error) {
ext3_journal_stop(handle);
return error;
@@ -3146,7 +3154,7 @@ static int ext3_writepage_trans_blocks(struct inode *inode)
ret = 2 * (bpp + indirects) + 2;
#ifdef CONFIG_QUOTA
- /* We know that structure was already allocated during DQUOT_INIT so
+ /* We know that structure was already allocated during vfs_dq_init so
* we will be updating only the data blocks + inodes */
ret += 2*EXT3_QUOTA_TRANS_BLOCKS(inode->i_sb);
#endif
@@ -3237,7 +3245,7 @@ int ext3_mark_inode_dirty(handle_t *handle, struct inode *inode)
* i_size has been changed by generic_commit_write() and we thus need
* to include the updated inode in the current transaction.
*
- * Also, DQUOT_ALLOC_SPACE() will always dirty the inode when blocks
+ * Also, vfs_dq_alloc_space() will always dirty the inode when blocks
* are allocated to the file.
*
* If the inode is marked synchronous, we don't honour that here - doing
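The ext3_ordered_writepage() change above is the one non-trivial ext3 hunk: buffer setup is moved in front of the journal start so that a page whose buffers are all already mapped can be written without opening a transaction at all. A condensed fragment of that fast path follows, with the reasoning spelled out in comments; it reuses buffer_unmapped() and walk_page_buffers() from fs/ext3/inode.c and is a sketch, not the full function.

/* Condensed sketch of the fast path added to ext3_ordered_writepage(). */
	if (!page_has_buffers(page)) {
		/* no buffers yet: create them dirty and uptodate, then take
		 * the normal journalled path below */
		create_empty_buffers(page, inode->i_sb->s_blocksize,
				     (1 << BH_Dirty) | (1 << BH_Uptodate));
	} else if (!walk_page_buffers(NULL, page_buffers(page), 0,
				      PAGE_CACHE_SIZE, NULL, buffer_unmapped)) {
		/*
		 * Every buffer is already mapped, so writing this page cannot
		 * allocate blocks and needs no transaction.  Passing NULL as
		 * get_block turns any unexpected allocation into a loud bug.
		 */
		return block_write_full_page(page, NULL, wbc);
	}
	page_bufs = page_buffers(page);

	handle = ext3_journal_start(inode, ext3_writepage_trans_blocks(inode));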
diff --git a/fs/ext3/namei.c b/fs/ext3/namei.c
index 4db4ffa1eda..e2fc63cbba8 100644
--- a/fs/ext3/namei.c
+++ b/fs/ext3/namei.c
@@ -2049,7 +2049,7 @@ static int ext3_rmdir (struct inode * dir, struct dentry *dentry)
/* Initialize quotas before so that eventual writes go in
* separate transaction */
- DQUOT_INIT(dentry->d_inode);
+ vfs_dq_init(dentry->d_inode);
handle = ext3_journal_start(dir, EXT3_DELETE_TRANS_BLOCKS(dir->i_sb));
if (IS_ERR(handle))
return PTR_ERR(handle);
@@ -2108,7 +2108,7 @@ static int ext3_unlink(struct inode * dir, struct dentry *dentry)
/* Initialize quotas before so that eventual writes go
* in separate transaction */
- DQUOT_INIT(dentry->d_inode);
+ vfs_dq_init(dentry->d_inode);
handle = ext3_journal_start(dir, EXT3_DELETE_TRANS_BLOCKS(dir->i_sb));
if (IS_ERR(handle))
return PTR_ERR(handle);
@@ -2272,7 +2272,7 @@ static int ext3_rename (struct inode * old_dir, struct dentry *old_dentry,
/* Initialize quotas before so that eventual writes go
* in separate transaction */
if (new_dentry->d_inode)
- DQUOT_INIT(new_dentry->d_inode);
+ vfs_dq_init(new_dentry->d_inode);
handle = ext3_journal_start(old_dir, 2 *
EXT3_DATA_TRANS_BLOCKS(old_dir->i_sb) +
EXT3_INDEX_EXTRA_TRANS_BLOCKS + 2);
diff --git a/fs/ext3/super.c b/fs/ext3/super.c
index 4a970411a45..9e5b8e387e1 100644
--- a/fs/ext3/super.c
+++ b/fs/ext3/super.c
@@ -707,8 +707,6 @@ static int bdev_try_to_free_page(struct super_block *sb, struct page *page,
#define QTYPE2NAME(t) ((t)==USRQUOTA?"user":"group")
#define QTYPE2MOPT(on, t) ((t)==USRQUOTA?((on)##USRJQUOTA):((on)##GRPJQUOTA))
-static int ext3_dquot_initialize(struct inode *inode, int type);
-static int ext3_dquot_drop(struct inode *inode);
static int ext3_write_dquot(struct dquot *dquot);
static int ext3_acquire_dquot(struct dquot *dquot);
static int ext3_release_dquot(struct dquot *dquot);
@@ -723,8 +721,8 @@ static ssize_t ext3_quota_write(struct super_block *sb, int type,
const char *data, size_t len, loff_t off);
static struct dquot_operations ext3_quota_operations = {
- .initialize = ext3_dquot_initialize,
- .drop = ext3_dquot_drop,
+ .initialize = dquot_initialize,
+ .drop = dquot_drop,
.alloc_space = dquot_alloc_space,
.alloc_inode = dquot_alloc_inode,
.free_space = dquot_free_space,
@@ -1438,7 +1436,7 @@ static void ext3_orphan_cleanup (struct super_block * sb,
}
list_add(&EXT3_I(inode)->i_orphan, &EXT3_SB(sb)->s_orphan);
- DQUOT_INIT(inode);
+ vfs_dq_init(inode);
if (inode->i_nlink) {
printk(KERN_DEBUG
"%s: truncating inode %lu to %Ld bytes\n",
@@ -2702,7 +2700,7 @@ static int ext3_statfs (struct dentry * dentry, struct kstatfs * buf)
* Process 1 Process 2
* ext3_create() quota_sync()
* journal_start() write_dquot()
- * DQUOT_INIT() down(dqio_mutex)
+ * vfs_dq_init() down(dqio_mutex)
* down(dqio_mutex) journal_start()
*
*/
@@ -2714,44 +2712,6 @@ static inline struct inode *dquot_to_inode(struct dquot *dquot)
return sb_dqopt(dquot->dq_sb)->files[dquot->dq_type];
}
-static int ext3_dquot_initialize(struct inode *inode, int type)
-{
- handle_t *handle;
- int ret, err;
-
- /* We may create quota structure so we need to reserve enough blocks */
- handle = ext3_journal_start(inode, 2*EXT3_QUOTA_INIT_BLOCKS(inode->i_sb));
- if (IS_ERR(handle))
- return PTR_ERR(handle);
- ret = dquot_initialize(inode, type);
- err = ext3_journal_stop(handle);
- if (!ret)
- ret = err;
- return ret;
-}
-
-static int ext3_dquot_drop(struct inode *inode)
-{
- handle_t *handle;
- int ret, err;
-
- /* We may delete quota structure so we need to reserve enough blocks */
- handle = ext3_journal_start(inode, 2*EXT3_QUOTA_DEL_BLOCKS(inode->i_sb));
- if (IS_ERR(handle)) {
- /*
- * We call dquot_drop() anyway to at least release references
- * to quota structures so that umount does not hang.
- */
- dquot_drop(inode);
- return PTR_ERR(handle);
- }
- ret = dquot_drop(inode);
- err = ext3_journal_stop(handle);
- if (!ret)
- ret = err;
- return ret;
-}
-
static int ext3_write_dquot(struct dquot *dquot)
{
int ret, err;
diff --git a/fs/ext3/xattr.c b/fs/ext3/xattr.c
index 175414ac221..83b7be849bd 100644
--- a/fs/ext3/xattr.c
+++ b/fs/ext3/xattr.c
@@ -498,7 +498,7 @@ ext3_xattr_release_block(handle_t *handle, struct inode *inode,
error = ext3_journal_dirty_metadata(handle, bh);
if (IS_SYNC(inode))
handle->h_sync = 1;
- DQUOT_FREE_BLOCK(inode, 1);
+ vfs_dq_free_block(inode, 1);
ea_bdebug(bh, "refcount now=%d; releasing",
le32_to_cpu(BHDR(bh)->h_refcount));
if (ce)
@@ -774,7 +774,7 @@ inserted:
/* The old block is released after updating
the inode. */
error = -EDQUOT;
- if (DQUOT_ALLOC_BLOCK(inode, 1))
+ if (vfs_dq_alloc_block(inode, 1))
goto cleanup;
error = ext3_journal_get_write_access(handle,
new_bh);
@@ -848,7 +848,7 @@ cleanup:
return error;
cleanup_dquot:
- DQUOT_FREE_BLOCK(inode, 1);
+ vfs_dq_free_block(inode, 1);
goto cleanup;
bad_block:
diff --git a/fs/ext4/balloc.c b/fs/ext4/balloc.c
index de9459b4cb9..38f40d55899 100644
--- a/fs/ext4/balloc.c
+++ b/fs/ext4/balloc.c
@@ -536,7 +536,7 @@ void ext4_free_blocks(handle_t *handle, struct inode *inode,
ext4_mb_free_blocks(handle, inode, block, count,
metadata, &dquot_freed_blocks);
if (dquot_freed_blocks)
- DQUOT_FREE_BLOCK(inode, dquot_freed_blocks);
+ vfs_dq_free_block(inode, dquot_freed_blocks);
return;
}
diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
index b0c87dce66a..6083bb38057 100644
--- a/fs/ext4/ext4.h
+++ b/fs/ext4/ext4.h
@@ -20,6 +20,7 @@
#include <linux/blkdev.h>
#include <linux/magic.h>
#include <linux/jbd2.h>
+#include <linux/quota.h>
#include "ext4_i.h"
/*
@@ -1098,6 +1099,7 @@ extern int ext4_chunk_trans_blocks(struct inode *, int nrblocks);
extern int ext4_block_truncate_page(handle_t *handle,
struct address_space *mapping, loff_t from);
extern int ext4_page_mkwrite(struct vm_area_struct *vma, struct page *page);
+extern qsize_t ext4_get_reserved_space(struct inode *inode);
/* ioctl.c */
extern long ext4_ioctl(struct file *, unsigned int, unsigned long);
diff --git a/fs/ext4/ialloc.c b/fs/ext4/ialloc.c
index 2d2b3585ee9..fb51b40e3e8 100644
--- a/fs/ext4/ialloc.c
+++ b/fs/ext4/ialloc.c
@@ -220,10 +220,10 @@ void ext4_free_inode(handle_t *handle, struct inode *inode)
* Note: we must free any quota before locking the superblock,
* as writing the quota to disk may need the lock as well.
*/
- DQUOT_INIT(inode);
+ vfs_dq_init(inode);
ext4_xattr_delete_inode(handle, inode);
- DQUOT_FREE_INODE(inode);
- DQUOT_DROP(inode);
+ vfs_dq_free_inode(inode);
+ vfs_dq_drop(inode);
is_directory = S_ISDIR(inode->i_mode);
@@ -915,7 +915,7 @@ got:
ei->i_extra_isize = EXT4_SB(sb)->s_want_extra_isize;
ret = inode;
- if (DQUOT_ALLOC_INODE(inode)) {
+ if (vfs_dq_alloc_inode(inode)) {
err = -EDQUOT;
goto fail_drop;
}
@@ -956,10 +956,10 @@ really_out:
return ret;
fail_free_drop:
- DQUOT_FREE_INODE(inode);
+ vfs_dq_free_inode(inode);
fail_drop:
- DQUOT_DROP(inode);
+ vfs_dq_drop(inode);
inode->i_flags |= S_NOQUOTA;
inode->i_nlink = 0;
unlock_new_inode(inode);
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
index c7fed5b1874..71d3ecd5db7 100644
--- a/fs/ext4/inode.c
+++ b/fs/ext4/inode.c
@@ -975,6 +975,17 @@ out:
return err;
}
+qsize_t ext4_get_reserved_space(struct inode *inode)
+{
+ unsigned long long total;
+
+ spin_lock(&EXT4_I(inode)->i_block_reservation_lock);
+ total = EXT4_I(inode)->i_reserved_data_blocks +
+ EXT4_I(inode)->i_reserved_meta_blocks;
+ spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);
+
+ return total;
+}
/*
* Calculate the number of metadata blocks need to reserve
* to allocate @blocks for non extent file based file
@@ -1036,8 +1047,14 @@ static void ext4_da_update_reserve_space(struct inode *inode, int used)
/* update per-inode reservations */
BUG_ON(used > EXT4_I(inode)->i_reserved_data_blocks);
EXT4_I(inode)->i_reserved_data_blocks -= used;
-
spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);
+
+ /*
+ * free any over-booked quota reserved for metadata blocks
+ */
+
+ if (mdb_free)
+ vfs_dq_release_reservation_block(inode, mdb_free);
}
/*
@@ -1553,8 +1570,8 @@ static int ext4_journalled_write_end(struct file *file,
static int ext4_da_reserve_space(struct inode *inode, int nrblocks)
{
int retries = 0;
- struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
- unsigned long md_needed, mdblocks, total = 0;
+ struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
+ unsigned long md_needed, mdblocks, total = 0;
/*
* recalculate the amount of metadata blocks to reserve
@@ -1570,12 +1587,23 @@ repeat:
md_needed = mdblocks - EXT4_I(inode)->i_reserved_meta_blocks;
total = md_needed + nrblocks;
+ /*
+ * Make quota reservation here to prevent quota overflow
+ * later. Real quota accounting is done at page writeout
+ * time.
+ */
+ if (vfs_dq_reserve_block(inode, total)) {
+ spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);
+ return -EDQUOT;
+ }
+
if (ext4_claim_free_blocks(sbi, total)) {
spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);
if (ext4_should_retry_alloc(inode->i_sb, &retries)) {
yield();
goto repeat;
}
+ vfs_dq_release_reservation_block(inode, total);
return -ENOSPC;
}
EXT4_I(inode)->i_reserved_data_blocks += nrblocks;
@@ -1629,6 +1657,8 @@ static void ext4_da_release_space(struct inode *inode, int to_free)
BUG_ON(mdb > EXT4_I(inode)->i_reserved_meta_blocks);
EXT4_I(inode)->i_reserved_meta_blocks = mdb;
spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);
+
+ vfs_dq_release_reservation_block(inode, release);
}
static void ext4_da_page_release_reservation(struct page *page,
@@ -4612,7 +4642,7 @@ int ext4_setattr(struct dentry *dentry, struct iattr *attr)
error = PTR_ERR(handle);
goto err_out;
}
- error = DQUOT_TRANSFER(inode, attr) ? -EDQUOT : 0;
+ error = vfs_dq_transfer(inode, attr) ? -EDQUOT : 0;
if (error) {
ext4_journal_stop(handle);
return error;
@@ -4991,7 +5021,7 @@ int ext4_mark_inode_dirty(handle_t *handle, struct inode *inode)
* i_size has been changed by generic_commit_write() and we thus need
* to include the updated inode in the current transaction.
*
- * Also, DQUOT_ALLOC_SPACE() will always dirty the inode when blocks
+ * Also, vfs_dq_alloc_block() will always dirty the inode when blocks
* are allocated to the file.
*
* If the inode is marked synchronous, we don't honour that here - doing
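Taken together, the ext4 hunks above give delayed allocation a three-stage quota lifecycle: reserve when data is copied into the page cache, claim when mballoc turns the reservation into real blocks, and release whatever was reserved but never used. A schematic of that flow is below, with error handling trimmed; the vfs_dq_reserve_block/claim_block/release_reservation_block names are the ones used above, their exact signatures are assumed, and allocated_blocks/unused_blocks are placeholder names for illustration.

/* Schematic of the delalloc quota lifecycle wired up above (sketch only). */

	/* 1. write time (ext4_da_reserve_space): charge a quota reservation
	 *    alongside the in-memory block reservation. */
	if (vfs_dq_reserve_block(inode, md_needed + nrblocks))
		return -EDQUOT;

	/* 2. writeout time (ext4_mb_mark_diskspace_used): the blocks exist
	 *    now, so convert that many reserved quota blocks into a real
	 *    charge. */
	vfs_dq_claim_block(inode, allocated_blocks);

	/* 3. whenever a reservation is backed out (ext4_da_release_space,
	 *    ext4_da_update_reserve_space, or the ENOSPC retry path), give
	 *    the unused part of the reservation back. */
	vfs_dq_release_reservation_block(inode, unused_blocks);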
diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
index 9f61e62f435..b038188bd03 100644
--- a/fs/ext4/mballoc.c
+++ b/fs/ext4/mballoc.c
@@ -3086,9 +3086,12 @@ ext4_mb_mark_diskspace_used(struct ext4_allocation_context *ac,
if (!(ac->ac_flags & EXT4_MB_DELALLOC_RESERVED))
/* release all the reserved blocks if non delalloc */
percpu_counter_sub(&sbi->s_dirtyblocks_counter, reserv_blks);
- else
+ else {
percpu_counter_sub(&sbi->s_dirtyblocks_counter,
ac->ac_b_ex.fe_len);
+ /* convert reserved quota blocks to real quota blocks */
+ vfs_dq_claim_block(ac->ac_inode, ac->ac_b_ex.fe_len);
+ }
if (sbi->s_log_groups_per_flex) {
ext4_group_t flex_group = ext4_flex_group(sbi,
@@ -4544,7 +4547,7 @@ ext4_fsblk_t ext4_mb_new_blocks(handle_t *handle,
struct ext4_sb_info *sbi;
struct super_block *sb;
ext4_fsblk_t block = 0;
- unsigned int inquota;
+ unsigned int inquota = 0;
unsigned int reserv_blks = 0;
sb = ar->inode->i_sb;
@@ -4562,9 +4565,17 @@ ext4_fsblk_t ext4_mb_new_blocks(handle_t *handle,
(unsigned long long) ar->pleft,
(unsigned long long) ar->pright);
- if (!EXT4_I(ar->inode)->i_delalloc_reserved_flag) {
- /*
- * With delalloc we already reserved the blocks
+ /*
+ * For delayed allocation, we could skip the ENOSPC and
+ * EDQUOT check, as blocks and quotas have been already
+ * reserved when data being copied into pagecache.
+ */
+ if (EXT4_I(ar->inode)->i_delalloc_reserved_flag)
+ ar->flags |= EXT4_MB_DELALLOC_RESERVED;
+ else {
+ /* Without delayed allocation we need to verify
+ * there are enough free blocks to do block allocation
+ * and verify allocation doesn't exceed the quota limits.
*/
while (ar->len && ext4_claim_free_blocks(sbi, ar->len)) {
/* let others to free the space */
@@ -4576,19 +4587,16 @@ ext4_fsblk_t ext4_mb_new_blocks(handle_t *handle,
return 0;
}
reserv_blks = ar->len;
+ while (ar->len && vfs_dq_alloc_block(ar->inode, ar->len)) {
+ ar->flags |= EXT4_MB_HINT_NOPREALLOC;
+ ar->len--;
+ }
+ inquota = ar->len;
+ if (ar->len == 0) {
+ *errp = -EDQUOT;
+ goto out3;
+ }
}
- while (ar->len && DQUOT_ALLOC_BLOCK(ar->inode, ar->len)) {
- ar->flags |= EXT4_MB_HINT_NOPREALLOC;
- ar->len--;
- }
- if (ar->len == 0) {
- *errp = -EDQUOT;
- goto out3;
- }
- inquota = ar->len;
-
- if (EXT4_I(ar->inode)->i_delalloc_reserved_flag)
- ar->flags |= EXT4_MB_DELALLOC_RESERVED;
ac = kmem_cache_alloc(ext4_ac_cachep, GFP_NOFS);
if (!ac) {
@@ -4654,8 +4662,8 @@ repeat:
out2:
kmem_cache_free(ext4_ac_cachep, ac);
out1:
- if (ar->len < inquota)
- DQUOT_FREE_BLOCK(ar->inode, inquota - ar->len);
+ if (inquota && ar->len < inquota)
+ vfs_dq_free_block(ar->inode, inquota - ar->len);
out3:
if (!ar->len) {
if (!EXT4_I(ar->inode)->i_delalloc_reserved_flag)
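For the non-delalloc path, ext4_mb_new_blocks() above still charges quota up front: vfs_dq_alloc_block() is all-or-nothing for the requested length, so the request is shrunk one block at a time until the charge succeeds, and any blocks the allocator did not actually use are refunded at out1. A compressed sketch of that sequence, with the error labels and the delalloc branch trimmed:

/* Compressed sketch of the non-delalloc quota handling above. */
	unsigned int inquota = 0;

	if (!EXT4_I(ar->inode)->i_delalloc_reserved_flag) {
		/* shrink the request until it fits inside the quota limit */
		while (ar->len && vfs_dq_alloc_block(ar->inode, ar->len)) {
			ar->flags |= EXT4_MB_HINT_NOPREALLOC;
			ar->len--;
		}
		if (ar->len == 0) {
			*errp = -EDQUOT;	/* real code jumps to out3 */
			return 0;
		}
		inquota = ar->len;		/* remember how much was charged */
	}

	/* ... allocator runs, possibly returning fewer than ar->len blocks ... */

	if (inquota && ar->len < inquota)
		vfs_dq_free_block(ar->inode, inquota - ar->len);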
diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c
index ba702bd7910..83410244d3e 100644
--- a/fs/ext4/namei.c
+++ b/fs/ext4/namei.c
@@ -2092,7 +2092,7 @@ static int ext4_rmdir(struct inode *dir, struct dentry *dentry)
/* Initialize quotas before so that eventual writes go in
* separate transaction */
- DQUOT_INIT(dentry->d_inode);
+ vfs_dq_init(dentry->d_inode);
handle = ext4_journal_start(dir, EXT4_DELETE_TRANS_BLOCKS(dir->i_sb));
if (IS_ERR(handle))
return PTR_ERR(handle);
@@ -2151,7 +2151,7 @@ static int ext4_unlink(struct inode *dir, struct dentry *dentry)
/* Initialize quotas before so that eventual writes go
* in separate transaction */
- DQUOT_INIT(dentry->d_inode);
+ vfs_dq_init(dentry->d_inode);
handle = ext4_journal_start(dir, EXT4_DELETE_TRANS_BLOCKS(dir->i_sb));
if (IS_ERR(handle))
return PTR_ERR(handle);
@@ -2318,7 +2318,7 @@ static int ext4_rename(struct inode *old_dir, struct dentry *old_dentry,
/* Initialize quotas before so that eventual writes go
* in separate transaction */
if (new_dentry->d_inode)
- DQUOT_INIT(new_dentry->d_inode);
+ vfs_dq_init(new_dentry->d_inode);
handle = ext4_journal_start(old_dir, 2 *
EXT4_DATA_TRANS_BLOCKS(old_dir->i_sb) +
EXT4_INDEX_EXTRA_TRANS_BLOCKS + 2);
diff --git a/fs/ext4/super.c b/fs/ext4/super.c
index 39d1993cfa1..f7371a6a923 100644
--- a/fs/ext4/super.c
+++ b/fs/ext4/super.c
@@ -926,8 +926,6 @@ static int bdev_try_to_free_page(struct super_block *sb, struct page *page, gfp_
#define QTYPE2NAME(t) ((t) == USRQUOTA ? "user" : "group")
#define QTYPE2MOPT(on, t) ((t) == USRQUOTA?((on)##USRJQUOTA):((on)##GRPJQUOTA))
-static int ext4_dquot_initialize(struct inode *inode, int type);
-static int ext4_dquot_drop(struct inode *inode);
static int ext4_write_dquot(struct dquot *dquot);
static int ext4_acquire_dquot(struct dquot *dquot);
static int ext4_release_dquot(struct dquot *dquot);
@@ -942,9 +940,13 @@ static ssize_t ext4_quota_write(struct super_block *sb, int type,
const char *data, size_t len, loff_t off);
static struct dquot_operations ext4_quota_operations = {
- .initialize = ext4_dquot_initialize,
- .drop = ext4_dquot_drop,
+ .initialize = dquot_initialize,
+ .drop = dquot_drop,
.alloc_space = dquot_alloc_space,
+ .reserve_space = dquot_reserve_space,
+ .claim_space = dquot_claim_space,
+ .release_rsv = dquot_release_reserved_space,
+ .get_reserved_space = ext4_get_reserved_space,
.alloc_inode = dquot_alloc_inode,
.free_space = dquot_free_space,
.free_inode = dquot_free_inode,
@@ -1802,7 +1804,7 @@ static void ext4_orphan_cleanup(struct super_block *sb,
}
list_add(&EXT4_I(inode)->i_orphan, &EXT4_SB(sb)->s_orphan);
- DQUOT_INIT(inode);
+ vfs_dq_init(inode);
if (inode->i_nlink) {
printk(KERN_DEBUG
"%s: truncating inode %lu to %lld bytes\n",
@@ -3367,8 +3369,8 @@ static int ext4_statfs(struct dentry *dentry, struct kstatfs *buf)
* is locked for write. Otherwise there are possible deadlocks:
* Process 1 Process 2
* ext4_create() quota_sync()
- * jbd2_journal_start() write_dquot()
- * DQUOT_INIT() down(dqio_mutex)
+ * jbd2_journal_start() write_dquot()
+ * vfs_dq_init() down(dqio_mutex)
* down(dqio_mutex) jbd2_journal_start()
*
*/
@@ -3380,44 +3382,6 @@ static inline struct inode *dquot_to_inode(struct dquot *dquot)
return sb_dqopt(dquot->dq_sb)->files[dquot->dq_type];
}
-static int ext4_dquot_initialize(struct inode *inode, int type)
-{
- handle_t *handle;
- int ret, err;
-
- /* We may create quota structure so we need to reserve enough blocks */
- handle = ext4_journal_start(inode, 2*EXT4_QUOTA_INIT_BLOCKS(inode->i_sb));
- if (IS_ERR(handle))
- return PTR_ERR(handle);
- ret = dquot_initialize(inode, type);
- err = ext4_journal_stop(handle);
- if (!ret)
- ret = err;
- return ret;
-}
-
-static int ext4_dquot_drop(struct inode *inode)
-{
- handle_t *handle;
- int ret, err;
-
- /* We may delete quota structure so we need to reserve enough blocks */
- handle = ext4_journal_start(inode, 2*EXT4_QUOTA_DEL_BLOCKS(inode->i_sb));
- if (IS_ERR(handle)) {
- /*
- * We call dquot_drop() anyway to at least release references
- * to quota structures so that umount does not hang.
- */
- dquot_drop(inode);
- return PTR_ERR(handle);
- }
- ret = dquot_drop(inode);
- err = ext4_journal_stop(handle);
- if (!ret)
- ret = err;
- return ret;
-}
-
static int ext4_write_dquot(struct dquot *dquot)
{
int ret, err;
diff --git a/fs/ext4/xattr.c b/fs/ext4/xattr.c
index 157ce6589c5..62b31c24699 100644
--- a/fs/ext4/xattr.c
+++ b/fs/ext4/xattr.c
@@ -490,7 +490,7 @@ ext4_xattr_release_block(handle_t *handle, struct inode *inode,
error = ext4_handle_dirty_metadata(handle, inode, bh);
if (IS_SYNC(inode))
ext4_handle_sync(handle);
- DQUOT_FREE_BLOCK(inode, 1);
+ vfs_dq_free_block(inode, 1);
ea_bdebug(bh, "refcount now=%d; releasing",
le32_to_cpu(BHDR(bh)->h_refcount));
if (ce)
@@ -784,7 +784,7 @@ inserted:
/* The old block is released after updating
the inode. */
error = -EDQUOT;
- if (DQUOT_ALLOC_BLOCK(inode, 1))
+ if (vfs_dq_alloc_block(inode, 1))
goto cleanup;
error = ext4_journal_get_write_access(handle,
new_bh);
@@ -860,7 +860,7 @@ cleanup:
return error;
cleanup_dquot:
- DQUOT_FREE_BLOCK(inode, 1);
+ vfs_dq_free_block(inode, 1);
goto cleanup;
bad_block:
diff --git a/fs/fat/namei_msdos.c b/fs/fat/namei_msdos.c
index 7ba03a4acbe..da3f361a37d 100644
--- a/fs/fat/namei_msdos.c
+++ b/fs/fat/namei_msdos.c
@@ -188,7 +188,7 @@ old_compare:
goto out;
}
-static struct dentry_operations msdos_dentry_operations = {
+static const struct dentry_operations msdos_dentry_operations = {
.d_hash = msdos_hash,
.d_compare = msdos_cmp,
};
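The msdos change above is the first of many hunks that only constify a dentry_operations table. The pattern, shown for a made-up filesystem (the foofs_* names are hypothetical, and the assignment assumes the companion dcache change outside fs/ makes dentry->d_op a pointer to const):

/* Hypothetical illustration of the constification done above. */
static int foofs_d_revalidate(struct dentry *dentry, struct nameidata *nd)
{
	return 1;			/* always valid in this sketch */
}

static const struct dentry_operations foofs_dentry_ops = {
	.d_revalidate	= foofs_d_revalidate,
};

	/* at lookup time, exactly as before, only the table is now const: */
	dentry->d_op = &foofs_dentry_ops;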
diff --git a/fs/fat/namei_vfat.c b/fs/fat/namei_vfat.c
index 8ae32e37673..a0e00e3a46e 100644
--- a/fs/fat/namei_vfat.c
+++ b/fs/fat/namei_vfat.c
@@ -166,13 +166,13 @@ static int vfat_cmp(struct dentry *dentry, struct qstr *a, struct qstr *b)
return 1;
}
-static struct dentry_operations vfat_ci_dentry_ops = {
+static const struct dentry_operations vfat_ci_dentry_ops = {
.d_revalidate = vfat_revalidate_ci,
.d_hash = vfat_hashi,
.d_compare = vfat_cmpi,
};
-static struct dentry_operations vfat_dentry_ops = {
+static const struct dentry_operations vfat_dentry_ops = {
.d_revalidate = vfat_revalidate,
.d_hash = vfat_hash,
.d_compare = vfat_cmp,
diff --git a/fs/fcntl.c b/fs/fcntl.c
index bd215cc791d..d865ca66ccb 100644
--- a/fs/fcntl.c
+++ b/fs/fcntl.c
@@ -141,7 +141,7 @@ SYSCALL_DEFINE1(dup, unsigned int, fildes)
return ret;
}
-#define SETFL_MASK (O_APPEND | O_NONBLOCK | O_NDELAY | FASYNC | O_DIRECT | O_NOATIME)
+#define SETFL_MASK (O_APPEND | O_NONBLOCK | O_NDELAY | O_DIRECT | O_NOATIME)
static int setfl(int fd, struct file * filp, unsigned long arg)
{
@@ -177,21 +177,21 @@ static int setfl(int fd, struct file * filp, unsigned long arg)
return error;
/*
- * We still need a lock here for now to keep multiple FASYNC calls
- * from racing with each other.
+ * ->fasync() is responsible for setting the FASYNC bit.
*/
- lock_kernel();
- if ((arg ^ filp->f_flags) & FASYNC) {
- if (filp->f_op && filp->f_op->fasync) {
- error = filp->f_op->fasync(fd, filp, (arg & FASYNC) != 0);
- if (error < 0)
- goto out;
- }
+ if (((arg ^ filp->f_flags) & FASYNC) && filp->f_op &&
+ filp->f_op->fasync) {
+ error = filp->f_op->fasync(fd, filp, (arg & FASYNC) != 0);
+ if (error < 0)
+ goto out;
+ if (error > 0)
+ error = 0;
}
-
+ spin_lock(&filp->f_lock);
filp->f_flags = (arg & SETFL_MASK) | (filp->f_flags & ~SETFL_MASK);
+ spin_unlock(&filp->f_lock);
+
out:
- unlock_kernel();
return error;
}
@@ -516,7 +516,7 @@ static DEFINE_RWLOCK(fasync_lock);
static struct kmem_cache *fasync_cache __read_mostly;
/*
- * fasync_helper() is used by some character device drivers (mainly mice)
+ * fasync_helper() is used by almost all character device drivers
* to set up the fasync queue. It returns negative on error, 0 if it did
* no changes and positive if it added/deleted the entry.
*/
@@ -555,6 +555,13 @@ int fasync_helper(int fd, struct file * filp, int on, struct fasync_struct **fap
result = 1;
}
out:
+ /* Fix up FASYNC bit while still holding fasync_lock */
+ spin_lock(&filp->f_lock);
+ if (on)
+ filp->f_flags |= FASYNC;
+ else
+ filp->f_flags &= ~FASYNC;
+ spin_unlock(&filp->f_lock);
write_unlock_irq(&fasync_lock);
return result;
}
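With the BKL removed from setfl() above, the ->fasync() method (through fasync_helper(), which now maintains the FASYNC bit itself) and the new per-file f_lock take over the f_flags serialization. What this looks like from a driver's point of view, using hypothetical foo_* names:

/* Hypothetical driver ->fasync() method after this change. */
static struct fasync_struct *foo_async_queue;

static int foo_fasync(int fd, struct file *filp, int on)
{
	/*
	 * fasync_helper() now also sets or clears FASYNC in filp->f_flags
	 * under filp->f_lock, so the driver no longer touches f_flags.
	 * A positive return (entry added or removed) is fine; callers such
	 * as setfl() and ioctl_fioasync() above squash it to zero.
	 */
	return fasync_helper(fd, filp, on, &foo_async_queue);
}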
diff --git a/fs/file_table.c b/fs/file_table.c
index da806aceae3..b74a8e1da91 100644
--- a/fs/file_table.c
+++ b/fs/file_table.c
@@ -128,6 +128,7 @@ struct file *get_empty_filp(void)
atomic_long_set(&f->f_count, 1);
rwlock_init(&f->f_owner.lock);
f->f_cred = get_cred(cred);
+ spin_lock_init(&f->f_lock);
eventpoll_init_file(f);
/* f->f_version: 0 */
return f;
diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c
index fdff346e96f..06da05261e0 100644
--- a/fs/fuse/dir.c
+++ b/fs/fuse/dir.c
@@ -224,7 +224,7 @@ static int invalid_nodeid(u64 nodeid)
return !nodeid || nodeid == FUSE_ROOT_ID;
}
-struct dentry_operations fuse_dentry_operations = {
+const struct dentry_operations fuse_dentry_operations = {
.d_revalidate = fuse_dentry_revalidate,
};
diff --git a/fs/fuse/fuse_i.h b/fs/fuse/fuse_i.h
index 5e64b815a5a..6fc5aedaa0d 100644
--- a/fs/fuse/fuse_i.h
+++ b/fs/fuse/fuse_i.h
@@ -493,7 +493,7 @@ static inline u64 get_node_id(struct inode *inode)
/** Device operations */
extern const struct file_operations fuse_dev_operations;
-extern struct dentry_operations fuse_dentry_operations;
+extern const struct dentry_operations fuse_dentry_operations;
/**
* Get a filled in inode
diff --git a/fs/gfs2/ops_dentry.c b/fs/gfs2/ops_dentry.c
index 5eb57b04438..022c66cd560 100644
--- a/fs/gfs2/ops_dentry.c
+++ b/fs/gfs2/ops_dentry.c
@@ -107,7 +107,7 @@ static int gfs2_dhash(struct dentry *dentry, struct qstr *str)
return 0;
}
-struct dentry_operations gfs2_dops = {
+const struct dentry_operations gfs2_dops = {
.d_revalidate = gfs2_drevalidate,
.d_hash = gfs2_dhash,
};
diff --git a/fs/gfs2/super.h b/fs/gfs2/super.h
index 91abdbedcc8..b56413e3e40 100644
--- a/fs/gfs2/super.h
+++ b/fs/gfs2/super.h
@@ -49,7 +49,7 @@ extern struct file_system_type gfs2_fs_type;
extern struct file_system_type gfs2meta_fs_type;
extern const struct export_operations gfs2_export_ops;
extern const struct super_operations gfs2_super_ops;
-extern struct dentry_operations gfs2_dops;
+extern const struct dentry_operations gfs2_dops;
#endif /* __SUPER_DOT_H__ */
diff --git a/fs/hfs/hfs_fs.h b/fs/hfs/hfs_fs.h
index 9955232fdf8..052387e1167 100644
--- a/fs/hfs/hfs_fs.h
+++ b/fs/hfs/hfs_fs.h
@@ -213,7 +213,7 @@ extern void hfs_mdb_put(struct super_block *);
extern int hfs_part_find(struct super_block *, sector_t *, sector_t *);
/* string.c */
-extern struct dentry_operations hfs_dentry_operations;
+extern const struct dentry_operations hfs_dentry_operations;
extern int hfs_hash_dentry(struct dentry *, struct qstr *);
extern int hfs_strcmp(const unsigned char *, unsigned int,
diff --git a/fs/hfs/sysdep.c b/fs/hfs/sysdep.c
index 5bf89ec01cd..7478f5c219a 100644
--- a/fs/hfs/sysdep.c
+++ b/fs/hfs/sysdep.c
@@ -31,7 +31,7 @@ static int hfs_revalidate_dentry(struct dentry *dentry, struct nameidata *nd)
return 1;
}
-struct dentry_operations hfs_dentry_operations =
+const struct dentry_operations hfs_dentry_operations =
{
.d_revalidate = hfs_revalidate_dentry,
.d_hash = hfs_hash_dentry,
diff --git a/fs/hfsplus/hfsplus_fs.h b/fs/hfsplus/hfsplus_fs.h
index f027a905225..5c10d803d9d 100644
--- a/fs/hfsplus/hfsplus_fs.h
+++ b/fs/hfsplus/hfsplus_fs.h
@@ -327,7 +327,7 @@ void hfsplus_file_truncate(struct inode *);
/* inode.c */
extern const struct address_space_operations hfsplus_aops;
extern const struct address_space_operations hfsplus_btree_aops;
-extern struct dentry_operations hfsplus_dentry_operations;
+extern const struct dentry_operations hfsplus_dentry_operations;
void hfsplus_inode_read_fork(struct inode *, struct hfsplus_fork_raw *);
void hfsplus_inode_write_fork(struct inode *, struct hfsplus_fork_raw *);
diff --git a/fs/hfsplus/inode.c b/fs/hfsplus/inode.c
index f105ee9e1cc..1bcf597c056 100644
--- a/fs/hfsplus/inode.c
+++ b/fs/hfsplus/inode.c
@@ -137,7 +137,7 @@ const struct address_space_operations hfsplus_aops = {
.writepages = hfsplus_writepages,
};
-struct dentry_operations hfsplus_dentry_operations = {
+const struct dentry_operations hfsplus_dentry_operations = {
.d_hash = hfsplus_hash_dentry,
.d_compare = hfsplus_compare_dentry,
};
diff --git a/fs/hostfs/hostfs_kern.c b/fs/hostfs/hostfs_kern.c
index 5c538e0ec14..fe02ad4740e 100644
--- a/fs/hostfs/hostfs_kern.c
+++ b/fs/hostfs/hostfs_kern.c
@@ -31,12 +31,12 @@ static inline struct hostfs_inode_info *HOSTFS_I(struct inode *inode)
#define FILE_HOSTFS_I(file) HOSTFS_I((file)->f_path.dentry->d_inode)
-int hostfs_d_delete(struct dentry *dentry)
+static int hostfs_d_delete(struct dentry *dentry)
{
return 1;
}
-struct dentry_operations hostfs_dentry_ops = {
+static const struct dentry_operations hostfs_dentry_ops = {
.d_delete = hostfs_d_delete,
};
diff --git a/fs/hpfs/dentry.c b/fs/hpfs/dentry.c
index 08319126b2a..940d6d150be 100644
--- a/fs/hpfs/dentry.c
+++ b/fs/hpfs/dentry.c
@@ -49,7 +49,7 @@ static int hpfs_compare_dentry(struct dentry *dentry, struct qstr *a, struct qst
return 0;
}
-static struct dentry_operations hpfs_dentry_operations = {
+static const struct dentry_operations hpfs_dentry_operations = {
.d_hash = hpfs_hash_dentry,
.d_compare = hpfs_compare_dentry,
};
diff --git a/fs/inode.c b/fs/inode.c
index 643ac43e5a5..d06d6d268de 100644
--- a/fs/inode.c
+++ b/fs/inode.c
@@ -294,7 +294,7 @@ void clear_inode(struct inode *inode)
BUG_ON(!(inode->i_state & I_FREEING));
BUG_ON(inode->i_state & I_CLEAR);
inode_sync_wait(inode);
- DQUOT_DROP(inode);
+ vfs_dq_drop(inode);
if (inode->i_sb->s_op->clear_inode)
inode->i_sb->s_op->clear_inode(inode);
if (S_ISBLK(inode->i_mode) && inode->i_bdev)
@@ -366,6 +366,8 @@ static int invalidate_list(struct list_head *head, struct list_head *dispose)
if (tmp == head)
break;
inode = list_entry(tmp, struct inode, i_sb_list);
+ if (inode->i_state & I_NEW)
+ continue;
invalidate_inode_buffers(inode);
if (!atomic_read(&inode->i_count)) {
list_move(&inode->i_list, dispose);
@@ -1168,7 +1170,7 @@ void generic_delete_inode(struct inode *inode)
if (op->delete_inode) {
void (*delete)(struct inode *) = op->delete_inode;
if (!is_bad_inode(inode))
- DQUOT_INIT(inode);
+ vfs_dq_init(inode);
/* Filesystems implementing their own
* s_op->delete_inode are required to call
* truncate_inode_pages and clear_inode()
diff --git a/fs/ioctl.c b/fs/ioctl.c
index 240ec63984c..ac2d47e4392 100644
--- a/fs/ioctl.c
+++ b/fs/ioctl.c
@@ -404,10 +404,12 @@ static int ioctl_fionbio(struct file *filp, int __user *argp)
if (O_NONBLOCK != O_NDELAY)
flag |= O_NDELAY;
#endif
+ spin_lock(&filp->f_lock);
if (on)
filp->f_flags |= flag;
else
filp->f_flags &= ~flag;
+ spin_unlock(&filp->f_lock);
return error;
}
@@ -425,18 +427,12 @@ static int ioctl_fioasync(unsigned int fd, struct file *filp,
/* Did FASYNC state change ? */
if ((flag ^ filp->f_flags) & FASYNC) {
if (filp->f_op && filp->f_op->fasync)
+ /* fasync() adjusts filp->f_flags */
error = filp->f_op->fasync(fd, filp, on);
else
error = -ENOTTY;
}
- if (error)
- return error;
-
- if (on)
- filp->f_flags |= FASYNC;
- else
- filp->f_flags &= ~FASYNC;
- return error;
+ return error < 0 ? error : 0;
}
static int ioctl_fsfreeze(struct file *filp)
@@ -499,17 +495,11 @@ int do_vfs_ioctl(struct file *filp, unsigned int fd, unsigned int cmd,
break;
case FIONBIO:
- /* BKL needed to avoid races tweaking f_flags */
- lock_kernel();
error = ioctl_fionbio(filp, argp);
- unlock_kernel();
break;
case FIOASYNC:
- /* BKL needed to avoid races tweaking f_flags */
- lock_kernel();
error = ioctl_fioasync(fd, filp, argp);
- unlock_kernel();
break;
case FIOQSIZE:
diff --git a/fs/isofs/inode.c b/fs/isofs/inode.c
index 6147ec3643a..13d2eddd069 100644
--- a/fs/isofs/inode.c
+++ b/fs/isofs/inode.c
@@ -114,7 +114,7 @@ static const struct super_operations isofs_sops = {
};
-static struct dentry_operations isofs_dentry_ops[] = {
+static const struct dentry_operations isofs_dentry_ops[] = {
{
.d_hash = isofs_hash,
.d_compare = isofs_dentry_cmp,
diff --git a/fs/jfs/acl.c b/fs/jfs/acl.c
index d3e5c33665d..a166c1669e8 100644
--- a/fs/jfs/acl.c
+++ b/fs/jfs/acl.c
@@ -233,7 +233,7 @@ int jfs_setattr(struct dentry *dentry, struct iattr *iattr)
if ((iattr->ia_valid & ATTR_UID && iattr->ia_uid != inode->i_uid) ||
(iattr->ia_valid & ATTR_GID && iattr->ia_gid != inode->i_gid)) {
- if (DQUOT_TRANSFER(inode, iattr))
+ if (vfs_dq_transfer(inode, iattr))
return -EDQUOT;
}
diff --git a/fs/jfs/inode.c b/fs/jfs/inode.c
index b00ee9f05a0..b2ae190a77b 100644
--- a/fs/jfs/inode.c
+++ b/fs/jfs/inode.c
@@ -158,9 +158,9 @@ void jfs_delete_inode(struct inode *inode)
/*
* Free the inode from the quota allocation.
*/
- DQUOT_INIT(inode);
- DQUOT_FREE_INODE(inode);
- DQUOT_DROP(inode);
+ vfs_dq_init(inode);
+ vfs_dq_free_inode(inode);
+ vfs_dq_drop(inode);
}
clear_inode(inode);
diff --git a/fs/jfs/jfs_dtree.c b/fs/jfs/jfs_dtree.c
index 4dcc0581999..925871e9887 100644
--- a/fs/jfs/jfs_dtree.c
+++ b/fs/jfs/jfs_dtree.c
@@ -381,10 +381,10 @@ static u32 add_index(tid_t tid, struct inode *ip, s64 bn, int slot)
* It's time to move the inline table to an external
* page and begin to build the xtree
*/
- if (DQUOT_ALLOC_BLOCK(ip, sbi->nbperpage))
+ if (vfs_dq_alloc_block(ip, sbi->nbperpage))
goto clean_up;
if (dbAlloc(ip, 0, sbi->nbperpage, &xaddr)) {
- DQUOT_FREE_BLOCK(ip, sbi->nbperpage);
+ vfs_dq_free_block(ip, sbi->nbperpage);
goto clean_up;
}
@@ -408,7 +408,7 @@ static u32 add_index(tid_t tid, struct inode *ip, s64 bn, int slot)
memcpy(&jfs_ip->i_dirtable, temp_table,
sizeof (temp_table));
dbFree(ip, xaddr, sbi->nbperpage);
- DQUOT_FREE_BLOCK(ip, sbi->nbperpage);
+ vfs_dq_free_block(ip, sbi->nbperpage);
goto clean_up;
}
ip->i_size = PSIZE;
@@ -1027,7 +1027,7 @@ static int dtSplitUp(tid_t tid,
n = xlen;
/* Allocate blocks to quota. */
- if (DQUOT_ALLOC_BLOCK(ip, n)) {
+ if (vfs_dq_alloc_block(ip, n)) {
rc = -EDQUOT;
goto extendOut;
}
@@ -1308,7 +1308,7 @@ static int dtSplitUp(tid_t tid,
/* Rollback quota allocation */
if (rc && quota_allocation)
- DQUOT_FREE_BLOCK(ip, quota_allocation);
+ vfs_dq_free_block(ip, quota_allocation);
dtSplitUp_Exit:
@@ -1369,7 +1369,7 @@ static int dtSplitPage(tid_t tid, struct inode *ip, struct dtsplit * split,
return -EIO;
/* Allocate blocks to quota. */
- if (DQUOT_ALLOC_BLOCK(ip, lengthPXD(pxd))) {
+ if (vfs_dq_alloc_block(ip, lengthPXD(pxd))) {
release_metapage(rmp);
return -EDQUOT;
}
@@ -1916,7 +1916,7 @@ static int dtSplitRoot(tid_t tid,
rp = rmp->data;
/* Allocate blocks to quota. */
- if (DQUOT_ALLOC_BLOCK(ip, lengthPXD(pxd))) {
+ if (vfs_dq_alloc_block(ip, lengthPXD(pxd))) {
release_metapage(rmp);
return -EDQUOT;
}
@@ -2287,7 +2287,7 @@ static int dtDeleteUp(tid_t tid, struct inode *ip,
xlen = lengthPXD(&fp->header.self);
/* Free quota allocation. */
- DQUOT_FREE_BLOCK(ip, xlen);
+ vfs_dq_free_block(ip, xlen);
/* free/invalidate its buffer page */
discard_metapage(fmp);
@@ -2363,7 +2363,7 @@ static int dtDeleteUp(tid_t tid, struct inode *ip,
xlen = lengthPXD(&p->header.self);
/* Free quota allocation */
- DQUOT_FREE_BLOCK(ip, xlen);
+ vfs_dq_free_block(ip, xlen);
/* free/invalidate its buffer page */
discard_metapage(mp);
diff --git a/fs/jfs/jfs_extent.c b/fs/jfs/jfs_extent.c
index 7ae1e3281de..169802ea07f 100644
--- a/fs/jfs/jfs_extent.c
+++ b/fs/jfs/jfs_extent.c
@@ -141,7 +141,7 @@ extAlloc(struct inode *ip, s64 xlen, s64 pno, xad_t * xp, bool abnr)
}
/* Allocate blocks to quota. */
- if (DQUOT_ALLOC_BLOCK(ip, nxlen)) {
+ if (vfs_dq_alloc_block(ip, nxlen)) {
dbFree(ip, nxaddr, (s64) nxlen);
mutex_unlock(&JFS_IP(ip)->commit_mutex);
return -EDQUOT;
@@ -164,7 +164,7 @@ extAlloc(struct inode *ip, s64 xlen, s64 pno, xad_t * xp, bool abnr)
*/
if (rc) {
dbFree(ip, nxaddr, nxlen);
- DQUOT_FREE_BLOCK(ip, nxlen);
+ vfs_dq_free_block(ip, nxlen);
mutex_unlock(&JFS_IP(ip)->commit_mutex);
return (rc);
}
@@ -256,7 +256,7 @@ int extRealloc(struct inode *ip, s64 nxlen, xad_t * xp, bool abnr)
goto exit;
/* Allocate blocks to quota. */
- if (DQUOT_ALLOC_BLOCK(ip, nxlen)) {
+ if (vfs_dq_alloc_block(ip, nxlen)) {
dbFree(ip, nxaddr, (s64) nxlen);
mutex_unlock(&JFS_IP(ip)->commit_mutex);
return -EDQUOT;
@@ -297,7 +297,7 @@ int extRealloc(struct inode *ip, s64 nxlen, xad_t * xp, bool abnr)
/* extend the extent */
if ((rc = xtExtend(0, ip, xoff + xlen, (int) nextend, 0))) {
dbFree(ip, xaddr + xlen, delta);
- DQUOT_FREE_BLOCK(ip, nxlen);
+ vfs_dq_free_block(ip, nxlen);
goto exit;
}
} else {
@@ -308,7 +308,7 @@ int extRealloc(struct inode *ip, s64 nxlen, xad_t * xp, bool abnr)
*/
if ((rc = xtTailgate(0, ip, xoff, (int) ntail, nxaddr, 0))) {
dbFree(ip, nxaddr, nxlen);
- DQUOT_FREE_BLOCK(ip, nxlen);
+ vfs_dq_free_block(ip, nxlen);
goto exit;
}
}
diff --git a/fs/jfs/jfs_inode.c b/fs/jfs/jfs_inode.c
index d4d142c2edd..dc0e02159ac 100644
--- a/fs/jfs/jfs_inode.c
+++ b/fs/jfs/jfs_inode.c
@@ -116,7 +116,7 @@ struct inode *ialloc(struct inode *parent, umode_t mode)
/*
* Allocate inode to quota.
*/
- if (DQUOT_ALLOC_INODE(inode)) {
+ if (vfs_dq_alloc_inode(inode)) {
rc = -EDQUOT;
goto fail_drop;
}
@@ -162,7 +162,7 @@ struct inode *ialloc(struct inode *parent, umode_t mode)
return inode;
fail_drop:
- DQUOT_DROP(inode);
+ vfs_dq_drop(inode);
inode->i_flags |= S_NOQUOTA;
fail_unlock:
inode->i_nlink = 0;
diff --git a/fs/jfs/jfs_inode.h b/fs/jfs/jfs_inode.h
index adb2fafcc54..1eff7db34d6 100644
--- a/fs/jfs/jfs_inode.h
+++ b/fs/jfs/jfs_inode.h
@@ -47,5 +47,5 @@ extern const struct file_operations jfs_dir_operations;
extern const struct inode_operations jfs_file_inode_operations;
extern const struct file_operations jfs_file_operations;
extern const struct inode_operations jfs_symlink_inode_operations;
-extern struct dentry_operations jfs_ci_dentry_operations;
+extern const struct dentry_operations jfs_ci_dentry_operations;
#endif /* _H_JFS_INODE */
diff --git a/fs/jfs/jfs_xtree.c b/fs/jfs/jfs_xtree.c
index ae3acafb447..a27e26c9056 100644
--- a/fs/jfs/jfs_xtree.c
+++ b/fs/jfs/jfs_xtree.c
@@ -846,10 +846,10 @@ int xtInsert(tid_t tid, /* transaction id */
hint = addressXAD(xad) + lengthXAD(xad) - 1;
} else
hint = 0;
- if ((rc = DQUOT_ALLOC_BLOCK(ip, xlen)))
+ if ((rc = vfs_dq_alloc_block(ip, xlen)))
goto out;
if ((rc = dbAlloc(ip, hint, (s64) xlen, &xaddr))) {
- DQUOT_FREE_BLOCK(ip, xlen);
+ vfs_dq_free_block(ip, xlen);
goto out;
}
}
@@ -878,7 +878,7 @@ int xtInsert(tid_t tid, /* transaction id */
/* undo data extent allocation */
if (*xaddrp == 0) {
dbFree(ip, xaddr, (s64) xlen);
- DQUOT_FREE_BLOCK(ip, xlen);
+ vfs_dq_free_block(ip, xlen);
}
return rc;
}
@@ -1246,7 +1246,7 @@ xtSplitPage(tid_t tid, struct inode *ip,
rbn = addressPXD(pxd);
/* Allocate blocks to quota. */
- if (DQUOT_ALLOC_BLOCK(ip, lengthPXD(pxd))) {
+ if (vfs_dq_alloc_block(ip, lengthPXD(pxd))) {
rc = -EDQUOT;
goto clean_up;
}
@@ -1456,7 +1456,7 @@ xtSplitPage(tid_t tid, struct inode *ip,
/* Rollback quota allocation. */
if (quota_allocation)
- DQUOT_FREE_BLOCK(ip, quota_allocation);
+ vfs_dq_free_block(ip, quota_allocation);
return (rc);
}
@@ -1513,7 +1513,7 @@ xtSplitRoot(tid_t tid,
return -EIO;
/* Allocate blocks to quota. */
- if (DQUOT_ALLOC_BLOCK(ip, lengthPXD(pxd))) {
+ if (vfs_dq_alloc_block(ip, lengthPXD(pxd))) {
release_metapage(rmp);
return -EDQUOT;
}
@@ -3941,7 +3941,7 @@ s64 xtTruncate(tid_t tid, struct inode *ip, s64 newsize, int flag)
ip->i_size = newsize;
/* update quota allocation to reflect freed blocks */
- DQUOT_FREE_BLOCK(ip, nfreed);
+ vfs_dq_free_block(ip, nfreed);
/*
* free tlock of invalidated pages
diff --git a/fs/jfs/namei.c b/fs/jfs/namei.c
index b4de56b851e..514ee2edb92 100644
--- a/fs/jfs/namei.c
+++ b/fs/jfs/namei.c
@@ -35,7 +35,7 @@
/*
* forward references
*/
-struct dentry_operations jfs_ci_dentry_operations;
+const struct dentry_operations jfs_ci_dentry_operations;
static s64 commitZeroLink(tid_t, struct inode *);
@@ -356,7 +356,7 @@ static int jfs_rmdir(struct inode *dip, struct dentry *dentry)
jfs_info("jfs_rmdir: dip:0x%p name:%s", dip, dentry->d_name.name);
/* Init inode for quota operations. */
- DQUOT_INIT(ip);
+ vfs_dq_init(ip);
/* directory must be empty to be removed */
if (!dtEmpty(ip)) {
@@ -483,7 +483,7 @@ static int jfs_unlink(struct inode *dip, struct dentry *dentry)
jfs_info("jfs_unlink: dip:0x%p name:%s", dip, dentry->d_name.name);
/* Init inode for quota operations. */
- DQUOT_INIT(ip);
+ vfs_dq_init(ip);
if ((rc = get_UCSname(&dname, dentry)))
goto out;
@@ -1136,7 +1136,7 @@ static int jfs_rename(struct inode *old_dir, struct dentry *old_dentry,
} else if (new_ip) {
IWRITE_LOCK(new_ip, RDWRLOCK_NORMAL);
/* Init inode for quota operations. */
- DQUOT_INIT(new_ip);
+ vfs_dq_init(new_ip);
}
/*
@@ -1595,7 +1595,7 @@ out:
return result;
}
-struct dentry_operations jfs_ci_dentry_operations =
+const struct dentry_operations jfs_ci_dentry_operations =
{
.d_hash = jfs_ci_hash,
.d_compare = jfs_ci_compare,
diff --git a/fs/jfs/xattr.c b/fs/jfs/xattr.c
index 9b7f2cdaae0..61dfa8173eb 100644
--- a/fs/jfs/xattr.c
+++ b/fs/jfs/xattr.c
@@ -260,14 +260,14 @@ static int ea_write(struct inode *ip, struct jfs_ea_list *ealist, int size,
nblocks = (size + (sb->s_blocksize - 1)) >> sb->s_blocksize_bits;
/* Allocate new blocks to quota. */
- if (DQUOT_ALLOC_BLOCK(ip, nblocks)) {
+ if (vfs_dq_alloc_block(ip, nblocks)) {
return -EDQUOT;
}
rc = dbAlloc(ip, INOHINT(ip), nblocks, &blkno);
if (rc) {
/*Rollback quota allocation. */
- DQUOT_FREE_BLOCK(ip, nblocks);
+ vfs_dq_free_block(ip, nblocks);
return rc;
}
@@ -332,7 +332,7 @@ static int ea_write(struct inode *ip, struct jfs_ea_list *ealist, int size,
failed:
/* Rollback quota allocation. */
- DQUOT_FREE_BLOCK(ip, nblocks);
+ vfs_dq_free_block(ip, nblocks);
dbFree(ip, blkno, nblocks);
return rc;
@@ -538,7 +538,7 @@ static int ea_get(struct inode *inode, struct ea_buffer *ea_buf, int min_size)
if (blocks_needed > current_blocks) {
/* Allocate new blocks to quota. */
- if (DQUOT_ALLOC_BLOCK(inode, blocks_needed))
+ if (vfs_dq_alloc_block(inode, blocks_needed))
return -EDQUOT;
quota_allocation = blocks_needed;
@@ -602,7 +602,7 @@ static int ea_get(struct inode *inode, struct ea_buffer *ea_buf, int min_size)
clean_up:
/* Rollback quota allocation */
if (quota_allocation)
- DQUOT_FREE_BLOCK(inode, quota_allocation);
+ vfs_dq_free_block(inode, quota_allocation);
return (rc);
}
@@ -677,7 +677,7 @@ static int ea_put(tid_t tid, struct inode *inode, struct ea_buffer *ea_buf,
/* If old blocks exist, they must be removed from quota allocation. */
if (old_blocks)
- DQUOT_FREE_BLOCK(inode, old_blocks);
+ vfs_dq_free_block(inode, old_blocks);
inode->i_ctime = CURRENT_TIME;
diff --git a/fs/libfs.c b/fs/libfs.c
index 49b44099dab..4910a36f516 100644
--- a/fs/libfs.c
+++ b/fs/libfs.c
@@ -44,7 +44,7 @@ static int simple_delete_dentry(struct dentry *dentry)
*/
struct dentry *simple_lookup(struct inode *dir, struct dentry *dentry, struct nameidata *nd)
{
- static struct dentry_operations simple_dentry_operations = {
+ static const struct dentry_operations simple_dentry_operations = {
.d_delete = simple_delete_dentry,
};
@@ -242,7 +242,8 @@ int get_sb_pseudo(struct file_system_type *fs_type, char *name,
d_instantiate(dentry, root);
s->s_root = dentry;
s->s_flags |= MS_ACTIVE;
- return simple_set_mnt(mnt, s);
+ simple_set_mnt(mnt, s);
+ return 0;
Enomem:
up_write(&s->s_umount);
diff --git a/fs/namei.c b/fs/namei.c
index 199317642ad..d040ce11785 100644
--- a/fs/namei.c
+++ b/fs/namei.c
@@ -1473,7 +1473,7 @@ int vfs_create(struct inode *dir, struct dentry *dentry, int mode,
error = security_inode_create(dir, dentry, mode);
if (error)
return error;
- DQUOT_INIT(dir);
+ vfs_dq_init(dir);
error = dir->i_op->create(dir, dentry, mode, nd);
if (!error)
fsnotify_create(dir, dentry);
@@ -1489,24 +1489,22 @@ int may_open(struct path *path, int acc_mode, int flag)
if (!inode)
return -ENOENT;
- if (S_ISLNK(inode->i_mode))
+ switch (inode->i_mode & S_IFMT) {
+ case S_IFLNK:
return -ELOOP;
-
- if (S_ISDIR(inode->i_mode) && (acc_mode & MAY_WRITE))
- return -EISDIR;
-
- /*
- * FIFO's, sockets and device files are special: they don't
- * actually live on the filesystem itself, and as such you
- * can write to them even if the filesystem is read-only.
- */
- if (S_ISFIFO(inode->i_mode) || S_ISSOCK(inode->i_mode)) {
- flag &= ~O_TRUNC;
- } else if (S_ISBLK(inode->i_mode) || S_ISCHR(inode->i_mode)) {
+ case S_IFDIR:
+ if (acc_mode & MAY_WRITE)
+ return -EISDIR;
+ break;
+ case S_IFBLK:
+ case S_IFCHR:
if (path->mnt->mnt_flags & MNT_NODEV)
return -EACCES;
-
+ /*FALLTHRU*/
+ case S_IFIFO:
+ case S_IFSOCK:
flag &= ~O_TRUNC;
+ break;
}
error = inode_permission(inode, acc_mode);
@@ -1552,7 +1550,7 @@ int may_open(struct path *path, int acc_mode, int flag)
error = security_path_truncate(path, 0,
ATTR_MTIME|ATTR_CTIME|ATTR_OPEN);
if (!error) {
- DQUOT_INIT(inode);
+ vfs_dq_init(inode);
error = do_truncate(dentry, 0,
ATTR_MTIME|ATTR_CTIME|ATTR_OPEN,
@@ -1563,7 +1561,7 @@ int may_open(struct path *path, int acc_mode, int flag)
return error;
} else
if (flag & FMODE_WRITE)
- DQUOT_INIT(inode);
+ vfs_dq_init(inode);
return 0;
}
@@ -1946,7 +1944,7 @@ int vfs_mknod(struct inode *dir, struct dentry *dentry, int mode, dev_t dev)
if (error)
return error;
- DQUOT_INIT(dir);
+ vfs_dq_init(dir);
error = dir->i_op->mknod(dir, dentry, mode, dev);
if (!error)
fsnotify_create(dir, dentry);
@@ -2045,7 +2043,7 @@ int vfs_mkdir(struct inode *dir, struct dentry *dentry, int mode)
if (error)
return error;
- DQUOT_INIT(dir);
+ vfs_dq_init(dir);
error = dir->i_op->mkdir(dir, dentry, mode);
if (!error)
fsnotify_mkdir(dir, dentry);
@@ -2131,7 +2129,7 @@ int vfs_rmdir(struct inode *dir, struct dentry *dentry)
if (!dir->i_op->rmdir)
return -EPERM;
- DQUOT_INIT(dir);
+ vfs_dq_init(dir);
mutex_lock(&dentry->d_inode->i_mutex);
dentry_unhash(dentry);
@@ -2218,7 +2216,7 @@ int vfs_unlink(struct inode *dir, struct dentry *dentry)
if (!dir->i_op->unlink)
return -EPERM;
- DQUOT_INIT(dir);
+ vfs_dq_init(dir);
mutex_lock(&dentry->d_inode->i_mutex);
if (d_mountpoint(dentry))
@@ -2329,7 +2327,7 @@ int vfs_symlink(struct inode *dir, struct dentry *dentry, const char *oldname)
if (error)
return error;
- DQUOT_INIT(dir);
+ vfs_dq_init(dir);
error = dir->i_op->symlink(dir, dentry, oldname);
if (!error)
fsnotify_create(dir, dentry);
@@ -2413,7 +2411,7 @@ int vfs_link(struct dentry *old_dentry, struct inode *dir, struct dentry *new_de
return error;
mutex_lock(&inode->i_mutex);
- DQUOT_INIT(dir);
+ vfs_dq_init(dir);
error = dir->i_op->link(old_dentry, dir, new_dentry);
mutex_unlock(&inode->i_mutex);
if (!error)
@@ -2612,8 +2610,8 @@ int vfs_rename(struct inode *old_dir, struct dentry *old_dentry,
if (!old_dir->i_op->rename)
return -EPERM;
- DQUOT_INIT(old_dir);
- DQUOT_INIT(new_dir);
+ vfs_dq_init(old_dir);
+ vfs_dq_init(new_dir);
old_name = fsnotify_oldname_init(old_dentry->d_name.name);
diff --git a/fs/namespace.c b/fs/namespace.c
index f0e75309735..0a42e0e9602 100644
--- a/fs/namespace.c
+++ b/fs/namespace.c
@@ -397,11 +397,10 @@ static void __mnt_unmake_readonly(struct vfsmount *mnt)
spin_unlock(&vfsmount_lock);
}
-int simple_set_mnt(struct vfsmount *mnt, struct super_block *sb)
+void simple_set_mnt(struct vfsmount *mnt, struct super_block *sb)
{
mnt->mnt_sb = sb;
mnt->mnt_root = dget(sb->s_root);
- return 0;
}
EXPORT_SYMBOL(simple_set_mnt);
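simple_set_mnt() can no longer fail, so its callers (get_sb_pseudo() and proc_get_sb() above) drop the "return simple_set_mnt(...)" idiom. A minimal sketch of what a filesystem's get_sb() looks like after the change; foo_get_sb() and the sget() arguments are illustrative only:

/* Minimal sketch of a get_sb() callback after simple_set_mnt() became void. */
static int foo_get_sb(struct file_system_type *fs_type, int flags,
		      const char *dev_name, void *data, struct vfsmount *mnt)
{
	struct super_block *sb = sget(fs_type, NULL, set_anon_super, NULL);

	if (IS_ERR(sb))
		return PTR_ERR(sb);

	/* ... fill in the superblock, set s_root, mark it MS_ACTIVE ... */

	simple_set_mnt(mnt, sb);	/* no return value to propagate any more */
	return 0;
}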
diff --git a/fs/ncpfs/dir.c b/fs/ncpfs/dir.c
index 07e9715b865..9c590722d87 100644
--- a/fs/ncpfs/dir.c
+++ b/fs/ncpfs/dir.c
@@ -79,7 +79,7 @@ static int ncp_hash_dentry(struct dentry *, struct qstr *);
static int ncp_compare_dentry (struct dentry *, struct qstr *, struct qstr *);
static int ncp_delete_dentry(struct dentry *);
-static struct dentry_operations ncp_dentry_operations =
+static const struct dentry_operations ncp_dentry_operations =
{
.d_revalidate = ncp_lookup_validate,
.d_hash = ncp_hash_dentry,
@@ -87,7 +87,7 @@ static struct dentry_operations ncp_dentry_operations =
.d_delete = ncp_delete_dentry,
};
-struct dentry_operations ncp_root_dentry_operations =
+const struct dentry_operations ncp_root_dentry_operations =
{
.d_hash = ncp_hash_dentry,
.d_compare = ncp_compare_dentry,
diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c
index 672368f865c..78bf72fc1db 100644
--- a/fs/nfs/dir.c
+++ b/fs/nfs/dir.c
@@ -899,7 +899,7 @@ static void nfs_dentry_iput(struct dentry *dentry, struct inode *inode)
iput(inode);
}
-struct dentry_operations nfs_dentry_operations = {
+const struct dentry_operations nfs_dentry_operations = {
.d_revalidate = nfs_lookup_revalidate,
.d_delete = nfs_dentry_delete,
.d_iput = nfs_dentry_iput,
@@ -967,7 +967,7 @@ out:
#ifdef CONFIG_NFS_V4
static int nfs_open_revalidate(struct dentry *, struct nameidata *);
-struct dentry_operations nfs4_dentry_operations = {
+const struct dentry_operations nfs4_dentry_operations = {
.d_revalidate = nfs_open_revalidate,
.d_delete = nfs_dentry_delete,
.d_iput = nfs_dentry_iput,
diff --git a/fs/nfs/nfs4_fs.h b/fs/nfs/nfs4_fs.h
index 4e4d3320437..84345deab26 100644
--- a/fs/nfs/nfs4_fs.h
+++ b/fs/nfs/nfs4_fs.h
@@ -179,7 +179,7 @@ struct nfs4_state_recovery_ops {
int (*recover_lock)(struct nfs4_state *, struct file_lock *);
};
-extern struct dentry_operations nfs4_dentry_operations;
+extern const struct dentry_operations nfs4_dentry_operations;
extern const struct inode_operations nfs4_dir_inode_operations;
/* inode.c */
diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c
index 6e50aaa56ca..78376b6c023 100644
--- a/fs/nfsd/vfs.c
+++ b/fs/nfsd/vfs.c
@@ -356,7 +356,7 @@ nfsd_setattr(struct svc_rqst *rqstp, struct svc_fh *fhp, struct iattr *iap,
put_write_access(inode);
goto out_nfserr;
}
- DQUOT_INIT(inode);
+ vfs_dq_init(inode);
}
/* sanitize the mode change */
@@ -723,7 +723,7 @@ nfsd_open(struct svc_rqst *rqstp, struct svc_fh *fhp, int type,
else
flags = O_WRONLY|O_LARGEFILE;
- DQUOT_INIT(inode);
+ vfs_dq_init(inode);
}
*filp = dentry_open(dget(dentry), mntget(fhp->fh_export->ex_path.mnt),
flags, cred);
@@ -998,8 +998,11 @@ nfsd_vfs_write(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file *file,
if (!EX_ISSYNC(exp))
stable = 0;
- if (stable && !EX_WGATHER(exp))
+ if (stable && !EX_WGATHER(exp)) {
+ spin_lock(&file->f_lock);
file->f_flags |= O_SYNC;
+ spin_unlock(&file->f_lock);
+ }
/* Write the data. */
oldfs = get_fs(); set_fs(KERNEL_DS);
diff --git a/fs/notify/inotify/inotify.c b/fs/notify/inotify/inotify.c
index 331f2e88e28..220c13f0d73 100644
--- a/fs/notify/inotify/inotify.c
+++ b/fs/notify/inotify/inotify.c
@@ -380,6 +380,14 @@ void inotify_unmount_inodes(struct list_head *list)
struct list_head *watches;
/*
+ * We cannot __iget() an inode in state I_CLEAR, I_FREEING,
+ * I_WILL_FREE, or I_NEW which is fine because by that point
+ * the inode cannot have any associated watches.
+ */
+ if (inode->i_state & (I_CLEAR|I_FREEING|I_WILL_FREE|I_NEW))
+ continue;
+
+ /*
* If i_count is zero, the inode cannot have any watches and
* doing an __iget/iput with MS_ACTIVE clear would actually
* evict all inodes with zero i_count from icache which is
@@ -388,14 +396,6 @@ void inotify_unmount_inodes(struct list_head *list)
if (!atomic_read(&inode->i_count))
continue;
- /*
- * We cannot __iget() an inode in state I_CLEAR, I_FREEING, or
- * I_WILL_FREE which is fine because by that point the inode
- * cannot have any associated watches.
- */
- if (inode->i_state & (I_CLEAR | I_FREEING | I_WILL_FREE))
- continue;
-
need_iput_tmp = need_iput;
need_iput = NULL;
/* In case inotify_remove_watch_locked() drops a reference. */
diff --git a/fs/ocfs2/dcache.c b/fs/ocfs2/dcache.c
index e9d7c2038c0..7d604480557 100644
--- a/fs/ocfs2/dcache.c
+++ b/fs/ocfs2/dcache.c
@@ -455,7 +455,7 @@ out_move:
d_move(dentry, target);
}
-struct dentry_operations ocfs2_dentry_ops = {
+const struct dentry_operations ocfs2_dentry_ops = {
.d_revalidate = ocfs2_dentry_revalidate,
.d_iput = ocfs2_dentry_iput,
};
diff --git a/fs/ocfs2/dcache.h b/fs/ocfs2/dcache.h
index d06e16c0664..faa12e75f98 100644
--- a/fs/ocfs2/dcache.h
+++ b/fs/ocfs2/dcache.h
@@ -26,7 +26,7 @@
#ifndef OCFS2_DCACHE_H
#define OCFS2_DCACHE_H
-extern struct dentry_operations ocfs2_dentry_ops;
+extern const struct dentry_operations ocfs2_dentry_ops;
struct ocfs2_dentry_lock {
/* Use count of dentry lock */
diff --git a/fs/open.c b/fs/open.c
index a3a78ceb2a2..75b61677daa 100644
--- a/fs/open.c
+++ b/fs/open.c
@@ -273,7 +273,7 @@ static long do_sys_truncate(const char __user *pathname, loff_t length)
if (!error)
error = security_path_truncate(&path, length, 0);
if (!error) {
- DQUOT_INIT(inode);
+ vfs_dq_init(inode);
error = do_truncate(path.dentry, length, 0, NULL);
}
diff --git a/fs/partitions/ibm.c b/fs/partitions/ibm.c
index 1e064c4a4f8..46297683cd3 100644
--- a/fs/partitions/ibm.c
+++ b/fs/partitions/ibm.c
@@ -21,20 +21,38 @@
* compute the block number from a
* cyl-cyl-head-head structure
*/
-static inline int
+static sector_t
cchh2blk (struct vtoc_cchh *ptr, struct hd_geometry *geo) {
- return ptr->cc * geo->heads * geo->sectors +
- ptr->hh * geo->sectors;
+
+ sector_t cyl;
+ __u16 head;
+
+ /*decode cylinder and heads for large volumes */
+ cyl = ptr->hh & 0xFFF0;
+ cyl <<= 12;
+ cyl |= ptr->cc;
+ head = ptr->hh & 0x000F;
+ return cyl * geo->heads * geo->sectors +
+ head * geo->sectors;
}
/*
* compute the block number from a
* cyl-cyl-head-head-block structure
*/
-static inline int
+static sector_t
cchhb2blk (struct vtoc_cchhb *ptr, struct hd_geometry *geo) {
- return ptr->cc * geo->heads * geo->sectors +
- ptr->hh * geo->sectors +
+
+ sector_t cyl;
+ __u16 head;
+
+ /*decode cylinder and heads for large volumes */
+ cyl = ptr->hh & 0xFFF0;
+ cyl <<= 12;
+ cyl |= ptr->cc;
+ head = ptr->hh & 0x000F;
+ return cyl * geo->heads * geo->sectors +
+ head * geo->sectors +
ptr->b;
}
@@ -43,14 +61,15 @@ cchhb2blk (struct vtoc_cchhb *ptr, struct hd_geometry *geo) {
int
ibm_partition(struct parsed_partitions *state, struct block_device *bdev)
{
- int blocksize, offset, size,res;
- loff_t i_size;
+ int blocksize, res;
+ loff_t i_size, offset, size, fmt_size;
dasd_information2_t *info;
struct hd_geometry *geo;
char type[5] = {0,};
char name[7] = {0,};
union label_t {
- struct vtoc_volume_label vol;
+ struct vtoc_volume_label_cdl vol;
+ struct vtoc_volume_label_ldl lnx;
struct vtoc_cms_label cms;
} *label;
unsigned char *data;
@@ -85,14 +104,16 @@ ibm_partition(struct parsed_partitions *state, struct block_device *bdev)
if (data == NULL)
goto out_readerr;
- strncpy (type, data, 4);
- if ((!info->FBA_layout) && (!strcmp(info->type, "ECKD")))
- strncpy(name, data + 8, 6);
- else
- strncpy(name, data + 4, 6);
memcpy(label, data, sizeof(union label_t));
put_dev_sector(sect);
+ if ((!info->FBA_layout) && (!strcmp(info->type, "ECKD"))) {
+ strncpy(type, label->vol.vollbl, 4);
+ strncpy(name, label->vol.volid, 6);
+ } else {
+ strncpy(type, label->lnx.vollbl, 4);
+ strncpy(name, label->lnx.volid, 6);
+ }
EBCASC(type, 4);
EBCASC(name, 6);
@@ -110,36 +131,54 @@ ibm_partition(struct parsed_partitions *state, struct block_device *bdev)
/*
* VM style CMS1 labeled disk
*/
+ blocksize = label->cms.block_size;
if (label->cms.disk_offset != 0) {
printk("CMS1/%8s(MDSK):", name);
/* disk is reserved minidisk */
- blocksize = label->cms.block_size;
offset = label->cms.disk_offset;
size = (label->cms.block_count - 1)
* (blocksize >> 9);
} else {
printk("CMS1/%8s:", name);
offset = (info->label_block + 1);
- size = i_size >> 9;
+ size = label->cms.block_count
+ * (blocksize >> 9);
}
+ put_partition(state, 1, offset*(blocksize >> 9),
+ size-offset*(blocksize >> 9));
} else {
- /*
- * Old style LNX1 or unlabeled disk
- */
- if (strncmp(type, "LNX1", 4) == 0)
- printk ("LNX1/%8s:", name);
- else
+ if (strncmp(type, "LNX1", 4) == 0) {
+ printk("LNX1/%8s:", name);
+ if (label->lnx.ldl_version == 0xf2) {
+ fmt_size = label->lnx.formatted_blocks
+ * (blocksize >> 9);
+ } else if (!strcmp(info->type, "ECKD")) {
+ /* formatted w/o large volume support */
+ fmt_size = geo->cylinders * geo->heads
+ * geo->sectors * (blocksize >> 9);
+ } else {
+ /* old label and no usable disk geometry
+ * (e.g. DIAG) */
+ fmt_size = i_size >> 9;
+ }
+ size = i_size >> 9;
+ if (fmt_size < size)
+ size = fmt_size;
+ offset = (info->label_block + 1);
+ } else {
+ /* unlabeled disk */
printk("(nonl)");
- offset = (info->label_block + 1);
- size = i_size >> 9;
- }
- put_partition(state, 1, offset*(blocksize >> 9),
+ size = i_size >> 9;
+ offset = (info->label_block + 1);
+ }
+ put_partition(state, 1, offset*(blocksize >> 9),
size-offset*(blocksize >> 9));
+ }
} else if (info->format == DASD_FORMAT_CDL) {
/*
* New style CDL formatted disk
*/
- unsigned int blk;
+ sector_t blk;
int counter;
/*
@@ -166,7 +205,8 @@ ibm_partition(struct parsed_partitions *state, struct block_device *bdev)
/* skip FMT4 / FMT5 / FMT7 labels */
if (f1.DS1FMTID == _ascebc['4']
|| f1.DS1FMTID == _ascebc['5']
- || f1.DS1FMTID == _ascebc['7']) {
+ || f1.DS1FMTID == _ascebc['7']
+ || f1.DS1FMTID == _ascebc['9']) {
blk++;
data = read_dev_sector(bdev, blk *
(blocksize/512),
@@ -174,8 +214,9 @@ ibm_partition(struct parsed_partitions *state, struct block_device *bdev)
continue;
}
- /* only FMT1 valid at this point */
- if (f1.DS1FMTID != _ascebc['1'])
+ /* only FMT1 and 8 labels valid at this point */
+ if (f1.DS1FMTID != _ascebc['1'] &&
+ f1.DS1FMTID != _ascebc['8'])
break;
/* OK, we got valid partition data */
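The new cchh2blk()/cchhb2blk() above handle "large volume" DASD addressing, where the cylinder number no longer fits in the 16-bit cc field: the upper 12 bits of hh hold cylinder bits 16-27 and only its low 4 bits are the head number. A stand-alone, user-space illustration of that bit layout (the field widths come from the masks above; everything else is just for the example):

#include <stdio.h>
#include <stdint.h>

/* Stand-alone illustration of the cc/hh decoding introduced above. */
static uint64_t decode_cyl(uint16_t cc, uint16_t hh)
{
	uint64_t cyl = (uint64_t)(hh & 0xFFF0) << 12;	/* cylinder bits 16..27 */
	return cyl | cc;				/* cylinder bits 0..15  */
}

static unsigned int decode_head(uint16_t hh)
{
	return hh & 0x000F;				/* head is the low 4 bits */
}

int main(void)
{
	/* e.g. cc = 0x1234, hh = 0x0203 encodes cylinder 0x201234, head 3;
	 * the block number is then cyl * heads * sectors + head * sectors
	 * (+ the record number for cchhb2blk). */
	printf("cyl=0x%llx head=%u\n",
	       (unsigned long long)decode_cyl(0x1234, 0x0203),
	       decode_head(0x0203));
	return 0;
}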
diff --git a/fs/pipe.c b/fs/pipe.c
index 14f502b89cf..4af7aa52181 100644
--- a/fs/pipe.c
+++ b/fs/pipe.c
@@ -667,10 +667,7 @@ pipe_read_fasync(int fd, struct file *filp, int on)
retval = fasync_helper(fd, filp, on, &inode->i_pipe->fasync_readers);
mutex_unlock(&inode->i_mutex);
- if (retval < 0)
- return retval;
-
- return 0;
+ return retval;
}
@@ -684,10 +681,7 @@ pipe_write_fasync(int fd, struct file *filp, int on)
retval = fasync_helper(fd, filp, on, &inode->i_pipe->fasync_writers);
mutex_unlock(&inode->i_mutex);
- if (retval < 0)
- return retval;
-
- return 0;
+ return retval;
}
@@ -706,11 +700,7 @@ pipe_rdwr_fasync(int fd, struct file *filp, int on)
fasync_helper(-1, filp, 0, &pipe->fasync_readers);
}
mutex_unlock(&inode->i_mutex);
-
- if (retval < 0)
- return retval;
-
- return 0;
+ return retval;
}
@@ -870,7 +860,7 @@ static char *pipefs_dname(struct dentry *dentry, char *buffer, int buflen)
dentry->d_inode->i_ino);
}
-static struct dentry_operations pipefs_dentry_operations = {
+static const struct dentry_operations pipefs_dentry_operations = {
.d_delete = pipefs_delete_dentry,
.d_dname = pipefs_dname,
};
@@ -1034,11 +1024,6 @@ int do_pipe_flags(int *fd, int flags)
return error;
}
-int do_pipe(int *fd)
-{
- return do_pipe_flags(fd, 0);
-}
-
/*
* sys_pipe() is the normal C calling standard for creating
* a pipe. It's not the way Unix traditionally does this, though.
diff --git a/fs/proc/base.c b/fs/proc/base.c
index beaa0ce3b82..aef6d55b7de 100644
--- a/fs/proc/base.c
+++ b/fs/proc/base.c
@@ -1545,7 +1545,7 @@ static int pid_delete_dentry(struct dentry * dentry)
return !proc_pid(dentry->d_inode)->tasks[PIDTYPE_PID].first;
}
-static struct dentry_operations pid_dentry_operations =
+static const struct dentry_operations pid_dentry_operations =
{
.d_revalidate = pid_revalidate,
.d_delete = pid_delete_dentry,
@@ -1717,7 +1717,7 @@ static int tid_fd_revalidate(struct dentry *dentry, struct nameidata *nd)
return 0;
}
-static struct dentry_operations tid_fd_dentry_operations =
+static const struct dentry_operations tid_fd_dentry_operations =
{
.d_revalidate = tid_fd_revalidate,
.d_delete = pid_delete_dentry,
@@ -2339,7 +2339,7 @@ static int proc_base_revalidate(struct dentry *dentry, struct nameidata *nd)
return 0;
}
-static struct dentry_operations proc_base_dentry_operations =
+static const struct dentry_operations proc_base_dentry_operations =
{
.d_revalidate = proc_base_revalidate,
.d_delete = pid_delete_dentry,
diff --git a/fs/proc/generic.c b/fs/proc/generic.c
index db7fa5cab98..5d2989e9dcc 100644
--- a/fs/proc/generic.c
+++ b/fs/proc/generic.c
@@ -363,7 +363,7 @@ static int proc_delete_dentry(struct dentry * dentry)
return 1;
}
-static struct dentry_operations proc_dentry_operations =
+static const struct dentry_operations proc_dentry_operations =
{
.d_delete = proc_delete_dentry,
};
diff --git a/fs/proc/proc_sysctl.c b/fs/proc/proc_sysctl.c
index 94fcfff6863..9b1e4e9a16b 100644
--- a/fs/proc/proc_sysctl.c
+++ b/fs/proc/proc_sysctl.c
@@ -7,7 +7,7 @@
#include <linux/security.h>
#include "internal.h"
-static struct dentry_operations proc_sys_dentry_operations;
+static const struct dentry_operations proc_sys_dentry_operations;
static const struct file_operations proc_sys_file_operations;
static const struct inode_operations proc_sys_inode_operations;
static const struct file_operations proc_sys_dir_file_operations;
@@ -396,7 +396,7 @@ static int proc_sys_compare(struct dentry *dir, struct qstr *qstr,
return !sysctl_is_seen(PROC_I(dentry->d_inode)->sysctl);
}
-static struct dentry_operations proc_sys_dentry_operations = {
+static const struct dentry_operations proc_sys_dentry_operations = {
.d_revalidate = proc_sys_revalidate,
.d_delete = proc_sys_delete,
.d_compare = proc_sys_compare,
diff --git a/fs/proc/root.c b/fs/proc/root.c
index f6299a25594..1e15a2b176e 100644
--- a/fs/proc/root.c
+++ b/fs/proc/root.c
@@ -83,7 +83,8 @@ static int proc_get_sb(struct file_system_type *fs_type,
ns->proc_mnt = mnt;
}
- return simple_set_mnt(mnt, sb);
+ simple_set_mnt(mnt, sb);
+ return 0;
}
static void proc_kill_sb(struct super_block *sb)
diff --git a/fs/quota/Kconfig b/fs/quota/Kconfig
new file mode 100644
index 00000000000..8047e01ef46
--- /dev/null
+++ b/fs/quota/Kconfig
@@ -0,0 +1,59 @@
+#
+# Quota configuration
+#
+
+config QUOTA
+ bool "Quota support"
+ help
+ If you say Y here, you will be able to set per user limits for disk
+ usage (also called disk quotas). Currently, it works for the
+ ext2, ext3, and reiserfs file systems. ext3 also supports journalled
+ quotas for which you don't need to run quotacheck(8) after an unclean
+ shutdown.
+ For further details, read the Quota mini-HOWTO, available from
+ <http://www.tldp.org/docs.html#howto>, or the documentation provided
+ with the quota tools. Quota support is probably only useful for
+ multi-user systems. If unsure, say N.
+
+config QUOTA_NETLINK_INTERFACE
+ bool "Report quota messages through netlink interface"
+ depends on QUOTA && NET
+ help
+ If you say Y here, quota warnings (about exceeding softlimit, reaching
+ hardlimit, etc.) will be reported through the netlink interface. If unsure,
+ say Y.
+
+config PRINT_QUOTA_WARNING
+ bool "Print quota warnings to console (OBSOLETE)"
+ depends on QUOTA
+ default y
+ help
+ If you say Y here, quota warnings (about exceeding softlimit, reaching
+ hardlimit, etc.) will be printed to the process' controlling terminal.
+ Note that this behavior is currently deprecated and may go away in
+ the future. Please use notification via the netlink socket instead.
+
+# Generic support for tree structured quota files. Selected when needed.
+config QUOTA_TREE
+ tristate
+
+config QFMT_V1
+ tristate "Old quota format support"
+ depends on QUOTA
+ help
+ This quota format was used by kernels earlier than 2.4.22 and is still
+ supported. If you have quota working and you don't want to convert to
+ the new quota format, say Y here.
+
+config QFMT_V2
+ tristate "Quota format v2 support"
+ depends on QUOTA
+ select QUOTA_TREE
+ help
+ This quota format allows using quotas with 32-bit UIDs/GIDs. If you
+ need this functionality say Y here.
+
+config QUOTACTL
+ bool
+ depends on XFS_QUOTA || QUOTA
+ default y
diff --git a/fs/quota/Makefile b/fs/quota/Makefile
new file mode 100644
index 00000000000..385a0831cc9
--- /dev/null
+++ b/fs/quota/Makefile
@@ -0,0 +1,14 @@
+#
+# Makefile for the Linux filesystems.
+#
+# 14 Sep 2000, Christoph Hellwig <hch@infradead.org>
+# Rewritten to use lists instead of if-statements.
+#
+
+obj-y :=
+
+obj-$(CONFIG_QUOTA) += dquot.o
+obj-$(CONFIG_QFMT_V1) += quota_v1.o
+obj-$(CONFIG_QFMT_V2) += quota_v2.o
+obj-$(CONFIG_QUOTA_TREE) += quota_tree.o
+obj-$(CONFIG_QUOTACTL) += quota.o
diff --git a/fs/dquot.c b/fs/quota/dquot.c
index bca3cac4bee..2ca967a5ef7 100644
--- a/fs/dquot.c
+++ b/fs/quota/dquot.c
@@ -129,9 +129,10 @@
* i_mutex on quota files is special (it's below dqio_mutex)
*/
-static DEFINE_SPINLOCK(dq_list_lock);
-static DEFINE_SPINLOCK(dq_state_lock);
-DEFINE_SPINLOCK(dq_data_lock);
+static __cacheline_aligned_in_smp DEFINE_SPINLOCK(dq_list_lock);
+static __cacheline_aligned_in_smp DEFINE_SPINLOCK(dq_state_lock);
+__cacheline_aligned_in_smp DEFINE_SPINLOCK(dq_data_lock);
+EXPORT_SYMBOL(dq_data_lock);
static char *quotatypes[] = INITQFNAMES;
static struct quota_format_type *quota_formats; /* List of registered formats */
@@ -148,35 +149,46 @@ int register_quota_format(struct quota_format_type *fmt)
spin_unlock(&dq_list_lock);
return 0;
}
+EXPORT_SYMBOL(register_quota_format);
void unregister_quota_format(struct quota_format_type *fmt)
{
struct quota_format_type **actqf;
spin_lock(&dq_list_lock);
- for (actqf = &quota_formats; *actqf && *actqf != fmt; actqf = &(*actqf)->qf_next);
+ for (actqf = &quota_formats; *actqf && *actqf != fmt;
+ actqf = &(*actqf)->qf_next)
+ ;
if (*actqf)
*actqf = (*actqf)->qf_next;
spin_unlock(&dq_list_lock);
}
+EXPORT_SYMBOL(unregister_quota_format);
static struct quota_format_type *find_quota_format(int id)
{
struct quota_format_type *actqf;
spin_lock(&dq_list_lock);
- for (actqf = quota_formats; actqf && actqf->qf_fmt_id != id; actqf = actqf->qf_next);
+ for (actqf = quota_formats; actqf && actqf->qf_fmt_id != id;
+ actqf = actqf->qf_next)
+ ;
if (!actqf || !try_module_get(actqf->qf_owner)) {
int qm;
spin_unlock(&dq_list_lock);
- for (qm = 0; module_names[qm].qm_fmt_id && module_names[qm].qm_fmt_id != id; qm++);
- if (!module_names[qm].qm_fmt_id || request_module(module_names[qm].qm_mod_name))
+ for (qm = 0; module_names[qm].qm_fmt_id &&
+ module_names[qm].qm_fmt_id != id; qm++)
+ ;
+ if (!module_names[qm].qm_fmt_id ||
+ request_module(module_names[qm].qm_mod_name))
return NULL;
spin_lock(&dq_list_lock);
- for (actqf = quota_formats; actqf && actqf->qf_fmt_id != id; actqf = actqf->qf_next);
+ for (actqf = quota_formats; actqf && actqf->qf_fmt_id != id;
+ actqf = actqf->qf_next)
+ ;
if (actqf && !try_module_get(actqf->qf_owner))
actqf = NULL;
}
@@ -215,6 +227,7 @@ static unsigned int dq_hash_bits, dq_hash_mask;
static struct hlist_head *dquot_hash;
struct dqstats dqstats;
+EXPORT_SYMBOL(dqstats);
static inline unsigned int
hashfn(const struct super_block *sb, unsigned int id, int type)
@@ -230,7 +243,8 @@ hashfn(const struct super_block *sb, unsigned int id, int type)
*/
static inline void insert_dquot_hash(struct dquot *dquot)
{
- struct hlist_head *head = dquot_hash + hashfn(dquot->dq_sb, dquot->dq_id, dquot->dq_type);
+ struct hlist_head *head;
+ head = dquot_hash + hashfn(dquot->dq_sb, dquot->dq_id, dquot->dq_type);
hlist_add_head(&dquot->dq_hash, head);
}
@@ -239,17 +253,19 @@ static inline void remove_dquot_hash(struct dquot *dquot)
hlist_del_init(&dquot->dq_hash);
}
-static inline struct dquot *find_dquot(unsigned int hashent, struct super_block *sb, unsigned int id, int type)
+static struct dquot *find_dquot(unsigned int hashent, struct super_block *sb,
+ unsigned int id, int type)
{
struct hlist_node *node;
struct dquot *dquot;
hlist_for_each (node, dquot_hash+hashent) {
dquot = hlist_entry(node, struct dquot, dq_hash);
- if (dquot->dq_sb == sb && dquot->dq_id == id && dquot->dq_type == type)
+ if (dquot->dq_sb == sb && dquot->dq_id == id &&
+ dquot->dq_type == type)
return dquot;
}
- return NODQUOT;
+ return NULL;
}
/* Add a dquot to the tail of the free list */
@@ -309,6 +325,7 @@ int dquot_mark_dquot_dirty(struct dquot *dquot)
spin_unlock(&dq_list_lock);
return 0;
}
+EXPORT_SYMBOL(dquot_mark_dquot_dirty);
/* This function needs dq_list_lock */
static inline int clear_dquot_dirty(struct dquot *dquot)
@@ -345,8 +362,10 @@ int dquot_acquire(struct dquot *dquot)
if (!test_bit(DQ_ACTIVE_B, &dquot->dq_flags) && !dquot->dq_off) {
ret = dqopt->ops[dquot->dq_type]->commit_dqblk(dquot);
/* Write the info if needed */
- if (info_dirty(&dqopt->info[dquot->dq_type]))
- ret2 = dqopt->ops[dquot->dq_type]->write_file_info(dquot->dq_sb, dquot->dq_type);
+ if (info_dirty(&dqopt->info[dquot->dq_type])) {
+ ret2 = dqopt->ops[dquot->dq_type]->write_file_info(
+ dquot->dq_sb, dquot->dq_type);
+ }
if (ret < 0)
goto out_iolock;
if (ret2 < 0) {
@@ -360,6 +379,7 @@ out_iolock:
mutex_unlock(&dquot->dq_lock);
return ret;
}
+EXPORT_SYMBOL(dquot_acquire);
/*
* Write dquot to disk
@@ -380,8 +400,10 @@ int dquot_commit(struct dquot *dquot)
* => we have better not writing it */
if (test_bit(DQ_ACTIVE_B, &dquot->dq_flags)) {
ret = dqopt->ops[dquot->dq_type]->commit_dqblk(dquot);
- if (info_dirty(&dqopt->info[dquot->dq_type]))
- ret2 = dqopt->ops[dquot->dq_type]->write_file_info(dquot->dq_sb, dquot->dq_type);
+ if (info_dirty(&dqopt->info[dquot->dq_type])) {
+ ret2 = dqopt->ops[dquot->dq_type]->write_file_info(
+ dquot->dq_sb, dquot->dq_type);
+ }
if (ret >= 0)
ret = ret2;
}
@@ -389,6 +411,7 @@ out_sem:
mutex_unlock(&dqopt->dqio_mutex);
return ret;
}
+EXPORT_SYMBOL(dquot_commit);
/*
* Release dquot
@@ -406,8 +429,10 @@ int dquot_release(struct dquot *dquot)
if (dqopt->ops[dquot->dq_type]->release_dqblk) {
ret = dqopt->ops[dquot->dq_type]->release_dqblk(dquot);
/* Write the info */
- if (info_dirty(&dqopt->info[dquot->dq_type]))
- ret2 = dqopt->ops[dquot->dq_type]->write_file_info(dquot->dq_sb, dquot->dq_type);
+ if (info_dirty(&dqopt->info[dquot->dq_type])) {
+ ret2 = dqopt->ops[dquot->dq_type]->write_file_info(
+ dquot->dq_sb, dquot->dq_type);
+ }
if (ret >= 0)
ret = ret2;
}
@@ -417,6 +442,7 @@ out_dqlock:
mutex_unlock(&dquot->dq_lock);
return ret;
}
+EXPORT_SYMBOL(dquot_release);
void dquot_destroy(struct dquot *dquot)
{
@@ -516,6 +542,7 @@ out:
mutex_unlock(&sb_dqopt(sb)->dqonoff_mutex);
return ret;
}
+EXPORT_SYMBOL(dquot_scan_active);
int vfs_quota_sync(struct super_block *sb, int type)
{
@@ -533,7 +560,8 @@ int vfs_quota_sync(struct super_block *sb, int type)
spin_lock(&dq_list_lock);
dirty = &dqopt->info[cnt].dqi_dirty_list;
while (!list_empty(dirty)) {
- dquot = list_first_entry(dirty, struct dquot, dq_dirty);
+ dquot = list_first_entry(dirty, struct dquot,
+ dq_dirty);
/* Dirty and inactive can be only bad dquot... */
if (!test_bit(DQ_ACTIVE_B, &dquot->dq_flags)) {
clear_dquot_dirty(dquot);
@@ -563,6 +591,7 @@ int vfs_quota_sync(struct super_block *sb, int type)
return 0;
}
+EXPORT_SYMBOL(vfs_quota_sync);
/* Free unused dquots from cache */
static void prune_dqcache(int count)
@@ -672,6 +701,7 @@ we_slept:
put_dquot_last(dquot);
spin_unlock(&dq_list_lock);
}
+EXPORT_SYMBOL(dqput);
struct dquot *dquot_alloc(struct super_block *sb, int type)
{
@@ -685,7 +715,7 @@ static struct dquot *get_empty_dquot(struct super_block *sb, int type)
dquot = sb->dq_op->alloc_dquot(sb, type);
if(!dquot)
- return NODQUOT;
+ return NULL;
mutex_init(&dquot->dq_lock);
INIT_LIST_HEAD(&dquot->dq_free);
@@ -711,10 +741,10 @@ static struct dquot *get_empty_dquot(struct super_block *sb, int type)
struct dquot *dqget(struct super_block *sb, unsigned int id, int type)
{
unsigned int hashent = hashfn(sb, id, type);
- struct dquot *dquot = NODQUOT, *empty = NODQUOT;
+ struct dquot *dquot = NULL, *empty = NULL;
if (!sb_has_quota_active(sb, type))
- return NODQUOT;
+ return NULL;
we_slept:
spin_lock(&dq_list_lock);
spin_lock(&dq_state_lock);
@@ -725,15 +755,17 @@ we_slept:
}
spin_unlock(&dq_state_lock);
- if ((dquot = find_dquot(hashent, sb, id, type)) == NODQUOT) {
- if (empty == NODQUOT) {
+ dquot = find_dquot(hashent, sb, id, type);
+ if (!dquot) {
+ if (!empty) {
spin_unlock(&dq_list_lock);
- if ((empty = get_empty_dquot(sb, type)) == NODQUOT)
+ empty = get_empty_dquot(sb, type);
+ if (!empty)
schedule(); /* Try to wait for a moment... */
goto we_slept;
}
dquot = empty;
- empty = NODQUOT;
+ empty = NULL;
dquot->dq_id = id;
/* all dquots go on the inuse_list */
put_inuse(dquot);
@@ -749,13 +781,14 @@ we_slept:
dqstats.lookups++;
spin_unlock(&dq_list_lock);
}
- /* Wait for dq_lock - after this we know that either dquot_release() is already
- * finished or it will be canceled due to dq_count > 1 test */
+ /* Wait for dq_lock - after this we know that either dquot_release() is
+ * already finished or it will be canceled due to dq_count > 1 test */
wait_on_dquot(dquot);
- /* Read the dquot and instantiate it (everything done only if needed) */
- if (!test_bit(DQ_ACTIVE_B, &dquot->dq_flags) && sb->dq_op->acquire_dquot(dquot) < 0) {
+ /* Read the dquot / allocate space in quota file */
+ if (!test_bit(DQ_ACTIVE_B, &dquot->dq_flags) &&
+ sb->dq_op->acquire_dquot(dquot) < 0) {
dqput(dquot);
- dquot = NODQUOT;
+ dquot = NULL;
goto out;
}
#ifdef __DQUOT_PARANOIA
@@ -767,6 +800,7 @@ out:
return dquot;
}
+EXPORT_SYMBOL(dqget);
static int dqinit_needed(struct inode *inode, int type)
{
@@ -775,9 +809,9 @@ static int dqinit_needed(struct inode *inode, int type)
if (IS_NOQUOTA(inode))
return 0;
if (type != -1)
- return inode->i_dquot[type] == NODQUOT;
+ return !inode->i_dquot[type];
for (cnt = 0; cnt < MAXQUOTAS; cnt++)
- if (inode->i_dquot[cnt] == NODQUOT)
+ if (!inode->i_dquot[cnt])
return 1;
return 0;
}
@@ -789,12 +823,12 @@ static void add_dquot_ref(struct super_block *sb, int type)
spin_lock(&inode_lock);
list_for_each_entry(inode, &sb->s_inodes, i_sb_list) {
+ if (inode->i_state & (I_FREEING|I_WILL_FREE|I_NEW))
+ continue;
if (!atomic_read(&inode->i_writecount))
continue;
if (!dqinit_needed(inode, type))
continue;
- if (inode->i_state & (I_FREEING|I_WILL_FREE))
- continue;
__iget(inode);
spin_unlock(&inode_lock);
@@ -813,7 +847,10 @@ static void add_dquot_ref(struct super_block *sb, int type)
iput(old_inode);
}
-/* Return 0 if dqput() won't block (note that 1 doesn't necessarily mean blocking) */
+/*
+ * Return 0 if dqput() won't block.
+ * (note that 1 doesn't necessarily mean blocking)
+ */
static inline int dqput_blocks(struct dquot *dquot)
{
if (atomic_read(&dquot->dq_count) <= 1)
@@ -821,22 +858,27 @@ static inline int dqput_blocks(struct dquot *dquot)
return 0;
}
-/* Remove references to dquots from inode - add dquot to list for freeing if needed */
-/* We can't race with anybody because we hold dqptr_sem for writing... */
+/*
+ * Remove references to dquots from inode and add dquot to list for freeing
+ * if we have the last reference to dquot
+ * We can't race with anybody because we hold dqptr_sem for writing...
+ */
static int remove_inode_dquot_ref(struct inode *inode, int type,
struct list_head *tofree_head)
{
struct dquot *dquot = inode->i_dquot[type];
- inode->i_dquot[type] = NODQUOT;
- if (dquot != NODQUOT) {
+ inode->i_dquot[type] = NULL;
+ if (dquot) {
if (dqput_blocks(dquot)) {
#ifdef __DQUOT_PARANOIA
if (atomic_read(&dquot->dq_count) != 1)
printk(KERN_WARNING "VFS: Adding dquot with dq_count %d to dispose list.\n", atomic_read(&dquot->dq_count));
#endif
spin_lock(&dq_list_lock);
- list_add(&dquot->dq_free, tofree_head); /* As dquot must have currently users it can't be on the free list... */
+ /* As the dquot currently has users it can't be on
+ * the free list... */
+ list_add(&dquot->dq_free, tofree_head);
spin_unlock(&dq_list_lock);
return 1;
}
@@ -846,19 +888,22 @@ static int remove_inode_dquot_ref(struct inode *inode, int type,
return 0;
}
-/* Free list of dquots - called from inode.c */
-/* dquots are removed from inodes, no new references can be got so we are the only ones holding reference */
+/*
+ * Free list of dquots
+ * Dquots are removed from inodes and no new references can be got so we are
+ * the only ones holding reference
+ */
static void put_dquot_list(struct list_head *tofree_head)
{
struct list_head *act_head;
struct dquot *dquot;
act_head = tofree_head->next;
- /* So now we have dquots on the list... Just free them */
while (act_head != tofree_head) {
dquot = list_entry(act_head, struct dquot, dq_free);
act_head = act_head->next;
- list_del_init(&dquot->dq_free); /* Remove dquot from the list so we won't have problems... */
+ /* Remove dquot from the list so we won't have problems... */
+ list_del_init(&dquot->dq_free);
dqput(dquot);
}
}
@@ -870,6 +915,12 @@ static void remove_dquot_ref(struct super_block *sb, int type,
spin_lock(&inode_lock);
list_for_each_entry(inode, &sb->s_inodes, i_sb_list) {
+ /*
+ * We have to scan also I_NEW inodes because they can already
+ * have quota pointer initialized. Luckily, we need to touch
+ * only quota pointers and these have separate locking
+ * (dqptr_sem).
+ */
if (!IS_NOQUOTA(inode))
remove_inode_dquot_ref(inode, type, tofree_head);
}
@@ -899,7 +950,29 @@ static inline void dquot_incr_space(struct dquot *dquot, qsize_t number)
dquot->dq_dqb.dqb_curspace += number;
}
-static inline void dquot_decr_inodes(struct dquot *dquot, qsize_t number)
+static inline void dquot_resv_space(struct dquot *dquot, qsize_t number)
+{
+ dquot->dq_dqb.dqb_rsvspace += number;
+}
+
+/*
+ * Claim reserved quota space
+ */
+static void dquot_claim_reserved_space(struct dquot *dquot,
+ qsize_t number)
+{
+ WARN_ON(dquot->dq_dqb.dqb_rsvspace < number);
+ dquot->dq_dqb.dqb_curspace += number;
+ dquot->dq_dqb.dqb_rsvspace -= number;
+}
+
+static inline
+void dquot_free_reserved_space(struct dquot *dquot, qsize_t number)
+{
+ dquot->dq_dqb.dqb_rsvspace -= number;
+}
+
+static void dquot_decr_inodes(struct dquot *dquot, qsize_t number)
{
if (sb_dqopt(dquot->dq_sb)->flags & DQUOT_NEGATIVE_USAGE ||
dquot->dq_dqb.dqb_curinodes >= number)
@@ -911,7 +984,7 @@ static inline void dquot_decr_inodes(struct dquot *dquot, qsize_t number)
clear_bit(DQ_INODES_B, &dquot->dq_flags);
}
-static inline void dquot_decr_space(struct dquot *dquot, qsize_t number)
+static void dquot_decr_space(struct dquot *dquot, qsize_t number)
{
if (sb_dqopt(dquot->dq_sb)->flags & DQUOT_NEGATIVE_USAGE ||
dquot->dq_dqb.dqb_curspace >= number)
@@ -938,7 +1011,7 @@ static int warning_issued(struct dquot *dquot, const int warntype)
#ifdef CONFIG_PRINT_QUOTA_WARNING
static int flag_print_warnings = 1;
-static inline int need_print_warning(struct dquot *dquot)
+static int need_print_warning(struct dquot *dquot)
{
if (!flag_print_warnings)
return 0;
@@ -1057,10 +1130,7 @@ static void send_warning(const struct dquot *dquot, const char warntype)
goto attr_err_out;
genlmsg_end(skb, msg_head);
- ret = genlmsg_multicast(skb, 0, quota_genl_family.id, GFP_NOFS);
- if (ret < 0 && ret != -ESRCH)
- printk(KERN_ERR
- "VFS: Failed to send notification message: %d\n", ret);
+ genlmsg_multicast(skb, 0, quota_genl_family.id, GFP_NOFS);
return;
attr_err_out:
printk(KERN_ERR "VFS: Not enough space to compose quota message!\n");
@@ -1068,13 +1138,17 @@ err_out:
kfree_skb(skb);
}
#endif
-
-static inline void flush_warnings(struct dquot * const *dquots, char *warntype)
+/*
+ * Write warnings to the console and send warning messages over netlink.
+ *
+ * Note that this function can sleep.
+ */
+static void flush_warnings(struct dquot *const *dquots, char *warntype)
{
int i;
for (i = 0; i < MAXQUOTAS; i++)
- if (dquots[i] != NODQUOT && warntype[i] != QUOTA_NL_NOWARN &&
+ if (dquots[i] && warntype[i] != QUOTA_NL_NOWARN &&
!warning_issued(dquots[i], warntype[i])) {
#ifdef CONFIG_PRINT_QUOTA_WARNING
print_warning(dquots[i], warntype[i]);
@@ -1085,42 +1159,47 @@ static inline void flush_warnings(struct dquot * const *dquots, char *warntype)
}
}
-static inline char ignore_hardlimit(struct dquot *dquot)
+static int ignore_hardlimit(struct dquot *dquot)
{
struct mem_dqinfo *info = &sb_dqopt(dquot->dq_sb)->info[dquot->dq_type];
return capable(CAP_SYS_RESOURCE) &&
- (info->dqi_format->qf_fmt_id != QFMT_VFS_OLD || !(info->dqi_flags & V1_DQF_RSQUASH));
+ (info->dqi_format->qf_fmt_id != QFMT_VFS_OLD ||
+ !(info->dqi_flags & V1_DQF_RSQUASH));
}
/* needs dq_data_lock */
static int check_idq(struct dquot *dquot, qsize_t inodes, char *warntype)
{
+ qsize_t newinodes = dquot->dq_dqb.dqb_curinodes + inodes;
+
*warntype = QUOTA_NL_NOWARN;
if (!sb_has_quota_limits_enabled(dquot->dq_sb, dquot->dq_type) ||
test_bit(DQ_FAKE_B, &dquot->dq_flags))
return QUOTA_OK;
if (dquot->dq_dqb.dqb_ihardlimit &&
- (dquot->dq_dqb.dqb_curinodes + inodes) > dquot->dq_dqb.dqb_ihardlimit &&
+ newinodes > dquot->dq_dqb.dqb_ihardlimit &&
!ignore_hardlimit(dquot)) {
*warntype = QUOTA_NL_IHARDWARN;
return NO_QUOTA;
}
if (dquot->dq_dqb.dqb_isoftlimit &&
- (dquot->dq_dqb.dqb_curinodes + inodes) > dquot->dq_dqb.dqb_isoftlimit &&
- dquot->dq_dqb.dqb_itime && get_seconds() >= dquot->dq_dqb.dqb_itime &&
+ newinodes > dquot->dq_dqb.dqb_isoftlimit &&
+ dquot->dq_dqb.dqb_itime &&
+ get_seconds() >= dquot->dq_dqb.dqb_itime &&
!ignore_hardlimit(dquot)) {
*warntype = QUOTA_NL_ISOFTLONGWARN;
return NO_QUOTA;
}
if (dquot->dq_dqb.dqb_isoftlimit &&
- (dquot->dq_dqb.dqb_curinodes + inodes) > dquot->dq_dqb.dqb_isoftlimit &&
+ newinodes > dquot->dq_dqb.dqb_isoftlimit &&
dquot->dq_dqb.dqb_itime == 0) {
*warntype = QUOTA_NL_ISOFTWARN;
- dquot->dq_dqb.dqb_itime = get_seconds() + sb_dqopt(dquot->dq_sb)->info[dquot->dq_type].dqi_igrace;
+ dquot->dq_dqb.dqb_itime = get_seconds() +
+ sb_dqopt(dquot->dq_sb)->info[dquot->dq_type].dqi_igrace;
}
return QUOTA_OK;
@@ -1129,13 +1208,19 @@ static int check_idq(struct dquot *dquot, qsize_t inodes, char *warntype)
/* needs dq_data_lock */
static int check_bdq(struct dquot *dquot, qsize_t space, int prealloc, char *warntype)
{
+ qsize_t tspace;
+ struct super_block *sb = dquot->dq_sb;
+
*warntype = QUOTA_NL_NOWARN;
- if (!sb_has_quota_limits_enabled(dquot->dq_sb, dquot->dq_type) ||
+ if (!sb_has_quota_limits_enabled(sb, dquot->dq_type) ||
test_bit(DQ_FAKE_B, &dquot->dq_flags))
return QUOTA_OK;
+ tspace = dquot->dq_dqb.dqb_curspace + dquot->dq_dqb.dqb_rsvspace
+ + space;
+
if (dquot->dq_dqb.dqb_bhardlimit &&
- dquot->dq_dqb.dqb_curspace + space > dquot->dq_dqb.dqb_bhardlimit &&
+ tspace > dquot->dq_dqb.dqb_bhardlimit &&
!ignore_hardlimit(dquot)) {
if (!prealloc)
*warntype = QUOTA_NL_BHARDWARN;
@@ -1143,8 +1228,9 @@ static int check_bdq(struct dquot *dquot, qsize_t space, int prealloc, char *war
}
if (dquot->dq_dqb.dqb_bsoftlimit &&
- dquot->dq_dqb.dqb_curspace + space > dquot->dq_dqb.dqb_bsoftlimit &&
- dquot->dq_dqb.dqb_btime && get_seconds() >= dquot->dq_dqb.dqb_btime &&
+ tspace > dquot->dq_dqb.dqb_bsoftlimit &&
+ dquot->dq_dqb.dqb_btime &&
+ get_seconds() >= dquot->dq_dqb.dqb_btime &&
!ignore_hardlimit(dquot)) {
if (!prealloc)
*warntype = QUOTA_NL_BSOFTLONGWARN;
@@ -1152,11 +1238,12 @@ static int check_bdq(struct dquot *dquot, qsize_t space, int prealloc, char *war
}
if (dquot->dq_dqb.dqb_bsoftlimit &&
- dquot->dq_dqb.dqb_curspace + space > dquot->dq_dqb.dqb_bsoftlimit &&
+ tspace > dquot->dq_dqb.dqb_bsoftlimit &&
dquot->dq_dqb.dqb_btime == 0) {
if (!prealloc) {
*warntype = QUOTA_NL_BSOFTWARN;
- dquot->dq_dqb.dqb_btime = get_seconds() + sb_dqopt(dquot->dq_sb)->info[dquot->dq_type].dqi_bgrace;
+ dquot->dq_dqb.dqb_btime = get_seconds() +
+ sb_dqopt(sb)->info[dquot->dq_type].dqi_bgrace;
}
else
/*
@@ -1171,15 +1258,18 @@ static int check_bdq(struct dquot *dquot, qsize_t space, int prealloc, char *war
static int info_idq_free(struct dquot *dquot, qsize_t inodes)
{
+ qsize_t newinodes;
+
if (test_bit(DQ_FAKE_B, &dquot->dq_flags) ||
dquot->dq_dqb.dqb_curinodes <= dquot->dq_dqb.dqb_isoftlimit ||
!sb_has_quota_limits_enabled(dquot->dq_sb, dquot->dq_type))
return QUOTA_NL_NOWARN;
- if (dquot->dq_dqb.dqb_curinodes - inodes <= dquot->dq_dqb.dqb_isoftlimit)
+ newinodes = dquot->dq_dqb.dqb_curinodes - inodes;
+ if (newinodes <= dquot->dq_dqb.dqb_isoftlimit)
return QUOTA_NL_ISOFTBELOW;
if (dquot->dq_dqb.dqb_curinodes >= dquot->dq_dqb.dqb_ihardlimit &&
- dquot->dq_dqb.dqb_curinodes - inodes < dquot->dq_dqb.dqb_ihardlimit)
+ newinodes < dquot->dq_dqb.dqb_ihardlimit)
return QUOTA_NL_IHARDBELOW;
return QUOTA_NL_NOWARN;
}
@@ -1206,7 +1296,7 @@ int dquot_initialize(struct inode *inode, int type)
{
unsigned int id = 0;
int cnt, ret = 0;
- struct dquot *got[MAXQUOTAS] = { NODQUOT, NODQUOT };
+ struct dquot *got[MAXQUOTAS] = { NULL, NULL };
struct super_block *sb = inode->i_sb;
/* First test before acquiring mutex - solves deadlocks when we
@@ -1239,9 +1329,9 @@ int dquot_initialize(struct inode *inode, int type)
/* Avoid races with quotaoff() */
if (!sb_has_quota_active(sb, cnt))
continue;
- if (inode->i_dquot[cnt] == NODQUOT) {
+ if (!inode->i_dquot[cnt]) {
inode->i_dquot[cnt] = got[cnt];
- got[cnt] = NODQUOT;
+ got[cnt] = NULL;
}
}
out_err:
@@ -1251,6 +1341,7 @@ out_err:
dqput(got[cnt]);
return ret;
}
+EXPORT_SYMBOL(dquot_initialize);
/*
* Release all quotas referenced by inode
@@ -1263,7 +1354,7 @@ int dquot_drop(struct inode *inode)
down_write(&sb_dqopt(inode->i_sb)->dqptr_sem);
for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
put[cnt] = inode->i_dquot[cnt];
- inode->i_dquot[cnt] = NODQUOT;
+ inode->i_dquot[cnt] = NULL;
}
up_write(&sb_dqopt(inode->i_sb)->dqptr_sem);
@@ -1271,6 +1362,7 @@ int dquot_drop(struct inode *inode)
dqput(put[cnt]);
return 0;
}
+EXPORT_SYMBOL(dquot_drop);
/* Wrapper to remove references to quota structures from inode */
void vfs_dq_drop(struct inode *inode)
@@ -1287,12 +1379,13 @@ void vfs_dq_drop(struct inode *inode)
* must assure that nobody can come after the DQUOT_DROP and
* add quota pointers back anyway */
for (cnt = 0; cnt < MAXQUOTAS; cnt++)
- if (inode->i_dquot[cnt] != NODQUOT)
+ if (inode->i_dquot[cnt])
break;
if (cnt < MAXQUOTAS)
inode->i_sb->dq_op->drop(inode);
}
}
+EXPORT_SYMBOL(vfs_dq_drop);
/*
* Following four functions update i_blocks+i_bytes fields and
@@ -1306,51 +1399,93 @@ void vfs_dq_drop(struct inode *inode)
/*
* This operation can block, but only after everything is updated
*/
-int dquot_alloc_space(struct inode *inode, qsize_t number, int warn)
+int __dquot_alloc_space(struct inode *inode, qsize_t number,
+ int warn, int reserve)
{
- int cnt, ret = NO_QUOTA;
+ int cnt, ret = QUOTA_OK;
char warntype[MAXQUOTAS];
- /* First test before acquiring mutex - solves deadlocks when we
- * re-enter the quota code and are already holding the mutex */
- if (IS_NOQUOTA(inode)) {
-out_add:
- inode_add_bytes(inode, number);
- return QUOTA_OK;
- }
for (cnt = 0; cnt < MAXQUOTAS; cnt++)
warntype[cnt] = QUOTA_NL_NOWARN;
- down_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
- if (IS_NOQUOTA(inode)) { /* Now we can do reliable test... */
- up_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
- goto out_add;
- }
spin_lock(&dq_data_lock);
for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
- if (inode->i_dquot[cnt] == NODQUOT)
+ if (!inode->i_dquot[cnt])
continue;
- if (check_bdq(inode->i_dquot[cnt], number, warn, warntype+cnt) == NO_QUOTA)
- goto warn_put_all;
+ if (check_bdq(inode->i_dquot[cnt], number, warn, warntype+cnt)
+ == NO_QUOTA) {
+ ret = NO_QUOTA;
+ goto out_unlock;
+ }
}
for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
- if (inode->i_dquot[cnt] == NODQUOT)
+ if (!inode->i_dquot[cnt])
continue;
- dquot_incr_space(inode->i_dquot[cnt], number);
+ if (reserve)
+ dquot_resv_space(inode->i_dquot[cnt], number);
+ else
+ dquot_incr_space(inode->i_dquot[cnt], number);
}
- inode_add_bytes(inode, number);
- ret = QUOTA_OK;
-warn_put_all:
+ if (!reserve)
+ inode_add_bytes(inode, number);
+out_unlock:
spin_unlock(&dq_data_lock);
- if (ret == QUOTA_OK)
- /* Dirtify all the dquots - this can block when journalling */
- for (cnt = 0; cnt < MAXQUOTAS; cnt++)
- if (inode->i_dquot[cnt])
- mark_dquot_dirty(inode->i_dquot[cnt]);
flush_warnings(inode->i_dquot, warntype);
+ return ret;
+}
+
+int dquot_alloc_space(struct inode *inode, qsize_t number, int warn)
+{
+ int cnt, ret = QUOTA_OK;
+
+ /*
+ * First test before acquiring mutex - solves deadlocks when we
+ * re-enter the quota code and are already holding the mutex
+ */
+ if (IS_NOQUOTA(inode)) {
+ inode_add_bytes(inode, number);
+ goto out;
+ }
+
+ down_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
+ if (IS_NOQUOTA(inode)) {
+ inode_add_bytes(inode, number);
+ goto out_unlock;
+ }
+
+ ret = __dquot_alloc_space(inode, number, warn, 0);
+ if (ret == NO_QUOTA)
+ goto out_unlock;
+
+ /* Dirtify all the dquots - this can block when journalling */
+ for (cnt = 0; cnt < MAXQUOTAS; cnt++)
+ if (inode->i_dquot[cnt])
+ mark_dquot_dirty(inode->i_dquot[cnt]);
+out_unlock:
up_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
+out:
return ret;
}
+EXPORT_SYMBOL(dquot_alloc_space);
+
+int dquot_reserve_space(struct inode *inode, qsize_t number, int warn)
+{
+ int ret = QUOTA_OK;
+
+ if (IS_NOQUOTA(inode))
+ goto out;
+
+ down_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
+ if (IS_NOQUOTA(inode))
+ goto out_unlock;
+
+ ret = __dquot_alloc_space(inode, number, warn, 1);
+out_unlock:
+ up_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
+out:
+ return ret;
+}
+EXPORT_SYMBOL(dquot_reserve_space);
/*
* This operation can block, but only after everything is updated
@@ -1373,14 +1508,15 @@ int dquot_alloc_inode(const struct inode *inode, qsize_t number)
}
spin_lock(&dq_data_lock);
for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
- if (inode->i_dquot[cnt] == NODQUOT)
+ if (!inode->i_dquot[cnt])
continue;
- if (check_idq(inode->i_dquot[cnt], number, warntype+cnt) == NO_QUOTA)
+ if (check_idq(inode->i_dquot[cnt], number, warntype+cnt)
+ == NO_QUOTA)
goto warn_put_all;
}
for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
- if (inode->i_dquot[cnt] == NODQUOT)
+ if (!inode->i_dquot[cnt])
continue;
dquot_incr_inodes(inode->i_dquot[cnt], number);
}
@@ -1396,6 +1532,73 @@ warn_put_all:
up_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
return ret;
}
+EXPORT_SYMBOL(dquot_alloc_inode);
+
+int dquot_claim_space(struct inode *inode, qsize_t number)
+{
+ int cnt;
+ int ret = QUOTA_OK;
+
+ if (IS_NOQUOTA(inode)) {
+ inode_add_bytes(inode, number);
+ goto out;
+ }
+
+ down_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
+ if (IS_NOQUOTA(inode)) {
+ up_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
+ inode_add_bytes(inode, number);
+ goto out;
+ }
+
+ spin_lock(&dq_data_lock);
+ /* Claim reserved quotas to allocated quotas */
+ for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
+ if (inode->i_dquot[cnt])
+ dquot_claim_reserved_space(inode->i_dquot[cnt],
+ number);
+ }
+ /* Update inode bytes */
+ inode_add_bytes(inode, number);
+ spin_unlock(&dq_data_lock);
+ /* Dirtify all the dquots - this can block when journalling */
+ for (cnt = 0; cnt < MAXQUOTAS; cnt++)
+ if (inode->i_dquot[cnt])
+ mark_dquot_dirty(inode->i_dquot[cnt]);
+ up_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
+out:
+ return ret;
+}
+EXPORT_SYMBOL(dquot_claim_space);
+
+/*
+ * Release reserved quota space
+ */
+void dquot_release_reserved_space(struct inode *inode, qsize_t number)
+{
+ int cnt;
+
+ if (IS_NOQUOTA(inode))
+ goto out;
+
+ down_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
+ if (IS_NOQUOTA(inode))
+ goto out_unlock;
+
+ spin_lock(&dq_data_lock);
+ /* Release reserved dquots */
+ for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
+ if (inode->i_dquot[cnt])
+ dquot_free_reserved_space(inode->i_dquot[cnt], number);
+ }
+ spin_unlock(&dq_data_lock);
+
+out_unlock:
+ up_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
+out:
+ return;
+}
+EXPORT_SYMBOL(dquot_release_reserved_space);
/*
* This operation can block, but only after everything is updated
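The reservation helpers added in this hunk (dquot_reserve_space(), dquot_claim_space() and dquot_release_reserved_space()) are meant to be called in sequence by a filesystem doing delayed allocation. The sketch below is a hedged illustration based only on the signatures and the QUOTA_OK/NO_QUOTA convention visible in this patch; the surrounding write path and the block-allocator stub are hypothetical.

/*
 * Sketch only: a hypothetical delayed-allocation write path using the
 * reservation API above.  example_allocate_blocks() stands in for the
 * filesystem's real block allocator and is an assumed name.
 */
static int example_allocate_blocks(struct inode *inode, qsize_t bytes);

static int example_delalloc_write(struct inode *inode, qsize_t bytes)
{
	/* Reserve quota at write time; no blocks are allocated yet. */
	if (dquot_reserve_space(inode, bytes, 1) == NO_QUOTA)
		return -EDQUOT;

	if (example_allocate_blocks(inode, bytes) == 0) {
		/* Blocks exist now: convert the reservation into usage. */
		dquot_claim_space(inode, bytes);
		return 0;
	}

	/* Allocation failed: hand the reserved quota space back. */
	dquot_release_reserved_space(inode, bytes);
	return -ENOSPC;
}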
@@ -1421,7 +1624,7 @@ out_sub:
}
spin_lock(&dq_data_lock);
for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
- if (inode->i_dquot[cnt] == NODQUOT)
+ if (!inode->i_dquot[cnt])
continue;
warntype[cnt] = info_bdq_free(inode->i_dquot[cnt], number);
dquot_decr_space(inode->i_dquot[cnt], number);
@@ -1436,6 +1639,7 @@ out_sub:
up_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
return QUOTA_OK;
}
+EXPORT_SYMBOL(dquot_free_space);
/*
* This operation can block, but only after everything is updated
@@ -1458,7 +1662,7 @@ int dquot_free_inode(const struct inode *inode, qsize_t number)
}
spin_lock(&dq_data_lock);
for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
- if (inode->i_dquot[cnt] == NODQUOT)
+ if (!inode->i_dquot[cnt])
continue;
warntype[cnt] = info_idq_free(inode->i_dquot[cnt], number);
dquot_decr_inodes(inode->i_dquot[cnt], number);
@@ -1472,6 +1676,20 @@ int dquot_free_inode(const struct inode *inode, qsize_t number)
up_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
return QUOTA_OK;
}
+EXPORT_SYMBOL(dquot_free_inode);
+
+/*
+ * Callback to get the reserved quota space from the underlying filesystem
+ */
+qsize_t dquot_get_reserved_space(struct inode *inode)
+{
+ qsize_t reserved_space = 0;
+
+ if (sb_any_quota_active(inode->i_sb) &&
+ inode->i_sb->dq_op->get_reserved_space)
+ reserved_space = inode->i_sb->dq_op->get_reserved_space(inode);
+ return reserved_space;
+}
/*
 * Transfer the number of inodes and blocks from one diskquota to another.
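The dquot_get_reserved_space() helper above queries the filesystem through dq_op->get_reserved_space so that dquot_transfer() can move reserved bytes along with allocated ones. Below is a hedged sketch of what the filesystem-side hook could look like; the EXAMPLE_I() accessor and the i_reserved_quota counter are assumptions for illustration, not part of this patch.

/*
 * Sketch only: a filesystem exposing its per-inode reserved bytes via
 * the get_reserved_space hook.  EXAMPLE_I() and i_reserved_quota are
 * hypothetical names.
 */
static qsize_t example_get_reserved_space(struct inode *inode)
{
	return EXAMPLE_I(inode)->i_reserved_quota;
}

static struct dquot_operations example_quota_operations = {
	.initialize		= dquot_initialize,
	.drop			= dquot_drop,
	.get_reserved_space	= example_get_reserved_space,
	/* remaining callbacks would point at the generic dquot helpers */
};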
@@ -1481,7 +1699,8 @@ int dquot_free_inode(const struct inode *inode, qsize_t number)
*/
int dquot_transfer(struct inode *inode, struct iattr *iattr)
{
- qsize_t space;
+ qsize_t space, cur_space;
+ qsize_t rsv_space = 0;
struct dquot *transfer_from[MAXQUOTAS];
struct dquot *transfer_to[MAXQUOTAS];
int cnt, ret = QUOTA_OK;
@@ -1496,22 +1715,16 @@ int dquot_transfer(struct inode *inode, struct iattr *iattr)
return QUOTA_OK;
/* Initialize the arrays */
for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
- transfer_from[cnt] = NODQUOT;
- transfer_to[cnt] = NODQUOT;
+ transfer_from[cnt] = NULL;
+ transfer_to[cnt] = NULL;
warntype_to[cnt] = QUOTA_NL_NOWARN;
- switch (cnt) {
- case USRQUOTA:
- if (!chuid)
- continue;
- transfer_to[cnt] = dqget(inode->i_sb, iattr->ia_uid, cnt);
- break;
- case GRPQUOTA:
- if (!chgid)
- continue;
- transfer_to[cnt] = dqget(inode->i_sb, iattr->ia_gid, cnt);
- break;
- }
}
+ if (chuid)
+ transfer_to[USRQUOTA] = dqget(inode->i_sb, iattr->ia_uid,
+ USRQUOTA);
+ if (chgid)
+ transfer_to[GRPQUOTA] = dqget(inode->i_sb, iattr->ia_gid,
+ GRPQUOTA);
down_write(&sb_dqopt(inode->i_sb)->dqptr_sem);
/* Now recheck reliably when holding dqptr_sem */
@@ -1520,10 +1733,12 @@ int dquot_transfer(struct inode *inode, struct iattr *iattr)
goto put_all;
}
spin_lock(&dq_data_lock);
- space = inode_get_bytes(inode);
+ cur_space = inode_get_bytes(inode);
+ rsv_space = dquot_get_reserved_space(inode);
+ space = cur_space + rsv_space;
/* Build the transfer_from list and check the limits */
for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
- if (transfer_to[cnt] == NODQUOT)
+ if (!transfer_to[cnt])
continue;
transfer_from[cnt] = inode->i_dquot[cnt];
if (check_idq(transfer_to[cnt], 1, warntype_to + cnt) ==
@@ -1539,7 +1754,7 @@ int dquot_transfer(struct inode *inode, struct iattr *iattr)
/*
* Skip changes for same uid or gid or for turned off quota-type.
*/
- if (transfer_to[cnt] == NODQUOT)
+ if (!transfer_to[cnt])
continue;
/* Due to IO error we might not have transfer_from[] structure */
@@ -1549,11 +1764,14 @@ int dquot_transfer(struct inode *inode, struct iattr *iattr)
warntype_from_space[cnt] =
info_bdq_free(transfer_from[cnt], space);
dquot_decr_inodes(transfer_from[cnt], 1);
- dquot_decr_space(transfer_from[cnt], space);
+ dquot_decr_space(transfer_from[cnt], cur_space);
+ dquot_free_reserved_space(transfer_from[cnt],
+ rsv_space);
}
dquot_incr_inodes(transfer_to[cnt], 1);
- dquot_incr_space(transfer_to[cnt], space);
+ dquot_incr_space(transfer_to[cnt], cur_space);
+ dquot_resv_space(transfer_to[cnt], rsv_space);
inode->i_dquot[cnt] = transfer_to[cnt];
}
@@ -1567,7 +1785,7 @@ int dquot_transfer(struct inode *inode, struct iattr *iattr)
if (transfer_to[cnt]) {
mark_dquot_dirty(transfer_to[cnt]);
/* The reference we got is transferred to the inode */
- transfer_to[cnt] = NODQUOT;
+ transfer_to[cnt] = NULL;
}
}
warn_put_all:
@@ -1585,10 +1803,11 @@ over_quota:
up_write(&sb_dqopt(inode->i_sb)->dqptr_sem);
/* Clear dquot pointers we don't want to dqput() */
for (cnt = 0; cnt < MAXQUOTAS; cnt++)
- transfer_from[cnt] = NODQUOT;
+ transfer_from[cnt] = NULL;
ret = NO_QUOTA;
goto warn_put_all;
}
+EXPORT_SYMBOL(dquot_transfer);
/* Wrapper for transferring ownership of an inode */
int vfs_dq_transfer(struct inode *inode, struct iattr *iattr)
@@ -1600,7 +1819,7 @@ int vfs_dq_transfer(struct inode *inode, struct iattr *iattr)
}
return 0;
}
-
+EXPORT_SYMBOL(vfs_dq_transfer);
/*
* Write info of quota file to disk
@@ -1615,6 +1834,7 @@ int dquot_commit_info(struct super_block *sb, int type)
mutex_unlock(&dqopt->dqio_mutex);
return ret;
}
+EXPORT_SYMBOL(dquot_commit_info);
/*
* Definitions of diskquota operations.
@@ -1700,8 +1920,8 @@ int vfs_quota_disable(struct super_block *sb, int type, unsigned int flags)
drop_dquot_ref(sb, cnt);
invalidate_dquots(sb, cnt);
/*
- * Now all dquots should be invalidated, all writes done so we should be only
- * users of the info. No locks needed.
+ * Now all dquots should be invalidated, all writes done so we
+ * should be only users of the info. No locks needed.
*/
if (info_dirty(&dqopt->info[cnt]))
sb->dq_op->write_info(sb, cnt);
@@ -1739,10 +1959,12 @@ int vfs_quota_disable(struct super_block *sb, int type, unsigned int flags)
/* If quota was reenabled in the meantime, we have
* nothing to do */
if (!sb_has_quota_loaded(sb, cnt)) {
- mutex_lock_nested(&toputinode[cnt]->i_mutex, I_MUTEX_QUOTA);
+ mutex_lock_nested(&toputinode[cnt]->i_mutex,
+ I_MUTEX_QUOTA);
toputinode[cnt]->i_flags &= ~(S_IMMUTABLE |
S_NOATIME | S_NOQUOTA);
- truncate_inode_pages(&toputinode[cnt]->i_data, 0);
+ truncate_inode_pages(&toputinode[cnt]->i_data,
+ 0);
mutex_unlock(&toputinode[cnt]->i_mutex);
mark_inode_dirty(toputinode[cnt]);
}
@@ -1767,13 +1989,14 @@ put_inodes:
}
return ret;
}
+EXPORT_SYMBOL(vfs_quota_disable);
int vfs_quota_off(struct super_block *sb, int type, int remount)
{
return vfs_quota_disable(sb, type, remount ? DQUOT_SUSPENDED :
(DQUOT_USAGE_ENABLED | DQUOT_LIMITS_ENABLED));
}
-
+EXPORT_SYMBOL(vfs_quota_off);
/*
* Turn quotas on on a device
*/
@@ -1831,7 +2054,8 @@ static int vfs_load_quota_inode(struct inode *inode, int type, int format_id,
* possible) Also nobody should write to the file - we use
* special IO operations which ignore the immutable bit. */
down_write(&dqopt->dqptr_sem);
- oldflags = inode->i_flags & (S_NOATIME | S_IMMUTABLE | S_NOQUOTA);
+ oldflags = inode->i_flags & (S_NOATIME | S_IMMUTABLE |
+ S_NOQUOTA);
inode->i_flags |= S_NOQUOTA | S_NOATIME | S_IMMUTABLE;
up_write(&dqopt->dqptr_sem);
sb->dq_op->drop(inode);
@@ -1850,7 +2074,8 @@ static int vfs_load_quota_inode(struct inode *inode, int type, int format_id,
dqopt->info[type].dqi_fmt_id = format_id;
INIT_LIST_HEAD(&dqopt->info[type].dqi_dirty_list);
mutex_lock(&dqopt->dqio_mutex);
- if ((error = dqopt->ops[type]->read_file_info(sb, type)) < 0) {
+ error = dqopt->ops[type]->read_file_info(sb, type);
+ if (error < 0) {
mutex_unlock(&dqopt->dqio_mutex);
goto out_file_init;
}
@@ -1930,6 +2155,7 @@ int vfs_quota_on_path(struct super_block *sb, int type, int format_id,
DQUOT_LIMITS_ENABLED);
return error;
}
+EXPORT_SYMBOL(vfs_quota_on_path);
int vfs_quota_on(struct super_block *sb, int type, int format_id, char *name,
int remount)
@@ -1947,6 +2173,7 @@ int vfs_quota_on(struct super_block *sb, int type, int format_id, char *name,
}
return error;
}
+EXPORT_SYMBOL(vfs_quota_on);
/*
* More powerful function for turning on quotas allowing setting
@@ -1993,6 +2220,7 @@ out_lock:
load_quota:
return vfs_load_quota_inode(inode, type, format_id, flags);
}
+EXPORT_SYMBOL(vfs_quota_enable);
/*
* This function is used when filesystem needs to initialize quotas
@@ -2022,6 +2250,7 @@ out:
dput(dentry);
return error;
}
+EXPORT_SYMBOL(vfs_quota_on_mount);
/* Wrapper to turn on quotas when remounting rw */
int vfs_dq_quota_on_remount(struct super_block *sb)
@@ -2038,6 +2267,7 @@ int vfs_dq_quota_on_remount(struct super_block *sb)
}
return ret;
}
+EXPORT_SYMBOL(vfs_dq_quota_on_remount);
static inline qsize_t qbtos(qsize_t blocks)
{
@@ -2057,7 +2287,7 @@ static void do_get_dqblk(struct dquot *dquot, struct if_dqblk *di)
spin_lock(&dq_data_lock);
di->dqb_bhardlimit = stoqb(dm->dqb_bhardlimit);
di->dqb_bsoftlimit = stoqb(dm->dqb_bsoftlimit);
- di->dqb_curspace = dm->dqb_curspace;
+ di->dqb_curspace = dm->dqb_curspace + dm->dqb_rsvspace;
di->dqb_ihardlimit = dm->dqb_ihardlimit;
di->dqb_isoftlimit = dm->dqb_isoftlimit;
di->dqb_curinodes = dm->dqb_curinodes;
@@ -2067,18 +2297,20 @@ static void do_get_dqblk(struct dquot *dquot, struct if_dqblk *di)
spin_unlock(&dq_data_lock);
}
-int vfs_get_dqblk(struct super_block *sb, int type, qid_t id, struct if_dqblk *di)
+int vfs_get_dqblk(struct super_block *sb, int type, qid_t id,
+ struct if_dqblk *di)
{
struct dquot *dquot;
dquot = dqget(sb, id, type);
- if (dquot == NODQUOT)
+ if (!dquot)
return -ESRCH;
do_get_dqblk(dquot, di);
dqput(dquot);
return 0;
}
+EXPORT_SYMBOL(vfs_get_dqblk);
/* Generic routine for setting common part of quota structure */
static int do_set_dqblk(struct dquot *dquot, struct if_dqblk *di)
@@ -2097,7 +2329,7 @@ static int do_set_dqblk(struct dquot *dquot, struct if_dqblk *di)
spin_lock(&dq_data_lock);
if (di->dqb_valid & QIF_SPACE) {
- dm->dqb_curspace = di->dqb_curspace;
+ dm->dqb_curspace = di->dqb_curspace - dm->dqb_rsvspace;
check_blim = 1;
__set_bit(DQ_LASTSET_B + QIF_SPACE_B, &dquot->dq_flags);
}
@@ -2130,22 +2362,25 @@ static int do_set_dqblk(struct dquot *dquot, struct if_dqblk *di)
}
if (check_blim) {
- if (!dm->dqb_bsoftlimit || dm->dqb_curspace < dm->dqb_bsoftlimit) {
+ if (!dm->dqb_bsoftlimit ||
+ dm->dqb_curspace < dm->dqb_bsoftlimit) {
dm->dqb_btime = 0;
clear_bit(DQ_BLKS_B, &dquot->dq_flags);
- }
- else if (!(di->dqb_valid & QIF_BTIME)) /* Set grace only if user hasn't provided his own... */
+ } else if (!(di->dqb_valid & QIF_BTIME))
+ /* Set grace only if user hasn't provided his own... */
dm->dqb_btime = get_seconds() + dqi->dqi_bgrace;
}
if (check_ilim) {
- if (!dm->dqb_isoftlimit || dm->dqb_curinodes < dm->dqb_isoftlimit) {
+ if (!dm->dqb_isoftlimit ||
+ dm->dqb_curinodes < dm->dqb_isoftlimit) {
dm->dqb_itime = 0;
clear_bit(DQ_INODES_B, &dquot->dq_flags);
- }
- else if (!(di->dqb_valid & QIF_ITIME)) /* Set grace only if user hasn't provided his own... */
+ } else if (!(di->dqb_valid & QIF_ITIME))
+ /* Set grace only if user hasn't provided his own... */
dm->dqb_itime = get_seconds() + dqi->dqi_igrace;
}
- if (dm->dqb_bhardlimit || dm->dqb_bsoftlimit || dm->dqb_ihardlimit || dm->dqb_isoftlimit)
+ if (dm->dqb_bhardlimit || dm->dqb_bsoftlimit || dm->dqb_ihardlimit ||
+ dm->dqb_isoftlimit)
clear_bit(DQ_FAKE_B, &dquot->dq_flags);
else
set_bit(DQ_FAKE_B, &dquot->dq_flags);
@@ -2155,7 +2390,8 @@ static int do_set_dqblk(struct dquot *dquot, struct if_dqblk *di)
return 0;
}
-int vfs_set_dqblk(struct super_block *sb, int type, qid_t id, struct if_dqblk *di)
+int vfs_set_dqblk(struct super_block *sb, int type, qid_t id,
+ struct if_dqblk *di)
{
struct dquot *dquot;
int rc;
@@ -2170,6 +2406,7 @@ int vfs_set_dqblk(struct super_block *sb, int type, qid_t id, struct if_dqblk *d
out:
return rc;
}
+EXPORT_SYMBOL(vfs_set_dqblk);
/* Generic routine for getting common part of quota file information */
int vfs_get_dqinfo(struct super_block *sb, int type, struct if_dqinfo *ii)
@@ -2191,6 +2428,7 @@ int vfs_get_dqinfo(struct super_block *sb, int type, struct if_dqinfo *ii)
mutex_unlock(&sb_dqopt(sb)->dqonoff_mutex);
return 0;
}
+EXPORT_SYMBOL(vfs_get_dqinfo);
/* Generic routine for setting common part of quota file information */
int vfs_set_dqinfo(struct super_block *sb, int type, struct if_dqinfo *ii)
@@ -2210,7 +2448,8 @@ int vfs_set_dqinfo(struct super_block *sb, int type, struct if_dqinfo *ii)
if (ii->dqi_valid & IIF_IGRACE)
mi->dqi_igrace = ii->dqi_igrace;
if (ii->dqi_valid & IIF_FLAGS)
- mi->dqi_flags = (mi->dqi_flags & ~DQF_MASK) | (ii->dqi_flags & DQF_MASK);
+ mi->dqi_flags = (mi->dqi_flags & ~DQF_MASK) |
+ (ii->dqi_flags & DQF_MASK);
spin_unlock(&dq_data_lock);
mark_info_dirty(sb, type);
/* Force write to disk */
@@ -2219,6 +2458,7 @@ out:
mutex_unlock(&sb_dqopt(sb)->dqonoff_mutex);
return err;
}
+EXPORT_SYMBOL(vfs_set_dqinfo);
struct quotactl_ops vfs_quotactl_ops = {
.quota_on = vfs_quota_on,
@@ -2368,43 +2608,10 @@ static int __init dquot_init(void)
#ifdef CONFIG_QUOTA_NETLINK_INTERFACE
if (genl_register_family(&quota_genl_family) != 0)
- printk(KERN_ERR "VFS: Failed to create quota netlink interface.\n");
+ printk(KERN_ERR
+ "VFS: Failed to create quota netlink interface.\n");
#endif
return 0;
}
module_init(dquot_init);
-
-EXPORT_SYMBOL(register_quota_format);
-EXPORT_SYMBOL(unregister_quota_format);
-EXPORT_SYMBOL(dqstats);
-EXPORT_SYMBOL(dq_data_lock);
-EXPORT_SYMBOL(vfs_quota_enable);
-EXPORT_SYMBOL(vfs_quota_on);
-EXPORT_SYMBOL(vfs_quota_on_path);
-EXPORT_SYMBOL(vfs_quota_on_mount);
-EXPORT_SYMBOL(vfs_quota_disable);
-EXPORT_SYMBOL(vfs_quota_off);
-EXPORT_SYMBOL(dquot_scan_active);
-EXPORT_SYMBOL(vfs_quota_sync);
-EXPORT_SYMBOL(vfs_get_dqinfo);
-EXPORT_SYMBOL(vfs_set_dqinfo);
-EXPORT_SYMBOL(vfs_get_dqblk);
-EXPORT_SYMBOL(vfs_set_dqblk);
-EXPORT_SYMBOL(dquot_commit);
-EXPORT_SYMBOL(dquot_commit_info);
-EXPORT_SYMBOL(dquot_acquire);
-EXPORT_SYMBOL(dquot_release);
-EXPORT_SYMBOL(dquot_mark_dquot_dirty);
-EXPORT_SYMBOL(dquot_initialize);
-EXPORT_SYMBOL(dquot_drop);
-EXPORT_SYMBOL(vfs_dq_drop);
-EXPORT_SYMBOL(dqget);
-EXPORT_SYMBOL(dqput);
-EXPORT_SYMBOL(dquot_alloc_space);
-EXPORT_SYMBOL(dquot_alloc_inode);
-EXPORT_SYMBOL(dquot_free_space);
-EXPORT_SYMBOL(dquot_free_inode);
-EXPORT_SYMBOL(dquot_transfer);
-EXPORT_SYMBOL(vfs_dq_transfer);
-EXPORT_SYMBOL(vfs_dq_quota_on_remount);
diff --git a/fs/quota.c b/fs/quota/quota.c
index d76ada914f9..b7f5a468f07 100644
--- a/fs/quota.c
+++ b/fs/quota/quota.c
@@ -20,7 +20,8 @@
#include <linux/types.h>
/* Check validity of generic quotactl commands */
-static int generic_quotactl_valid(struct super_block *sb, int type, int cmd, qid_t id)
+static int generic_quotactl_valid(struct super_block *sb, int type, int cmd,
+ qid_t id)
{
if (type >= MAXQUOTAS)
return -EINVAL;
@@ -72,7 +73,8 @@ static int generic_quotactl_valid(struct super_block *sb, int type, int cmd, qid
case Q_SETINFO:
case Q_SETQUOTA:
case Q_GETQUOTA:
- /* This is just informative test so we are satisfied without a lock */
+ /* This is just an informative test so we are satisfied
+ * without the lock */
if (!sb_has_quota_active(sb, type))
return -ESRCH;
}
@@ -92,7 +94,8 @@ static int generic_quotactl_valid(struct super_block *sb, int type, int cmd, qid
}
/* Check validity of XFS Quota Manager commands */
-static int xqm_quotactl_valid(struct super_block *sb, int type, int cmd, qid_t id)
+static int xqm_quotactl_valid(struct super_block *sb, int type, int cmd,
+ qid_t id)
{
if (type >= XQM_MAXQUOTAS)
return -EINVAL;
@@ -142,7 +145,8 @@ static int xqm_quotactl_valid(struct super_block *sb, int type, int cmd, qid_t i
return 0;
}
-static int check_quotactl_valid(struct super_block *sb, int type, int cmd, qid_t id)
+static int check_quotactl_valid(struct super_block *sb, int type, int cmd,
+ qid_t id)
{
int error;
@@ -180,7 +184,8 @@ static void quota_sync_sb(struct super_block *sb, int type)
continue;
if (!sb_has_quota_active(sb, cnt))
continue;
- mutex_lock_nested(&sb_dqopt(sb)->files[cnt]->i_mutex, I_MUTEX_QUOTA);
+ mutex_lock_nested(&sb_dqopt(sb)->files[cnt]->i_mutex,
+ I_MUTEX_QUOTA);
truncate_inode_pages(&sb_dqopt(sb)->files[cnt]->i_data, 0);
mutex_unlock(&sb_dqopt(sb)->files[cnt]->i_mutex);
}
@@ -200,14 +205,15 @@ void sync_dquots(struct super_block *sb, int type)
spin_lock(&sb_lock);
restart:
list_for_each_entry(sb, &super_blocks, s_list) {
- /* This test just improves performance so it needn't be reliable... */
+ /* This test just improves performance so it needn't be
+ * reliable... */
for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
if (type != -1 && type != cnt)
continue;
if (!sb_has_quota_active(sb, cnt))
continue;
if (!info_dirty(&sb_dqopt(sb)->info[cnt]) &&
- list_empty(&sb_dqopt(sb)->info[cnt].dqi_dirty_list))
+ list_empty(&sb_dqopt(sb)->info[cnt].dqi_dirty_list))
continue;
break;
}
@@ -227,7 +233,8 @@ restart:
}
/* Copy parameters and call proper function */
-static int do_quotactl(struct super_block *sb, int type, int cmd, qid_t id, void __user *addr)
+static int do_quotactl(struct super_block *sb, int type, int cmd, qid_t id,
+ void __user *addr)
{
int ret;
@@ -235,7 +242,8 @@ static int do_quotactl(struct super_block *sb, int type, int cmd, qid_t id, void
case Q_QUOTAON: {
char *pathname;
- if (IS_ERR(pathname = getname(addr)))
+ pathname = getname(addr);
+ if (IS_ERR(pathname))
return PTR_ERR(pathname);
ret = sb->s_qcop->quota_on(sb, type, id, pathname, 0);
putname(pathname);
@@ -261,7 +269,8 @@ static int do_quotactl(struct super_block *sb, int type, int cmd, qid_t id, void
case Q_GETINFO: {
struct if_dqinfo info;
- if ((ret = sb->s_qcop->get_info(sb, type, &info)))
+ ret = sb->s_qcop->get_info(sb, type, &info);
+ if (ret)
return ret;
if (copy_to_user(addr, &info, sizeof(info)))
return -EFAULT;
@@ -277,7 +286,8 @@ static int do_quotactl(struct super_block *sb, int type, int cmd, qid_t id, void
case Q_GETQUOTA: {
struct if_dqblk idq;
- if ((ret = sb->s_qcop->get_dqblk(sb, type, id, &idq)))
+ ret = sb->s_qcop->get_dqblk(sb, type, id, &idq);
+ if (ret)
return ret;
if (copy_to_user(addr, &idq, sizeof(idq)))
return -EFAULT;
@@ -322,7 +332,8 @@ static int do_quotactl(struct super_block *sb, int type, int cmd, qid_t id, void
case Q_XGETQUOTA: {
struct fs_disk_quota fdq;
- if ((ret = sb->s_qcop->get_xquota(sb, type, id, &fdq)))
+ ret = sb->s_qcop->get_xquota(sb, type, id, &fdq);
+ if (ret)
return ret;
if (copy_to_user(addr, &fdq, sizeof(fdq)))
return -EFAULT;
@@ -341,7 +352,7 @@ static int do_quotactl(struct super_block *sb, int type, int cmd, qid_t id, void
* look up a superblock on which quota ops will be performed
* - use the name of a block device to find the superblock thereon
*/
-static inline struct super_block *quotactl_block(const char __user *special)
+static struct super_block *quotactl_block(const char __user *special)
{
#ifdef CONFIG_BLOCK
struct block_device *bdev;
diff --git a/fs/quota_tree.c b/fs/quota/quota_tree.c
index 953404c95b1..f81f4bcfb17 100644
--- a/fs/quota_tree.c
+++ b/fs/quota/quota_tree.c
@@ -22,8 +22,6 @@ MODULE_LICENSE("GPL");
#define __QUOTA_QT_PARANOIA
-typedef char *dqbuf_t;
-
static int get_index(struct qtree_mem_dqinfo *info, qid_t id, int depth)
{
unsigned int epb = info->dqi_usable_bs >> 2;
@@ -35,46 +33,42 @@ static int get_index(struct qtree_mem_dqinfo *info, qid_t id, int depth)
}
/* Number of entries in one block */
-static inline int qtree_dqstr_in_blk(struct qtree_mem_dqinfo *info)
+static int qtree_dqstr_in_blk(struct qtree_mem_dqinfo *info)
{
return (info->dqi_usable_bs - sizeof(struct qt_disk_dqdbheader))
/ info->dqi_entry_size;
}
-static dqbuf_t getdqbuf(size_t size)
+static char *getdqbuf(size_t size)
{
- dqbuf_t buf = kmalloc(size, GFP_NOFS);
+ char *buf = kmalloc(size, GFP_NOFS);
if (!buf)
- printk(KERN_WARNING "VFS: Not enough memory for quota buffers.\n");
+ printk(KERN_WARNING
+ "VFS: Not enough memory for quota buffers.\n");
return buf;
}
-static inline void freedqbuf(dqbuf_t buf)
-{
- kfree(buf);
-}
-
-static inline ssize_t read_blk(struct qtree_mem_dqinfo *info, uint blk, dqbuf_t buf)
+static ssize_t read_blk(struct qtree_mem_dqinfo *info, uint blk, char *buf)
{
struct super_block *sb = info->dqi_sb;
memset(buf, 0, info->dqi_usable_bs);
- return sb->s_op->quota_read(sb, info->dqi_type, (char *)buf,
+ return sb->s_op->quota_read(sb, info->dqi_type, buf,
info->dqi_usable_bs, blk << info->dqi_blocksize_bits);
}
-static inline ssize_t write_blk(struct qtree_mem_dqinfo *info, uint blk, dqbuf_t buf)
+static ssize_t write_blk(struct qtree_mem_dqinfo *info, uint blk, char *buf)
{
struct super_block *sb = info->dqi_sb;
- return sb->s_op->quota_write(sb, info->dqi_type, (char *)buf,
+ return sb->s_op->quota_write(sb, info->dqi_type, buf,
info->dqi_usable_bs, blk << info->dqi_blocksize_bits);
}
/* Remove empty block from list and return it */
static int get_free_dqblk(struct qtree_mem_dqinfo *info)
{
- dqbuf_t buf = getdqbuf(info->dqi_usable_bs);
+ char *buf = getdqbuf(info->dqi_usable_bs);
struct qt_disk_dqdbheader *dh = (struct qt_disk_dqdbheader *)buf;
int ret, blk;
@@ -98,12 +92,12 @@ static int get_free_dqblk(struct qtree_mem_dqinfo *info)
mark_info_dirty(info->dqi_sb, info->dqi_type);
ret = blk;
out_buf:
- freedqbuf(buf);
+ kfree(buf);
return ret;
}
/* Insert empty block to the list */
-static int put_free_dqblk(struct qtree_mem_dqinfo *info, dqbuf_t buf, uint blk)
+static int put_free_dqblk(struct qtree_mem_dqinfo *info, char *buf, uint blk)
{
struct qt_disk_dqdbheader *dh = (struct qt_disk_dqdbheader *)buf;
int err;
@@ -120,9 +114,10 @@ static int put_free_dqblk(struct qtree_mem_dqinfo *info, dqbuf_t buf, uint blk)
}
/* Remove given block from the list of blocks with free entries */
-static int remove_free_dqentry(struct qtree_mem_dqinfo *info, dqbuf_t buf, uint blk)
+static int remove_free_dqentry(struct qtree_mem_dqinfo *info, char *buf,
+ uint blk)
{
- dqbuf_t tmpbuf = getdqbuf(info->dqi_usable_bs);
+ char *tmpbuf = getdqbuf(info->dqi_usable_bs);
struct qt_disk_dqdbheader *dh = (struct qt_disk_dqdbheader *)buf;
uint nextblk = le32_to_cpu(dh->dqdh_next_free);
uint prevblk = le32_to_cpu(dh->dqdh_prev_free);
@@ -153,21 +148,24 @@ static int remove_free_dqentry(struct qtree_mem_dqinfo *info, dqbuf_t buf, uint
info->dqi_free_entry = nextblk;
mark_info_dirty(info->dqi_sb, info->dqi_type);
}
- freedqbuf(tmpbuf);
+ kfree(tmpbuf);
dh->dqdh_next_free = dh->dqdh_prev_free = cpu_to_le32(0);
/* No matter whether write succeeds block is out of list */
if (write_blk(info, blk, buf) < 0)
- printk(KERN_ERR "VFS: Can't write block (%u) with free entries.\n", blk);
+ printk(KERN_ERR
+ "VFS: Can't write block (%u) with free entries.\n",
+ blk);
return 0;
out_buf:
- freedqbuf(tmpbuf);
+ kfree(tmpbuf);
return err;
}
/* Insert given block to the beginning of list with free entries */
-static int insert_free_dqentry(struct qtree_mem_dqinfo *info, dqbuf_t buf, uint blk)
+static int insert_free_dqentry(struct qtree_mem_dqinfo *info, char *buf,
+ uint blk)
{
- dqbuf_t tmpbuf = getdqbuf(info->dqi_usable_bs);
+ char *tmpbuf = getdqbuf(info->dqi_usable_bs);
struct qt_disk_dqdbheader *dh = (struct qt_disk_dqdbheader *)buf;
int err;
@@ -188,12 +186,12 @@ static int insert_free_dqentry(struct qtree_mem_dqinfo *info, dqbuf_t buf, uint
if (err < 0)
goto out_buf;
}
- freedqbuf(tmpbuf);
+ kfree(tmpbuf);
info->dqi_free_entry = blk;
mark_info_dirty(info->dqi_sb, info->dqi_type);
return 0;
out_buf:
- freedqbuf(tmpbuf);
+ kfree(tmpbuf);
return err;
}
@@ -215,7 +213,7 @@ static uint find_free_dqentry(struct qtree_mem_dqinfo *info,
{
uint blk, i;
struct qt_disk_dqdbheader *dh;
- dqbuf_t buf = getdqbuf(info->dqi_usable_bs);
+ char *buf = getdqbuf(info->dqi_usable_bs);
char *ddquot;
*err = 0;
@@ -233,11 +231,12 @@ static uint find_free_dqentry(struct qtree_mem_dqinfo *info,
blk = get_free_dqblk(info);
if ((int)blk < 0) {
*err = blk;
- freedqbuf(buf);
+ kfree(buf);
return 0;
}
memset(buf, 0, info->dqi_usable_bs);
- /* This is enough as block is already zeroed and entry list is empty... */
+ /* This is enough as the block is already zeroed and the entry
+ * list is empty... */
info->dqi_free_entry = blk;
mark_info_dirty(dquot->dq_sb, dquot->dq_type);
}
@@ -253,9 +252,12 @@ static uint find_free_dqentry(struct qtree_mem_dqinfo *info,
}
le16_add_cpu(&dh->dqdh_entries, 1);
/* Find free structure in block */
- for (i = 0, ddquot = ((char *)buf) + sizeof(struct qt_disk_dqdbheader);
- i < qtree_dqstr_in_blk(info) && !qtree_entry_unused(info, ddquot);
- i++, ddquot += info->dqi_entry_size);
+ ddquot = buf + sizeof(struct qt_disk_dqdbheader);
+ for (i = 0; i < qtree_dqstr_in_blk(info); i++) {
+ if (qtree_entry_unused(info, ddquot))
+ break;
+ ddquot += info->dqi_entry_size;
+ }
#ifdef __QUOTA_QT_PARANOIA
if (i == qtree_dqstr_in_blk(info)) {
printk(KERN_ERR "VFS: find_free_dqentry(): Data block full "
@@ -273,10 +275,10 @@ static uint find_free_dqentry(struct qtree_mem_dqinfo *info,
dquot->dq_off = (blk << info->dqi_blocksize_bits) +
sizeof(struct qt_disk_dqdbheader) +
i * info->dqi_entry_size;
- freedqbuf(buf);
+ kfree(buf);
return blk;
out_buf:
- freedqbuf(buf);
+ kfree(buf);
return 0;
}
@@ -284,7 +286,7 @@ out_buf:
static int do_insert_tree(struct qtree_mem_dqinfo *info, struct dquot *dquot,
uint *treeblk, int depth)
{
- dqbuf_t buf = getdqbuf(info->dqi_usable_bs);
+ char *buf = getdqbuf(info->dqi_usable_bs);
int ret = 0, newson = 0, newact = 0;
__le32 *ref;
uint newblk;
@@ -333,7 +335,7 @@ static int do_insert_tree(struct qtree_mem_dqinfo *info, struct dquot *dquot,
put_free_dqblk(info, buf, *treeblk);
}
out_buf:
- freedqbuf(buf);
+ kfree(buf);
return ret;
}
@@ -346,14 +348,15 @@ static inline int dq_insert_tree(struct qtree_mem_dqinfo *info,
}
/*
- * We don't have to be afraid of deadlocks as we never have quotas on quota files...
+ * We don't have to be afraid of deadlocks as we never have quotas on quota
+ * files...
*/
int qtree_write_dquot(struct qtree_mem_dqinfo *info, struct dquot *dquot)
{
int type = dquot->dq_type;
struct super_block *sb = dquot->dq_sb;
ssize_t ret;
- dqbuf_t ddquot = getdqbuf(info->dqi_entry_size);
+ char *ddquot = getdqbuf(info->dqi_entry_size);
if (!ddquot)
return -ENOMEM;
@@ -364,15 +367,15 @@ int qtree_write_dquot(struct qtree_mem_dqinfo *info, struct dquot *dquot)
if (ret < 0) {
printk(KERN_ERR "VFS: Error %zd occurred while "
"creating quota.\n", ret);
- freedqbuf(ddquot);
+ kfree(ddquot);
return ret;
}
}
spin_lock(&dq_data_lock);
info->dqi_ops->mem2disk_dqblk(ddquot, dquot);
spin_unlock(&dq_data_lock);
- ret = sb->s_op->quota_write(sb, type, (char *)ddquot,
- info->dqi_entry_size, dquot->dq_off);
+ ret = sb->s_op->quota_write(sb, type, ddquot, info->dqi_entry_size,
+ dquot->dq_off);
if (ret != info->dqi_entry_size) {
printk(KERN_WARNING "VFS: dquota write failed on dev %s\n",
sb->s_id);
@@ -382,7 +385,7 @@ int qtree_write_dquot(struct qtree_mem_dqinfo *info, struct dquot *dquot)
ret = 0;
}
dqstats.writes++;
- freedqbuf(ddquot);
+ kfree(ddquot);
return ret;
}
@@ -393,7 +396,7 @@ static int free_dqentry(struct qtree_mem_dqinfo *info, struct dquot *dquot,
uint blk)
{
struct qt_disk_dqdbheader *dh;
- dqbuf_t buf = getdqbuf(info->dqi_usable_bs);
+ char *buf = getdqbuf(info->dqi_usable_bs);
int ret = 0;
if (!buf)
@@ -444,7 +447,7 @@ static int free_dqentry(struct qtree_mem_dqinfo *info, struct dquot *dquot,
}
dquot->dq_off = 0; /* Quota is now unattached */
out_buf:
- freedqbuf(buf);
+ kfree(buf);
return ret;
}
@@ -452,7 +455,7 @@ out_buf:
static int remove_tree(struct qtree_mem_dqinfo *info, struct dquot *dquot,
uint *blk, int depth)
{
- dqbuf_t buf = getdqbuf(info->dqi_usable_bs);
+ char *buf = getdqbuf(info->dqi_usable_bs);
int ret = 0;
uint newblk;
__le32 *ref = (__le32 *)buf;
@@ -475,9 +478,8 @@ static int remove_tree(struct qtree_mem_dqinfo *info, struct dquot *dquot,
int i;
ref[get_index(info, dquot->dq_id, depth)] = cpu_to_le32(0);
/* Block got empty? */
- for (i = 0;
- i < (info->dqi_usable_bs >> 2) && !ref[i];
- i++);
+ for (i = 0; i < (info->dqi_usable_bs >> 2) && !ref[i]; i++)
+ ;
/* Don't put the root block into the free block list */
if (i == (info->dqi_usable_bs >> 2)
&& *blk != QT_TREEOFF) {
@@ -491,7 +493,7 @@ static int remove_tree(struct qtree_mem_dqinfo *info, struct dquot *dquot,
}
}
out_buf:
- freedqbuf(buf);
+ kfree(buf);
return ret;
}
@@ -510,7 +512,7 @@ EXPORT_SYMBOL(qtree_delete_dquot);
static loff_t find_block_dqentry(struct qtree_mem_dqinfo *info,
struct dquot *dquot, uint blk)
{
- dqbuf_t buf = getdqbuf(info->dqi_usable_bs);
+ char *buf = getdqbuf(info->dqi_usable_bs);
loff_t ret = 0;
int i;
char *ddquot;
@@ -522,9 +524,12 @@ static loff_t find_block_dqentry(struct qtree_mem_dqinfo *info,
printk(KERN_ERR "VFS: Can't read quota tree block %u.\n", blk);
goto out_buf;
}
- for (i = 0, ddquot = ((char *)buf) + sizeof(struct qt_disk_dqdbheader);
- i < qtree_dqstr_in_blk(info) && !info->dqi_ops->is_id(ddquot, dquot);
- i++, ddquot += info->dqi_entry_size);
+ ddquot = buf + sizeof(struct qt_disk_dqdbheader);
+ for (i = 0; i < qtree_dqstr_in_blk(info); i++) {
+ if (info->dqi_ops->is_id(ddquot, dquot))
+ break;
+ ddquot += info->dqi_entry_size;
+ }
if (i == qtree_dqstr_in_blk(info)) {
printk(KERN_ERR "VFS: Quota for id %u referenced "
"but not present.\n", dquot->dq_id);
@@ -535,7 +540,7 @@ static loff_t find_block_dqentry(struct qtree_mem_dqinfo *info,
qt_disk_dqdbheader) + i * info->dqi_entry_size;
}
out_buf:
- freedqbuf(buf);
+ kfree(buf);
return ret;
}
@@ -543,7 +548,7 @@ out_buf:
static loff_t find_tree_dqentry(struct qtree_mem_dqinfo *info,
struct dquot *dquot, uint blk, int depth)
{
- dqbuf_t buf = getdqbuf(info->dqi_usable_bs);
+ char *buf = getdqbuf(info->dqi_usable_bs);
loff_t ret = 0;
__le32 *ref = (__le32 *)buf;
@@ -563,7 +568,7 @@ static loff_t find_tree_dqentry(struct qtree_mem_dqinfo *info,
else
ret = find_block_dqentry(info, dquot, blk);
out_buf:
- freedqbuf(buf);
+ kfree(buf);
return ret;
}
@@ -579,7 +584,7 @@ int qtree_read_dquot(struct qtree_mem_dqinfo *info, struct dquot *dquot)
int type = dquot->dq_type;
struct super_block *sb = dquot->dq_sb;
loff_t offset;
- dqbuf_t ddquot;
+ char *ddquot;
int ret = 0;
#ifdef __QUOTA_QT_PARANOIA
@@ -607,8 +612,8 @@ int qtree_read_dquot(struct qtree_mem_dqinfo *info, struct dquot *dquot)
ddquot = getdqbuf(info->dqi_entry_size);
if (!ddquot)
return -ENOMEM;
- ret = sb->s_op->quota_read(sb, type, (char *)ddquot,
- info->dqi_entry_size, dquot->dq_off);
+ ret = sb->s_op->quota_read(sb, type, ddquot, info->dqi_entry_size,
+ dquot->dq_off);
if (ret != info->dqi_entry_size) {
if (ret >= 0)
ret = -EIO;
@@ -616,7 +621,7 @@ int qtree_read_dquot(struct qtree_mem_dqinfo *info, struct dquot *dquot)
"structure for id %u.\n", dquot->dq_id);
set_bit(DQ_FAKE_B, &dquot->dq_flags);
memset(&dquot->dq_dqb, 0, sizeof(struct mem_dqblk));
- freedqbuf(ddquot);
+ kfree(ddquot);
goto out;
}
spin_lock(&dq_data_lock);
@@ -627,7 +632,7 @@ int qtree_read_dquot(struct qtree_mem_dqinfo *info, struct dquot *dquot)
!dquot->dq_dqb.dqb_isoftlimit)
set_bit(DQ_FAKE_B, &dquot->dq_flags);
spin_unlock(&dq_data_lock);
- freedqbuf(ddquot);
+ kfree(ddquot);
out:
dqstats.reads++;
return ret;
@@ -638,7 +643,8 @@ EXPORT_SYMBOL(qtree_read_dquot);
* the only one operating on dquot (thanks to dq_lock) */
int qtree_release_dquot(struct qtree_mem_dqinfo *info, struct dquot *dquot)
{
- if (test_bit(DQ_FAKE_B, &dquot->dq_flags) && !(dquot->dq_dqb.dqb_curinodes | dquot->dq_dqb.dqb_curspace))
+ if (test_bit(DQ_FAKE_B, &dquot->dq_flags) &&
+ !(dquot->dq_dqb.dqb_curinodes | dquot->dq_dqb.dqb_curspace))
return qtree_delete_dquot(info, dquot);
return 0;
}
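The quota_tree.c hunks above retire the dqbuf_t typedef: buffers are plain char * now and are released with kfree() directly instead of freedqbuf(). A minimal sketch of the matching allocation helper, assuming it stays a thin kmalloc() wrapper (the GFP flag and warning text here are assumptions, not lifted from the patch):

	static inline char *getdqbuf(size_t size)
	{
		/* Plain slab buffer; every caller above pairs it with kfree(). */
		char *buf = kmalloc(size, GFP_NOFS);
		if (!buf)
			printk(KERN_WARNING
			       "VFS: Not enough memory for quota buffer.\n");
		return buf;
	}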
diff --git a/fs/quota_tree.h b/fs/quota/quota_tree.h
index a1ab8db81a5..a1ab8db81a5 100644
--- a/fs/quota_tree.h
+++ b/fs/quota/quota_tree.h
diff --git a/fs/quota_v1.c b/fs/quota/quota_v1.c
index b4af1c69ad1..0edcf42b177 100644
--- a/fs/quota_v1.c
+++ b/fs/quota/quota_v1.c
@@ -62,11 +62,14 @@ static int v1_read_dqblk(struct dquot *dquot)
/* Set structure to 0s in case read fails/is after end of file */
memset(&dqblk, 0, sizeof(struct v1_disk_dqblk));
- dquot->dq_sb->s_op->quota_read(dquot->dq_sb, type, (char *)&dqblk, sizeof(struct v1_disk_dqblk), v1_dqoff(dquot->dq_id));
+ dquot->dq_sb->s_op->quota_read(dquot->dq_sb, type, (char *)&dqblk,
+ sizeof(struct v1_disk_dqblk), v1_dqoff(dquot->dq_id));
v1_disk2mem_dqblk(&dquot->dq_dqb, &dqblk);
- if (dquot->dq_dqb.dqb_bhardlimit == 0 && dquot->dq_dqb.dqb_bsoftlimit == 0 &&
- dquot->dq_dqb.dqb_ihardlimit == 0 && dquot->dq_dqb.dqb_isoftlimit == 0)
+ if (dquot->dq_dqb.dqb_bhardlimit == 0 &&
+ dquot->dq_dqb.dqb_bsoftlimit == 0 &&
+ dquot->dq_dqb.dqb_ihardlimit == 0 &&
+ dquot->dq_dqb.dqb_isoftlimit == 0)
set_bit(DQ_FAKE_B, &dquot->dq_flags);
dqstats.reads++;
@@ -81,13 +84,16 @@ static int v1_commit_dqblk(struct dquot *dquot)
v1_mem2disk_dqblk(&dqblk, &dquot->dq_dqb);
if (dquot->dq_id == 0) {
- dqblk.dqb_btime = sb_dqopt(dquot->dq_sb)->info[type].dqi_bgrace;
- dqblk.dqb_itime = sb_dqopt(dquot->dq_sb)->info[type].dqi_igrace;
+ dqblk.dqb_btime =
+ sb_dqopt(dquot->dq_sb)->info[type].dqi_bgrace;
+ dqblk.dqb_itime =
+ sb_dqopt(dquot->dq_sb)->info[type].dqi_igrace;
}
ret = 0;
if (sb_dqopt(dquot->dq_sb)->files[type])
- ret = dquot->dq_sb->s_op->quota_write(dquot->dq_sb, type, (char *)&dqblk,
- sizeof(struct v1_disk_dqblk), v1_dqoff(dquot->dq_id));
+ ret = dquot->dq_sb->s_op->quota_write(dquot->dq_sb, type,
+ (char *)&dqblk, sizeof(struct v1_disk_dqblk),
+ v1_dqoff(dquot->dq_id));
if (ret != sizeof(struct v1_disk_dqblk)) {
printk(KERN_WARNING "VFS: dquota write failed on dev %s\n",
dquot->dq_sb->s_id);
@@ -130,15 +136,20 @@ static int v1_check_quota_file(struct super_block *sb, int type)
return 0;
blocks = isize >> BLOCK_SIZE_BITS;
off = isize & (BLOCK_SIZE - 1);
- if ((blocks % sizeof(struct v1_disk_dqblk) * BLOCK_SIZE + off) % sizeof(struct v1_disk_dqblk))
+ if ((blocks % sizeof(struct v1_disk_dqblk) * BLOCK_SIZE + off) %
+ sizeof(struct v1_disk_dqblk))
return 0;
- /* Doublecheck whether we didn't get file with new format - with old quotactl() this could happen */
- size = sb->s_op->quota_read(sb, type, (char *)&dqhead, sizeof(struct v2_disk_dqheader), 0);
+ /* Doublecheck whether we didn't get file with new format - with old
+ * quotactl() this could happen */
+ size = sb->s_op->quota_read(sb, type, (char *)&dqhead,
+ sizeof(struct v2_disk_dqheader), 0);
if (size != sizeof(struct v2_disk_dqheader))
return 1; /* Probably not new format */
if (le32_to_cpu(dqhead.dqh_magic) != quota_magics[type])
return 1; /* Definitely not new format */
- printk(KERN_INFO "VFS: %s: Refusing to turn on old quota format on given file. It probably contains newer quota format.\n", sb->s_id);
+ printk(KERN_INFO
+ "VFS: %s: Refusing to turn on old quota format on given file."
+ " It probably contains newer quota format.\n", sb->s_id);
return 0; /* Seems like a new format file -> refuse it */
}
@@ -148,7 +159,9 @@ static int v1_read_file_info(struct super_block *sb, int type)
struct v1_disk_dqblk dqblk;
int ret;
- if ((ret = sb->s_op->quota_read(sb, type, (char *)&dqblk, sizeof(struct v1_disk_dqblk), v1_dqoff(0))) != sizeof(struct v1_disk_dqblk)) {
+ ret = sb->s_op->quota_read(sb, type, (char *)&dqblk,
+ sizeof(struct v1_disk_dqblk), v1_dqoff(0));
+ if (ret != sizeof(struct v1_disk_dqblk)) {
if (ret >= 0)
ret = -EIO;
goto out;
@@ -157,8 +170,10 @@ static int v1_read_file_info(struct super_block *sb, int type)
/* limits are stored as unsigned 32-bit data */
dqopt->info[type].dqi_maxblimit = 0xffffffff;
dqopt->info[type].dqi_maxilimit = 0xffffffff;
- dqopt->info[type].dqi_igrace = dqblk.dqb_itime ? dqblk.dqb_itime : MAX_IQ_TIME;
- dqopt->info[type].dqi_bgrace = dqblk.dqb_btime ? dqblk.dqb_btime : MAX_DQ_TIME;
+ dqopt->info[type].dqi_igrace =
+ dqblk.dqb_itime ? dqblk.dqb_itime : MAX_IQ_TIME;
+ dqopt->info[type].dqi_bgrace =
+ dqblk.dqb_btime ? dqblk.dqb_btime : MAX_DQ_TIME;
out:
return ret;
}
@@ -170,8 +185,9 @@ static int v1_write_file_info(struct super_block *sb, int type)
int ret;
dqopt->info[type].dqi_flags &= ~DQF_INFO_DIRTY;
- if ((ret = sb->s_op->quota_read(sb, type, (char *)&dqblk,
- sizeof(struct v1_disk_dqblk), v1_dqoff(0))) != sizeof(struct v1_disk_dqblk)) {
+ ret = sb->s_op->quota_read(sb, type, (char *)&dqblk,
+ sizeof(struct v1_disk_dqblk), v1_dqoff(0));
+ if (ret != sizeof(struct v1_disk_dqblk)) {
if (ret >= 0)
ret = -EIO;
goto out;
diff --git a/fs/quota_v2.c b/fs/quota/quota_v2.c
index b618b563635..a5475fb1ae4 100644
--- a/fs/quota_v2.c
+++ b/fs/quota/quota_v2.c
@@ -54,7 +54,8 @@ static int v2_check_quota_file(struct super_block *sb, int type)
static const uint quota_magics[] = V2_INITQMAGICS;
static const uint quota_versions[] = V2_INITQVERSIONS;
- size = sb->s_op->quota_read(sb, type, (char *)&dqhead, sizeof(struct v2_disk_dqheader), 0);
+ size = sb->s_op->quota_read(sb, type, (char *)&dqhead,
+ sizeof(struct v2_disk_dqheader), 0);
if (size != sizeof(struct v2_disk_dqheader)) {
printk("quota_v2: failed read expected=%zd got=%zd\n",
sizeof(struct v2_disk_dqheader), size);
diff --git a/fs/quotaio_v1.h b/fs/quota/quotaio_v1.h
index 746654b5de7..746654b5de7 100644
--- a/fs/quotaio_v1.h
+++ b/fs/quota/quotaio_v1.h
diff --git a/fs/quotaio_v2.h b/fs/quota/quotaio_v2.h
index 530fe580685..530fe580685 100644
--- a/fs/quotaio_v2.h
+++ b/fs/quota/quotaio_v2.h
diff --git a/fs/ramfs/file-nommu.c b/fs/ramfs/file-nommu.c
index 5d7c7ececa6..995ef1d6686 100644
--- a/fs/ramfs/file-nommu.c
+++ b/fs/ramfs/file-nommu.c
@@ -18,7 +18,6 @@
#include <linux/string.h>
#include <linux/backing-dev.h>
#include <linux/ramfs.h>
-#include <linux/quotaops.h>
#include <linux/pagevec.h>
#include <linux/mman.h>
@@ -205,11 +204,6 @@ static int ramfs_nommu_setattr(struct dentry *dentry, struct iattr *ia)
if (ret)
return ret;
- /* by providing our own setattr() method, we skip this quotaism */
- if ((old_ia_valid & ATTR_UID && ia->ia_uid != inode->i_uid) ||
- (old_ia_valid & ATTR_GID && ia->ia_gid != inode->i_gid))
- ret = DQUOT_TRANSFER(inode, ia) ? -EDQUOT : 0;
-
/* pick out size-changing events */
if (ia->ia_valid & ATTR_SIZE) {
loff_t size = i_size_read(inode);
diff --git a/fs/reiserfs/bitmap.c b/fs/reiserfs/bitmap.c
index 4646caa6045..f32d1425cc9 100644
--- a/fs/reiserfs/bitmap.c
+++ b/fs/reiserfs/bitmap.c
@@ -430,7 +430,7 @@ static void _reiserfs_free_block(struct reiserfs_transaction_handle *th,
journal_mark_dirty(th, s, sbh);
if (for_unformatted)
- DQUOT_FREE_BLOCK_NODIRTY(inode, 1);
+ vfs_dq_free_block_nodirty(inode, 1);
}
void reiserfs_free_block(struct reiserfs_transaction_handle *th,
@@ -1055,7 +1055,7 @@ static inline int blocknrs_and_prealloc_arrays_from_search_start
amount_needed, hint->inode->i_uid);
#endif
quota_ret =
- DQUOT_ALLOC_BLOCK_NODIRTY(hint->inode, amount_needed);
+ vfs_dq_alloc_block_nodirty(hint->inode, amount_needed);
if (quota_ret) /* Quota exceeded? */
return QUOTA_EXCEEDED;
if (hint->preallocate && hint->prealloc_size) {
@@ -1064,8 +1064,7 @@ static inline int blocknrs_and_prealloc_arrays_from_search_start
"reiserquota: allocating (prealloc) %d blocks id=%u",
hint->prealloc_size, hint->inode->i_uid);
#endif
- quota_ret =
- DQUOT_PREALLOC_BLOCK_NODIRTY(hint->inode,
+ quota_ret = vfs_dq_prealloc_block_nodirty(hint->inode,
hint->prealloc_size);
if (quota_ret)
hint->preallocate = hint->prealloc_size = 0;
@@ -1098,7 +1097,10 @@ static inline int blocknrs_and_prealloc_arrays_from_search_start
nr_allocated,
hint->inode->i_uid);
#endif
- DQUOT_FREE_BLOCK_NODIRTY(hint->inode, amount_needed + hint->prealloc_size - nr_allocated); /* Free not allocated blocks */
+ /* Free not allocated blocks */
+ vfs_dq_free_block_nodirty(hint->inode,
+ amount_needed + hint->prealloc_size -
+ nr_allocated);
}
while (nr_allocated--)
reiserfs_free_block(hint->th, hint->inode,
@@ -1129,7 +1131,7 @@ static inline int blocknrs_and_prealloc_arrays_from_search_start
REISERFS_I(hint->inode)->i_prealloc_count,
hint->inode->i_uid);
#endif
- DQUOT_FREE_BLOCK_NODIRTY(hint->inode, amount_needed +
+ vfs_dq_free_block_nodirty(hint->inode, amount_needed +
hint->prealloc_size - nr_allocated -
REISERFS_I(hint->inode)->
i_prealloc_count);
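The reiserfs/bitmap.c hunks above are part of the mechanical rename from the DQUOT_*() macros to vfs_dq_*() functions; call sites and return-value conventions stay the same. As a rough sketch only (an approximation, not the verbatim quotaops.h code), a block-granularity helper is expected to be a thin wrapper over the byte-granularity one:

	/* Illustrative shape of a vfs_dq_* wrapper; non-zero means the
	 * request would exceed a quota limit and must be refused. */
	static inline int vfs_dq_alloc_block_nodirty(struct inode *inode, qsize_t nr)
	{
		return vfs_dq_alloc_space_nodirty(inode,
				nr << inode->i_sb->s_blocksize_bits);
	}

Callers such as the reiserfs code above therefore keep the old pattern: a non-zero return is turned into an error (typically -EDQUOT) and the in-progress allocation is rolled back.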
diff --git a/fs/reiserfs/inode.c b/fs/reiserfs/inode.c
index 55fce92cdf1..823227a7662 100644
--- a/fs/reiserfs/inode.c
+++ b/fs/reiserfs/inode.c
@@ -53,7 +53,7 @@ void reiserfs_delete_inode(struct inode *inode)
* after delete_object so that quota updates go into the same transaction as
* stat data deletion */
if (!err)
- DQUOT_FREE_INODE(inode);
+ vfs_dq_free_inode(inode);
if (journal_end(&th, inode->i_sb, jbegin_count))
goto out;
@@ -1763,7 +1763,7 @@ int reiserfs_new_inode(struct reiserfs_transaction_handle *th,
BUG_ON(!th->t_trans_id);
- if (DQUOT_ALLOC_INODE(inode)) {
+ if (vfs_dq_alloc_inode(inode)) {
err = -EDQUOT;
goto out_end_trans;
}
@@ -1947,12 +1947,12 @@ int reiserfs_new_inode(struct reiserfs_transaction_handle *th,
INODE_PKEY(inode)->k_objectid = 0;
/* Quota change must be inside a transaction for journaling */
- DQUOT_FREE_INODE(inode);
+ vfs_dq_free_inode(inode);
out_end_trans:
journal_end(th, th->t_super, th->t_blocks_allocated);
/* Drop can be outside and it needs more credits so it's better to have it outside */
- DQUOT_DROP(inode);
+ vfs_dq_drop(inode);
inode->i_flags |= S_NOQUOTA;
make_bad_inode(inode);
@@ -3119,7 +3119,7 @@ int reiserfs_setattr(struct dentry *dentry, struct iattr *attr)
if (error)
goto out;
error =
- DQUOT_TRANSFER(inode, attr) ? -EDQUOT : 0;
+ vfs_dq_transfer(inode, attr) ? -EDQUOT : 0;
if (error) {
journal_end(&th, inode->i_sb,
jbegin_count);
diff --git a/fs/reiserfs/namei.c b/fs/reiserfs/namei.c
index 738967f6c8e..639d635d9d4 100644
--- a/fs/reiserfs/namei.c
+++ b/fs/reiserfs/namei.c
@@ -555,7 +555,7 @@ static int reiserfs_add_entry(struct reiserfs_transaction_handle *th,
*/
static int drop_new_inode(struct inode *inode)
{
- DQUOT_DROP(inode);
+ vfs_dq_drop(inode);
make_bad_inode(inode);
inode->i_flags |= S_NOQUOTA;
iput(inode);
@@ -563,7 +563,7 @@ static int drop_new_inode(struct inode *inode)
}
/* utility function that does setup for reiserfs_new_inode.
-** DQUOT_INIT needs lots of credits so it's better to have it
+** vfs_dq_init needs lots of credits so it's better to have it
** outside of a transaction, so we had to pull some bits of
** reiserfs_new_inode out into this func.
*/
@@ -586,7 +586,7 @@ static int new_inode_init(struct inode *inode, struct inode *dir, int mode)
} else {
inode->i_gid = current_fsgid();
}
- DQUOT_INIT(inode);
+ vfs_dq_init(inode);
return 0;
}
diff --git a/fs/reiserfs/stree.c b/fs/reiserfs/stree.c
index abbc64dcc8d..73aaa33f673 100644
--- a/fs/reiserfs/stree.c
+++ b/fs/reiserfs/stree.c
@@ -1297,7 +1297,7 @@ int reiserfs_delete_item(struct reiserfs_transaction_handle *th, struct treepath
"reiserquota delete_item(): freeing %u, id=%u type=%c",
quota_cut_bytes, p_s_inode->i_uid, head2type(&s_ih));
#endif
- DQUOT_FREE_SPACE_NODIRTY(p_s_inode, quota_cut_bytes);
+ vfs_dq_free_space_nodirty(p_s_inode, quota_cut_bytes);
/* Return deleted body length */
return n_ret_value;
@@ -1383,7 +1383,7 @@ void reiserfs_delete_solid_item(struct reiserfs_transaction_handle *th,
quota_cut_bytes, inode->i_uid,
key2type(key));
#endif
- DQUOT_FREE_SPACE_NODIRTY(inode,
+ vfs_dq_free_space_nodirty(inode,
quota_cut_bytes);
}
break;
@@ -1734,7 +1734,7 @@ int reiserfs_cut_from_item(struct reiserfs_transaction_handle *th,
"reiserquota cut_from_item(): freeing %u id=%u type=%c",
quota_cut_bytes, p_s_inode->i_uid, '?');
#endif
- DQUOT_FREE_SPACE_NODIRTY(p_s_inode, quota_cut_bytes);
+ vfs_dq_free_space_nodirty(p_s_inode, quota_cut_bytes);
return n_ret_value;
}
@@ -1971,7 +1971,7 @@ int reiserfs_paste_into_item(struct reiserfs_transaction_handle *th, struct tree
key2type(&(p_s_key->on_disk_key)));
#endif
- if (DQUOT_ALLOC_SPACE_NODIRTY(inode, n_pasted_size)) {
+ if (vfs_dq_alloc_space_nodirty(inode, n_pasted_size)) {
pathrelse(p_s_search_path);
return -EDQUOT;
}
@@ -2027,7 +2027,7 @@ int reiserfs_paste_into_item(struct reiserfs_transaction_handle *th, struct tree
n_pasted_size, inode->i_uid,
key2type(&(p_s_key->on_disk_key)));
#endif
- DQUOT_FREE_SPACE_NODIRTY(inode, n_pasted_size);
+ vfs_dq_free_space_nodirty(inode, n_pasted_size);
return retval;
}
@@ -2060,7 +2060,7 @@ int reiserfs_insert_item(struct reiserfs_transaction_handle *th, struct treepath
#endif
/* We can't dirty inode here. It would be immediately written but
* appropriate stat item isn't inserted yet... */
- if (DQUOT_ALLOC_SPACE_NODIRTY(inode, quota_bytes)) {
+ if (vfs_dq_alloc_space_nodirty(inode, quota_bytes)) {
pathrelse(p_s_path);
return -EDQUOT;
}
@@ -2112,6 +2112,6 @@ int reiserfs_insert_item(struct reiserfs_transaction_handle *th, struct treepath
quota_bytes, inode->i_uid, head2type(p_s_ih));
#endif
if (inode)
- DQUOT_FREE_SPACE_NODIRTY(inode, quota_bytes);
+ vfs_dq_free_space_nodirty(inode, quota_bytes);
return retval;
}
diff --git a/fs/reiserfs/super.c b/fs/reiserfs/super.c
index f3c820b7582..5dbafb73940 100644
--- a/fs/reiserfs/super.c
+++ b/fs/reiserfs/super.c
@@ -250,7 +250,7 @@ static int finish_unfinished(struct super_block *s)
retval = remove_save_link_only(s, &save_link_key, 0);
continue;
}
- DQUOT_INIT(inode);
+ vfs_dq_init(inode);
if (truncate && S_ISDIR(inode->i_mode)) {
/* We got a truncate request for a dir which is impossible.
@@ -629,8 +629,6 @@ static const struct super_operations reiserfs_sops = {
#ifdef CONFIG_QUOTA
#define QTYPE2NAME(t) ((t)==USRQUOTA?"user":"group")
-static int reiserfs_dquot_initialize(struct inode *, int);
-static int reiserfs_dquot_drop(struct inode *);
static int reiserfs_write_dquot(struct dquot *);
static int reiserfs_acquire_dquot(struct dquot *);
static int reiserfs_release_dquot(struct dquot *);
@@ -639,8 +637,8 @@ static int reiserfs_write_info(struct super_block *, int);
static int reiserfs_quota_on(struct super_block *, int, int, char *, int);
static struct dquot_operations reiserfs_quota_operations = {
- .initialize = reiserfs_dquot_initialize,
- .drop = reiserfs_dquot_drop,
+ .initialize = dquot_initialize,
+ .drop = dquot_drop,
.alloc_space = dquot_alloc_space,
.alloc_inode = dquot_alloc_inode,
.free_space = dquot_free_space,
@@ -1896,58 +1894,6 @@ static int reiserfs_statfs(struct dentry *dentry, struct kstatfs *buf)
}
#ifdef CONFIG_QUOTA
-static int reiserfs_dquot_initialize(struct inode *inode, int type)
-{
- struct reiserfs_transaction_handle th;
- int ret, err;
-
- /* We may create quota structure so we need to reserve enough blocks */
- reiserfs_write_lock(inode->i_sb);
- ret =
- journal_begin(&th, inode->i_sb,
- 2 * REISERFS_QUOTA_INIT_BLOCKS(inode->i_sb));
- if (ret)
- goto out;
- ret = dquot_initialize(inode, type);
- err =
- journal_end(&th, inode->i_sb,
- 2 * REISERFS_QUOTA_INIT_BLOCKS(inode->i_sb));
- if (!ret && err)
- ret = err;
- out:
- reiserfs_write_unlock(inode->i_sb);
- return ret;
-}
-
-static int reiserfs_dquot_drop(struct inode *inode)
-{
- struct reiserfs_transaction_handle th;
- int ret, err;
-
- /* We may delete quota structure so we need to reserve enough blocks */
- reiserfs_write_lock(inode->i_sb);
- ret =
- journal_begin(&th, inode->i_sb,
- 2 * REISERFS_QUOTA_DEL_BLOCKS(inode->i_sb));
- if (ret) {
- /*
- * We call dquot_drop() anyway to at least release references
- * to quota structures so that umount does not hang.
- */
- dquot_drop(inode);
- goto out;
- }
- ret = dquot_drop(inode);
- err =
- journal_end(&th, inode->i_sb,
- 2 * REISERFS_QUOTA_DEL_BLOCKS(inode->i_sb));
- if (!ret && err)
- ret = err;
- out:
- reiserfs_write_unlock(inode->i_sb);
- return ret;
-}
-
static int reiserfs_write_dquot(struct dquot *dquot)
{
struct reiserfs_transaction_handle th;
diff --git a/fs/reiserfs/xattr.c b/fs/reiserfs/xattr.c
index ad92461cbfc..ae881ccd2f0 100644
--- a/fs/reiserfs/xattr.c
+++ b/fs/reiserfs/xattr.c
@@ -1136,7 +1136,7 @@ xattr_lookup_poison(struct dentry *dentry, struct qstr *q1, struct qstr *name)
return 1;
}
-static struct dentry_operations xattr_lookup_poison_ops = {
+static const struct dentry_operations xattr_lookup_poison_ops = {
.d_compare = xattr_lookup_poison,
};
diff --git a/fs/smbfs/dir.c b/fs/smbfs/dir.c
index e7ddd0328dd..3e4803b4427 100644
--- a/fs/smbfs/dir.c
+++ b/fs/smbfs/dir.c
@@ -277,7 +277,7 @@ static int smb_hash_dentry(struct dentry *, struct qstr *);
static int smb_compare_dentry(struct dentry *, struct qstr *, struct qstr *);
static int smb_delete_dentry(struct dentry *);
-static struct dentry_operations smbfs_dentry_operations =
+static const struct dentry_operations smbfs_dentry_operations =
{
.d_revalidate = smb_lookup_validate,
.d_hash = smb_hash_dentry,
@@ -285,7 +285,7 @@ static struct dentry_operations smbfs_dentry_operations =
.d_delete = smb_delete_dentry,
};
-static struct dentry_operations smbfs_dentry_operations_case =
+static const struct dentry_operations smbfs_dentry_operations_case =
{
.d_revalidate = smb_lookup_validate,
.d_delete = smb_delete_dentry,
diff --git a/fs/super.c b/fs/super.c
index 6ce501447ad..2ba481518ba 100644
--- a/fs/super.c
+++ b/fs/super.c
@@ -197,7 +197,7 @@ void deactivate_super(struct super_block *s)
if (atomic_dec_and_lock(&s->s_active, &sb_lock)) {
s->s_count -= S_BIAS-1;
spin_unlock(&sb_lock);
- DQUOT_OFF(s, 0);
+ vfs_dq_off(s, 0);
down_write(&s->s_umount);
fs->kill_sb(s);
put_filesystem(fs);
@@ -266,7 +266,7 @@ EXPORT_SYMBOL(unlock_super);
void __fsync_super(struct super_block *sb)
{
sync_inodes_sb(sb, 0);
- DQUOT_SYNC(sb);
+ vfs_dq_sync(sb);
lock_super(sb);
if (sb->s_dirt && sb->s_op->write_super)
sb->s_op->write_super(sb);
@@ -655,7 +655,7 @@ int do_remount_sb(struct super_block *sb, int flags, void *data, int force)
mark_files_ro(sb);
else if (!fs_may_remount_ro(sb))
return -EBUSY;
- retval = DQUOT_OFF(sb, 1);
+ retval = vfs_dq_off(sb, 1);
if (retval < 0 && retval != -ENOSYS)
return -EBUSY;
}
@@ -670,11 +670,11 @@ int do_remount_sb(struct super_block *sb, int flags, void *data, int force)
}
sb->s_flags = (sb->s_flags & ~MS_RMT_MASK) | (flags & MS_RMT_MASK);
if (remount_rw)
- DQUOT_ON_REMOUNT(sb);
+ vfs_dq_quota_on_remount(sb);
return 0;
}
-static void do_emergency_remount(unsigned long foo)
+static void do_emergency_remount(struct work_struct *work)
{
struct super_block *sb;
@@ -697,12 +697,19 @@ static void do_emergency_remount(unsigned long foo)
spin_lock(&sb_lock);
}
spin_unlock(&sb_lock);
+ kfree(work);
printk("Emergency Remount complete\n");
}
void emergency_remount(void)
{
- pdflush_operation(do_emergency_remount, 0);
+ struct work_struct *work;
+
+ work = kmalloc(sizeof(*work), GFP_ATOMIC);
+ if (work) {
+ INIT_WORK(work, do_emergency_remount);
+ schedule_work(work);
+ }
}
/*
@@ -831,7 +838,8 @@ int get_sb_bdev(struct file_system_type *fs_type,
bdev->bd_super = s;
}
- return simple_set_mnt(mnt, s);
+ simple_set_mnt(mnt, s);
+ return 0;
error_s:
error = PTR_ERR(s);
@@ -877,7 +885,8 @@ int get_sb_nodev(struct file_system_type *fs_type,
return error;
}
s->s_flags |= MS_ACTIVE;
- return simple_set_mnt(mnt, s);
+ simple_set_mnt(mnt, s);
+ return 0;
}
EXPORT_SYMBOL(get_sb_nodev);
@@ -909,7 +918,8 @@ int get_sb_single(struct file_system_type *fs_type,
s->s_flags |= MS_ACTIVE;
}
do_remount_sb(s, flags, data, 0);
- return simple_set_mnt(mnt, s);
+ simple_set_mnt(mnt, s);
+ return 0;
}
EXPORT_SYMBOL(get_sb_single);
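Both fs/super.c above and fs/sync.c below drop pdflush_operation() in favour of a one-shot, self-freeing work item. A self-contained sketch of that pattern (the handler and function names are illustrative, not taken from the patch):

	#include <linux/workqueue.h>
	#include <linux/slab.h>

	static void deferred_handler(struct work_struct *work)
	{
		/* ... perform the deferred operation ... */
		kfree(work);		/* the handler owns and frees the work item */
	}

	static void kick_deferred(void)
	{
		struct work_struct *work;

		/* GFP_ATOMIC: the emergency paths may run from atomic context
		 * (e.g. sysrq), so a sleeping allocation is not an option. */
		work = kmalloc(sizeof(*work), GFP_ATOMIC);
		if (work) {
			INIT_WORK(work, deferred_handler);
			schedule_work(work);	/* runs later on the shared kernel workqueue */
		}
	}

If the allocation fails the request is silently dropped, which matches the best-effort nature of the emergency sync/remount paths.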
diff --git a/fs/sync.c b/fs/sync.c
index a16d53e5fe9..7abc65fbf21 100644
--- a/fs/sync.c
+++ b/fs/sync.c
@@ -25,7 +25,7 @@ static void do_sync(unsigned long wait)
{
wakeup_pdflush(0);
sync_inodes(0); /* All mappings, inodes and their blockdevs */
- DQUOT_SYNC(NULL);
+ vfs_dq_sync(NULL);
sync_supers(); /* Write the superblocks */
sync_filesystems(0); /* Start syncing the filesystems */
sync_filesystems(wait); /* Waitingly sync the filesystems */
@@ -42,9 +42,21 @@ SYSCALL_DEFINE0(sync)
return 0;
}
+static void do_sync_work(struct work_struct *work)
+{
+ do_sync(0);
+ kfree(work);
+}
+
void emergency_sync(void)
{
- pdflush_operation(do_sync, 0);
+ struct work_struct *work;
+
+ work = kmalloc(sizeof(*work), GFP_ATOMIC);
+ if (work) {
+ INIT_WORK(work, do_sync_work);
+ schedule_work(work);
+ }
}
/*
diff --git a/fs/sysfs/dir.c b/fs/sysfs/dir.c
index 66aeb4fff0c..d88d0fac9fa 100644
--- a/fs/sysfs/dir.c
+++ b/fs/sysfs/dir.c
@@ -302,7 +302,7 @@ static void sysfs_d_iput(struct dentry * dentry, struct inode * inode)
iput(inode);
}
-static struct dentry_operations sysfs_dentry_ops = {
+static const struct dentry_operations sysfs_dentry_ops = {
.d_iput = sysfs_d_iput,
};
diff --git a/fs/sysv/namei.c b/fs/sysv/namei.c
index a1f1ef33e81..33e047b59b8 100644
--- a/fs/sysv/namei.c
+++ b/fs/sysv/namei.c
@@ -38,7 +38,7 @@ static int sysv_hash(struct dentry *dentry, struct qstr *qstr)
return 0;
}
-struct dentry_operations sysv_dentry_operations = {
+const struct dentry_operations sysv_dentry_operations = {
.d_hash = sysv_hash,
};
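The smbfs, sysfs and sysv hunks above (and the reiserfs xattr one earlier) constify their dentry_operations tables so they can live in read-only data. Illustrative sketch only; the callback is hypothetical, and this compiles cleanly only if struct dentry's d_op member is itself a pointer to const, which the same series presumably adjusts in include/linux/dcache.h:

	static int example_d_hash(struct dentry *dentry, struct qstr *qstr)
	{
		return 0;	/* hypothetical hash callback */
	}

	static const struct dentry_operations example_dentry_ops = {
		.d_hash = example_d_hash,	/* unset ops default to NULL */
	};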
diff --git a/fs/sysv/sysv.h b/fs/sysv/sysv.h
index 38ebe3f85b3..5784a318c88 100644
--- a/fs/sysv/sysv.h
+++ b/fs/sysv/sysv.h
@@ -170,7 +170,7 @@ extern const struct file_operations sysv_file_operations;
extern const struct file_operations sysv_dir_operations;
extern const struct address_space_operations sysv_aops;
extern const struct super_operations sysv_sops;
-extern struct dentry_operations sysv_dentry_operations;
+extern const struct dentry_operations sysv_dentry_operations;
enum {
diff --git a/fs/ubifs/super.c b/fs/ubifs/super.c
index 1182b66a549..c5c98355459 100644
--- a/fs/ubifs/super.c
+++ b/fs/ubifs/super.c
@@ -2034,7 +2034,8 @@ static int ubifs_get_sb(struct file_system_type *fs_type, int flags,
/* 'fill_super()' opens ubi again so we must close it here */
ubi_close_volume(ubi);
- return simple_set_mnt(mnt, sb);
+ simple_set_mnt(mnt, sb);
+ return 0;
out_deact:
up_write(&sb->s_umount);
diff --git a/fs/udf/balloc.c b/fs/udf/balloc.c
index 1b809bd494b..2bb788a2acb 100644
--- a/fs/udf/balloc.c
+++ b/fs/udf/balloc.c
@@ -206,7 +206,7 @@ static void udf_bitmap_free_blocks(struct super_block *sb,
((char *)bh->b_data)[(bit + i) >> 3]);
} else {
if (inode)
- DQUOT_FREE_BLOCK(inode, 1);
+ vfs_dq_free_block(inode, 1);
udf_add_free_space(sbi, sbi->s_partition, 1);
}
}
@@ -261,11 +261,11 @@ static int udf_bitmap_prealloc_blocks(struct super_block *sb,
while (bit < (sb->s_blocksize << 3) && block_count > 0) {
if (!udf_test_bit(bit, bh->b_data))
goto out;
- else if (DQUOT_PREALLOC_BLOCK(inode, 1))
+ else if (vfs_dq_prealloc_block(inode, 1))
goto out;
else if (!udf_clear_bit(bit, bh->b_data)) {
udf_debug("bit already cleared for block %d\n", bit);
- DQUOT_FREE_BLOCK(inode, 1);
+ vfs_dq_free_block(inode, 1);
goto out;
}
block_count--;
@@ -393,7 +393,7 @@ got_block:
/*
* Check quota for allocation of this block.
*/
- if (inode && DQUOT_ALLOC_BLOCK(inode, 1)) {
+ if (inode && vfs_dq_alloc_block(inode, 1)) {
mutex_unlock(&sbi->s_alloc_mutex);
*err = -EDQUOT;
return 0;
@@ -452,7 +452,7 @@ static void udf_table_free_blocks(struct super_block *sb,
/* We do this up front - There are some error conditions that
could occure, but.. oh well */
if (inode)
- DQUOT_FREE_BLOCK(inode, count);
+ vfs_dq_free_block(inode, count);
if (udf_add_free_space(sbi, sbi->s_partition, count))
mark_buffer_dirty(sbi->s_lvid_bh);
@@ -700,7 +700,7 @@ static int udf_table_prealloc_blocks(struct super_block *sb,
epos.offset -= adsize;
alloc_count = (elen >> sb->s_blocksize_bits);
- if (inode && DQUOT_PREALLOC_BLOCK(inode,
+ if (inode && vfs_dq_prealloc_block(inode,
alloc_count > block_count ? block_count : alloc_count))
alloc_count = 0;
else if (alloc_count > block_count) {
@@ -806,7 +806,7 @@ static int udf_table_new_block(struct super_block *sb,
goal_eloc.logicalBlockNum++;
goal_elen -= sb->s_blocksize;
- if (inode && DQUOT_ALLOC_BLOCK(inode, 1)) {
+ if (inode && vfs_dq_alloc_block(inode, 1)) {
brelse(goal_epos.bh);
mutex_unlock(&sbi->s_alloc_mutex);
*err = -EDQUOT;
diff --git a/fs/udf/ialloc.c b/fs/udf/ialloc.c
index 31fc84297dd..47dbe5613f9 100644
--- a/fs/udf/ialloc.c
+++ b/fs/udf/ialloc.c
@@ -36,8 +36,8 @@ void udf_free_inode(struct inode *inode)
* Note: we must free any quota before locking the superblock,
* as writing the quota to disk may need the lock as well.
*/
- DQUOT_FREE_INODE(inode);
- DQUOT_DROP(inode);
+ vfs_dq_free_inode(inode);
+ vfs_dq_drop(inode);
clear_inode(inode);
@@ -154,8 +154,8 @@ struct inode *udf_new_inode(struct inode *dir, int mode, int *err)
insert_inode_hash(inode);
mark_inode_dirty(inode);
- if (DQUOT_ALLOC_INODE(inode)) {
- DQUOT_DROP(inode);
+ if (vfs_dq_alloc_inode(inode)) {
+ vfs_dq_drop(inode);
inode->i_flags |= S_NOQUOTA;
inode->i_nlink = 0;
iput(inode);
diff --git a/fs/ufs/balloc.c b/fs/ufs/balloc.c
index 0d9ada17373..54c16ec95df 100644
--- a/fs/ufs/balloc.c
+++ b/fs/ufs/balloc.c
@@ -85,7 +85,7 @@ void ufs_free_fragments(struct inode *inode, u64 fragment, unsigned count)
"bit already cleared for fragment %u", i);
}
- DQUOT_FREE_BLOCK (inode, count);
+ vfs_dq_free_block(inode, count);
fs32_add(sb, &ucg->cg_cs.cs_nffree, count);
@@ -195,7 +195,7 @@ do_more:
ubh_setblock(UCPI_UBH(ucpi), ucpi->c_freeoff, blkno);
if ((UFS_SB(sb)->s_flags & UFS_CG_MASK) == UFS_CG_44BSD)
ufs_clusteracct (sb, ucpi, blkno, 1);
- DQUOT_FREE_BLOCK(inode, uspi->s_fpb);
+ vfs_dq_free_block(inode, uspi->s_fpb);
fs32_add(sb, &ucg->cg_cs.cs_nbfree, 1);
uspi->cs_total.cs_nbfree++;
@@ -556,7 +556,7 @@ static u64 ufs_add_fragments(struct inode *inode, u64 fragment,
fs32_add(sb, &ucg->cg_frsum[fragsize - count], 1);
for (i = oldcount; i < newcount; i++)
ubh_clrbit (UCPI_UBH(ucpi), ucpi->c_freeoff, fragno + i);
- if(DQUOT_ALLOC_BLOCK(inode, count)) {
+ if (vfs_dq_alloc_block(inode, count)) {
*err = -EDQUOT;
return 0;
}
@@ -664,7 +664,7 @@ cg_found:
for (i = count; i < uspi->s_fpb; i++)
ubh_setbit (UCPI_UBH(ucpi), ucpi->c_freeoff, goal + i);
i = uspi->s_fpb - count;
- DQUOT_FREE_BLOCK(inode, i);
+ vfs_dq_free_block(inode, i);
fs32_add(sb, &ucg->cg_cs.cs_nffree, i);
uspi->cs_total.cs_nffree += i;
@@ -676,7 +676,7 @@ cg_found:
result = ufs_bitmap_search (sb, ucpi, goal, allocsize);
if (result == INVBLOCK)
return 0;
- if(DQUOT_ALLOC_BLOCK(inode, count)) {
+ if (vfs_dq_alloc_block(inode, count)) {
*err = -EDQUOT;
return 0;
}
@@ -747,7 +747,7 @@ gotit:
ubh_clrblock (UCPI_UBH(ucpi), ucpi->c_freeoff, blkno);
if ((UFS_SB(sb)->s_flags & UFS_CG_MASK) == UFS_CG_44BSD)
ufs_clusteracct (sb, ucpi, blkno, -1);
- if(DQUOT_ALLOC_BLOCK(inode, uspi->s_fpb)) {
+ if (vfs_dq_alloc_block(inode, uspi->s_fpb)) {
*err = -EDQUOT;
return INVBLOCK;
}
diff --git a/fs/ufs/ialloc.c b/fs/ufs/ialloc.c
index 6f5dcf00609..3527c00fef0 100644
--- a/fs/ufs/ialloc.c
+++ b/fs/ufs/ialloc.c
@@ -95,8 +95,8 @@ void ufs_free_inode (struct inode * inode)
is_directory = S_ISDIR(inode->i_mode);
- DQUOT_FREE_INODE(inode);
- DQUOT_DROP(inode);
+ vfs_dq_free_inode(inode);
+ vfs_dq_drop(inode);
clear_inode (inode);
@@ -355,8 +355,8 @@ cg_found:
unlock_super (sb);
- if (DQUOT_ALLOC_INODE(inode)) {
- DQUOT_DROP(inode);
+ if (vfs_dq_alloc_inode(inode)) {
+ vfs_dq_drop(inode);
err = -EDQUOT;
goto fail_without_unlock;
}
diff --git a/fs/ufs/inode.c b/fs/ufs/inode.c
index 39f87789856..3d2512c21f0 100644
--- a/fs/ufs/inode.c
+++ b/fs/ufs/inode.c
@@ -622,7 +622,6 @@ static int ufs1_read_inode(struct inode *inode, struct ufs_inode *ufs_inode)
struct ufs_inode_info *ufsi = UFS_I(inode);
struct super_block *sb = inode->i_sb;
mode_t mode;
- unsigned i;
/*
* Copy data to the in-core inode.
@@ -655,11 +654,12 @@ static int ufs1_read_inode(struct inode *inode, struct ufs_inode *ufs_inode)
if (S_ISCHR(mode) || S_ISBLK(mode) || inode->i_blocks) {
- for (i = 0; i < (UFS_NDADDR + UFS_NINDIR); i++)
- ufsi->i_u1.i_data[i] = ufs_inode->ui_u2.ui_addr.ui_db[i];
+ memcpy(ufsi->i_u1.i_data, &ufs_inode->ui_u2.ui_addr,
+ sizeof(ufs_inode->ui_u2.ui_addr));
} else {
- for (i = 0; i < (UFS_NDADDR + UFS_NINDIR) * 4; i++)
- ufsi->i_u1.i_symlink[i] = ufs_inode->ui_u2.ui_symlink[i];
+ memcpy(ufsi->i_u1.i_symlink, ufs_inode->ui_u2.ui_symlink,
+ sizeof(ufs_inode->ui_u2.ui_symlink) - 1);
+ ufsi->i_u1.i_symlink[sizeof(ufs_inode->ui_u2.ui_symlink) - 1] = 0;
}
return 0;
}
@@ -669,7 +669,6 @@ static int ufs2_read_inode(struct inode *inode, struct ufs2_inode *ufs2_inode)
struct ufs_inode_info *ufsi = UFS_I(inode);
struct super_block *sb = inode->i_sb;
mode_t mode;
- unsigned i;
UFSD("Reading ufs2 inode, ino %lu\n", inode->i_ino);
/*
@@ -704,12 +703,12 @@ static int ufs2_read_inode(struct inode *inode, struct ufs2_inode *ufs2_inode)
*/
if (S_ISCHR(mode) || S_ISBLK(mode) || inode->i_blocks) {
- for (i = 0; i < (UFS_NDADDR + UFS_NINDIR); i++)
- ufsi->i_u1.u2_i_data[i] =
- ufs2_inode->ui_u2.ui_addr.ui_db[i];
+ memcpy(ufsi->i_u1.u2_i_data, &ufs2_inode->ui_u2.ui_addr,
+ sizeof(ufs2_inode->ui_u2.ui_addr));
} else {
- for (i = 0; i < (UFS_NDADDR + UFS_NINDIR) * 4; i++)
- ufsi->i_u1.i_symlink[i] = ufs2_inode->ui_u2.ui_symlink[i];
+ memcpy(ufsi->i_u1.i_symlink, ufs2_inode->ui_u2.ui_symlink,
+ sizeof(ufs2_inode->ui_u2.ui_symlink) - 1);
+ ufsi->i_u1.i_symlink[sizeof(ufs2_inode->ui_u2.ui_symlink) - 1] = 0;
}
return 0;
}
@@ -781,7 +780,6 @@ static void ufs1_update_inode(struct inode *inode, struct ufs_inode *ufs_inode)
{
struct super_block *sb = inode->i_sb;
struct ufs_inode_info *ufsi = UFS_I(inode);
- unsigned i;
ufs_inode->ui_mode = cpu_to_fs16(sb, inode->i_mode);
ufs_inode->ui_nlink = cpu_to_fs16(sb, inode->i_nlink);
@@ -809,12 +807,12 @@ static void ufs1_update_inode(struct inode *inode, struct ufs_inode *ufs_inode)
/* ufs_inode->ui_u2.ui_addr.ui_db[0] = cpu_to_fs32(sb, inode->i_rdev); */
ufs_inode->ui_u2.ui_addr.ui_db[0] = ufsi->i_u1.i_data[0];
} else if (inode->i_blocks) {
- for (i = 0; i < (UFS_NDADDR + UFS_NINDIR); i++)
- ufs_inode->ui_u2.ui_addr.ui_db[i] = ufsi->i_u1.i_data[i];
+ memcpy(&ufs_inode->ui_u2.ui_addr, ufsi->i_u1.i_data,
+ sizeof(ufs_inode->ui_u2.ui_addr));
}
else {
- for (i = 0; i < (UFS_NDADDR + UFS_NINDIR) * 4; i++)
- ufs_inode->ui_u2.ui_symlink[i] = ufsi->i_u1.i_symlink[i];
+ memcpy(&ufs_inode->ui_u2.ui_symlink, ufsi->i_u1.i_symlink,
+ sizeof(ufs_inode->ui_u2.ui_symlink));
}
if (!inode->i_nlink)
@@ -825,7 +823,6 @@ static void ufs2_update_inode(struct inode *inode, struct ufs2_inode *ufs_inode)
{
struct super_block *sb = inode->i_sb;
struct ufs_inode_info *ufsi = UFS_I(inode);
- unsigned i;
UFSD("ENTER\n");
ufs_inode->ui_mode = cpu_to_fs16(sb, inode->i_mode);
@@ -850,11 +847,11 @@ static void ufs2_update_inode(struct inode *inode, struct ufs2_inode *ufs_inode)
/* ufs_inode->ui_u2.ui_addr.ui_db[0] = cpu_to_fs32(sb, inode->i_rdev); */
ufs_inode->ui_u2.ui_addr.ui_db[0] = ufsi->i_u1.u2_i_data[0];
} else if (inode->i_blocks) {
- for (i = 0; i < (UFS_NDADDR + UFS_NINDIR); i++)
- ufs_inode->ui_u2.ui_addr.ui_db[i] = ufsi->i_u1.u2_i_data[i];
+ memcpy(&ufs_inode->ui_u2.ui_addr, ufsi->i_u1.u2_i_data,
+ sizeof(ufs_inode->ui_u2.ui_addr));
} else {
- for (i = 0; i < (UFS_NDADDR + UFS_NINDIR) * 4; i++)
- ufs_inode->ui_u2.ui_symlink[i] = ufsi->i_u1.i_symlink[i];
+ memcpy(&ufs_inode->ui_u2.ui_symlink, ufsi->i_u1.i_symlink,
+ sizeof(ufs_inode->ui_u2.ui_symlink));
}
if (!inode->i_nlink)
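The ufs/inode.c hunks above replace the element-by-element copy loops with bounded memcpy() calls and, for fast symlinks, an explicit terminating NUL; together with the larger i_symlink buffer (see fs/ufs/ufs.h below) this avoids truncating long UFS2 symlink targets and guarantees the in-core string is terminated. A generic sketch of the idiom (names are illustrative):

	/* Bounded copy plus explicit NUL so later strlen()/readlink() cannot
	 * run off the end of the in-core buffer; src must provide at least
	 * dst_size - 1 bytes. */
	static void copy_fast_symlink(char *dst, size_t dst_size, const char *src)
	{
		memcpy(dst, src, dst_size - 1);
		dst[dst_size - 1] = '\0';
	}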
diff --git a/fs/ufs/namei.c b/fs/ufs/namei.c
index e3a9b1fac75..23119fe7ad6 100644
--- a/fs/ufs/namei.c
+++ b/fs/ufs/namei.c
@@ -147,7 +147,7 @@ static int ufs_symlink (struct inode * dir, struct dentry * dentry,
} else {
/* fast symlink */
inode->i_op = &ufs_fast_symlink_inode_operations;
- memcpy((char*)&UFS_I(inode)->i_u1.i_data,symname,l);
+ memcpy(UFS_I(inode)->i_u1.i_symlink, symname, l);
inode->i_size = l-1;
}
mark_inode_dirty(inode);
diff --git a/fs/ufs/super.c b/fs/ufs/super.c
index 261a1c2f22d..e1c1fc5ee23 100644
--- a/fs/ufs/super.c
+++ b/fs/ufs/super.c
@@ -636,6 +636,7 @@ static int ufs_fill_super(struct super_block *sb, void *data, int silent)
unsigned block_size, super_block_size;
unsigned flags;
unsigned super_block_offset;
+ unsigned maxsymlen;
int ret = -EINVAL;
uspi = NULL;
@@ -1069,6 +1070,16 @@ magic_found:
uspi->s_maxsymlinklen =
fs32_to_cpu(sb, usb3->fs_un2.fs_44.fs_maxsymlinklen);
+ if (uspi->fs_magic == UFS2_MAGIC)
+ maxsymlen = 2 * 4 * (UFS_NDADDR + UFS_NINDIR);
+ else
+ maxsymlen = 4 * (UFS_NDADDR + UFS_NINDIR);
+ if (uspi->s_maxsymlinklen > maxsymlen) {
+ ufs_warning(sb, __func__, "ufs_read_super: excessive maximum "
+ "fast symlink size (%u)\n", uspi->s_maxsymlinklen);
+ uspi->s_maxsymlinklen = maxsymlen;
+ }
+
inode = ufs_iget(sb, UFS_ROOTINO);
if (IS_ERR(inode)) {
ret = PTR_ERR(inode);
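The maxsymlen clamp added to ufs_fill_super() above and the i_symlink resize in fs/ufs/ufs.h below follow from the size of the in-inode block pointer area that fast symlinks reuse: with UFS_NDADDR + UFS_NINDIR = 12 + 3 = 15 pointer slots, a UFS1 inode holds 15 32-bit pointers (4 * 15 = 60 bytes) while a UFS2 inode holds 15 64-bit pointers (2 * 4 * 15 = 120 bytes). The in-core union buffer is therefore doubled to cover the UFS2 case, and an on-disk s_maxsymlinklen larger than the format allows is treated as suspect and clamped with a warning.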
diff --git a/fs/ufs/ufs.h b/fs/ufs/ufs.h
index 11c035168ea..69b3427d788 100644
--- a/fs/ufs/ufs.h
+++ b/fs/ufs/ufs.h
@@ -23,7 +23,7 @@ struct ufs_sb_info {
struct ufs_inode_info {
union {
__fs32 i_data[15];
- __u8 i_symlink[4*15];
+ __u8 i_symlink[2 * 4 * 15];
__fs64 u2_i_data[15];
} i_u1;
__u32 i_flags;