Diffstat (limited to 'fs')
-rw-r--r--  fs/Kconfig | 62
-rw-r--r--  fs/Makefile | 1
-rw-r--r--  fs/binfmt_elf.c | 1
-rw-r--r--  fs/binfmt_misc.c | 1
-rw-r--r--  fs/block_dev.c | 32
-rw-r--r--  fs/dcache.c | 75
-rw-r--r--  fs/eventpoll.c | 6
-rw-r--r--  fs/exec.c | 7
-rw-r--r--  fs/ext3/dir.c | 2
-rw-r--r--  fs/inotify.c | 991
-rw-r--r--  fs/inotify_user.c | 719
-rw-r--r--  fs/jffs/intrep.c | 15
-rw-r--r--  fs/jffs2/Makefile | 3
-rw-r--r--  fs/jffs2/README.Locking | 21
-rw-r--r--  fs/jffs2/acl.c | 485
-rw-r--r--  fs/jffs2/acl.h | 45
-rw-r--r--  fs/jffs2/build.c | 2
-rw-r--r--  fs/jffs2/compr.c | 2
-rw-r--r--  fs/jffs2/compr.h | 4
-rw-r--r--  fs/jffs2/debug.c | 14
-rw-r--r--  fs/jffs2/debug.h | 6
-rw-r--r--  fs/jffs2/dir.c | 121
-rw-r--r--  fs/jffs2/erase.c | 56
-rw-r--r--  fs/jffs2/file.c | 35
-rw-r--r--  fs/jffs2/fs.c | 63
-rw-r--r--  fs/jffs2/gc.c | 131
-rw-r--r--  fs/jffs2/histo.h | 3
-rw-r--r--  fs/jffs2/jffs2_fs_i.h | 55
-rw-r--r--  fs/jffs2/jffs2_fs_sb.h | 133
-rw-r--r--  fs/jffs2/malloc.c | 127
-rw-r--r--  fs/jffs2/nodelist.c | 183
-rw-r--r--  fs/jffs2/nodelist.h | 190
-rw-r--r--  fs/jffs2/nodemgmt.c | 196
-rw-r--r--  fs/jffs2/os-linux.h | 23
-rw-r--r--  fs/jffs2/readinode.c | 137
-rw-r--r--  fs/jffs2/scan.c | 442
-rw-r--r--  fs/jffs2/security.c | 82
-rw-r--r--  fs/jffs2/summary.c | 484
-rw-r--r--  fs/jffs2/summary.h | 64
-rw-r--r--  fs/jffs2/super.c | 20
-rw-r--r--  fs/jffs2/symlink.c | 7
-rw-r--r--  fs/jffs2/wbuf.c | 968
-rw-r--r--  fs/jffs2/write.c | 147
-rw-r--r--  fs/jffs2/xattr.c | 1238
-rw-r--r--  fs/jffs2/xattr.h | 116
-rw-r--r--  fs/jffs2/xattr_trusted.c | 52
-rw-r--r--  fs/jffs2/xattr_user.c | 52
-rw-r--r--  fs/locks.c | 57
-rw-r--r--  fs/namei.c | 2
-rw-r--r--  fs/ntfs/file.c | 13
-rw-r--r--  fs/open.c | 4
-rw-r--r--  fs/partitions/check.c | 4
-rw-r--r--  fs/proc/base.c | 5
-rw-r--r--  fs/super.c | 2
-rw-r--r--  fs/xattr.c | 4
-rw-r--r--  fs/xfs/Kconfig | 27
-rw-r--r--  fs/xfs/Makefile-linux-2.6 | 4
-rw-r--r--  fs/xfs/linux-2.6/kmem.h | 38
-rw-r--r--  fs/xfs/linux-2.6/mrlock.h | 4
-rw-r--r--  fs/xfs/linux-2.6/sema.h | 19
-rw-r--r--  fs/xfs/linux-2.6/xfs_aops.c | 65
-rw-r--r--  fs/xfs/linux-2.6/xfs_aops.h | 8
-rw-r--r--  fs/xfs/linux-2.6/xfs_export.c | 13
-rw-r--r--  fs/xfs/linux-2.6/xfs_file.c | 170
-rw-r--r--  fs/xfs/linux-2.6/xfs_fs_subr.c | 59
-rw-r--r--  fs/xfs/linux-2.6/xfs_globals.c | 1
-rw-r--r--  fs/xfs/linux-2.6/xfs_ioctl.c | 65
-rw-r--r--  fs/xfs/linux-2.6/xfs_ioctl32.c | 4
-rw-r--r--  fs/xfs/linux-2.6/xfs_iops.c | 117
-rw-r--r--  fs/xfs/linux-2.6/xfs_linux.h | 35
-rw-r--r--  fs/xfs/linux-2.6/xfs_lrw.c | 123
-rw-r--r--  fs/xfs/linux-2.6/xfs_lrw.h | 6
-rw-r--r--  fs/xfs/linux-2.6/xfs_super.c | 189
-rw-r--r--  fs/xfs/linux-2.6/xfs_super.h | 2
-rw-r--r--  fs/xfs/linux-2.6/xfs_sysctl.c | 5
-rw-r--r--  fs/xfs/linux-2.6/xfs_sysctl.h | 2
-rw-r--r--  fs/xfs/linux-2.6/xfs_vfs.c | 41
-rw-r--r--  fs/xfs/linux-2.6/xfs_vfs.h | 167
-rw-r--r--  fs/xfs/linux-2.6/xfs_vnode.c | 52
-rw-r--r--  fs/xfs/linux-2.6/xfs_vnode.h | 376
-rw-r--r--  fs/xfs/quota/xfs_dquot.c | 8
-rw-r--r--  fs/xfs/quota/xfs_dquot.h | 4
-rw-r--r--  fs/xfs/quota/xfs_dquot_item.c | 6
-rw-r--r--  fs/xfs/quota/xfs_qm.c | 8
-rw-r--r--  fs/xfs/quota/xfs_qm_bhv.c | 81
-rw-r--r--  fs/xfs/quota/xfs_qm_stats.c | 2
-rw-r--r--  fs/xfs/quota/xfs_qm_syscalls.c | 6
-rw-r--r--  fs/xfs/quota/xfs_trans_dquot.c | 2
-rw-r--r--  fs/xfs/support/debug.c | 4
-rw-r--r--  fs/xfs/support/debug.h | 3
-rw-r--r--  fs/xfs/xfs_acl.c | 71
-rw-r--r--  fs/xfs/xfs_acl.h | 16
-rw-r--r--  fs/xfs/xfs_alloc.c | 31
-rw-r--r--  fs/xfs/xfs_alloc.h | 2
-rw-r--r--  fs/xfs/xfs_alloc_btree.c | 2
-rw-r--r--  fs/xfs/xfs_attr.c | 66
-rw-r--r--  fs/xfs/xfs_attr.h | 14
-rw-r--r--  fs/xfs/xfs_attr_leaf.c | 4
-rw-r--r--  fs/xfs/xfs_bmap.c | 386
-rw-r--r--  fs/xfs/xfs_bmap.h | 22
-rw-r--r--  fs/xfs/xfs_bmap_btree.c | 12
-rw-r--r--  fs/xfs/xfs_btree.c | 2
-rw-r--r--  fs/xfs/xfs_buf_item.c | 9
-rw-r--r--  fs/xfs/xfs_cap.h | 10
-rw-r--r--  fs/xfs/xfs_da_btree.c | 194
-rw-r--r--  fs/xfs/xfs_da_btree.h | 4
-rw-r--r--  fs/xfs/xfs_dfrag.c | 84
-rw-r--r--  fs/xfs/xfs_dfrag.h | 3
-rw-r--r--  fs/xfs/xfs_dinode.h | 5
-rw-r--r--  fs/xfs/xfs_dir.c | 1217
-rw-r--r--  fs/xfs/xfs_dir.h | 142
-rw-r--r--  fs/xfs/xfs_dir2.c | 396
-rw-r--r--  fs/xfs/xfs_dir2.h | 32
-rw-r--r--  fs/xfs/xfs_dir2_block.c | 31
-rw-r--r--  fs/xfs/xfs_dir2_data.c | 5
-rw-r--r--  fs/xfs/xfs_dir2_data.h | 8
-rw-r--r--  fs/xfs/xfs_dir2_leaf.c | 14
-rw-r--r--  fs/xfs/xfs_dir2_node.c | 16
-rw-r--r--  fs/xfs/xfs_dir2_sf.c | 13
-rw-r--r--  fs/xfs/xfs_dir2_trace.c | 2
-rw-r--r--  fs/xfs/xfs_dir_leaf.c | 2213
-rw-r--r--  fs/xfs/xfs_dir_leaf.h | 231
-rw-r--r--  fs/xfs/xfs_dir_sf.h | 155
-rw-r--r--  fs/xfs/xfs_dmapi.h | 2
-rw-r--r--  fs/xfs/xfs_dmops.c | 1
-rw-r--r--  fs/xfs/xfs_error.c | 2
-rw-r--r--  fs/xfs/xfs_extfree_item.c | 57
-rw-r--r--  fs/xfs/xfs_extfree_item.h | 53
-rw-r--r--  fs/xfs/xfs_fs.h | 7
-rw-r--r--  fs/xfs/xfs_fsops.c | 20
-rw-r--r--  fs/xfs/xfs_ialloc.c | 5
-rw-r--r--  fs/xfs/xfs_ialloc_btree.c | 2
-rw-r--r--  fs/xfs/xfs_iget.c | 22
-rw-r--r--  fs/xfs/xfs_inode.c | 94
-rw-r--r--  fs/xfs/xfs_inode.h | 7
-rw-r--r--  fs/xfs/xfs_inode_item.c | 57
-rw-r--r--  fs/xfs/xfs_inode_item.h | 61
-rw-r--r--  fs/xfs/xfs_iocore.c | 7
-rw-r--r--  fs/xfs/xfs_iomap.c | 26
-rw-r--r--  fs/xfs/xfs_itable.c | 20
-rw-r--r--  fs/xfs/xfs_itable.h | 1
-rw-r--r--  fs/xfs/xfs_log.c | 21
-rw-r--r--  fs/xfs/xfs_log_recover.c | 113
-rw-r--r--  fs/xfs/xfs_mount.c | 54
-rw-r--r--  fs/xfs/xfs_mount.h | 70
-rw-r--r--  fs/xfs/xfs_qmops.c | 1
-rw-r--r--  fs/xfs/xfs_quota.h | 2
-rw-r--r--  fs/xfs/xfs_rename.c | 68
-rw-r--r--  fs/xfs/xfs_rtalloc.c | 16
-rw-r--r--  fs/xfs/xfs_rw.c | 124
-rw-r--r--  fs/xfs/xfs_rw.h | 10
-rw-r--r--  fs/xfs/xfs_trans.c | 48
-rw-r--r--  fs/xfs/xfs_trans.h | 7
-rw-r--r--  fs/xfs/xfs_trans_ail.c | 6
-rw-r--r--  fs/xfs/xfs_trans_buf.c | 12
-rw-r--r--  fs/xfs/xfs_trans_extfree.c | 1
-rw-r--r--  fs/xfs/xfs_trans_inode.c | 2
-rw-r--r--  fs/xfs/xfs_trans_item.c | 2
-rw-r--r--  fs/xfs/xfs_trans_space.h | 11
-rw-r--r--  fs/xfs/xfs_utils.c | 13
-rw-r--r--  fs/xfs/xfs_utils.h | 7
-rw-r--r--  fs/xfs/xfs_vfsops.c | 122
-rw-r--r--  fs/xfs/xfs_vnodeops.c | 295
163 files changed, 7840 insertions, 8447 deletions
diff --git a/fs/Kconfig b/fs/Kconfig
index f9b5842c8d2..20f9b557732 100644
--- a/fs/Kconfig
+++ b/fs/Kconfig
@@ -393,18 +393,30 @@ config INOTIFY
bool "Inotify file change notification support"
default y
---help---
- Say Y here to enable inotify support and the associated system
- calls. Inotify is a file change notification system and a
- replacement for dnotify. Inotify fixes numerous shortcomings in
- dnotify and introduces several new features. It allows monitoring
- of both files and directories via a single open fd. Other features
- include multiple file events, one-shot support, and unmount
+ Say Y here to enable inotify support. Inotify is a file change
+ notification system and a replacement for dnotify. Inotify fixes
+ numerous shortcomings in dnotify and introduces several new features
+ including multiple file events, one-shot support, and unmount
notification.
For more information, see Documentation/filesystems/inotify.txt
If unsure, say Y.
+config INOTIFY_USER
+ bool "Inotify support for userspace"
+ depends on INOTIFY
+ default y
+ ---help---
+ Say Y here to enable inotify support for userspace, including the
+ associated system calls. Inotify allows monitoring of both files and
+ directories via a single open fd. Events are read from the file
+ descriptor, which is also select()- and poll()-able.
+
+ For more information, see Documentation/filesystems/inotify.txt
+
+ If unsure, say Y.
+
config QUOTA
bool "Quota support"
help
@@ -1101,6 +1113,44 @@ config JFFS2_SUMMARY
If unsure, say 'N'.
+config JFFS2_FS_XATTR
+ bool "JFFS2 XATTR support (EXPERIMENTAL)"
+ depends on JFFS2_FS && EXPERIMENTAL && !JFFS2_FS_WRITEBUFFER
+ default n
+ help
+ Extended attributes are name:value pairs associated with inodes by
+ the kernel or by users (see the attr(5) manual page, or visit
+ <http://acl.bestbits.at/> for details).
+
+ If unsure, say N.
+
+config JFFS2_FS_POSIX_ACL
+ bool "JFFS2 POSIX Access Control Lists"
+ depends on JFFS2_FS_XATTR
+ default y
+ select FS_POSIX_ACL
+ help
+ Posix Access Control Lists (ACLs) support permissions for users and
+ groups beyond the owner/group/world scheme.
+
+ To learn more about Access Control Lists, visit the Posix ACLs for
+ Linux website <http://acl.bestbits.at/>.
+
+ If you don't know what Access Control Lists are, say N
+
+config JFFS2_FS_SECURITY
+ bool "JFFS2 Security Labels"
+ depends on JFFS2_FS_XATTR
+ default y
+ help
+ Security labels support alternative access control models
+ implemented by security modules like SELinux. This option
+ enables an extended attribute handler for file security
+ labels in the jffs2 filesystem.
+
+ If you are not using a security module that requires using
+ extended attributes for file security labels, say N.
+
config JFFS2_COMPRESSION_OPTIONS
bool "Advanced compression options for JFFS2"
depends on JFFS2_FS
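
The INOTIFY_USER entry above describes the userspace side of the split: events are consumed by read()ing a file descriptor that is also select()- and poll()-able. A minimal consumer sketch, assuming glibc's <sys/inotify.h> wrappers around the system calls this option enables:

#include <stdio.h>
#include <unistd.h>
#include <sys/inotify.h>

int main(int argc, char **argv)
{
	char buf[4096];
	ssize_t len;
	int fd, wd;

	if (argc != 2) {
		fprintf(stderr, "usage: %s <directory>\n", argv[0]);
		return 1;
	}

	fd = inotify_init();	/* one instance == one event queue */
	if (fd < 0) {
		perror("inotify_init");
		return 1;
	}

	wd = inotify_add_watch(fd, argv[1], IN_CREATE | IN_DELETE | IN_MODIFY);
	if (wd < 0) {
		perror("inotify_add_watch");
		return 1;
	}

	/* read() returns one or more packed struct inotify_event records */
	while ((len = read(fd, buf, sizeof(buf))) > 0) {
		char *p = buf;

		while (p < buf + len) {
			struct inotify_event *ev = (struct inotify_event *) p;

			printf("wd=%d mask=0x%08x name=%s\n", ev->wd, ev->mask,
			       ev->len ? ev->name : "");
			p += sizeof(*ev) + ev->len;
		}
	}
	close(fd);
	return 0;
}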
diff --git a/fs/Makefile b/fs/Makefile
index 078d3d1191a..d0ea6bfccf2 100644
--- a/fs/Makefile
+++ b/fs/Makefile
@@ -13,6 +13,7 @@ obj-y := open.o read_write.o file_table.o buffer.o bio.o super.o \
ioprio.o pnode.o drop_caches.o splice.o sync.o
obj-$(CONFIG_INOTIFY) += inotify.o
+obj-$(CONFIG_INOTIFY_USER) += inotify_user.o
obj-$(CONFIG_EPOLL) += eventpoll.o
obj-$(CONFIG_COMPAT) += compat.o compat_ioctl.o
diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c
index 537893a1601..8a04216e8b4 100644
--- a/fs/binfmt_elf.c
+++ b/fs/binfmt_elf.c
@@ -759,7 +759,6 @@ static int load_elf_binary(struct linux_binprm * bprm, struct pt_regs * regs)
/* Discard our unneeded old files struct */
if (files) {
- steal_locks(files);
put_files_struct(files);
files = NULL;
}
diff --git a/fs/binfmt_misc.c b/fs/binfmt_misc.c
index d73d75591a3..599f36fd0f6 100644
--- a/fs/binfmt_misc.c
+++ b/fs/binfmt_misc.c
@@ -203,7 +203,6 @@ static int load_misc_binary(struct linux_binprm *bprm, struct pt_regs *regs)
goto _error;
if (files) {
- steal_locks(files);
put_files_struct(files);
files = NULL;
}
diff --git a/fs/block_dev.c b/fs/block_dev.c
index f5958f413bd..44aaba202f7 100644
--- a/fs/block_dev.c
+++ b/fs/block_dev.c
@@ -414,21 +414,31 @@ EXPORT_SYMBOL(bdput);
static struct block_device *bd_acquire(struct inode *inode)
{
struct block_device *bdev;
+
spin_lock(&bdev_lock);
bdev = inode->i_bdev;
- if (bdev && igrab(bdev->bd_inode)) {
+ if (bdev) {
+ atomic_inc(&bdev->bd_inode->i_count);
spin_unlock(&bdev_lock);
return bdev;
}
spin_unlock(&bdev_lock);
+
bdev = bdget(inode->i_rdev);
if (bdev) {
spin_lock(&bdev_lock);
- if (inode->i_bdev)
- __bd_forget(inode);
- inode->i_bdev = bdev;
- inode->i_mapping = bdev->bd_inode->i_mapping;
- list_add(&inode->i_devices, &bdev->bd_inodes);
+ if (!inode->i_bdev) {
+ /*
+ * We take an additional bd_inode->i_count for inode,
+ * and it's released in clear_inode() of inode.
+ * So, we can access it via ->i_mapping always
+ * without igrab().
+ */
+ atomic_inc(&bdev->bd_inode->i_count);
+ inode->i_bdev = bdev;
+ inode->i_mapping = bdev->bd_inode->i_mapping;
+ list_add(&inode->i_devices, &bdev->bd_inodes);
+ }
spin_unlock(&bdev_lock);
}
return bdev;
@@ -438,10 +448,18 @@ static struct block_device *bd_acquire(struct inode *inode)
void bd_forget(struct inode *inode)
{
+ struct block_device *bdev = NULL;
+
spin_lock(&bdev_lock);
- if (inode->i_bdev)
+ if (inode->i_bdev) {
+ if (inode->i_sb != blockdev_superblock)
+ bdev = inode->i_bdev;
__bd_forget(inode);
+ }
spin_unlock(&bdev_lock);
+
+ if (bdev)
+ iput(bdev->bd_inode);
}
int bd_claim(struct block_device *bdev, void *holder)
diff --git a/fs/dcache.c b/fs/dcache.c
index 940d188e5d1..59dbc92c207 100644
--- a/fs/dcache.c
+++ b/fs/dcache.c
@@ -359,12 +359,13 @@ restart:
}
/*
- * Throw away a dentry - free the inode, dput the parent.
- * This requires that the LRU list has already been
- * removed.
+ * Throw away a dentry - free the inode, dput the parent. This requires that
+ * the LRU list has already been removed.
+ *
* Called with dcache_lock, drops it and then regains.
+ * Called with dentry->d_lock held, drops it.
*/
-static inline void prune_one_dentry(struct dentry * dentry)
+static void prune_one_dentry(struct dentry * dentry)
{
struct dentry * parent;
@@ -382,6 +383,8 @@ static inline void prune_one_dentry(struct dentry * dentry)
/**
* prune_dcache - shrink the dcache
* @count: number of entries to try and free
+ * @sb: if given, ignore dentries for other superblocks
+ * which are being unmounted.
*
* Shrink the dcache. This is done when we need
* more memory, or simply when we need to unmount
@@ -392,16 +395,29 @@ static inline void prune_one_dentry(struct dentry * dentry)
* all the dentries are in use.
*/
-static void prune_dcache(int count)
+static void prune_dcache(int count, struct super_block *sb)
{
spin_lock(&dcache_lock);
for (; count ; count--) {
struct dentry *dentry;
struct list_head *tmp;
+ struct rw_semaphore *s_umount;
cond_resched_lock(&dcache_lock);
tmp = dentry_unused.prev;
+ if (unlikely(sb)) {
+ /* Try to find a dentry for this sb, but don't try
+ * too hard, if they aren't near the tail they will
+ * be moved down again soon
+ */
+ int skip = count;
+ while (skip && tmp != &dentry_unused &&
+ list_entry(tmp, struct dentry, d_lru)->d_sb != sb) {
+ skip--;
+ tmp = tmp->prev;
+ }
+ }
if (tmp == &dentry_unused)
break;
list_del_init(tmp);
@@ -427,7 +443,45 @@ static void prune_dcache(int count)
spin_unlock(&dentry->d_lock);
continue;
}
- prune_one_dentry(dentry);
+ /*
+ * If the dentry is not DCACHED_REFERENCED, it is time
+ * to remove it from the dcache, provided the super block is
+ * NULL (which means we are trying to reclaim memory)
+ * or this dentry belongs to the same super block that
+ * we want to shrink.
+ */
+ /*
+ * If this dentry is for "my" filesystem, then I can prune it
+ * without taking the s_umount lock (I already hold it).
+ */
+ if (sb && dentry->d_sb == sb) {
+ prune_one_dentry(dentry);
+ continue;
+ }
+ /*
+ * ...otherwise we need to be sure this filesystem isn't being
+ * unmounted, otherwise we could race with
+ * generic_shutdown_super(), and end up holding a reference to
+ * an inode while the filesystem is unmounted.
+ * So we try to get s_umount, and make sure s_root isn't NULL.
+ * (Take a local copy of s_umount to avoid a use-after-free of
+ * `dentry').
+ */
+ s_umount = &dentry->d_sb->s_umount;
+ if (down_read_trylock(s_umount)) {
+ if (dentry->d_sb->s_root != NULL) {
+ prune_one_dentry(dentry);
+ up_read(s_umount);
+ continue;
+ }
+ up_read(s_umount);
+ }
+ spin_unlock(&dentry->d_lock);
+ /* Cannot remove the first dentry, and it isn't appropriate
+ * to move it to the head of the list, so give up, and try
+ * later
+ */
+ break;
}
spin_unlock(&dcache_lock);
}
@@ -630,7 +684,7 @@ void shrink_dcache_parent(struct dentry * parent)
int found;
while ((found = select_parent(parent)) != 0)
- prune_dcache(found);
+ prune_dcache(found, parent->d_sb);
}
/**
@@ -643,9 +697,10 @@ void shrink_dcache_parent(struct dentry * parent)
* done under dcache_lock.
*
*/
-void shrink_dcache_anon(struct hlist_head *head)
+void shrink_dcache_anon(struct super_block *sb)
{
struct hlist_node *lp;
+ struct hlist_head *head = &sb->s_anon;
int found;
do {
found = 0;
@@ -668,7 +723,7 @@ void shrink_dcache_anon(struct hlist_head *head)
}
}
spin_unlock(&dcache_lock);
- prune_dcache(found);
+ prune_dcache(found, sb);
} while(found);
}
@@ -689,7 +744,7 @@ static int shrink_dcache_memory(int nr, gfp_t gfp_mask)
if (nr) {
if (!(gfp_mask & __GFP_FS))
return -1;
- prune_dcache(nr);
+ prune_dcache(nr, NULL);
}
return (dentry_stat.nr_unused / 100) * sysctl_vfs_cache_pressure;
}
diff --git a/fs/eventpoll.c b/fs/eventpoll.c
index 1b4491cdd11..2695337d4d6 100644
--- a/fs/eventpoll.c
+++ b/fs/eventpoll.c
@@ -337,20 +337,20 @@ static inline int ep_cmp_ffd(struct epoll_filefd *p1,
/* Special initialization for the rb-tree node to detect linkage */
static inline void ep_rb_initnode(struct rb_node *n)
{
- n->rb_parent = n;
+ rb_set_parent(n, n);
}
/* Removes a node from the rb-tree and marks it for a fast is-linked check */
static inline void ep_rb_erase(struct rb_node *n, struct rb_root *r)
{
rb_erase(n, r);
- n->rb_parent = n;
+ rb_set_parent(n, n);
}
/* Fast check to verify that the item is linked to the main rb-tree */
static inline int ep_rb_linked(struct rb_node *n)
{
- return n->rb_parent != n;
+ return rb_parent(n) != n;
}
/*
diff --git a/fs/exec.c b/fs/exec.c
index 3a79d97ac23..0b88bf64614 100644
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -49,6 +49,7 @@
#include <linux/rmap.h>
#include <linux/acct.h>
#include <linux/cn_proc.h>
+#include <linux/audit.h>
#include <asm/uaccess.h>
#include <asm/mmu_context.h>
@@ -865,7 +866,6 @@ int flush_old_exec(struct linux_binprm * bprm)
bprm->mm = NULL; /* We're using it now */
/* This is the point of no return */
- steal_locks(files);
put_files_struct(files);
current->sas_ss_sp = current->sas_ss_size = 0;
@@ -1085,6 +1085,11 @@ int search_binary_handler(struct linux_binprm *bprm,struct pt_regs *regs)
/* kernel module loader fixup */
/* so we don't try to load run modprobe in kernel space. */
set_fs(USER_DS);
+
+ retval = audit_bprm(bprm);
+ if (retval)
+ return retval;
+
retval = -ENOENT;
for (try=0; try<2; try++) {
read_lock(&binfmt_lock);
diff --git a/fs/ext3/dir.c b/fs/ext3/dir.c
index f37528ed222..fbb0d4ed07d 100644
--- a/fs/ext3/dir.c
+++ b/fs/ext3/dir.c
@@ -284,7 +284,7 @@ static void free_rb_tree_fname(struct rb_root *root)
* beginning of the loop and try to free the parent
* node.
*/
- parent = n->rb_parent;
+ parent = rb_parent(n);
fname = rb_entry(n, struct fname, rb_hash);
while (fname) {
struct fname * old = fname;
diff --git a/fs/inotify.c b/fs/inotify.c
index 732ec4bd577..723836a1f71 100644
--- a/fs/inotify.c
+++ b/fs/inotify.c
@@ -5,7 +5,10 @@
* John McCutchan <ttb@tentacle.dhs.org>
* Robert Love <rml@novell.com>
*
+ * Kernel API added by: Amy Griffis <amy.griffis@hp.com>
+ *
* Copyright (C) 2005 John McCutchan
+ * Copyright 2006 Hewlett-Packard Development Company, L.P.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
@@ -20,35 +23,17 @@
#include <linux/module.h>
#include <linux/kernel.h>
-#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/idr.h>
#include <linux/slab.h>
#include <linux/fs.h>
-#include <linux/file.h>
-#include <linux/mount.h>
-#include <linux/namei.h>
-#include <linux/poll.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/writeback.h>
#include <linux/inotify.h>
-#include <linux/syscalls.h>
-
-#include <asm/ioctls.h>
static atomic_t inotify_cookie;
-static kmem_cache_t *watch_cachep __read_mostly;
-static kmem_cache_t *event_cachep __read_mostly;
-
-static struct vfsmount *inotify_mnt __read_mostly;
-
-/* these are configurable via /proc/sys/fs/inotify/ */
-int inotify_max_user_instances __read_mostly;
-int inotify_max_user_watches __read_mostly;
-int inotify_max_queued_events __read_mostly;
-
/*
* Lock ordering:
*
@@ -56,327 +41,108 @@ int inotify_max_queued_events __read_mostly;
* iprune_mutex (synchronize shrink_icache_memory())
* inode_lock (protects the super_block->s_inodes list)
* inode->inotify_mutex (protects inode->inotify_watches and watches->i_list)
- * inotify_dev->mutex (protects inotify_device and watches->d_list)
+ * inotify_handle->mutex (protects inotify_handle and watches->h_list)
+ *
+ * The inode->inotify_mutex and inotify_handle->mutex are held during execution
+ * of a caller's event handler. Thus, the caller must not hold any locks
+ * taken in their event handler while calling any of the published inotify
+ * interfaces.
*/
/*
- * Lifetimes of the three main data structures--inotify_device, inode, and
+ * Lifetimes of the three main data structures--inotify_handle, inode, and
* inotify_watch--are managed by reference count.
*
- * inotify_device: Lifetime is from inotify_init() until release. Additional
- * references can bump the count via get_inotify_dev() and drop the count via
- * put_inotify_dev().
+ * inotify_handle: Lifetime is from inotify_init() to inotify_destroy().
+ * Additional references can bump the count via get_inotify_handle() and drop
+ * the count via put_inotify_handle().
*
- * inotify_watch: Lifetime is from create_watch() to destory_watch().
- * Additional references can bump the count via get_inotify_watch() and drop
- * the count via put_inotify_watch().
+ * inotify_watch: for inotify's purposes, lifetime is from inotify_add_watch()
+ * to remove_watch_no_event(). Additional references can bump the count via
+ * get_inotify_watch() and drop the count via put_inotify_watch(). The caller
+ * is responsible for the final put after receiving IN_IGNORED, or when using
+ * IN_ONESHOT after receiving the first event. Inotify does the final put if
+ * inotify_destroy() is called.
*
* inode: Pinned so long as the inode is associated with a watch, from
- * create_watch() to put_inotify_watch().
+ * inotify_add_watch() to the final put_inotify_watch().
*/
/*
- * struct inotify_device - represents an inotify instance
+ * struct inotify_handle - represents an inotify instance
*
* This structure is protected by the mutex 'mutex'.
*/
-struct inotify_device {
- wait_queue_head_t wq; /* wait queue for i/o */
+struct inotify_handle {
struct idr idr; /* idr mapping wd -> watch */
struct mutex mutex; /* protects this bad boy */
- struct list_head events; /* list of queued events */
struct list_head watches; /* list of watches */
atomic_t count; /* reference count */
- struct user_struct *user; /* user who opened this dev */
- unsigned int queue_size; /* size of the queue (bytes) */
- unsigned int event_count; /* number of pending events */
- unsigned int max_events; /* maximum number of events */
u32 last_wd; /* the last wd allocated */
+ const struct inotify_operations *in_ops; /* inotify caller operations */
};
-/*
- * struct inotify_kernel_event - An inotify event, originating from a watch and
- * queued for user-space. A list of these is attached to each instance of the
- * device. In read(), this list is walked and all events that can fit in the
- * buffer are returned.
- *
- * Protected by dev->mutex of the device in which we are queued.
- */
-struct inotify_kernel_event {
- struct inotify_event event; /* the user-space event */
- struct list_head list; /* entry in inotify_device's list */
- char *name; /* filename, if any */
-};
-
-/*
- * struct inotify_watch - represents a watch request on a specific inode
- *
- * d_list is protected by dev->mutex of the associated watch->dev.
- * i_list and mask are protected by inode->inotify_mutex of the associated inode.
- * dev, inode, and wd are never written to once the watch is created.
- */
-struct inotify_watch {
- struct list_head d_list; /* entry in inotify_device's list */
- struct list_head i_list; /* entry in inode's list */
- atomic_t count; /* reference count */
- struct inotify_device *dev; /* associated device */
- struct inode *inode; /* associated inode */
- s32 wd; /* watch descriptor */
- u32 mask; /* event mask for this watch */
-};
-
-#ifdef CONFIG_SYSCTL
-
-#include <linux/sysctl.h>
-
-static int zero;
-
-ctl_table inotify_table[] = {
- {
- .ctl_name = INOTIFY_MAX_USER_INSTANCES,
- .procname = "max_user_instances",
- .data = &inotify_max_user_instances,
- .maxlen = sizeof(int),
- .mode = 0644,
- .proc_handler = &proc_dointvec_minmax,
- .strategy = &sysctl_intvec,
- .extra1 = &zero,
- },
- {
- .ctl_name = INOTIFY_MAX_USER_WATCHES,
- .procname = "max_user_watches",
- .data = &inotify_max_user_watches,
- .maxlen = sizeof(int),
- .mode = 0644,
- .proc_handler = &proc_dointvec_minmax,
- .strategy = &sysctl_intvec,
- .extra1 = &zero,
- },
- {
- .ctl_name = INOTIFY_MAX_QUEUED_EVENTS,
- .procname = "max_queued_events",
- .data = &inotify_max_queued_events,
- .maxlen = sizeof(int),
- .mode = 0644,
- .proc_handler = &proc_dointvec_minmax,
- .strategy = &sysctl_intvec,
- .extra1 = &zero
- },
- { .ctl_name = 0 }
-};
-#endif /* CONFIG_SYSCTL */
-
-static inline void get_inotify_dev(struct inotify_device *dev)
+static inline void get_inotify_handle(struct inotify_handle *ih)
{
- atomic_inc(&dev->count);
+ atomic_inc(&ih->count);
}
-static inline void put_inotify_dev(struct inotify_device *dev)
+static inline void put_inotify_handle(struct inotify_handle *ih)
{
- if (atomic_dec_and_test(&dev->count)) {
- atomic_dec(&dev->user->inotify_devs);
- free_uid(dev->user);
- idr_destroy(&dev->idr);
- kfree(dev);
+ if (atomic_dec_and_test(&ih->count)) {
+ idr_destroy(&ih->idr);
+ kfree(ih);
}
}
-static inline void get_inotify_watch(struct inotify_watch *watch)
+/**
+ * get_inotify_watch - grab a reference to an inotify_watch
+ * @watch: watch to grab
+ */
+void get_inotify_watch(struct inotify_watch *watch)
{
atomic_inc(&watch->count);
}
+EXPORT_SYMBOL_GPL(get_inotify_watch);
-/*
+/**
* put_inotify_watch - decrements the ref count on a given watch. cleans up
- * the watch and its references if the count reaches zero.
+ * watch references if the count reaches zero. inotify_watch is freed by
+ * inotify callers via the destroy_watch() op.
+ * @watch: watch to release
*/
-static inline void put_inotify_watch(struct inotify_watch *watch)
+void put_inotify_watch(struct inotify_watch *watch)
{
if (atomic_dec_and_test(&watch->count)) {
- put_inotify_dev(watch->dev);
- iput(watch->inode);
- kmem_cache_free(watch_cachep, watch);
- }
-}
-
-/*
- * kernel_event - create a new kernel event with the given parameters
- *
- * This function can sleep.
- */
-static struct inotify_kernel_event * kernel_event(s32 wd, u32 mask, u32 cookie,
- const char *name)
-{
- struct inotify_kernel_event *kevent;
-
- kevent = kmem_cache_alloc(event_cachep, GFP_KERNEL);
- if (unlikely(!kevent))
- return NULL;
-
- /* we hand this out to user-space, so zero it just in case */
- memset(&kevent->event, 0, sizeof(struct inotify_event));
-
- kevent->event.wd = wd;
- kevent->event.mask = mask;
- kevent->event.cookie = cookie;
-
- INIT_LIST_HEAD(&kevent->list);
-
- if (name) {
- size_t len, rem, event_size = sizeof(struct inotify_event);
-
- /*
- * We need to pad the filename so as to properly align an
- * array of inotify_event structures. Because the structure is
- * small and the common case is a small filename, we just round
- * up to the next multiple of the structure's sizeof. This is
- * simple and safe for all architectures.
- */
- len = strlen(name) + 1;
- rem = event_size - len;
- if (len > event_size) {
- rem = event_size - (len % event_size);
- if (len % event_size == 0)
- rem = 0;
- }
-
- kevent->name = kmalloc(len + rem, GFP_KERNEL);
- if (unlikely(!kevent->name)) {
- kmem_cache_free(event_cachep, kevent);
- return NULL;
- }
- memcpy(kevent->name, name, len);
- if (rem)
- memset(kevent->name + len, 0, rem);
- kevent->event.len = len + rem;
- } else {
- kevent->event.len = 0;
- kevent->name = NULL;
- }
-
- return kevent;
-}
-
-/*
- * inotify_dev_get_event - return the next event in the given dev's queue
- *
- * Caller must hold dev->mutex.
- */
-static inline struct inotify_kernel_event *
-inotify_dev_get_event(struct inotify_device *dev)
-{
- return list_entry(dev->events.next, struct inotify_kernel_event, list);
-}
-
-/*
- * inotify_dev_queue_event - add a new event to the given device
- *
- * Caller must hold dev->mutex. Can sleep (calls kernel_event()).
- */
-static void inotify_dev_queue_event(struct inotify_device *dev,
- struct inotify_watch *watch, u32 mask,
- u32 cookie, const char *name)
-{
- struct inotify_kernel_event *kevent, *last;
-
- /* coalescing: drop this event if it is a dupe of the previous */
- last = inotify_dev_get_event(dev);
- if (last && last->event.mask == mask && last->event.wd == watch->wd &&
- last->event.cookie == cookie) {
- const char *lastname = last->name;
-
- if (!name && !lastname)
- return;
- if (name && lastname && !strcmp(lastname, name))
- return;
- }
-
- /* the queue overflowed and we already sent the Q_OVERFLOW event */
- if (unlikely(dev->event_count > dev->max_events))
- return;
-
- /* if the queue overflows, we need to notify user space */
- if (unlikely(dev->event_count == dev->max_events))
- kevent = kernel_event(-1, IN_Q_OVERFLOW, cookie, NULL);
- else
- kevent = kernel_event(watch->wd, mask, cookie, name);
-
- if (unlikely(!kevent))
- return;
-
- /* queue the event and wake up anyone waiting */
- dev->event_count++;
- dev->queue_size += sizeof(struct inotify_event) + kevent->event.len;
- list_add_tail(&kevent->list, &dev->events);
- wake_up_interruptible(&dev->wq);
-}
-
-/*
- * remove_kevent - cleans up and ultimately frees the given kevent
- *
- * Caller must hold dev->mutex.
- */
-static void remove_kevent(struct inotify_device *dev,
- struct inotify_kernel_event *kevent)
-{
- list_del(&kevent->list);
-
- dev->event_count--;
- dev->queue_size -= sizeof(struct inotify_event) + kevent->event.len;
-
- kfree(kevent->name);
- kmem_cache_free(event_cachep, kevent);
-}
+ struct inotify_handle *ih = watch->ih;
-/*
- * inotify_dev_event_dequeue - destroy an event on the given device
- *
- * Caller must hold dev->mutex.
- */
-static void inotify_dev_event_dequeue(struct inotify_device *dev)
-{
- if (!list_empty(&dev->events)) {
- struct inotify_kernel_event *kevent;
- kevent = inotify_dev_get_event(dev);
- remove_kevent(dev, kevent);
+ iput(watch->inode);
+ ih->in_ops->destroy_watch(watch);
+ put_inotify_handle(ih);
}
}
+EXPORT_SYMBOL_GPL(put_inotify_watch);
/*
- * inotify_dev_get_wd - returns the next WD for use by the given dev
+ * inotify_handle_get_wd - returns the next WD for use by the given handle
*
- * Callers must hold dev->mutex. This function can sleep.
+ * Callers must hold ih->mutex. This function can sleep.
*/
-static int inotify_dev_get_wd(struct inotify_device *dev,
- struct inotify_watch *watch)
+static int inotify_handle_get_wd(struct inotify_handle *ih,
+ struct inotify_watch *watch)
{
int ret;
do {
- if (unlikely(!idr_pre_get(&dev->idr, GFP_KERNEL)))
+ if (unlikely(!idr_pre_get(&ih->idr, GFP_KERNEL)))
return -ENOSPC;
- ret = idr_get_new_above(&dev->idr, watch, dev->last_wd+1, &watch->wd);
+ ret = idr_get_new_above(&ih->idr, watch, ih->last_wd+1, &watch->wd);
} while (ret == -EAGAIN);
- return ret;
-}
+ if (likely(!ret))
+ ih->last_wd = watch->wd;
-/*
- * find_inode - resolve a user-given path to a specific inode and return a nd
- */
-static int find_inode(const char __user *dirname, struct nameidata *nd,
- unsigned flags)
-{
- int error;
-
- error = __user_walk(dirname, flags, nd);
- if (error)
- return error;
- /* you can only watch an inode if you have read permissions on it */
- error = vfs_permission(nd, MAY_READ);
- if (error)
- path_release(nd);
- return error;
+ return ret;
}
/*
@@ -422,67 +188,18 @@ static void set_dentry_child_flags(struct inode *inode, int watched)
}
/*
- * create_watch - creates a watch on the given device.
- *
- * Callers must hold dev->mutex. Calls inotify_dev_get_wd() so may sleep.
- * Both 'dev' and 'inode' (by way of nameidata) need to be pinned.
- */
-static struct inotify_watch *create_watch(struct inotify_device *dev,
- u32 mask, struct inode *inode)
-{
- struct inotify_watch *watch;
- int ret;
-
- if (atomic_read(&dev->user->inotify_watches) >=
- inotify_max_user_watches)
- return ERR_PTR(-ENOSPC);
-
- watch = kmem_cache_alloc(watch_cachep, GFP_KERNEL);
- if (unlikely(!watch))
- return ERR_PTR(-ENOMEM);
-
- ret = inotify_dev_get_wd(dev, watch);
- if (unlikely(ret)) {
- kmem_cache_free(watch_cachep, watch);
- return ERR_PTR(ret);
- }
-
- dev->last_wd = watch->wd;
- watch->mask = mask;
- atomic_set(&watch->count, 0);
- INIT_LIST_HEAD(&watch->d_list);
- INIT_LIST_HEAD(&watch->i_list);
-
- /* save a reference to device and bump the count to make it official */
- get_inotify_dev(dev);
- watch->dev = dev;
-
- /*
- * Save a reference to the inode and bump the ref count to make it
- * official. We hold a reference to nameidata, which makes this safe.
- */
- watch->inode = igrab(inode);
-
- /* bump our own count, corresponding to our entry in dev->watches */
- get_inotify_watch(watch);
-
- atomic_inc(&dev->user->inotify_watches);
-
- return watch;
-}
-
-/*
- * inotify_find_dev - find the watch associated with the given inode and dev
+ * inotify_find_handle - find the watch associated with the given inode and
+ * handle
*
* Callers must hold inode->inotify_mutex.
*/
-static struct inotify_watch *inode_find_dev(struct inode *inode,
- struct inotify_device *dev)
+static struct inotify_watch *inode_find_handle(struct inode *inode,
+ struct inotify_handle *ih)
{
struct inotify_watch *watch;
list_for_each_entry(watch, &inode->inotify_watches, i_list) {
- if (watch->dev == dev)
+ if (watch->ih == ih)
return watch;
}
@@ -490,40 +207,40 @@ static struct inotify_watch *inode_find_dev(struct inode *inode,
}
/*
- * remove_watch_no_event - remove_watch() without the IN_IGNORED event.
+ * remove_watch_no_event - remove watch without the IN_IGNORED event.
+ *
+ * Callers must hold both inode->inotify_mutex and ih->mutex.
*/
static void remove_watch_no_event(struct inotify_watch *watch,
- struct inotify_device *dev)
+ struct inotify_handle *ih)
{
list_del(&watch->i_list);
- list_del(&watch->d_list);
+ list_del(&watch->h_list);
if (!inotify_inode_watched(watch->inode))
set_dentry_child_flags(watch->inode, 0);
- atomic_dec(&dev->user->inotify_watches);
- idr_remove(&dev->idr, watch->wd);
- put_inotify_watch(watch);
+ idr_remove(&ih->idr, watch->wd);
}
-/*
- * remove_watch - Remove a watch from both the device and the inode. Sends
- * the IN_IGNORED event to the given device signifying that the inode is no
- * longer watched.
- *
- * Callers must hold both inode->inotify_mutex and dev->mutex. We drop a
- * reference to the inode before returning.
+/**
+ * inotify_remove_watch_locked - Remove a watch from both the handle and the
+ * inode. Sends the IN_IGNORED event signifying that the inode is no longer
+ * watched. May be invoked from a caller's event handler.
+ * @ih: inotify handle associated with watch
+ * @watch: watch to remove
*
- * The inode is not iput() so as to remain atomic. If the inode needs to be
- * iput(), the call returns one. Otherwise, it returns zero.
+ * Callers must hold both inode->inotify_mutex and ih->mutex.
*/
-static void remove_watch(struct inotify_watch *watch,struct inotify_device *dev)
+void inotify_remove_watch_locked(struct inotify_handle *ih,
+ struct inotify_watch *watch)
{
- inotify_dev_queue_event(dev, watch, IN_IGNORED, 0, NULL);
- remove_watch_no_event(watch, dev);
+ remove_watch_no_event(watch, ih);
+ ih->in_ops->handle_event(watch, watch->wd, IN_IGNORED, 0, NULL, NULL);
}
+EXPORT_SYMBOL_GPL(inotify_remove_watch_locked);
-/* Kernel API */
+/* Kernel API for producing events */
/*
* inotify_d_instantiate - instantiate dcache entry for inode
@@ -563,9 +280,10 @@ void inotify_d_move(struct dentry *entry)
* @mask: event mask describing this event
* @cookie: cookie for synchronization, or zero
* @name: filename, if any
+ * @n_inode: inode associated with name
*/
void inotify_inode_queue_event(struct inode *inode, u32 mask, u32 cookie,
- const char *name)
+ const char *name, struct inode *n_inode)
{
struct inotify_watch *watch, *next;
@@ -576,14 +294,13 @@ void inotify_inode_queue_event(struct inode *inode, u32 mask, u32 cookie,
list_for_each_entry_safe(watch, next, &inode->inotify_watches, i_list) {
u32 watch_mask = watch->mask;
if (watch_mask & mask) {
- struct inotify_device *dev = watch->dev;
- get_inotify_watch(watch);
- mutex_lock(&dev->mutex);
- inotify_dev_queue_event(dev, watch, mask, cookie, name);
+ struct inotify_handle *ih = watch->ih;
+ mutex_lock(&ih->mutex);
if (watch_mask & IN_ONESHOT)
- remove_watch_no_event(watch, dev);
- mutex_unlock(&dev->mutex);
- put_inotify_watch(watch);
+ remove_watch_no_event(watch, ih);
+ ih->in_ops->handle_event(watch, watch->wd, mask, cookie,
+ name, n_inode);
+ mutex_unlock(&ih->mutex);
}
}
mutex_unlock(&inode->inotify_mutex);
@@ -613,7 +330,8 @@ void inotify_dentry_parent_queue_event(struct dentry *dentry, u32 mask,
if (inotify_inode_watched(inode)) {
dget(parent);
spin_unlock(&dentry->d_lock);
- inotify_inode_queue_event(inode, mask, cookie, name);
+ inotify_inode_queue_event(inode, mask, cookie, name,
+ dentry->d_inode);
dput(parent);
} else
spin_unlock(&dentry->d_lock);
@@ -665,7 +383,7 @@ void inotify_unmount_inodes(struct list_head *list)
need_iput_tmp = need_iput;
need_iput = NULL;
- /* In case the remove_watch() drops a reference. */
+ /* In case inotify_remove_watch_locked() drops a reference. */
if (inode != need_iput_tmp)
__iget(inode);
else
@@ -694,11 +412,12 @@ void inotify_unmount_inodes(struct list_head *list)
mutex_lock(&inode->inotify_mutex);
watches = &inode->inotify_watches;
list_for_each_entry_safe(watch, next_w, watches, i_list) {
- struct inotify_device *dev = watch->dev;
- mutex_lock(&dev->mutex);
- inotify_dev_queue_event(dev, watch, IN_UNMOUNT,0,NULL);
- remove_watch(watch, dev);
- mutex_unlock(&dev->mutex);
+ struct inotify_handle *ih = watch->ih;
+ mutex_lock(&ih->mutex);
+ ih->in_ops->handle_event(watch, watch->wd, IN_UNMOUNT, 0,
+ NULL, NULL);
+ inotify_remove_watch_locked(ih, watch);
+ mutex_unlock(&ih->mutex);
}
mutex_unlock(&inode->inotify_mutex);
iput(inode);
@@ -718,432 +437,292 @@ void inotify_inode_is_dead(struct inode *inode)
mutex_lock(&inode->inotify_mutex);
list_for_each_entry_safe(watch, next, &inode->inotify_watches, i_list) {
- struct inotify_device *dev = watch->dev;
- mutex_lock(&dev->mutex);
- remove_watch(watch, dev);
- mutex_unlock(&dev->mutex);
+ struct inotify_handle *ih = watch->ih;
+ mutex_lock(&ih->mutex);
+ inotify_remove_watch_locked(ih, watch);
+ mutex_unlock(&ih->mutex);
}
mutex_unlock(&inode->inotify_mutex);
}
EXPORT_SYMBOL_GPL(inotify_inode_is_dead);
-/* Device Interface */
+/* Kernel Consumer API */
-static unsigned int inotify_poll(struct file *file, poll_table *wait)
+/**
+ * inotify_init - allocate and initialize an inotify instance
+ * @ops: caller's inotify operations
+ */
+struct inotify_handle *inotify_init(const struct inotify_operations *ops)
{
- struct inotify_device *dev = file->private_data;
- int ret = 0;
+ struct inotify_handle *ih;
- poll_wait(file, &dev->wq, wait);
- mutex_lock(&dev->mutex);
- if (!list_empty(&dev->events))
- ret = POLLIN | POLLRDNORM;
- mutex_unlock(&dev->mutex);
+ ih = kmalloc(sizeof(struct inotify_handle), GFP_KERNEL);
+ if (unlikely(!ih))
+ return ERR_PTR(-ENOMEM);
- return ret;
+ idr_init(&ih->idr);
+ INIT_LIST_HEAD(&ih->watches);
+ mutex_init(&ih->mutex);
+ ih->last_wd = 0;
+ ih->in_ops = ops;
+ atomic_set(&ih->count, 0);
+ get_inotify_handle(ih);
+
+ return ih;
}
+EXPORT_SYMBOL_GPL(inotify_init);
-static ssize_t inotify_read(struct file *file, char __user *buf,
- size_t count, loff_t *pos)
+/**
+ * inotify_init_watch - initialize an inotify watch
+ * @watch: watch to initialize
+ */
+void inotify_init_watch(struct inotify_watch *watch)
{
- size_t event_size = sizeof (struct inotify_event);
- struct inotify_device *dev;
- char __user *start;
- int ret;
- DEFINE_WAIT(wait);
-
- start = buf;
- dev = file->private_data;
-
- while (1) {
- int events;
-
- prepare_to_wait(&dev->wq, &wait, TASK_INTERRUPTIBLE);
-
- mutex_lock(&dev->mutex);
- events = !list_empty(&dev->events);
- mutex_unlock(&dev->mutex);
- if (events) {
- ret = 0;
- break;
- }
-
- if (file->f_flags & O_NONBLOCK) {
- ret = -EAGAIN;
- break;
- }
-
- if (signal_pending(current)) {
- ret = -EINTR;
- break;
- }
-
- schedule();
- }
-
- finish_wait(&dev->wq, &wait);
- if (ret)
- return ret;
-
- mutex_lock(&dev->mutex);
- while (1) {
- struct inotify_kernel_event *kevent;
-
- ret = buf - start;
- if (list_empty(&dev->events))
- break;
-
- kevent = inotify_dev_get_event(dev);
- if (event_size + kevent->event.len > count)
- break;
-
- if (copy_to_user(buf, &kevent->event, event_size)) {
- ret = -EFAULT;
- break;
- }
- buf += event_size;
- count -= event_size;
-
- if (kevent->name) {
- if (copy_to_user(buf, kevent->name, kevent->event.len)){
- ret = -EFAULT;
- break;
- }
- buf += kevent->event.len;
- count -= kevent->event.len;
- }
-
- remove_kevent(dev, kevent);
- }
- mutex_unlock(&dev->mutex);
-
- return ret;
+ INIT_LIST_HEAD(&watch->h_list);
+ INIT_LIST_HEAD(&watch->i_list);
+ atomic_set(&watch->count, 0);
+ get_inotify_watch(watch); /* initial get */
}
+EXPORT_SYMBOL_GPL(inotify_init_watch);
-static int inotify_release(struct inode *ignored, struct file *file)
+/**
+ * inotify_destroy - clean up and destroy an inotify instance
+ * @ih: inotify handle
+ */
+void inotify_destroy(struct inotify_handle *ih)
{
- struct inotify_device *dev = file->private_data;
-
/*
- * Destroy all of the watches on this device. Unfortunately, not very
+ * Destroy all of the watches for this handle. Unfortunately, not very
* pretty. We cannot do a simple iteration over the list, because we
* do not know the inode until we iterate to the watch. But we need to
- * hold inode->inotify_mutex before dev->mutex. The following works.
+ * hold inode->inotify_mutex before ih->mutex. The following works.
*/
while (1) {
struct inotify_watch *watch;
struct list_head *watches;
struct inode *inode;
- mutex_lock(&dev->mutex);
- watches = &dev->watches;
+ mutex_lock(&ih->mutex);
+ watches = &ih->watches;
if (list_empty(watches)) {
- mutex_unlock(&dev->mutex);
+ mutex_unlock(&ih->mutex);
break;
}
- watch = list_entry(watches->next, struct inotify_watch, d_list);
+ watch = list_entry(watches->next, struct inotify_watch, h_list);
get_inotify_watch(watch);
- mutex_unlock(&dev->mutex);
+ mutex_unlock(&ih->mutex);
inode = watch->inode;
mutex_lock(&inode->inotify_mutex);
- mutex_lock(&dev->mutex);
+ mutex_lock(&ih->mutex);
/* make sure we didn't race with another list removal */
- if (likely(idr_find(&dev->idr, watch->wd)))
- remove_watch_no_event(watch, dev);
+ if (likely(idr_find(&ih->idr, watch->wd))) {
+ remove_watch_no_event(watch, ih);
+ put_inotify_watch(watch);
+ }
- mutex_unlock(&dev->mutex);
+ mutex_unlock(&ih->mutex);
mutex_unlock(&inode->inotify_mutex);
put_inotify_watch(watch);
}
- /* destroy all of the events on this device */
- mutex_lock(&dev->mutex);
- while (!list_empty(&dev->events))
- inotify_dev_event_dequeue(dev);
- mutex_unlock(&dev->mutex);
-
- /* free this device: the put matching the get in inotify_init() */
- put_inotify_dev(dev);
-
- return 0;
+ /* free this handle: the put matching the get in inotify_init() */
+ put_inotify_handle(ih);
}
+EXPORT_SYMBOL_GPL(inotify_destroy);
-/*
- * inotify_ignore - remove a given wd from this inotify instance.
+/**
+ * inotify_find_watch - find an existing watch for an (ih,inode) pair
+ * @ih: inotify handle
+ * @inode: inode to watch
+ * @watchp: pointer to existing inotify_watch
*
- * Can sleep.
+ * Caller must pin given inode (via nameidata).
*/
-static int inotify_ignore(struct inotify_device *dev, s32 wd)
+s32 inotify_find_watch(struct inotify_handle *ih, struct inode *inode,
+ struct inotify_watch **watchp)
{
- struct inotify_watch *watch;
- struct inode *inode;
-
- mutex_lock(&dev->mutex);
- watch = idr_find(&dev->idr, wd);
- if (unlikely(!watch)) {
- mutex_unlock(&dev->mutex);
- return -EINVAL;
- }
- get_inotify_watch(watch);
- inode = watch->inode;
- mutex_unlock(&dev->mutex);
+ struct inotify_watch *old;
+ int ret = -ENOENT;
mutex_lock(&inode->inotify_mutex);
- mutex_lock(&dev->mutex);
+ mutex_lock(&ih->mutex);
- /* make sure that we did not race */
- if (likely(idr_find(&dev->idr, wd) == watch))
- remove_watch(watch, dev);
+ old = inode_find_handle(inode, ih);
+ if (unlikely(old)) {
+ get_inotify_watch(old); /* caller must put watch */
+ *watchp = old;
+ ret = old->wd;
+ }
- mutex_unlock(&dev->mutex);
+ mutex_unlock(&ih->mutex);
mutex_unlock(&inode->inotify_mutex);
- put_inotify_watch(watch);
- return 0;
+ return ret;
}
+EXPORT_SYMBOL_GPL(inotify_find_watch);
-static long inotify_ioctl(struct file *file, unsigned int cmd,
- unsigned long arg)
+/**
+ * inotify_find_update_watch - find and update the mask of an existing watch
+ * @ih: inotify handle
+ * @inode: inode's watch to update
+ * @mask: mask of events to watch
+ *
+ * Caller must pin given inode (via nameidata).
+ */
+s32 inotify_find_update_watch(struct inotify_handle *ih, struct inode *inode,
+ u32 mask)
{
- struct inotify_device *dev;
- void __user *p;
- int ret = -ENOTTY;
-
- dev = file->private_data;
- p = (void __user *) arg;
-
- switch (cmd) {
- case FIONREAD:
- ret = put_user(dev->queue_size, (int __user *) p);
- break;
- }
-
- return ret;
-}
+ struct inotify_watch *old;
+ int mask_add = 0;
+ int ret;
-static const struct file_operations inotify_fops = {
- .poll = inotify_poll,
- .read = inotify_read,
- .release = inotify_release,
- .unlocked_ioctl = inotify_ioctl,
- .compat_ioctl = inotify_ioctl,
-};
+ if (mask & IN_MASK_ADD)
+ mask_add = 1;
-asmlinkage long sys_inotify_init(void)
-{
- struct inotify_device *dev;
- struct user_struct *user;
- struct file *filp;
- int fd, ret;
-
- fd = get_unused_fd();
- if (fd < 0)
- return fd;
-
- filp = get_empty_filp();
- if (!filp) {
- ret = -ENFILE;
- goto out_put_fd;
- }
+ /* don't allow invalid bits: we don't want flags set */
+ mask &= IN_ALL_EVENTS | IN_ONESHOT;
+ if (unlikely(!mask))
+ return -EINVAL;
- user = get_uid(current->user);
- if (unlikely(atomic_read(&user->inotify_devs) >=
- inotify_max_user_instances)) {
- ret = -EMFILE;
- goto out_free_uid;
- }
+ mutex_lock(&inode->inotify_mutex);
+ mutex_lock(&ih->mutex);
- dev = kmalloc(sizeof(struct inotify_device), GFP_KERNEL);
- if (unlikely(!dev)) {
- ret = -ENOMEM;
- goto out_free_uid;
+ /*
+ * Handle the case of re-adding a watch on an (inode,ih) pair that we
+ * are already watching. We just update the mask and return its wd.
+ */
+ old = inode_find_handle(inode, ih);
+ if (unlikely(!old)) {
+ ret = -ENOENT;
+ goto out;
}
- filp->f_op = &inotify_fops;
- filp->f_vfsmnt = mntget(inotify_mnt);
- filp->f_dentry = dget(inotify_mnt->mnt_root);
- filp->f_mapping = filp->f_dentry->d_inode->i_mapping;
- filp->f_mode = FMODE_READ;
- filp->f_flags = O_RDONLY;
- filp->private_data = dev;
-
- idr_init(&dev->idr);
- INIT_LIST_HEAD(&dev->events);
- INIT_LIST_HEAD(&dev->watches);
- init_waitqueue_head(&dev->wq);
- mutex_init(&dev->mutex);
- dev->event_count = 0;
- dev->queue_size = 0;
- dev->max_events = inotify_max_queued_events;
- dev->user = user;
- dev->last_wd = 0;
- atomic_set(&dev->count, 0);
-
- get_inotify_dev(dev);
- atomic_inc(&user->inotify_devs);
- fd_install(fd, filp);
-
- return fd;
-out_free_uid:
- free_uid(user);
- put_filp(filp);
-out_put_fd:
- put_unused_fd(fd);
+ if (mask_add)
+ old->mask |= mask;
+ else
+ old->mask = mask;
+ ret = old->wd;
+out:
+ mutex_unlock(&ih->mutex);
+ mutex_unlock(&inode->inotify_mutex);
return ret;
}
+EXPORT_SYMBOL_GPL(inotify_find_update_watch);
-asmlinkage long sys_inotify_add_watch(int fd, const char __user *path, u32 mask)
+/**
+ * inotify_add_watch - add a watch to an inotify instance
+ * @ih: inotify handle
+ * @watch: caller allocated watch structure
+ * @inode: inode to watch
+ * @mask: mask of events to watch
+ *
+ * Caller must pin given inode (via nameidata).
+ * Caller must ensure it only calls inotify_add_watch() once per watch.
+ * Calls inotify_handle_get_wd() so may sleep.
+ */
+s32 inotify_add_watch(struct inotify_handle *ih, struct inotify_watch *watch,
+ struct inode *inode, u32 mask)
{
- struct inotify_watch *watch, *old;
- struct inode *inode;
- struct inotify_device *dev;
- struct nameidata nd;
- struct file *filp;
- int ret, fput_needed;
- int mask_add = 0;
- unsigned flags = 0;
-
- filp = fget_light(fd, &fput_needed);
- if (unlikely(!filp))
- return -EBADF;
-
- /* verify that this is indeed an inotify instance */
- if (unlikely(filp->f_op != &inotify_fops)) {
- ret = -EINVAL;
- goto fput_and_out;
- }
-
- if (!(mask & IN_DONT_FOLLOW))
- flags |= LOOKUP_FOLLOW;
- if (mask & IN_ONLYDIR)
- flags |= LOOKUP_DIRECTORY;
-
- ret = find_inode(path, &nd, flags);
- if (unlikely(ret))
- goto fput_and_out;
+ int ret = 0;
- /* inode held in place by reference to nd; dev by fget on fd */
- inode = nd.dentry->d_inode;
- dev = filp->private_data;
+ /* don't allow invalid bits: we don't want flags set */
+ mask &= IN_ALL_EVENTS | IN_ONESHOT;
+ if (unlikely(!mask))
+ return -EINVAL;
+ watch->mask = mask;
mutex_lock(&inode->inotify_mutex);
- mutex_lock(&dev->mutex);
-
- if (mask & IN_MASK_ADD)
- mask_add = 1;
+ mutex_lock(&ih->mutex);
- /* don't let user-space set invalid bits: we don't want flags set */
- mask &= IN_ALL_EVENTS | IN_ONESHOT;
- if (unlikely(!mask)) {
- ret = -EINVAL;
+ /* Initialize a new watch */
+ ret = inotify_handle_get_wd(ih, watch);
+ if (unlikely(ret))
goto out;
- }
+ ret = watch->wd;
+
+ /* save a reference to handle and bump the count to make it official */
+ get_inotify_handle(ih);
+ watch->ih = ih;
/*
- * Handle the case of re-adding a watch on an (inode,dev) pair that we
- * are already watching. We just update the mask and return its wd.
+ * Save a reference to the inode and bump the ref count to make it
+ * official. We hold a reference to nameidata, which makes this safe.
*/
- old = inode_find_dev(inode, dev);
- if (unlikely(old)) {
- if (mask_add)
- old->mask |= mask;
- else
- old->mask = mask;
- ret = old->wd;
- goto out;
- }
-
- watch = create_watch(dev, mask, inode);
- if (unlikely(IS_ERR(watch))) {
- ret = PTR_ERR(watch);
- goto out;
- }
+ watch->inode = igrab(inode);
if (!inotify_inode_watched(inode))
set_dentry_child_flags(inode, 1);
- /* Add the watch to the device's and the inode's list */
- list_add(&watch->d_list, &dev->watches);
+ /* Add the watch to the handle's and the inode's list */
+ list_add(&watch->h_list, &ih->watches);
list_add(&watch->i_list, &inode->inotify_watches);
- ret = watch->wd;
out:
- mutex_unlock(&dev->mutex);
+ mutex_unlock(&ih->mutex);
mutex_unlock(&inode->inotify_mutex);
- path_release(&nd);
-fput_and_out:
- fput_light(filp, fput_needed);
return ret;
}
+EXPORT_SYMBOL_GPL(inotify_add_watch);
-asmlinkage long sys_inotify_rm_watch(int fd, u32 wd)
+/**
+ * inotify_rm_wd - remove a watch from an inotify instance
+ * @ih: inotify handle
+ * @wd: watch descriptor to remove
+ *
+ * Can sleep.
+ */
+int inotify_rm_wd(struct inotify_handle *ih, u32 wd)
{
- struct file *filp;
- struct inotify_device *dev;
- int ret, fput_needed;
-
- filp = fget_light(fd, &fput_needed);
- if (unlikely(!filp))
- return -EBADF;
+ struct inotify_watch *watch;
+ struct inode *inode;
- /* verify that this is indeed an inotify instance */
- if (unlikely(filp->f_op != &inotify_fops)) {
- ret = -EINVAL;
- goto out;
+ mutex_lock(&ih->mutex);
+ watch = idr_find(&ih->idr, wd);
+ if (unlikely(!watch)) {
+ mutex_unlock(&ih->mutex);
+ return -EINVAL;
}
+ get_inotify_watch(watch);
+ inode = watch->inode;
+ mutex_unlock(&ih->mutex);
- dev = filp->private_data;
- ret = inotify_ignore(dev, wd);
+ mutex_lock(&inode->inotify_mutex);
+ mutex_lock(&ih->mutex);
-out:
- fput_light(filp, fput_needed);
- return ret;
+ /* make sure that we did not race */
+ if (likely(idr_find(&ih->idr, wd) == watch))
+ inotify_remove_watch_locked(ih, watch);
+
+ mutex_unlock(&ih->mutex);
+ mutex_unlock(&inode->inotify_mutex);
+ put_inotify_watch(watch);
+
+ return 0;
}
+EXPORT_SYMBOL_GPL(inotify_rm_wd);
-static struct super_block *
-inotify_get_sb(struct file_system_type *fs_type, int flags,
- const char *dev_name, void *data)
+/**
+ * inotify_rm_watch - remove a watch from an inotify instance
+ * @ih: inotify handle
+ * @watch: watch to remove
+ *
+ * Can sleep.
+ */
+int inotify_rm_watch(struct inotify_handle *ih,
+ struct inotify_watch *watch)
{
- return get_sb_pseudo(fs_type, "inotify", NULL, 0xBAD1DEA);
+ return inotify_rm_wd(ih, watch->wd);
}
-
-static struct file_system_type inotify_fs_type = {
- .name = "inotifyfs",
- .get_sb = inotify_get_sb,
- .kill_sb = kill_anon_super,
-};
+EXPORT_SYMBOL_GPL(inotify_rm_watch);
/*
- * inotify_setup - Our initialization function. Note that we cannnot return
- * error because we have compiled-in VFS hooks. So an (unlikely) failure here
- * must result in panic().
+ * inotify_setup - core initialization function
*/
static int __init inotify_setup(void)
{
- int ret;
-
- ret = register_filesystem(&inotify_fs_type);
- if (unlikely(ret))
- panic("inotify: register_filesystem returned %d!\n", ret);
-
- inotify_mnt = kern_mount(&inotify_fs_type);
- if (IS_ERR(inotify_mnt))
- panic("inotify: kern_mount ret %ld!\n", PTR_ERR(inotify_mnt));
-
- inotify_max_queued_events = 16384;
- inotify_max_user_instances = 128;
- inotify_max_user_watches = 8192;
-
atomic_set(&inotify_cookie, 0);
- watch_cachep = kmem_cache_create("inotify_watch_cache",
- sizeof(struct inotify_watch),
- 0, SLAB_PANIC, NULL, NULL);
- event_cachep = kmem_cache_create("inotify_event_cache",
- sizeof(struct inotify_kernel_event),
- 0, SLAB_PANIC, NULL, NULL);
-
return 0;
}
diff --git a/fs/inotify_user.c b/fs/inotify_user.c
new file mode 100644
index 00000000000..9e9931e2bad
--- /dev/null
+++ b/fs/inotify_user.c
@@ -0,0 +1,719 @@
+/*
+ * fs/inotify_user.c - inotify support for userspace
+ *
+ * Authors:
+ * John McCutchan <ttb@tentacle.dhs.org>
+ * Robert Love <rml@novell.com>
+ *
+ * Copyright (C) 2005 John McCutchan
+ * Copyright 2006 Hewlett-Packard Development Company, L.P.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2, or (at your option) any
+ * later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/fs.h>
+#include <linux/file.h>
+#include <linux/mount.h>
+#include <linux/namei.h>
+#include <linux/poll.h>
+#include <linux/init.h>
+#include <linux/list.h>
+#include <linux/inotify.h>
+#include <linux/syscalls.h>
+
+#include <asm/ioctls.h>
+
+static kmem_cache_t *watch_cachep __read_mostly;
+static kmem_cache_t *event_cachep __read_mostly;
+
+static struct vfsmount *inotify_mnt __read_mostly;
+
+/* these are configurable via /proc/sys/fs/inotify/ */
+int inotify_max_user_instances __read_mostly;
+int inotify_max_user_watches __read_mostly;
+int inotify_max_queued_events __read_mostly;
+
+/*
+ * Lock ordering:
+ *
+ * inotify_dev->up_mutex (ensures we don't re-add the same watch)
+ * inode->inotify_mutex (protects inode's watch list)
+ * inotify_handle->mutex (protects inotify_handle's watch list)
+ * inotify_dev->ev_mutex (protects device's event queue)
+ */
+
+/*
+ * Lifetimes of the main data structures:
+ *
+ * inotify_device: Lifetime is managed by reference count, from
+ * sys_inotify_init() until release. Additional references can bump the count
+ * via get_inotify_dev() and drop the count via put_inotify_dev().
+ *
+ * inotify_user_watch: Lifetime is from create_watch() to the receipt of an
+ * IN_IGNORED event from inotify, or when using IN_ONESHOT, to receipt of the
+ * first event, or to inotify_destroy().
+ */
+
+/*
+ * struct inotify_device - represents an inotify instance
+ *
+ * This structure is protected by 'ev_mutex' and 'up_mutex'.
+ */
+struct inotify_device {
+ wait_queue_head_t wq; /* wait queue for i/o */
+ struct mutex ev_mutex; /* protects event queue */
+ struct mutex up_mutex; /* synchronizes watch updates */
+ struct list_head events; /* list of queued events */
+ atomic_t count; /* reference count */
+ struct user_struct *user; /* user who opened this dev */
+ struct inotify_handle *ih; /* inotify handle */
+ unsigned int queue_size; /* size of the queue (bytes) */
+ unsigned int event_count; /* number of pending events */
+ unsigned int max_events; /* maximum number of events */
+};
+
+/*
+ * struct inotify_kernel_event - An inotify event, originating from a watch and
+ * queued for user-space. A list of these is attached to each instance of the
+ * device. In read(), this list is walked and all events that can fit in the
+ * buffer are returned.
+ *
+ * Protected by dev->ev_mutex of the device in which we are queued.
+ */
+struct inotify_kernel_event {
+ struct inotify_event event; /* the user-space event */
+ struct list_head list; /* entry in inotify_device's list */
+ char *name; /* filename, if any */
+};
+
+/*
+ * struct inotify_user_watch - our version of an inotify_watch, we add
+ * a reference to the associated inotify_device.
+ */
+struct inotify_user_watch {
+ struct inotify_device *dev; /* associated device */
+ struct inotify_watch wdata; /* inotify watch data */
+};
+
+#ifdef CONFIG_SYSCTL
+
+#include <linux/sysctl.h>
+
+static int zero;
+
+ctl_table inotify_table[] = {
+ {
+ .ctl_name = INOTIFY_MAX_USER_INSTANCES,
+ .procname = "max_user_instances",
+ .data = &inotify_max_user_instances,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = &proc_dointvec_minmax,
+ .strategy = &sysctl_intvec,
+ .extra1 = &zero,
+ },
+ {
+ .ctl_name = INOTIFY_MAX_USER_WATCHES,
+ .procname = "max_user_watches",
+ .data = &inotify_max_user_watches,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = &proc_dointvec_minmax,
+ .strategy = &sysctl_intvec,
+ .extra1 = &zero,
+ },
+ {
+ .ctl_name = INOTIFY_MAX_QUEUED_EVENTS,
+ .procname = "max_queued_events",
+ .data = &inotify_max_queued_events,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = &proc_dointvec_minmax,
+ .strategy = &sysctl_intvec,
+ .extra1 = &zero
+ },
+ { .ctl_name = 0 }
+};
+#endif /* CONFIG_SYSCTL */
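
The ctl_table above exposes the three limits under /proc/sys/fs/inotify/, as noted in the comment near the top of this file. A hypothetical userspace helper for reading one of them; the helper name and buffer size are illustrative only:

#include <stdio.h>

static long inotify_limit(const char *name)
{
	char path[128];
	long val = -1;
	FILE *f;

	snprintf(path, sizeof(path), "/proc/sys/fs/inotify/%s", name);
	f = fopen(path, "r");
	if (f) {
		if (fscanf(f, "%ld", &val) != 1)
			val = -1;
		fclose(f);
	}
	return val;	/* e.g. inotify_limit("max_user_watches") */
}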
+
+static inline void get_inotify_dev(struct inotify_device *dev)
+{
+ atomic_inc(&dev->count);
+}
+
+static inline void put_inotify_dev(struct inotify_device *dev)
+{
+ if (atomic_dec_and_test(&dev->count)) {
+ atomic_dec(&dev->user->inotify_devs);
+ free_uid(dev->user);
+ kfree(dev);
+ }
+}
+
+/*
+ * free_inotify_user_watch - cleans up the watch and its references
+ */
+static void free_inotify_user_watch(struct inotify_watch *w)
+{
+ struct inotify_user_watch *watch;
+ struct inotify_device *dev;
+
+ watch = container_of(w, struct inotify_user_watch, wdata);
+ dev = watch->dev;
+
+ atomic_dec(&dev->user->inotify_watches);
+ put_inotify_dev(dev);
+ kmem_cache_free(watch_cachep, watch);
+}
+
+/*
+ * kernel_event - create a new kernel event with the given parameters
+ *
+ * This function can sleep.
+ */
+static struct inotify_kernel_event * kernel_event(s32 wd, u32 mask, u32 cookie,
+ const char *name)
+{
+ struct inotify_kernel_event *kevent;
+
+ kevent = kmem_cache_alloc(event_cachep, GFP_KERNEL);
+ if (unlikely(!kevent))
+ return NULL;
+
+ /* we hand this out to user-space, so zero it just in case */
+ memset(&kevent->event, 0, sizeof(struct inotify_event));
+
+ kevent->event.wd = wd;
+ kevent->event.mask = mask;
+ kevent->event.cookie = cookie;
+
+ INIT_LIST_HEAD(&kevent->list);
+
+ if (name) {
+ size_t len, rem, event_size = sizeof(struct inotify_event);
+
+ /*
+ * We need to pad the filename so as to properly align an
+ * array of inotify_event structures. Because the structure is
+ * small and the common case is a small filename, we just round
+ * up to the next multiple of the structure's sizeof. This is
+ * simple and safe for all architectures.
+ */
+ len = strlen(name) + 1;
+ rem = event_size - len;
+ if (len > event_size) {
+ rem = event_size - (len % event_size);
+ if (len % event_size == 0)
+ rem = 0;
+ }
+
+ kevent->name = kmalloc(len + rem, GFP_KERNEL);
+ if (unlikely(!kevent->name)) {
+ kmem_cache_free(event_cachep, kevent);
+ return NULL;
+ }
+ memcpy(kevent->name, name, len);
+ if (rem)
+ memset(kevent->name + len, 0, rem);
+ kevent->event.len = len + rem;
+ } else {
+ kevent->event.len = 0;
+ kevent->name = NULL;
+ }
+
+ return kevent;
+}
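The padding above keeps every queued record a multiple of sizeof(struct inotify_event), so the records returned by a single read() stay naturally aligned. A stand-alone sketch of the same rounding with worked values; the event_size of 16 bytes is an assumption for illustration only:

    /* Same arithmetic kernel_event() performs on the name length.
     * With event_size == 16:  len 2 -> 16,  len 16 -> 16,  len 21 -> 32. */
    static unsigned int padded_len(unsigned int len, unsigned int event_size)
    {
            unsigned int rem = event_size - len;

            if (len > event_size) {
                    rem = event_size - (len % event_size);
                    if (len % event_size == 0)
                            rem = 0;
            }
            return len + rem;
    }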
+
+/*
+ * inotify_dev_get_event - return the next event in the given dev's queue
+ *
+ * Caller must hold dev->ev_mutex.
+ */
+static inline struct inotify_kernel_event *
+inotify_dev_get_event(struct inotify_device *dev)
+{
+ return list_entry(dev->events.next, struct inotify_kernel_event, list);
+}
+
+/*
+ * inotify_dev_queue_event - event handler registered with core inotify, adds
+ * a new event to the given device
+ *
+ * Can sleep (calls kernel_event()).
+ */
+static void inotify_dev_queue_event(struct inotify_watch *w, u32 wd, u32 mask,
+ u32 cookie, const char *name,
+ struct inode *ignored)
+{
+ struct inotify_user_watch *watch;
+ struct inotify_device *dev;
+ struct inotify_kernel_event *kevent, *last;
+
+ watch = container_of(w, struct inotify_user_watch, wdata);
+ dev = watch->dev;
+
+ mutex_lock(&dev->ev_mutex);
+
+ /* we can safely put the watch as we don't reference it while
+ * generating the event
+ */
+ if (mask & IN_IGNORED || mask & IN_ONESHOT)
+ put_inotify_watch(w); /* final put */
+
+ /* coalescing: drop this event if it is a dupe of the previous */
+ last = inotify_dev_get_event(dev);
+ if (last && last->event.mask == mask && last->event.wd == wd &&
+ last->event.cookie == cookie) {
+ const char *lastname = last->name;
+
+ if (!name && !lastname)
+ goto out;
+ if (name && lastname && !strcmp(lastname, name))
+ goto out;
+ }
+
+ /* the queue overflowed and we already sent the Q_OVERFLOW event */
+ if (unlikely(dev->event_count > dev->max_events))
+ goto out;
+
+ /* if the queue overflows, we need to notify user space */
+ if (unlikely(dev->event_count == dev->max_events))
+ kevent = kernel_event(-1, IN_Q_OVERFLOW, cookie, NULL);
+ else
+ kevent = kernel_event(wd, mask, cookie, name);
+
+ if (unlikely(!kevent))
+ goto out;
+
+ /* queue the event and wake up anyone waiting */
+ dev->event_count++;
+ dev->queue_size += sizeof(struct inotify_event) + kevent->event.len;
+ list_add_tail(&kevent->list, &dev->events);
+ wake_up_interruptible(&dev->wq);
+
+out:
+ mutex_unlock(&dev->ev_mutex);
+}
+
+/*
+ * remove_kevent - cleans up and ultimately frees the given kevent
+ *
+ * Caller must hold dev->ev_mutex.
+ */
+static void remove_kevent(struct inotify_device *dev,
+ struct inotify_kernel_event *kevent)
+{
+ list_del(&kevent->list);
+
+ dev->event_count--;
+ dev->queue_size -= sizeof(struct inotify_event) + kevent->event.len;
+
+ kfree(kevent->name);
+ kmem_cache_free(event_cachep, kevent);
+}
+
+/*
+ * inotify_dev_event_dequeue - destroy an event on the given device
+ *
+ * Caller must hold dev->ev_mutex.
+ */
+static void inotify_dev_event_dequeue(struct inotify_device *dev)
+{
+ if (!list_empty(&dev->events)) {
+ struct inotify_kernel_event *kevent;
+ kevent = inotify_dev_get_event(dev);
+ remove_kevent(dev, kevent);
+ }
+}
+
+/*
+ * find_inode - resolve a user-given path to a specific inode and return an nd
+ */
+static int find_inode(const char __user *dirname, struct nameidata *nd,
+ unsigned flags)
+{
+ int error;
+
+ error = __user_walk(dirname, flags, nd);
+ if (error)
+ return error;
+ /* you can only watch an inode if you have read permissions on it */
+ error = vfs_permission(nd, MAY_READ);
+ if (error)
+ path_release(nd);
+ return error;
+}
+
+/*
+ * create_watch - creates a watch on the given device.
+ *
+ * Callers must hold dev->up_mutex.
+ */
+static int create_watch(struct inotify_device *dev, struct inode *inode,
+ u32 mask)
+{
+ struct inotify_user_watch *watch;
+ int ret;
+
+ if (atomic_read(&dev->user->inotify_watches) >=
+ inotify_max_user_watches)
+ return -ENOSPC;
+
+ watch = kmem_cache_alloc(watch_cachep, GFP_KERNEL);
+ if (unlikely(!watch))
+ return -ENOMEM;
+
+ /* save a reference to device and bump the count to make it official */
+ get_inotify_dev(dev);
+ watch->dev = dev;
+
+ atomic_inc(&dev->user->inotify_watches);
+
+ inotify_init_watch(&watch->wdata);
+ ret = inotify_add_watch(dev->ih, &watch->wdata, inode, mask);
+ if (ret < 0)
+ free_inotify_user_watch(&watch->wdata);
+
+ return ret;
+}
+
+/* Device Interface */
+
+static unsigned int inotify_poll(struct file *file, poll_table *wait)
+{
+ struct inotify_device *dev = file->private_data;
+ int ret = 0;
+
+ poll_wait(file, &dev->wq, wait);
+ mutex_lock(&dev->ev_mutex);
+ if (!list_empty(&dev->events))
+ ret = POLLIN | POLLRDNORM;
+ mutex_unlock(&dev->ev_mutex);
+
+ return ret;
+}
+
+static ssize_t inotify_read(struct file *file, char __user *buf,
+ size_t count, loff_t *pos)
+{
+ size_t event_size = sizeof (struct inotify_event);
+ struct inotify_device *dev;
+ char __user *start;
+ int ret;
+ DEFINE_WAIT(wait);
+
+ start = buf;
+ dev = file->private_data;
+
+ while (1) {
+ int events;
+
+ prepare_to_wait(&dev->wq, &wait, TASK_INTERRUPTIBLE);
+
+ mutex_lock(&dev->ev_mutex);
+ events = !list_empty(&dev->events);
+ mutex_unlock(&dev->ev_mutex);
+ if (events) {
+ ret = 0;
+ break;
+ }
+
+ if (file->f_flags & O_NONBLOCK) {
+ ret = -EAGAIN;
+ break;
+ }
+
+ if (signal_pending(current)) {
+ ret = -EINTR;
+ break;
+ }
+
+ schedule();
+ }
+
+ finish_wait(&dev->wq, &wait);
+ if (ret)
+ return ret;
+
+ mutex_lock(&dev->ev_mutex);
+ while (1) {
+ struct inotify_kernel_event *kevent;
+
+ ret = buf - start;
+ if (list_empty(&dev->events))
+ break;
+
+ kevent = inotify_dev_get_event(dev);
+ if (event_size + kevent->event.len > count)
+ break;
+
+ if (copy_to_user(buf, &kevent->event, event_size)) {
+ ret = -EFAULT;
+ break;
+ }
+ buf += event_size;
+ count -= event_size;
+
+ if (kevent->name) {
+ if (copy_to_user(buf, kevent->name, kevent->event.len)){
+ ret = -EFAULT;
+ break;
+ }
+ buf += kevent->event.len;
+ count -= kevent->event.len;
+ }
+
+ remove_kevent(dev, kevent);
+ }
+ mutex_unlock(&dev->ev_mutex);
+
+ return ret;
+}
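Each record handed to user space is sizeof(struct inotify_event) plus event.len bytes of NUL-padded name, so a reader has to walk the buffer record by record. A minimal user-space sketch, assuming the <sys/inotify.h> definitions shipped by libc (not part of this patch):

    #include <stdio.h>
    #include <unistd.h>
    #include <sys/inotify.h>

    /* Print every event contained in one read() worth of data. */
    static void drain(int fd)
    {
            char buf[4096];
            ssize_t n = read(fd, buf, sizeof(buf));
            ssize_t off = 0;

            while (n > 0 && off + (ssize_t)sizeof(struct inotify_event) <= n) {
                    struct inotify_event *ev = (struct inotify_event *)(buf + off);

                    printf("wd=%d mask=%#x cookie=%u name=%s\n",
                           ev->wd, ev->mask, ev->cookie,
                           ev->len ? ev->name : "");
                    off += sizeof(*ev) + ev->len;
            }
    }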
+
+static int inotify_release(struct inode *ignored, struct file *file)
+{
+ struct inotify_device *dev = file->private_data;
+
+ inotify_destroy(dev->ih);
+
+ /* destroy all of the events on this device */
+ mutex_lock(&dev->ev_mutex);
+ while (!list_empty(&dev->events))
+ inotify_dev_event_dequeue(dev);
+ mutex_unlock(&dev->ev_mutex);
+
+ /* free this device: the put matching the get in sys_inotify_init() */
+ put_inotify_dev(dev);
+
+ return 0;
+}
+
+static long inotify_ioctl(struct file *file, unsigned int cmd,
+ unsigned long arg)
+{
+ struct inotify_device *dev;
+ void __user *p;
+ int ret = -ENOTTY;
+
+ dev = file->private_data;
+ p = (void __user *) arg;
+
+ switch (cmd) {
+ case FIONREAD:
+ ret = put_user(dev->queue_size, (int __user *) p);
+ break;
+ }
+
+ return ret;
+}
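FIONREAD is the only ioctl handled; it reports dev->queue_size, i.e. how many bytes the next read() could return at most. The calling side, sketched for illustration:

    #include <sys/ioctl.h>

    /* Bytes currently queued on an inotify fd, or -1 on error. */
    static int pending_bytes(int fd)
    {
            int avail;

            if (ioctl(fd, FIONREAD, &avail) < 0)
                    return -1;
            return avail;
    }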
+
+static const struct file_operations inotify_fops = {
+ .poll = inotify_poll,
+ .read = inotify_read,
+ .release = inotify_release,
+ .unlocked_ioctl = inotify_ioctl,
+ .compat_ioctl = inotify_ioctl,
+};
+
+static const struct inotify_operations inotify_user_ops = {
+ .handle_event = inotify_dev_queue_event,
+ .destroy_watch = free_inotify_user_watch,
+};
+
+asmlinkage long sys_inotify_init(void)
+{
+ struct inotify_device *dev;
+ struct inotify_handle *ih;
+ struct user_struct *user;
+ struct file *filp;
+ int fd, ret;
+
+ fd = get_unused_fd();
+ if (fd < 0)
+ return fd;
+
+ filp = get_empty_filp();
+ if (!filp) {
+ ret = -ENFILE;
+ goto out_put_fd;
+ }
+
+ user = get_uid(current->user);
+ if (unlikely(atomic_read(&user->inotify_devs) >=
+ inotify_max_user_instances)) {
+ ret = -EMFILE;
+ goto out_free_uid;
+ }
+
+ dev = kmalloc(sizeof(struct inotify_device), GFP_KERNEL);
+ if (unlikely(!dev)) {
+ ret = -ENOMEM;
+ goto out_free_uid;
+ }
+
+ ih = inotify_init(&inotify_user_ops);
+ if (unlikely(IS_ERR(ih))) {
+ ret = PTR_ERR(ih);
+ goto out_free_dev;
+ }
+ dev->ih = ih;
+
+ filp->f_op = &inotify_fops;
+ filp->f_vfsmnt = mntget(inotify_mnt);
+ filp->f_dentry = dget(inotify_mnt->mnt_root);
+ filp->f_mapping = filp->f_dentry->d_inode->i_mapping;
+ filp->f_mode = FMODE_READ;
+ filp->f_flags = O_RDONLY;
+ filp->private_data = dev;
+
+ INIT_LIST_HEAD(&dev->events);
+ init_waitqueue_head(&dev->wq);
+ mutex_init(&dev->ev_mutex);
+ mutex_init(&dev->up_mutex);
+ dev->event_count = 0;
+ dev->queue_size = 0;
+ dev->max_events = inotify_max_queued_events;
+ dev->user = user;
+ atomic_set(&dev->count, 0);
+
+ get_inotify_dev(dev);
+ atomic_inc(&user->inotify_devs);
+ fd_install(fd, filp);
+
+ return fd;
+out_free_dev:
+ kfree(dev);
+out_free_uid:
+ free_uid(user);
+ put_filp(filp);
+out_put_fd:
+ put_unused_fd(fd);
+ return ret;
+}
+
+asmlinkage long sys_inotify_add_watch(int fd, const char __user *path, u32 mask)
+{
+ struct inode *inode;
+ struct inotify_device *dev;
+ struct nameidata nd;
+ struct file *filp;
+ int ret, fput_needed;
+ unsigned flags = 0;
+
+ filp = fget_light(fd, &fput_needed);
+ if (unlikely(!filp))
+ return -EBADF;
+
+ /* verify that this is indeed an inotify instance */
+ if (unlikely(filp->f_op != &inotify_fops)) {
+ ret = -EINVAL;
+ goto fput_and_out;
+ }
+
+ if (!(mask & IN_DONT_FOLLOW))
+ flags |= LOOKUP_FOLLOW;
+ if (mask & IN_ONLYDIR)
+ flags |= LOOKUP_DIRECTORY;
+
+ ret = find_inode(path, &nd, flags);
+ if (unlikely(ret))
+ goto fput_and_out;
+
+ /* inode held in place by reference to nd; dev by fget on fd */
+ inode = nd.dentry->d_inode;
+ dev = filp->private_data;
+
+ mutex_lock(&dev->up_mutex);
+ ret = inotify_find_update_watch(dev->ih, inode, mask);
+ if (ret == -ENOENT)
+ ret = create_watch(dev, inode, mask);
+ mutex_unlock(&dev->up_mutex);
+
+ path_release(&nd);
+fput_and_out:
+ fput_light(filp, fput_needed);
+ return ret;
+}
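IN_DONT_FOLLOW and IN_ONLYDIR are consumed right here and translated into LOOKUP_FOLLOW / LOOKUP_DIRECTORY for the path walk; the rest of the mask travels on to the watch. A user-space sketch of the three syscalls working together; the inotify_init()/inotify_add_watch()/inotify_rm_watch() wrapper names are the usual libc ones and are assumed rather than provided by this patch:

    #include <stdio.h>
    #include <unistd.h>
    #include <sys/inotify.h>

    int main(void)
    {
            int fd = inotify_init();
            int wd;

            if (fd < 0) {
                    perror("inotify_init");
                    return 1;
            }
            /* Refuse anything that is not a directory, and do not follow
             * a symlink at the last path component. */
            wd = inotify_add_watch(fd, "/tmp",
                                   IN_CREATE | IN_DELETE | IN_ONLYDIR | IN_DONT_FOLLOW);
            if (wd < 0) {
                    perror("inotify_add_watch");
                    return 1;
            }
            /* ... read() and parse events as sketched after inotify_read() ... */
            inotify_rm_watch(fd, wd);
            close(fd);
            return 0;
    }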
+
+asmlinkage long sys_inotify_rm_watch(int fd, u32 wd)
+{
+ struct file *filp;
+ struct inotify_device *dev;
+ int ret, fput_needed;
+
+ filp = fget_light(fd, &fput_needed);
+ if (unlikely(!filp))
+ return -EBADF;
+
+ /* verify that this is indeed an inotify instance */
+ if (unlikely(filp->f_op != &inotify_fops)) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ dev = filp->private_data;
+
+ /* we free our watch data when we get IN_IGNORED */
+ ret = inotify_rm_wd(dev->ih, wd);
+
+out:
+ fput_light(filp, fput_needed);
+ return ret;
+}
+
+static struct super_block *
+inotify_get_sb(struct file_system_type *fs_type, int flags,
+ const char *dev_name, void *data)
+{
+ return get_sb_pseudo(fs_type, "inotify", NULL, 0xBAD1DEA);
+}
+
+static struct file_system_type inotify_fs_type = {
+ .name = "inotifyfs",
+ .get_sb = inotify_get_sb,
+ .kill_sb = kill_anon_super,
+};
+
+/*
+ * inotify_user_setup - Our initialization function. Note that we cannot return an
+ * error because we have compiled-in VFS hooks. So an (unlikely) failure here
+ * must result in panic().
+ */
+static int __init inotify_user_setup(void)
+{
+ int ret;
+
+ ret = register_filesystem(&inotify_fs_type);
+ if (unlikely(ret))
+ panic("inotify: register_filesystem returned %d!\n", ret);
+
+ inotify_mnt = kern_mount(&inotify_fs_type);
+ if (IS_ERR(inotify_mnt))
+ panic("inotify: kern_mount ret %ld!\n", PTR_ERR(inotify_mnt));
+
+ inotify_max_queued_events = 16384;
+ inotify_max_user_instances = 128;
+ inotify_max_user_watches = 8192;
+
+ watch_cachep = kmem_cache_create("inotify_watch_cache",
+ sizeof(struct inotify_user_watch),
+ 0, SLAB_PANIC, NULL, NULL);
+ event_cachep = kmem_cache_create("inotify_event_cache",
+ sizeof(struct inotify_kernel_event),
+ 0, SLAB_PANIC, NULL, NULL);
+
+ return 0;
+}
+
+module_init(inotify_user_setup);
diff --git a/fs/jffs/intrep.c b/fs/jffs/intrep.c
index 0ef207dfaf6..5371a403130 100644
--- a/fs/jffs/intrep.c
+++ b/fs/jffs/intrep.c
@@ -247,7 +247,7 @@ flash_safe_read(struct mtd_info *mtd, loff_t from,
D3(printk(KERN_NOTICE "flash_safe_read(%p, %08x, %p, %08x)\n",
mtd, (unsigned int) from, buf, count));
- res = MTD_READ(mtd, from, count, &retlen, buf);
+ res = mtd->read(mtd, from, count, &retlen, buf);
if (retlen != count) {
panic("Didn't read all bytes in flash_safe_read(). Returned %d\n", res);
}
@@ -262,7 +262,7 @@ flash_read_u32(struct mtd_info *mtd, loff_t from)
__u32 ret;
int res;
- res = MTD_READ(mtd, from, 4, &retlen, (unsigned char *)&ret);
+ res = mtd->read(mtd, from, 4, &retlen, (unsigned char *)&ret);
if (retlen != 4) {
printk("Didn't read all bytes in flash_read_u32(). Returned %d\n", res);
return 0;
@@ -282,7 +282,7 @@ flash_safe_write(struct mtd_info *mtd, loff_t to,
D3(printk(KERN_NOTICE "flash_safe_write(%p, %08x, %p, %08x)\n",
mtd, (unsigned int) to, buf, count));
- res = MTD_WRITE(mtd, to, count, &retlen, buf);
+ res = mtd->write(mtd, to, count, &retlen, buf);
if (retlen != count) {
printk("Didn't write all bytes in flash_safe_write(). Returned %d\n", res);
}
@@ -300,9 +300,9 @@ flash_safe_writev(struct mtd_info *mtd, const struct kvec *vecs,
D3(printk(KERN_NOTICE "flash_safe_writev(%p, %08x, %p)\n",
mtd, (unsigned int) to, vecs));
-
+
if (mtd->writev) {
- res = MTD_WRITEV(mtd, vecs, iovec_cnt, to, &retlen);
+ res = mtd->writev(mtd, vecs, iovec_cnt, to, &retlen);
return res ? res : retlen;
}
/* Not implemented writev. Repeatedly use write - on the not so
@@ -312,7 +312,8 @@ flash_safe_writev(struct mtd_info *mtd, const struct kvec *vecs,
retlen=0;
for (i=0; !res && i<iovec_cnt; i++) {
- res = MTD_WRITE(mtd, to, vecs[i].iov_len, &retlen_a, vecs[i].iov_base);
+ res = mtd->write(mtd, to, vecs[i].iov_len, &retlen_a,
+ vecs[i].iov_base);
if (retlen_a != vecs[i].iov_len) {
printk("Didn't write all bytes in flash_safe_writev(). Returned %d\n", res);
if (i != iovec_cnt-1)
@@ -393,7 +394,7 @@ flash_erase_region(struct mtd_info *mtd, loff_t start,
set_current_state(TASK_UNINTERRUPTIBLE);
add_wait_queue(&wait_q, &wait);
- if (MTD_ERASE(mtd, erase) < 0) {
+ if (mtd->erase(mtd, erase) < 0) {
set_current_state(TASK_RUNNING);
remove_wait_queue(&wait_q, &wait);
kfree(erase);
diff --git a/fs/jffs2/Makefile b/fs/jffs2/Makefile
index 77dc5561a04..7f28ee0bd13 100644
--- a/fs/jffs2/Makefile
+++ b/fs/jffs2/Makefile
@@ -12,6 +12,9 @@ jffs2-y += symlink.o build.o erase.o background.o fs.o writev.o
jffs2-y += super.o debug.o
jffs2-$(CONFIG_JFFS2_FS_WRITEBUFFER) += wbuf.o
+jffs2-$(CONFIG_JFFS2_FS_XATTR) += xattr.o xattr_trusted.o xattr_user.o
+jffs2-$(CONFIG_JFFS2_FS_SECURITY) += security.o
+jffs2-$(CONFIG_JFFS2_FS_POSIX_ACL) += acl.o
jffs2-$(CONFIG_JFFS2_RUBIN) += compr_rubin.o
jffs2-$(CONFIG_JFFS2_RTIME) += compr_rtime.o
jffs2-$(CONFIG_JFFS2_ZLIB) += compr_zlib.o
diff --git a/fs/jffs2/README.Locking b/fs/jffs2/README.Locking
index b7943439b6e..c8f0bd64e53 100644
--- a/fs/jffs2/README.Locking
+++ b/fs/jffs2/README.Locking
@@ -150,3 +150,24 @@ the buffer.
Ordering constraints:
 Lock wbuf_sem last, after the alloc_sem and/or f->sem.
+
+
+ c->xattr_sem
+ ------------
+
+This read/write semaphore protects against concurrent access to the
+xattr-related objects: the per-superblock xattr state and ic->xref.
+On read-only paths the read semaphore is sufficient; the write
+semaphore must be held when creating, updating or deleting any
+xattr-related object.
+
+Once xattr_sem is released there is no guarantee that those objects
+still exist, so code that discovers, while holding only the read
+semaphore, that an update is needed must typically drop the lock and
+retry. For example, do_jffs2_getxattr() first scans the xref and
+xdatum lists under the read semaphore; if the name/value pair still
+has to be loaded from the medium, it releases the read semaphore and
+repeats the lookup while holding the write semaphore (sketched below).
+
+Ordering constraints:
+ Lock xattr_sem last, after the alloc_sem.
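A minimal sketch of the retry pattern described above, using the standard rwsem primitives; the two lookup helpers are placeholders invented for illustration, not functions from this patch:

    /* Scan under the read side first; if the name/value pair still has to
     * be loaded from the medium, drop the lock and redo the lookup under
     * the write side, since the xref/xdatum may have vanished in between. */
    static int xattr_lookup_sketch(struct jffs2_sb_info *c, struct inode *inode,
                                   const char *name, char *buffer, size_t size)
    {
            int rc;

            down_read(&c->xattr_sem);
            rc = scan_cached_xattr(c, inode, name, buffer, size);       /* placeholder */
            up_read(&c->xattr_sem);

            if (rc == -EAGAIN) {
                    down_write(&c->xattr_sem);
                    rc = load_and_scan_xattr(c, inode, name, buffer, size); /* placeholder */
                    up_write(&c->xattr_sem);
            }
            return rc;
    }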
diff --git a/fs/jffs2/acl.c b/fs/jffs2/acl.c
new file mode 100644
index 00000000000..320dd48b834
--- /dev/null
+++ b/fs/jffs2/acl.c
@@ -0,0 +1,485 @@
+/*
+ * JFFS2 -- Journalling Flash File System, Version 2.
+ *
+ * Copyright (C) 2006 NEC Corporation
+ *
+ * Created by KaiGai Kohei <kaigai@ak.jp.nec.com>
+ *
+ * For licensing information, see the file 'LICENCE' in this directory.
+ *
+ */
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/fs.h>
+#include <linux/time.h>
+#include <linux/crc32.h>
+#include <linux/jffs2.h>
+#include <linux/xattr.h>
+#include <linux/posix_acl_xattr.h>
+#include <linux/mtd/mtd.h>
+#include "nodelist.h"
+
+static size_t jffs2_acl_size(int count)
+{
+ if (count <= 4) {
+ return sizeof(struct jffs2_acl_header)
+ + count * sizeof(struct jffs2_acl_entry_short);
+ } else {
+ return sizeof(struct jffs2_acl_header)
+ + 4 * sizeof(struct jffs2_acl_entry_short)
+ + (count - 4) * sizeof(struct jffs2_acl_entry);
+ }
+}
+
+static int jffs2_acl_count(size_t size)
+{
+ size_t s;
+
+ size -= sizeof(struct jffs2_acl_header);
+ s = size - 4 * sizeof(struct jffs2_acl_entry_short);
+ if (s < 0) {
+ if (size % sizeof(struct jffs2_acl_entry_short))
+ return -1;
+ return size / sizeof(struct jffs2_acl_entry_short);
+ } else {
+ if (s % sizeof(struct jffs2_acl_entry))
+ return -1;
+ return s / sizeof(struct jffs2_acl_entry) + 4;
+ }
+}
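jffs2_acl_size() and jffs2_acl_count() invert each other: the first four entries are stored in the short form, any further entries in the long form that carries an e_id. A stand-alone round-trip check, assuming the sizes the jint16_t/jint32_t fields imply (4-byte header, 4-byte short entry, 8-byte long entry):

    #include <assert.h>
    #include <stddef.h>

    enum { HDR = 4, SHORT_E = 4, LONG_E = 8 };

    static size_t acl_size(int count)
    {
            return count <= 4 ? HDR + count * SHORT_E
                              : HDR + 4 * SHORT_E + (count - 4) * LONG_E;
    }

    static int acl_count(size_t size)
    {
            size -= HDR;
            return size <= 4 * SHORT_E ? (int)(size / SHORT_E)
                                       : (int)((size - 4 * SHORT_E) / LONG_E) + 4;
    }

    int main(void)
    {
            assert(acl_size(3) == 16 && acl_count(16) == 3);  /* all short entries */
            assert(acl_size(6) == 36 && acl_count(36) == 6);  /* 4 short + 2 long  */
            return 0;
    }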
+
+static struct posix_acl *jffs2_acl_from_medium(void *value, size_t size)
+{
+ void *end = value + size;
+ struct jffs2_acl_header *header = value;
+ struct jffs2_acl_entry *entry;
+ struct posix_acl *acl;
+ uint32_t ver;
+ int i, count;
+
+ if (!value)
+ return NULL;
+ if (size < sizeof(struct jffs2_acl_header))
+ return ERR_PTR(-EINVAL);
+ ver = je32_to_cpu(header->a_version);
+ if (ver != JFFS2_ACL_VERSION) {
+ JFFS2_WARNING("Invalid ACL version. (=%u)\n", ver);
+ return ERR_PTR(-EINVAL);
+ }
+
+ value += sizeof(struct jffs2_acl_header);
+ count = jffs2_acl_count(size);
+ if (count < 0)
+ return ERR_PTR(-EINVAL);
+ if (count == 0)
+ return NULL;
+
+ acl = posix_acl_alloc(count, GFP_KERNEL);
+ if (!acl)
+ return ERR_PTR(-ENOMEM);
+
+ for (i=0; i < count; i++) {
+ entry = value;
+ if (value + sizeof(struct jffs2_acl_entry_short) > end)
+ goto fail;
+ acl->a_entries[i].e_tag = je16_to_cpu(entry->e_tag);
+ acl->a_entries[i].e_perm = je16_to_cpu(entry->e_perm);
+ switch (acl->a_entries[i].e_tag) {
+ case ACL_USER_OBJ:
+ case ACL_GROUP_OBJ:
+ case ACL_MASK:
+ case ACL_OTHER:
+ value += sizeof(struct jffs2_acl_entry_short);
+ acl->a_entries[i].e_id = ACL_UNDEFINED_ID;
+ break;
+
+ case ACL_USER:
+ case ACL_GROUP:
+ value += sizeof(struct jffs2_acl_entry);
+ if (value > end)
+ goto fail;
+ acl->a_entries[i].e_id = je32_to_cpu(entry->e_id);
+ break;
+
+ default:
+ goto fail;
+ }
+ }
+ if (value != end)
+ goto fail;
+ return acl;
+ fail:
+ posix_acl_release(acl);
+ return ERR_PTR(-EINVAL);
+}
+
+static void *jffs2_acl_to_medium(const struct posix_acl *acl, size_t *size)
+{
+ struct jffs2_acl_header *header;
+ struct jffs2_acl_entry *entry;
+ void *e;
+ size_t i;
+
+ *size = jffs2_acl_size(acl->a_count);
+ header = kmalloc(sizeof(*header) + acl->a_count * sizeof(*entry), GFP_KERNEL);
+ if (!header)
+ return ERR_PTR(-ENOMEM);
+ header->a_version = cpu_to_je32(JFFS2_ACL_VERSION);
+ e = header + 1;
+ for (i=0; i < acl->a_count; i++) {
+ entry = e;
+ entry->e_tag = cpu_to_je16(acl->a_entries[i].e_tag);
+ entry->e_perm = cpu_to_je16(acl->a_entries[i].e_perm);
+ switch(acl->a_entries[i].e_tag) {
+ case ACL_USER:
+ case ACL_GROUP:
+ entry->e_id = cpu_to_je32(acl->a_entries[i].e_id);
+ e += sizeof(struct jffs2_acl_entry);
+ break;
+
+ case ACL_USER_OBJ:
+ case ACL_GROUP_OBJ:
+ case ACL_MASK:
+ case ACL_OTHER:
+ e += sizeof(struct jffs2_acl_entry_short);
+ break;
+
+ default:
+ goto fail;
+ }
+ }
+ return header;
+ fail:
+ kfree(header);
+ return ERR_PTR(-EINVAL);
+}
+
+static struct posix_acl *jffs2_iget_acl(struct inode *inode, struct posix_acl **i_acl)
+{
+ struct posix_acl *acl = JFFS2_ACL_NOT_CACHED;
+
+ spin_lock(&inode->i_lock);
+ if (*i_acl != JFFS2_ACL_NOT_CACHED)
+ acl = posix_acl_dup(*i_acl);
+ spin_unlock(&inode->i_lock);
+ return acl;
+}
+
+static void jffs2_iset_acl(struct inode *inode, struct posix_acl **i_acl, struct posix_acl *acl)
+{
+ spin_lock(&inode->i_lock);
+ if (*i_acl != JFFS2_ACL_NOT_CACHED)
+ posix_acl_release(*i_acl);
+ *i_acl = posix_acl_dup(acl);
+ spin_unlock(&inode->i_lock);
+}
+
+static struct posix_acl *jffs2_get_acl(struct inode *inode, int type)
+{
+ struct jffs2_inode_info *f = JFFS2_INODE_INFO(inode);
+ struct posix_acl *acl;
+ char *value = NULL;
+ int rc, xprefix;
+
+ switch (type) {
+ case ACL_TYPE_ACCESS:
+ acl = jffs2_iget_acl(inode, &f->i_acl_access);
+ if (acl != JFFS2_ACL_NOT_CACHED)
+ return acl;
+ xprefix = JFFS2_XPREFIX_ACL_ACCESS;
+ break;
+ case ACL_TYPE_DEFAULT:
+ acl = jffs2_iget_acl(inode, &f->i_acl_default);
+ if (acl != JFFS2_ACL_NOT_CACHED)
+ return acl;
+ xprefix = JFFS2_XPREFIX_ACL_DEFAULT;
+ break;
+ default:
+ return ERR_PTR(-EINVAL);
+ }
+ rc = do_jffs2_getxattr(inode, xprefix, "", NULL, 0);
+ if (rc > 0) {
+ value = kmalloc(rc, GFP_KERNEL);
+ if (!value)
+ return ERR_PTR(-ENOMEM);
+ rc = do_jffs2_getxattr(inode, xprefix, "", value, rc);
+ }
+ if (rc > 0) {
+ acl = jffs2_acl_from_medium(value, rc);
+ } else if (rc == -ENODATA || rc == -ENOSYS) {
+ acl = NULL;
+ } else {
+ acl = ERR_PTR(rc);
+ }
+ if (value)
+ kfree(value);
+ if (!IS_ERR(acl)) {
+ switch (type) {
+ case ACL_TYPE_ACCESS:
+ jffs2_iset_acl(inode, &f->i_acl_access, acl);
+ break;
+ case ACL_TYPE_DEFAULT:
+ jffs2_iset_acl(inode, &f->i_acl_default, acl);
+ break;
+ }
+ }
+ return acl;
+}
+
+static int jffs2_set_acl(struct inode *inode, int type, struct posix_acl *acl)
+{
+ struct jffs2_inode_info *f = JFFS2_INODE_INFO(inode);
+ size_t size = 0;
+ char *value = NULL;
+ int rc, xprefix;
+
+ if (S_ISLNK(inode->i_mode))
+ return -EOPNOTSUPP;
+
+ switch (type) {
+ case ACL_TYPE_ACCESS:
+ xprefix = JFFS2_XPREFIX_ACL_ACCESS;
+ if (acl) {
+ mode_t mode = inode->i_mode;
+ rc = posix_acl_equiv_mode(acl, &mode);
+ if (rc < 0)
+ return rc;
+ if (inode->i_mode != mode) {
+ inode->i_mode = mode;
+ jffs2_dirty_inode(inode);
+ }
+ if (rc == 0)
+ acl = NULL;
+ }
+ break;
+ case ACL_TYPE_DEFAULT:
+ xprefix = JFFS2_XPREFIX_ACL_DEFAULT;
+ if (!S_ISDIR(inode->i_mode))
+ return acl ? -EACCES : 0;
+ break;
+ default:
+ return -EINVAL;
+ }
+ if (acl) {
+ value = jffs2_acl_to_medium(acl, &size);
+ if (IS_ERR(value))
+ return PTR_ERR(value);
+ }
+
+ rc = do_jffs2_setxattr(inode, xprefix, "", value, size, 0);
+ if (value)
+ kfree(value);
+ if (!rc) {
+ switch(type) {
+ case ACL_TYPE_ACCESS:
+ jffs2_iset_acl(inode, &f->i_acl_access, acl);
+ break;
+ case ACL_TYPE_DEFAULT:
+ jffs2_iset_acl(inode, &f->i_acl_default, acl);
+ break;
+ }
+ }
+ return rc;
+}
+
+static int jffs2_check_acl(struct inode *inode, int mask)
+{
+ struct posix_acl *acl;
+ int rc;
+
+ acl = jffs2_get_acl(inode, ACL_TYPE_ACCESS);
+ if (IS_ERR(acl))
+ return PTR_ERR(acl);
+ if (acl) {
+ rc = posix_acl_permission(inode, acl, mask);
+ posix_acl_release(acl);
+ return rc;
+ }
+ return -EAGAIN;
+}
+
+int jffs2_permission(struct inode *inode, int mask, struct nameidata *nd)
+{
+ return generic_permission(inode, mask, jffs2_check_acl);
+}
+
+int jffs2_init_acl(struct inode *inode, struct inode *dir)
+{
+ struct jffs2_inode_info *f = JFFS2_INODE_INFO(inode);
+ struct posix_acl *acl = NULL, *clone;
+ mode_t mode;
+ int rc = 0;
+
+ f->i_acl_access = JFFS2_ACL_NOT_CACHED;
+ f->i_acl_default = JFFS2_ACL_NOT_CACHED;
+ if (!S_ISLNK(inode->i_mode)) {
+ acl = jffs2_get_acl(dir, ACL_TYPE_DEFAULT);
+ if (IS_ERR(acl))
+ return PTR_ERR(acl);
+ if (!acl)
+ inode->i_mode &= ~current->fs->umask;
+ }
+ if (acl) {
+ if (S_ISDIR(inode->i_mode)) {
+ rc = jffs2_set_acl(inode, ACL_TYPE_DEFAULT, acl);
+ if (rc)
+ goto cleanup;
+ }
+ clone = posix_acl_clone(acl, GFP_KERNEL);
+ rc = -ENOMEM;
+ if (!clone)
+ goto cleanup;
+ mode = inode->i_mode;
+ rc = posix_acl_create_masq(clone, &mode);
+ if (rc >= 0) {
+ inode->i_mode = mode;
+ if (rc > 0)
+ rc = jffs2_set_acl(inode, ACL_TYPE_ACCESS, clone);
+ }
+ posix_acl_release(clone);
+ }
+ cleanup:
+ posix_acl_release(acl);
+ return rc;
+}
+
+void jffs2_clear_acl(struct inode *inode)
+{
+ struct jffs2_inode_info *f = JFFS2_INODE_INFO(inode);
+
+ if (f->i_acl_access && f->i_acl_access != JFFS2_ACL_NOT_CACHED) {
+ posix_acl_release(f->i_acl_access);
+ f->i_acl_access = JFFS2_ACL_NOT_CACHED;
+ }
+ if (f->i_acl_default && f->i_acl_default != JFFS2_ACL_NOT_CACHED) {
+ posix_acl_release(f->i_acl_default);
+ f->i_acl_default = JFFS2_ACL_NOT_CACHED;
+ }
+}
+
+int jffs2_acl_chmod(struct inode *inode)
+{
+ struct posix_acl *acl, *clone;
+ int rc;
+
+ if (S_ISLNK(inode->i_mode))
+ return -EOPNOTSUPP;
+ acl = jffs2_get_acl(inode, ACL_TYPE_ACCESS);
+ if (IS_ERR(acl) || !acl)
+ return PTR_ERR(acl);
+ clone = posix_acl_clone(acl, GFP_KERNEL);
+ posix_acl_release(acl);
+ if (!clone)
+ return -ENOMEM;
+ rc = posix_acl_chmod_masq(clone, inode->i_mode);
+ if (!rc)
+ rc = jffs2_set_acl(inode, ACL_TYPE_ACCESS, clone);
+ posix_acl_release(clone);
+ return rc;
+}
+
+static size_t jffs2_acl_access_listxattr(struct inode *inode, char *list, size_t list_size,
+ const char *name, size_t name_len)
+{
+ const int retlen = sizeof(POSIX_ACL_XATTR_ACCESS);
+
+ if (list && retlen <= list_size)
+ strcpy(list, POSIX_ACL_XATTR_ACCESS);
+ return retlen;
+}
+
+static size_t jffs2_acl_default_listxattr(struct inode *inode, char *list, size_t list_size,
+ const char *name, size_t name_len)
+{
+ const int retlen = sizeof(POSIX_ACL_XATTR_DEFAULT);
+
+ if (list && retlen <= list_size)
+ strcpy(list, POSIX_ACL_XATTR_DEFAULT);
+ return retlen;
+}
+
+static int jffs2_acl_getxattr(struct inode *inode, int type, void *buffer, size_t size)
+{
+ struct posix_acl *acl;
+ int rc;
+
+ acl = jffs2_get_acl(inode, type);
+ if (IS_ERR(acl))
+ return PTR_ERR(acl);
+ if (!acl)
+ return -ENODATA;
+ rc = posix_acl_to_xattr(acl, buffer, size);
+ posix_acl_release(acl);
+
+ return rc;
+}
+
+static int jffs2_acl_access_getxattr(struct inode *inode, const char *name, void *buffer, size_t size)
+{
+ if (name[0] != '\0')
+ return -EINVAL;
+ return jffs2_acl_getxattr(inode, ACL_TYPE_ACCESS, buffer, size);
+}
+
+static int jffs2_acl_default_getxattr(struct inode *inode, const char *name, void *buffer, size_t size)
+{
+ if (name[0] != '\0')
+ return -EINVAL;
+ return jffs2_acl_getxattr(inode, ACL_TYPE_DEFAULT, buffer, size);
+}
+
+static int jffs2_acl_setxattr(struct inode *inode, int type, const void *value, size_t size)
+{
+ struct posix_acl *acl;
+ int rc;
+
+ if ((current->fsuid != inode->i_uid) && !capable(CAP_FOWNER))
+ return -EPERM;
+
+ if (value) {
+ acl = posix_acl_from_xattr(value, size);
+ if (IS_ERR(acl))
+ return PTR_ERR(acl);
+ if (acl) {
+ rc = posix_acl_valid(acl);
+ if (rc)
+ goto out;
+ }
+ } else {
+ acl = NULL;
+ }
+ rc = jffs2_set_acl(inode, type, acl);
+ out:
+ posix_acl_release(acl);
+ return rc;
+}
+
+static int jffs2_acl_access_setxattr(struct inode *inode, const char *name,
+ const void *buffer, size_t size, int flags)
+{
+ if (name[0] != '\0')
+ return -EINVAL;
+ return jffs2_acl_setxattr(inode, ACL_TYPE_ACCESS, buffer, size);
+}
+
+static int jffs2_acl_default_setxattr(struct inode *inode, const char *name,
+ const void *buffer, size_t size, int flags)
+{
+ if (name[0] != '\0')
+ return -EINVAL;
+ return jffs2_acl_setxattr(inode, ACL_TYPE_DEFAULT, buffer, size);
+}
+
+struct xattr_handler jffs2_acl_access_xattr_handler = {
+ .prefix = POSIX_ACL_XATTR_ACCESS,
+ .list = jffs2_acl_access_listxattr,
+ .get = jffs2_acl_access_getxattr,
+ .set = jffs2_acl_access_setxattr,
+};
+
+struct xattr_handler jffs2_acl_default_xattr_handler = {
+ .prefix = POSIX_ACL_XATTR_DEFAULT,
+ .list = jffs2_acl_default_listxattr,
+ .get = jffs2_acl_default_getxattr,
+ .set = jffs2_acl_default_setxattr,
+};
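With these handlers registered, the standard POSIX ACL tooling reaches JFFS2 through the system.posix_acl_access / system.posix_acl_default xattrs. A user-space sketch using libacl; acl_from_text(), acl_set_file() and acl_free() are standard libacl calls, while the mount point, file name and ACL text are examples only:

    #include <stdio.h>
    #include <sys/types.h>
    #include <sys/acl.h>

    int main(void)
    {
            acl_t acl = acl_from_text("u::rwx,g::r-x,o::r--,u:1000:rw-,m::rwx");

            if (!acl) {
                    perror("acl_from_text");
                    return 1;
            }
            /* Ends up in do_jffs2_setxattr() via jffs2_acl_access_setxattr(). */
            if (acl_set_file("/mnt/jffs2/somefile", ACL_TYPE_ACCESS, acl) < 0)
                    perror("acl_set_file");
            acl_free(acl);
            return 0;
    }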
diff --git a/fs/jffs2/acl.h b/fs/jffs2/acl.h
new file mode 100644
index 00000000000..8893bd1a6ba
--- /dev/null
+++ b/fs/jffs2/acl.h
@@ -0,0 +1,45 @@
+/*
+ * JFFS2 -- Journalling Flash File System, Version 2.
+ *
+ * Copyright (C) 2006 NEC Corporation
+ *
+ * Created by KaiGai Kohei <kaigai@ak.jp.nec.com>
+ *
+ * For licensing information, see the file 'LICENCE' in this directory.
+ *
+ */
+struct jffs2_acl_entry {
+ jint16_t e_tag;
+ jint16_t e_perm;
+ jint32_t e_id;
+};
+
+struct jffs2_acl_entry_short {
+ jint16_t e_tag;
+ jint16_t e_perm;
+};
+
+struct jffs2_acl_header {
+ jint32_t a_version;
+};
+
+#ifdef CONFIG_JFFS2_FS_POSIX_ACL
+
+#define JFFS2_ACL_NOT_CACHED ((void *)-1)
+
+extern int jffs2_permission(struct inode *, int, struct nameidata *);
+extern int jffs2_acl_chmod(struct inode *);
+extern int jffs2_init_acl(struct inode *, struct inode *);
+extern void jffs2_clear_acl(struct inode *);
+
+extern struct xattr_handler jffs2_acl_access_xattr_handler;
+extern struct xattr_handler jffs2_acl_default_xattr_handler;
+
+#else
+
+#define jffs2_permission NULL
+#define jffs2_acl_chmod(inode) (0)
+#define jffs2_init_acl(inode,dir) (0)
+#define jffs2_clear_acl(inode)
+
+#endif /* CONFIG_JFFS2_FS_POSIX_ACL */
diff --git a/fs/jffs2/build.c b/fs/jffs2/build.c
index 70f7a896c04..02826967ab5 100644
--- a/fs/jffs2/build.c
+++ b/fs/jffs2/build.c
@@ -160,6 +160,7 @@ static int jffs2_build_filesystem(struct jffs2_sb_info *c)
ic->scan_dents = NULL;
cond_resched();
}
+ jffs2_build_xattr_subsystem(c);
c->flags &= ~JFFS2_SB_FLAG_BUILDING;
dbg_fsbuild("FS build complete\n");
@@ -178,6 +179,7 @@ exit:
jffs2_free_full_dirent(fd);
}
}
+ jffs2_clear_xattr_subsystem(c);
}
return ret;
diff --git a/fs/jffs2/compr.c b/fs/jffs2/compr.c
index e7944e665b9..7001ba26c06 100644
--- a/fs/jffs2/compr.c
+++ b/fs/jffs2/compr.c
@@ -412,7 +412,7 @@ void jffs2_free_comprbuf(unsigned char *comprbuf, unsigned char *orig)
kfree(comprbuf);
}
-int jffs2_compressors_init(void)
+int __init jffs2_compressors_init(void)
{
/* Registering compressors */
#ifdef CONFIG_JFFS2_ZLIB
diff --git a/fs/jffs2/compr.h b/fs/jffs2/compr.h
index a77e830d85c..509b8b1c081 100644
--- a/fs/jffs2/compr.h
+++ b/fs/jffs2/compr.h
@@ -23,8 +23,8 @@
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/jffs2.h>
-#include <linux/jffs2_fs_i.h>
-#include <linux/jffs2_fs_sb.h>
+#include "jffs2_fs_i.h"
+#include "jffs2_fs_sb.h"
#include "nodelist.h"
#define JFFS2_RUBINMIPS_PRIORITY 10
diff --git a/fs/jffs2/debug.c b/fs/jffs2/debug.c
index 1fe17de713e..72b4fc13a10 100644
--- a/fs/jffs2/debug.c
+++ b/fs/jffs2/debug.c
@@ -192,13 +192,13 @@ __jffs2_dbg_acct_paranoia_check_nolock(struct jffs2_sb_info *c,
else
my_dirty_size += totlen;
- if ((!ref2->next_phys) != (ref2 == jeb->last_node)) {
- JFFS2_ERROR("node_ref for node at %#08x (mem %p) has next_phys at %#08x (mem %p), last_node is at %#08x (mem %p).\n",
- ref_offset(ref2), ref2, ref_offset(ref2->next_phys), ref2->next_phys,
- ref_offset(jeb->last_node), jeb->last_node);
+ if ((!ref_next(ref2)) != (ref2 == jeb->last_node)) {
+ JFFS2_ERROR("node_ref for node at %#08x (mem %p) has next at %#08x (mem %p), last_node is at %#08x (mem %p).\n",
+ ref_offset(ref2), ref2, ref_offset(ref_next(ref2)), ref_next(ref2),
+ ref_offset(jeb->last_node), jeb->last_node);
goto error;
}
- ref2 = ref2->next_phys;
+ ref2 = ref_next(ref2);
}
if (my_used_size != jeb->used_size) {
@@ -268,9 +268,9 @@ __jffs2_dbg_dump_node_refs_nolock(struct jffs2_sb_info *c,
}
printk(JFFS2_DBG);
- for (ref = jeb->first_node; ; ref = ref->next_phys) {
+ for (ref = jeb->first_node; ; ref = ref_next(ref)) {
printk("%#08x(%#x)", ref_offset(ref), ref->__totlen);
- if (ref->next_phys)
+ if (ref_next(ref))
printk("->");
else
break;
diff --git a/fs/jffs2/debug.h b/fs/jffs2/debug.h
index 162af6dfe29..5fa494a792b 100644
--- a/fs/jffs2/debug.h
+++ b/fs/jffs2/debug.h
@@ -171,6 +171,12 @@
#define dbg_memalloc(fmt, ...)
#endif
+/* Watch the XATTR subsystem */
+#ifdef JFFS2_DBG_XATTR_MESSAGES
+#define dbg_xattr(fmt, ...) JFFS2_DEBUG(fmt, ##__VA_ARGS__)
+#else
+#define dbg_xattr(fmt, ...)
+#endif
/* "Sanity" checks */
void
diff --git a/fs/jffs2/dir.c b/fs/jffs2/dir.c
index 8bc7a5018e4..edd8371fc6a 100644
--- a/fs/jffs2/dir.c
+++ b/fs/jffs2/dir.c
@@ -17,8 +17,8 @@
#include <linux/fs.h>
#include <linux/crc32.h>
#include <linux/jffs2.h>
-#include <linux/jffs2_fs_i.h>
-#include <linux/jffs2_fs_sb.h>
+#include "jffs2_fs_i.h"
+#include "jffs2_fs_sb.h"
#include <linux/time.h>
#include "nodelist.h"
@@ -57,7 +57,12 @@ struct inode_operations jffs2_dir_inode_operations =
.rmdir = jffs2_rmdir,
.mknod = jffs2_mknod,
.rename = jffs2_rename,
+ .permission = jffs2_permission,
.setattr = jffs2_setattr,
+ .setxattr = jffs2_setxattr,
+ .getxattr = jffs2_getxattr,
+ .listxattr = jffs2_listxattr,
+ .removexattr = jffs2_removexattr
};
/***********************************************************************/
@@ -78,6 +83,9 @@ static struct dentry *jffs2_lookup(struct inode *dir_i, struct dentry *target,
D1(printk(KERN_DEBUG "jffs2_lookup()\n"));
+ if (target->d_name.len > JFFS2_MAX_NAME_LEN)
+ return ERR_PTR(-ENAMETOOLONG);
+
dir_f = JFFS2_INODE_INFO(dir_i);
c = JFFS2_SB_INFO(dir_i->i_sb);
@@ -206,12 +214,15 @@ static int jffs2_create(struct inode *dir_i, struct dentry *dentry, int mode,
ret = jffs2_do_create(c, dir_f, f, ri,
dentry->d_name.name, dentry->d_name.len);
- if (ret) {
- make_bad_inode(inode);
- iput(inode);
- jffs2_free_raw_inode(ri);
- return ret;
- }
+ if (ret)
+ goto fail;
+
+ ret = jffs2_init_security(inode, dir_i);
+ if (ret)
+ goto fail;
+ ret = jffs2_init_acl(inode, dir_i);
+ if (ret)
+ goto fail;
dir_i->i_mtime = dir_i->i_ctime = ITIME(je32_to_cpu(ri->ctime));
@@ -221,6 +232,12 @@ static int jffs2_create(struct inode *dir_i, struct dentry *dentry, int mode,
D1(printk(KERN_DEBUG "jffs2_create: Created ino #%lu with mode %o, nlink %d(%d). nrpages %ld\n",
inode->i_ino, inode->i_mode, inode->i_nlink, f->inocache->nlink, inode->i_mapping->nrpages));
return 0;
+
+ fail:
+ make_bad_inode(inode);
+ iput(inode);
+ jffs2_free_raw_inode(ri);
+ return ret;
}
/***********************************************************************/
@@ -291,7 +308,7 @@ static int jffs2_symlink (struct inode *dir_i, struct dentry *dentry, const char
struct jffs2_full_dnode *fn;
struct jffs2_full_dirent *fd;
int namelen;
- uint32_t alloclen, phys_ofs;
+ uint32_t alloclen;
int ret, targetlen = strlen(target);
/* FIXME: If you care. We'd need to use frags for the target
@@ -310,8 +327,8 @@ static int jffs2_symlink (struct inode *dir_i, struct dentry *dentry, const char
* Just the node will do for now, though
*/
namelen = dentry->d_name.len;
- ret = jffs2_reserve_space(c, sizeof(*ri) + targetlen, &phys_ofs, &alloclen,
- ALLOC_NORMAL, JFFS2_SUMMARY_INODE_SIZE);
+ ret = jffs2_reserve_space(c, sizeof(*ri) + targetlen, &alloclen,
+ ALLOC_NORMAL, JFFS2_SUMMARY_INODE_SIZE);
if (ret) {
jffs2_free_raw_inode(ri);
@@ -339,7 +356,7 @@ static int jffs2_symlink (struct inode *dir_i, struct dentry *dentry, const char
ri->data_crc = cpu_to_je32(crc32(0, target, targetlen));
ri->node_crc = cpu_to_je32(crc32(0, ri, sizeof(*ri)-8));
- fn = jffs2_write_dnode(c, f, ri, target, targetlen, phys_ofs, ALLOC_NORMAL);
+ fn = jffs2_write_dnode(c, f, ri, target, targetlen, ALLOC_NORMAL);
jffs2_free_raw_inode(ri);
@@ -371,8 +388,20 @@ static int jffs2_symlink (struct inode *dir_i, struct dentry *dentry, const char
up(&f->sem);
jffs2_complete_reservation(c);
- ret = jffs2_reserve_space(c, sizeof(*rd)+namelen, &phys_ofs, &alloclen,
- ALLOC_NORMAL, JFFS2_SUMMARY_DIRENT_SIZE(namelen));
+
+ ret = jffs2_init_security(inode, dir_i);
+ if (ret) {
+ jffs2_clear_inode(inode);
+ return ret;
+ }
+ ret = jffs2_init_acl(inode, dir_i);
+ if (ret) {
+ jffs2_clear_inode(inode);
+ return ret;
+ }
+
+ ret = jffs2_reserve_space(c, sizeof(*rd)+namelen, &alloclen,
+ ALLOC_NORMAL, JFFS2_SUMMARY_DIRENT_SIZE(namelen));
if (ret) {
/* Eep. */
jffs2_clear_inode(inode);
@@ -404,7 +433,7 @@ static int jffs2_symlink (struct inode *dir_i, struct dentry *dentry, const char
rd->node_crc = cpu_to_je32(crc32(0, rd, sizeof(*rd)-8));
rd->name_crc = cpu_to_je32(crc32(0, dentry->d_name.name, namelen));
- fd = jffs2_write_dirent(c, dir_f, rd, dentry->d_name.name, namelen, phys_ofs, ALLOC_NORMAL);
+ fd = jffs2_write_dirent(c, dir_f, rd, dentry->d_name.name, namelen, ALLOC_NORMAL);
if (IS_ERR(fd)) {
/* dirent failed to write. Delete the inode normally
@@ -442,7 +471,7 @@ static int jffs2_mkdir (struct inode *dir_i, struct dentry *dentry, int mode)
struct jffs2_full_dnode *fn;
struct jffs2_full_dirent *fd;
int namelen;
- uint32_t alloclen, phys_ofs;
+ uint32_t alloclen;
int ret;
mode |= S_IFDIR;
@@ -457,8 +486,8 @@ static int jffs2_mkdir (struct inode *dir_i, struct dentry *dentry, int mode)
* Just the node will do for now, though
*/
namelen = dentry->d_name.len;
- ret = jffs2_reserve_space(c, sizeof(*ri), &phys_ofs, &alloclen, ALLOC_NORMAL,
- JFFS2_SUMMARY_INODE_SIZE);
+ ret = jffs2_reserve_space(c, sizeof(*ri), &alloclen, ALLOC_NORMAL,
+ JFFS2_SUMMARY_INODE_SIZE);
if (ret) {
jffs2_free_raw_inode(ri);
@@ -483,7 +512,7 @@ static int jffs2_mkdir (struct inode *dir_i, struct dentry *dentry, int mode)
ri->data_crc = cpu_to_je32(0);
ri->node_crc = cpu_to_je32(crc32(0, ri, sizeof(*ri)-8));
- fn = jffs2_write_dnode(c, f, ri, NULL, 0, phys_ofs, ALLOC_NORMAL);
+ fn = jffs2_write_dnode(c, f, ri, NULL, 0, ALLOC_NORMAL);
jffs2_free_raw_inode(ri);
@@ -501,8 +530,20 @@ static int jffs2_mkdir (struct inode *dir_i, struct dentry *dentry, int mode)
up(&f->sem);
jffs2_complete_reservation(c);
- ret = jffs2_reserve_space(c, sizeof(*rd)+namelen, &phys_ofs, &alloclen,
- ALLOC_NORMAL, JFFS2_SUMMARY_DIRENT_SIZE(namelen));
+
+ ret = jffs2_init_security(inode, dir_i);
+ if (ret) {
+ jffs2_clear_inode(inode);
+ return ret;
+ }
+ ret = jffs2_init_acl(inode, dir_i);
+ if (ret) {
+ jffs2_clear_inode(inode);
+ return ret;
+ }
+
+ ret = jffs2_reserve_space(c, sizeof(*rd)+namelen, &alloclen,
+ ALLOC_NORMAL, JFFS2_SUMMARY_DIRENT_SIZE(namelen));
if (ret) {
/* Eep. */
jffs2_clear_inode(inode);
@@ -534,7 +575,7 @@ static int jffs2_mkdir (struct inode *dir_i, struct dentry *dentry, int mode)
rd->node_crc = cpu_to_je32(crc32(0, rd, sizeof(*rd)-8));
rd->name_crc = cpu_to_je32(crc32(0, dentry->d_name.name, namelen));
- fd = jffs2_write_dirent(c, dir_f, rd, dentry->d_name.name, namelen, phys_ofs, ALLOC_NORMAL);
+ fd = jffs2_write_dirent(c, dir_f, rd, dentry->d_name.name, namelen, ALLOC_NORMAL);
if (IS_ERR(fd)) {
/* dirent failed to write. Delete the inode normally
@@ -588,12 +629,12 @@ static int jffs2_mknod (struct inode *dir_i, struct dentry *dentry, int mode, de
struct jffs2_full_dnode *fn;
struct jffs2_full_dirent *fd;
int namelen;
- jint16_t dev;
+ union jffs2_device_node dev;
int devlen = 0;
- uint32_t alloclen, phys_ofs;
+ uint32_t alloclen;
int ret;
- if (!old_valid_dev(rdev))
+ if (!new_valid_dev(rdev))
return -EINVAL;
ri = jffs2_alloc_raw_inode();
@@ -602,17 +643,15 @@ static int jffs2_mknod (struct inode *dir_i, struct dentry *dentry, int mode, de
c = JFFS2_SB_INFO(dir_i->i_sb);
- if (S_ISBLK(mode) || S_ISCHR(mode)) {
- dev = cpu_to_je16(old_encode_dev(rdev));
- devlen = sizeof(dev);
- }
+ if (S_ISBLK(mode) || S_ISCHR(mode))
+ devlen = jffs2_encode_dev(&dev, rdev);
/* Try to reserve enough space for both node and dirent.
* Just the node will do for now, though
*/
namelen = dentry->d_name.len;
- ret = jffs2_reserve_space(c, sizeof(*ri) + devlen, &phys_ofs, &alloclen,
- ALLOC_NORMAL, JFFS2_SUMMARY_INODE_SIZE);
+ ret = jffs2_reserve_space(c, sizeof(*ri) + devlen, &alloclen,
+ ALLOC_NORMAL, JFFS2_SUMMARY_INODE_SIZE);
if (ret) {
jffs2_free_raw_inode(ri);
@@ -639,7 +678,7 @@ static int jffs2_mknod (struct inode *dir_i, struct dentry *dentry, int mode, de
ri->data_crc = cpu_to_je32(crc32(0, &dev, devlen));
ri->node_crc = cpu_to_je32(crc32(0, ri, sizeof(*ri)-8));
- fn = jffs2_write_dnode(c, f, ri, (char *)&dev, devlen, phys_ofs, ALLOC_NORMAL);
+ fn = jffs2_write_dnode(c, f, ri, (char *)&dev, devlen, ALLOC_NORMAL);
jffs2_free_raw_inode(ri);
@@ -657,8 +696,20 @@ static int jffs2_mknod (struct inode *dir_i, struct dentry *dentry, int mode, de
up(&f->sem);
jffs2_complete_reservation(c);
- ret = jffs2_reserve_space(c, sizeof(*rd)+namelen, &phys_ofs, &alloclen,
- ALLOC_NORMAL, JFFS2_SUMMARY_DIRENT_SIZE(namelen));
+
+ ret = jffs2_init_security(inode, dir_i);
+ if (ret) {
+ jffs2_clear_inode(inode);
+ return ret;
+ }
+ ret = jffs2_init_acl(inode, dir_i);
+ if (ret) {
+ jffs2_clear_inode(inode);
+ return ret;
+ }
+
+ ret = jffs2_reserve_space(c, sizeof(*rd)+namelen, &alloclen,
+ ALLOC_NORMAL, JFFS2_SUMMARY_DIRENT_SIZE(namelen));
if (ret) {
/* Eep. */
jffs2_clear_inode(inode);
@@ -693,7 +744,7 @@ static int jffs2_mknod (struct inode *dir_i, struct dentry *dentry, int mode, de
rd->node_crc = cpu_to_je32(crc32(0, rd, sizeof(*rd)-8));
rd->name_crc = cpu_to_je32(crc32(0, dentry->d_name.name, namelen));
- fd = jffs2_write_dirent(c, dir_f, rd, dentry->d_name.name, namelen, phys_ofs, ALLOC_NORMAL);
+ fd = jffs2_write_dirent(c, dir_f, rd, dentry->d_name.name, namelen, ALLOC_NORMAL);
if (IS_ERR(fd)) {
/* dirent failed to write. Delete the inode normally
diff --git a/fs/jffs2/erase.c b/fs/jffs2/erase.c
index dad68fdffe9..1862e8bc101 100644
--- a/fs/jffs2/erase.c
+++ b/fs/jffs2/erase.c
@@ -30,7 +30,6 @@ static void jffs2_erase_callback(struct erase_info *);
#endif
static void jffs2_erase_failed(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb, uint32_t bad_offset);
static void jffs2_erase_succeeded(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb);
-static void jffs2_free_all_node_refs(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb);
static void jffs2_mark_erased_block(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb);
static void jffs2_erase_block(struct jffs2_sb_info *c,
@@ -136,7 +135,7 @@ void jffs2_erase_pending_blocks(struct jffs2_sb_info *c, int count)
c->used_size -= jeb->used_size;
c->dirty_size -= jeb->dirty_size;
jeb->wasted_size = jeb->used_size = jeb->dirty_size = jeb->free_size = 0;
- jffs2_free_all_node_refs(c, jeb);
+ jffs2_free_jeb_node_refs(c, jeb);
list_add(&jeb->list, &c->erasing_list);
spin_unlock(&c->erase_completion_lock);
@@ -231,6 +230,7 @@ static inline void jffs2_remove_node_refs_from_ino_list(struct jffs2_sb_info *c,
at the end of the linked list. Stash it and continue
from the beginning of the list */
ic = (struct jffs2_inode_cache *)(*prev);
+ BUG_ON(ic->class != RAWNODE_CLASS_INODE_CACHE);
prev = &ic->nodes;
continue;
}
@@ -283,22 +283,27 @@ static inline void jffs2_remove_node_refs_from_ino_list(struct jffs2_sb_info *c,
jffs2_del_ino_cache(c, ic);
}
-static void jffs2_free_all_node_refs(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb)
+void jffs2_free_jeb_node_refs(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb)
{
- struct jffs2_raw_node_ref *ref;
+ struct jffs2_raw_node_ref *block, *ref;
D1(printk(KERN_DEBUG "Freeing all node refs for eraseblock offset 0x%08x\n", jeb->offset));
- while(jeb->first_node) {
- ref = jeb->first_node;
- jeb->first_node = ref->next_phys;
- /* Remove from the inode-list */
- if (ref->next_in_ino)
+ block = ref = jeb->first_node;
+
+ while (ref) {
+ if (ref->flash_offset == REF_LINK_NODE) {
+ ref = ref->next_in_ino;
+ jffs2_free_refblock(block);
+ block = ref;
+ continue;
+ }
+ if (ref->flash_offset != REF_EMPTY_NODE && ref->next_in_ino)
jffs2_remove_node_refs_from_ino_list(c, ref, jeb);
/* else it was a non-inode node or already removed, so don't bother */
- jffs2_free_raw_node_ref(ref);
+ ref++;
}
- jeb->last_node = NULL;
+ jeb->first_node = jeb->last_node = NULL;
}
static int jffs2_block_check_erase(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb, uint32_t *bad_offset)
@@ -351,7 +356,6 @@ fail:
static void jffs2_mark_erased_block(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb)
{
- struct jffs2_raw_node_ref *marker_ref = NULL;
size_t retlen;
int ret;
uint32_t bad_offset;
@@ -373,12 +377,8 @@ static void jffs2_mark_erased_block(struct jffs2_sb_info *c, struct jffs2_eraseb
goto filebad;
}
- jeb->first_node = jeb->last_node = NULL;
+ /* Everything else got zeroed before the erase */
jeb->free_size = c->sector_size;
- jeb->used_size = 0;
- jeb->dirty_size = 0;
- jeb->wasted_size = 0;
-
} else {
struct kvec vecs[1];
@@ -388,11 +388,7 @@ static void jffs2_mark_erased_block(struct jffs2_sb_info *c, struct jffs2_eraseb
.totlen = cpu_to_je32(c->cleanmarker_size)
};
- marker_ref = jffs2_alloc_raw_node_ref();
- if (!marker_ref) {
- printk(KERN_WARNING "Failed to allocate raw node ref for clean marker. Refiling\n");
- goto refile;
- }
+ jffs2_prealloc_raw_node_refs(c, jeb, 1);
marker.hdr_crc = cpu_to_je32(crc32(0, &marker, sizeof(struct jffs2_unknown_node)-4));
@@ -408,21 +404,13 @@ static void jffs2_mark_erased_block(struct jffs2_sb_info *c, struct jffs2_eraseb
printk(KERN_WARNING "Short write to newly-erased block at 0x%08x: Wanted %zd, got %zd\n",
jeb->offset, sizeof(marker), retlen);
- jffs2_free_raw_node_ref(marker_ref);
goto filebad;
}
- marker_ref->next_in_ino = NULL;
- marker_ref->next_phys = NULL;
- marker_ref->flash_offset = jeb->offset | REF_NORMAL;
- marker_ref->__totlen = c->cleanmarker_size;
-
- jeb->first_node = jeb->last_node = marker_ref;
-
- jeb->free_size = c->sector_size - c->cleanmarker_size;
- jeb->used_size = c->cleanmarker_size;
- jeb->dirty_size = 0;
- jeb->wasted_size = 0;
+ /* Everything else got zeroed before the erase */
+ jeb->free_size = c->sector_size;
+ /* FIXME Special case for cleanmarker in empty block */
+ jffs2_link_node_ref(c, jeb, jeb->offset | REF_NORMAL, c->cleanmarker_size, NULL);
}
spin_lock(&c->erase_completion_lock);
diff --git a/fs/jffs2/file.c b/fs/jffs2/file.c
index 9f4171213e5..bb8844f40e4 100644
--- a/fs/jffs2/file.c
+++ b/fs/jffs2/file.c
@@ -54,7 +54,12 @@ const struct file_operations jffs2_file_operations =
struct inode_operations jffs2_file_inode_operations =
{
- .setattr = jffs2_setattr
+ .permission = jffs2_permission,
+ .setattr = jffs2_setattr,
+ .setxattr = jffs2_setxattr,
+ .getxattr = jffs2_getxattr,
+ .listxattr = jffs2_listxattr,
+ .removexattr = jffs2_removexattr
};
struct address_space_operations jffs2_file_address_operations =
@@ -129,13 +134,13 @@ static int jffs2_prepare_write (struct file *filp, struct page *pg,
struct jffs2_sb_info *c = JFFS2_SB_INFO(inode->i_sb);
struct jffs2_raw_inode ri;
struct jffs2_full_dnode *fn;
- uint32_t phys_ofs, alloc_len;
+ uint32_t alloc_len;
D1(printk(KERN_DEBUG "Writing new hole frag 0x%x-0x%x between current EOF and new page\n",
(unsigned int)inode->i_size, pageofs));
- ret = jffs2_reserve_space(c, sizeof(ri), &phys_ofs, &alloc_len,
- ALLOC_NORMAL, JFFS2_SUMMARY_INODE_SIZE);
+ ret = jffs2_reserve_space(c, sizeof(ri), &alloc_len,
+ ALLOC_NORMAL, JFFS2_SUMMARY_INODE_SIZE);
if (ret)
return ret;
@@ -161,7 +166,7 @@ static int jffs2_prepare_write (struct file *filp, struct page *pg,
ri.node_crc = cpu_to_je32(crc32(0, &ri, sizeof(ri)-8));
ri.data_crc = cpu_to_je32(0);
- fn = jffs2_write_dnode(c, f, &ri, NULL, 0, phys_ofs, ALLOC_NORMAL);
+ fn = jffs2_write_dnode(c, f, &ri, NULL, 0, ALLOC_NORMAL);
if (IS_ERR(fn)) {
ret = PTR_ERR(fn);
@@ -215,12 +220,20 @@ static int jffs2_commit_write (struct file *filp, struct page *pg,
D1(printk(KERN_DEBUG "jffs2_commit_write(): ino #%lu, page at 0x%lx, range %d-%d, flags %lx\n",
inode->i_ino, pg->index << PAGE_CACHE_SHIFT, start, end, pg->flags));
- if (!start && end == PAGE_CACHE_SIZE) {
- /* We need to avoid deadlock with page_cache_read() in
- jffs2_garbage_collect_pass(). So we have to mark the
- page up to date, to prevent page_cache_read() from
- trying to re-lock it. */
- SetPageUptodate(pg);
+ if (end == PAGE_CACHE_SIZE) {
+ if (!start) {
+ /* We need to avoid deadlock with page_cache_read() in
+ jffs2_garbage_collect_pass(). So we have to mark the
+ page up to date, to prevent page_cache_read() from
+ trying to re-lock it. */
+ SetPageUptodate(pg);
+ } else {
+ /* When writing out the end of a page, write out the
+ _whole_ page. This helps to reduce the number of
+ nodes in files which have many short writes, like
+ syslog files. */
+ start = aligned_start = 0;
+ }
}
ri = jffs2_alloc_raw_inode();
diff --git a/fs/jffs2/fs.c b/fs/jffs2/fs.c
index 09e5d10b884..7b6c24b14f8 100644
--- a/fs/jffs2/fs.c
+++ b/fs/jffs2/fs.c
@@ -33,11 +33,11 @@ static int jffs2_do_setattr (struct inode *inode, struct iattr *iattr)
struct jffs2_inode_info *f = JFFS2_INODE_INFO(inode);
struct jffs2_sb_info *c = JFFS2_SB_INFO(inode->i_sb);
struct jffs2_raw_inode *ri;
- unsigned short dev;
+ union jffs2_device_node dev;
unsigned char *mdata = NULL;
int mdatalen = 0;
unsigned int ivalid;
- uint32_t phys_ofs, alloclen;
+ uint32_t alloclen;
int ret;
D1(printk(KERN_DEBUG "jffs2_setattr(): ino #%lu\n", inode->i_ino));
ret = inode_change_ok(inode, iattr);
@@ -51,20 +51,24 @@ static int jffs2_do_setattr (struct inode *inode, struct iattr *iattr)
it out again with the appropriate data attached */
if (S_ISBLK(inode->i_mode) || S_ISCHR(inode->i_mode)) {
/* For these, we don't actually need to read the old node */
- dev = old_encode_dev(inode->i_rdev);
+ mdatalen = jffs2_encode_dev(&dev, inode->i_rdev);
mdata = (char *)&dev;
- mdatalen = sizeof(dev);
D1(printk(KERN_DEBUG "jffs2_setattr(): Writing %d bytes of kdev_t\n", mdatalen));
} else if (S_ISLNK(inode->i_mode)) {
+ down(&f->sem);
mdatalen = f->metadata->size;
mdata = kmalloc(f->metadata->size, GFP_USER);
- if (!mdata)
+ if (!mdata) {
+ up(&f->sem);
return -ENOMEM;
+ }
ret = jffs2_read_dnode(c, f, f->metadata, mdata, 0, mdatalen);
if (ret) {
+ up(&f->sem);
kfree(mdata);
return ret;
}
+ up(&f->sem);
D1(printk(KERN_DEBUG "jffs2_setattr(): Writing %d bytes of symlink target\n", mdatalen));
}
@@ -75,8 +79,8 @@ static int jffs2_do_setattr (struct inode *inode, struct iattr *iattr)
return -ENOMEM;
}
- ret = jffs2_reserve_space(c, sizeof(*ri) + mdatalen, &phys_ofs, &alloclen,
- ALLOC_NORMAL, JFFS2_SUMMARY_INODE_SIZE);
+ ret = jffs2_reserve_space(c, sizeof(*ri) + mdatalen, &alloclen,
+ ALLOC_NORMAL, JFFS2_SUMMARY_INODE_SIZE);
if (ret) {
jffs2_free_raw_inode(ri);
if (S_ISLNK(inode->i_mode & S_IFMT))
@@ -127,7 +131,7 @@ static int jffs2_do_setattr (struct inode *inode, struct iattr *iattr)
else
ri->data_crc = cpu_to_je32(0);
- new_metadata = jffs2_write_dnode(c, f, ri, mdata, mdatalen, phys_ofs, ALLOC_NORMAL);
+ new_metadata = jffs2_write_dnode(c, f, ri, mdata, mdatalen, ALLOC_NORMAL);
if (S_ISLNK(inode->i_mode))
kfree(mdata);
@@ -180,7 +184,12 @@ static int jffs2_do_setattr (struct inode *inode, struct iattr *iattr)
int jffs2_setattr(struct dentry *dentry, struct iattr *iattr)
{
- return jffs2_do_setattr(dentry->d_inode, iattr);
+ int rc;
+
+ rc = jffs2_do_setattr(dentry->d_inode, iattr);
+ if (!rc && (iattr->ia_valid & ATTR_MODE))
+ rc = jffs2_acl_chmod(dentry->d_inode);
+ return rc;
}
int jffs2_statfs(struct super_block *sb, struct kstatfs *buf)
@@ -219,6 +228,7 @@ void jffs2_clear_inode (struct inode *inode)
D1(printk(KERN_DEBUG "jffs2_clear_inode(): ino #%lu mode %o\n", inode->i_ino, inode->i_mode));
+ jffs2_xattr_delete_inode(c, f->inocache);
jffs2_do_clear_inode(c, f);
}
@@ -227,6 +237,8 @@ void jffs2_read_inode (struct inode *inode)
struct jffs2_inode_info *f;
struct jffs2_sb_info *c;
struct jffs2_raw_inode latest_node;
+ union jffs2_device_node jdev;
+ dev_t rdev = 0;
int ret;
D1(printk(KERN_DEBUG "jffs2_read_inode(): inode->i_ino == %lu\n", inode->i_ino));
@@ -258,7 +270,6 @@ void jffs2_read_inode (struct inode *inode)
inode->i_blocks = (inode->i_size + 511) >> 9;
switch (inode->i_mode & S_IFMT) {
- jint16_t rdev;
case S_IFLNK:
inode->i_op = &jffs2_symlink_inode_operations;
@@ -292,8 +303,16 @@ void jffs2_read_inode (struct inode *inode)
case S_IFBLK:
case S_IFCHR:
/* Read the device numbers from the media */
+ if (f->metadata->size != sizeof(jdev.old) &&
+ f->metadata->size != sizeof(jdev.new)) {
+ printk(KERN_NOTICE "Device node has strange size %d\n", f->metadata->size);
+ up(&f->sem);
+ jffs2_do_clear_inode(c, f);
+ make_bad_inode(inode);
+ return;
+ }
D1(printk(KERN_DEBUG "Reading device numbers from flash\n"));
- if (jffs2_read_dnode(c, f, f->metadata, (char *)&rdev, 0, sizeof(rdev)) < 0) {
+ if (jffs2_read_dnode(c, f, f->metadata, (char *)&jdev, 0, f->metadata->size) < 0) {
/* Eep */
printk(KERN_NOTICE "Read device numbers for inode %lu failed\n", (unsigned long)inode->i_ino);
up(&f->sem);
@@ -301,12 +320,15 @@ void jffs2_read_inode (struct inode *inode)
make_bad_inode(inode);
return;
}
+ if (f->metadata->size == sizeof(jdev.old))
+ rdev = old_decode_dev(je16_to_cpu(jdev.old));
+ else
+ rdev = new_decode_dev(je32_to_cpu(jdev.new));
case S_IFSOCK:
case S_IFIFO:
inode->i_op = &jffs2_file_inode_operations;
- init_special_inode(inode, inode->i_mode,
- old_decode_dev((je16_to_cpu(rdev))));
+ init_special_inode(inode, inode->i_mode, rdev);
break;
default:
@@ -492,6 +514,8 @@ int jffs2_do_fill_super(struct super_block *sb, void *data, int silent)
}
memset(c->inocache_list, 0, INOCACHE_HASHSIZE * sizeof(struct jffs2_inode_cache *));
+ jffs2_init_xattr_subsystem(c);
+
if ((ret = jffs2_do_mount_fs(c)))
goto out_inohash;
@@ -526,6 +550,7 @@ int jffs2_do_fill_super(struct super_block *sb, void *data, int silent)
else
kfree(c->blocks);
out_inohash:
+ jffs2_clear_xattr_subsystem(c);
kfree(c->inocache_list);
out_wbuf:
jffs2_flash_cleanup(c);
@@ -639,13 +664,6 @@ static int jffs2_flash_setup(struct jffs2_sb_info *c) {
return ret;
}
- /* add setups for other bizarre flashes here... */
- if (jffs2_nor_ecc(c)) {
- ret = jffs2_nor_ecc_flash_setup(c);
- if (ret)
- return ret;
- }
-
/* and Dataflash */
if (jffs2_dataflash(c)) {
ret = jffs2_dataflash_setup(c);
@@ -669,11 +687,6 @@ void jffs2_flash_cleanup(struct jffs2_sb_info *c) {
jffs2_nand_flash_cleanup(c);
}
- /* add cleanups for other bizarre flashes here... */
- if (jffs2_nor_ecc(c)) {
- jffs2_nor_ecc_flash_cleanup(c);
- }
-
/* and DataFlash */
if (jffs2_dataflash(c)) {
jffs2_dataflash_cleanup(c);
diff --git a/fs/jffs2/gc.c b/fs/jffs2/gc.c
index f9ffece453a..477c526d638 100644
--- a/fs/jffs2/gc.c
+++ b/fs/jffs2/gc.c
@@ -125,6 +125,7 @@ int jffs2_garbage_collect_pass(struct jffs2_sb_info *c)
struct jffs2_eraseblock *jeb;
struct jffs2_raw_node_ref *raw;
int ret = 0, inum, nlink;
+ int xattr = 0;
if (down_interruptible(&c->alloc_sem))
return -EINTR;
@@ -138,7 +139,7 @@ int jffs2_garbage_collect_pass(struct jffs2_sb_info *c)
the node CRCs etc. Do it now. */
/* checked_ino is protected by the alloc_sem */
- if (c->checked_ino > c->highest_ino) {
+ if (c->checked_ino > c->highest_ino && xattr) {
printk(KERN_CRIT "Checked all inodes but still 0x%x bytes of unchecked space?\n",
c->unchecked_size);
jffs2_dbg_dump_block_lists_nolock(c);
@@ -148,6 +149,9 @@ int jffs2_garbage_collect_pass(struct jffs2_sb_info *c)
spin_unlock(&c->erase_completion_lock);
+ if (!xattr)
+ xattr = jffs2_verify_xattr(c);
+
spin_lock(&c->inocache_lock);
ic = jffs2_get_ino_cache(c, c->checked_ino++);
@@ -181,6 +185,10 @@ int jffs2_garbage_collect_pass(struct jffs2_sb_info *c)
and trigger the BUG() above while we haven't yet
finished checking all its nodes */
D1(printk(KERN_DEBUG "Waiting for ino #%u to finish reading\n", ic->ino));
+ /* We need to come back again for the _same_ inode. We've
+ made no progress in this case, but that should be OK */
+ c->checked_ino--;
+
up(&c->alloc_sem);
sleep_on_spinunlock(&c->inocache_wq, &c->inocache_lock);
return 0;
@@ -231,7 +239,7 @@ int jffs2_garbage_collect_pass(struct jffs2_sb_info *c)
while(ref_obsolete(raw)) {
D1(printk(KERN_DEBUG "Node at 0x%08x is obsolete... skipping\n", ref_offset(raw)));
- raw = raw->next_phys;
+ raw = ref_next(raw);
if (unlikely(!raw)) {
printk(KERN_WARNING "eep. End of raw list while still supposedly nodes to GC\n");
printk(KERN_WARNING "erase block at 0x%08x. free_size 0x%08x, dirty_size 0x%08x, used_size 0x%08x\n",
@@ -248,16 +256,37 @@ int jffs2_garbage_collect_pass(struct jffs2_sb_info *c)
if (!raw->next_in_ino) {
/* Inode-less node. Clean marker, snapshot or something like that */
- /* FIXME: If it's something that needs to be copied, including something
- we don't grok that has JFFS2_NODETYPE_RWCOMPAT_COPY, we should do so */
spin_unlock(&c->erase_completion_lock);
- jffs2_mark_node_obsolete(c, raw);
+ if (ref_flags(raw) == REF_PRISTINE) {
+ /* It's an unknown node with JFFS2_FEATURE_RWCOMPAT_COPY */
+ jffs2_garbage_collect_pristine(c, NULL, raw);
+ } else {
+ /* Just mark it obsolete */
+ jffs2_mark_node_obsolete(c, raw);
+ }
up(&c->alloc_sem);
goto eraseit_lock;
}
ic = jffs2_raw_ref_to_ic(raw);
+#ifdef CONFIG_JFFS2_FS_XATTR
+ /* When 'ic' refers to an xattr_datum or xattr_ref, this node is GCed as an xattr.
+ * We can tell whether it belongs to an inode or an xattr by ic->class. */
+ if (ic->class == RAWNODE_CLASS_XATTR_DATUM
+ || ic->class == RAWNODE_CLASS_XATTR_REF) {
+ BUG_ON(raw->next_in_ino != (void *)ic);
+ spin_unlock(&c->erase_completion_lock);
+
+ if (ic->class == RAWNODE_CLASS_XATTR_DATUM) {
+ ret = jffs2_garbage_collect_xattr_datum(c, (struct jffs2_xattr_datum *)ic);
+ } else {
+ ret = jffs2_garbage_collect_xattr_ref(c, (struct jffs2_xattr_ref *)ic);
+ }
+ goto release_sem;
+ }
+#endif
+
/* We need to hold the inocache. Either the erase_completion_lock or
the inocache_lock are sufficient; we trade down since the inocache_lock
causes less contention. */
@@ -499,7 +528,6 @@ static int jffs2_garbage_collect_pristine(struct jffs2_sb_info *c,
struct jffs2_raw_node_ref *raw)
{
union jffs2_node_union *node;
- struct jffs2_raw_node_ref *nraw;
size_t retlen;
int ret;
uint32_t phys_ofs, alloclen;
@@ -508,15 +536,16 @@ static int jffs2_garbage_collect_pristine(struct jffs2_sb_info *c,
D1(printk(KERN_DEBUG "Going to GC REF_PRISTINE node at 0x%08x\n", ref_offset(raw)));
- rawlen = ref_totlen(c, c->gcblock, raw);
+ alloclen = rawlen = ref_totlen(c, c->gcblock, raw);
/* Ask for a small amount of space (or the totlen if smaller) because we
don't want to force wastage of the end of a block if splitting would
work. */
- ret = jffs2_reserve_space_gc(c, min_t(uint32_t, sizeof(struct jffs2_raw_inode) +
- JFFS2_MIN_DATA_LEN, rawlen), &phys_ofs, &alloclen, rawlen);
- /* this is not the exact summary size of it,
- it is only an upper estimation */
+ if (ic && alloclen > sizeof(struct jffs2_raw_inode) + JFFS2_MIN_DATA_LEN)
+ alloclen = sizeof(struct jffs2_raw_inode) + JFFS2_MIN_DATA_LEN;
+
+ ret = jffs2_reserve_space_gc(c, alloclen, &alloclen, rawlen);
+ /* 'rawlen' is not the exact summary size; it is only an upper estimate */
if (ret)
return ret;
@@ -580,22 +609,17 @@ static int jffs2_garbage_collect_pristine(struct jffs2_sb_info *c,
}
break;
default:
- printk(KERN_WARNING "Unknown node type for REF_PRISTINE node at 0x%08x: 0x%04x\n",
- ref_offset(raw), je16_to_cpu(node->u.nodetype));
- goto bail;
- }
-
- nraw = jffs2_alloc_raw_node_ref();
- if (!nraw) {
- ret = -ENOMEM;
- goto out_node;
+ /* If it's inode-less, we don't _know_ what it is. Just copy it intact */
+ if (ic) {
+ printk(KERN_WARNING "Unknown node type for REF_PRISTINE node at 0x%08x: 0x%04x\n",
+ ref_offset(raw), je16_to_cpu(node->u.nodetype));
+ goto bail;
+ }
}
/* OK, all the CRCs are good; this node can just be copied as-is. */
retry:
- nraw->flash_offset = phys_ofs;
- nraw->__totlen = rawlen;
- nraw->next_phys = NULL;
+ phys_ofs = write_ofs(c);
ret = jffs2_flash_write(c, phys_ofs, rawlen, &retlen, (char *)node);
@@ -603,17 +627,11 @@ static int jffs2_garbage_collect_pristine(struct jffs2_sb_info *c,
printk(KERN_NOTICE "Write of %d bytes at 0x%08x failed. returned %d, retlen %zd\n",
rawlen, phys_ofs, ret, retlen);
if (retlen) {
- /* Doesn't belong to any inode */
- nraw->next_in_ino = NULL;
-
- nraw->flash_offset |= REF_OBSOLETE;
- jffs2_add_physical_node_ref(c, nraw);
- jffs2_mark_node_obsolete(c, nraw);
+ jffs2_add_physical_node_ref(c, phys_ofs | REF_OBSOLETE, rawlen, NULL);
} else {
- printk(KERN_NOTICE "Not marking the space at 0x%08x as dirty because the flash driver returned retlen zero\n", nraw->flash_offset);
- jffs2_free_raw_node_ref(nraw);
+ printk(KERN_NOTICE "Not marking the space at 0x%08x as dirty because the flash driver returned retlen zero\n", phys_ofs);
}
- if (!retried && (nraw = jffs2_alloc_raw_node_ref())) {
+ if (!retried) {
/* Try to reallocate space and retry */
uint32_t dummy;
struct jffs2_eraseblock *jeb = &c->blocks[phys_ofs / c->sector_size];
@@ -625,7 +643,7 @@ static int jffs2_garbage_collect_pristine(struct jffs2_sb_info *c,
jffs2_dbg_acct_sanity_check(c,jeb);
jffs2_dbg_acct_paranoia_check(c, jeb);
- ret = jffs2_reserve_space_gc(c, rawlen, &phys_ofs, &dummy, rawlen);
+ ret = jffs2_reserve_space_gc(c, rawlen, &dummy, rawlen);
/* this is not the exact summary size of it,
it is only an upper estimation */
@@ -638,25 +656,13 @@ static int jffs2_garbage_collect_pristine(struct jffs2_sb_info *c,
goto retry;
}
D1(printk(KERN_DEBUG "Failed to allocate space to retry failed write: %d!\n", ret));
- jffs2_free_raw_node_ref(nraw);
}
- jffs2_free_raw_node_ref(nraw);
if (!ret)
ret = -EIO;
goto out_node;
}
- nraw->flash_offset |= REF_PRISTINE;
- jffs2_add_physical_node_ref(c, nraw);
-
- /* Link into per-inode list. This is safe because of the ic
- state being INO_STATE_GC. Note that if we're doing this
- for an inode which is in-core, the 'nraw' pointer is then
- going to be fetched from ic->nodes by our caller. */
- spin_lock(&c->erase_completion_lock);
- nraw->next_in_ino = ic->nodes;
- ic->nodes = nraw;
- spin_unlock(&c->erase_completion_lock);
+ jffs2_add_physical_node_ref(c, phys_ofs | REF_PRISTINE, rawlen, ic);
jffs2_mark_node_obsolete(c, raw);
D1(printk(KERN_DEBUG "WHEEE! GC REF_PRISTINE node at 0x%08x succeeded\n", ref_offset(raw)));
@@ -675,19 +681,16 @@ static int jffs2_garbage_collect_metadata(struct jffs2_sb_info *c, struct jffs2_
struct jffs2_full_dnode *new_fn;
struct jffs2_raw_inode ri;
struct jffs2_node_frag *last_frag;
- jint16_t dev;
+ union jffs2_device_node dev;
char *mdata = NULL, mdatalen = 0;
- uint32_t alloclen, phys_ofs, ilen;
+ uint32_t alloclen, ilen;
int ret;
if (S_ISBLK(JFFS2_F_I_MODE(f)) ||
S_ISCHR(JFFS2_F_I_MODE(f)) ) {
/* For these, we don't actually need to read the old node */
- /* FIXME: for minor or major > 255. */
- dev = cpu_to_je16(((JFFS2_F_I_RDEV_MAJ(f) << 8) |
- JFFS2_F_I_RDEV_MIN(f)));
+ mdatalen = jffs2_encode_dev(&dev, JFFS2_F_I_RDEV(f));
mdata = (char *)&dev;
- mdatalen = sizeof(dev);
D1(printk(KERN_DEBUG "jffs2_garbage_collect_metadata(): Writing %d bytes of kdev_t\n", mdatalen));
} else if (S_ISLNK(JFFS2_F_I_MODE(f))) {
mdatalen = fn->size;
@@ -706,7 +709,7 @@ static int jffs2_garbage_collect_metadata(struct jffs2_sb_info *c, struct jffs2_
}
- ret = jffs2_reserve_space_gc(c, sizeof(ri) + mdatalen, &phys_ofs, &alloclen,
+ ret = jffs2_reserve_space_gc(c, sizeof(ri) + mdatalen, &alloclen,
JFFS2_SUMMARY_INODE_SIZE);
if (ret) {
printk(KERN_WARNING "jffs2_reserve_space_gc of %zd bytes for garbage_collect_metadata failed: %d\n",
@@ -744,7 +747,7 @@ static int jffs2_garbage_collect_metadata(struct jffs2_sb_info *c, struct jffs2_
ri.node_crc = cpu_to_je32(crc32(0, &ri, sizeof(ri)-8));
ri.data_crc = cpu_to_je32(crc32(0, mdata, mdatalen));
- new_fn = jffs2_write_dnode(c, f, &ri, mdata, mdatalen, phys_ofs, ALLOC_GC);
+ new_fn = jffs2_write_dnode(c, f, &ri, mdata, mdatalen, ALLOC_GC);
if (IS_ERR(new_fn)) {
printk(KERN_WARNING "Error writing new dnode: %ld\n", PTR_ERR(new_fn));
@@ -765,7 +768,7 @@ static int jffs2_garbage_collect_dirent(struct jffs2_sb_info *c, struct jffs2_er
{
struct jffs2_full_dirent *new_fd;
struct jffs2_raw_dirent rd;
- uint32_t alloclen, phys_ofs;
+ uint32_t alloclen;
int ret;
rd.magic = cpu_to_je16(JFFS2_MAGIC_BITMASK);
@@ -787,14 +790,14 @@ static int jffs2_garbage_collect_dirent(struct jffs2_sb_info *c, struct jffs2_er
rd.node_crc = cpu_to_je32(crc32(0, &rd, sizeof(rd)-8));
rd.name_crc = cpu_to_je32(crc32(0, fd->name, rd.nsize));
- ret = jffs2_reserve_space_gc(c, sizeof(rd)+rd.nsize, &phys_ofs, &alloclen,
+ ret = jffs2_reserve_space_gc(c, sizeof(rd)+rd.nsize, &alloclen,
JFFS2_SUMMARY_DIRENT_SIZE(rd.nsize));
if (ret) {
printk(KERN_WARNING "jffs2_reserve_space_gc of %zd bytes for garbage_collect_dirent failed: %d\n",
sizeof(rd)+rd.nsize, ret);
return ret;
}
- new_fd = jffs2_write_dirent(c, f, &rd, fd->name, rd.nsize, phys_ofs, ALLOC_GC);
+ new_fd = jffs2_write_dirent(c, f, &rd, fd->name, rd.nsize, ALLOC_GC);
if (IS_ERR(new_fd)) {
printk(KERN_WARNING "jffs2_write_dirent in garbage_collect_dirent failed: %ld\n", PTR_ERR(new_fd));
@@ -922,7 +925,7 @@ static int jffs2_garbage_collect_hole(struct jffs2_sb_info *c, struct jffs2_eras
struct jffs2_raw_inode ri;
struct jffs2_node_frag *frag;
struct jffs2_full_dnode *new_fn;
- uint32_t alloclen, phys_ofs, ilen;
+ uint32_t alloclen, ilen;
int ret;
D1(printk(KERN_DEBUG "Writing replacement hole node for ino #%u from offset 0x%x to 0x%x\n",
@@ -1001,14 +1004,14 @@ static int jffs2_garbage_collect_hole(struct jffs2_sb_info *c, struct jffs2_eras
ri.data_crc = cpu_to_je32(0);
ri.node_crc = cpu_to_je32(crc32(0, &ri, sizeof(ri)-8));
- ret = jffs2_reserve_space_gc(c, sizeof(ri), &phys_ofs, &alloclen,
- JFFS2_SUMMARY_INODE_SIZE);
+ ret = jffs2_reserve_space_gc(c, sizeof(ri), &alloclen,
+ JFFS2_SUMMARY_INODE_SIZE);
if (ret) {
printk(KERN_WARNING "jffs2_reserve_space_gc of %zd bytes for garbage_collect_hole failed: %d\n",
sizeof(ri), ret);
return ret;
}
- new_fn = jffs2_write_dnode(c, f, &ri, NULL, 0, phys_ofs, ALLOC_GC);
+ new_fn = jffs2_write_dnode(c, f, &ri, NULL, 0, ALLOC_GC);
if (IS_ERR(new_fn)) {
printk(KERN_WARNING "Error writing new hole node: %ld\n", PTR_ERR(new_fn));
@@ -1070,7 +1073,7 @@ static int jffs2_garbage_collect_dnode(struct jffs2_sb_info *c, struct jffs2_era
{
struct jffs2_full_dnode *new_fn;
struct jffs2_raw_inode ri;
- uint32_t alloclen, phys_ofs, offset, orig_end, orig_start;
+ uint32_t alloclen, offset, orig_end, orig_start;
int ret = 0;
unsigned char *comprbuf = NULL, *writebuf;
unsigned long pg;
@@ -1227,7 +1230,7 @@ static int jffs2_garbage_collect_dnode(struct jffs2_sb_info *c, struct jffs2_era
uint32_t cdatalen;
uint16_t comprtype = JFFS2_COMPR_NONE;
- ret = jffs2_reserve_space_gc(c, sizeof(ri) + JFFS2_MIN_DATA_LEN, &phys_ofs,
+ ret = jffs2_reserve_space_gc(c, sizeof(ri) + JFFS2_MIN_DATA_LEN,
&alloclen, JFFS2_SUMMARY_INODE_SIZE);
if (ret) {
@@ -1264,7 +1267,7 @@ static int jffs2_garbage_collect_dnode(struct jffs2_sb_info *c, struct jffs2_era
ri.node_crc = cpu_to_je32(crc32(0, &ri, sizeof(ri)-8));
ri.data_crc = cpu_to_je32(crc32(0, comprbuf, cdatalen));
- new_fn = jffs2_write_dnode(c, f, &ri, comprbuf, cdatalen, phys_ofs, ALLOC_GC);
+ new_fn = jffs2_write_dnode(c, f, &ri, comprbuf, cdatalen, ALLOC_GC);
jffs2_free_comprbuf(comprbuf, writebuf);
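The change repeated throughout gc.c above is that jffs2_reserve_space_gc() no longer hands back a physical offset: callers receive only the reserved length, and the node is later placed at the current fill point of c->nextblock. A minimal sketch of the new calling convention, condensed from jffs2_garbage_collect_metadata() above (error handling elided; c, f, ri, mdata, mdatalen and new_fn as in that function):

	uint32_t alloclen;
	int ret;

	/* Only the reserved length comes back; the old &phys_ofs argument is gone. */
	ret = jffs2_reserve_space_gc(c, sizeof(ri) + mdatalen, &alloclen,
				     JFFS2_SUMMARY_INODE_SIZE);
	if (ret)
		return ret;

	/* The write path now places the node at write_ofs(c), i.e. the current
	 * fill point of c->nextblock, so callers no longer carry a phys_ofs. */
	new_fn = jffs2_write_dnode(c, f, &ri, mdata, mdatalen, ALLOC_GC);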
diff --git a/fs/jffs2/histo.h b/fs/jffs2/histo.h
deleted file mode 100644
index 22a93a08210..00000000000
--- a/fs/jffs2/histo.h
+++ /dev/null
@@ -1,3 +0,0 @@
-/* This file provides the bit-probabilities for the input file */
-#define BIT_DIVIDER 629
-static int bits[9] = { 179,167,183,165,159,198,178,119,}; /* ia32 .so files */
diff --git a/fs/jffs2/jffs2_fs_i.h b/fs/jffs2/jffs2_fs_i.h
new file mode 100644
index 00000000000..2e0cc8e00b8
--- /dev/null
+++ b/fs/jffs2/jffs2_fs_i.h
@@ -0,0 +1,55 @@
+/* $Id: jffs2_fs_i.h,v 1.19 2005/11/07 11:14:52 gleixner Exp $ */
+
+#ifndef _JFFS2_FS_I
+#define _JFFS2_FS_I
+
+#include <linux/version.h>
+#include <linux/rbtree.h>
+#include <linux/posix_acl.h>
+#include <asm/semaphore.h>
+
+struct jffs2_inode_info {
+ /* We need an internal mutex similar to inode->i_mutex.
+ Unfortunately, we can't use the existing one, because
+ either the GC would deadlock, or we'd have to release it
+ before letting GC proceed. Or we'd have to put ugliness
+ into the GC code so it didn't attempt to obtain the i_mutex
+ for the inode(s) which are already locked */
+ struct semaphore sem;
+
+ /* The highest (datanode) version number used for this ino */
+ uint32_t highest_version;
+
+ /* List of data fragments which make up the file */
+ struct rb_root fragtree;
+
+ /* There may be one datanode which isn't referenced by any of the
+ above fragments, if it contains a metadata update but no actual
+ data - or if this is a directory inode */
+ /* This also holds the _only_ dnode for symlinks/device nodes,
+ etc. */
+ struct jffs2_full_dnode *metadata;
+
+ /* Directory entries */
+ struct jffs2_full_dirent *dents;
+
+ /* The target path if this is the inode of a symlink */
+ unsigned char *target;
+
+ /* Some stuff we just have to keep in-core at all times, for each inode. */
+ struct jffs2_inode_cache *inocache;
+
+ uint16_t flags;
+ uint8_t usercompr;
+#if !defined (__ECOS)
+#if LINUX_VERSION_CODE > KERNEL_VERSION(2,5,2)
+ struct inode vfs_inode;
+#endif
+#endif
+#ifdef CONFIG_JFFS2_FS_POSIX_ACL
+ struct posix_acl *i_acl_access;
+ struct posix_acl *i_acl_default;
+#endif
+};
+
+#endif /* _JFFS2_FS_I */
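Because vfs_inode is embedded directly in struct jffs2_inode_info, the filesystem gets back from a plain struct inode to its private state with a container_of() cast rather than a separate allocation. A sketch of that relationship (the helper name below is illustrative; the tree's own accessors are the JFFS2_INODE_INFO()/OFNI_EDONI_2SFFJ() macro pair in os-linux.h):

	#include <linux/fs.h>
	#include <linux/kernel.h>	/* container_of() */

	static inline struct jffs2_inode_info *to_jffs2_inode(struct inode *inode)
	{
		/* vfs_inode is a member of jffs2_inode_info, so subtracting its
		 * offset recovers the enclosing private structure. */
		return container_of(inode, struct jffs2_inode_info, vfs_inode);
	}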
diff --git a/fs/jffs2/jffs2_fs_sb.h b/fs/jffs2/jffs2_fs_sb.h
new file mode 100644
index 00000000000..935fec1b120
--- /dev/null
+++ b/fs/jffs2/jffs2_fs_sb.h
@@ -0,0 +1,133 @@
+/* $Id: jffs2_fs_sb.h,v 1.54 2005/09/21 13:37:34 dedekind Exp $ */
+
+#ifndef _JFFS2_FS_SB
+#define _JFFS2_FS_SB
+
+#include <linux/types.h>
+#include <linux/spinlock.h>
+#include <linux/workqueue.h>
+#include <linux/completion.h>
+#include <asm/semaphore.h>
+#include <linux/timer.h>
+#include <linux/wait.h>
+#include <linux/list.h>
+#include <linux/rwsem.h>
+
+#define JFFS2_SB_FLAG_RO 1
+#define JFFS2_SB_FLAG_SCANNING 2 /* Flash scanning is in progress */
+#define JFFS2_SB_FLAG_BUILDING 4 /* File system building is in progress */
+
+struct jffs2_inodirty;
+
+/* A struct for the overall file system control. Pointers to
+ jffs2_sb_info structs are named `c' in the source code.
+ Nee jffs_control
+*/
+struct jffs2_sb_info {
+ struct mtd_info *mtd;
+
+ uint32_t highest_ino;
+ uint32_t checked_ino;
+
+ unsigned int flags;
+
+ struct task_struct *gc_task; /* GC task struct */
+ struct completion gc_thread_start; /* GC thread start completion */
+ struct completion gc_thread_exit; /* GC thread exit completion port */
+
+ struct semaphore alloc_sem; /* Used to protect all the following
+ fields, and also to protect against
+ out-of-order writing of nodes. And GC. */
+ uint32_t cleanmarker_size; /* Size of an _inline_ CLEANMARKER
+ (i.e. zero for OOB CLEANMARKER) */
+
+ uint32_t flash_size;
+ uint32_t used_size;
+ uint32_t dirty_size;
+ uint32_t wasted_size;
+ uint32_t free_size;
+ uint32_t erasing_size;
+ uint32_t bad_size;
+ uint32_t sector_size;
+ uint32_t unchecked_size;
+
+ uint32_t nr_free_blocks;
+ uint32_t nr_erasing_blocks;
+
+ /* Number of free blocks there must be before we... */
+ uint8_t resv_blocks_write; /* ... allow a normal filesystem write */
+ uint8_t resv_blocks_deletion; /* ... allow a normal filesystem deletion */
+ uint8_t resv_blocks_gctrigger; /* ... wake up the GC thread */
+ uint8_t resv_blocks_gcbad; /* ... pick a block from the bad_list to GC */
+ uint8_t resv_blocks_gcmerge; /* ... merge pages when garbage collecting */
+
+ uint32_t nospc_dirty_size;
+
+ uint32_t nr_blocks;
+ struct jffs2_eraseblock *blocks; /* The whole array of blocks. Used for getting blocks
+ * from the offset (blocks[ofs / sector_size]) */
+ struct jffs2_eraseblock *nextblock; /* The block we're currently filling */
+
+ struct jffs2_eraseblock *gcblock; /* The block we're currently garbage-collecting */
+
+ struct list_head clean_list; /* Blocks 100% full of clean data */
+ struct list_head very_dirty_list; /* Blocks with lots of dirty space */
+ struct list_head dirty_list; /* Blocks with some dirty space */
+ struct list_head erasable_list; /* Blocks which are completely dirty, and need erasing */
+ struct list_head erasable_pending_wbuf_list; /* Blocks which need erasing but only after the current wbuf is flushed */
+ struct list_head erasing_list; /* Blocks which are currently erasing */
+ struct list_head erase_pending_list; /* Blocks which need erasing now */
+ struct list_head erase_complete_list; /* Blocks which are erased and need the clean marker written to them */
+ struct list_head free_list; /* Blocks which are free and ready to be used */
+ struct list_head bad_list; /* Bad blocks. */
+ struct list_head bad_used_list; /* Bad blocks with valid data in. */
+
+ spinlock_t erase_completion_lock; /* Protect free_list and erasing_list
+ against erase completion handler */
+ wait_queue_head_t erase_wait; /* For waiting for erases to complete */
+
+ wait_queue_head_t inocache_wq;
+ struct jffs2_inode_cache **inocache_list;
+ spinlock_t inocache_lock;
+
+ /* Sem to allow jffs2_garbage_collect_deletion_dirent to
+ drop the erase_completion_lock while it's holding a pointer
+ to an obsoleted node. I don't like this. Alternatives welcomed. */
+ struct semaphore erase_free_sem;
+
+ uint32_t wbuf_pagesize; /* 0 for NOR and other flashes with no wbuf */
+
+#ifdef CONFIG_JFFS2_FS_WRITEBUFFER
+ /* Write-behind buffer for NAND flash */
+ unsigned char *wbuf;
+ unsigned char *oobbuf;
+ uint32_t wbuf_ofs;
+ uint32_t wbuf_len;
+ struct jffs2_inodirty *wbuf_inodes;
+
+ struct rw_semaphore wbuf_sem; /* Protects the write buffer */
+
+ /* Information about out-of-band area usage... */
+ struct nand_ecclayout *ecclayout;
+ uint32_t badblock_pos;
+ uint32_t fsdata_pos;
+ uint32_t fsdata_len;
+#endif
+
+ struct jffs2_summary *summary; /* Summary information */
+
+#ifdef CONFIG_JFFS2_FS_XATTR
+#define XATTRINDEX_HASHSIZE (57)
+ uint32_t highest_xid;
+ struct list_head xattrindex[XATTRINDEX_HASHSIZE];
+ struct list_head xattr_unchecked;
+ struct jffs2_xattr_ref *xref_temp;
+ struct rw_semaphore xattr_sem;
+ uint32_t xdatum_mem_usage;
+ uint32_t xdatum_mem_threshold;
+#endif
+ /* OS-private pointer for getting back to master superblock info */
+ void *os_priv;
+};
+
+#endif /* _JFFS2_FS_SB */
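The resv_blocks_* fields above form a ladder of free-space watermarks. A simplified, purely illustrative condensation of how such a threshold is consulted; the real checks live in jffs2_reserve_space() and jffs2_thread_should_wake() in nodemgmt.c and also weigh reclaimable dirty space:

	/* Hypothetical helper, for illustration only. */
	static int jffs2_enough_free_blocks(struct jffs2_sb_info *c, int prio)
	{
		uint32_t avail = c->nr_free_blocks + c->nr_erasing_blocks;
		uint32_t needed = (prio == ALLOC_DELETION) ? c->resv_blocks_deletion
							   : c->resv_blocks_write;

		return avail >= needed;	/* below this, only GC gets to write */
	}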
diff --git a/fs/jffs2/malloc.c b/fs/jffs2/malloc.c
index 036cbd11c00..4889d0700c0 100644
--- a/fs/jffs2/malloc.c
+++ b/fs/jffs2/malloc.c
@@ -26,6 +26,10 @@ static kmem_cache_t *tmp_dnode_info_slab;
static kmem_cache_t *raw_node_ref_slab;
static kmem_cache_t *node_frag_slab;
static kmem_cache_t *inode_cache_slab;
+#ifdef CONFIG_JFFS2_FS_XATTR
+static kmem_cache_t *xattr_datum_cache;
+static kmem_cache_t *xattr_ref_cache;
+#endif
int __init jffs2_create_slab_caches(void)
{
@@ -53,8 +57,8 @@ int __init jffs2_create_slab_caches(void)
if (!tmp_dnode_info_slab)
goto err;
- raw_node_ref_slab = kmem_cache_create("jffs2_raw_node_ref",
- sizeof(struct jffs2_raw_node_ref),
+ raw_node_ref_slab = kmem_cache_create("jffs2_refblock",
+ sizeof(struct jffs2_raw_node_ref) * (REFS_PER_BLOCK + 1),
0, 0, NULL, NULL);
if (!raw_node_ref_slab)
goto err;
@@ -68,8 +72,24 @@ int __init jffs2_create_slab_caches(void)
inode_cache_slab = kmem_cache_create("jffs2_inode_cache",
sizeof(struct jffs2_inode_cache),
0, 0, NULL, NULL);
- if (inode_cache_slab)
- return 0;
+ if (!inode_cache_slab)
+ goto err;
+
+#ifdef CONFIG_JFFS2_FS_XATTR
+ xattr_datum_cache = kmem_cache_create("jffs2_xattr_datum",
+ sizeof(struct jffs2_xattr_datum),
+ 0, 0, NULL, NULL);
+ if (!xattr_datum_cache)
+ goto err;
+
+ xattr_ref_cache = kmem_cache_create("jffs2_xattr_ref",
+ sizeof(struct jffs2_xattr_ref),
+ 0, 0, NULL, NULL);
+ if (!xattr_ref_cache)
+ goto err;
+#endif
+
+ return 0;
err:
jffs2_destroy_slab_caches();
return -ENOMEM;
@@ -91,6 +111,12 @@ void jffs2_destroy_slab_caches(void)
kmem_cache_destroy(node_frag_slab);
if(inode_cache_slab)
kmem_cache_destroy(inode_cache_slab);
+#ifdef CONFIG_JFFS2_FS_XATTR
+ if (xattr_datum_cache)
+ kmem_cache_destroy(xattr_datum_cache);
+ if (xattr_ref_cache)
+ kmem_cache_destroy(xattr_ref_cache);
+#endif
}
struct jffs2_full_dirent *jffs2_alloc_full_dirent(int namesize)
@@ -164,15 +190,65 @@ void jffs2_free_tmp_dnode_info(struct jffs2_tmp_dnode_info *x)
kmem_cache_free(tmp_dnode_info_slab, x);
}
-struct jffs2_raw_node_ref *jffs2_alloc_raw_node_ref(void)
+struct jffs2_raw_node_ref *jffs2_alloc_refblock(void)
{
struct jffs2_raw_node_ref *ret;
+
ret = kmem_cache_alloc(raw_node_ref_slab, GFP_KERNEL);
- dbg_memalloc("%p\n", ret);
+ if (ret) {
+ int i = 0;
+ for (i=0; i < REFS_PER_BLOCK; i++) {
+ ret[i].flash_offset = REF_EMPTY_NODE;
+ ret[i].next_in_ino = NULL;
+ }
+ ret[i].flash_offset = REF_LINK_NODE;
+ ret[i].next_in_ino = NULL;
+ }
return ret;
}
-void jffs2_free_raw_node_ref(struct jffs2_raw_node_ref *x)
+int jffs2_prealloc_raw_node_refs(struct jffs2_sb_info *c,
+ struct jffs2_eraseblock *jeb, int nr)
+{
+ struct jffs2_raw_node_ref **p, *ref;
+ int i = nr;
+
+ dbg_memalloc("%d\n", nr);
+
+ p = &jeb->last_node;
+ ref = *p;
+
+ dbg_memalloc("Reserving %d refs for block @0x%08x\n", nr, jeb->offset);
+
+ /* If jeb->last_node is really a valid node then skip over it */
+ if (ref && ref->flash_offset != REF_EMPTY_NODE)
+ ref++;
+
+ while (i) {
+ if (!ref) {
+ dbg_memalloc("Allocating new refblock linked from %p\n", p);
+ ref = *p = jffs2_alloc_refblock();
+ if (!ref)
+ return -ENOMEM;
+ }
+ if (ref->flash_offset == REF_LINK_NODE) {
+ p = &ref->next_in_ino;
+ ref = *p;
+ continue;
+ }
+ i--;
+ ref++;
+ }
+ jeb->allocated_refs = nr;
+
+ dbg_memalloc("Reserved %d refs for block @0x%08x, last_node is %p (%08x,%p)\n",
+ nr, jeb->offset, jeb->last_node, jeb->last_node->flash_offset,
+ jeb->last_node->next_in_ino);
+
+ return 0;
+}
+
+void jffs2_free_refblock(struct jffs2_raw_node_ref *x)
{
dbg_memalloc("%p\n", x);
kmem_cache_free(raw_node_ref_slab, x);
@@ -205,3 +281,40 @@ void jffs2_free_inode_cache(struct jffs2_inode_cache *x)
dbg_memalloc("%p\n", x);
kmem_cache_free(inode_cache_slab, x);
}
+
+#ifdef CONFIG_JFFS2_FS_XATTR
+struct jffs2_xattr_datum *jffs2_alloc_xattr_datum(void)
+{
+ struct jffs2_xattr_datum *xd;
+ xd = kmem_cache_alloc(xattr_datum_cache, GFP_KERNEL);
+ dbg_memalloc("%p\n", xd);
+
+ memset(xd, 0, sizeof(struct jffs2_xattr_datum));
+ xd->class = RAWNODE_CLASS_XATTR_DATUM;
+ INIT_LIST_HEAD(&xd->xindex);
+ return xd;
+}
+
+void jffs2_free_xattr_datum(struct jffs2_xattr_datum *xd)
+{
+ dbg_memalloc("%p\n", xd);
+ kmem_cache_free(xattr_datum_cache, xd);
+}
+
+struct jffs2_xattr_ref *jffs2_alloc_xattr_ref(void)
+{
+ struct jffs2_xattr_ref *ref;
+ ref = kmem_cache_alloc(xattr_ref_cache, GFP_KERNEL);
+ dbg_memalloc("%p\n", ref);
+
+ memset(ref, 0, sizeof(struct jffs2_xattr_ref));
+ ref->class = RAWNODE_CLASS_XATTR_REF;
+ return ref;
+}
+
+void jffs2_free_xattr_ref(struct jffs2_xattr_ref *ref)
+{
+ dbg_memalloc("%p\n", ref);
+ kmem_cache_free(xattr_ref_cache, ref);
+}
+#endif
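With the slab now handing out whole refblocks, each allocation holds REFS_PER_BLOCK usable struct jffs2_raw_node_ref slots followed by one terminator slot whose flash_offset is REF_LINK_NODE and whose next_in_ino points at the next block (or NULL at the end of the chain); unused slots carry REF_EMPTY_NODE. A minimal sketch of walking such a chain, equivalent in effect to the ref_next() helper this patch adds to nodelist.h (walk_next is an illustrative name):

	static struct jffs2_raw_node_ref *walk_next(struct jffs2_raw_node_ref *ref)
	{
		ref++;					/* next slot in this refblock */
		if (ref->flash_offset == REF_LINK_NODE)	/* terminator: follow link to next block */
			ref = ref->next_in_ino;
		if (!ref || ref->flash_offset == REF_EMPTY_NODE)
			return NULL;			/* end of chain, or first unused slot */
		return ref;
	}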
diff --git a/fs/jffs2/nodelist.c b/fs/jffs2/nodelist.c
index 1d46677afd1..927dfe42ba7 100644
--- a/fs/jffs2/nodelist.c
+++ b/fs/jffs2/nodelist.c
@@ -438,8 +438,7 @@ static int check_node_data(struct jffs2_sb_info *c, struct jffs2_tmp_dnode_info
if (c->mtd->point) {
err = c->mtd->point(c->mtd, ofs, len, &retlen, &buffer);
if (!err && retlen < tn->csize) {
- JFFS2_WARNING("MTD point returned len too short: %zu "
- "instead of %u.\n", retlen, tn->csize);
+ JFFS2_WARNING("MTD point returned len too short: %zu instead of %u.\n", retlen, tn->csize);
c->mtd->unpoint(c->mtd, buffer, ofs, len);
} else if (err)
JFFS2_WARNING("MTD point failed: error code %d.\n", err);
@@ -462,8 +461,7 @@ static int check_node_data(struct jffs2_sb_info *c, struct jffs2_tmp_dnode_info
}
if (retlen != len) {
- JFFS2_ERROR("short read at %#08x: %zd instead of %d.\n",
- ofs, retlen, len);
+ JFFS2_ERROR("short read at %#08x: %zd instead of %d.\n", ofs, retlen, len);
err = -EIO;
goto free_out;
}
@@ -940,6 +938,7 @@ void jffs2_free_ino_caches(struct jffs2_sb_info *c)
this = c->inocache_list[i];
while (this) {
next = this->next;
+ jffs2_xattr_free_inode(c, this);
jffs2_free_inode_cache(this);
this = next;
}
@@ -954,9 +953,13 @@ void jffs2_free_raw_node_refs(struct jffs2_sb_info *c)
for (i=0; i<c->nr_blocks; i++) {
this = c->blocks[i].first_node;
- while(this) {
- next = this->next_phys;
- jffs2_free_raw_node_ref(this);
+ while (this) {
+ if (this[REFS_PER_BLOCK].flash_offset == REF_LINK_NODE)
+ next = this[REFS_PER_BLOCK].next_in_ino;
+ else
+ next = NULL;
+
+ jffs2_free_refblock(this);
this = next;
}
c->blocks[i].first_node = c->blocks[i].last_node = NULL;
@@ -1047,3 +1050,169 @@ void jffs2_kill_fragtree(struct rb_root *root, struct jffs2_sb_info *c)
cond_resched();
}
}
+
+struct jffs2_raw_node_ref *jffs2_link_node_ref(struct jffs2_sb_info *c,
+ struct jffs2_eraseblock *jeb,
+ uint32_t ofs, uint32_t len,
+ struct jffs2_inode_cache *ic)
+{
+ struct jffs2_raw_node_ref *ref;
+
+ BUG_ON(!jeb->allocated_refs);
+ jeb->allocated_refs--;
+
+ ref = jeb->last_node;
+
+ dbg_noderef("Last node at %p is (%08x,%p)\n", ref, ref->flash_offset,
+ ref->next_in_ino);
+
+ while (ref->flash_offset != REF_EMPTY_NODE) {
+ if (ref->flash_offset == REF_LINK_NODE)
+ ref = ref->next_in_ino;
+ else
+ ref++;
+ }
+
+ dbg_noderef("New ref is %p (%08x becomes %08x,%p) len 0x%x\n", ref,
+ ref->flash_offset, ofs, ref->next_in_ino, len);
+
+ ref->flash_offset = ofs;
+
+ if (!jeb->first_node) {
+ jeb->first_node = ref;
+ BUG_ON(ref_offset(ref) != jeb->offset);
+ } else if (unlikely(ref_offset(ref) != jeb->offset + c->sector_size - jeb->free_size)) {
+ uint32_t last_len = ref_totlen(c, jeb, jeb->last_node);
+
+ JFFS2_ERROR("Adding new ref %p at (0x%08x-0x%08x) not immediately after previous (0x%08x-0x%08x)\n",
+ ref, ref_offset(ref), ref_offset(ref)+len,
+ ref_offset(jeb->last_node),
+ ref_offset(jeb->last_node)+last_len);
+ BUG();
+ }
+ jeb->last_node = ref;
+
+ if (ic) {
+ ref->next_in_ino = ic->nodes;
+ ic->nodes = ref;
+ } else {
+ ref->next_in_ino = NULL;
+ }
+
+ switch(ref_flags(ref)) {
+ case REF_UNCHECKED:
+ c->unchecked_size += len;
+ jeb->unchecked_size += len;
+ break;
+
+ case REF_NORMAL:
+ case REF_PRISTINE:
+ c->used_size += len;
+ jeb->used_size += len;
+ break;
+
+ case REF_OBSOLETE:
+ c->dirty_size += len;
+ jeb->dirty_size += len;
+ break;
+ }
+ c->free_size -= len;
+ jeb->free_size -= len;
+
+#ifdef TEST_TOTLEN
+ /* Set (and test) __totlen field... for now */
+ ref->__totlen = len;
+ ref_totlen(c, jeb, ref);
+#endif
+ return ref;
+}
+
+/* No locking, no reservation of 'ref'. Do not use on a live file system */
+int jffs2_scan_dirty_space(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb,
+ uint32_t size)
+{
+ if (!size)
+ return 0;
+ if (unlikely(size > jeb->free_size)) {
+ printk(KERN_CRIT "Dirty space 0x%x larger then free_size 0x%x (wasted 0x%x)\n",
+ size, jeb->free_size, jeb->wasted_size);
+ BUG();
+ }
+ /* REF_EMPTY_NODE is !obsolete, so that works OK */
+ if (jeb->last_node && ref_obsolete(jeb->last_node)) {
+#ifdef TEST_TOTLEN
+ jeb->last_node->__totlen += size;
+#endif
+ c->dirty_size += size;
+ c->free_size -= size;
+ jeb->dirty_size += size;
+ jeb->free_size -= size;
+ } else {
+ uint32_t ofs = jeb->offset + c->sector_size - jeb->free_size;
+ ofs |= REF_OBSOLETE;
+
+ jffs2_link_node_ref(c, jeb, ofs, size, NULL);
+ }
+
+ return 0;
+}
+
+/* Calculate totlen from surrounding nodes or eraseblock */
+static inline uint32_t __ref_totlen(struct jffs2_sb_info *c,
+ struct jffs2_eraseblock *jeb,
+ struct jffs2_raw_node_ref *ref)
+{
+ uint32_t ref_end;
+ struct jffs2_raw_node_ref *next_ref = ref_next(ref);
+
+ if (next_ref)
+ ref_end = ref_offset(next_ref);
+ else {
+ if (!jeb)
+ jeb = &c->blocks[ref->flash_offset / c->sector_size];
+
+ /* Last node in block. Use free_space */
+ if (unlikely(ref != jeb->last_node)) {
+ printk(KERN_CRIT "ref %p @0x%08x is not jeb->last_node (%p @0x%08x)\n",
+ ref, ref_offset(ref), jeb->last_node, jeb->last_node?ref_offset(jeb->last_node):0);
+ BUG();
+ }
+ ref_end = jeb->offset + c->sector_size - jeb->free_size;
+ }
+ return ref_end - ref_offset(ref);
+}
+
+uint32_t __jffs2_ref_totlen(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb,
+ struct jffs2_raw_node_ref *ref)
+{
+ uint32_t ret;
+
+ ret = __ref_totlen(c, jeb, ref);
+
+#ifdef TEST_TOTLEN
+ if (unlikely(ret != ref->__totlen)) {
+ if (!jeb)
+ jeb = &c->blocks[ref->flash_offset / c->sector_size];
+
+ printk(KERN_CRIT "Totlen for ref at %p (0x%08x-0x%08x) miscalculated as 0x%x instead of %x\n",
+ ref, ref_offset(ref), ref_offset(ref)+ref->__totlen,
+ ret, ref->__totlen);
+ if (ref_next(ref)) {
+ printk(KERN_CRIT "next %p (0x%08x-0x%08x)\n", ref_next(ref), ref_offset(ref_next(ref)),
+ ref_offset(ref_next(ref))+ref->__totlen);
+ } else
+ printk(KERN_CRIT "No next ref. jeb->last_node is %p\n", jeb->last_node);
+
+ printk(KERN_CRIT "jeb->wasted_size %x, dirty_size %x, used_size %x, free_size %x\n", jeb->wasted_size, jeb->dirty_size, jeb->used_size, jeb->free_size);
+
+#if defined(JFFS2_DBG_DUMPS) || defined(JFFS2_DBG_PARANOIA_CHECKS)
+ __jffs2_dbg_dump_node_refs_nolock(c, jeb);
+#endif
+
+ WARN_ON(1);
+
+ ret = ref->__totlen;
+ }
+#endif /* TEST_TOTLEN */
+ return ret;
+}
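The point of __ref_totlen() above is that a node's length no longer has to be stored per ref: nodes are packed back to back, so the length is the distance to the next ref's offset, or, for the last node in an eraseblock, the distance to the block's current fill point (jeb->offset + c->sector_size - jeb->free_size). As a worked example with illustrative numbers: for a block at offset 0x40000 with sector_size 0x20000 and free_size 0x1f000, the fill point is 0x40000 + 0x20000 - 0x1f000 = 0x41000, so a last node at offset 0x40c00 has totlen 0x41000 - 0x40c00 = 0x400 bytes.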
diff --git a/fs/jffs2/nodelist.h b/fs/jffs2/nodelist.h
index 23a67bb3052..b16c60bbcf6 100644
--- a/fs/jffs2/nodelist.h
+++ b/fs/jffs2/nodelist.h
@@ -18,8 +18,10 @@
#include <linux/fs.h>
#include <linux/types.h>
#include <linux/jffs2.h>
-#include <linux/jffs2_fs_sb.h>
-#include <linux/jffs2_fs_i.h>
+#include "jffs2_fs_sb.h"
+#include "jffs2_fs_i.h"
+#include "xattr.h"
+#include "acl.h"
#include "summary.h"
#ifdef __ECOS
@@ -75,14 +77,50 @@
struct jffs2_raw_node_ref
{
struct jffs2_raw_node_ref *next_in_ino; /* Points to the next raw_node_ref
- for this inode. If this is the last, it points to the inode_cache
- for this inode instead. The inode_cache will have NULL in the first
- word so you know when you've got there :) */
- struct jffs2_raw_node_ref *next_phys;
+ for this object. If this _is_ the last, it points to the inode_cache,
+ xattr_ref or xattr_datum instead. The common part of those structures
+ has NULL in the first word. See jffs2_raw_ref_to_ic() below */
uint32_t flash_offset;
+#define TEST_TOTLEN
+#ifdef TEST_TOTLEN
uint32_t __totlen; /* This may die; use ref_totlen(c, jeb, ) below */
+#endif
};
+#define REF_LINK_NODE ((int32_t)-1)
+#define REF_EMPTY_NODE ((int32_t)-2)
+
+/* Use blocks of about 256 bytes */
+#define REFS_PER_BLOCK ((255/sizeof(struct jffs2_raw_node_ref))-1)
+
+static inline struct jffs2_raw_node_ref *ref_next(struct jffs2_raw_node_ref *ref)
+{
+ ref++;
+
+ /* Link to another block of refs */
+ if (ref->flash_offset == REF_LINK_NODE) {
+ ref = ref->next_in_ino;
+ if (!ref)
+ return ref;
+ }
+
+ /* End of chain */
+ if (ref->flash_offset == REF_EMPTY_NODE)
+ return NULL;
+
+ return ref;
+}
+
+static inline struct jffs2_inode_cache *jffs2_raw_ref_to_ic(struct jffs2_raw_node_ref *raw)
+{
+ while(raw->next_in_ino)
+ raw = raw->next_in_ino;
+
+ /* NB. This can be a jffs2_xattr_datum or jffs2_xattr_ref and
+ not actually a jffs2_inode_cache. Check ->class */
+ return ((struct jffs2_inode_cache *)raw);
+}
+
/* flash_offset & 3 always has to be zero, because nodes are
always aligned at 4 bytes. So we have a couple of extra bits
to play with, which indicate the node's status; see below: */
@@ -95,6 +133,11 @@ struct jffs2_raw_node_ref
#define ref_obsolete(ref) (((ref)->flash_offset & 3) == REF_OBSOLETE)
#define mark_ref_normal(ref) do { (ref)->flash_offset = ref_offset(ref) | REF_NORMAL; } while(0)
+/* NB: REF_PRISTINE for an inode-less node (ref->next_in_ino == NULL) indicates
+ it is an unknown node of type JFFS2_NODETYPE_RWCOMPAT_COPY, so it'll get
+ copied. If you need to do anything different to GC inode-less nodes, then
+ you need to modify gc.c accordingly. */
+
/* For each inode in the filesystem, we need to keep a record of
nlink, because it would be a PITA to scan the whole directory tree
at read_inode() time to calculate it, and to keep sufficient information
@@ -103,15 +146,27 @@ struct jffs2_raw_node_ref
a pointer to the first physical node which is part of this inode, too.
*/
struct jffs2_inode_cache {
+ /* First part of structure is shared with other objects which
+ can terminate the raw node refs' next_in_ino list -- which
+ are currently struct jffs2_xattr_datum and struct jffs2_xattr_ref. */
+
struct jffs2_full_dirent *scan_dents; /* Used during scan to hold
temporary lists of dirents, and later must be set to
NULL to mark the end of the raw_node_ref->next_in_ino
chain. */
- struct jffs2_inode_cache *next;
struct jffs2_raw_node_ref *nodes;
+ uint8_t class; /* Identifies the type of object (see RAWNODE_CLASS_*) */
+
+ /* end of shared structure */
+
+ uint8_t flags;
+ uint16_t state;
uint32_t ino;
+ struct jffs2_inode_cache *next;
+#ifdef CONFIG_JFFS2_FS_XATTR
+ struct jffs2_xattr_ref *xref;
+#endif
int nlink;
- int state;
};
/* Inode states for 'state' above. We need the 'GC' state to prevent
@@ -125,8 +180,16 @@ struct jffs2_inode_cache {
#define INO_STATE_READING 5 /* In read_inode() */
#define INO_STATE_CLEARING 6 /* In clear_inode() */
+#define INO_FLAGS_XATTR_CHECKED 0x01 /* has no duplicate xattr_ref */
+
+#define RAWNODE_CLASS_INODE_CACHE 0
+#define RAWNODE_CLASS_XATTR_DATUM 1
+#define RAWNODE_CLASS_XATTR_REF 2
+
#define INOCACHE_HASHSIZE 128
+#define write_ofs(c) ((c)->nextblock->offset + (c)->sector_size - (c)->nextblock->free_size)
+
/*
Larger representation of a raw node, kept in-core only when the
struct inode for this particular ino is instantiated.
@@ -192,6 +255,7 @@ struct jffs2_eraseblock
uint32_t wasted_size;
uint32_t free_size; /* Note that sector_size - free_size
is the address of the first free space */
+ uint32_t allocated_refs;
struct jffs2_raw_node_ref *first_node;
struct jffs2_raw_node_ref *last_node;
@@ -203,57 +267,7 @@ static inline int jffs2_blocks_use_vmalloc(struct jffs2_sb_info *c)
return ((c->flash_size / c->sector_size) * sizeof (struct jffs2_eraseblock)) > (128 * 1024);
}
-/* Calculate totlen from surrounding nodes or eraseblock */
-static inline uint32_t __ref_totlen(struct jffs2_sb_info *c,
- struct jffs2_eraseblock *jeb,
- struct jffs2_raw_node_ref *ref)
-{
- uint32_t ref_end;
-
- if (ref->next_phys)
- ref_end = ref_offset(ref->next_phys);
- else {
- if (!jeb)
- jeb = &c->blocks[ref->flash_offset / c->sector_size];
-
- /* Last node in block. Use free_space */
- BUG_ON(ref != jeb->last_node);
- ref_end = jeb->offset + c->sector_size - jeb->free_size;
- }
- return ref_end - ref_offset(ref);
-}
-
-static inline uint32_t ref_totlen(struct jffs2_sb_info *c,
- struct jffs2_eraseblock *jeb,
- struct jffs2_raw_node_ref *ref)
-{
- uint32_t ret;
-
-#if CONFIG_JFFS2_FS_DEBUG > 0
- if (jeb && jeb != &c->blocks[ref->flash_offset / c->sector_size]) {
- printk(KERN_CRIT "ref_totlen called with wrong block -- at 0x%08x instead of 0x%08x; ref 0x%08x\n",
- jeb->offset, c->blocks[ref->flash_offset / c->sector_size].offset, ref_offset(ref));
- BUG();
- }
-#endif
-
-#if 1
- ret = ref->__totlen;
-#else
- /* This doesn't actually work yet */
- ret = __ref_totlen(c, jeb, ref);
- if (ret != ref->__totlen) {
- printk(KERN_CRIT "Totlen for ref at %p (0x%08x-0x%08x) miscalculated as 0x%x instead of %x\n",
- ref, ref_offset(ref), ref_offset(ref)+ref->__totlen,
- ret, ref->__totlen);
- if (!jeb)
- jeb = &c->blocks[ref->flash_offset / c->sector_size];
- jffs2_dbg_dump_node_refs_nolock(c, jeb);
- BUG();
- }
-#endif
- return ret;
-}
+#define ref_totlen(a, b, c) __jffs2_ref_totlen((a), (b), (c))
#define ALLOC_NORMAL 0 /* Normal allocation */
#define ALLOC_DELETION 1 /* Deletion node. Best to allow it */
@@ -268,13 +282,15 @@ static inline uint32_t ref_totlen(struct jffs2_sb_info *c,
#define PAD(x) (((x)+3)&~3)
-static inline struct jffs2_inode_cache *jffs2_raw_ref_to_ic(struct jffs2_raw_node_ref *raw)
+static inline int jffs2_encode_dev(union jffs2_device_node *jdev, dev_t rdev)
{
- while(raw->next_in_ino) {
- raw = raw->next_in_ino;
+ if (old_valid_dev(rdev)) {
+ jdev->old = cpu_to_je16(old_encode_dev(rdev));
+ return sizeof(jdev->old);
+ } else {
+ jdev->new = cpu_to_je32(new_encode_dev(rdev));
+ return sizeof(jdev->new);
}
-
- return ((struct jffs2_inode_cache *)raw);
}
static inline struct jffs2_node_frag *frag_first(struct rb_root *root)
@@ -299,7 +315,6 @@ static inline struct jffs2_node_frag *frag_last(struct rb_root *root)
return rb_entry(node, struct jffs2_node_frag, rb);
}
-#define rb_parent(rb) ((rb)->rb_parent)
#define frag_next(frag) rb_entry(rb_next(&(frag)->rb), struct jffs2_node_frag, rb)
#define frag_prev(frag) rb_entry(rb_prev(&(frag)->rb), struct jffs2_node_frag, rb)
#define frag_parent(frag) rb_entry(rb_parent(&(frag)->rb), struct jffs2_node_frag, rb)
@@ -324,28 +339,44 @@ void jffs2_obsolete_node_frag(struct jffs2_sb_info *c, struct jffs2_node_frag *t
int jffs2_add_full_dnode_to_inode(struct jffs2_sb_info *c, struct jffs2_inode_info *f, struct jffs2_full_dnode *fn);
void jffs2_truncate_fragtree (struct jffs2_sb_info *c, struct rb_root *list, uint32_t size);
int jffs2_add_older_frag_to_fragtree(struct jffs2_sb_info *c, struct jffs2_inode_info *f, struct jffs2_tmp_dnode_info *tn);
+struct jffs2_raw_node_ref *jffs2_link_node_ref(struct jffs2_sb_info *c,
+ struct jffs2_eraseblock *jeb,
+ uint32_t ofs, uint32_t len,
+ struct jffs2_inode_cache *ic);
+extern uint32_t __jffs2_ref_totlen(struct jffs2_sb_info *c,
+ struct jffs2_eraseblock *jeb,
+ struct jffs2_raw_node_ref *ref);
/* nodemgmt.c */
int jffs2_thread_should_wake(struct jffs2_sb_info *c);
-int jffs2_reserve_space(struct jffs2_sb_info *c, uint32_t minsize, uint32_t *ofs,
+int jffs2_reserve_space(struct jffs2_sb_info *c, uint32_t minsize,
uint32_t *len, int prio, uint32_t sumsize);
-int jffs2_reserve_space_gc(struct jffs2_sb_info *c, uint32_t minsize, uint32_t *ofs,
+int jffs2_reserve_space_gc(struct jffs2_sb_info *c, uint32_t minsize,
uint32_t *len, uint32_t sumsize);
-int jffs2_add_physical_node_ref(struct jffs2_sb_info *c, struct jffs2_raw_node_ref *new);
+struct jffs2_raw_node_ref *jffs2_add_physical_node_ref(struct jffs2_sb_info *c,
+ uint32_t ofs, uint32_t len,
+ struct jffs2_inode_cache *ic);
void jffs2_complete_reservation(struct jffs2_sb_info *c);
void jffs2_mark_node_obsolete(struct jffs2_sb_info *c, struct jffs2_raw_node_ref *raw);
/* write.c */
int jffs2_do_new_inode(struct jffs2_sb_info *c, struct jffs2_inode_info *f, uint32_t mode, struct jffs2_raw_inode *ri);
-struct jffs2_full_dnode *jffs2_write_dnode(struct jffs2_sb_info *c, struct jffs2_inode_info *f, struct jffs2_raw_inode *ri, const unsigned char *data, uint32_t datalen, uint32_t flash_ofs, int alloc_mode);
-struct jffs2_full_dirent *jffs2_write_dirent(struct jffs2_sb_info *c, struct jffs2_inode_info *f, struct jffs2_raw_dirent *rd, const unsigned char *name, uint32_t namelen, uint32_t flash_ofs, int alloc_mode);
+struct jffs2_full_dnode *jffs2_write_dnode(struct jffs2_sb_info *c, struct jffs2_inode_info *f,
+ struct jffs2_raw_inode *ri, const unsigned char *data,
+ uint32_t datalen, int alloc_mode);
+struct jffs2_full_dirent *jffs2_write_dirent(struct jffs2_sb_info *c, struct jffs2_inode_info *f,
+ struct jffs2_raw_dirent *rd, const unsigned char *name,
+ uint32_t namelen, int alloc_mode);
int jffs2_write_inode_range(struct jffs2_sb_info *c, struct jffs2_inode_info *f,
struct jffs2_raw_inode *ri, unsigned char *buf,
uint32_t offset, uint32_t writelen, uint32_t *retlen);
-int jffs2_do_create(struct jffs2_sb_info *c, struct jffs2_inode_info *dir_f, struct jffs2_inode_info *f, struct jffs2_raw_inode *ri, const char *name, int namelen);
-int jffs2_do_unlink(struct jffs2_sb_info *c, struct jffs2_inode_info *dir_f, const char *name, int namelen, struct jffs2_inode_info *dead_f, uint32_t time);
-int jffs2_do_link (struct jffs2_sb_info *c, struct jffs2_inode_info *dir_f, uint32_t ino, uint8_t type, const char *name, int namelen, uint32_t time);
+int jffs2_do_create(struct jffs2_sb_info *c, struct jffs2_inode_info *dir_f, struct jffs2_inode_info *f,
+ struct jffs2_raw_inode *ri, const char *name, int namelen);
+int jffs2_do_unlink(struct jffs2_sb_info *c, struct jffs2_inode_info *dir_f, const char *name,
+ int namelen, struct jffs2_inode_info *dead_f, uint32_t time);
+int jffs2_do_link(struct jffs2_sb_info *c, struct jffs2_inode_info *dir_f, uint32_t ino,
+ uint8_t type, const char *name, int namelen, uint32_t time);
/* readinode.c */
@@ -368,12 +399,19 @@ struct jffs2_raw_inode *jffs2_alloc_raw_inode(void);
void jffs2_free_raw_inode(struct jffs2_raw_inode *);
struct jffs2_tmp_dnode_info *jffs2_alloc_tmp_dnode_info(void);
void jffs2_free_tmp_dnode_info(struct jffs2_tmp_dnode_info *);
-struct jffs2_raw_node_ref *jffs2_alloc_raw_node_ref(void);
-void jffs2_free_raw_node_ref(struct jffs2_raw_node_ref *);
+int jffs2_prealloc_raw_node_refs(struct jffs2_sb_info *c,
+ struct jffs2_eraseblock *jeb, int nr);
+void jffs2_free_refblock(struct jffs2_raw_node_ref *);
struct jffs2_node_frag *jffs2_alloc_node_frag(void);
void jffs2_free_node_frag(struct jffs2_node_frag *);
struct jffs2_inode_cache *jffs2_alloc_inode_cache(void);
void jffs2_free_inode_cache(struct jffs2_inode_cache *);
+#ifdef CONFIG_JFFS2_FS_XATTR
+struct jffs2_xattr_datum *jffs2_alloc_xattr_datum(void);
+void jffs2_free_xattr_datum(struct jffs2_xattr_datum *);
+struct jffs2_xattr_ref *jffs2_alloc_xattr_ref(void);
+void jffs2_free_xattr_ref(struct jffs2_xattr_ref *);
+#endif
/* gc.c */
int jffs2_garbage_collect_pass(struct jffs2_sb_info *c);
@@ -393,12 +431,14 @@ int jffs2_fill_scan_buf(struct jffs2_sb_info *c, void *buf,
uint32_t ofs, uint32_t len);
struct jffs2_inode_cache *jffs2_scan_make_ino_cache(struct jffs2_sb_info *c, uint32_t ino);
int jffs2_scan_classify_jeb(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb);
+int jffs2_scan_dirty_space(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb, uint32_t size);
/* build.c */
int jffs2_do_mount_fs(struct jffs2_sb_info *c);
/* erase.c */
void jffs2_erase_pending_blocks(struct jffs2_sb_info *c, int count);
+void jffs2_free_jeb_node_refs(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb);
#ifdef CONFIG_JFFS2_FS_WRITEBUFFER
/* wbuf.c */
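jffs2_encode_dev() above picks the on-flash representation by size: the legacy 16-bit encoding whenever old_valid_dev() accepts the device number, the 32-bit encoding otherwise; the reader tells the two apart purely by the number of bytes stored (see the jffs2_read_inode() hunk in fs.c earlier in this patch). A small usage fragment mirroring those two paths (assumes a struct inode *inode and a dev_t rdev in scope):

	union jffs2_device_node jdev;
	int mdatalen;

	/* Writing: choose the 2- or 4-byte encoding. */
	mdatalen = jffs2_encode_dev(&jdev, inode->i_rdev);
	/* ... write mdatalen bytes starting at (char *)&jdev ... */

	/* Reading: the stored length says which decoder applies. */
	if (mdatalen == sizeof(jdev.old))
		rdev = old_decode_dev(je16_to_cpu(jdev.old));
	else
		rdev = new_decode_dev(je32_to_cpu(jdev.new));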
diff --git a/fs/jffs2/nodemgmt.c b/fs/jffs2/nodemgmt.c
index 49127a1f045..8bedfd2ff68 100644
--- a/fs/jffs2/nodemgmt.c
+++ b/fs/jffs2/nodemgmt.c
@@ -23,13 +23,12 @@
* jffs2_reserve_space - request physical space to write nodes to flash
* @c: superblock info
* @minsize: Minimum acceptable size of allocation
- * @ofs: Returned value of node offset
* @len: Returned value of allocation length
* @prio: Allocation type - ALLOC_{NORMAL,DELETION}
*
* Requests a block of physical space on the flash. Returns zero for success
- * and puts 'ofs' and 'len' into the appriopriate place, or returns -ENOSPC
- * or other error if appropriate.
+ * and puts 'len' into the appropriate place, or returns -ENOSPC or other error
+ * if appropriate. The offset is no longer returned; it is determined at write time.
*
* If it returns zero, jffs2_reserve_space() also downs the per-filesystem
* allocation semaphore, to prevent more than one allocation from being
@@ -40,9 +39,9 @@
*/
static int jffs2_do_reserve_space(struct jffs2_sb_info *c, uint32_t minsize,
- uint32_t *ofs, uint32_t *len, uint32_t sumsize);
+ uint32_t *len, uint32_t sumsize);
-int jffs2_reserve_space(struct jffs2_sb_info *c, uint32_t minsize, uint32_t *ofs,
+int jffs2_reserve_space(struct jffs2_sb_info *c, uint32_t minsize,
uint32_t *len, int prio, uint32_t sumsize)
{
int ret = -EAGAIN;
@@ -132,19 +131,21 @@ int jffs2_reserve_space(struct jffs2_sb_info *c, uint32_t minsize, uint32_t *ofs
spin_lock(&c->erase_completion_lock);
}
- ret = jffs2_do_reserve_space(c, minsize, ofs, len, sumsize);
+ ret = jffs2_do_reserve_space(c, minsize, len, sumsize);
if (ret) {
D1(printk(KERN_DEBUG "jffs2_reserve_space: ret is %d\n", ret));
}
}
spin_unlock(&c->erase_completion_lock);
+ if (!ret)
+ ret = jffs2_prealloc_raw_node_refs(c, c->nextblock, 1);
if (ret)
up(&c->alloc_sem);
return ret;
}
-int jffs2_reserve_space_gc(struct jffs2_sb_info *c, uint32_t minsize, uint32_t *ofs,
- uint32_t *len, uint32_t sumsize)
+int jffs2_reserve_space_gc(struct jffs2_sb_info *c, uint32_t minsize,
+ uint32_t *len, uint32_t sumsize)
{
int ret = -EAGAIN;
minsize = PAD(minsize);
@@ -153,12 +154,15 @@ int jffs2_reserve_space_gc(struct jffs2_sb_info *c, uint32_t minsize, uint32_t *
spin_lock(&c->erase_completion_lock);
while(ret == -EAGAIN) {
- ret = jffs2_do_reserve_space(c, minsize, ofs, len, sumsize);
+ ret = jffs2_do_reserve_space(c, minsize, len, sumsize);
if (ret) {
D1(printk(KERN_DEBUG "jffs2_reserve_space_gc: looping, ret is %d\n", ret));
}
}
spin_unlock(&c->erase_completion_lock);
+ if (!ret)
+ ret = jffs2_prealloc_raw_node_refs(c, c->nextblock, 1);
+
return ret;
}
@@ -259,10 +263,11 @@ static int jffs2_find_nextblock(struct jffs2_sb_info *c)
}
/* Called with alloc sem _and_ erase_completion_lock */
-static int jffs2_do_reserve_space(struct jffs2_sb_info *c, uint32_t minsize, uint32_t *ofs, uint32_t *len, uint32_t sumsize)
+static int jffs2_do_reserve_space(struct jffs2_sb_info *c, uint32_t minsize,
+ uint32_t *len, uint32_t sumsize)
{
struct jffs2_eraseblock *jeb = c->nextblock;
- uint32_t reserved_size; /* for summary information at the end of the jeb */
+ uint32_t reserved_size; /* for summary information at the end of the jeb */
int ret;
restart:
@@ -312,6 +317,8 @@ static int jffs2_do_reserve_space(struct jffs2_sb_info *c, uint32_t minsize, uin
}
} else {
if (jeb && minsize > jeb->free_size) {
+ uint32_t waste;
+
/* Skip the end of this block and file it as having some dirty space */
/* If there's a pending write to it, flush now */
@@ -324,10 +331,26 @@ static int jffs2_do_reserve_space(struct jffs2_sb_info *c, uint32_t minsize, uin
goto restart;
}
- c->wasted_size += jeb->free_size;
- c->free_size -= jeb->free_size;
- jeb->wasted_size += jeb->free_size;
- jeb->free_size = 0;
+ spin_unlock(&c->erase_completion_lock);
+
+ ret = jffs2_prealloc_raw_node_refs(c, jeb, 1);
+ if (ret)
+ return ret;
+ /* Just lock it again and continue. Nothing much can change because
+ we hold c->alloc_sem anyway. In fact, it's not entirely clear why
+ we hold c->erase_completion_lock in the majority of this function...
+ but that's a question for another (more caffeine-rich) day. */
+ spin_lock(&c->erase_completion_lock);
+
+ waste = jeb->free_size;
+ jffs2_link_node_ref(c, jeb,
+ (jeb->offset + c->sector_size - waste) | REF_OBSOLETE,
+ waste, NULL);
+ /* FIXME: that made it count as dirty. Convert to wasted */
+ jeb->dirty_size -= waste;
+ c->dirty_size -= waste;
+ jeb->wasted_size += waste;
+ c->wasted_size += waste;
jffs2_close_nextblock(c, jeb);
jeb = NULL;
@@ -349,7 +372,6 @@ static int jffs2_do_reserve_space(struct jffs2_sb_info *c, uint32_t minsize, uin
}
/* OK, jeb (==c->nextblock) is now pointing at a block which definitely has
enough space */
- *ofs = jeb->offset + (c->sector_size - jeb->free_size);
*len = jeb->free_size - reserved_size;
if (c->cleanmarker_size && jeb->used_size == c->cleanmarker_size &&
@@ -365,7 +387,8 @@ static int jffs2_do_reserve_space(struct jffs2_sb_info *c, uint32_t minsize, uin
spin_lock(&c->erase_completion_lock);
}
- D1(printk(KERN_DEBUG "jffs2_do_reserve_space(): Giving 0x%x bytes at 0x%x\n", *len, *ofs));
+ D1(printk(KERN_DEBUG "jffs2_do_reserve_space(): Giving 0x%x bytes at 0x%x\n",
+ *len, jeb->offset + (c->sector_size - jeb->free_size)));
return 0;
}
@@ -374,7 +397,6 @@ static int jffs2_do_reserve_space(struct jffs2_sb_info *c, uint32_t minsize, uin
* @c: superblock info
* @new: new node reference to add
* @len: length of this physical node
- * @dirty: dirty flag for new node
*
* Should only be used to report nodes for which space has been allocated
* by jffs2_reserve_space.
@@ -382,42 +404,30 @@ static int jffs2_do_reserve_space(struct jffs2_sb_info *c, uint32_t minsize, uin
* Must be called with the alloc_sem held.
*/
-int jffs2_add_physical_node_ref(struct jffs2_sb_info *c, struct jffs2_raw_node_ref *new)
+struct jffs2_raw_node_ref *jffs2_add_physical_node_ref(struct jffs2_sb_info *c,
+ uint32_t ofs, uint32_t len,
+ struct jffs2_inode_cache *ic)
{
struct jffs2_eraseblock *jeb;
- uint32_t len;
+ struct jffs2_raw_node_ref *new;
- jeb = &c->blocks[new->flash_offset / c->sector_size];
- len = ref_totlen(c, jeb, new);
+ jeb = &c->blocks[ofs / c->sector_size];
- D1(printk(KERN_DEBUG "jffs2_add_physical_node_ref(): Node at 0x%x(%d), size 0x%x\n", ref_offset(new), ref_flags(new), len));
+ D1(printk(KERN_DEBUG "jffs2_add_physical_node_ref(): Node at 0x%x(%d), size 0x%x\n",
+ ofs & ~3, ofs & 3, len));
#if 1
- /* we could get some obsolete nodes after nextblock was refiled
- in wbuf.c */
- if ((c->nextblock || !ref_obsolete(new))
- &&(jeb != c->nextblock || ref_offset(new) != jeb->offset + (c->sector_size - jeb->free_size))) {
+ /* Allow non-obsolete nodes only to be added at the end of c->nextblock,
+ if c->nextblock is set. Note that wbuf.c will file obsolete nodes
+ even after refiling c->nextblock */
+ if ((c->nextblock || ((ofs & 3) != REF_OBSOLETE))
+ && (jeb != c->nextblock || (ofs & ~3) != jeb->offset + (c->sector_size - jeb->free_size))) {
printk(KERN_WARNING "argh. node added in wrong place\n");
- jffs2_free_raw_node_ref(new);
- return -EINVAL;
+ return ERR_PTR(-EINVAL);
}
#endif
spin_lock(&c->erase_completion_lock);
- if (!jeb->first_node)
- jeb->first_node = new;
- if (jeb->last_node)
- jeb->last_node->next_phys = new;
- jeb->last_node = new;
-
- jeb->free_size -= len;
- c->free_size -= len;
- if (ref_obsolete(new)) {
- jeb->dirty_size += len;
- c->dirty_size += len;
- } else {
- jeb->used_size += len;
- c->used_size += len;
- }
+ new = jffs2_link_node_ref(c, jeb, ofs, len, ic);
if (!jeb->free_size && !jeb->dirty_size && !ISDIRTY(jeb->wasted_size)) {
/* If it lives on the dirty_list, jffs2_reserve_space will put it there */
@@ -438,7 +448,7 @@ int jffs2_add_physical_node_ref(struct jffs2_sb_info *c, struct jffs2_raw_node_r
spin_unlock(&c->erase_completion_lock);
- return 0;
+ return new;
}
@@ -470,8 +480,9 @@ void jffs2_mark_node_obsolete(struct jffs2_sb_info *c, struct jffs2_raw_node_ref
struct jffs2_unknown_node n;
int ret, addedsize;
size_t retlen;
+ uint32_t freed_len;
- if(!ref) {
+ if(unlikely(!ref)) {
printk(KERN_NOTICE "EEEEEK. jffs2_mark_node_obsolete called with NULL node\n");
return;
}
@@ -499,32 +510,34 @@ void jffs2_mark_node_obsolete(struct jffs2_sb_info *c, struct jffs2_raw_node_ref
spin_lock(&c->erase_completion_lock);
+ freed_len = ref_totlen(c, jeb, ref);
+
if (ref_flags(ref) == REF_UNCHECKED) {
- D1(if (unlikely(jeb->unchecked_size < ref_totlen(c, jeb, ref))) {
+ D1(if (unlikely(jeb->unchecked_size < freed_len)) {
printk(KERN_NOTICE "raw unchecked node of size 0x%08x freed from erase block %d at 0x%08x, but unchecked_size was already 0x%08x\n",
- ref_totlen(c, jeb, ref), blocknr, ref->flash_offset, jeb->used_size);
+ freed_len, blocknr, ref->flash_offset, jeb->used_size);
BUG();
})
- D1(printk(KERN_DEBUG "Obsoleting previously unchecked node at 0x%08x of len %x: ", ref_offset(ref), ref_totlen(c, jeb, ref)));
- jeb->unchecked_size -= ref_totlen(c, jeb, ref);
- c->unchecked_size -= ref_totlen(c, jeb, ref);
+ D1(printk(KERN_DEBUG "Obsoleting previously unchecked node at 0x%08x of len %x: ", ref_offset(ref), freed_len));
+ jeb->unchecked_size -= freed_len;
+ c->unchecked_size -= freed_len;
} else {
- D1(if (unlikely(jeb->used_size < ref_totlen(c, jeb, ref))) {
+ D1(if (unlikely(jeb->used_size < freed_len)) {
printk(KERN_NOTICE "raw node of size 0x%08x freed from erase block %d at 0x%08x, but used_size was already 0x%08x\n",
- ref_totlen(c, jeb, ref), blocknr, ref->flash_offset, jeb->used_size);
+ freed_len, blocknr, ref->flash_offset, jeb->used_size);
BUG();
})
- D1(printk(KERN_DEBUG "Obsoleting node at 0x%08x of len %#x: ", ref_offset(ref), ref_totlen(c, jeb, ref)));
- jeb->used_size -= ref_totlen(c, jeb, ref);
- c->used_size -= ref_totlen(c, jeb, ref);
+ D1(printk(KERN_DEBUG "Obsoleting node at 0x%08x of len %#x: ", ref_offset(ref), freed_len));
+ jeb->used_size -= freed_len;
+ c->used_size -= freed_len;
}
// Take care, that wasted size is taken into concern
- if ((jeb->dirty_size || ISDIRTY(jeb->wasted_size + ref_totlen(c, jeb, ref))) && jeb != c->nextblock) {
- D1(printk(KERN_DEBUG "Dirtying\n"));
- addedsize = ref_totlen(c, jeb, ref);
- jeb->dirty_size += ref_totlen(c, jeb, ref);
- c->dirty_size += ref_totlen(c, jeb, ref);
+ if ((jeb->dirty_size || ISDIRTY(jeb->wasted_size + freed_len)) && jeb != c->nextblock) {
+ D1(printk("Dirtying\n"));
+ addedsize = freed_len;
+ jeb->dirty_size += freed_len;
+ c->dirty_size += freed_len;
/* Convert wasted space to dirty, if not a bad block */
if (jeb->wasted_size) {
@@ -543,10 +556,10 @@ void jffs2_mark_node_obsolete(struct jffs2_sb_info *c, struct jffs2_raw_node_ref
}
}
} else {
- D1(printk(KERN_DEBUG "Wasting\n"));
+ D1(printk("Wasting\n"));
addedsize = 0;
- jeb->wasted_size += ref_totlen(c, jeb, ref);
- c->wasted_size += ref_totlen(c, jeb, ref);
+ jeb->wasted_size += freed_len;
+ c->wasted_size += freed_len;
}
ref->flash_offset = ref_offset(ref) | REF_OBSOLETE;
@@ -622,7 +635,7 @@ void jffs2_mark_node_obsolete(struct jffs2_sb_info *c, struct jffs2_raw_node_ref
/* The erase_free_sem is locked, and has been since before we marked the node obsolete
and potentially put its eraseblock onto the erase_pending_list. Thus, we know that
the block hasn't _already_ been erased, and that 'ref' itself hasn't been freed yet
- by jffs2_free_all_node_refs() in erase.c. Which is nice. */
+ by jffs2_free_jeb_node_refs() in erase.c. Which is nice. */
D1(printk(KERN_DEBUG "obliterating obsoleted node at 0x%08x\n", ref_offset(ref)));
ret = jffs2_flash_read(c, ref_offset(ref), sizeof(n), &retlen, (char *)&n);
@@ -634,8 +647,8 @@ void jffs2_mark_node_obsolete(struct jffs2_sb_info *c, struct jffs2_raw_node_ref
printk(KERN_WARNING "Short read from obsoleted node at 0x%08x: %zd\n", ref_offset(ref), retlen);
goto out_erase_sem;
}
- if (PAD(je32_to_cpu(n.totlen)) != PAD(ref_totlen(c, jeb, ref))) {
- printk(KERN_WARNING "Node totlen on flash (0x%08x) != totlen from node ref (0x%08x)\n", je32_to_cpu(n.totlen), ref_totlen(c, jeb, ref));
+ if (PAD(je32_to_cpu(n.totlen)) != PAD(freed_len)) {
+ printk(KERN_WARNING "Node totlen on flash (0x%08x) != totlen from node ref (0x%08x)\n", je32_to_cpu(n.totlen), freed_len);
goto out_erase_sem;
}
if (!(je16_to_cpu(n.nodetype) & JFFS2_NODE_ACCURATE)) {
@@ -671,6 +684,10 @@ void jffs2_mark_node_obsolete(struct jffs2_sb_info *c, struct jffs2_raw_node_ref
spin_lock(&c->erase_completion_lock);
ic = jffs2_raw_ref_to_ic(ref);
+ /* It seems we should never call jffs2_mark_node_obsolete() for
+ XATTR nodes.... yet. Make sure we notice if/when we change
+ that :) */
+ BUG_ON(ic->class != RAWNODE_CLASS_INODE_CACHE);
for (p = &ic->nodes; (*p) != ref; p = &((*p)->next_in_ino))
;
@@ -683,51 +700,6 @@ void jffs2_mark_node_obsolete(struct jffs2_sb_info *c, struct jffs2_raw_node_ref
spin_unlock(&c->erase_completion_lock);
}
-
- /* Merge with the next node in the physical list, if there is one
- and if it's also obsolete and if it doesn't belong to any inode */
- if (ref->next_phys && ref_obsolete(ref->next_phys) &&
- !ref->next_phys->next_in_ino) {
- struct jffs2_raw_node_ref *n = ref->next_phys;
-
- spin_lock(&c->erase_completion_lock);
-
- ref->__totlen += n->__totlen;
- ref->next_phys = n->next_phys;
- if (jeb->last_node == n) jeb->last_node = ref;
- if (jeb->gc_node == n) {
- /* gc will be happy continuing gc on this node */
- jeb->gc_node=ref;
- }
- spin_unlock(&c->erase_completion_lock);
-
- jffs2_free_raw_node_ref(n);
- }
-
- /* Also merge with the previous node in the list, if there is one
- and that one is obsolete */
- if (ref != jeb->first_node ) {
- struct jffs2_raw_node_ref *p = jeb->first_node;
-
- spin_lock(&c->erase_completion_lock);
-
- while (p->next_phys != ref)
- p = p->next_phys;
-
- if (ref_obsolete(p) && !ref->next_in_ino) {
- p->__totlen += ref->__totlen;
- if (jeb->last_node == ref) {
- jeb->last_node = p;
- }
- if (jeb->gc_node == ref) {
- /* gc will be happy continuing gc on this node */
- jeb->gc_node=p;
- }
- p->next_phys = ref->next_phys;
- jffs2_free_raw_node_ref(ref);
- }
- spin_unlock(&c->erase_completion_lock);
- }
out_erase_sem:
up(&c->erase_free_sem);
}
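jffs2_add_physical_node_ref() now takes the node's offset with its REF_* state ORed into the two low bits, plus the owning jffs2_inode_cache (or NULL for inode-less nodes), and returns the freshly linked ref (or an ERR_PTR) instead of an int. A hedged usage sketch following the gc.c caller earlier in this patch (c, phys_ofs, rawlen and ic assumed in scope; the GC path itself does not check the return value):

	struct jffs2_raw_node_ref *ref;

	/* Passing 'ic' links the new ref into that object's next_in_ino chain;
	 * the low two bits of the offset carry the node state. */
	ref = jffs2_add_physical_node_ref(c, phys_ofs | REF_PRISTINE, rawlen, ic);
	if (IS_ERR(ref))
		return PTR_ERR(ref);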
diff --git a/fs/jffs2/os-linux.h b/fs/jffs2/os-linux.h
index d307cf54862..cd4021bcb94 100644
--- a/fs/jffs2/os-linux.h
+++ b/fs/jffs2/os-linux.h
@@ -31,9 +31,7 @@ struct kvec;
#define JFFS2_F_I_MODE(f) (OFNI_EDONI_2SFFJ(f)->i_mode)
#define JFFS2_F_I_UID(f) (OFNI_EDONI_2SFFJ(f)->i_uid)
#define JFFS2_F_I_GID(f) (OFNI_EDONI_2SFFJ(f)->i_gid)
-
-#define JFFS2_F_I_RDEV_MIN(f) (iminor(OFNI_EDONI_2SFFJ(f)))
-#define JFFS2_F_I_RDEV_MAJ(f) (imajor(OFNI_EDONI_2SFFJ(f)))
+#define JFFS2_F_I_RDEV(f) (OFNI_EDONI_2SFFJ(f)->i_rdev)
#define ITIME(sec) ((struct timespec){sec, 0})
#define I_SEC(tv) ((tv).tv_sec)
@@ -60,6 +58,10 @@ static inline void jffs2_init_inode_info(struct jffs2_inode_info *f)
f->target = NULL;
f->flags = 0;
f->usercompr = 0;
+#ifdef CONFIG_JFFS2_FS_POSIX_ACL
+ f->i_acl_access = JFFS2_ACL_NOT_CACHED;
+ f->i_acl_default = JFFS2_ACL_NOT_CACHED;
+#endif
}
@@ -90,13 +92,10 @@ static inline void jffs2_init_inode_info(struct jffs2_inode_info *f)
#define jffs2_flash_writev(a,b,c,d,e,f) jffs2_flash_direct_writev(a,b,c,d,e)
#define jffs2_wbuf_timeout NULL
#define jffs2_wbuf_process NULL
-#define jffs2_nor_ecc(c) (0)
#define jffs2_dataflash(c) (0)
-#define jffs2_nor_wbuf_flash(c) (0)
-#define jffs2_nor_ecc_flash_setup(c) (0)
-#define jffs2_nor_ecc_flash_cleanup(c) do {} while (0)
#define jffs2_dataflash_setup(c) (0)
#define jffs2_dataflash_cleanup(c) do {} while (0)
+#define jffs2_nor_wbuf_flash(c) (0)
#define jffs2_nor_wbuf_flash_setup(c) (0)
#define jffs2_nor_wbuf_flash_cleanup(c) do {} while (0)
@@ -107,9 +106,7 @@ static inline void jffs2_init_inode_info(struct jffs2_inode_info *f)
#ifdef CONFIG_JFFS2_SUMMARY
#define jffs2_can_mark_obsolete(c) (0)
#else
-#define jffs2_can_mark_obsolete(c) \
- ((c->mtd->type == MTD_NORFLASH && !(c->mtd->flags & (MTD_ECC|MTD_PROGRAM_REGIONS))) || \
- c->mtd->type == MTD_RAM)
+#define jffs2_can_mark_obsolete(c) (c->mtd->flags & (MTD_BIT_WRITEABLE))
#endif
#define jffs2_cleanmarker_oob(c) (c->mtd->type == MTD_NANDFLASH)
@@ -133,15 +130,11 @@ int jffs2_flush_wbuf_pad(struct jffs2_sb_info *c);
int jffs2_nand_flash_setup(struct jffs2_sb_info *c);
void jffs2_nand_flash_cleanup(struct jffs2_sb_info *c);
-#define jffs2_nor_ecc(c) (c->mtd->type == MTD_NORFLASH && (c->mtd->flags & MTD_ECC))
-int jffs2_nor_ecc_flash_setup(struct jffs2_sb_info *c);
-void jffs2_nor_ecc_flash_cleanup(struct jffs2_sb_info *c);
-
#define jffs2_dataflash(c) (c->mtd->type == MTD_DATAFLASH)
int jffs2_dataflash_setup(struct jffs2_sb_info *c);
void jffs2_dataflash_cleanup(struct jffs2_sb_info *c);
-#define jffs2_nor_wbuf_flash(c) (c->mtd->type == MTD_NORFLASH && (c->mtd->flags & MTD_PROGRAM_REGIONS))
+#define jffs2_nor_wbuf_flash(c) (c->mtd->type == MTD_NORFLASH && ! (c->mtd->flags & MTD_BIT_WRITEABLE))
int jffs2_nor_wbuf_flash_setup(struct jffs2_sb_info *c);
void jffs2_nor_wbuf_flash_cleanup(struct jffs2_sb_info *c);
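
Editor's note: the os-linux.h change above keys both jffs2_can_mark_obsolete() and jffs2_nor_wbuf_flash() off MTD_BIT_WRITEABLE instead of the old ECC/program-region flags. A hedged userspace sketch of that decision follows; the flag values and the mtd_like struct are simplified stand-ins, the real definitions live in <linux/mtd/mtd.h>.

    #include <stdbool.h>
    #include <stdio.h>

    /* Stand-ins for the MTD flags/types the macros above test. */
    #define MTD_BIT_WRITEABLE  0x800
    #define MTD_NORFLASH       3

    struct mtd_like {
        int type;
        unsigned flags;
    };

    /* Mirrors the new jffs2_can_mark_obsolete(): a node can be obsoleted
     * in place only if individual bits can be cleared without an erase. */
    static bool can_mark_obsolete(const struct mtd_like *mtd)
    {
        return mtd->flags & MTD_BIT_WRITEABLE;
    }

    /* Mirrors jffs2_nor_wbuf_flash(): NOR-style chips that are NOT
     * bit-writeable need the write buffer path instead. */
    static bool nor_wbuf_flash(const struct mtd_like *mtd)
    {
        return mtd->type == MTD_NORFLASH && !(mtd->flags & MTD_BIT_WRITEABLE);
    }

    int main(void)
    {
        struct mtd_like plain_nor = { MTD_NORFLASH, MTD_BIT_WRITEABLE };
        struct mtd_like ecc_nor   = { MTD_NORFLASH, 0 };

        printf("plain NOR: obsolete-in-place=%d, wbuf=%d\n",
               can_mark_obsolete(&plain_nor), nor_wbuf_flash(&plain_nor));
        printf("ECC'd NOR: obsolete-in-place=%d, wbuf=%d\n",
               can_mark_obsolete(&ecc_nor), nor_wbuf_flash(&ecc_nor));
        return 0;
    }
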
diff --git a/fs/jffs2/readinode.c b/fs/jffs2/readinode.c
index f1695642d0f..5fec012b02e 100644
--- a/fs/jffs2/readinode.c
+++ b/fs/jffs2/readinode.c
@@ -66,7 +66,7 @@ static void jffs2_free_tmp_dnode_info_list(struct rb_root *list)
jffs2_free_full_dnode(tn->fn);
jffs2_free_tmp_dnode_info(tn);
- this = this->rb_parent;
+ this = rb_parent(this);
if (!this)
break;
@@ -116,19 +116,42 @@ static inline int read_direntry(struct jffs2_sb_info *c, struct jffs2_raw_node_r
uint32_t *latest_mctime, uint32_t *mctime_ver)
{
struct jffs2_full_dirent *fd;
+ uint32_t crc;
- /* The direntry nodes are checked during the flash scanning */
- BUG_ON(ref_flags(ref) == REF_UNCHECKED);
/* Obsoleted. This cannot happen, surely? dwmw2 20020308 */
BUG_ON(ref_obsolete(ref));
- /* Sanity check */
- if (unlikely(PAD((rd->nsize + sizeof(*rd))) != PAD(je32_to_cpu(rd->totlen)))) {
- JFFS2_ERROR("illegal nsize in node at %#08x: nsize %#02x, totlen %#04x\n",
- ref_offset(ref), rd->nsize, je32_to_cpu(rd->totlen));
+ crc = crc32(0, rd, sizeof(*rd) - 8);
+ if (unlikely(crc != je32_to_cpu(rd->node_crc))) {
+ JFFS2_NOTICE("header CRC failed on dirent node at %#08x: read %#08x, calculated %#08x\n",
+ ref_offset(ref), je32_to_cpu(rd->node_crc), crc);
return 1;
}
+ /* If we've never checked the CRCs on this node, check them now */
+ if (ref_flags(ref) == REF_UNCHECKED) {
+ struct jffs2_eraseblock *jeb;
+ int len;
+
+ /* Sanity check */
+ if (unlikely(PAD((rd->nsize + sizeof(*rd))) != PAD(je32_to_cpu(rd->totlen)))) {
+ JFFS2_ERROR("illegal nsize in node at %#08x: nsize %#02x, totlen %#04x\n",
+ ref_offset(ref), rd->nsize, je32_to_cpu(rd->totlen));
+ return 1;
+ }
+
+ jeb = &c->blocks[ref->flash_offset / c->sector_size];
+ len = ref_totlen(c, jeb, ref);
+
+ spin_lock(&c->erase_completion_lock);
+ jeb->used_size += len;
+ jeb->unchecked_size -= len;
+ c->used_size += len;
+ c->unchecked_size -= len;
+ ref->flash_offset = ref_offset(ref) | REF_PRISTINE;
+ spin_unlock(&c->erase_completion_lock);
+ }
+
fd = jffs2_alloc_full_dirent(rd->nsize + 1);
if (unlikely(!fd))
return -ENOMEM;
@@ -198,13 +221,21 @@ static inline int read_dnode(struct jffs2_sb_info *c, struct jffs2_raw_node_ref
struct jffs2_tmp_dnode_info *tn;
uint32_t len, csize;
int ret = 1;
+ uint32_t crc;
/* Obsoleted. This cannot happen, surely? dwmw2 20020308 */
BUG_ON(ref_obsolete(ref));
+ crc = crc32(0, rd, sizeof(*rd) - 8);
+ if (unlikely(crc != je32_to_cpu(rd->node_crc))) {
+ JFFS2_NOTICE("node CRC failed on dnode at %#08x: read %#08x, calculated %#08x\n",
+ ref_offset(ref), je32_to_cpu(rd->node_crc), crc);
+ return 1;
+ }
+
tn = jffs2_alloc_tmp_dnode_info();
if (!tn) {
- JFFS2_ERROR("failed to allocate tn (%d bytes).\n", sizeof(*tn));
+ JFFS2_ERROR("failed to allocate tn (%zu bytes).\n", sizeof(*tn));
return -ENOMEM;
}
@@ -213,14 +244,6 @@ static inline int read_dnode(struct jffs2_sb_info *c, struct jffs2_raw_node_ref
/* If we've never checked the CRCs on this node, check them now */
if (ref_flags(ref) == REF_UNCHECKED) {
- uint32_t crc;
-
- crc = crc32(0, rd, sizeof(*rd) - 8);
- if (unlikely(crc != je32_to_cpu(rd->node_crc))) {
- JFFS2_NOTICE("header CRC failed on node at %#08x: read %#08x, calculated %#08x\n",
- ref_offset(ref), je32_to_cpu(rd->node_crc), crc);
- goto free_out;
- }
/* Sanity checks */
if (unlikely(je32_to_cpu(rd->offset) > je32_to_cpu(rd->isize)) ||
@@ -343,7 +366,7 @@ free_out:
* Helper function for jffs2_get_inode_nodes().
* It is called every time an unknown node is found.
*
- * Returns: 0 on succes;
+ * Returns: 0 on success;
* 1 if the node should be marked obsolete;
* negative error code on failure.
*/
@@ -354,37 +377,30 @@ static inline int read_unknown(struct jffs2_sb_info *c, struct jffs2_raw_node_re
un->nodetype = cpu_to_je16(JFFS2_NODE_ACCURATE | je16_to_cpu(un->nodetype));
- if (crc32(0, un, sizeof(struct jffs2_unknown_node) - 4) != je32_to_cpu(un->hdr_crc)) {
- /* Hmmm. This should have been caught at scan time. */
- JFFS2_NOTICE("node header CRC failed at %#08x. But it must have been OK earlier.\n", ref_offset(ref));
- jffs2_dbg_dump_node(c, ref_offset(ref));
- return 1;
- } else {
- switch(je16_to_cpu(un->nodetype) & JFFS2_COMPAT_MASK) {
+ switch(je16_to_cpu(un->nodetype) & JFFS2_COMPAT_MASK) {
- case JFFS2_FEATURE_INCOMPAT:
- JFFS2_ERROR("unknown INCOMPAT nodetype %#04X at %#08x\n",
- je16_to_cpu(un->nodetype), ref_offset(ref));
- /* EEP */
- BUG();
- break;
+ case JFFS2_FEATURE_INCOMPAT:
+ JFFS2_ERROR("unknown INCOMPAT nodetype %#04X at %#08x\n",
+ je16_to_cpu(un->nodetype), ref_offset(ref));
+ /* EEP */
+ BUG();
+ break;
- case JFFS2_FEATURE_ROCOMPAT:
- JFFS2_ERROR("unknown ROCOMPAT nodetype %#04X at %#08x\n",
- je16_to_cpu(un->nodetype), ref_offset(ref));
- BUG_ON(!(c->flags & JFFS2_SB_FLAG_RO));
- break;
+ case JFFS2_FEATURE_ROCOMPAT:
+ JFFS2_ERROR("unknown ROCOMPAT nodetype %#04X at %#08x\n",
+ je16_to_cpu(un->nodetype), ref_offset(ref));
+ BUG_ON(!(c->flags & JFFS2_SB_FLAG_RO));
+ break;
- case JFFS2_FEATURE_RWCOMPAT_COPY:
- JFFS2_NOTICE("unknown RWCOMPAT_COPY nodetype %#04X at %#08x\n",
- je16_to_cpu(un->nodetype), ref_offset(ref));
- break;
+ case JFFS2_FEATURE_RWCOMPAT_COPY:
+ JFFS2_NOTICE("unknown RWCOMPAT_COPY nodetype %#04X at %#08x\n",
+ je16_to_cpu(un->nodetype), ref_offset(ref));
+ break;
- case JFFS2_FEATURE_RWCOMPAT_DELETE:
- JFFS2_NOTICE("unknown RWCOMPAT_DELETE nodetype %#04X at %#08x\n",
- je16_to_cpu(un->nodetype), ref_offset(ref));
- return 1;
- }
+ case JFFS2_FEATURE_RWCOMPAT_DELETE:
+ JFFS2_NOTICE("unknown RWCOMPAT_DELETE nodetype %#04X at %#08x\n",
+ je16_to_cpu(un->nodetype), ref_offset(ref));
+ return 1;
}
return 0;
@@ -434,7 +450,7 @@ static int read_more(struct jffs2_sb_info *c, struct jffs2_raw_node_ref *ref,
}
if (retlen < len) {
- JFFS2_ERROR("short read at %#08x: %d instead of %d.\n",
+ JFFS2_ERROR("short read at %#08x: %zu instead of %d.\n",
offs, retlen, len);
return -EIO;
}
@@ -542,13 +558,25 @@ static int jffs2_get_inode_nodes(struct jffs2_sb_info *c, struct jffs2_inode_inf
}
if (retlen < len) {
- JFFS2_ERROR("short read at %#08x: %d instead of %d.\n", ref_offset(ref), retlen, len);
+ JFFS2_ERROR("short read at %#08x: %zu instead of %d.\n", ref_offset(ref), retlen, len);
err = -EIO;
goto free_out;
}
node = (union jffs2_node_union *)bufstart;
+ /* No need to mask in the valid bit; it shouldn't be invalid */
+ if (je32_to_cpu(node->u.hdr_crc) != crc32(0, node, sizeof(node->u)-4)) {
+ JFFS2_NOTICE("Node header CRC failed at %#08x. {%04x,%04x,%08x,%08x}\n",
+ ref_offset(ref), je16_to_cpu(node->u.magic),
+ je16_to_cpu(node->u.nodetype),
+ je32_to_cpu(node->u.totlen),
+ je32_to_cpu(node->u.hdr_crc));
+ jffs2_dbg_dump_node(c, ref_offset(ref));
+ jffs2_mark_node_obsolete(c, ref);
+ goto cont;
+ }
+
switch (je16_to_cpu(node->u.nodetype)) {
case JFFS2_NODETYPE_DIRENT:
@@ -606,6 +634,7 @@ static int jffs2_get_inode_nodes(struct jffs2_sb_info *c, struct jffs2_inode_inf
goto free_out;
}
+ cont:
spin_lock(&c->erase_completion_lock);
}
@@ -679,12 +708,12 @@ static int jffs2_do_read_inode_internal(struct jffs2_sb_info *c,
jffs2_mark_node_obsolete(c, fn->raw);
BUG_ON(rb->rb_left);
- if (rb->rb_parent && rb->rb_parent->rb_left == rb) {
+ if (rb_parent(rb) && rb_parent(rb)->rb_left == rb) {
/* We were then left-hand child of our parent. We need
* to move our own right-hand child into our place. */
repl_rb = rb->rb_right;
if (repl_rb)
- repl_rb->rb_parent = rb->rb_parent;
+ rb_set_parent(repl_rb, rb_parent(rb));
} else
repl_rb = NULL;
@@ -692,14 +721,14 @@ static int jffs2_do_read_inode_internal(struct jffs2_sb_info *c,
/* Remove the spent tn from the tree; don't bother rebalancing
* but put our right-hand child in our own place. */
- if (tn->rb.rb_parent) {
- if (tn->rb.rb_parent->rb_left == &tn->rb)
- tn->rb.rb_parent->rb_left = repl_rb;
- else if (tn->rb.rb_parent->rb_right == &tn->rb)
- tn->rb.rb_parent->rb_right = repl_rb;
+ if (rb_parent(&tn->rb)) {
+ if (rb_parent(&tn->rb)->rb_left == &tn->rb)
+ rb_parent(&tn->rb)->rb_left = repl_rb;
+ else if (rb_parent(&tn->rb)->rb_right == &tn->rb)
+ rb_parent(&tn->rb)->rb_right = repl_rb;
else BUG();
} else if (tn->rb.rb_right)
- tn->rb.rb_right->rb_parent = NULL;
+ rb_set_parent(tn->rb.rb_right, NULL);
jffs2_free_tmp_dnode_info(tn);
if (ret) {
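
Editor's note: both read_direntry() and read_dnode() above now verify the node CRC up front with crc32(0, rd, sizeof(*rd) - 8), i.e. over the header minus the two trailing CRC words. A hedged userspace sketch of that check follows; the struct layout is a simplified stand-in for jffs2_raw_dirent and zlib's crc32() stands in for the kernel helper (different CRC conventions, same shape of check). Build with -lz.

    #include <stdint.h>
    #include <string.h>
    #include <stdio.h>
    #include <zlib.h>

    struct fake_raw_dirent {
        uint16_t magic;
        uint16_t nodetype;
        uint32_t totlen;
        uint32_t hdr_crc;
        uint32_t pino;
        uint32_t version;
        uint32_t ino;
        uint32_t mctime;
        uint8_t  nsize;
        uint8_t  type;
        uint8_t  unused[2];
        uint32_t node_crc;      /* covers everything above this field ... */
        uint32_t name_crc;      /* ... and this one, hence "sizeof - 8" */
    } __attribute__((packed));

    static int node_crc_ok(const struct fake_raw_dirent *rd)
    {
        /* The node CRC protects the header up to, but excluding, the two
         * trailing CRC words -- hence sizeof(*rd) - 8 in the real code. */
        uint32_t crc = crc32(0, (const unsigned char *)rd, sizeof(*rd) - 8);
        return crc == rd->node_crc;
    }

    int main(void)
    {
        struct fake_raw_dirent rd;
        memset(&rd, 0, sizeof(rd));
        rd.node_crc = crc32(0, (const unsigned char *)&rd, sizeof(rd) - 8);
        printf("crc ok: %d\n", node_crc_ok(&rd));   /* 1 */
        rd.version ^= 1;                            /* corrupt a covered field */
        printf("crc ok: %d\n", node_crc_ok(&rd));   /* 0 */
        return 0;
    }
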
diff --git a/fs/jffs2/scan.c b/fs/jffs2/scan.c
index cf55b221fc2..61618080b86 100644
--- a/fs/jffs2/scan.c
+++ b/fs/jffs2/scan.c
@@ -65,6 +65,28 @@ static inline uint32_t EMPTY_SCAN_SIZE(uint32_t sector_size) {
return DEFAULT_EMPTY_SCAN_SIZE;
}
+static int file_dirty(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb)
+{
+ int ret;
+
+ if ((ret = jffs2_prealloc_raw_node_refs(c, jeb, 1)))
+ return ret;
+ if ((ret = jffs2_scan_dirty_space(c, jeb, jeb->free_size)))
+ return ret;
+ /* Turn wasted size into dirty, since we apparently
+ think it's recoverable now. */
+ jeb->dirty_size += jeb->wasted_size;
+ c->dirty_size += jeb->wasted_size;
+ c->wasted_size -= jeb->wasted_size;
+ jeb->wasted_size = 0;
+ if (VERYDIRTY(c, jeb->dirty_size)) {
+ list_add(&jeb->list, &c->very_dirty_list);
+ } else {
+ list_add(&jeb->list, &c->dirty_list);
+ }
+ return 0;
+}
+
int jffs2_scan_medium(struct jffs2_sb_info *c)
{
int i, ret;
@@ -170,34 +192,20 @@ int jffs2_scan_medium(struct jffs2_sb_info *c)
(!c->nextblock || c->nextblock->free_size < jeb->free_size)) {
/* Better candidate for the next writes to go to */
if (c->nextblock) {
- c->nextblock->dirty_size += c->nextblock->free_size + c->nextblock->wasted_size;
- c->dirty_size += c->nextblock->free_size + c->nextblock->wasted_size;
- c->free_size -= c->nextblock->free_size;
- c->wasted_size -= c->nextblock->wasted_size;
- c->nextblock->free_size = c->nextblock->wasted_size = 0;
- if (VERYDIRTY(c, c->nextblock->dirty_size)) {
- list_add(&c->nextblock->list, &c->very_dirty_list);
- } else {
- list_add(&c->nextblock->list, &c->dirty_list);
- }
+ ret = file_dirty(c, c->nextblock);
+ if (ret)
+ return ret;
/* deleting summary information of the old nextblock */
jffs2_sum_reset_collected(c->summary);
}
- /* update collected summary infromation for the current nextblock */
+ /* update collected summary information for the current nextblock */
jffs2_sum_move_collected(c, s);
D1(printk(KERN_DEBUG "jffs2_scan_medium(): new nextblock = 0x%08x\n", jeb->offset));
c->nextblock = jeb;
} else {
- jeb->dirty_size += jeb->free_size + jeb->wasted_size;
- c->dirty_size += jeb->free_size + jeb->wasted_size;
- c->free_size -= jeb->free_size;
- c->wasted_size -= jeb->wasted_size;
- jeb->free_size = jeb->wasted_size = 0;
- if (VERYDIRTY(c, jeb->dirty_size)) {
- list_add(&jeb->list, &c->very_dirty_list);
- } else {
- list_add(&jeb->list, &c->dirty_list);
- }
+ ret = file_dirty(c, jeb);
+ if (ret)
+ return ret;
}
break;
@@ -222,9 +230,6 @@ int jffs2_scan_medium(struct jffs2_sb_info *c)
}
}
- if (jffs2_sum_active() && s)
- kfree(s);
-
/* Nextblock dirty is always seen as wasted, because we cannot recycle it now */
if (c->nextblock && (c->nextblock->dirty_size)) {
c->nextblock->wasted_size += c->nextblock->dirty_size;
@@ -242,11 +247,8 @@ int jffs2_scan_medium(struct jffs2_sb_info *c)
D1(printk(KERN_DEBUG "jffs2_scan_medium(): Skipping %d bytes in nextblock to ensure page alignment\n",
skip));
- c->nextblock->wasted_size += skip;
- c->wasted_size += skip;
-
- c->nextblock->free_size -= skip;
- c->free_size -= skip;
+ jffs2_prealloc_raw_node_refs(c, c->nextblock, 1);
+ jffs2_scan_dirty_space(c, c->nextblock, skip);
}
#endif
if (c->nr_erasing_blocks) {
@@ -266,6 +268,9 @@ int jffs2_scan_medium(struct jffs2_sb_info *c)
else
c->mtd->unpoint(c->mtd, flashbuf, 0, c->mtd->size);
#endif
+ if (s)
+ kfree(s);
+
return ret;
}
@@ -290,7 +295,7 @@ int jffs2_fill_scan_buf (struct jffs2_sb_info *c, void *buf,
int jffs2_scan_classify_jeb(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb)
{
if ((jeb->used_size + jeb->unchecked_size) == PAD(c->cleanmarker_size) && !jeb->dirty_size
- && (!jeb->first_node || !jeb->first_node->next_phys) )
+ && (!jeb->first_node || !ref_next(jeb->first_node)) )
return BLK_STATE_CLEANMARKER;
/* move blocks with max 4 byte dirty space to cleanlist */
@@ -306,11 +311,119 @@ int jffs2_scan_classify_jeb(struct jffs2_sb_info *c, struct jffs2_eraseblock *je
return BLK_STATE_ALLDIRTY;
}
+#ifdef CONFIG_JFFS2_FS_XATTR
+static int jffs2_scan_xattr_node(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb,
+ struct jffs2_raw_xattr *rx, uint32_t ofs,
+ struct jffs2_summary *s)
+{
+ struct jffs2_xattr_datum *xd;
+ uint32_t totlen, crc;
+ int err;
+
+ crc = crc32(0, rx, sizeof(struct jffs2_raw_xattr) - 4);
+ if (crc != je32_to_cpu(rx->node_crc)) {
+ if (je32_to_cpu(rx->node_crc) != 0xffffffff)
+ JFFS2_WARNING("node CRC failed at %#08x, read=%#08x, calc=%#08x\n",
+ ofs, je32_to_cpu(rx->node_crc), crc);
+ if ((err = jffs2_scan_dirty_space(c, jeb, je32_to_cpu(rx->totlen))))
+ return err;
+ return 0;
+ }
+
+ totlen = PAD(sizeof(*rx) + rx->name_len + 1 + je16_to_cpu(rx->value_len));
+ if (totlen != je32_to_cpu(rx->totlen)) {
+ JFFS2_WARNING("node length mismatch at %#08x, read=%u, calc=%u\n",
+ ofs, je32_to_cpu(rx->totlen), totlen);
+ if ((err = jffs2_scan_dirty_space(c, jeb, je32_to_cpu(rx->totlen))))
+ return err;
+ return 0;
+ }
+
+ xd = jffs2_setup_xattr_datum(c, je32_to_cpu(rx->xid), je32_to_cpu(rx->version));
+ if (IS_ERR(xd)) {
+ if (PTR_ERR(xd) == -EEXIST) {
+ if ((err = jffs2_scan_dirty_space(c, jeb, PAD(je32_to_cpu(rx->totlen)))))
+ return err;
+ return 0;
+ }
+ return PTR_ERR(xd);
+ }
+ xd->xprefix = rx->xprefix;
+ xd->name_len = rx->name_len;
+ xd->value_len = je16_to_cpu(rx->value_len);
+ xd->data_crc = je32_to_cpu(rx->data_crc);
+
+ xd->node = jffs2_link_node_ref(c, jeb, ofs | REF_PRISTINE, totlen, NULL);
+ /* FIXME */ xd->node->next_in_ino = (void *)xd;
+
+ if (jffs2_sum_active())
+ jffs2_sum_add_xattr_mem(s, rx, ofs - jeb->offset);
+ dbg_xattr("scanning xdatum at %#08x (xid=%u, version=%u)\n",
+ ofs, xd->xid, xd->version);
+ return 0;
+}
+
+static int jffs2_scan_xref_node(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb,
+ struct jffs2_raw_xref *rr, uint32_t ofs,
+ struct jffs2_summary *s)
+{
+ struct jffs2_xattr_ref *ref;
+ uint32_t crc;
+ int err;
+
+ crc = crc32(0, rr, sizeof(*rr) - 4);
+ if (crc != je32_to_cpu(rr->node_crc)) {
+ if (je32_to_cpu(rr->node_crc) != 0xffffffff)
+ JFFS2_WARNING("node CRC failed at %#08x, read=%#08x, calc=%#08x\n",
+ ofs, je32_to_cpu(rr->node_crc), crc);
+ if ((err = jffs2_scan_dirty_space(c, jeb, PAD(je32_to_cpu(rr->totlen)))))
+ return err;
+ return 0;
+ }
+
+ if (PAD(sizeof(struct jffs2_raw_xref)) != je32_to_cpu(rr->totlen)) {
+ JFFS2_WARNING("node length mismatch at %#08x, read=%u, calc=%zd\n",
+ ofs, je32_to_cpu(rr->totlen),
+ PAD(sizeof(struct jffs2_raw_xref)));
+ if ((err = jffs2_scan_dirty_space(c, jeb, je32_to_cpu(rr->totlen))))
+ return err;
+ return 0;
+ }
+
+ ref = jffs2_alloc_xattr_ref();
+ if (!ref)
+ return -ENOMEM;
+
+ /* Before jffs2_build_xattr_subsystem() is called,
+ * ref->xid is used to store the 32-bit xid; xd is not used.
+ * ref->ino is used to store the 32-bit inode number; ic is not used.
+ * These fields are declared as a union, so their uses are mutually
+ * exclusive. Similarly, ref->next is temporarily used to chain all
+ * xattr_ref objects; it is re-chained to jffs2_inode_cache in
+ * jffs2_build_xattr_subsystem().
+ */
+ ref->ino = je32_to_cpu(rr->ino);
+ ref->xid = je32_to_cpu(rr->xid);
+ ref->next = c->xref_temp;
+ c->xref_temp = ref;
+
+ ref->node = jffs2_link_node_ref(c, jeb, ofs | REF_PRISTINE, PAD(je32_to_cpu(rr->totlen)), NULL);
+ /* FIXME */ ref->node->next_in_ino = (void *)ref;
+
+ if (jffs2_sum_active())
+ jffs2_sum_add_xref_mem(s, rr, ofs - jeb->offset);
+ dbg_xattr("scan xref at %#08x (xid=%u, ino=%u)\n",
+ ofs, ref->xid, ref->ino);
+ return 0;
+}
+#endif
+
+/* Called with 'buf_size == 0' if buf is in fact a pointer _directly_ into
+ the flash, XIP-style */
static int jffs2_scan_eraseblock (struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb,
- unsigned char *buf, uint32_t buf_size, struct jffs2_summary *s) {
+ unsigned char *buf, uint32_t buf_size, struct jffs2_summary *s) {
struct jffs2_unknown_node *node;
struct jffs2_unknown_node crcnode;
- struct jffs2_sum_marker *sm;
uint32_t ofs, prevofs;
uint32_t hdr_crc, buf_ofs, buf_len;
int err;
@@ -344,44 +457,75 @@ static int jffs2_scan_eraseblock (struct jffs2_sb_info *c, struct jffs2_eraseblo
#endif
if (jffs2_sum_active()) {
- sm = kmalloc(sizeof(struct jffs2_sum_marker), GFP_KERNEL);
- if (!sm) {
- return -ENOMEM;
- }
-
- err = jffs2_fill_scan_buf(c, (unsigned char *) sm, jeb->offset + c->sector_size -
- sizeof(struct jffs2_sum_marker), sizeof(struct jffs2_sum_marker));
- if (err) {
- kfree(sm);
- return err;
- }
-
- if (je32_to_cpu(sm->magic) == JFFS2_SUM_MAGIC ) {
- err = jffs2_sum_scan_sumnode(c, jeb, je32_to_cpu(sm->offset), &pseudo_random);
- if (err) {
- kfree(sm);
+ struct jffs2_sum_marker *sm;
+ void *sumptr = NULL;
+ uint32_t sumlen;
+
+ if (!buf_size) {
+ /* XIP case. Just look, point at the summary if it's there */
+ sm = (void *)buf + c->sector_size - sizeof(*sm);
+ if (je32_to_cpu(sm->magic) == JFFS2_SUM_MAGIC) {
+ sumptr = buf + je32_to_cpu(sm->offset);
+ sumlen = c->sector_size - je32_to_cpu(sm->offset);
+ }
+ } else {
+ /* If NAND flash, read a whole page of it. Else just the end */
+ if (c->wbuf_pagesize)
+ buf_len = c->wbuf_pagesize;
+ else
+ buf_len = sizeof(*sm);
+
+ /* Read as much as we want into the _end_ of the preallocated buffer */
+ err = jffs2_fill_scan_buf(c, buf + buf_size - buf_len,
+ jeb->offset + c->sector_size - buf_len,
+ buf_len);
+ if (err)
return err;
+
+ sm = (void *)buf + buf_size - sizeof(*sm);
+ if (je32_to_cpu(sm->magic) == JFFS2_SUM_MAGIC) {
+ sumlen = c->sector_size - je32_to_cpu(sm->offset);
+ sumptr = buf + buf_size - sumlen;
+
+ /* Now, make sure the summary itself is available */
+ if (sumlen > buf_size) {
+ /* Need to kmalloc for this. */
+ sumptr = kmalloc(sumlen, GFP_KERNEL);
+ if (!sumptr)
+ return -ENOMEM;
+ memcpy(sumptr + sumlen - buf_len, buf + buf_size - buf_len, buf_len);
+ }
+ if (buf_len < sumlen) {
+ /* Need to read more so that the entire summary node is present */
+ err = jffs2_fill_scan_buf(c, sumptr,
+ jeb->offset + c->sector_size - sumlen,
+ sumlen - buf_len);
+ if (err)
+ return err;
+ }
}
+
}
- kfree(sm);
+ if (sumptr) {
+ err = jffs2_sum_scan_sumnode(c, jeb, sumptr, sumlen, &pseudo_random);
- ofs = jeb->offset;
- prevofs = jeb->offset - 1;
+ if (buf_size && sumlen > buf_size)
+ kfree(sumptr);
+ /* If it returns with a real error, bail.
+ If it returns positive, that's a block classification
+ (i.e. BLK_STATE_xxx) so return that too.
+ If it returns zero, fall through to full scan. */
+ if (err)
+ return err;
+ }
}
buf_ofs = jeb->offset;
if (!buf_size) {
+ /* This is the XIP case -- we're reading _directly_ from the flash chip */
buf_len = c->sector_size;
-
- if (jffs2_sum_active()) {
- /* must reread because of summary test */
- err = jffs2_fill_scan_buf(c, buf, buf_ofs, buf_len);
- if (err)
- return err;
- }
-
} else {
buf_len = EMPTY_SCAN_SIZE(c->sector_size);
err = jffs2_fill_scan_buf(c, buf, buf_ofs, buf_len);
@@ -418,7 +562,10 @@ static int jffs2_scan_eraseblock (struct jffs2_sb_info *c, struct jffs2_eraseblo
if (ofs) {
D1(printk(KERN_DEBUG "Free space at %08x ends at %08x\n", jeb->offset,
jeb->offset + ofs));
- DIRTY_SPACE(ofs);
+ if ((err = jffs2_prealloc_raw_node_refs(c, jeb, 1)))
+ return err;
+ if ((err = jffs2_scan_dirty_space(c, jeb, ofs)))
+ return err;
}
/* Now ofs is a complete physical flash offset as it always was... */
@@ -433,6 +580,11 @@ scan_more:
jffs2_dbg_acct_paranoia_check_nolock(c, jeb);
+ /* Make sure there are node refs available for use */
+ err = jffs2_prealloc_raw_node_refs(c, jeb, 2);
+ if (err)
+ return err;
+
cond_resched();
if (ofs & 3) {
@@ -442,7 +594,8 @@ scan_more:
}
if (ofs == prevofs) {
printk(KERN_WARNING "ofs 0x%08x has already been seen. Skipping\n", ofs);
- DIRTY_SPACE(4);
+ if ((err = jffs2_scan_dirty_space(c, jeb, 4)))
+ return err;
ofs += 4;
continue;
}
@@ -451,7 +604,8 @@ scan_more:
if (jeb->offset + c->sector_size < ofs + sizeof(*node)) {
D1(printk(KERN_DEBUG "Fewer than %zd bytes left to end of block. (%x+%x<%x+%zx) Not reading\n", sizeof(struct jffs2_unknown_node),
jeb->offset, c->sector_size, ofs, sizeof(*node)));
- DIRTY_SPACE((jeb->offset + c->sector_size)-ofs);
+ if ((err = jffs2_scan_dirty_space(c, jeb, (jeb->offset + c->sector_size)-ofs)))
+ return err;
break;
}
@@ -481,7 +635,8 @@ scan_more:
if (*(uint32_t *)(&buf[inbuf_ofs]) != 0xffffffff) {
printk(KERN_WARNING "Empty flash at 0x%08x ends at 0x%08x\n",
empty_start, ofs);
- DIRTY_SPACE(ofs-empty_start);
+ if ((err = jffs2_scan_dirty_space(c, jeb, ofs-empty_start)))
+ return err;
goto scan_more;
}
@@ -494,7 +649,7 @@ scan_more:
/* If we're only checking the beginning of a block with a cleanmarker,
bail now */
if (buf_ofs == jeb->offset && jeb->used_size == PAD(c->cleanmarker_size) &&
- c->cleanmarker_size && !jeb->dirty_size && !jeb->first_node->next_phys) {
+ c->cleanmarker_size && !jeb->dirty_size && !ref_next(jeb->first_node)) {
D1(printk(KERN_DEBUG "%d bytes at start of block seems clean... assuming all clean\n", EMPTY_SCAN_SIZE(c->sector_size)));
return BLK_STATE_CLEANMARKER;
}
@@ -518,20 +673,23 @@ scan_more:
if (ofs == jeb->offset && je16_to_cpu(node->magic) == KSAMTIB_CIGAM_2SFFJ) {
printk(KERN_WARNING "Magic bitmask is backwards at offset 0x%08x. Wrong endian filesystem?\n", ofs);
- DIRTY_SPACE(4);
+ if ((err = jffs2_scan_dirty_space(c, jeb, 4)))
+ return err;
ofs += 4;
continue;
}
if (je16_to_cpu(node->magic) == JFFS2_DIRTY_BITMASK) {
D1(printk(KERN_DEBUG "Dirty bitmask at 0x%08x\n", ofs));
- DIRTY_SPACE(4);
+ if ((err = jffs2_scan_dirty_space(c, jeb, 4)))
+ return err;
ofs += 4;
continue;
}
if (je16_to_cpu(node->magic) == JFFS2_OLD_MAGIC_BITMASK) {
printk(KERN_WARNING "Old JFFS2 bitmask found at 0x%08x\n", ofs);
printk(KERN_WARNING "You cannot use older JFFS2 filesystems with newer kernels\n");
- DIRTY_SPACE(4);
+ if ((err = jffs2_scan_dirty_space(c, jeb, 4)))
+ return err;
ofs += 4;
continue;
}
@@ -540,7 +698,8 @@ scan_more:
noisy_printk(&noise, "jffs2_scan_eraseblock(): Magic bitmask 0x%04x not found at 0x%08x: 0x%04x instead\n",
JFFS2_MAGIC_BITMASK, ofs,
je16_to_cpu(node->magic));
- DIRTY_SPACE(4);
+ if ((err = jffs2_scan_dirty_space(c, jeb, 4)))
+ return err;
ofs += 4;
continue;
}
@@ -557,7 +716,8 @@ scan_more:
je32_to_cpu(node->totlen),
je32_to_cpu(node->hdr_crc),
hdr_crc);
- DIRTY_SPACE(4);
+ if ((err = jffs2_scan_dirty_space(c, jeb, 4)))
+ return err;
ofs += 4;
continue;
}
@@ -568,7 +728,8 @@ scan_more:
printk(KERN_WARNING "Node at 0x%08x with length 0x%08x would run over the end of the erase block\n",
ofs, je32_to_cpu(node->totlen));
printk(KERN_WARNING "Perhaps the file system was created with the wrong erase size?\n");
- DIRTY_SPACE(4);
+ if ((err = jffs2_scan_dirty_space(c, jeb, 4)))
+ return err;
ofs += 4;
continue;
}
@@ -576,7 +737,8 @@ scan_more:
if (!(je16_to_cpu(node->nodetype) & JFFS2_NODE_ACCURATE)) {
/* Wheee. This is an obsoleted node */
D2(printk(KERN_DEBUG "Node at 0x%08x is obsolete. Skipping\n", ofs));
- DIRTY_SPACE(PAD(je32_to_cpu(node->totlen)));
+ if ((err = jffs2_scan_dirty_space(c, jeb, PAD(je32_to_cpu(node->totlen)))))
+ return err;
ofs += PAD(je32_to_cpu(node->totlen));
continue;
}
@@ -614,30 +776,59 @@ scan_more:
ofs += PAD(je32_to_cpu(node->totlen));
break;
+#ifdef CONFIG_JFFS2_FS_XATTR
+ case JFFS2_NODETYPE_XATTR:
+ if (buf_ofs + buf_len < ofs + je32_to_cpu(node->totlen)) {
+ buf_len = min_t(uint32_t, buf_size, jeb->offset + c->sector_size - ofs);
+ D1(printk(KERN_DEBUG "Fewer than %d bytes (xattr node)"
+ " left to end of buf. Reading 0x%x at 0x%08x\n",
+ je32_to_cpu(node->totlen), buf_len, ofs));
+ err = jffs2_fill_scan_buf(c, buf, ofs, buf_len);
+ if (err)
+ return err;
+ buf_ofs = ofs;
+ node = (void *)buf;
+ }
+ err = jffs2_scan_xattr_node(c, jeb, (void *)node, ofs, s);
+ if (err)
+ return err;
+ ofs += PAD(je32_to_cpu(node->totlen));
+ break;
+ case JFFS2_NODETYPE_XREF:
+ if (buf_ofs + buf_len < ofs + je32_to_cpu(node->totlen)) {
+ buf_len = min_t(uint32_t, buf_size, jeb->offset + c->sector_size - ofs);
+ D1(printk(KERN_DEBUG "Fewer than %d bytes (xref node)"
+ " left to end of buf. Reading 0x%x at 0x%08x\n",
+ je32_to_cpu(node->totlen), buf_len, ofs));
+ err = jffs2_fill_scan_buf(c, buf, ofs, buf_len);
+ if (err)
+ return err;
+ buf_ofs = ofs;
+ node = (void *)buf;
+ }
+ err = jffs2_scan_xref_node(c, jeb, (void *)node, ofs, s);
+ if (err)
+ return err;
+ ofs += PAD(je32_to_cpu(node->totlen));
+ break;
+#endif /* CONFIG_JFFS2_FS_XATTR */
+
case JFFS2_NODETYPE_CLEANMARKER:
D1(printk(KERN_DEBUG "CLEANMARKER node found at 0x%08x\n", ofs));
if (je32_to_cpu(node->totlen) != c->cleanmarker_size) {
printk(KERN_NOTICE "CLEANMARKER node found at 0x%08x has totlen 0x%x != normal 0x%x\n",
ofs, je32_to_cpu(node->totlen), c->cleanmarker_size);
- DIRTY_SPACE(PAD(sizeof(struct jffs2_unknown_node)));
+ if ((err = jffs2_scan_dirty_space(c, jeb, PAD(sizeof(struct jffs2_unknown_node)))))
+ return err;
ofs += PAD(sizeof(struct jffs2_unknown_node));
} else if (jeb->first_node) {
printk(KERN_NOTICE "CLEANMARKER node found at 0x%08x, not first node in block (0x%08x)\n", ofs, jeb->offset);
- DIRTY_SPACE(PAD(sizeof(struct jffs2_unknown_node)));
+ if ((err = jffs2_scan_dirty_space(c, jeb, PAD(sizeof(struct jffs2_unknown_node)))))
+ return err;
ofs += PAD(sizeof(struct jffs2_unknown_node));
} else {
- struct jffs2_raw_node_ref *marker_ref = jffs2_alloc_raw_node_ref();
- if (!marker_ref) {
- printk(KERN_NOTICE "Failed to allocate node ref for clean marker\n");
- return -ENOMEM;
- }
- marker_ref->next_in_ino = NULL;
- marker_ref->next_phys = NULL;
- marker_ref->flash_offset = ofs | REF_NORMAL;
- marker_ref->__totlen = c->cleanmarker_size;
- jeb->first_node = jeb->last_node = marker_ref;
+ jffs2_link_node_ref(c, jeb, ofs | REF_NORMAL, c->cleanmarker_size, NULL);
- USED_SPACE(PAD(c->cleanmarker_size));
ofs += PAD(c->cleanmarker_size);
}
break;
@@ -645,7 +836,8 @@ scan_more:
case JFFS2_NODETYPE_PADDING:
if (jffs2_sum_active())
jffs2_sum_add_padding_mem(s, je32_to_cpu(node->totlen));
- DIRTY_SPACE(PAD(je32_to_cpu(node->totlen)));
+ if ((err = jffs2_scan_dirty_space(c, jeb, PAD(je32_to_cpu(node->totlen)))))
+ return err;
ofs += PAD(je32_to_cpu(node->totlen));
break;
@@ -656,7 +848,8 @@ scan_more:
c->flags |= JFFS2_SB_FLAG_RO;
if (!(jffs2_is_readonly(c)))
return -EROFS;
- DIRTY_SPACE(PAD(je32_to_cpu(node->totlen)));
+ if ((err = jffs2_scan_dirty_space(c, jeb, PAD(je32_to_cpu(node->totlen)))))
+ return err;
ofs += PAD(je32_to_cpu(node->totlen));
break;
@@ -666,15 +859,21 @@ scan_more:
case JFFS2_FEATURE_RWCOMPAT_DELETE:
D1(printk(KERN_NOTICE "Unknown but compatible feature node (0x%04x) found at offset 0x%08x\n", je16_to_cpu(node->nodetype), ofs));
- DIRTY_SPACE(PAD(je32_to_cpu(node->totlen)));
+ if ((err = jffs2_scan_dirty_space(c, jeb, PAD(je32_to_cpu(node->totlen)))))
+ return err;
ofs += PAD(je32_to_cpu(node->totlen));
break;
- case JFFS2_FEATURE_RWCOMPAT_COPY:
+ case JFFS2_FEATURE_RWCOMPAT_COPY: {
D1(printk(KERN_NOTICE "Unknown but compatible feature node (0x%04x) found at offset 0x%08x\n", je16_to_cpu(node->nodetype), ofs));
- USED_SPACE(PAD(je32_to_cpu(node->totlen)));
+
+ jffs2_link_node_ref(c, jeb, ofs | REF_PRISTINE, PAD(je32_to_cpu(node->totlen)), NULL);
+
+ /* We can't summarise nodes we don't grok */
+ jffs2_sum_disable_collecting(s);
ofs += PAD(je32_to_cpu(node->totlen));
break;
+ }
}
}
}
@@ -687,9 +886,9 @@ scan_more:
}
}
- D1(printk(KERN_DEBUG "Block at 0x%08x: free 0x%08x, dirty 0x%08x, unchecked 0x%08x, used 0x%08x\n", jeb->offset,
- jeb->free_size, jeb->dirty_size, jeb->unchecked_size, jeb->used_size));
-
+ D1(printk(KERN_DEBUG "Block at 0x%08x: free 0x%08x, dirty 0x%08x, unchecked 0x%08x, used 0x%08x, wasted 0x%08x\n",
+ jeb->offset,jeb->free_size, jeb->dirty_size, jeb->unchecked_size, jeb->used_size, jeb->wasted_size));
+
/* mark_node_obsolete can add to wasted !! */
if (jeb->wasted_size) {
jeb->dirty_size += jeb->wasted_size;
@@ -730,9 +929,9 @@ struct jffs2_inode_cache *jffs2_scan_make_ino_cache(struct jffs2_sb_info *c, uin
static int jffs2_scan_inode_node(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb,
struct jffs2_raw_inode *ri, uint32_t ofs, struct jffs2_summary *s)
{
- struct jffs2_raw_node_ref *raw;
struct jffs2_inode_cache *ic;
uint32_t ino = je32_to_cpu(ri->ino);
+ int err;
D1(printk(KERN_DEBUG "jffs2_scan_inode_node(): Node at 0x%08x\n", ofs));
@@ -745,12 +944,6 @@ static int jffs2_scan_inode_node(struct jffs2_sb_info *c, struct jffs2_erasebloc
Which means that the _full_ amount of time to get to proper write mode with GC
operational may actually be _longer_ than before. Sucks to be me. */
- raw = jffs2_alloc_raw_node_ref();
- if (!raw) {
- printk(KERN_NOTICE "jffs2_scan_inode_node(): allocation of node reference failed\n");
- return -ENOMEM;
- }
-
ic = jffs2_get_ino_cache(c, ino);
if (!ic) {
/* Inocache get failed. Either we read a bogus ino# or it's just genuinely the
@@ -762,30 +955,17 @@ static int jffs2_scan_inode_node(struct jffs2_sb_info *c, struct jffs2_erasebloc
printk(KERN_NOTICE "jffs2_scan_inode_node(): CRC failed on node at 0x%08x: Read 0x%08x, calculated 0x%08x\n",
ofs, je32_to_cpu(ri->node_crc), crc);
/* We believe totlen because the CRC on the node _header_ was OK, just the node itself failed. */
- DIRTY_SPACE(PAD(je32_to_cpu(ri->totlen)));
- jffs2_free_raw_node_ref(raw);
+ if ((err = jffs2_scan_dirty_space(c, jeb, PAD(je32_to_cpu(ri->totlen)))))
+ return err;
return 0;
}
ic = jffs2_scan_make_ino_cache(c, ino);
- if (!ic) {
- jffs2_free_raw_node_ref(raw);
+ if (!ic)
return -ENOMEM;
- }
}
/* Wheee. It worked */
-
- raw->flash_offset = ofs | REF_UNCHECKED;
- raw->__totlen = PAD(je32_to_cpu(ri->totlen));
- raw->next_phys = NULL;
- raw->next_in_ino = ic->nodes;
-
- ic->nodes = raw;
- if (!jeb->first_node)
- jeb->first_node = raw;
- if (jeb->last_node)
- jeb->last_node->next_phys = raw;
- jeb->last_node = raw;
+ jffs2_link_node_ref(c, jeb, ofs | REF_UNCHECKED, PAD(je32_to_cpu(ri->totlen)), ic);
D1(printk(KERN_DEBUG "Node is ino #%u, version %d. Range 0x%x-0x%x\n",
je32_to_cpu(ri->ino), je32_to_cpu(ri->version),
@@ -794,8 +974,6 @@ static int jffs2_scan_inode_node(struct jffs2_sb_info *c, struct jffs2_erasebloc
pseudo_random += je32_to_cpu(ri->version);
- UNCHECKED_SPACE(PAD(je32_to_cpu(ri->totlen)));
-
if (jffs2_sum_active()) {
jffs2_sum_add_inode_mem(s, ri, ofs - jeb->offset);
}
@@ -806,10 +984,10 @@ static int jffs2_scan_inode_node(struct jffs2_sb_info *c, struct jffs2_erasebloc
static int jffs2_scan_dirent_node(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb,
struct jffs2_raw_dirent *rd, uint32_t ofs, struct jffs2_summary *s)
{
- struct jffs2_raw_node_ref *raw;
struct jffs2_full_dirent *fd;
struct jffs2_inode_cache *ic;
uint32_t crc;
+ int err;
D1(printk(KERN_DEBUG "jffs2_scan_dirent_node(): Node at 0x%08x\n", ofs));
@@ -821,7 +999,8 @@ static int jffs2_scan_dirent_node(struct jffs2_sb_info *c, struct jffs2_eraseblo
printk(KERN_NOTICE "jffs2_scan_dirent_node(): Node CRC failed on node at 0x%08x: Read 0x%08x, calculated 0x%08x\n",
ofs, je32_to_cpu(rd->node_crc), crc);
/* We believe totlen because the CRC on the node _header_ was OK, just the node itself failed. */
- DIRTY_SPACE(PAD(je32_to_cpu(rd->totlen)));
+ if ((err = jffs2_scan_dirty_space(c, jeb, PAD(je32_to_cpu(rd->totlen)))))
+ return err;
return 0;
}
@@ -842,40 +1021,23 @@ static int jffs2_scan_dirent_node(struct jffs2_sb_info *c, struct jffs2_eraseblo
jffs2_free_full_dirent(fd);
/* FIXME: Why do we believe totlen? */
/* We believe totlen because the CRC on the node _header_ was OK, just the name failed. */
- DIRTY_SPACE(PAD(je32_to_cpu(rd->totlen)));
+ if ((err = jffs2_scan_dirty_space(c, jeb, PAD(je32_to_cpu(rd->totlen)))))
+ return err;
return 0;
}
- raw = jffs2_alloc_raw_node_ref();
- if (!raw) {
- jffs2_free_full_dirent(fd);
- printk(KERN_NOTICE "jffs2_scan_dirent_node(): allocation of node reference failed\n");
- return -ENOMEM;
- }
ic = jffs2_scan_make_ino_cache(c, je32_to_cpu(rd->pino));
if (!ic) {
jffs2_free_full_dirent(fd);
- jffs2_free_raw_node_ref(raw);
return -ENOMEM;
}
- raw->__totlen = PAD(je32_to_cpu(rd->totlen));
- raw->flash_offset = ofs | REF_PRISTINE;
- raw->next_phys = NULL;
- raw->next_in_ino = ic->nodes;
- ic->nodes = raw;
- if (!jeb->first_node)
- jeb->first_node = raw;
- if (jeb->last_node)
- jeb->last_node->next_phys = raw;
- jeb->last_node = raw;
+ fd->raw = jffs2_link_node_ref(c, jeb, ofs | REF_PRISTINE, PAD(je32_to_cpu(rd->totlen)), ic);
- fd->raw = raw;
fd->next = NULL;
fd->version = je32_to_cpu(rd->version);
fd->ino = je32_to_cpu(rd->ino);
fd->nhash = full_name_hash(fd->name, rd->nsize);
fd->type = rd->type;
- USED_SPACE(PAD(je32_to_cpu(rd->totlen)));
jffs2_add_fd_to_list(c, fd, &ic->scan_dents);
if (jffs2_sum_active()) {
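
Editor's note: the rewritten summary discovery in scan.c reads the last few bytes of each eraseblock, checks the marker magic, and derives the summary's position and length from the recorded offset. A hedged userspace sketch of that lookup follows, covering the XIP-style whole-block-in-memory case; SUM_MAGIC and struct sum_marker are simplified stand-ins for the on-flash definitions.

    #include <stdint.h>
    #include <stddef.h>
    #include <string.h>
    #include <stdio.h>

    #define SUM_MAGIC 0x02851885u   /* stand-in for JFFS2_SUM_MAGIC */

    struct sum_marker {
        uint32_t offset;    /* start of the summary node within the block */
        uint32_t magic;
    };

    /* Given a whole-eraseblock buffer (the XIP case in the hunk above),
     * return a pointer to the summary node and its length, or NULL. */
    static const void *find_summary(const unsigned char *block,
                                    uint32_t sector_size, uint32_t *sumlen)
    {
        struct sum_marker sm;

        memcpy(&sm, block + sector_size - sizeof(sm), sizeof(sm));
        if (sm.magic != SUM_MAGIC || sm.offset >= sector_size)
            return NULL;

        *sumlen = sector_size - sm.offset;
        return block + sm.offset;
    }

    int main(void)
    {
        unsigned char block[4096];
        uint32_t sumlen = 0;
        struct sum_marker sm = { 4096 - 128, SUM_MAGIC };

        memset(block, 0xff, sizeof(block));
        memcpy(block + sizeof(block) - sizeof(sm), &sm, sizeof(sm));

        const void *sum = find_summary(block, sizeof(block), &sumlen);
        printf("summary at offset %zu, %u bytes\n",
               (size_t)((const unsigned char *)sum - block), sumlen);
        return 0;
    }
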
diff --git a/fs/jffs2/security.c b/fs/jffs2/security.c
new file mode 100644
index 00000000000..52a9894a636
--- /dev/null
+++ b/fs/jffs2/security.c
@@ -0,0 +1,82 @@
+/*
+ * JFFS2 -- Journalling Flash File System, Version 2.
+ *
+ * Copyright (C) 2006 NEC Corporation
+ *
+ * Created by KaiGai Kohei <kaigai@ak.jp.nec.com>
+ *
+ * For licensing information, see the file 'LICENCE' in this directory.
+ *
+ */
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/fs.h>
+#include <linux/time.h>
+#include <linux/pagemap.h>
+#include <linux/highmem.h>
+#include <linux/crc32.h>
+#include <linux/jffs2.h>
+#include <linux/xattr.h>
+#include <linux/mtd/mtd.h>
+#include <linux/security.h>
+#include "nodelist.h"
+
+/* ---- Initial Security Label Attachment -------------- */
+int jffs2_init_security(struct inode *inode, struct inode *dir)
+{
+ int rc;
+ size_t len;
+ void *value;
+ char *name;
+
+ rc = security_inode_init_security(inode, dir, &name, &value, &len);
+ if (rc) {
+ if (rc == -EOPNOTSUPP)
+ return 0;
+ return rc;
+ }
+ rc = do_jffs2_setxattr(inode, JFFS2_XPREFIX_SECURITY, name, value, len, 0);
+
+ kfree(name);
+ kfree(value);
+ return rc;
+}
+
+/* ---- XATTR Handler for "security.*" ----------------- */
+static int jffs2_security_getxattr(struct inode *inode, const char *name,
+ void *buffer, size_t size)
+{
+ if (!strcmp(name, ""))
+ return -EINVAL;
+
+ return do_jffs2_getxattr(inode, JFFS2_XPREFIX_SECURITY, name, buffer, size);
+}
+
+static int jffs2_security_setxattr(struct inode *inode, const char *name, const void *buffer,
+ size_t size, int flags)
+{
+ if (!strcmp(name, ""))
+ return -EINVAL;
+
+ return do_jffs2_setxattr(inode, JFFS2_XPREFIX_SECURITY, name, buffer, size, flags);
+}
+
+static size_t jffs2_security_listxattr(struct inode *inode, char *list, size_t list_size,
+ const char *name, size_t name_len)
+{
+ size_t retlen = XATTR_SECURITY_PREFIX_LEN + name_len + 1;
+
+ if (list && retlen <= list_size) {
+ strcpy(list, XATTR_SECURITY_PREFIX);
+ strcpy(list + XATTR_SECURITY_PREFIX_LEN, name);
+ }
+
+ return retlen;
+}
+
+struct xattr_handler jffs2_security_xattr_handler = {
+ .prefix = XATTR_SECURITY_PREFIX,
+ .list = jffs2_security_listxattr,
+ .set = jffs2_security_setxattr,
+ .get = jffs2_security_getxattr
+};
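
Editor's note: jffs2_security_listxattr() above follows the usual xattr handler convention of always returning the space a "security.<name>" entry needs and copying it only when the supplied buffer is large enough. A small userspace sketch of that two-pass pattern follows; the prefix constant is a stand-in for XATTR_SECURITY_PREFIX.

    #include <stdio.h>
    #include <string.h>

    #define SECURITY_PREFIX      "security."
    #define SECURITY_PREFIX_LEN  (sizeof(SECURITY_PREFIX) - 1)

    static size_t list_security_xattr(char *list, size_t list_size,
                                      const char *name, size_t name_len)
    {
        size_t retlen = SECURITY_PREFIX_LEN + name_len + 1; /* +1 for NUL */

        if (list && retlen <= list_size) {
            strcpy(list, SECURITY_PREFIX);
            strcpy(list + SECURITY_PREFIX_LEN, name);
        }
        return retlen;  /* always the size needed, copied or not */
    }

    int main(void)
    {
        char buf[64];

        /* First pass: NULL buffer just asks "how much space?" */
        size_t need = list_security_xattr(NULL, 0, "selinux", 7);
        /* Second pass: actually fill the buffer. */
        list_security_xattr(buf, sizeof(buf), "selinux", 7);
        printf("need %zu bytes: \"%s\"\n", need, buf);
        return 0;
    }
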
diff --git a/fs/jffs2/summary.c b/fs/jffs2/summary.c
index fb9cec61fcf..0b02fc79e4d 100644
--- a/fs/jffs2/summary.c
+++ b/fs/jffs2/summary.c
@@ -5,6 +5,7 @@
* Zoltan Sogor <weth@inf.u-szeged.hu>,
* Patrik Kluba <pajko@halom.u-szeged.hu>,
* University of Szeged, Hungary
+ * 2005 KaiGai Kohei <kaigai@ak.jp.nec.com>
*
* For licensing information, see the file 'LICENCE' in this directory.
*
@@ -81,6 +82,19 @@ static int jffs2_sum_add_mem(struct jffs2_summary *s, union jffs2_sum_mem *item)
dbg_summary("dirent (%u) added to summary\n",
je32_to_cpu(item->d.ino));
break;
+#ifdef CONFIG_JFFS2_FS_XATTR
+ case JFFS2_NODETYPE_XATTR:
+ s->sum_size += JFFS2_SUMMARY_XATTR_SIZE;
+ s->sum_num++;
+ dbg_summary("xattr (xid=%u, version=%u) added to summary\n",
+ je32_to_cpu(item->x.xid), je32_to_cpu(item->x.version));
+ break;
+ case JFFS2_NODETYPE_XREF:
+ s->sum_size += JFFS2_SUMMARY_XREF_SIZE;
+ s->sum_num++;
+ dbg_summary("xref added to summary\n");
+ break;
+#endif
default:
JFFS2_WARNING("UNKNOWN node type %u\n",
je16_to_cpu(item->u.nodetype));
@@ -141,6 +155,40 @@ int jffs2_sum_add_dirent_mem(struct jffs2_summary *s, struct jffs2_raw_dirent *r
return jffs2_sum_add_mem(s, (union jffs2_sum_mem *)temp);
}
+#ifdef CONFIG_JFFS2_FS_XATTR
+int jffs2_sum_add_xattr_mem(struct jffs2_summary *s, struct jffs2_raw_xattr *rx, uint32_t ofs)
+{
+ struct jffs2_sum_xattr_mem *temp;
+
+ temp = kmalloc(sizeof(struct jffs2_sum_xattr_mem), GFP_KERNEL);
+ if (!temp)
+ return -ENOMEM;
+
+ temp->nodetype = rx->nodetype;
+ temp->xid = rx->xid;
+ temp->version = rx->version;
+ temp->offset = cpu_to_je32(ofs);
+ temp->totlen = rx->totlen;
+ temp->next = NULL;
+
+ return jffs2_sum_add_mem(s, (union jffs2_sum_mem *)temp);
+}
+
+int jffs2_sum_add_xref_mem(struct jffs2_summary *s, struct jffs2_raw_xref *rr, uint32_t ofs)
+{
+ struct jffs2_sum_xref_mem *temp;
+
+ temp = kmalloc(sizeof(struct jffs2_sum_xref_mem), GFP_KERNEL);
+ if (!temp)
+ return -ENOMEM;
+
+ temp->nodetype = rr->nodetype;
+ temp->offset = cpu_to_je32(ofs);
+ temp->next = NULL;
+
+ return jffs2_sum_add_mem(s, (union jffs2_sum_mem *)temp);
+}
+#endif
/* Cleanup every collected summary information */
static void jffs2_sum_clean_collected(struct jffs2_summary *s)
@@ -259,7 +307,40 @@ int jffs2_sum_add_kvec(struct jffs2_sb_info *c, const struct kvec *invecs,
return jffs2_sum_add_mem(c->summary, (union jffs2_sum_mem *)temp);
}
+#ifdef CONFIG_JFFS2_FS_XATTR
+ case JFFS2_NODETYPE_XATTR: {
+ struct jffs2_sum_xattr_mem *temp;
+ if (je32_to_cpu(node->x.version) == 0xffffffff)
+ return 0;
+ temp = kmalloc(sizeof(struct jffs2_sum_xattr_mem), GFP_KERNEL);
+ if (!temp)
+ goto no_mem;
+
+ temp->nodetype = node->x.nodetype;
+ temp->xid = node->x.xid;
+ temp->version = node->x.version;
+ temp->totlen = node->x.totlen;
+ temp->offset = cpu_to_je32(ofs);
+ temp->next = NULL;
+
+ return jffs2_sum_add_mem(c->summary, (union jffs2_sum_mem *)temp);
+ }
+ case JFFS2_NODETYPE_XREF: {
+ struct jffs2_sum_xref_mem *temp;
+
+ if (je32_to_cpu(node->r.ino) == 0xffffffff
+ && je32_to_cpu(node->r.xid) == 0xffffffff)
+ return 0;
+ temp = kmalloc(sizeof(struct jffs2_sum_xref_mem), GFP_KERNEL);
+ if (!temp)
+ goto no_mem;
+ temp->nodetype = node->r.nodetype;
+ temp->offset = cpu_to_je32(ofs);
+ temp->next = NULL;
+ return jffs2_sum_add_mem(c->summary, (union jffs2_sum_mem *)temp);
+ }
+#endif
case JFFS2_NODETYPE_PADDING:
dbg_summary("node PADDING\n");
c->summary->sum_padded += je32_to_cpu(node->u.totlen);
@@ -288,23 +369,41 @@ no_mem:
return -ENOMEM;
}
+static struct jffs2_raw_node_ref *sum_link_node_ref(struct jffs2_sb_info *c,
+ struct jffs2_eraseblock *jeb,
+ uint32_t ofs, uint32_t len,
+ struct jffs2_inode_cache *ic)
+{
+ /* If there was a gap, mark it dirty */
+ if ((ofs & ~3) > c->sector_size - jeb->free_size) {
+ /* Ew. Summary doesn't actually tell us explicitly about dirty space */
+ jffs2_scan_dirty_space(c, jeb, (ofs & ~3) - (c->sector_size - jeb->free_size));
+ }
+
+ return jffs2_link_node_ref(c, jeb, jeb->offset + ofs, len, ic);
+}
/* Process the stored summary information - helper function for jffs2_sum_scan_sumnode() */
static int jffs2_sum_process_sum_data(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb,
struct jffs2_raw_summary *summary, uint32_t *pseudo_random)
{
- struct jffs2_raw_node_ref *raw;
struct jffs2_inode_cache *ic;
struct jffs2_full_dirent *fd;
void *sp;
int i, ino;
+ int err;
sp = summary->sum;
for (i=0; i<je32_to_cpu(summary->sum_num); i++) {
dbg_summary("processing summary index %d\n", i);
+ /* Make sure there's a spare ref for dirty space */
+ err = jffs2_prealloc_raw_node_refs(c, jeb, 2);
+ if (err)
+ return err;
+
switch (je16_to_cpu(((struct jffs2_sum_unknown_flash *)sp)->nodetype)) {
case JFFS2_NODETYPE_INODE: {
struct jffs2_sum_inode_flash *spi;
@@ -312,38 +411,20 @@ static int jffs2_sum_process_sum_data(struct jffs2_sb_info *c, struct jffs2_eras
ino = je32_to_cpu(spi->inode);
- dbg_summary("Inode at 0x%08x\n",
- jeb->offset + je32_to_cpu(spi->offset));
-
- raw = jffs2_alloc_raw_node_ref();
- if (!raw) {
- JFFS2_NOTICE("allocation of node reference failed\n");
- kfree(summary);
- return -ENOMEM;
- }
+ dbg_summary("Inode at 0x%08x-0x%08x\n",
+ jeb->offset + je32_to_cpu(spi->offset),
+ jeb->offset + je32_to_cpu(spi->offset) + je32_to_cpu(spi->totlen));
ic = jffs2_scan_make_ino_cache(c, ino);
if (!ic) {
JFFS2_NOTICE("scan_make_ino_cache failed\n");
- jffs2_free_raw_node_ref(raw);
- kfree(summary);
return -ENOMEM;
}
- raw->flash_offset = (jeb->offset + je32_to_cpu(spi->offset)) | REF_UNCHECKED;
- raw->__totlen = PAD(je32_to_cpu(spi->totlen));
- raw->next_phys = NULL;
- raw->next_in_ino = ic->nodes;
-
- ic->nodes = raw;
- if (!jeb->first_node)
- jeb->first_node = raw;
- if (jeb->last_node)
- jeb->last_node->next_phys = raw;
- jeb->last_node = raw;
- *pseudo_random += je32_to_cpu(spi->version);
+ sum_link_node_ref(c, jeb, je32_to_cpu(spi->offset) | REF_UNCHECKED,
+ PAD(je32_to_cpu(spi->totlen)), ic);
- UNCHECKED_SPACE(PAD(je32_to_cpu(spi->totlen)));
+ *pseudo_random += je32_to_cpu(spi->version);
sp += JFFS2_SUMMARY_INODE_SIZE;
@@ -354,52 +435,33 @@ static int jffs2_sum_process_sum_data(struct jffs2_sb_info *c, struct jffs2_eras
struct jffs2_sum_dirent_flash *spd;
spd = sp;
- dbg_summary("Dirent at 0x%08x\n",
- jeb->offset + je32_to_cpu(spd->offset));
+ dbg_summary("Dirent at 0x%08x-0x%08x\n",
+ jeb->offset + je32_to_cpu(spd->offset),
+ jeb->offset + je32_to_cpu(spd->offset) + je32_to_cpu(spd->totlen));
+
fd = jffs2_alloc_full_dirent(spd->nsize+1);
- if (!fd) {
- kfree(summary);
+ if (!fd)
return -ENOMEM;
- }
memcpy(&fd->name, spd->name, spd->nsize);
fd->name[spd->nsize] = 0;
- raw = jffs2_alloc_raw_node_ref();
- if (!raw) {
- jffs2_free_full_dirent(fd);
- JFFS2_NOTICE("allocation of node reference failed\n");
- kfree(summary);
- return -ENOMEM;
- }
-
ic = jffs2_scan_make_ino_cache(c, je32_to_cpu(spd->pino));
if (!ic) {
jffs2_free_full_dirent(fd);
- jffs2_free_raw_node_ref(raw);
- kfree(summary);
return -ENOMEM;
}
- raw->__totlen = PAD(je32_to_cpu(spd->totlen));
- raw->flash_offset = (jeb->offset + je32_to_cpu(spd->offset)) | REF_PRISTINE;
- raw->next_phys = NULL;
- raw->next_in_ino = ic->nodes;
- ic->nodes = raw;
- if (!jeb->first_node)
- jeb->first_node = raw;
- if (jeb->last_node)
- jeb->last_node->next_phys = raw;
- jeb->last_node = raw;
-
- fd->raw = raw;
+ fd->raw = sum_link_node_ref(c, jeb, je32_to_cpu(spd->offset) | REF_UNCHECKED,
+ PAD(je32_to_cpu(spd->totlen)), ic);
+
fd->next = NULL;
fd->version = je32_to_cpu(spd->version);
fd->ino = je32_to_cpu(spd->ino);
fd->nhash = full_name_hash(fd->name, spd->nsize);
fd->type = spd->type;
- USED_SPACE(PAD(je32_to_cpu(spd->totlen)));
+
jffs2_add_fd_to_list(c, fd, &ic->scan_dents);
*pseudo_random += je32_to_cpu(spd->version);
@@ -408,48 +470,105 @@ static int jffs2_sum_process_sum_data(struct jffs2_sb_info *c, struct jffs2_eras
break;
}
+#ifdef CONFIG_JFFS2_FS_XATTR
+ case JFFS2_NODETYPE_XATTR: {
+ struct jffs2_xattr_datum *xd;
+ struct jffs2_sum_xattr_flash *spx;
+
+ spx = (struct jffs2_sum_xattr_flash *)sp;
+ dbg_summary("xattr at %#08x-%#08x (xid=%u, version=%u)\n",
+ jeb->offset + je32_to_cpu(spx->offset),
+ jeb->offset + je32_to_cpu(spx->offset) + je32_to_cpu(spx->totlen),
+ je32_to_cpu(spx->xid), je32_to_cpu(spx->version));
+
+ xd = jffs2_setup_xattr_datum(c, je32_to_cpu(spx->xid),
+ je32_to_cpu(spx->version));
+ if (IS_ERR(xd)) {
+ if (PTR_ERR(xd) == -EEXIST) {
+ /* a newer version of xd exists */
+ if ((err = jffs2_scan_dirty_space(c, jeb, je32_to_cpu(spx->totlen))))
+ return err;
+ sp += JFFS2_SUMMARY_XATTR_SIZE;
+ break;
+ }
+ JFFS2_NOTICE("allocation of xattr_datum failed\n");
+ return PTR_ERR(xd);
+ }
+
+ xd->node = sum_link_node_ref(c, jeb, je32_to_cpu(spx->offset) | REF_UNCHECKED,
+ PAD(je32_to_cpu(spx->totlen)), NULL);
+ /* FIXME */ xd->node->next_in_ino = (void *)xd;
+
+ *pseudo_random += je32_to_cpu(spx->xid);
+ sp += JFFS2_SUMMARY_XATTR_SIZE;
+
+ break;
+ }
+ case JFFS2_NODETYPE_XREF: {
+ struct jffs2_xattr_ref *ref;
+ struct jffs2_sum_xref_flash *spr;
+
+ spr = (struct jffs2_sum_xref_flash *)sp;
+ dbg_summary("xref at %#08x-%#08x\n",
+ jeb->offset + je32_to_cpu(spr->offset),
+ jeb->offset + je32_to_cpu(spr->offset) +
+ (uint32_t)PAD(sizeof(struct jffs2_raw_xref)));
+
+ ref = jffs2_alloc_xattr_ref();
+ if (!ref) {
+ JFFS2_NOTICE("allocation of xattr_ref failed\n");
+ return -ENOMEM;
+ }
+ ref->ino = 0xfffffffe;
+ ref->xid = 0xfffffffd;
+ ref->next = c->xref_temp;
+ c->xref_temp = ref;
+ ref->node = sum_link_node_ref(c, jeb, je32_to_cpu(spr->offset) | REF_UNCHECKED,
+ PAD(sizeof(struct jffs2_raw_xref)), NULL);
+ /* FIXME */ ref->node->next_in_ino = (void *)ref;
+
+ *pseudo_random += ref->node->flash_offset;
+ sp += JFFS2_SUMMARY_XREF_SIZE;
+
+ break;
+ }
+#endif
default : {
- JFFS2_WARNING("Unsupported node type found in summary! Exiting...");
- kfree(summary);
- return -EIO;
+ uint16_t nodetype = je16_to_cpu(((struct jffs2_sum_unknown_flash *)sp)->nodetype);
+ JFFS2_WARNING("Unsupported node type %x found in summary! Exiting...\n", nodetype);
+ if ((nodetype & JFFS2_COMPAT_MASK) == JFFS2_FEATURE_INCOMPAT)
+ return -EIO;
+
+ /* For compatible node types, just fall back to the full scan */
+ c->wasted_size -= jeb->wasted_size;
+ c->free_size += c->sector_size - jeb->free_size;
+ c->used_size -= jeb->used_size;
+ c->dirty_size -= jeb->dirty_size;
+ jeb->wasted_size = jeb->used_size = jeb->dirty_size = 0;
+ jeb->free_size = c->sector_size;
+
+ jffs2_free_jeb_node_refs(c, jeb);
+ return -ENOTRECOVERABLE;
}
}
}
-
- kfree(summary);
return 0;
}
/* Process the summary node - called from jffs2_scan_eraseblock() */
-
int jffs2_sum_scan_sumnode(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb,
- uint32_t ofs, uint32_t *pseudo_random)
+ struct jffs2_raw_summary *summary, uint32_t sumsize,
+ uint32_t *pseudo_random)
{
struct jffs2_unknown_node crcnode;
- struct jffs2_raw_node_ref *cache_ref;
- struct jffs2_raw_summary *summary;
- int ret, sumsize;
+ int ret, ofs;
uint32_t crc;
- sumsize = c->sector_size - ofs;
- ofs += jeb->offset;
+ ofs = c->sector_size - sumsize;
dbg_summary("summary found for 0x%08x at 0x%08x (0x%x bytes)\n",
- jeb->offset, ofs, sumsize);
-
- summary = kmalloc(sumsize, GFP_KERNEL);
-
- if (!summary) {
- return -ENOMEM;
- }
-
- ret = jffs2_fill_scan_buf(c, (unsigned char *)summary, ofs, sumsize);
-
- if (ret) {
- kfree(summary);
- return ret;
- }
+ jeb->offset, jeb->offset + ofs, sumsize);
/* OK, now check for node validity and CRC */
crcnode.magic = cpu_to_je16(JFFS2_MAGIC_BITMASK);
@@ -486,66 +605,49 @@ int jffs2_sum_scan_sumnode(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb
dbg_summary("Summary : CLEANMARKER node \n");
+ ret = jffs2_prealloc_raw_node_refs(c, jeb, 1);
+ if (ret)
+ return ret;
+
if (je32_to_cpu(summary->cln_mkr) != c->cleanmarker_size) {
dbg_summary("CLEANMARKER node has totlen 0x%x != normal 0x%x\n",
je32_to_cpu(summary->cln_mkr), c->cleanmarker_size);
- UNCHECKED_SPACE(PAD(je32_to_cpu(summary->cln_mkr)));
+ if ((ret = jffs2_scan_dirty_space(c, jeb, PAD(je32_to_cpu(summary->cln_mkr)))))
+ return ret;
} else if (jeb->first_node) {
dbg_summary("CLEANMARKER node not first node in block "
"(0x%08x)\n", jeb->offset);
- UNCHECKED_SPACE(PAD(je32_to_cpu(summary->cln_mkr)));
+ if ((ret = jffs2_scan_dirty_space(c, jeb, PAD(je32_to_cpu(summary->cln_mkr)))))
+ return ret;
} else {
- struct jffs2_raw_node_ref *marker_ref = jffs2_alloc_raw_node_ref();
-
- if (!marker_ref) {
- JFFS2_NOTICE("Failed to allocate node ref for clean marker\n");
- kfree(summary);
- return -ENOMEM;
- }
-
- marker_ref->next_in_ino = NULL;
- marker_ref->next_phys = NULL;
- marker_ref->flash_offset = jeb->offset | REF_NORMAL;
- marker_ref->__totlen = je32_to_cpu(summary->cln_mkr);
- jeb->first_node = jeb->last_node = marker_ref;
-
- USED_SPACE( PAD(je32_to_cpu(summary->cln_mkr)) );
+ jffs2_link_node_ref(c, jeb, jeb->offset | REF_NORMAL,
+ je32_to_cpu(summary->cln_mkr), NULL);
}
}
- if (je32_to_cpu(summary->padded)) {
- DIRTY_SPACE(je32_to_cpu(summary->padded));
- }
-
ret = jffs2_sum_process_sum_data(c, jeb, summary, pseudo_random);
+ /* -ENOTRECOVERABLE isn't a fatal error -- it means we should do a full
+ scan of this eraseblock. So return zero */
+ if (ret == -ENOTRECOVERABLE)
+ return 0;
if (ret)
- return ret;
+ return ret; /* real error */
/* for PARANOIA_CHECK */
- cache_ref = jffs2_alloc_raw_node_ref();
-
- if (!cache_ref) {
- JFFS2_NOTICE("Failed to allocate node ref for cache\n");
- return -ENOMEM;
- }
-
- cache_ref->next_in_ino = NULL;
- cache_ref->next_phys = NULL;
- cache_ref->flash_offset = ofs | REF_NORMAL;
- cache_ref->__totlen = sumsize;
-
- if (!jeb->first_node)
- jeb->first_node = cache_ref;
- if (jeb->last_node)
- jeb->last_node->next_phys = cache_ref;
- jeb->last_node = cache_ref;
+ ret = jffs2_prealloc_raw_node_refs(c, jeb, 2);
+ if (ret)
+ return ret;
- USED_SPACE(sumsize);
+ sum_link_node_ref(c, jeb, ofs | REF_NORMAL, sumsize, NULL);
- jeb->wasted_size += jeb->free_size;
- c->wasted_size += jeb->free_size;
- c->free_size -= jeb->free_size;
- jeb->free_size = 0;
+ if (unlikely(jeb->free_size)) {
+ JFFS2_WARNING("Free size 0x%x bytes in eraseblock @0x%08x with summary?\n",
+ jeb->free_size, jeb->offset);
+ jeb->wasted_size += jeb->free_size;
+ c->wasted_size += jeb->free_size;
+ c->free_size -= jeb->free_size;
+ jeb->free_size = 0;
+ }
return jffs2_scan_classify_jeb(c, jeb);
@@ -564,6 +666,7 @@ static int jffs2_sum_write_data(struct jffs2_sb_info *c, struct jffs2_eraseblock
union jffs2_sum_mem *temp;
struct jffs2_sum_marker *sm;
struct kvec vecs[2];
+ uint32_t sum_ofs;
void *wpage;
int ret;
size_t retlen;
@@ -581,16 +684,17 @@ static int jffs2_sum_write_data(struct jffs2_sb_info *c, struct jffs2_eraseblock
wpage = c->summary->sum_buf;
while (c->summary->sum_num) {
+ temp = c->summary->sum_list_head;
- switch (je16_to_cpu(c->summary->sum_list_head->u.nodetype)) {
+ switch (je16_to_cpu(temp->u.nodetype)) {
case JFFS2_NODETYPE_INODE: {
struct jffs2_sum_inode_flash *sino_ptr = wpage;
- sino_ptr->nodetype = c->summary->sum_list_head->i.nodetype;
- sino_ptr->inode = c->summary->sum_list_head->i.inode;
- sino_ptr->version = c->summary->sum_list_head->i.version;
- sino_ptr->offset = c->summary->sum_list_head->i.offset;
- sino_ptr->totlen = c->summary->sum_list_head->i.totlen;
+ sino_ptr->nodetype = temp->i.nodetype;
+ sino_ptr->inode = temp->i.inode;
+ sino_ptr->version = temp->i.version;
+ sino_ptr->offset = temp->i.offset;
+ sino_ptr->totlen = temp->i.totlen;
wpage += JFFS2_SUMMARY_INODE_SIZE;
@@ -600,30 +704,60 @@ static int jffs2_sum_write_data(struct jffs2_sb_info *c, struct jffs2_eraseblock
case JFFS2_NODETYPE_DIRENT: {
struct jffs2_sum_dirent_flash *sdrnt_ptr = wpage;
- sdrnt_ptr->nodetype = c->summary->sum_list_head->d.nodetype;
- sdrnt_ptr->totlen = c->summary->sum_list_head->d.totlen;
- sdrnt_ptr->offset = c->summary->sum_list_head->d.offset;
- sdrnt_ptr->pino = c->summary->sum_list_head->d.pino;
- sdrnt_ptr->version = c->summary->sum_list_head->d.version;
- sdrnt_ptr->ino = c->summary->sum_list_head->d.ino;
- sdrnt_ptr->nsize = c->summary->sum_list_head->d.nsize;
- sdrnt_ptr->type = c->summary->sum_list_head->d.type;
+ sdrnt_ptr->nodetype = temp->d.nodetype;
+ sdrnt_ptr->totlen = temp->d.totlen;
+ sdrnt_ptr->offset = temp->d.offset;
+ sdrnt_ptr->pino = temp->d.pino;
+ sdrnt_ptr->version = temp->d.version;
+ sdrnt_ptr->ino = temp->d.ino;
+ sdrnt_ptr->nsize = temp->d.nsize;
+ sdrnt_ptr->type = temp->d.type;
- memcpy(sdrnt_ptr->name, c->summary->sum_list_head->d.name,
- c->summary->sum_list_head->d.nsize);
+ memcpy(sdrnt_ptr->name, temp->d.name,
+ temp->d.nsize);
- wpage += JFFS2_SUMMARY_DIRENT_SIZE(c->summary->sum_list_head->d.nsize);
+ wpage += JFFS2_SUMMARY_DIRENT_SIZE(temp->d.nsize);
break;
}
+#ifdef CONFIG_JFFS2_FS_XATTR
+ case JFFS2_NODETYPE_XATTR: {
+ struct jffs2_sum_xattr_flash *sxattr_ptr = wpage;
+
+ temp = c->summary->sum_list_head;
+ sxattr_ptr->nodetype = temp->x.nodetype;
+ sxattr_ptr->xid = temp->x.xid;
+ sxattr_ptr->version = temp->x.version;
+ sxattr_ptr->offset = temp->x.offset;
+ sxattr_ptr->totlen = temp->x.totlen;
+
+ wpage += JFFS2_SUMMARY_XATTR_SIZE;
+ break;
+ }
+ case JFFS2_NODETYPE_XREF: {
+ struct jffs2_sum_xref_flash *sxref_ptr = wpage;
+ temp = c->summary->sum_list_head;
+ sxref_ptr->nodetype = temp->r.nodetype;
+ sxref_ptr->offset = temp->r.offset;
+
+ wpage += JFFS2_SUMMARY_XREF_SIZE;
+ break;
+ }
+#endif
default : {
- BUG(); /* unknown node in summary information */
+ if ((je16_to_cpu(temp->u.nodetype) & JFFS2_COMPAT_MASK)
+ == JFFS2_FEATURE_RWCOMPAT_COPY) {
+ dbg_summary("Writing unknown RWCOMPAT_COPY node type %x\n",
+ je16_to_cpu(temp->u.nodetype));
+ jffs2_sum_disable_collecting(c->summary);
+ } else {
+ BUG(); /* unknown node in summary information */
+ }
}
}
- temp = c->summary->sum_list_head;
- c->summary->sum_list_head = c->summary->sum_list_head->u.next;
+ c->summary->sum_list_head = temp->u.next;
kfree(temp);
c->summary->sum_num--;
@@ -645,25 +779,34 @@ static int jffs2_sum_write_data(struct jffs2_sb_info *c, struct jffs2_eraseblock
vecs[1].iov_base = c->summary->sum_buf;
vecs[1].iov_len = datasize;
- dbg_summary("JFFS2: writing out data to flash to pos : 0x%08x\n",
- jeb->offset + c->sector_size - jeb->free_size);
+ sum_ofs = jeb->offset + c->sector_size - jeb->free_size;
- spin_unlock(&c->erase_completion_lock);
- ret = jffs2_flash_writev(c, vecs, 2, jeb->offset + c->sector_size -
- jeb->free_size, &retlen, 0);
- spin_lock(&c->erase_completion_lock);
+ dbg_summary("JFFS2: writing out data to flash to pos : 0x%08x\n",
+ sum_ofs);
+ ret = jffs2_flash_writev(c, vecs, 2, sum_ofs, &retlen, 0);
if (ret || (retlen != infosize)) {
- JFFS2_WARNING("Write of %zd bytes at 0x%08x failed. returned %d, retlen %zd\n",
- infosize, jeb->offset + c->sector_size - jeb->free_size, ret, retlen);
+
+ JFFS2_WARNING("Write of %u bytes at 0x%08x failed. returned %d, retlen %zd\n",
+ infosize, sum_ofs, ret, retlen);
+
+ if (retlen) {
+ /* Waste remaining space */
+ spin_lock(&c->erase_completion_lock);
+ jffs2_link_node_ref(c, jeb, sum_ofs | REF_OBSOLETE, infosize, NULL);
+ spin_unlock(&c->erase_completion_lock);
+ }
c->summary->sum_size = JFFS2_SUMMARY_NOSUM_SIZE;
- WASTED_SPACE(infosize);
- return 1;
+ return 0;
}
+ spin_lock(&c->erase_completion_lock);
+ jffs2_link_node_ref(c, jeb, sum_ofs | REF_NORMAL, infosize, NULL);
+ spin_unlock(&c->erase_completion_lock);
+
return 0;
}
@@ -671,13 +814,16 @@ static int jffs2_sum_write_data(struct jffs2_sb_info *c, struct jffs2_eraseblock
int jffs2_sum_write_sumnode(struct jffs2_sb_info *c)
{
- struct jffs2_raw_node_ref *summary_ref;
- int datasize, infosize, padsize, ret;
+ int datasize, infosize, padsize;
struct jffs2_eraseblock *jeb;
+ int ret;
dbg_summary("called\n");
+ spin_unlock(&c->erase_completion_lock);
+
jeb = c->nextblock;
+ jffs2_prealloc_raw_node_refs(c, jeb, 1);
if (!c->summary->sum_num || !c->summary->sum_list_head) {
JFFS2_WARNING("Empty summary info!!!\n");
@@ -696,35 +842,11 @@ int jffs2_sum_write_sumnode(struct jffs2_sb_info *c)
jffs2_sum_disable_collecting(c->summary);
JFFS2_WARNING("Not enough space for summary, padsize = %d\n", padsize);
+ spin_lock(&c->erase_completion_lock);
return 0;
}
ret = jffs2_sum_write_data(c, jeb, infosize, datasize, padsize);
- if (ret)
- return 0; /* can't write out summary, block is marked as NOSUM_SIZE */
-
- /* for ACCT_PARANOIA_CHECK */
- spin_unlock(&c->erase_completion_lock);
- summary_ref = jffs2_alloc_raw_node_ref();
spin_lock(&c->erase_completion_lock);
-
- if (!summary_ref) {
- JFFS2_NOTICE("Failed to allocate node ref for summary\n");
- return -ENOMEM;
- }
-
- summary_ref->next_in_ino = NULL;
- summary_ref->next_phys = NULL;
- summary_ref->flash_offset = (jeb->offset + c->sector_size - jeb->free_size) | REF_NORMAL;
- summary_ref->__totlen = infosize;
-
- if (!jeb->first_node)
- jeb->first_node = summary_ref;
- if (jeb->last_node)
- jeb->last_node->next_phys = summary_ref;
- jeb->last_node = summary_ref;
-
- USED_SPACE(infosize);
-
- return 0;
+ return ret;
}
diff --git a/fs/jffs2/summary.h b/fs/jffs2/summary.h
index b7a678be170..6bf1f6aa455 100644
--- a/fs/jffs2/summary.h
+++ b/fs/jffs2/summary.h
@@ -18,23 +18,6 @@
#include <linux/uio.h>
#include <linux/jffs2.h>
-#define DIRTY_SPACE(x) do { typeof(x) _x = (x); \
- c->free_size -= _x; c->dirty_size += _x; \
- jeb->free_size -= _x ; jeb->dirty_size += _x; \
- }while(0)
-#define USED_SPACE(x) do { typeof(x) _x = (x); \
- c->free_size -= _x; c->used_size += _x; \
- jeb->free_size -= _x ; jeb->used_size += _x; \
- }while(0)
-#define WASTED_SPACE(x) do { typeof(x) _x = (x); \
- c->free_size -= _x; c->wasted_size += _x; \
- jeb->free_size -= _x ; jeb->wasted_size += _x; \
- }while(0)
-#define UNCHECKED_SPACE(x) do { typeof(x) _x = (x); \
- c->free_size -= _x; c->unchecked_size += _x; \
- jeb->free_size -= _x ; jeb->unchecked_size += _x; \
- }while(0)
-
#define BLK_STATE_ALLFF 0
#define BLK_STATE_CLEAN 1
#define BLK_STATE_PARTDIRTY 2
@@ -45,6 +28,8 @@
#define JFFS2_SUMMARY_NOSUM_SIZE 0xffffffff
#define JFFS2_SUMMARY_INODE_SIZE (sizeof(struct jffs2_sum_inode_flash))
#define JFFS2_SUMMARY_DIRENT_SIZE(x) (sizeof(struct jffs2_sum_dirent_flash) + (x))
+#define JFFS2_SUMMARY_XATTR_SIZE (sizeof(struct jffs2_sum_xattr_flash))
+#define JFFS2_SUMMARY_XREF_SIZE (sizeof(struct jffs2_sum_xref_flash))
/* Summary structures used on flash */
@@ -75,11 +60,28 @@ struct jffs2_sum_dirent_flash
uint8_t name[0]; /* dirent name */
} __attribute__((packed));
+struct jffs2_sum_xattr_flash
+{
+ jint16_t nodetype; /* == JFFS2_NODETYPE_XATTR */
+ jint32_t xid; /* xattr identifier */
+ jint32_t version; /* version number */
+ jint32_t offset; /* offset on jeb */
+ jint32_t totlen; /* node length */
+} __attribute__((packed));
+
+struct jffs2_sum_xref_flash
+{
+ jint16_t nodetype; /* == JFFS2_NODETYPE_XREF */
+ jint32_t offset; /* offset on jeb */
+} __attribute__((packed));
+
union jffs2_sum_flash
{
struct jffs2_sum_unknown_flash u;
struct jffs2_sum_inode_flash i;
struct jffs2_sum_dirent_flash d;
+ struct jffs2_sum_xattr_flash x;
+ struct jffs2_sum_xref_flash r;
};
/* Summary structures used in the memory */
@@ -114,11 +116,30 @@ struct jffs2_sum_dirent_mem
uint8_t name[0]; /* dirent name */
} __attribute__((packed));
+struct jffs2_sum_xattr_mem
+{
+ union jffs2_sum_mem *next;
+ jint16_t nodetype;
+ jint32_t xid;
+ jint32_t version;
+ jint32_t offset;
+ jint32_t totlen;
+} __attribute__((packed));
+
+struct jffs2_sum_xref_mem
+{
+ union jffs2_sum_mem *next;
+ jint16_t nodetype;
+ jint32_t offset;
+} __attribute__((packed));
+
union jffs2_sum_mem
{
struct jffs2_sum_unknown_mem u;
struct jffs2_sum_inode_mem i;
struct jffs2_sum_dirent_mem d;
+ struct jffs2_sum_xattr_mem x;
+ struct jffs2_sum_xref_mem r;
};
/* Summary related information stored in superblock */
@@ -159,8 +180,11 @@ int jffs2_sum_write_sumnode(struct jffs2_sb_info *c);
int jffs2_sum_add_padding_mem(struct jffs2_summary *s, uint32_t size);
int jffs2_sum_add_inode_mem(struct jffs2_summary *s, struct jffs2_raw_inode *ri, uint32_t ofs);
int jffs2_sum_add_dirent_mem(struct jffs2_summary *s, struct jffs2_raw_dirent *rd, uint32_t ofs);
+int jffs2_sum_add_xattr_mem(struct jffs2_summary *s, struct jffs2_raw_xattr *rx, uint32_t ofs);
+int jffs2_sum_add_xref_mem(struct jffs2_summary *s, struct jffs2_raw_xref *rr, uint32_t ofs);
int jffs2_sum_scan_sumnode(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb,
- uint32_t ofs, uint32_t *pseudo_random);
+ struct jffs2_raw_summary *summary, uint32_t sumlen,
+ uint32_t *pseudo_random);
#else /* SUMMARY DISABLED */
@@ -176,7 +200,9 @@ int jffs2_sum_scan_sumnode(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb
#define jffs2_sum_add_padding_mem(a,b)
#define jffs2_sum_add_inode_mem(a,b,c)
#define jffs2_sum_add_dirent_mem(a,b,c)
-#define jffs2_sum_scan_sumnode(a,b,c,d) (0)
+#define jffs2_sum_add_xattr_mem(a,b,c)
+#define jffs2_sum_add_xref_mem(a,b,c)
+#define jffs2_sum_scan_sumnode(a,b,c,d,e) (0)
#endif /* CONFIG_JFFS2_SUMMARY */
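
Aside, not part of the patch: the JFFS2_SUMMARY_XATTR_SIZE and JFFS2_SUMMARY_XREF_SIZE macros above are plain sizeof() of the packed on-flash records. A minimal stand-alone sketch, with fixed-width integers standing in for the kernel's jint16_t/jint32_t byte-order wrappers and purely illustrative demo_* struct names:

#include <stdio.h>
#include <stdint.h>

/* Stand-ins for the on-flash summary records declared in summary.h */
struct demo_sum_xattr_flash {
	uint16_t nodetype;	/* == JFFS2_NODETYPE_XATTR */
	uint32_t xid;		/* xattr identifier */
	uint32_t version;	/* version number */
	uint32_t offset;	/* offset on jeb */
	uint32_t totlen;	/* node length */
} __attribute__((packed));

struct demo_sum_xref_flash {
	uint16_t nodetype;	/* == JFFS2_NODETYPE_XREF */
	uint32_t offset;	/* offset on jeb */
} __attribute__((packed));

int main(void)
{
	/* Mirrors JFFS2_SUMMARY_XATTR_SIZE and JFFS2_SUMMARY_XREF_SIZE */
	printf("xattr summary record: %zu bytes\n",
	       sizeof(struct demo_sum_xattr_flash));
	printf("xref summary record:  %zu bytes\n",
	       sizeof(struct demo_sum_xref_flash));
	return 0;
}
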
diff --git a/fs/jffs2/super.c b/fs/jffs2/super.c
index ffd8e84b22c..9d0521451f5 100644
--- a/fs/jffs2/super.c
+++ b/fs/jffs2/super.c
@@ -151,7 +151,10 @@ static struct super_block *jffs2_get_sb_mtd(struct file_system_type *fs_type,
sb->s_op = &jffs2_super_operations;
sb->s_flags = flags | MS_NOATIME;
-
+ sb->s_xattr = jffs2_xattr_handlers;
+#ifdef CONFIG_JFFS2_FS_POSIX_ACL
+ sb->s_flags |= MS_POSIXACL;
+#endif
ret = jffs2_do_fill_super(sb, data, flags & MS_SILENT ? 1 : 0);
if (ret) {
@@ -293,6 +296,7 @@ static void jffs2_put_super (struct super_block *sb)
kfree(c->blocks);
jffs2_flash_cleanup(c);
kfree(c->inocache_list);
+ jffs2_clear_xattr_subsystem(c);
if (c->mtd->sync)
c->mtd->sync(c->mtd);
@@ -320,6 +324,18 @@ static int __init init_jffs2_fs(void)
{
int ret;
+ /* Paranoia checks for on-medium structures. If we ask GCC
+ to pack them with __attribute__((packed)) then it _also_
+ assumes that they're not aligned -- so it emits crappy
+ code on some architectures. Ideally we want an attribute
+ which means just 'no padding', without the alignment
+ thing. But GCC doesn't have that -- we have to just
+ hope the structs are the right sizes, instead. */
+ BUG_ON(sizeof(struct jffs2_unknown_node) != 12);
+ BUG_ON(sizeof(struct jffs2_raw_dirent) != 40);
+ BUG_ON(sizeof(struct jffs2_raw_inode) != 68);
+ BUG_ON(sizeof(struct jffs2_raw_summary) != 32);
+
printk(KERN_INFO "JFFS2 version 2.2."
#ifdef CONFIG_JFFS2_FS_WRITEBUFFER
" (NAND)"
@@ -327,7 +343,7 @@ static int __init init_jffs2_fs(void)
#ifdef CONFIG_JFFS2_SUMMARY
" (SUMMARY) "
#endif
- " (C) 2001-2003 Red Hat, Inc.\n");
+ " (C) 2001-2006 Red Hat, Inc.\n");
jffs2_inode_cachep = kmem_cache_create("jffs2_i",
sizeof(struct jffs2_inode_info),
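
Aside, not part of the patch: the BUG_ON() size checks added to init_jffs2_fs() above could equally be expressed at compile time with the classic negative-array-size idiom. A sketch under the assumption that the 12-byte jffs2_unknown_node header is laid out as magic/nodetype/totlen/hdr_crc; the demo_* names are illustrative only:

#include <stdint.h>

struct demo_unknown_node {
	uint16_t magic;
	uint16_t nodetype;
	uint32_t totlen;
	uint32_t hdr_crc;
} __attribute__((packed));

/* Compilation fails if the on-medium header is not exactly 12 bytes. */
typedef char demo_unknown_node_size_check[
	(sizeof(struct demo_unknown_node) == 12) ? 1 : -1];
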
diff --git a/fs/jffs2/symlink.c b/fs/jffs2/symlink.c
index d55754fe892..fc211b6e9b0 100644
--- a/fs/jffs2/symlink.c
+++ b/fs/jffs2/symlink.c
@@ -24,7 +24,12 @@ struct inode_operations jffs2_symlink_inode_operations =
{
.readlink = generic_readlink,
.follow_link = jffs2_follow_link,
- .setattr = jffs2_setattr
+ .permission = jffs2_permission,
+ .setattr = jffs2_setattr,
+ .setxattr = jffs2_setxattr,
+ .getxattr = jffs2_getxattr,
+ .listxattr = jffs2_listxattr,
+ .removexattr = jffs2_removexattr
};
static void *jffs2_follow_link(struct dentry *dentry, struct nameidata *nd)
diff --git a/fs/jffs2/wbuf.c b/fs/jffs2/wbuf.c
index 4cebf0e57c4..a7f153f79ec 100644
--- a/fs/jffs2/wbuf.c
+++ b/fs/jffs2/wbuf.c
@@ -156,69 +156,130 @@ static void jffs2_block_refile(struct jffs2_sb_info *c, struct jffs2_eraseblock
jffs2_erase_pending_trigger(c);
}
- /* Adjust its size counts accordingly */
- c->wasted_size += jeb->free_size;
- c->free_size -= jeb->free_size;
- jeb->wasted_size += jeb->free_size;
- jeb->free_size = 0;
+ if (!jffs2_prealloc_raw_node_refs(c, jeb, 1)) {
+ uint32_t oldfree = jeb->free_size;
+
+ jffs2_link_node_ref(c, jeb,
+ (jeb->offset+c->sector_size-oldfree) | REF_OBSOLETE,
+ oldfree, NULL);
+ /* convert to wasted */
+ c->wasted_size += oldfree;
+ jeb->wasted_size += oldfree;
+ c->dirty_size -= oldfree;
+ jeb->dirty_size -= oldfree;
+ }
jffs2_dbg_dump_block_lists_nolock(c);
jffs2_dbg_acct_sanity_check_nolock(c,jeb);
jffs2_dbg_acct_paranoia_check_nolock(c, jeb);
}
+static struct jffs2_raw_node_ref **jffs2_incore_replace_raw(struct jffs2_sb_info *c,
+ struct jffs2_inode_info *f,
+ struct jffs2_raw_node_ref *raw,
+ union jffs2_node_union *node)
+{
+ struct jffs2_node_frag *frag;
+ struct jffs2_full_dirent *fd;
+
+ dbg_noderef("incore_replace_raw: node at %p is {%04x,%04x}\n",
+ node, je16_to_cpu(node->u.magic), je16_to_cpu(node->u.nodetype));
+
+ BUG_ON(je16_to_cpu(node->u.magic) != 0x1985 &&
+ je16_to_cpu(node->u.magic) != 0);
+
+ switch (je16_to_cpu(node->u.nodetype)) {
+ case JFFS2_NODETYPE_INODE:
+ if (f->metadata && f->metadata->raw == raw) {
+ dbg_noderef("Will replace ->raw in f->metadata at %p\n", f->metadata);
+ return &f->metadata->raw;
+ }
+ frag = jffs2_lookup_node_frag(&f->fragtree, je32_to_cpu(node->i.offset));
+ BUG_ON(!frag);
+ /* Find a frag which refers to the full_dnode we want to modify */
+ while (!frag->node || frag->node->raw != raw) {
+ frag = frag_next(frag);
+ BUG_ON(!frag);
+ }
+ dbg_noderef("Will replace ->raw in full_dnode at %p\n", frag->node);
+ return &frag->node->raw;
+
+ case JFFS2_NODETYPE_DIRENT:
+ for (fd = f->dents; fd; fd = fd->next) {
+ if (fd->raw == raw) {
+ dbg_noderef("Will replace ->raw in full_dirent at %p\n", fd);
+ return &fd->raw;
+ }
+ }
+ BUG();
+
+ default:
+ dbg_noderef("Don't care about replacing raw for nodetype %x\n",
+ je16_to_cpu(node->u.nodetype));
+ break;
+ }
+ return NULL;
+}
+
/* Recover from failure to write wbuf. Recover the nodes up to the
* wbuf, not the one which we were starting to try to write. */
static void jffs2_wbuf_recover(struct jffs2_sb_info *c)
{
struct jffs2_eraseblock *jeb, *new_jeb;
- struct jffs2_raw_node_ref **first_raw, **raw;
+ struct jffs2_raw_node_ref *raw, *next, *first_raw = NULL;
size_t retlen;
int ret;
+ int nr_refile = 0;
unsigned char *buf;
uint32_t start, end, ofs, len;
- spin_lock(&c->erase_completion_lock);
-
jeb = &c->blocks[c->wbuf_ofs / c->sector_size];
+ spin_lock(&c->erase_completion_lock);
jffs2_block_refile(c, jeb, REFILE_NOTEMPTY);
+ spin_unlock(&c->erase_completion_lock);
+
+ BUG_ON(!ref_obsolete(jeb->last_node));
/* Find the first node to be recovered, by skipping over every
node which ends before the wbuf starts, or which is obsolete. */
- first_raw = &jeb->first_node;
- while (*first_raw &&
- (ref_obsolete(*first_raw) ||
- (ref_offset(*first_raw)+ref_totlen(c, jeb, *first_raw)) < c->wbuf_ofs)) {
- D1(printk(KERN_DEBUG "Skipping node at 0x%08x(%d)-0x%08x which is either before 0x%08x or obsolete\n",
- ref_offset(*first_raw), ref_flags(*first_raw),
- (ref_offset(*first_raw) + ref_totlen(c, jeb, *first_raw)),
- c->wbuf_ofs));
- first_raw = &(*first_raw)->next_phys;
+ for (next = raw = jeb->first_node; next; raw = next) {
+ next = ref_next(raw);
+
+ if (ref_obsolete(raw) ||
+ (next && ref_offset(next) <= c->wbuf_ofs)) {
+ dbg_noderef("Skipping node at 0x%08x(%d)-0x%08x which is either before 0x%08x or obsolete\n",
+ ref_offset(raw), ref_flags(raw),
+ (ref_offset(raw) + ref_totlen(c, jeb, raw)),
+ c->wbuf_ofs);
+ continue;
+ }
+ dbg_noderef("First node to be recovered is at 0x%08x(%d)-0x%08x\n",
+ ref_offset(raw), ref_flags(raw),
+ (ref_offset(raw) + ref_totlen(c, jeb, raw)));
+
+ first_raw = raw;
+ break;
}
- if (!*first_raw) {
+ if (!first_raw) {
/* All nodes were obsolete. Nothing to recover. */
D1(printk(KERN_DEBUG "No non-obsolete nodes to be recovered. Just filing block bad\n"));
- spin_unlock(&c->erase_completion_lock);
+ c->wbuf_len = 0;
return;
}
- start = ref_offset(*first_raw);
- end = ref_offset(*first_raw) + ref_totlen(c, jeb, *first_raw);
-
- /* Find the last node to be recovered */
- raw = first_raw;
- while ((*raw)) {
- if (!ref_obsolete(*raw))
- end = ref_offset(*raw) + ref_totlen(c, jeb, *raw);
+ start = ref_offset(first_raw);
+ end = ref_offset(jeb->last_node);
+ nr_refile = 1;
- raw = &(*raw)->next_phys;
- }
- spin_unlock(&c->erase_completion_lock);
+ /* Count the number of refs which need to be copied */
+ while ((raw = ref_next(raw)) != jeb->last_node)
+ nr_refile++;
- D1(printk(KERN_DEBUG "wbuf recover %08x-%08x\n", start, end));
+ dbg_noderef("wbuf recover %08x-%08x (%d bytes in %d nodes)\n",
+ start, end, end - start, nr_refile);
buf = NULL;
if (start < c->wbuf_ofs) {
@@ -233,28 +294,37 @@ static void jffs2_wbuf_recover(struct jffs2_sb_info *c)
}
/* Do the read... */
- if (jffs2_cleanmarker_oob(c))
- ret = c->mtd->read_ecc(c->mtd, start, c->wbuf_ofs - start, &retlen, buf, NULL, c->oobinfo);
- else
- ret = c->mtd->read(c->mtd, start, c->wbuf_ofs - start, &retlen, buf);
+ ret = c->mtd->read(c->mtd, start, c->wbuf_ofs - start, &retlen, buf);
- if (ret == -EBADMSG && retlen == c->wbuf_ofs - start) {
- /* ECC recovered */
+ /* ECC recovered ? */
+ if ((ret == -EUCLEAN || ret == -EBADMSG) &&
+ (retlen == c->wbuf_ofs - start))
ret = 0;
- }
+
if (ret || retlen != c->wbuf_ofs - start) {
printk(KERN_CRIT "Old data are already lost in wbuf recovery. Data loss ensues.\n");
kfree(buf);
buf = NULL;
read_failed:
- first_raw = &(*first_raw)->next_phys;
+ first_raw = ref_next(first_raw);
+ nr_refile--;
+ while (first_raw && ref_obsolete(first_raw)) {
+ first_raw = ref_next(first_raw);
+ nr_refile--;
+ }
+
/* If this was the only node to be recovered, give up */
- if (!(*first_raw))
+ if (!first_raw) {
+ c->wbuf_len = 0;
return;
+ }
/* It wasn't. Go on and try to recover nodes complete in the wbuf */
- start = ref_offset(*first_raw);
+ start = ref_offset(first_raw);
+ dbg_noderef("wbuf now recover %08x-%08x (%d bytes in %d nodes)\n",
+ start, end, end - start, nr_refile);
+
} else {
/* Read succeeded. Copy the remaining data from the wbuf */
memcpy(buf + (c->wbuf_ofs - start), c->wbuf, end - c->wbuf_ofs);
@@ -263,14 +333,23 @@ static void jffs2_wbuf_recover(struct jffs2_sb_info *c)
/* OK... we're to rewrite (end-start) bytes of data from first_raw onwards.
Either 'buf' contains the data, or we find it in the wbuf */
-
/* ... and get an allocation of space from a shiny new block instead */
- ret = jffs2_reserve_space_gc(c, end-start, &ofs, &len, JFFS2_SUMMARY_NOSUM_SIZE);
+ ret = jffs2_reserve_space_gc(c, end-start, &len, JFFS2_SUMMARY_NOSUM_SIZE);
if (ret) {
printk(KERN_WARNING "Failed to allocate space for wbuf recovery. Data loss ensues.\n");
kfree(buf);
return;
}
+
+ ret = jffs2_prealloc_raw_node_refs(c, c->nextblock, nr_refile);
+ if (ret) {
+ printk(KERN_WARNING "Failed to allocate node refs for wbuf recovery. Data loss ensues.\n");
+ kfree(buf);
+ return;
+ }
+
+ ofs = write_ofs(c);
+
if (end-start >= c->wbuf_pagesize) {
/* Need to do another write immediately, but it's possible
that this is just because the wbuf itself is completely
@@ -288,36 +367,22 @@ static void jffs2_wbuf_recover(struct jffs2_sb_info *c)
if (breakme++ == 20) {
printk(KERN_NOTICE "Faking write error at 0x%08x\n", ofs);
breakme = 0;
- c->mtd->write_ecc(c->mtd, ofs, towrite, &retlen,
- brokenbuf, NULL, c->oobinfo);
+ c->mtd->write(c->mtd, ofs, towrite, &retlen,
+ brokenbuf);
ret = -EIO;
} else
#endif
- if (jffs2_cleanmarker_oob(c))
- ret = c->mtd->write_ecc(c->mtd, ofs, towrite, &retlen,
- rewrite_buf, NULL, c->oobinfo);
- else
- ret = c->mtd->write(c->mtd, ofs, towrite, &retlen, rewrite_buf);
+ ret = c->mtd->write(c->mtd, ofs, towrite, &retlen,
+ rewrite_buf);
if (ret || retlen != towrite) {
/* Argh. We tried. Really we did. */
printk(KERN_CRIT "Recovery of wbuf failed due to a second write error\n");
kfree(buf);
- if (retlen) {
- struct jffs2_raw_node_ref *raw2;
-
- raw2 = jffs2_alloc_raw_node_ref();
- if (!raw2)
- return;
+ if (retlen)
+ jffs2_add_physical_node_ref(c, ofs | REF_OBSOLETE, ref_totlen(c, jeb, first_raw), NULL);
- raw2->flash_offset = ofs | REF_OBSOLETE;
- raw2->__totlen = ref_totlen(c, jeb, *first_raw);
- raw2->next_phys = NULL;
- raw2->next_in_ino = NULL;
-
- jffs2_add_physical_node_ref(c, raw2);
- }
return;
}
printk(KERN_NOTICE "Recovery of wbuf succeeded to %08x\n", ofs);
@@ -326,12 +391,10 @@ static void jffs2_wbuf_recover(struct jffs2_sb_info *c)
c->wbuf_ofs = ofs + towrite;
memmove(c->wbuf, rewrite_buf + towrite, c->wbuf_len);
/* Don't muck about with c->wbuf_inodes. False positives are harmless. */
- kfree(buf);
} else {
/* OK, now we're left with the dregs in whichever buffer we're using */
if (buf) {
memcpy(c->wbuf, buf, end-start);
- kfree(buf);
} else {
memmove(c->wbuf, c->wbuf + (start - c->wbuf_ofs), end - start);
}
@@ -343,62 +406,111 @@ static void jffs2_wbuf_recover(struct jffs2_sb_info *c)
new_jeb = &c->blocks[ofs / c->sector_size];
spin_lock(&c->erase_completion_lock);
- if (new_jeb->first_node) {
- /* Odd, but possible with ST flash later maybe */
- new_jeb->last_node->next_phys = *first_raw;
- } else {
- new_jeb->first_node = *first_raw;
- }
-
- raw = first_raw;
- while (*raw) {
- uint32_t rawlen = ref_totlen(c, jeb, *raw);
+ for (raw = first_raw; raw != jeb->last_node; raw = ref_next(raw)) {
+ uint32_t rawlen = ref_totlen(c, jeb, raw);
+ struct jffs2_inode_cache *ic;
+ struct jffs2_raw_node_ref *new_ref;
+ struct jffs2_raw_node_ref **adjust_ref = NULL;
+ struct jffs2_inode_info *f = NULL;
D1(printk(KERN_DEBUG "Refiling block of %08x at %08x(%d) to %08x\n",
- rawlen, ref_offset(*raw), ref_flags(*raw), ofs));
+ rawlen, ref_offset(raw), ref_flags(raw), ofs));
+
+ ic = jffs2_raw_ref_to_ic(raw);
+
+ /* Ick. This XATTR mess should be fixed shortly... */
+ if (ic && ic->class == RAWNODE_CLASS_XATTR_DATUM) {
+ struct jffs2_xattr_datum *xd = (void *)ic;
+ BUG_ON(xd->node != raw);
+ adjust_ref = &xd->node;
+ raw->next_in_ino = NULL;
+ ic = NULL;
+ } else if (ic && ic->class == RAWNODE_CLASS_XATTR_REF) {
+ struct jffs2_xattr_ref *xr = (void *)ic;
+ BUG_ON(xr->node != raw);
+ adjust_ref = &xr->node;
+ raw->next_in_ino = NULL;
+ ic = NULL;
+ } else if (ic && ic->class == RAWNODE_CLASS_INODE_CACHE) {
+ struct jffs2_raw_node_ref **p = &ic->nodes;
+
+ /* Remove the old node from the per-inode list */
+ while (*p && *p != (void *)ic) {
+ if (*p == raw) {
+ (*p) = (raw->next_in_ino);
+ raw->next_in_ino = NULL;
+ break;
+ }
+ p = &((*p)->next_in_ino);
+ }
- if (ref_obsolete(*raw)) {
- /* Shouldn't really happen much */
- new_jeb->dirty_size += rawlen;
- new_jeb->free_size -= rawlen;
- c->dirty_size += rawlen;
- } else {
- new_jeb->used_size += rawlen;
- new_jeb->free_size -= rawlen;
+ if (ic->state == INO_STATE_PRESENT && !ref_obsolete(raw)) {
+ /* If it's an in-core inode, then we have to adjust any
+ full_dirent or full_dnode structure to point to the
+ new version instead of the old */
+ f = jffs2_gc_fetch_inode(c, ic->ino, ic->nlink);
+ if (IS_ERR(f)) {
+ /* Should never happen; it _must_ be present */
+ JFFS2_ERROR("Failed to iget() ino #%u, err %ld\n",
+ ic->ino, PTR_ERR(f));
+ BUG();
+ }
+ /* We don't lock f->sem. There's a number of ways we could
+ end up in here with it already being locked, and nobody's
+ going to modify it on us anyway because we hold the
+ alloc_sem. We're only changing one ->raw pointer too,
+ which we can get away with without upsetting readers. */
+ adjust_ref = jffs2_incore_replace_raw(c, f, raw,
+ (void *)(buf?:c->wbuf) + (ref_offset(raw) - start));
+ } else if (unlikely(ic->state != INO_STATE_PRESENT &&
+ ic->state != INO_STATE_CHECKEDABSENT &&
+ ic->state != INO_STATE_GC)) {
+ JFFS2_ERROR("Inode #%u is in strange state %d!\n", ic->ino, ic->state);
+ BUG();
+ }
+ }
+
+ new_ref = jffs2_link_node_ref(c, new_jeb, ofs | ref_flags(raw), rawlen, ic);
+
+ if (adjust_ref) {
+ BUG_ON(*adjust_ref != raw);
+ *adjust_ref = new_ref;
+ }
+ if (f)
+ jffs2_gc_release_inode(c, f);
+
+ if (!ref_obsolete(raw)) {
jeb->dirty_size += rawlen;
jeb->used_size -= rawlen;
c->dirty_size += rawlen;
+ c->used_size -= rawlen;
+ raw->flash_offset = ref_offset(raw) | REF_OBSOLETE;
+ BUG_ON(raw->next_in_ino);
}
- c->free_size -= rawlen;
- (*raw)->flash_offset = ofs | ref_flags(*raw);
ofs += rawlen;
- new_jeb->last_node = *raw;
-
- raw = &(*raw)->next_phys;
}
+ kfree(buf);
+
/* Fix up the original jeb now it's on the bad_list */
- *first_raw = NULL;
- if (first_raw == &jeb->first_node) {
- jeb->last_node = NULL;
+ if (first_raw == jeb->first_node) {
D1(printk(KERN_DEBUG "Failing block at %08x is now empty. Moving to erase_pending_list\n", jeb->offset));
list_del(&jeb->list);
list_add(&jeb->list, &c->erase_pending_list);
c->nr_erasing_blocks++;
jffs2_erase_pending_trigger(c);
}
- else
- jeb->last_node = container_of(first_raw, struct jffs2_raw_node_ref, next_phys);
jffs2_dbg_acct_sanity_check_nolock(c, jeb);
- jffs2_dbg_acct_paranoia_check_nolock(c, jeb);
+ jffs2_dbg_acct_paranoia_check_nolock(c, jeb);
jffs2_dbg_acct_sanity_check_nolock(c, new_jeb);
- jffs2_dbg_acct_paranoia_check_nolock(c, new_jeb);
+ jffs2_dbg_acct_paranoia_check_nolock(c, new_jeb);
spin_unlock(&c->erase_completion_lock);
- D1(printk(KERN_DEBUG "wbuf recovery completed OK\n"));
+ D1(printk(KERN_DEBUG "wbuf recovery completed OK. wbuf_ofs 0x%08x, len 0x%x\n", c->wbuf_ofs, c->wbuf_len));
+
}
/* Meaning of pad argument:
@@ -412,6 +524,7 @@ static void jffs2_wbuf_recover(struct jffs2_sb_info *c)
static int __jffs2_flush_wbuf(struct jffs2_sb_info *c, int pad)
{
+ struct jffs2_eraseblock *wbuf_jeb;
int ret;
size_t retlen;
@@ -429,6 +542,10 @@ static int __jffs2_flush_wbuf(struct jffs2_sb_info *c, int pad)
if (!c->wbuf_len) /* already checked c->wbuf above */
return 0;
+ wbuf_jeb = &c->blocks[c->wbuf_ofs / c->sector_size];
+ if (jffs2_prealloc_raw_node_refs(c, wbuf_jeb, c->nextblock->allocated_refs + 1))
+ return -ENOMEM;
+
/* claim remaining space on the page
this happens, if we have a change to a new block,
or if fsync forces us to flush the writebuffer.
@@ -458,15 +575,12 @@ static int __jffs2_flush_wbuf(struct jffs2_sb_info *c, int pad)
if (breakme++ == 20) {
printk(KERN_NOTICE "Faking write error at 0x%08x\n", c->wbuf_ofs);
breakme = 0;
- c->mtd->write_ecc(c->mtd, c->wbuf_ofs, c->wbuf_pagesize,
- &retlen, brokenbuf, NULL, c->oobinfo);
+ c->mtd->write(c->mtd, c->wbuf_ofs, c->wbuf_pagesize, &retlen,
+ brokenbuf);
ret = -EIO;
} else
#endif
- if (jffs2_cleanmarker_oob(c))
- ret = c->mtd->write_ecc(c->mtd, c->wbuf_ofs, c->wbuf_pagesize, &retlen, c->wbuf, NULL, c->oobinfo);
- else
ret = c->mtd->write(c->mtd, c->wbuf_ofs, c->wbuf_pagesize, &retlen, c->wbuf);
if (ret || retlen != c->wbuf_pagesize) {
@@ -483,32 +597,34 @@ static int __jffs2_flush_wbuf(struct jffs2_sb_info *c, int pad)
return ret;
}
- spin_lock(&c->erase_completion_lock);
-
/* Adjust free size of the block if we padded. */
if (pad) {
- struct jffs2_eraseblock *jeb;
-
- jeb = &c->blocks[c->wbuf_ofs / c->sector_size];
+ uint32_t waste = c->wbuf_pagesize - c->wbuf_len;
D1(printk(KERN_DEBUG "jffs2_flush_wbuf() adjusting free_size of %sblock at %08x\n",
- (jeb==c->nextblock)?"next":"", jeb->offset));
+ (wbuf_jeb==c->nextblock)?"next":"", wbuf_jeb->offset));
/* wbuf_pagesize - wbuf_len is the amount of space that's to be
padded. If there is less free space in the block than that,
something screwed up */
- if (jeb->free_size < (c->wbuf_pagesize - c->wbuf_len)) {
+ if (wbuf_jeb->free_size < waste) {
printk(KERN_CRIT "jffs2_flush_wbuf(): Accounting error. wbuf at 0x%08x has 0x%03x bytes, 0x%03x left.\n",
- c->wbuf_ofs, c->wbuf_len, c->wbuf_pagesize-c->wbuf_len);
+ c->wbuf_ofs, c->wbuf_len, waste);
printk(KERN_CRIT "jffs2_flush_wbuf(): But free_size for block at 0x%08x is only 0x%08x\n",
- jeb->offset, jeb->free_size);
+ wbuf_jeb->offset, wbuf_jeb->free_size);
BUG();
}
- jeb->free_size -= (c->wbuf_pagesize - c->wbuf_len);
- c->free_size -= (c->wbuf_pagesize - c->wbuf_len);
- jeb->wasted_size += (c->wbuf_pagesize - c->wbuf_len);
- c->wasted_size += (c->wbuf_pagesize - c->wbuf_len);
- }
+
+ spin_lock(&c->erase_completion_lock);
+
+ jffs2_link_node_ref(c, wbuf_jeb, (c->wbuf_ofs + c->wbuf_len) | REF_OBSOLETE, waste, NULL);
+ /* FIXME: that made it count as dirty. Convert to wasted */
+ wbuf_jeb->dirty_size -= waste;
+ c->dirty_size -= waste;
+ wbuf_jeb->wasted_size += waste;
+ c->wasted_size += waste;
+ } else
+ spin_lock(&c->erase_completion_lock);
/* Stick any now-obsoleted blocks on the erase_pending_list */
jffs2_refile_wbuf_blocks(c);
@@ -603,20 +719,30 @@ int jffs2_flush_wbuf_pad(struct jffs2_sb_info *c)
return ret;
}
-int jffs2_flash_writev(struct jffs2_sb_info *c, const struct kvec *invecs, unsigned long count, loff_t to, size_t *retlen, uint32_t ino)
+
+static size_t jffs2_fill_wbuf(struct jffs2_sb_info *c, const uint8_t *buf,
+ size_t len)
{
- struct kvec outvecs[3];
- uint32_t totlen = 0;
- uint32_t split_ofs = 0;
- uint32_t old_totlen;
- int ret, splitvec = -1;
- int invec, outvec;
- size_t wbuf_retlen;
- unsigned char *wbuf_ptr;
- size_t donelen = 0;
+ if (len && !c->wbuf_len && (len >= c->wbuf_pagesize))
+ return 0;
+
+ if (len > (c->wbuf_pagesize - c->wbuf_len))
+ len = c->wbuf_pagesize - c->wbuf_len;
+ memcpy(c->wbuf + c->wbuf_len, buf, len);
+ c->wbuf_len += (uint32_t) len;
+ return len;
+}
+
+int jffs2_flash_writev(struct jffs2_sb_info *c, const struct kvec *invecs,
+ unsigned long count, loff_t to, size_t *retlen,
+ uint32_t ino)
+{
+ struct jffs2_eraseblock *jeb;
+ size_t wbuf_retlen, donelen = 0;
uint32_t outvec_to = to;
+ int ret, invec;
- /* If not NAND flash, don't bother */
+ /* If not writebuffered flash, don't bother */
if (!jffs2_is_writebuffered(c))
return jffs2_flash_direct_writev(c, invecs, count, to, retlen);
@@ -629,34 +755,22 @@ int jffs2_flash_writev(struct jffs2_sb_info *c, const struct kvec *invecs, unsig
memset(c->wbuf,0xff,c->wbuf_pagesize);
}
- /* Fixup the wbuf if we are moving to a new eraseblock. The checks below
- fail for ECC'd NOR because cleanmarker == 16, so a block starts at
- xxx0010. */
- if (jffs2_nor_ecc(c)) {
- if (((c->wbuf_ofs % c->sector_size) == 0) && !c->wbuf_len) {
- c->wbuf_ofs = PAGE_DIV(to);
- c->wbuf_len = PAGE_MOD(to);
- memset(c->wbuf,0xff,c->wbuf_pagesize);
- }
- }
-
- /* Sanity checks on target address.
- It's permitted to write at PAD(c->wbuf_len+c->wbuf_ofs),
- and it's permitted to write at the beginning of a new
- erase block. Anything else, and you die.
- New block starts at xxx000c (0-b = block header)
- */
+ /*
+ * Sanity checks on target address. It's permitted to write
+ * at PAD(c->wbuf_len+c->wbuf_ofs), and it's permitted to
+ * write at the beginning of a new erase block. Anything else,
+ * and you die. New block starts at xxx000c (0-b = block
+ * header)
+ */
if (SECTOR_ADDR(to) != SECTOR_ADDR(c->wbuf_ofs)) {
/* It's a write to a new block */
if (c->wbuf_len) {
- D1(printk(KERN_DEBUG "jffs2_flash_writev() to 0x%lx causes flush of wbuf at 0x%08x\n", (unsigned long)to, c->wbuf_ofs));
+ D1(printk(KERN_DEBUG "jffs2_flash_writev() to 0x%lx "
+ "causes flush of wbuf at 0x%08x\n",
+ (unsigned long)to, c->wbuf_ofs));
ret = __jffs2_flush_wbuf(c, PAD_NOACCOUNT);
- if (ret) {
- /* the underlying layer has to check wbuf_len to do the cleanup */
- D1(printk(KERN_WARNING "jffs2_flush_wbuf() called from jffs2_flash_writev() failed %d\n", ret));
- *retlen = 0;
- goto exit;
- }
+ if (ret)
+ goto outerr;
}
/* set pointer to new block */
c->wbuf_ofs = PAGE_DIV(to);
@@ -665,165 +779,70 @@ int jffs2_flash_writev(struct jffs2_sb_info *c, const struct kvec *invecs, unsig
if (to != PAD(c->wbuf_ofs + c->wbuf_len)) {
/* We're not writing immediately after the writebuffer. Bad. */
- printk(KERN_CRIT "jffs2_flash_writev(): Non-contiguous write to %08lx\n", (unsigned long)to);
+ printk(KERN_CRIT "jffs2_flash_writev(): Non-contiguous write "
+ "to %08lx\n", (unsigned long)to);
if (c->wbuf_len)
printk(KERN_CRIT "wbuf was previously %08x-%08x\n",
- c->wbuf_ofs, c->wbuf_ofs+c->wbuf_len);
+ c->wbuf_ofs, c->wbuf_ofs+c->wbuf_len);
BUG();
}
- /* Note outvecs[3] above. We know count is never greater than 2 */
- if (count > 2) {
- printk(KERN_CRIT "jffs2_flash_writev(): count is %ld\n", count);
- BUG();
- }
-
- invec = 0;
- outvec = 0;
-
- /* Fill writebuffer first, if already in use */
- if (c->wbuf_len) {
- uint32_t invec_ofs = 0;
-
- /* adjust alignment offset */
- if (c->wbuf_len != PAGE_MOD(to)) {
- c->wbuf_len = PAGE_MOD(to);
- /* take care of alignment to next page */
- if (!c->wbuf_len)
- c->wbuf_len = c->wbuf_pagesize;
- }
-
- while(c->wbuf_len < c->wbuf_pagesize) {
- uint32_t thislen;
-
- if (invec == count)
- goto alldone;
-
- thislen = c->wbuf_pagesize - c->wbuf_len;
-
- if (thislen >= invecs[invec].iov_len)
- thislen = invecs[invec].iov_len;
-
- invec_ofs = thislen;
-
- memcpy(c->wbuf + c->wbuf_len, invecs[invec].iov_base, thislen);
- c->wbuf_len += thislen;
- donelen += thislen;
- /* Get next invec, if actual did not fill the buffer */
- if (c->wbuf_len < c->wbuf_pagesize)
- invec++;
- }
-
- /* write buffer is full, flush buffer */
- ret = __jffs2_flush_wbuf(c, NOPAD);
- if (ret) {
- /* the underlying layer has to check wbuf_len to do the cleanup */
- D1(printk(KERN_WARNING "jffs2_flush_wbuf() called from jffs2_flash_writev() failed %d\n", ret));
- /* Retlen zero to make sure our caller doesn't mark the space dirty.
- We've already done everything that's necessary */
- *retlen = 0;
- goto exit;
- }
- outvec_to += donelen;
- c->wbuf_ofs = outvec_to;
-
- /* All invecs done ? */
- if (invec == count)
- goto alldone;
-
- /* Set up the first outvec, containing the remainder of the
- invec we partially used */
- if (invecs[invec].iov_len > invec_ofs) {
- outvecs[0].iov_base = invecs[invec].iov_base+invec_ofs;
- totlen = outvecs[0].iov_len = invecs[invec].iov_len-invec_ofs;
- if (totlen > c->wbuf_pagesize) {
- splitvec = outvec;
- split_ofs = outvecs[0].iov_len - PAGE_MOD(totlen);
- }
- outvec++;
- }
- invec++;
- }
-
- /* OK, now we've flushed the wbuf and the start of the bits
- we have been asked to write, now to write the rest.... */
-
- /* totlen holds the amount of data still to be written */
- old_totlen = totlen;
- for ( ; invec < count; invec++,outvec++ ) {
- outvecs[outvec].iov_base = invecs[invec].iov_base;
- totlen += outvecs[outvec].iov_len = invecs[invec].iov_len;
- if (PAGE_DIV(totlen) != PAGE_DIV(old_totlen)) {
- splitvec = outvec;
- split_ofs = outvecs[outvec].iov_len - PAGE_MOD(totlen);
- old_totlen = totlen;
+ /* adjust alignment offset */
+ if (c->wbuf_len != PAGE_MOD(to)) {
+ c->wbuf_len = PAGE_MOD(to);
+ /* take care of alignment to next page */
+ if (!c->wbuf_len) {
+ c->wbuf_len = c->wbuf_pagesize;
+ ret = __jffs2_flush_wbuf(c, NOPAD);
+ if (ret)
+ goto outerr;
}
}
- /* Now the outvecs array holds all the remaining data to write */
- /* Up to splitvec,split_ofs is to be written immediately. The rest
- goes into the (now-empty) wbuf */
-
- if (splitvec != -1) {
- uint32_t remainder;
-
- remainder = outvecs[splitvec].iov_len - split_ofs;
- outvecs[splitvec].iov_len = split_ofs;
-
- /* We did cross a page boundary, so we write some now */
- if (jffs2_cleanmarker_oob(c))
- ret = c->mtd->writev_ecc(c->mtd, outvecs, splitvec+1, outvec_to, &wbuf_retlen, NULL, c->oobinfo);
- else
- ret = jffs2_flash_direct_writev(c, outvecs, splitvec+1, outvec_to, &wbuf_retlen);
-
- if (ret < 0 || wbuf_retlen != PAGE_DIV(totlen)) {
- /* At this point we have no problem,
- c->wbuf is empty. However refile nextblock to avoid
- writing again to same address.
- */
- struct jffs2_eraseblock *jeb;
+ for (invec = 0; invec < count; invec++) {
+ int vlen = invecs[invec].iov_len;
+ uint8_t *v = invecs[invec].iov_base;
- spin_lock(&c->erase_completion_lock);
+ wbuf_retlen = jffs2_fill_wbuf(c, v, vlen);
- jeb = &c->blocks[outvec_to / c->sector_size];
- jffs2_block_refile(c, jeb, REFILE_ANYWAY);
-
- *retlen = 0;
- spin_unlock(&c->erase_completion_lock);
- goto exit;
+ if (c->wbuf_len == c->wbuf_pagesize) {
+ ret = __jffs2_flush_wbuf(c, NOPAD);
+ if (ret)
+ goto outerr;
}
-
+ vlen -= wbuf_retlen;
+ outvec_to += wbuf_retlen;
donelen += wbuf_retlen;
- c->wbuf_ofs = PAGE_DIV(outvec_to) + PAGE_DIV(totlen);
-
- if (remainder) {
- outvecs[splitvec].iov_base += split_ofs;
- outvecs[splitvec].iov_len = remainder;
- } else {
- splitvec++;
+ v += wbuf_retlen;
+
+ if (vlen >= c->wbuf_pagesize) {
+ ret = c->mtd->write(c->mtd, outvec_to, PAGE_DIV(vlen),
+ &wbuf_retlen, v);
+ if (ret < 0 || wbuf_retlen != PAGE_DIV(vlen))
+ goto outfile;
+
+ vlen -= wbuf_retlen;
+ outvec_to += wbuf_retlen;
+ c->wbuf_ofs = outvec_to;
+ donelen += wbuf_retlen;
+ v += wbuf_retlen;
}
- } else {
- splitvec = 0;
- }
-
- /* Now splitvec points to the start of the bits we have to copy
- into the wbuf */
- wbuf_ptr = c->wbuf;
+ wbuf_retlen = jffs2_fill_wbuf(c, v, vlen);
+ if (c->wbuf_len == c->wbuf_pagesize) {
+ ret = __jffs2_flush_wbuf(c, NOPAD);
+ if (ret)
+ goto outerr;
+ }
- for ( ; splitvec < outvec; splitvec++) {
- /* Don't copy the wbuf into itself */
- if (outvecs[splitvec].iov_base == c->wbuf)
- continue;
- memcpy(wbuf_ptr, outvecs[splitvec].iov_base, outvecs[splitvec].iov_len);
- wbuf_ptr += outvecs[splitvec].iov_len;
- donelen += outvecs[splitvec].iov_len;
+ outvec_to += wbuf_retlen;
+ donelen += wbuf_retlen;
}
- c->wbuf_len = wbuf_ptr - c->wbuf;
- /* If there's a remainder in the wbuf and it's a non-GC write,
- remember that the wbuf affects this ino */
-alldone:
+ /*
+ * If there's a remainder in the wbuf and it's a non-GC write,
+ * remember that the wbuf affects this ino
+ */
*retlen = donelen;
if (jffs2_sum_active()) {
@@ -836,8 +855,24 @@ alldone:
jffs2_wbuf_dirties_inode(c, ino);
ret = 0;
+ up_write(&c->wbuf_sem);
+ return ret;
-exit:
+outfile:
+ /*
+ * At this point we have no problem, c->wbuf is empty. However
+ * refile nextblock to avoid writing again to same address.
+ */
+
+ spin_lock(&c->erase_completion_lock);
+
+ jeb = &c->blocks[outvec_to / c->sector_size];
+ jffs2_block_refile(c, jeb, REFILE_ANYWAY);
+
+ spin_unlock(&c->erase_completion_lock);
+
+outerr:
+ *retlen = 0;
up_write(&c->wbuf_sem);
return ret;
}
@@ -846,7 +881,8 @@ exit:
* This is the entry for flash write.
 * Check if we work on NAND flash; if so, build a kvec and write it via writev
*/
-int jffs2_flash_write(struct jffs2_sb_info *c, loff_t ofs, size_t len, size_t *retlen, const u_char *buf)
+int jffs2_flash_write(struct jffs2_sb_info *c, loff_t ofs, size_t len,
+ size_t *retlen, const u_char *buf)
{
struct kvec vecs[1];
@@ -871,25 +907,23 @@ int jffs2_flash_read(struct jffs2_sb_info *c, loff_t ofs, size_t len, size_t *re
/* Read flash */
down_read(&c->wbuf_sem);
- if (jffs2_cleanmarker_oob(c))
- ret = c->mtd->read_ecc(c->mtd, ofs, len, retlen, buf, NULL, c->oobinfo);
- else
- ret = c->mtd->read(c->mtd, ofs, len, retlen, buf);
-
- if ( (ret == -EBADMSG) && (*retlen == len) ) {
- printk(KERN_WARNING "mtd->read(0x%zx bytes from 0x%llx) returned ECC error\n",
- len, ofs);
+ ret = c->mtd->read(c->mtd, ofs, len, retlen, buf);
+
+ if ( (ret == -EBADMSG || ret == -EUCLEAN) && (*retlen == len) ) {
+ if (ret == -EBADMSG)
+ printk(KERN_WARNING "mtd->read(0x%zx bytes from 0x%llx)"
+ " returned ECC error\n", len, ofs);
/*
- * We have the raw data without ECC correction in the buffer, maybe
- * we are lucky and all data or parts are correct. We check the node.
- * If data are corrupted node check will sort it out.
- * We keep this block, it will fail on write or erase and the we
- * mark it bad. Or should we do that now? But we should give him a chance.
- * Maybe we had a system crash or power loss before the ecc write or
- * a erase was completed.
+ * We have the raw data without ECC correction in the buffer,
+ * maybe we are lucky and all data or parts are correct. We
+ * check the node. If data are corrupted, the node check will
+ * sort it out. We keep this block; it will fail on write or
+ * erase and then we mark it bad. Or should we do that now?
+ * But we should give it a chance. Maybe we had a system crash
+ * or power loss before the ECC write or an erase was completed.
* So we return success. :)
*/
- ret = 0;
+ ret = 0;
}
/* if no writebuffer available or write buffer empty, return */
@@ -911,7 +945,7 @@ int jffs2_flash_read(struct jffs2_sb_info *c, loff_t ofs, size_t len, size_t *re
orbf = (c->wbuf_ofs - ofs); /* offset in read buffer */
if (orbf > len) /* is write beyond write buffer ? */
goto exit;
- lwbf = len - orbf; /* number of bytes to copy */
+ lwbf = len - orbf; /* number of bytes to copy */
if (lwbf > c->wbuf_len)
lwbf = c->wbuf_len;
}
@@ -923,158 +957,159 @@ exit:
return ret;
}
+#define NR_OOB_SCAN_PAGES 4
+
/*
- * Check, if the out of band area is empty
+ * Check, if the out of band area is empty
*/
-int jffs2_check_oob_empty( struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb, int mode)
+int jffs2_check_oob_empty(struct jffs2_sb_info *c,
+ struct jffs2_eraseblock *jeb, int mode)
{
- unsigned char *buf;
- int ret = 0;
- int i,len,page;
- size_t retlen;
- int oob_size;
-
- /* allocate a buffer for all oob data in this sector */
- oob_size = c->mtd->oobsize;
- len = 4 * oob_size;
- buf = kmalloc(len, GFP_KERNEL);
- if (!buf) {
- printk(KERN_NOTICE "jffs2_check_oob_empty(): allocation of temporary data buffer for oob check failed\n");
- return -ENOMEM;
- }
- /*
- * if mode = 0, we scan for a total empty oob area, else we have
- * to take care of the cleanmarker in the first page of the block
- */
- ret = jffs2_flash_read_oob(c, jeb->offset, len , &retlen, buf);
+ int i, page, ret;
+ int oobsize = c->mtd->oobsize;
+ struct mtd_oob_ops ops;
+
+ ops.len = NR_OOB_SCAN_PAGES * oobsize;
+ ops.ooblen = oobsize;
+ ops.oobbuf = c->oobbuf;
+ ops.ooboffs = 0;
+ ops.datbuf = NULL;
+ ops.mode = MTD_OOB_PLACE;
+
+ ret = c->mtd->read_oob(c->mtd, jeb->offset, &ops);
if (ret) {
- D1(printk(KERN_WARNING "jffs2_check_oob_empty(): Read OOB failed %d for block at %08x\n", ret, jeb->offset));
- goto out;
+ D1(printk(KERN_WARNING "jffs2_check_oob_empty(): Read OOB "
+ "failed %d for block at %08x\n", ret, jeb->offset));
+ return ret;
}
- if (retlen < len) {
- D1(printk(KERN_WARNING "jffs2_check_oob_empty(): Read OOB return short read "
- "(%zd bytes not %d) for block at %08x\n", retlen, len, jeb->offset));
- ret = -EIO;
- goto out;
+ if (ops.retlen < ops.len) {
+ D1(printk(KERN_WARNING "jffs2_check_oob_empty(): Read OOB "
+ "returned short read (%zd bytes not %d) for block "
+ "at %08x\n", ops.retlen, ops.len, jeb->offset));
+ return -EIO;
}
/* Special check for first page */
- for(i = 0; i < oob_size ; i++) {
+ for(i = 0; i < oobsize ; i++) {
/* Yeah, we know about the cleanmarker. */
if (mode && i >= c->fsdata_pos &&
i < c->fsdata_pos + c->fsdata_len)
continue;
- if (buf[i] != 0xFF) {
- D2(printk(KERN_DEBUG "Found %02x at %x in OOB for %08x\n",
- buf[i], i, jeb->offset));
- ret = 1;
- goto out;
+ if (ops.oobbuf[i] != 0xFF) {
+ D2(printk(KERN_DEBUG "Found %02x at %x in OOB for "
+ "%08x\n", ops.oobbuf[i], i, jeb->offset));
+ return 1;
}
}
/* we know, we are aligned :) */
- for (page = oob_size; page < len; page += sizeof(long)) {
- unsigned long dat = *(unsigned long *)(&buf[page]);
- if(dat != -1) {
- ret = 1;
- goto out;
- }
+ for (page = oobsize; page < ops.len; page += sizeof(long)) {
+ long dat = *(long *)(&ops.oobbuf[page]);
+ if(dat != -1)
+ return 1;
}
-
-out:
- kfree(buf);
-
- return ret;
+ return 0;
}
/*
-* Scan for a valid cleanmarker and for bad blocks
-* For virtual blocks (concatenated physical blocks) check the cleanmarker
-* only in the first page of the first physical block, but scan for bad blocks in all
-* physical blocks
-*/
-int jffs2_check_nand_cleanmarker (struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb)
+ * Scan for a valid cleanmarker and for bad blocks
+ */
+int jffs2_check_nand_cleanmarker (struct jffs2_sb_info *c,
+ struct jffs2_eraseblock *jeb)
{
struct jffs2_unknown_node n;
- unsigned char buf[2 * NAND_MAX_OOBSIZE];
- unsigned char *p;
- int ret, i, cnt, retval = 0;
- size_t retlen, offset;
- int oob_size;
-
- offset = jeb->offset;
- oob_size = c->mtd->oobsize;
-
- /* Loop through the physical blocks */
- for (cnt = 0; cnt < (c->sector_size / c->mtd->erasesize); cnt++) {
- /* Check first if the block is bad. */
- if (c->mtd->block_isbad (c->mtd, offset)) {
- D1 (printk (KERN_WARNING "jffs2_check_nand_cleanmarker(): Bad block at %08x\n", jeb->offset));
- return 2;
- }
- /*
- * We read oob data from page 0 and 1 of the block.
- * page 0 contains cleanmarker and badblock info
- * page 1 contains failure count of this block
- */
- ret = c->mtd->read_oob (c->mtd, offset, oob_size << 1, &retlen, buf);
+ struct mtd_oob_ops ops;
+ int oobsize = c->mtd->oobsize;
+ unsigned char *p,*b;
+ int i, ret;
+ size_t offset = jeb->offset;
+
+ /* Check first if the block is bad. */
+ if (c->mtd->block_isbad(c->mtd, offset)) {
+ D1 (printk(KERN_WARNING "jffs2_check_nand_cleanmarker()"
+ ": Bad block at %08x\n", jeb->offset));
+ return 2;
+ }
- if (ret) {
- D1 (printk (KERN_WARNING "jffs2_check_nand_cleanmarker(): Read OOB failed %d for block at %08x\n", ret, jeb->offset));
- return ret;
- }
- if (retlen < (oob_size << 1)) {
- D1 (printk (KERN_WARNING "jffs2_check_nand_cleanmarker(): Read OOB return short read (%zd bytes not %d) for block at %08x\n", retlen, oob_size << 1, jeb->offset));
- return -EIO;
- }
+ ops.len = oobsize;
+ ops.ooblen = oobsize;
+ ops.oobbuf = c->oobbuf;
+ ops.ooboffs = 0;
+ ops.datbuf = NULL;
+ ops.mode = MTD_OOB_PLACE;
- /* Check cleanmarker only on the first physical block */
- if (!cnt) {
- n.magic = cpu_to_je16 (JFFS2_MAGIC_BITMASK);
- n.nodetype = cpu_to_je16 (JFFS2_NODETYPE_CLEANMARKER);
- n.totlen = cpu_to_je32 (8);
- p = (unsigned char *) &n;
+ ret = c->mtd->read_oob(c->mtd, offset, &ops);
+ if (ret) {
+ D1 (printk(KERN_WARNING "jffs2_check_nand_cleanmarker(): "
+ "Read OOB failed %d for block at %08x\n",
+ ret, jeb->offset));
+ return ret;
+ }
- for (i = 0; i < c->fsdata_len; i++) {
- if (buf[c->fsdata_pos + i] != p[i]) {
- retval = 1;
- }
- }
- D1(if (retval == 1) {
- printk(KERN_WARNING "jffs2_check_nand_cleanmarker(): Cleanmarker node not detected in block at %08x\n", jeb->offset);
- printk(KERN_WARNING "OOB at %08x was ", offset);
- for (i=0; i < oob_size; i++) {
- printk("%02x ", buf[i]);
- }
- printk("\n");
- })
- }
- offset += c->mtd->erasesize;
+ if (ops.retlen < ops.len) {
+ D1 (printk (KERN_WARNING "jffs2_check_nand_cleanmarker(): "
+ "Read OOB return short read (%zd bytes not %d) "
+ "for block at %08x\n", ops.retlen, ops.len,
+ jeb->offset));
+ return -EIO;
}
- return retval;
+
+ n.magic = cpu_to_je16 (JFFS2_MAGIC_BITMASK);
+ n.nodetype = cpu_to_je16 (JFFS2_NODETYPE_CLEANMARKER);
+ n.totlen = cpu_to_je32 (8);
+ p = (unsigned char *) &n;
+ b = c->oobbuf + c->fsdata_pos;
+
+ for (i = c->fsdata_len; i; i--) {
+ if (*b++ != *p++)
+ ret = 1;
+ }
+
+ D1(if (ret == 1) {
+ printk(KERN_WARNING "jffs2_check_nand_cleanmarker(): "
+ "Cleanmarker node not detected in block at %08x\n",
+ offset);
+ printk(KERN_WARNING "OOB at %08zx was ", offset);
+ for (i=0; i < oobsize; i++)
+ printk("%02x ", c->oobbuf[i]);
+ printk("\n");
+ });
+ return ret;
}
-int jffs2_write_nand_cleanmarker(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb)
+int jffs2_write_nand_cleanmarker(struct jffs2_sb_info *c,
+ struct jffs2_eraseblock *jeb)
{
- struct jffs2_unknown_node n;
- int ret;
- size_t retlen;
+ struct jffs2_unknown_node n;
+ int ret;
+ struct mtd_oob_ops ops;
n.magic = cpu_to_je16(JFFS2_MAGIC_BITMASK);
n.nodetype = cpu_to_je16(JFFS2_NODETYPE_CLEANMARKER);
n.totlen = cpu_to_je32(8);
- ret = jffs2_flash_write_oob(c, jeb->offset + c->fsdata_pos, c->fsdata_len, &retlen, (unsigned char *)&n);
+ ops.len = c->fsdata_len;
+ ops.ooblen = c->fsdata_len;
+ ops.oobbuf = (uint8_t *)&n;
+ ops.ooboffs = c->fsdata_pos;
+ ops.datbuf = NULL;
+ ops.mode = MTD_OOB_PLACE;
+
+ ret = c->mtd->write_oob(c->mtd, jeb->offset, &ops);
if (ret) {
- D1(printk(KERN_WARNING "jffs2_write_nand_cleanmarker(): Write failed for block at %08x: error %d\n", jeb->offset, ret));
+ D1(printk(KERN_WARNING "jffs2_write_nand_cleanmarker(): "
+ "Write failed for block at %08x: error %d\n",
+ jeb->offset, ret));
return ret;
}
- if (retlen != c->fsdata_len) {
- D1(printk(KERN_WARNING "jffs2_write_nand_cleanmarker(): Short write for block at %08x: %zd not %d\n", jeb->offset, retlen, c->fsdata_len));
- return ret;
+ if (ops.retlen != ops.len) {
+ D1(printk(KERN_WARNING "jffs2_write_nand_cleanmarker(): "
+ "Short write for block at %08x: %zd not %d\n",
+ jeb->offset, ops.retlen, ops.len));
+ return -EIO;
}
return 0;
}
@@ -1108,18 +1143,9 @@ int jffs2_write_nand_badblock(struct jffs2_sb_info *c, struct jffs2_eraseblock *
return 1;
}
-#define NAND_JFFS2_OOB16_FSDALEN 8
-
-static struct nand_oobinfo jffs2_oobinfo_docecc = {
- .useecc = MTD_NANDECC_PLACE,
- .eccbytes = 6,
- .eccpos = {0,1,2,3,4,5}
-};
-
-
static int jffs2_nand_set_oobinfo(struct jffs2_sb_info *c)
{
- struct nand_oobinfo *oinfo = &c->mtd->oobinfo;
+ struct nand_ecclayout *oinfo = c->mtd->ecclayout;
/* Do this only, if we have an oob buffer */
if (!c->mtd->oobsize)
@@ -1129,33 +1155,23 @@ static int jffs2_nand_set_oobinfo(struct jffs2_sb_info *c)
c->cleanmarker_size = 0;
/* Should we use autoplacement ? */
- if (oinfo && oinfo->useecc == MTD_NANDECC_AUTOPLACE) {
- D1(printk(KERN_DEBUG "JFFS2 using autoplace on NAND\n"));
- /* Get the position of the free bytes */
- if (!oinfo->oobfree[0][1]) {
- printk (KERN_WARNING "jffs2_nand_set_oobinfo(): Eeep. Autoplacement selected and no empty space in oob\n");
- return -ENOSPC;
- }
- c->fsdata_pos = oinfo->oobfree[0][0];
- c->fsdata_len = oinfo->oobfree[0][1];
- if (c->fsdata_len > 8)
- c->fsdata_len = 8;
- } else {
- /* This is just a legacy fallback and should go away soon */
- switch(c->mtd->ecctype) {
- case MTD_ECC_RS_DiskOnChip:
- printk(KERN_WARNING "JFFS2 using DiskOnChip hardware ECC without autoplacement. Fix it!\n");
- c->oobinfo = &jffs2_oobinfo_docecc;
- c->fsdata_pos = 6;
- c->fsdata_len = NAND_JFFS2_OOB16_FSDALEN;
- c->badblock_pos = 15;
- break;
+ if (!oinfo) {
+ D1(printk(KERN_DEBUG "JFFS2 on NAND. No autoplacement info found\n"));
+ return -EINVAL;
+ }
- default:
- D1(printk(KERN_DEBUG "JFFS2 on NAND. No autoplacment info found\n"));
- return -EINVAL;
- }
+ D1(printk(KERN_DEBUG "JFFS2 using autoplace on NAND\n"));
+ /* Get the position of the free bytes */
+ if (!oinfo->oobfree[0].length) {
+ printk (KERN_WARNING "jffs2_nand_set_oobinfo(): Eeep."
+ " Autoplacement selected and no empty space in oob\n");
+ return -ENOSPC;
}
+ c->fsdata_pos = oinfo->oobfree[0].offset;
+ c->fsdata_len = oinfo->oobfree[0].length;
+ if (c->fsdata_len > 8)
+ c->fsdata_len = 8;
+
return 0;
}
@@ -1165,13 +1181,17 @@ int jffs2_nand_flash_setup(struct jffs2_sb_info *c)
/* Initialise write buffer */
init_rwsem(&c->wbuf_sem);
- c->wbuf_pagesize = c->mtd->oobblock;
+ c->wbuf_pagesize = c->mtd->writesize;
c->wbuf_ofs = 0xFFFFFFFF;
c->wbuf = kmalloc(c->wbuf_pagesize, GFP_KERNEL);
if (!c->wbuf)
return -ENOMEM;
+ c->oobbuf = kmalloc(NR_OOB_SCAN_PAGES * c->mtd->oobsize, GFP_KERNEL);
+ if (!c->oobbuf)
+ return -ENOMEM;
+
res = jffs2_nand_set_oobinfo(c);
#ifdef BREAKME
@@ -1189,6 +1209,7 @@ int jffs2_nand_flash_setup(struct jffs2_sb_info *c)
void jffs2_nand_flash_cleanup(struct jffs2_sb_info *c)
{
kfree(c->wbuf);
+ kfree(c->oobbuf);
}
int jffs2_dataflash_setup(struct jffs2_sb_info *c) {
@@ -1236,33 +1257,14 @@ void jffs2_dataflash_cleanup(struct jffs2_sb_info *c) {
kfree(c->wbuf);
}
-int jffs2_nor_ecc_flash_setup(struct jffs2_sb_info *c) {
- /* Cleanmarker is actually larger on the flashes */
- c->cleanmarker_size = 16;
-
- /* Initialize write buffer */
- init_rwsem(&c->wbuf_sem);
- c->wbuf_pagesize = c->mtd->eccsize;
- c->wbuf_ofs = 0xFFFFFFFF;
-
- c->wbuf = kmalloc(c->wbuf_pagesize, GFP_KERNEL);
- if (!c->wbuf)
- return -ENOMEM;
-
- return 0;
-}
-
-void jffs2_nor_ecc_flash_cleanup(struct jffs2_sb_info *c) {
- kfree(c->wbuf);
-}
-
int jffs2_nor_wbuf_flash_setup(struct jffs2_sb_info *c) {
- /* Cleanmarker currently occupies a whole programming region */
- c->cleanmarker_size = MTD_PROGREGION_SIZE(c->mtd);
+ /* Cleanmarker currently occupies whole programming regions,
+ * either one or two for 8-byte STMicro flashes. */
+ c->cleanmarker_size = max(16u, c->mtd->writesize);
/* Initialize write buffer */
init_rwsem(&c->wbuf_sem);
- c->wbuf_pagesize = MTD_PROGREGION_SIZE(c->mtd);
+ c->wbuf_pagesize = c->mtd->writesize;
c->wbuf_ofs = 0xFFFFFFFF;
c->wbuf = kmalloc(c->wbuf_pagesize, GFP_KERNEL);
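
Aside on the rewritten jffs2_flash_writev() above (illustration only, not part of the patch): the new path is "stage bytes in a page-sized write buffer, flush when it fills, and push whole pages straight to the MTD device". A user-space model of the fill step, with a plain struct standing in for jffs2_sb_info and hypothetical demo_* names throughout:

#include <stdint.h>
#include <stddef.h>
#include <string.h>

struct demo_wbuf {
	uint8_t  *wbuf;		/* staging area, one flash page long */
	uint32_t  wbuf_len;	/* bytes currently staged */
	uint32_t  wbuf_pagesize;
};

/*
 * Absorb as much of buf[0..len) into the write buffer as fits in the
 * current page; returns the number of bytes taken.  A whole page arriving
 * while the buffer is empty is left for the caller to write directly
 * (returns 0).
 */
size_t demo_fill_wbuf(struct demo_wbuf *c, const uint8_t *buf, size_t len)
{
	if (len && !c->wbuf_len && (len >= c->wbuf_pagesize))
		return 0;

	if (len > (c->wbuf_pagesize - c->wbuf_len))
		len = c->wbuf_pagesize - c->wbuf_len;
	memcpy(c->wbuf + c->wbuf_len, buf, len);
	c->wbuf_len += (uint32_t)len;
	return len;
}

In the patch itself the caller loops over each iovec in this order: fill the buffer, flush when wbuf_len reaches wbuf_pagesize, write any remaining whole pages with c->mtd->write(), then stage the tail for the next call.
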
diff --git a/fs/jffs2/write.c b/fs/jffs2/write.c
index 1342f0158e9..67176792e13 100644
--- a/fs/jffs2/write.c
+++ b/fs/jffs2/write.c
@@ -37,7 +37,6 @@ int jffs2_do_new_inode(struct jffs2_sb_info *c, struct jffs2_inode_info *f, uint
f->inocache->nodes = (struct jffs2_raw_node_ref *)f->inocache;
f->inocache->state = INO_STATE_PRESENT;
-
jffs2_add_ino_cache(c, f->inocache);
D1(printk(KERN_DEBUG "jffs2_do_new_inode(): Assigned ino# %d\n", f->inocache->ino));
ri->ino = cpu_to_je32(f->inocache->ino);
@@ -57,12 +56,14 @@ int jffs2_do_new_inode(struct jffs2_sb_info *c, struct jffs2_inode_info *f, uint
/* jffs2_write_dnode - given a raw_inode, allocate a full_dnode for it,
write it to the flash, link it into the existing inode/fragment list */
-struct jffs2_full_dnode *jffs2_write_dnode(struct jffs2_sb_info *c, struct jffs2_inode_info *f, struct jffs2_raw_inode *ri, const unsigned char *data, uint32_t datalen, uint32_t flash_ofs, int alloc_mode)
+struct jffs2_full_dnode *jffs2_write_dnode(struct jffs2_sb_info *c, struct jffs2_inode_info *f,
+ struct jffs2_raw_inode *ri, const unsigned char *data,
+ uint32_t datalen, int alloc_mode)
{
- struct jffs2_raw_node_ref *raw;
struct jffs2_full_dnode *fn;
size_t retlen;
+ uint32_t flash_ofs;
struct kvec vecs[2];
int ret;
int retried = 0;
@@ -78,34 +79,21 @@ struct jffs2_full_dnode *jffs2_write_dnode(struct jffs2_sb_info *c, struct jffs2
vecs[1].iov_base = (unsigned char *)data;
vecs[1].iov_len = datalen;
- jffs2_dbg_prewrite_paranoia_check(c, flash_ofs, vecs[0].iov_len + vecs[1].iov_len);
-
if (je32_to_cpu(ri->totlen) != sizeof(*ri) + datalen) {
printk(KERN_WARNING "jffs2_write_dnode: ri->totlen (0x%08x) != sizeof(*ri) (0x%08zx) + datalen (0x%08x)\n", je32_to_cpu(ri->totlen), sizeof(*ri), datalen);
}
- raw = jffs2_alloc_raw_node_ref();
- if (!raw)
- return ERR_PTR(-ENOMEM);
fn = jffs2_alloc_full_dnode();
- if (!fn) {
- jffs2_free_raw_node_ref(raw);
+ if (!fn)
return ERR_PTR(-ENOMEM);
- }
-
- fn->ofs = je32_to_cpu(ri->offset);
- fn->size = je32_to_cpu(ri->dsize);
- fn->frags = 0;
/* check number of valid vecs */
if (!datalen || !data)
cnt = 1;
retry:
- fn->raw = raw;
+ flash_ofs = write_ofs(c);
- raw->flash_offset = flash_ofs;
- raw->__totlen = PAD(sizeof(*ri)+datalen);
- raw->next_phys = NULL;
+ jffs2_dbg_prewrite_paranoia_check(c, flash_ofs, vecs[0].iov_len + vecs[1].iov_len);
if ((alloc_mode!=ALLOC_GC) && (je32_to_cpu(ri->version) < f->highest_version)) {
BUG_ON(!retried);
@@ -125,22 +113,16 @@ struct jffs2_full_dnode *jffs2_write_dnode(struct jffs2_sb_info *c, struct jffs2
/* Mark the space as dirtied */
if (retlen) {
- /* Doesn't belong to any inode */
- raw->next_in_ino = NULL;
-
/* Don't change raw->size to match retlen. We may have
written the node header already, and only the data will
seem corrupted, in which case the scan would skip over
any node we write before the original intended end of
this node */
- raw->flash_offset |= REF_OBSOLETE;
- jffs2_add_physical_node_ref(c, raw);
- jffs2_mark_node_obsolete(c, raw);
+ jffs2_add_physical_node_ref(c, flash_ofs | REF_OBSOLETE, PAD(sizeof(*ri)+datalen), NULL);
} else {
- printk(KERN_NOTICE "Not marking the space at 0x%08x as dirty because the flash driver returned retlen zero\n", raw->flash_offset);
- jffs2_free_raw_node_ref(raw);
+ printk(KERN_NOTICE "Not marking the space at 0x%08x as dirty because the flash driver returned retlen zero\n", flash_ofs);
}
- if (!retried && alloc_mode != ALLOC_NORETRY && (raw = jffs2_alloc_raw_node_ref())) {
+ if (!retried && alloc_mode != ALLOC_NORETRY) {
/* Try to reallocate space and retry */
uint32_t dummy;
struct jffs2_eraseblock *jeb = &c->blocks[flash_ofs / c->sector_size];
@@ -153,19 +135,20 @@ struct jffs2_full_dnode *jffs2_write_dnode(struct jffs2_sb_info *c, struct jffs2
jffs2_dbg_acct_paranoia_check(c, jeb);
if (alloc_mode == ALLOC_GC) {
- ret = jffs2_reserve_space_gc(c, sizeof(*ri) + datalen, &flash_ofs,
- &dummy, JFFS2_SUMMARY_INODE_SIZE);
+ ret = jffs2_reserve_space_gc(c, sizeof(*ri) + datalen, &dummy,
+ JFFS2_SUMMARY_INODE_SIZE);
} else {
/* Locking pain */
up(&f->sem);
jffs2_complete_reservation(c);
- ret = jffs2_reserve_space(c, sizeof(*ri) + datalen, &flash_ofs,
- &dummy, alloc_mode, JFFS2_SUMMARY_INODE_SIZE);
+ ret = jffs2_reserve_space(c, sizeof(*ri) + datalen, &dummy,
+ alloc_mode, JFFS2_SUMMARY_INODE_SIZE);
down(&f->sem);
}
if (!ret) {
+ flash_ofs = write_ofs(c);
D1(printk(KERN_DEBUG "Allocated space at 0x%08x to retry failed write.\n", flash_ofs));
jffs2_dbg_acct_sanity_check(c,jeb);
@@ -174,7 +157,6 @@ struct jffs2_full_dnode *jffs2_write_dnode(struct jffs2_sb_info *c, struct jffs2
goto retry;
}
D1(printk(KERN_DEBUG "Failed to allocate space to retry failed write: %d!\n", ret));
- jffs2_free_raw_node_ref(raw);
}
/* Release the full_dnode which is now useless, and return */
jffs2_free_full_dnode(fn);
@@ -188,20 +170,17 @@ struct jffs2_full_dnode *jffs2_write_dnode(struct jffs2_sb_info *c, struct jffs2
if ((je32_to_cpu(ri->dsize) >= PAGE_CACHE_SIZE) ||
( ((je32_to_cpu(ri->offset)&(PAGE_CACHE_SIZE-1))==0) &&
(je32_to_cpu(ri->dsize)+je32_to_cpu(ri->offset) == je32_to_cpu(ri->isize)))) {
- raw->flash_offset |= REF_PRISTINE;
+ flash_ofs |= REF_PRISTINE;
} else {
- raw->flash_offset |= REF_NORMAL;
+ flash_ofs |= REF_NORMAL;
}
- jffs2_add_physical_node_ref(c, raw);
-
- /* Link into per-inode list */
- spin_lock(&c->erase_completion_lock);
- raw->next_in_ino = f->inocache->nodes;
- f->inocache->nodes = raw;
- spin_unlock(&c->erase_completion_lock);
+ fn->raw = jffs2_add_physical_node_ref(c, flash_ofs, PAD(sizeof(*ri)+datalen), f->inocache);
+ fn->ofs = je32_to_cpu(ri->offset);
+ fn->size = je32_to_cpu(ri->dsize);
+ fn->frags = 0;
D1(printk(KERN_DEBUG "jffs2_write_dnode wrote node at 0x%08x(%d) with dsize 0x%x, csize 0x%x, node_crc 0x%08x, data_crc 0x%08x, totlen 0x%08x\n",
- flash_ofs, ref_flags(raw), je32_to_cpu(ri->dsize),
+ flash_ofs & ~3, flash_ofs & 3, je32_to_cpu(ri->dsize),
je32_to_cpu(ri->csize), je32_to_cpu(ri->node_crc),
je32_to_cpu(ri->data_crc), je32_to_cpu(ri->totlen)));
@@ -212,12 +191,14 @@ struct jffs2_full_dnode *jffs2_write_dnode(struct jffs2_sb_info *c, struct jffs2
return fn;
}
-struct jffs2_full_dirent *jffs2_write_dirent(struct jffs2_sb_info *c, struct jffs2_inode_info *f, struct jffs2_raw_dirent *rd, const unsigned char *name, uint32_t namelen, uint32_t flash_ofs, int alloc_mode)
+struct jffs2_full_dirent *jffs2_write_dirent(struct jffs2_sb_info *c, struct jffs2_inode_info *f,
+ struct jffs2_raw_dirent *rd, const unsigned char *name,
+ uint32_t namelen, int alloc_mode)
{
- struct jffs2_raw_node_ref *raw;
struct jffs2_full_dirent *fd;
size_t retlen;
struct kvec vecs[2];
+ uint32_t flash_ofs;
int retried = 0;
int ret;
@@ -228,26 +209,16 @@ struct jffs2_full_dirent *jffs2_write_dirent(struct jffs2_sb_info *c, struct jff
D1(if(je32_to_cpu(rd->hdr_crc) != crc32(0, rd, sizeof(struct jffs2_unknown_node)-4)) {
printk(KERN_CRIT "Eep. CRC not correct in jffs2_write_dirent()\n");
BUG();
- }
- );
+ });
vecs[0].iov_base = rd;
vecs[0].iov_len = sizeof(*rd);
vecs[1].iov_base = (unsigned char *)name;
vecs[1].iov_len = namelen;
- jffs2_dbg_prewrite_paranoia_check(c, flash_ofs, vecs[0].iov_len + vecs[1].iov_len);
-
- raw = jffs2_alloc_raw_node_ref();
-
- if (!raw)
- return ERR_PTR(-ENOMEM);
-
fd = jffs2_alloc_full_dirent(namelen+1);
- if (!fd) {
- jffs2_free_raw_node_ref(raw);
+ if (!fd)
return ERR_PTR(-ENOMEM);
- }
fd->version = je32_to_cpu(rd->version);
fd->ino = je32_to_cpu(rd->ino);
@@ -257,11 +228,9 @@ struct jffs2_full_dirent *jffs2_write_dirent(struct jffs2_sb_info *c, struct jff
fd->name[namelen]=0;
retry:
- fd->raw = raw;
+ flash_ofs = write_ofs(c);
- raw->flash_offset = flash_ofs;
- raw->__totlen = PAD(sizeof(*rd)+namelen);
- raw->next_phys = NULL;
+ jffs2_dbg_prewrite_paranoia_check(c, flash_ofs, vecs[0].iov_len + vecs[1].iov_len);
if ((alloc_mode!=ALLOC_GC) && (je32_to_cpu(rd->version) < f->highest_version)) {
BUG_ON(!retried);
@@ -280,15 +249,11 @@ struct jffs2_full_dirent *jffs2_write_dirent(struct jffs2_sb_info *c, struct jff
sizeof(*rd)+namelen, flash_ofs, ret, retlen);
/* Mark the space as dirtied */
if (retlen) {
- raw->next_in_ino = NULL;
- raw->flash_offset |= REF_OBSOLETE;
- jffs2_add_physical_node_ref(c, raw);
- jffs2_mark_node_obsolete(c, raw);
+ jffs2_add_physical_node_ref(c, flash_ofs | REF_OBSOLETE, PAD(sizeof(*rd)+namelen), NULL);
} else {
- printk(KERN_NOTICE "Not marking the space at 0x%08x as dirty because the flash driver returned retlen zero\n", raw->flash_offset);
- jffs2_free_raw_node_ref(raw);
+ printk(KERN_NOTICE "Not marking the space at 0x%08x as dirty because the flash driver returned retlen zero\n", flash_ofs);
}
- if (!retried && (raw = jffs2_alloc_raw_node_ref())) {
+ if (!retried) {
/* Try to reallocate space and retry */
uint32_t dummy;
struct jffs2_eraseblock *jeb = &c->blocks[flash_ofs / c->sector_size];
@@ -301,39 +266,33 @@ struct jffs2_full_dirent *jffs2_write_dirent(struct jffs2_sb_info *c, struct jff
jffs2_dbg_acct_paranoia_check(c, jeb);
if (alloc_mode == ALLOC_GC) {
- ret = jffs2_reserve_space_gc(c, sizeof(*rd) + namelen, &flash_ofs,
- &dummy, JFFS2_SUMMARY_DIRENT_SIZE(namelen));
+ ret = jffs2_reserve_space_gc(c, sizeof(*rd) + namelen, &dummy,
+ JFFS2_SUMMARY_DIRENT_SIZE(namelen));
} else {
/* Locking pain */
up(&f->sem);
jffs2_complete_reservation(c);
- ret = jffs2_reserve_space(c, sizeof(*rd) + namelen, &flash_ofs,
- &dummy, alloc_mode, JFFS2_SUMMARY_DIRENT_SIZE(namelen));
+ ret = jffs2_reserve_space(c, sizeof(*rd) + namelen, &dummy,
+ alloc_mode, JFFS2_SUMMARY_DIRENT_SIZE(namelen));
down(&f->sem);
}
if (!ret) {
+ flash_ofs = write_ofs(c);
D1(printk(KERN_DEBUG "Allocated space at 0x%08x to retry failed write.\n", flash_ofs));
jffs2_dbg_acct_sanity_check(c,jeb);
jffs2_dbg_acct_paranoia_check(c, jeb);
goto retry;
}
D1(printk(KERN_DEBUG "Failed to allocate space to retry failed write: %d!\n", ret));
- jffs2_free_raw_node_ref(raw);
}
/* Release the full_dnode which is now useless, and return */
jffs2_free_full_dirent(fd);
return ERR_PTR(ret?ret:-EIO);
}
/* Mark the space used */
- raw->flash_offset |= REF_PRISTINE;
- jffs2_add_physical_node_ref(c, raw);
-
- spin_lock(&c->erase_completion_lock);
- raw->next_in_ino = f->inocache->nodes;
- f->inocache->nodes = raw;
- spin_unlock(&c->erase_completion_lock);
+ fd->raw = jffs2_add_physical_node_ref(c, flash_ofs | REF_PRISTINE, PAD(sizeof(*rd)+namelen), f->inocache);
if (retried) {
jffs2_dbg_acct_sanity_check(c,NULL);
@@ -359,14 +318,14 @@ int jffs2_write_inode_range(struct jffs2_sb_info *c, struct jffs2_inode_info *f,
struct jffs2_full_dnode *fn;
unsigned char *comprbuf = NULL;
uint16_t comprtype = JFFS2_COMPR_NONE;
- uint32_t phys_ofs, alloclen;
+ uint32_t alloclen;
uint32_t datalen, cdatalen;
int retried = 0;
retry:
D2(printk(KERN_DEBUG "jffs2_commit_write() loop: 0x%x to write to 0x%x\n", writelen, offset));
- ret = jffs2_reserve_space(c, sizeof(*ri) + JFFS2_MIN_DATA_LEN, &phys_ofs,
+ ret = jffs2_reserve_space(c, sizeof(*ri) + JFFS2_MIN_DATA_LEN,
&alloclen, ALLOC_NORMAL, JFFS2_SUMMARY_INODE_SIZE);
if (ret) {
D1(printk(KERN_DEBUG "jffs2_reserve_space returned %d\n", ret));
@@ -394,7 +353,7 @@ int jffs2_write_inode_range(struct jffs2_sb_info *c, struct jffs2_inode_info *f,
ri->node_crc = cpu_to_je32(crc32(0, ri, sizeof(*ri)-8));
ri->data_crc = cpu_to_je32(crc32(0, comprbuf, cdatalen));
- fn = jffs2_write_dnode(c, f, ri, comprbuf, cdatalen, phys_ofs, ALLOC_NORETRY);
+ fn = jffs2_write_dnode(c, f, ri, comprbuf, cdatalen, ALLOC_NORETRY);
jffs2_free_comprbuf(comprbuf, buf);
@@ -448,13 +407,13 @@ int jffs2_do_create(struct jffs2_sb_info *c, struct jffs2_inode_info *dir_f, str
struct jffs2_raw_dirent *rd;
struct jffs2_full_dnode *fn;
struct jffs2_full_dirent *fd;
- uint32_t alloclen, phys_ofs;
+ uint32_t alloclen;
int ret;
/* Try to reserve enough space for both node and dirent.
* Just the node will do for now, though
*/
- ret = jffs2_reserve_space(c, sizeof(*ri), &phys_ofs, &alloclen, ALLOC_NORMAL,
+ ret = jffs2_reserve_space(c, sizeof(*ri), &alloclen, ALLOC_NORMAL,
JFFS2_SUMMARY_INODE_SIZE);
D1(printk(KERN_DEBUG "jffs2_do_create(): reserved 0x%x bytes\n", alloclen));
if (ret) {
@@ -465,7 +424,7 @@ int jffs2_do_create(struct jffs2_sb_info *c, struct jffs2_inode_info *dir_f, str
ri->data_crc = cpu_to_je32(0);
ri->node_crc = cpu_to_je32(crc32(0, ri, sizeof(*ri)-8));
- fn = jffs2_write_dnode(c, f, ri, NULL, 0, phys_ofs, ALLOC_NORMAL);
+ fn = jffs2_write_dnode(c, f, ri, NULL, 0, ALLOC_NORMAL);
D1(printk(KERN_DEBUG "jffs2_do_create created file with mode 0x%x\n",
jemode_to_cpu(ri->mode)));
@@ -484,7 +443,7 @@ int jffs2_do_create(struct jffs2_sb_info *c, struct jffs2_inode_info *dir_f, str
up(&f->sem);
jffs2_complete_reservation(c);
- ret = jffs2_reserve_space(c, sizeof(*rd)+namelen, &phys_ofs, &alloclen,
+ ret = jffs2_reserve_space(c, sizeof(*rd)+namelen, &alloclen,
ALLOC_NORMAL, JFFS2_SUMMARY_DIRENT_SIZE(namelen));
if (ret) {
@@ -516,7 +475,7 @@ int jffs2_do_create(struct jffs2_sb_info *c, struct jffs2_inode_info *dir_f, str
rd->node_crc = cpu_to_je32(crc32(0, rd, sizeof(*rd)-8));
rd->name_crc = cpu_to_je32(crc32(0, name, namelen));
- fd = jffs2_write_dirent(c, dir_f, rd, name, namelen, phys_ofs, ALLOC_NORMAL);
+ fd = jffs2_write_dirent(c, dir_f, rd, name, namelen, ALLOC_NORMAL);
jffs2_free_raw_dirent(rd);
@@ -545,7 +504,7 @@ int jffs2_do_unlink(struct jffs2_sb_info *c, struct jffs2_inode_info *dir_f,
{
struct jffs2_raw_dirent *rd;
struct jffs2_full_dirent *fd;
- uint32_t alloclen, phys_ofs;
+ uint32_t alloclen;
int ret;
if (1 /* alternative branch needs testing */ ||
@@ -556,7 +515,7 @@ int jffs2_do_unlink(struct jffs2_sb_info *c, struct jffs2_inode_info *dir_f,
if (!rd)
return -ENOMEM;
- ret = jffs2_reserve_space(c, sizeof(*rd)+namelen, &phys_ofs, &alloclen,
+ ret = jffs2_reserve_space(c, sizeof(*rd)+namelen, &alloclen,
ALLOC_DELETION, JFFS2_SUMMARY_DIRENT_SIZE(namelen));
if (ret) {
jffs2_free_raw_dirent(rd);
@@ -580,7 +539,7 @@ int jffs2_do_unlink(struct jffs2_sb_info *c, struct jffs2_inode_info *dir_f,
rd->node_crc = cpu_to_je32(crc32(0, rd, sizeof(*rd)-8));
rd->name_crc = cpu_to_je32(crc32(0, name, namelen));
- fd = jffs2_write_dirent(c, dir_f, rd, name, namelen, phys_ofs, ALLOC_DELETION);
+ fd = jffs2_write_dirent(c, dir_f, rd, name, namelen, ALLOC_DELETION);
jffs2_free_raw_dirent(rd);
@@ -659,14 +618,14 @@ int jffs2_do_link (struct jffs2_sb_info *c, struct jffs2_inode_info *dir_f, uint
{
struct jffs2_raw_dirent *rd;
struct jffs2_full_dirent *fd;
- uint32_t alloclen, phys_ofs;
+ uint32_t alloclen;
int ret;
rd = jffs2_alloc_raw_dirent();
if (!rd)
return -ENOMEM;
- ret = jffs2_reserve_space(c, sizeof(*rd)+namelen, &phys_ofs, &alloclen,
+ ret = jffs2_reserve_space(c, sizeof(*rd)+namelen, &alloclen,
ALLOC_NORMAL, JFFS2_SUMMARY_DIRENT_SIZE(namelen));
if (ret) {
jffs2_free_raw_dirent(rd);
@@ -692,7 +651,7 @@ int jffs2_do_link (struct jffs2_sb_info *c, struct jffs2_inode_info *dir_f, uint
rd->node_crc = cpu_to_je32(crc32(0, rd, sizeof(*rd)-8));
rd->name_crc = cpu_to_je32(crc32(0, name, namelen));
- fd = jffs2_write_dirent(c, dir_f, rd, name, namelen, phys_ofs, ALLOC_NORMAL);
+ fd = jffs2_write_dirent(c, dir_f, rd, name, namelen, ALLOC_NORMAL);
jffs2_free_raw_dirent(rd);
diff --git a/fs/jffs2/xattr.c b/fs/jffs2/xattr.c
new file mode 100644
index 00000000000..2d82e250be3
--- /dev/null
+++ b/fs/jffs2/xattr.c
@@ -0,0 +1,1238 @@
+/*
+ * JFFS2 -- Journalling Flash File System, Version 2.
+ *
+ * Copyright (C) 2006 NEC Corporation
+ *
+ * Created by KaiGai Kohei <kaigai@ak.jp.nec.com>
+ *
+ * For licensing information, see the file 'LICENCE' in this directory.
+ *
+ */
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/fs.h>
+#include <linux/time.h>
+#include <linux/pagemap.h>
+#include <linux/highmem.h>
+#include <linux/crc32.h>
+#include <linux/jffs2.h>
+#include <linux/xattr.h>
+#include <linux/mtd/mtd.h>
+#include "nodelist.h"
+/* -------- xdatum related functions ----------------
+ * xattr_datum_hashkey(xprefix, xname, xvalue, xsize)
+ * is used to calculate the xdatum hashkey. The remainder of the hashkey modulo XATTRINDEX_HASHSIZE is
+ * the index of the xattr name/value pair cache (c->xattrindex).
+ * unload_xattr_datum(c, xd)
+ * is used to release xattr name/value pair and detach from c->xattrindex.
+ * reclaim_xattr_datum(c)
+ * is used to reclaim xattr name/value pairs from the name/value pair cache when the
+ * memory used by the cache exceeds c->xdatum_mem_threshold. Currently, this threshold
+ * is hard coded at 32KiB.
+ * delete_xattr_datum_node(c, xd)
+ * is used to delete a jffs2 node that is dominated by an xdatum. When EBS (Erase Block
+ * Summary) is enabled, it overwrites the obsolete node itself.
+ * delete_xattr_datum(c, xd)
+ * is used to delete a jffs2_xattr_datum object. It must be called when the reference
+ * counter (the number of jffs2_xattr_ref objects that refer to this xdatum) is zero.
+ * do_verify_xattr_datum(c, xd)
+ * is used to load the xdatum information (without the name/value pair) from the medium.
+ * It is necessary once, because that information is not collected during the mounting
+ * process when EBS is enabled.
+ * 0 is returned on success. A negative return value means a recoverable error, and a
+ * positive return value means an unrecoverable error. Thus, the caller must remove this
+ * xdatum and its xrefs when a positive value is returned.
+ * do_load_xattr_datum(c, xd)
+ * is used to load the name/value pair from the medium.
+ * The meaning of the return value is the same as for do_verify_xattr_datum().
+ * load_xattr_datum(c, xd)
+ * is a wrapper around do_verify_xattr_datum() and do_load_xattr_datum().
+ * If the xdatum still needs verification, do_verify_xattr_datum() is called before
+ * do_load_xattr_datum(). The meaning of the return value is the same as above.
+ * save_xattr_datum(c, xd)
+ * is used to write the xdatum to the medium. xd->version will be incremented.
+ * create_xattr_datum(c, xprefix, xname, xvalue, xsize)
+ * is used to create a new xdatum and write it to the medium.
+ * -------------------------------------------------- */
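The hashing scheme described above boils down to two CRCs seeded with the prefix id, with the bucket chosen by a modulo. A minimal user-space sketch, assuming zlib's crc32(); the table size and sample data are illustrative placeholders, not values from this patch:

#include <stdio.h>
#include <string.h>
#include <zlib.h>                   /* link with -lz */

#define SKETCH_HASHSIZE 57          /* stand-in for XATTRINDEX_HASHSIZE */

/* xor of two CRCs seeded with the prefix id, mirroring xattr_datum_hashkey() */
static unsigned long sketch_hashkey(int xprefix, const char *xname,
                                    const char *xvalue, int xsize)
{
    return crc32(xprefix, (const unsigned char *)xname, strlen(xname)) ^
           crc32(xprefix, (const unsigned char *)xvalue, xsize);
}

int main(void)
{
    unsigned long key = sketch_hashkey(1, "comment", "hello world", 11);

    /* the remainder modulo the table size selects the name/value cache bucket */
    printf("hashkey=%#lx bucket=%lu\n", key, key % SKETCH_HASHSIZE);
    return 0;
}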
+
+static uint32_t xattr_datum_hashkey(int xprefix, const char *xname, const char *xvalue, int xsize)
+{
+ int name_len = strlen(xname);
+
+ return crc32(xprefix, xname, name_len) ^ crc32(xprefix, xvalue, xsize);
+}
+
+static void unload_xattr_datum(struct jffs2_sb_info *c, struct jffs2_xattr_datum *xd)
+{
+ /* must be called under down_write(xattr_sem) */
+ D1(dbg_xattr("%s: xid=%u, version=%u\n", __FUNCTION__, xd->xid, xd->version));
+ if (xd->xname) {
+ c->xdatum_mem_usage -= (xd->name_len + 1 + xd->value_len);
+ kfree(xd->xname);
+ }
+
+ list_del_init(&xd->xindex);
+ xd->hashkey = 0;
+ xd->xname = NULL;
+ xd->xvalue = NULL;
+}
+
+static void reclaim_xattr_datum(struct jffs2_sb_info *c)
+{
+ /* must be called under down_write(xattr_sem) */
+ struct jffs2_xattr_datum *xd, *_xd;
+ uint32_t target, before;
+ static int index = 0;
+ int count;
+
+ if (c->xdatum_mem_threshold > c->xdatum_mem_usage)
+ return;
+
+ before = c->xdatum_mem_usage;
+ target = c->xdatum_mem_usage * 4 / 5; /* 20% reduction */
+ for (count = 0; count < XATTRINDEX_HASHSIZE; count++) {
+ list_for_each_entry_safe(xd, _xd, &c->xattrindex[index], xindex) {
+ if (xd->flags & JFFS2_XFLAGS_HOT) {
+ xd->flags &= ~JFFS2_XFLAGS_HOT;
+ } else if (!(xd->flags & JFFS2_XFLAGS_BIND)) {
+ unload_xattr_datum(c, xd);
+ }
+ if (c->xdatum_mem_usage <= target)
+ goto out;
+ }
+ index = (index+1) % XATTRINDEX_HASHSIZE;
+ }
+ out:
+ JFFS2_NOTICE("xdatum_mem_usage from %u byte to %u byte (%u byte reclaimed)\n",
+ before, c->xdatum_mem_usage, before - c->xdatum_mem_usage);
+}
+
+static void delete_xattr_datum_node(struct jffs2_sb_info *c, struct jffs2_xattr_datum *xd)
+{
+ /* must be called under down_write(xattr_sem) */
+ struct jffs2_raw_xattr rx;
+ size_t length;
+ int rc;
+
+ if (!xd->node) {
+ JFFS2_WARNING("xdatum (xid=%u) is removed twice.\n", xd->xid);
+ return;
+ }
+ if (jffs2_sum_active()) {
+ memset(&rx, 0xff, sizeof(struct jffs2_raw_xattr));
+ rc = jffs2_flash_read(c, ref_offset(xd->node),
+ sizeof(struct jffs2_unknown_node),
+ &length, (char *)&rx);
+ if (rc || length != sizeof(struct jffs2_unknown_node)) {
+ JFFS2_ERROR("jffs2_flash_read()=%d, req=%zu, read=%zu at %#08x\n",
+ rc, sizeof(struct jffs2_unknown_node),
+ length, ref_offset(xd->node));
+ }
+ rc = jffs2_flash_write(c, ref_offset(xd->node), sizeof(rx),
+ &length, (char *)&rx);
+ if (rc || length != sizeof(struct jffs2_raw_xattr)) {
+ JFFS2_ERROR("jffs2_flash_write()=%d, req=%zu, wrote=%zu ar %#08x\n",
+ rc, sizeof(rx), length, ref_offset(xd->node));
+ }
+ }
+ spin_lock(&c->erase_completion_lock);
+ xd->node->next_in_ino = NULL;
+ spin_unlock(&c->erase_completion_lock);
+ jffs2_mark_node_obsolete(c, xd->node);
+ xd->node = NULL;
+}
+
+static void delete_xattr_datum(struct jffs2_sb_info *c, struct jffs2_xattr_datum *xd)
+{
+ /* must be called under down_write(xattr_sem) */
+ BUG_ON(xd->refcnt);
+
+ unload_xattr_datum(c, xd);
+ if (xd->node) {
+ delete_xattr_datum_node(c, xd);
+ xd->node = NULL;
+ }
+ jffs2_free_xattr_datum(xd);
+}
+
+static int do_verify_xattr_datum(struct jffs2_sb_info *c, struct jffs2_xattr_datum *xd)
+{
+ /* must be called under down_write(xattr_sem) */
+ struct jffs2_eraseblock *jeb;
+ struct jffs2_raw_xattr rx;
+ size_t readlen;
+ uint32_t crc, totlen;
+ int rc;
+
+ BUG_ON(!xd->node);
+ BUG_ON(ref_flags(xd->node) != REF_UNCHECKED);
+
+ rc = jffs2_flash_read(c, ref_offset(xd->node), sizeof(rx), &readlen, (char *)&rx);
+ if (rc || readlen != sizeof(rx)) {
+ JFFS2_WARNING("jffs2_flash_read()=%d, req=%zu, read=%zu at %#08x\n",
+ rc, sizeof(rx), readlen, ref_offset(xd->node));
+ return rc ? rc : -EIO;
+ }
+ crc = crc32(0, &rx, sizeof(rx) - 4);
+ if (crc != je32_to_cpu(rx.node_crc)) {
+ if (je32_to_cpu(rx.node_crc) != 0xffffffff)
+ JFFS2_ERROR("node CRC failed at %#08x, read=%#08x, calc=%#08x\n",
+ ref_offset(xd->node), je32_to_cpu(rx.hdr_crc), crc);
+ return EIO;
+ }
+ totlen = PAD(sizeof(rx) + rx.name_len + 1 + je16_to_cpu(rx.value_len));
+ if (je16_to_cpu(rx.magic) != JFFS2_MAGIC_BITMASK
+ || je16_to_cpu(rx.nodetype) != JFFS2_NODETYPE_XATTR
+ || je32_to_cpu(rx.totlen) != totlen
+ || je32_to_cpu(rx.xid) != xd->xid
+ || je32_to_cpu(rx.version) != xd->version) {
+ JFFS2_ERROR("inconsistent xdatum at %#08x, magic=%#04x/%#04x, "
+ "nodetype=%#04x/%#04x, totlen=%u/%u, xid=%u/%u, version=%u/%u\n",
+ ref_offset(xd->node), je16_to_cpu(rx.magic), JFFS2_MAGIC_BITMASK,
+ je16_to_cpu(rx.nodetype), JFFS2_NODETYPE_XATTR,
+ je32_to_cpu(rx.totlen), totlen,
+ je32_to_cpu(rx.xid), xd->xid,
+ je32_to_cpu(rx.version), xd->version);
+ return EIO;
+ }
+ xd->xprefix = rx.xprefix;
+ xd->name_len = rx.name_len;
+ xd->value_len = je16_to_cpu(rx.value_len);
+ xd->data_crc = je32_to_cpu(rx.data_crc);
+
+ /* This JFFS2_NODETYPE_XATTR node is checked */
+ jeb = &c->blocks[ref_offset(xd->node) / c->sector_size];
+ totlen = PAD(je32_to_cpu(rx.totlen));
+
+ spin_lock(&c->erase_completion_lock);
+ c->unchecked_size -= totlen; c->used_size += totlen;
+ jeb->unchecked_size -= totlen; jeb->used_size += totlen;
+ xd->node->flash_offset = ref_offset(xd->node) | REF_PRISTINE;
+ spin_unlock(&c->erase_completion_lock);
+
+ /* unchecked xdatum is chained with c->xattr_unchecked */
+ list_del_init(&xd->xindex);
+
+ dbg_xattr("success on verfying xdatum (xid=%u, version=%u)\n",
+ xd->xid, xd->version);
+
+ return 0;
+}
+
+static int do_load_xattr_datum(struct jffs2_sb_info *c, struct jffs2_xattr_datum *xd)
+{
+ /* must be called under down_write(xattr_sem) */
+ char *data;
+ size_t readlen;
+ uint32_t crc, length;
+ int i, ret, retry = 0;
+
+ BUG_ON(!xd->node);
+ BUG_ON(ref_flags(xd->node) != REF_PRISTINE);
+ BUG_ON(!list_empty(&xd->xindex));
+ retry:
+ length = xd->name_len + 1 + xd->value_len;
+ data = kmalloc(length, GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ ret = jffs2_flash_read(c, ref_offset(xd->node)+sizeof(struct jffs2_raw_xattr),
+ length, &readlen, data);
+
+ if (ret || length!=readlen) {
+ JFFS2_WARNING("jffs2_flash_read() returned %d, request=%d, readlen=%zu, at %#08x\n",
+ ret, length, readlen, ref_offset(xd->node));
+ kfree(data);
+ return ret ? ret : -EIO;
+ }
+
+ data[xd->name_len] = '\0';
+ crc = crc32(0, data, length);
+ if (crc != xd->data_crc) {
+ JFFS2_WARNING("node CRC failed (JFFS2_NODETYPE_XREF)"
+ " at %#08x, read: 0x%08x calculated: 0x%08x\n",
+ ref_offset(xd->node), xd->data_crc, crc);
+ kfree(data);
+ return EIO;
+ }
+
+ xd->flags |= JFFS2_XFLAGS_HOT;
+ xd->xname = data;
+ xd->xvalue = data + xd->name_len+1;
+
+ c->xdatum_mem_usage += length;
+
+ xd->hashkey = xattr_datum_hashkey(xd->xprefix, xd->xname, xd->xvalue, xd->value_len);
+ i = xd->hashkey % XATTRINDEX_HASHSIZE;
+ list_add(&xd->xindex, &c->xattrindex[i]);
+ if (!retry) {
+ retry = 1;
+ reclaim_xattr_datum(c);
+ if (!xd->xname)
+ goto retry;
+ }
+
+ dbg_xattr("success on loading xdatum (xid=%u, xprefix=%u, xname='%s')\n",
+ xd->xid, xd->xprefix, xd->xname);
+
+ return 0;
+}
+
+static int load_xattr_datum(struct jffs2_sb_info *c, struct jffs2_xattr_datum *xd)
+{
+ /* must be called under down_write(xattr_sem);
+ * rc < 0 : recoverable error, try again
+ * rc = 0 : success
+ * rc > 0 : Unrecoverable error, this node should be deleted.
+ */
+ int rc = 0;
+ BUG_ON(xd->xname);
+ if (!xd->node)
+ return EIO;
+ if (unlikely(ref_flags(xd->node) != REF_PRISTINE)) {
+ rc = do_verify_xattr_datum(c, xd);
+ if (rc > 0) {
+ list_del_init(&xd->xindex);
+ delete_xattr_datum_node(c, xd);
+ }
+ }
+ if (!rc)
+ rc = do_load_xattr_datum(c, xd);
+ return rc;
+}
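A small caller-side sketch of the return convention documented above (negative: recoverable, retry later; zero: success; positive: unrecoverable, drop the node); the types and the trivial loader are illustrative stand-ins, not JFFS2 code:

#include <stdio.h>

struct sketch_datum {
    int loaded;                     /* stand-in for xd->xname being cached */
};

/* stand-in for load_xattr_datum(); here it simply succeeds */
static int sketch_load(struct sketch_datum *xd)
{
    xd->loaded = 1;
    return 0;
}

int main(void)
{
    struct sketch_datum xd = { 0 };
    int rc = sketch_load(&xd);

    if (rc > 0)
        printf("unrecoverable: drop the xdatum and its xrefs\n");
    else if (rc < 0)
        printf("recoverable: report %d and retry later\n", rc);
    else
        printf("success: name/value pair is now usable\n");
    return 0;
}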
+
+static int save_xattr_datum(struct jffs2_sb_info *c, struct jffs2_xattr_datum *xd)
+{
+ /* must be called under down_write(xattr_sem) */
+ struct jffs2_raw_node_ref *raw;
+ struct jffs2_raw_xattr rx;
+ struct kvec vecs[2];
+ size_t length;
+ int rc, totlen;
+ uint32_t phys_ofs = write_ofs(c);
+
+ BUG_ON(!xd->xname);
+
+ vecs[0].iov_base = &rx;
+ vecs[0].iov_len = PAD(sizeof(rx));
+ vecs[1].iov_base = xd->xname;
+ vecs[1].iov_len = xd->name_len + 1 + xd->value_len;
+ totlen = vecs[0].iov_len + vecs[1].iov_len;
+
+ /* Setup raw-xattr */
+ rx.magic = cpu_to_je16(JFFS2_MAGIC_BITMASK);
+ rx.nodetype = cpu_to_je16(JFFS2_NODETYPE_XATTR);
+ rx.totlen = cpu_to_je32(PAD(totlen));
+ rx.hdr_crc = cpu_to_je32(crc32(0, &rx, sizeof(struct jffs2_unknown_node) - 4));
+
+ rx.xid = cpu_to_je32(xd->xid);
+ rx.version = cpu_to_je32(++xd->version);
+ rx.xprefix = xd->xprefix;
+ rx.name_len = xd->name_len;
+ rx.value_len = cpu_to_je16(xd->value_len);
+ rx.data_crc = cpu_to_je32(crc32(0, vecs[1].iov_base, vecs[1].iov_len));
+ rx.node_crc = cpu_to_je32(crc32(0, &rx, sizeof(struct jffs2_raw_xattr) - 4));
+
+ rc = jffs2_flash_writev(c, vecs, 2, phys_ofs, &length, 0);
+ if (rc || totlen != length) {
+ JFFS2_WARNING("jffs2_flash_writev()=%d, req=%u, wrote=%zu, at %#08x\n",
+ rc, totlen, length, phys_ofs);
+ rc = rc ? rc : -EIO;
+ if (length)
+ jffs2_add_physical_node_ref(c, phys_ofs | REF_OBSOLETE, PAD(totlen), NULL);
+
+ return rc;
+ }
+
+ /* success */
+ raw = jffs2_add_physical_node_ref(c, phys_ofs | REF_PRISTINE, PAD(totlen), NULL);
+ /* FIXME */ raw->next_in_ino = (void *)xd;
+
+ if (xd->node)
+ delete_xattr_datum_node(c, xd);
+ xd->node = raw;
+
+ dbg_xattr("success on saving xdatum (xid=%u, version=%u, xprefix=%u, xname='%s')\n",
+ xd->xid, xd->version, xd->xprefix, xd->xname);
+
+ return 0;
+}
+
+static struct jffs2_xattr_datum *create_xattr_datum(struct jffs2_sb_info *c,
+ int xprefix, const char *xname,
+ const char *xvalue, int xsize)
+{
+ /* must be called under down_write(xattr_sem) */
+ struct jffs2_xattr_datum *xd;
+ uint32_t hashkey, name_len;
+ char *data;
+ int i, rc;
+
+ /* Search xattr_datum has same xname/xvalue by index */
+ hashkey = xattr_datum_hashkey(xprefix, xname, xvalue, xsize);
+ i = hashkey % XATTRINDEX_HASHSIZE;
+ list_for_each_entry(xd, &c->xattrindex[i], xindex) {
+ if (xd->hashkey==hashkey
+ && xd->xprefix==xprefix
+ && xd->value_len==xsize
+ && !strcmp(xd->xname, xname)
+ && !memcmp(xd->xvalue, xvalue, xsize)) {
+ xd->refcnt++;
+ return xd;
+ }
+ }
+
+ /* Not found, Create NEW XATTR-Cache */
+ name_len = strlen(xname);
+
+ xd = jffs2_alloc_xattr_datum();
+ if (!xd)
+ return ERR_PTR(-ENOMEM);
+
+ data = kmalloc(name_len + 1 + xsize, GFP_KERNEL);
+ if (!data) {
+ jffs2_free_xattr_datum(xd);
+ return ERR_PTR(-ENOMEM);
+ }
+ strcpy(data, xname);
+ memcpy(data + name_len + 1, xvalue, xsize);
+
+ xd->refcnt = 1;
+ xd->xid = ++c->highest_xid;
+ xd->flags |= JFFS2_XFLAGS_HOT;
+ xd->xprefix = xprefix;
+
+ xd->hashkey = hashkey;
+ xd->xname = data;
+ xd->xvalue = data + name_len + 1;
+ xd->name_len = name_len;
+ xd->value_len = xsize;
+ xd->data_crc = crc32(0, data, xd->name_len + 1 + xd->value_len);
+
+ rc = save_xattr_datum(c, xd);
+ if (rc) {
+ kfree(xd->xname);
+ jffs2_free_xattr_datum(xd);
+ return ERR_PTR(rc);
+ }
+
+ /* Insert Hash Index */
+ i = hashkey % XATTRINDEX_HASHSIZE;
+ list_add(&xd->xindex, &c->xattrindex[i]);
+
+ c->xdatum_mem_usage += (xd->name_len + 1 + xd->value_len);
+ reclaim_xattr_datum(c);
+
+ return xd;
+}
+
+/* -------- xref related functions ------------------
+ * verify_xattr_ref(c, ref)
+ * is used to load xref information from the medium. Because summary data does not
+ * contain the xid/ino, each xref must be verified once during the mounting process.
+ * delete_xattr_ref_node(c, ref)
+ * is used to delete a jffs2 node that is dominated by an xref. When EBS is enabled,
+ * it overwrites the obsolete node itself.
+ * delete_xattr_ref(c, ref)
+ * is used to delete a jffs2_xattr_ref object. If the reference counter of the xdatum
+ * referred to by this xref drops to 0, delete_xattr_datum() is called as well.
+ * save_xattr_ref(c, ref)
+ * is used to write the xref to the medium.
+ * create_xattr_ref(c, ic, xd)
+ * is used to create a new xref and write it to the medium.
+ * jffs2_xattr_delete_inode(c, ic)
+ * is called to remove the xrefs related to an obsolete inode when the inode is unlinked.
+ * jffs2_xattr_free_inode(c, ic)
+ * is called to release xattr-related objects when unmounting.
+ * check_xattr_ref_inode(c, ic)
+ * is used to confirm that the inode does not have duplicate xattr name/value pairs.
+ * -------------------------------------------------- */
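A self-contained sketch of the ownership model described above: several xrefs may share a single xdatum, and the datum is only torn down once the last reference to it is dropped. All names below are illustrative:

#include <stdio.h>
#include <stdlib.h>

struct sketch_datum {
    unsigned int refcnt;
};

struct sketch_ref {
    struct sketch_datum *xd;
};

/* analogous to delete_xattr_ref(): dropping the last reference frees the datum */
static void sketch_drop_ref(struct sketch_ref *ref)
{
    if (--ref->xd->refcnt == 0) {
        printf("last reference gone, freeing datum\n");
        free(ref->xd);
    }
    free(ref);
}

int main(void)
{
    struct sketch_datum *xd = calloc(1, sizeof(*xd));
    struct sketch_ref *r1 = calloc(1, sizeof(*r1));
    struct sketch_ref *r2 = calloc(1, sizeof(*r2));

    if (!xd || !r1 || !r2)
        return 1;
    r1->xd = r2->xd = xd;
    xd->refcnt = 2;

    sketch_drop_ref(r1);            /* datum survives, still referenced by r2 */
    sketch_drop_ref(r2);            /* datum freed here */
    return 0;
}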
+static int verify_xattr_ref(struct jffs2_sb_info *c, struct jffs2_xattr_ref *ref)
+{
+ struct jffs2_eraseblock *jeb;
+ struct jffs2_raw_xref rr;
+ size_t readlen;
+ uint32_t crc, totlen;
+ int rc;
+
+ BUG_ON(ref_flags(ref->node) != REF_UNCHECKED);
+
+ rc = jffs2_flash_read(c, ref_offset(ref->node), sizeof(rr), &readlen, (char *)&rr);
+ if (rc || sizeof(rr) != readlen) {
+ JFFS2_WARNING("jffs2_flash_read()=%d, req=%zu, read=%zu, at %#08x\n",
+ rc, sizeof(rr), readlen, ref_offset(ref->node));
+ return rc ? rc : -EIO;
+ }
+ /* obsolete node */
+ crc = crc32(0, &rr, sizeof(rr) - 4);
+ if (crc != je32_to_cpu(rr.node_crc)) {
+ if (je32_to_cpu(rr.node_crc) != 0xffffffff)
+ JFFS2_ERROR("node CRC failed at %#08x, read=%#08x, calc=%#08x\n",
+ ref_offset(ref->node), je32_to_cpu(rr.node_crc), crc);
+ return EIO;
+ }
+ if (je16_to_cpu(rr.magic) != JFFS2_MAGIC_BITMASK
+ || je16_to_cpu(rr.nodetype) != JFFS2_NODETYPE_XREF
+ || je32_to_cpu(rr.totlen) != PAD(sizeof(rr))) {
+ JFFS2_ERROR("inconsistent xref at %#08x, magic=%#04x/%#04x, "
+ "nodetype=%#04x/%#04x, totlen=%u/%zu\n",
+ ref_offset(ref->node), je16_to_cpu(rr.magic), JFFS2_MAGIC_BITMASK,
+ je16_to_cpu(rr.nodetype), JFFS2_NODETYPE_XREF,
+ je32_to_cpu(rr.totlen), PAD(sizeof(rr)));
+ return EIO;
+ }
+ ref->ino = je32_to_cpu(rr.ino);
+ ref->xid = je32_to_cpu(rr.xid);
+
+ /* fixup superblock/eraseblock info */
+ jeb = &c->blocks[ref_offset(ref->node) / c->sector_size];
+ totlen = PAD(sizeof(rr));
+
+ spin_lock(&c->erase_completion_lock);
+ c->unchecked_size -= totlen; c->used_size += totlen;
+ jeb->unchecked_size -= totlen; jeb->used_size += totlen;
+ ref->node->flash_offset = ref_offset(ref->node) | REF_PRISTINE;
+ spin_unlock(&c->erase_completion_lock);
+
+ dbg_xattr("success on verifying xref (ino=%u, xid=%u) at %#08x\n",
+ ref->ino, ref->xid, ref_offset(ref->node));
+ return 0;
+}
+
+static void delete_xattr_ref_node(struct jffs2_sb_info *c, struct jffs2_xattr_ref *ref)
+{
+ struct jffs2_raw_xref rr;
+ size_t length;
+ int rc;
+
+ if (jffs2_sum_active()) {
+ memset(&rr, 0xff, sizeof(rr));
+ rc = jffs2_flash_read(c, ref_offset(ref->node),
+ sizeof(struct jffs2_unknown_node),
+ &length, (char *)&rr);
+ if (rc || length != sizeof(struct jffs2_unknown_node)) {
+ JFFS2_ERROR("jffs2_flash_read()=%d, req=%zu, read=%zu at %#08x\n",
+ rc, sizeof(struct jffs2_unknown_node),
+ length, ref_offset(ref->node));
+ }
+ rc = jffs2_flash_write(c, ref_offset(ref->node), sizeof(rr),
+ &length, (char *)&rr);
+ if (rc || length != sizeof(struct jffs2_raw_xref)) {
+ JFFS2_ERROR("jffs2_flash_write()=%d, req=%zu, wrote=%zu at %#08x\n",
+ rc, sizeof(rr), length, ref_offset(ref->node));
+ }
+ }
+ spin_lock(&c->erase_completion_lock);
+ ref->node->next_in_ino = NULL;
+ spin_unlock(&c->erase_completion_lock);
+ jffs2_mark_node_obsolete(c, ref->node);
+ ref->node = NULL;
+}
+
+static void delete_xattr_ref(struct jffs2_sb_info *c, struct jffs2_xattr_ref *ref)
+{
+ /* must be called under down_write(xattr_sem) */
+ struct jffs2_xattr_datum *xd;
+
+ BUG_ON(!ref->node);
+ delete_xattr_ref_node(c, ref);
+
+ xd = ref->xd;
+ xd->refcnt--;
+ if (!xd->refcnt)
+ delete_xattr_datum(c, xd);
+ jffs2_free_xattr_ref(ref);
+}
+
+static int save_xattr_ref(struct jffs2_sb_info *c, struct jffs2_xattr_ref *ref)
+{
+ /* must be called under down_write(xattr_sem) */
+ struct jffs2_raw_node_ref *raw;
+ struct jffs2_raw_xref rr;
+ size_t length;
+ uint32_t phys_ofs = write_ofs(c);
+ int ret;
+
+ rr.magic = cpu_to_je16(JFFS2_MAGIC_BITMASK);
+ rr.nodetype = cpu_to_je16(JFFS2_NODETYPE_XREF);
+ rr.totlen = cpu_to_je32(PAD(sizeof(rr)));
+ rr.hdr_crc = cpu_to_je32(crc32(0, &rr, sizeof(struct jffs2_unknown_node) - 4));
+
+ rr.ino = cpu_to_je32(ref->ic->ino);
+ rr.xid = cpu_to_je32(ref->xd->xid);
+ rr.node_crc = cpu_to_je32(crc32(0, &rr, sizeof(rr) - 4));
+
+ ret = jffs2_flash_write(c, phys_ofs, sizeof(rr), &length, (char *)&rr);
+ if (ret || sizeof(rr) != length) {
+ JFFS2_WARNING("jffs2_flash_write() returned %d, request=%zu, retlen=%zu, at %#08x\n",
+ ret, sizeof(rr), length, phys_ofs);
+ ret = ret ? ret : -EIO;
+ if (length)
+ jffs2_add_physical_node_ref(c, phys_ofs | REF_OBSOLETE, PAD(sizeof(rr)), NULL);
+
+ return ret;
+ }
+
+ raw = jffs2_add_physical_node_ref(c, phys_ofs | REF_PRISTINE, PAD(sizeof(rr)), NULL);
+ /* FIXME */ raw->next_in_ino = (void *)ref;
+ if (ref->node)
+ delete_xattr_ref_node(c, ref);
+ ref->node = raw;
+
+ dbg_xattr("success on saving xref (ino=%u, xid=%u)\n", ref->ic->ino, ref->xd->xid);
+
+ return 0;
+}
+
+static struct jffs2_xattr_ref *create_xattr_ref(struct jffs2_sb_info *c, struct jffs2_inode_cache *ic,
+ struct jffs2_xattr_datum *xd)
+{
+ /* must be called under down_write(xattr_sem) */
+ struct jffs2_xattr_ref *ref;
+ int ret;
+
+ ref = jffs2_alloc_xattr_ref();
+ if (!ref)
+ return ERR_PTR(-ENOMEM);
+ ref->ic = ic;
+ ref->xd = xd;
+
+ ret = save_xattr_ref(c, ref);
+ if (ret) {
+ jffs2_free_xattr_ref(ref);
+ return ERR_PTR(ret);
+ }
+
+ /* Chain to inode */
+ ref->next = ic->xref;
+ ic->xref = ref;
+
+ return ref; /* success */
+}
+
+void jffs2_xattr_delete_inode(struct jffs2_sb_info *c, struct jffs2_inode_cache *ic)
+{
+ /* It's called from jffs2_clear_inode() when an inode is removed.
+ When an inode with XATTRs is removed, those XATTRs must be removed as well. */
+ struct jffs2_xattr_ref *ref, *_ref;
+
+ if (!ic || ic->nlink > 0)
+ return;
+
+ down_write(&c->xattr_sem);
+ for (ref = ic->xref; ref; ref = _ref) {
+ _ref = ref->next;
+ delete_xattr_ref(c, ref);
+ }
+ ic->xref = NULL;
+ up_write(&c->xattr_sem);
+}
+
+void jffs2_xattr_free_inode(struct jffs2_sb_info *c, struct jffs2_inode_cache *ic)
+{
+ /* It's called from jffs2_free_ino_caches() when unmounting the FS. */
+ struct jffs2_xattr_datum *xd;
+ struct jffs2_xattr_ref *ref, *_ref;
+
+ down_write(&c->xattr_sem);
+ for (ref = ic->xref; ref; ref = _ref) {
+ _ref = ref->next;
+ xd = ref->xd;
+ xd->refcnt--;
+ if (!xd->refcnt) {
+ unload_xattr_datum(c, xd);
+ jffs2_free_xattr_datum(xd);
+ }
+ jffs2_free_xattr_ref(ref);
+ }
+ ic->xref = NULL;
+ up_write(&c->xattr_sem);
+}
+
+static int check_xattr_ref_inode(struct jffs2_sb_info *c, struct jffs2_inode_cache *ic)
+{
+ /* success of check_xattr_ref_inode() means that the inode (ic) does not have
+ * duplicate name/value pairs. If a duplicate name/value pair is found,
+ * one of them will be removed.
+ */
+ struct jffs2_xattr_ref *ref, *cmp, **pref;
+ int rc = 0;
+
+ if (likely(ic->flags & INO_FLAGS_XATTR_CHECKED))
+ return 0;
+ down_write(&c->xattr_sem);
+ retry:
+ rc = 0;
+ for (ref=ic->xref, pref=&ic->xref; ref; pref=&ref->next, ref=ref->next) {
+ if (!ref->xd->xname) {
+ rc = load_xattr_datum(c, ref->xd);
+ if (unlikely(rc > 0)) {
+ *pref = ref->next;
+ delete_xattr_ref(c, ref);
+ goto retry;
+ } else if (unlikely(rc < 0))
+ goto out;
+ }
+ for (cmp=ref->next, pref=&ref->next; cmp; pref=&cmp->next, cmp=cmp->next) {
+ if (!cmp->xd->xname) {
+ ref->xd->flags |= JFFS2_XFLAGS_BIND;
+ rc = load_xattr_datum(c, cmp->xd);
+ ref->xd->flags &= ~JFFS2_XFLAGS_BIND;
+ if (unlikely(rc > 0)) {
+ *pref = cmp->next;
+ delete_xattr_ref(c, cmp);
+ goto retry;
+ } else if (unlikely(rc < 0))
+ goto out;
+ }
+ if (ref->xd->xprefix == cmp->xd->xprefix
+ && !strcmp(ref->xd->xname, cmp->xd->xname)) {
+ *pref = cmp->next;
+ delete_xattr_ref(c, cmp);
+ goto retry;
+ }
+ }
+ }
+ ic->flags |= INO_FLAGS_XATTR_CHECKED;
+ out:
+ up_write(&c->xattr_sem);
+
+ return rc;
+}
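The loops above walk ic->xref through a pointer-to-pointer (pref) so that an entry can be unlinked in place without keeping a separate previous pointer. A self-contained sketch of that pattern on a plain integer list, with illustrative names:

#include <stdio.h>
#include <stdlib.h>

struct sketch_node {
    int key;
    struct sketch_node *next;
};

/* remove every matching node in place, exactly as '*pref = ref->next' does above */
static void sketch_remove(struct sketch_node **head, int key)
{
    struct sketch_node **pp = head;

    while (*pp) {
        if ((*pp)->key == key) {
            struct sketch_node *victim = *pp;

            *pp = victim->next;     /* unlink without tracking a previous node */
            free(victim);
        } else {
            pp = &(*pp)->next;
        }
    }
}

int main(void)
{
    struct sketch_node *head = NULL;

    for (int k = 1; k <= 3; k++) {  /* build the list 3 -> 2 -> 1 */
        struct sketch_node *n = malloc(sizeof(*n));

        if (!n)
            return 1;
        n->key = k;
        n->next = head;
        head = n;
    }

    sketch_remove(&head, 2);
    for (struct sketch_node *n = head; n; n = n->next)
        printf("%d\n", n->key);     /* prints 3, then 1 */
    return 0;
}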
+
+/* -------- xattr subsystem functions ---------------
+ * jffs2_init_xattr_subsystem(c)
+ * is used to initialize the semaphore, the list_heads and some other variables.
+ * jffs2_find_xattr_datum(c, xid)
+ * is used to look up an xdatum during the scanning process.
+ * jffs2_clear_xattr_subsystem(c)
+ * is used to release any xattr-related objects.
+ * jffs2_build_xattr_subsystem(c)
+ * is used to associate xdatum and xref during the superblock building process.
+ * jffs2_setup_xattr_datum(c, xid, version)
+ * is used to insert an xdatum during the scanning process.
+ * -------------------------------------------------- */
+void jffs2_init_xattr_subsystem(struct jffs2_sb_info *c)
+{
+ int i;
+
+ for (i=0; i < XATTRINDEX_HASHSIZE; i++)
+ INIT_LIST_HEAD(&c->xattrindex[i]);
+ INIT_LIST_HEAD(&c->xattr_unchecked);
+ c->xref_temp = NULL;
+
+ init_rwsem(&c->xattr_sem);
+ c->xdatum_mem_usage = 0;
+ c->xdatum_mem_threshold = 32 * 1024; /* Default 32KB */
+}
+
+static struct jffs2_xattr_datum *jffs2_find_xattr_datum(struct jffs2_sb_info *c, uint32_t xid)
+{
+ struct jffs2_xattr_datum *xd;
+ int i = xid % XATTRINDEX_HASHSIZE;
+
+ /* It's only used during the scanning/building process. */
+ BUG_ON(!(c->flags & (JFFS2_SB_FLAG_SCANNING|JFFS2_SB_FLAG_BUILDING)));
+
+ list_for_each_entry(xd, &c->xattrindex[i], xindex) {
+ if (xd->xid==xid)
+ return xd;
+ }
+ return NULL;
+}
+
+void jffs2_clear_xattr_subsystem(struct jffs2_sb_info *c)
+{
+ struct jffs2_xattr_datum *xd, *_xd;
+ struct jffs2_xattr_ref *ref, *_ref;
+ int i;
+
+ for (ref=c->xref_temp; ref; ref = _ref) {
+ _ref = ref->next;
+ jffs2_free_xattr_ref(ref);
+ }
+ c->xref_temp = NULL;
+
+ for (i=0; i < XATTRINDEX_HASHSIZE; i++) {
+ list_for_each_entry_safe(xd, _xd, &c->xattrindex[i], xindex) {
+ list_del(&xd->xindex);
+ if (xd->xname)
+ kfree(xd->xname);
+ jffs2_free_xattr_datum(xd);
+ }
+ }
+}
+
+void jffs2_build_xattr_subsystem(struct jffs2_sb_info *c)
+{
+ struct jffs2_xattr_ref *ref, *_ref;
+ struct jffs2_xattr_datum *xd, *_xd;
+ struct jffs2_inode_cache *ic;
+ int i, xdatum_count =0, xdatum_unchecked_count = 0, xref_count = 0;
+
+ BUG_ON(!(c->flags & JFFS2_SB_FLAG_BUILDING));
+
+ /* Phase.1 */
+ for (ref=c->xref_temp; ref; ref=_ref) {
+ _ref = ref->next;
+ /* checking REF_UNCHECKED nodes */
+ if (ref_flags(ref->node) != REF_PRISTINE) {
+ if (verify_xattr_ref(c, ref)) {
+ delete_xattr_ref_node(c, ref);
+ jffs2_free_xattr_ref(ref);
+ continue;
+ }
+ }
+ /* At this point, ref->xid and ref->ino contain XID and inode number.
+ ref->xd and ref->ic are not valid yet. */
+ xd = jffs2_find_xattr_datum(c, ref->xid);
+ ic = jffs2_get_ino_cache(c, ref->ino);
+ if (!xd || !ic) {
+ if (ref_flags(ref->node) != REF_UNCHECKED)
+ JFFS2_WARNING("xref(ino=%u, xid=%u) is orphan. \n",
+ ref->ino, ref->xid);
+ delete_xattr_ref_node(c, ref);
+ jffs2_free_xattr_ref(ref);
+ continue;
+ }
+ ref->xd = xd;
+ ref->ic = ic;
+ xd->refcnt++;
+ ref->next = ic->xref;
+ ic->xref = ref;
+ xref_count++;
+ }
+ c->xref_temp = NULL;
+ /* After this, ref->xid/ino are NEVER used. */
+
+ /* Phase.2 */
+ for (i=0; i < XATTRINDEX_HASHSIZE; i++) {
+ list_for_each_entry_safe(xd, _xd, &c->xattrindex[i], xindex) {
+ list_del_init(&xd->xindex);
+ if (!xd->refcnt) {
+ if (ref_flags(xd->node) != REF_UNCHECKED)
+ JFFS2_WARNING("orphan xdatum(xid=%u, version=%u) at %#08x\n",
+ xd->xid, xd->version, ref_offset(xd->node));
+ delete_xattr_datum(c, xd);
+ continue;
+ }
+ if (ref_flags(xd->node) != REF_PRISTINE) {
+ dbg_xattr("unchecked xdatum(xid=%u) at %#08x\n",
+ xd->xid, ref_offset(xd->node));
+ list_add(&xd->xindex, &c->xattr_unchecked);
+ xdatum_unchecked_count++;
+ }
+ xdatum_count++;
+ }
+ }
+ /* build complete */
+ JFFS2_NOTICE("complete building xattr subsystem, %u of xdatum (%u unchecked) and "
+ "%u of xref found.\n", xdatum_count, xdatum_unchecked_count, xref_count);
+}
+
+struct jffs2_xattr_datum *jffs2_setup_xattr_datum(struct jffs2_sb_info *c,
+ uint32_t xid, uint32_t version)
+{
+ struct jffs2_xattr_datum *xd, *_xd;
+
+ _xd = jffs2_find_xattr_datum(c, xid);
+ if (_xd) {
+ dbg_xattr("duplicate xdatum (xid=%u, version=%u/%u) at %#08x\n",
+ xid, version, _xd->version, ref_offset(_xd->node));
+ if (version < _xd->version)
+ return ERR_PTR(-EEXIST);
+ }
+ xd = jffs2_alloc_xattr_datum();
+ if (!xd)
+ return ERR_PTR(-ENOMEM);
+ xd->xid = xid;
+ xd->version = version;
+ if (xd->xid > c->highest_xid)
+ c->highest_xid = xd->xid;
+ list_add_tail(&xd->xindex, &c->xattrindex[xid % XATTRINDEX_HASHSIZE]);
+
+ if (_xd) {
+ list_del_init(&_xd->xindex);
+ delete_xattr_datum_node(c, _xd);
+ jffs2_free_xattr_datum(_xd);
+ }
+ return xd;
+}
+
+/* -------- xattr handler functions -----------------
+ * xprefix_to_handler(xprefix)
+ * is used to translate an xprefix into an xattr_handler.
+ * jffs2_listxattr(dentry, buffer, size)
+ * is the implementation of the listxattr handler on jffs2.
+ * do_jffs2_getxattr(inode, xprefix, xname, buffer, size)
+ * is the implementation of the getxattr handler on jffs2.
+ * do_jffs2_setxattr(inode, xprefix, xname, buffer, size, flags)
+ * is the implementation of the setxattr handler on jffs2.
+ * -------------------------------------------------- */
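On the VFS side a table such as jffs2_xattr_handlers[] below is normally consumed by matching the attribute name against each handler's prefix string, while the kernel-internal xprefix_to_handler() switches on a numeric prefix id instead. A self-contained sketch of the prefix-table style of dispatch, with made-up prefixes and handlers:

#include <stdio.h>
#include <string.h>

struct sketch_handler {
    const char *prefix;
    void (*get)(const char *name);
};

static void sketch_get_user(const char *name)    { printf("user handler: %s\n", name); }
static void sketch_get_trusted(const char *name) { printf("trusted handler: %s\n", name); }

static const struct sketch_handler sketch_handlers[] = {
    { "user.",    sketch_get_user },
    { "trusted.", sketch_get_trusted },
    { NULL,       NULL },
};

/* walk the table and match on the name prefix */
static const struct sketch_handler *sketch_lookup(const char *full_name)
{
    for (const struct sketch_handler *h = sketch_handlers; h->prefix; h++)
        if (!strncmp(full_name, h->prefix, strlen(h->prefix)))
            return h;
    return NULL;
}

int main(void)
{
    const struct sketch_handler *h = sketch_lookup("user.comment");

    if (h)
        h->get("user.comment");
    return 0;
}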
+struct xattr_handler *jffs2_xattr_handlers[] = {
+ &jffs2_user_xattr_handler,
+#ifdef CONFIG_JFFS2_FS_SECURITY
+ &jffs2_security_xattr_handler,
+#endif
+#ifdef CONFIG_JFFS2_FS_POSIX_ACL
+ &jffs2_acl_access_xattr_handler,
+ &jffs2_acl_default_xattr_handler,
+#endif
+ &jffs2_trusted_xattr_handler,
+ NULL
+};
+
+static struct xattr_handler *xprefix_to_handler(int xprefix) {
+ struct xattr_handler *ret;
+
+ switch (xprefix) {
+ case JFFS2_XPREFIX_USER:
+ ret = &jffs2_user_xattr_handler;
+ break;
+#ifdef CONFIG_JFFS2_FS_SECURITY
+ case JFFS2_XPREFIX_SECURITY:
+ ret = &jffs2_security_xattr_handler;
+ break;
+#endif
+#ifdef CONFIG_JFFS2_FS_POSIX_ACL
+ case JFFS2_XPREFIX_ACL_ACCESS:
+ ret = &jffs2_acl_access_xattr_handler;
+ break;
+ case JFFS2_XPREFIX_ACL_DEFAULT:
+ ret = &jffs2_acl_default_xattr_handler;
+ break;
+#endif
+ case JFFS2_XPREFIX_TRUSTED:
+ ret = &jffs2_trusted_xattr_handler;
+ break;
+ default:
+ ret = NULL;
+ break;
+ }
+ return ret;
+}
+
+ssize_t jffs2_listxattr(struct dentry *dentry, char *buffer, size_t size)
+{
+ struct inode *inode = dentry->d_inode;
+ struct jffs2_inode_info *f = JFFS2_INODE_INFO(inode);
+ struct jffs2_sb_info *c = JFFS2_SB_INFO(inode->i_sb);
+ struct jffs2_inode_cache *ic = f->inocache;
+ struct jffs2_xattr_ref *ref, **pref;
+ struct jffs2_xattr_datum *xd;
+ struct xattr_handler *xhandle;
+ ssize_t len, rc;
+ int retry = 0;
+
+ rc = check_xattr_ref_inode(c, ic);
+ if (unlikely(rc))
+ return rc;
+
+ down_read(&c->xattr_sem);
+ retry:
+ len = 0;
+ for (ref=ic->xref, pref=&ic->xref; ref; pref=&ref->next, ref=ref->next) {
+ BUG_ON(ref->ic != ic);
+ xd = ref->xd;
+ if (!xd->xname) {
+ /* xdatum is uncached */
+ if (!retry) {
+ retry = 1;
+ up_read(&c->xattr_sem);
+ down_write(&c->xattr_sem);
+ goto retry;
+ } else {
+ rc = load_xattr_datum(c, xd);
+ if (unlikely(rc > 0)) {
+ *pref = ref->next;
+ delete_xattr_ref(c, ref);
+ goto retry;
+ } else if (unlikely(rc < 0))
+ goto out;
+ }
+ }
+ xhandle = xprefix_to_handler(xd->xprefix);
+ if (!xhandle)
+ continue;
+ if (buffer) {
+ rc = xhandle->list(inode, buffer+len, size-len, xd->xname, xd->name_len);
+ } else {
+ rc = xhandle->list(inode, NULL, 0, xd->xname, xd->name_len);
+ }
+ if (rc < 0)
+ goto out;
+ len += rc;
+ }
+ rc = len;
+ out:
+ if (!retry) {
+ up_read(&c->xattr_sem);
+ } else {
+ up_write(&c->xattr_sem);
+ }
+ return rc;
+}
+
+int do_jffs2_getxattr(struct inode *inode, int xprefix, const char *xname,
+ char *buffer, size_t size)
+{
+ struct jffs2_inode_info *f = JFFS2_INODE_INFO(inode);
+ struct jffs2_sb_info *c = JFFS2_SB_INFO(inode->i_sb);
+ struct jffs2_inode_cache *ic = f->inocache;
+ struct jffs2_xattr_datum *xd;
+ struct jffs2_xattr_ref *ref, **pref;
+ int rc, retry = 0;
+
+ rc = check_xattr_ref_inode(c, ic);
+ if (unlikely(rc))
+ return rc;
+
+ down_read(&c->xattr_sem);
+ retry:
+ for (ref=ic->xref, pref=&ic->xref; ref; pref=&ref->next, ref=ref->next) {
+ BUG_ON(ref->ic!=ic);
+
+ xd = ref->xd;
+ if (xd->xprefix != xprefix)
+ continue;
+ if (!xd->xname) {
+ /* xdatum is uncached */
+ if (!retry) {
+ retry = 1;
+ up_read(&c->xattr_sem);
+ down_write(&c->xattr_sem);
+ goto retry;
+ } else {
+ rc = load_xattr_datum(c, xd);
+ if (unlikely(rc > 0)) {
+ *pref = ref->next;
+ delete_xattr_ref(c, ref);
+ goto retry;
+ } else if (unlikely(rc < 0)) {
+ goto out;
+ }
+ }
+ }
+ if (!strcmp(xname, xd->xname)) {
+ rc = xd->value_len;
+ if (buffer) {
+ if (size < rc) {
+ rc = -ERANGE;
+ } else {
+ memcpy(buffer, xd->xvalue, rc);
+ }
+ }
+ goto out;
+ }
+ }
+ rc = -ENODATA;
+ out:
+ if (!retry) {
+ up_read(&c->xattr_sem);
+ } else {
+ up_write(&c->xattr_sem);
+ }
+ return rc;
+}
+
+int do_jffs2_setxattr(struct inode *inode, int xprefix, const char *xname,
+ const char *buffer, size_t size, int flags)
+{
+ struct jffs2_inode_info *f = JFFS2_INODE_INFO(inode);
+ struct jffs2_sb_info *c = JFFS2_SB_INFO(inode->i_sb);
+ struct jffs2_inode_cache *ic = f->inocache;
+ struct jffs2_xattr_datum *xd;
+ struct jffs2_xattr_ref *ref, *newref, **pref;
+ uint32_t length, request;
+ int rc;
+
+ rc = check_xattr_ref_inode(c, ic);
+ if (unlikely(rc))
+ return rc;
+
+ request = PAD(sizeof(struct jffs2_raw_xattr) + strlen(xname) + 1 + size);
+ rc = jffs2_reserve_space(c, request, &length,
+ ALLOC_NORMAL, JFFS2_SUMMARY_XATTR_SIZE);
+ if (rc) {
+ JFFS2_WARNING("jffs2_reserve_space()=%d, request=%u\n", rc, request);
+ return rc;
+ }
+
+ /* Find existing xattr */
+ down_write(&c->xattr_sem);
+ retry:
+ for (ref=ic->xref, pref=&ic->xref; ref; pref=&ref->next, ref=ref->next) {
+ xd = ref->xd;
+ if (xd->xprefix != xprefix)
+ continue;
+ if (!xd->xname) {
+ rc = load_xattr_datum(c, xd);
+ if (unlikely(rc > 0)) {
+ *pref = ref->next;
+ delete_xattr_ref(c, ref);
+ goto retry;
+ } else if (unlikely(rc < 0))
+ goto out;
+ }
+ if (!strcmp(xd->xname, xname)) {
+ if (flags & XATTR_CREATE) {
+ rc = -EEXIST;
+ goto out;
+ }
+ if (!buffer) {
+ *pref = ref->next;
+ delete_xattr_ref(c, ref);
+ rc = 0;
+ goto out;
+ }
+ goto found;
+ }
+ }
+ /* not found */
+ if (flags & XATTR_REPLACE) {
+ rc = -ENODATA;
+ goto out;
+ }
+ if (!buffer) {
+ rc = -EINVAL;
+ goto out;
+ }
+ found:
+ xd = create_xattr_datum(c, xprefix, xname, buffer, size);
+ if (IS_ERR(xd)) {
+ rc = PTR_ERR(xd);
+ goto out;
+ }
+ up_write(&c->xattr_sem);
+ jffs2_complete_reservation(c);
+
+ /* create xattr_ref */
+ request = PAD(sizeof(struct jffs2_raw_xref));
+ rc = jffs2_reserve_space(c, request, &length,
+ ALLOC_NORMAL, JFFS2_SUMMARY_XREF_SIZE);
+ if (rc) {
+ JFFS2_WARNING("jffs2_reserve_space()=%d, request=%u\n", rc, request);
+ down_write(&c->xattr_sem);
+ xd->refcnt--;
+ if (!xd->refcnt)
+ delete_xattr_datum(c, xd);
+ up_write(&c->xattr_sem);
+ return rc;
+ }
+ down_write(&c->xattr_sem);
+ if (ref)
+ *pref = ref->next;
+ newref = create_xattr_ref(c, ic, xd);
+ if (IS_ERR(newref)) {
+ if (ref) {
+ ref->next = ic->xref;
+ ic->xref = ref;
+ }
+ rc = PTR_ERR(newref);
+ xd->refcnt--;
+ if (!xd->refcnt)
+ delete_xattr_datum(c, xd);
+ } else if (ref) {
+ delete_xattr_ref(c, ref);
+ }
+ out:
+ up_write(&c->xattr_sem);
+ jffs2_complete_reservation(c);
+ return rc;
+}
+
+/* -------- garbage collector functions -------------
+ * jffs2_garbage_collect_xattr_datum(c, xd)
+ * is used to move an xdatum into a new node.
+ * jffs2_garbage_collect_xattr_ref(c, ref)
+ * is used to move an xref into a new node.
+ * jffs2_verify_xattr(c)
+ * is used to call do_verify_xattr_datum() before garbage collecting.
+ * -------------------------------------------------- */
+int jffs2_garbage_collect_xattr_datum(struct jffs2_sb_info *c, struct jffs2_xattr_datum *xd)
+{
+ uint32_t totlen, length, old_ofs;
+ int rc = -EINVAL;
+
+ down_write(&c->xattr_sem);
+ BUG_ON(!xd->node);
+
+ old_ofs = ref_offset(xd->node);
+ totlen = ref_totlen(c, c->gcblock, xd->node);
+ if (totlen < sizeof(struct jffs2_raw_xattr))
+ goto out;
+
+ if (!xd->xname) {
+ rc = load_xattr_datum(c, xd);
+ if (unlikely(rc > 0)) {
+ delete_xattr_datum_node(c, xd);
+ rc = 0;
+ goto out;
+ } else if (unlikely(rc < 0))
+ goto out;
+ }
+ rc = jffs2_reserve_space_gc(c, totlen, &length, JFFS2_SUMMARY_XATTR_SIZE);
+ if (rc || length < totlen) {
+ JFFS2_WARNING("jffs2_reserve_space()=%d, request=%u\n", rc, totlen);
+ rc = rc ? rc : -EBADFD;
+ goto out;
+ }
+ rc = save_xattr_datum(c, xd);
+ if (!rc)
+ dbg_xattr("xdatum (xid=%u, version=%u) GC'ed from %#08x to %08x\n",
+ xd->xid, xd->version, old_ofs, ref_offset(xd->node));
+ out:
+ up_write(&c->xattr_sem);
+ return rc;
+}
+
+
+int jffs2_garbage_collect_xattr_ref(struct jffs2_sb_info *c, struct jffs2_xattr_ref *ref)
+{
+ uint32_t totlen, length, old_ofs;
+ int rc = -EINVAL;
+
+ down_write(&c->xattr_sem);
+ BUG_ON(!ref->node);
+
+ old_ofs = ref_offset(ref->node);
+ totlen = ref_totlen(c, c->gcblock, ref->node);
+ if (totlen != sizeof(struct jffs2_raw_xref))
+ goto out;
+
+ rc = jffs2_reserve_space_gc(c, totlen, &length, JFFS2_SUMMARY_XREF_SIZE);
+ if (rc || length < totlen) {
+ JFFS2_WARNING("%s: jffs2_reserve_space() = %d, request = %u\n",
+ __FUNCTION__, rc, totlen);
+ rc = rc ? rc : -EBADFD;
+ goto out;
+ }
+ rc = save_xattr_ref(c, ref);
+ if (!rc)
+ dbg_xattr("xref (ino=%u, xid=%u) GC'ed from %#08x to %08x\n",
+ ref->ic->ino, ref->xd->xid, old_ofs, ref_offset(ref->node));
+ out:
+ up_write(&c->xattr_sem);
+ return rc;
+}
+
+int jffs2_verify_xattr(struct jffs2_sb_info *c)
+{
+ struct jffs2_xattr_datum *xd, *_xd;
+ int rc;
+
+ down_write(&c->xattr_sem);
+ list_for_each_entry_safe(xd, _xd, &c->xattr_unchecked, xindex) {
+ rc = do_verify_xattr_datum(c, xd);
+ if (rc == 0) {
+ list_del_init(&xd->xindex);
+ break;
+ } else if (rc > 0) {
+ list_del_init(&xd->xindex);
+ delete_xattr_datum_node(c, xd);
+ }
+ }
+ up_write(&c->xattr_sem);
+
+ return list_empty(&c->xattr_unchecked) ? 1 : 0;
+}
diff --git a/fs/jffs2/xattr.h b/fs/jffs2/xattr.h
new file mode 100644
index 00000000000..2c199856c58
--- /dev/null
+++ b/fs/jffs2/xattr.h
@@ -0,0 +1,116 @@
+/*
+ * JFFS2 -- Journalling Flash File System, Version 2.
+ *
+ * Copyright (C) 2006 NEC Corporation
+ *
+ * Created by KaiGai Kohei <kaigai@ak.jp.nec.com>
+ *
+ * For licensing information, see the file 'LICENCE' in this directory.
+ *
+ */
+#ifndef _JFFS2_FS_XATTR_H_
+#define _JFFS2_FS_XATTR_H_
+
+#include <linux/xattr.h>
+#include <linux/list.h>
+
+#define JFFS2_XFLAGS_HOT (0x01) /* This datum is HOT */
+#define JFFS2_XFLAGS_BIND (0x02) /* This datum is not reclaimed */
+
+struct jffs2_xattr_datum
+{
+ void *always_null;
+ struct jffs2_raw_node_ref *node;
+ uint8_t class;
+ uint8_t flags;
+ uint16_t xprefix; /* see JFFS2_XPREFIX_* */
+
+ struct list_head xindex; /* chained from c->xattrindex[n] */
+ uint32_t refcnt; /* # of xattr_refs that refer to this */
+ uint32_t xid;
+ uint32_t version;
+
+ uint32_t data_crc;
+ uint32_t hashkey;
+ char *xname; /* XATTR name without prefix */
+ uint32_t name_len; /* length of xname */
+ char *xvalue; /* XATTR value */
+ uint32_t value_len; /* length of xvalue */
+};
+
+struct jffs2_inode_cache;
+struct jffs2_xattr_ref
+{
+ void *always_null;
+ struct jffs2_raw_node_ref *node;
+ uint8_t class;
+ uint8_t flags; /* Currently unused */
+ u16 unused;
+
+ union {
+ struct jffs2_inode_cache *ic; /* reference to jffs2_inode_cache */
+ uint32_t ino; /* only used in scanning/building */
+ };
+ union {
+ struct jffs2_xattr_datum *xd; /* reference to jffs2_xattr_datum */
+ uint32_t xid; /* only used in scanning/building */
+ };
+ struct jffs2_xattr_ref *next; /* chained from ic->xref */
+};
+
+#ifdef CONFIG_JFFS2_FS_XATTR
+
+extern void jffs2_init_xattr_subsystem(struct jffs2_sb_info *c);
+extern void jffs2_build_xattr_subsystem(struct jffs2_sb_info *c);
+extern void jffs2_clear_xattr_subsystem(struct jffs2_sb_info *c);
+
+extern struct jffs2_xattr_datum *jffs2_setup_xattr_datum(struct jffs2_sb_info *c,
+ uint32_t xid, uint32_t version);
+
+extern void jffs2_xattr_delete_inode(struct jffs2_sb_info *c, struct jffs2_inode_cache *ic);
+extern void jffs2_xattr_free_inode(struct jffs2_sb_info *c, struct jffs2_inode_cache *ic);
+
+extern int jffs2_garbage_collect_xattr_datum(struct jffs2_sb_info *c, struct jffs2_xattr_datum *xd);
+extern int jffs2_garbage_collect_xattr_ref(struct jffs2_sb_info *c, struct jffs2_xattr_ref *ref);
+extern int jffs2_verify_xattr(struct jffs2_sb_info *c);
+
+extern int do_jffs2_getxattr(struct inode *inode, int xprefix, const char *xname,
+ char *buffer, size_t size);
+extern int do_jffs2_setxattr(struct inode *inode, int xprefix, const char *xname,
+ const char *buffer, size_t size, int flags);
+
+extern struct xattr_handler *jffs2_xattr_handlers[];
+extern struct xattr_handler jffs2_user_xattr_handler;
+extern struct xattr_handler jffs2_trusted_xattr_handler;
+
+extern ssize_t jffs2_listxattr(struct dentry *, char *, size_t);
+#define jffs2_getxattr generic_getxattr
+#define jffs2_setxattr generic_setxattr
+#define jffs2_removexattr generic_removexattr
+
+#else
+
+#define jffs2_init_xattr_subsystem(c)
+#define jffs2_build_xattr_subsystem(c)
+#define jffs2_clear_xattr_subsystem(c)
+
+#define jffs2_xattr_delete_inode(c, ic)
+#define jffs2_xattr_free_inode(c, ic)
+#define jffs2_verify_xattr(c) (1)
+
+#define jffs2_xattr_handlers NULL
+#define jffs2_listxattr NULL
+#define jffs2_getxattr NULL
+#define jffs2_setxattr NULL
+#define jffs2_removexattr NULL
+
+#endif /* CONFIG_JFFS2_FS_XATTR */
+
+#ifdef CONFIG_JFFS2_FS_SECURITY
+extern int jffs2_init_security(struct inode *inode, struct inode *dir);
+extern struct xattr_handler jffs2_security_xattr_handler;
+#else
+#define jffs2_init_security(inode,dir) (0)
+#endif /* CONFIG_JFFS2_FS_SECURITY */
+
+#endif /* _JFFS2_FS_XATTR_H_ */
diff --git a/fs/jffs2/xattr_trusted.c b/fs/jffs2/xattr_trusted.c
new file mode 100644
index 00000000000..ed046e19dbf
--- /dev/null
+++ b/fs/jffs2/xattr_trusted.c
@@ -0,0 +1,52 @@
+/*
+ * JFFS2 -- Journalling Flash File System, Version 2.
+ *
+ * Copyright (C) 2006 NEC Corporation
+ *
+ * Created by KaiGai Kohei <kaigai@ak.jp.nec.com>
+ *
+ * For licensing information, see the file 'LICENCE' in this directory.
+ *
+ */
+#include <linux/kernel.h>
+#include <linux/fs.h>
+#include <linux/jffs2.h>
+#include <linux/xattr.h>
+#include <linux/mtd/mtd.h>
+#include "nodelist.h"
+
+static int jffs2_trusted_getxattr(struct inode *inode, const char *name,
+ void *buffer, size_t size)
+{
+ if (!strcmp(name, ""))
+ return -EINVAL;
+ return do_jffs2_getxattr(inode, JFFS2_XPREFIX_TRUSTED, name, buffer, size);
+}
+
+static int jffs2_trusted_setxattr(struct inode *inode, const char *name, const void *buffer,
+ size_t size, int flags)
+{
+ if (!strcmp(name, ""))
+ return -EINVAL;
+ return do_jffs2_setxattr(inode, JFFS2_XPREFIX_TRUSTED, name, buffer, size, flags);
+}
+
+static size_t jffs2_trusted_listxattr(struct inode *inode, char *list, size_t list_size,
+ const char *name, size_t name_len)
+{
+ size_t retlen = XATTR_TRUSTED_PREFIX_LEN + name_len + 1;
+
+ if (list && retlen<=list_size) {
+ strcpy(list, XATTR_TRUSTED_PREFIX);
+ strcpy(list + XATTR_TRUSTED_PREFIX_LEN, name);
+ }
+
+ return retlen;
+}
+
+struct xattr_handler jffs2_trusted_xattr_handler = {
+ .prefix = XATTR_TRUSTED_PREFIX,
+ .list = jffs2_trusted_listxattr,
+ .set = jffs2_trusted_setxattr,
+ .get = jffs2_trusted_getxattr
+};
diff --git a/fs/jffs2/xattr_user.c b/fs/jffs2/xattr_user.c
new file mode 100644
index 00000000000..2f8e9aa01ea
--- /dev/null
+++ b/fs/jffs2/xattr_user.c
@@ -0,0 +1,52 @@
+/*
+ * JFFS2 -- Journalling Flash File System, Version 2.
+ *
+ * Copyright (C) 2006 NEC Corporation
+ *
+ * Created by KaiGai Kohei <kaigai@ak.jp.nec.com>
+ *
+ * For licensing information, see the file 'LICENCE' in this directory.
+ *
+ */
+#include <linux/kernel.h>
+#include <linux/fs.h>
+#include <linux/jffs2.h>
+#include <linux/xattr.h>
+#include <linux/mtd/mtd.h>
+#include "nodelist.h"
+
+static int jffs2_user_getxattr(struct inode *inode, const char *name,
+ void *buffer, size_t size)
+{
+ if (!strcmp(name, ""))
+ return -EINVAL;
+ return do_jffs2_getxattr(inode, JFFS2_XPREFIX_USER, name, buffer, size);
+}
+
+static int jffs2_user_setxattr(struct inode *inode, const char *name, const void *buffer,
+ size_t size, int flags)
+{
+ if (!strcmp(name, ""))
+ return -EINVAL;
+ return do_jffs2_setxattr(inode, JFFS2_XPREFIX_USER, name, buffer, size, flags);
+}
+
+static size_t jffs2_user_listxattr(struct inode *inode, char *list, size_t list_size,
+ const char *name, size_t name_len)
+{
+ size_t retlen = XATTR_USER_PREFIX_LEN + name_len + 1;
+
+ if (list && retlen <= list_size) {
+ strcpy(list, XATTR_USER_PREFIX);
+ strcpy(list + XATTR_USER_PREFIX_LEN, name);
+ }
+
+ return retlen;
+}
+
+struct xattr_handler jffs2_user_xattr_handler = {
+ .prefix = XATTR_USER_PREFIX,
+ .list = jffs2_user_listxattr,
+ .set = jffs2_user_setxattr,
+ .get = jffs2_user_getxattr
+};
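For context, a user-space sketch that exercises the user.* attributes these handlers expose, using the standard libc wrappers from <sys/xattr.h>; the file path and attribute value are placeholders:

#include <stdio.h>
#include <string.h>
#include <sys/types.h>
#include <sys/xattr.h>

int main(void)
{
    const char *path = "/mnt/jffs2/somefile";   /* placeholder path */
    char value[64], list[256];
    ssize_t len;

    if (setxattr(path, "user.comment", "hello", 5, 0) != 0)
        perror("setxattr");

    len = getxattr(path, "user.comment", value, sizeof(value));
    if (len >= 0)
        printf("user.comment = %.*s\n", (int)len, value);

    /* listxattr() fills the buffer with NUL-terminated attribute names */
    len = listxattr(path, list, sizeof(list));
    for (char *p = list; len > 0 && p < list + len; p += strlen(p) + 1)
        printf("attribute: %s\n", p);
    return 0;
}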
diff --git a/fs/locks.c b/fs/locks.c
index ab61a8b5482..69435c68c1e 100644
--- a/fs/locks.c
+++ b/fs/locks.c
@@ -2206,63 +2206,6 @@ int lock_may_write(struct inode *inode, loff_t start, unsigned long len)
EXPORT_SYMBOL(lock_may_write);
-static inline void __steal_locks(struct file *file, fl_owner_t from)
-{
- struct inode *inode = file->f_dentry->d_inode;
- struct file_lock *fl = inode->i_flock;
-
- while (fl) {
- if (fl->fl_file == file && fl->fl_owner == from)
- fl->fl_owner = current->files;
- fl = fl->fl_next;
- }
-}
-
-/* When getting ready for executing a binary, we make sure that current
- * has a files_struct on its own. Before dropping the old files_struct,
- * we take over ownership of all locks for all file descriptors we own.
- * Note that we may accidentally steal a lock for a file that a sibling
- * has created since the unshare_files() call.
- */
-void steal_locks(fl_owner_t from)
-{
- struct files_struct *files = current->files;
- int i, j;
- struct fdtable *fdt;
-
- if (from == files)
- return;
-
- lock_kernel();
- j = 0;
-
- /*
- * We are not taking a ref to the file structures, so
- * we need to acquire ->file_lock.
- */
- spin_lock(&files->file_lock);
- fdt = files_fdtable(files);
- for (;;) {
- unsigned long set;
- i = j * __NFDBITS;
- if (i >= fdt->max_fdset || i >= fdt->max_fds)
- break;
- set = fdt->open_fds->fds_bits[j++];
- while (set) {
- if (set & 1) {
- struct file *file = fdt->fd[i];
- if (file)
- __steal_locks(file, from);
- }
- i++;
- set >>= 1;
- }
- }
- spin_unlock(&files->file_lock);
- unlock_kernel();
-}
-EXPORT_SYMBOL(steal_locks);
-
static int __init filelock_init(void)
{
filelock_cache = kmem_cache_create("file_lock_cache",
diff --git a/fs/namei.c b/fs/namei.c
index d6e2ee25173..184fe4acf82 100644
--- a/fs/namei.c
+++ b/fs/namei.c
@@ -1127,7 +1127,7 @@ out:
if (likely(retval == 0)) {
if (unlikely(current->audit_context && nd && nd->dentry &&
nd->dentry->d_inode))
- audit_inode(name, nd->dentry->d_inode, flags);
+ audit_inode(name, nd->dentry->d_inode);
}
out_fail:
return retval;
diff --git a/fs/ntfs/file.c b/fs/ntfs/file.c
index c63a83e8da9..36e1e136bb0 100644
--- a/fs/ntfs/file.c
+++ b/fs/ntfs/file.c
@@ -1484,14 +1484,15 @@ static inline void ntfs_flush_dcache_pages(struct page **pages,
unsigned nr_pages)
{
BUG_ON(!nr_pages);
+ /*
+ * Warning: Do not do the decrement at the same time as the call to
+ * flush_dcache_page() because it is a NULL macro on i386 and hence the
+ * decrement never happens, so the loop never terminates.
+ */
do {
- /*
- * Warning: Do not do the decrement at the same time as the
- * call because flush_dcache_page() is a NULL macro on i386
- * and hence the decrement never happens.
- */
+ --nr_pages;
flush_dcache_page(pages[nr_pages]);
- } while (--nr_pages > 0);
+ } while (nr_pages > 0);
}
/**
diff --git a/fs/open.c b/fs/open.c
index 317b7c7f38a..4f178acd4c0 100644
--- a/fs/open.c
+++ b/fs/open.c
@@ -633,7 +633,7 @@ asmlinkage long sys_fchmod(unsigned int fd, mode_t mode)
dentry = file->f_dentry;
inode = dentry->d_inode;
- audit_inode(NULL, inode, 0);
+ audit_inode(NULL, inode);
err = -EROFS;
if (IS_RDONLY(inode))
@@ -786,7 +786,7 @@ asmlinkage long sys_fchown(unsigned int fd, uid_t user, gid_t group)
if (file) {
struct dentry * dentry;
dentry = file->f_dentry;
- audit_inode(NULL, dentry->d_inode, 0);
+ audit_inode(NULL, dentry->d_inode);
error = chown_common(dentry, user, group);
fput(file);
}
diff --git a/fs/partitions/check.c b/fs/partitions/check.c
index 7ef1f094de9..8851b81e7c5 100644
--- a/fs/partitions/check.c
+++ b/fs/partitions/check.c
@@ -329,6 +329,7 @@ void delete_partition(struct gendisk *disk, int part)
p->ios[0] = p->ios[1] = 0;
p->sectors[0] = p->sectors[1] = 0;
devfs_remove("%s/part%d", disk->devfs_name, part);
+ sysfs_remove_link(&p->kobj, "subsystem");
if (p->holder_dir)
kobject_unregister(p->holder_dir);
kobject_uevent(&p->kobj, KOBJ_REMOVE);
@@ -363,6 +364,7 @@ void add_partition(struct gendisk *disk, int part, sector_t start, sector_t len)
kobject_add(&p->kobj);
if (!disk->part_uevent_suppress)
kobject_uevent(&p->kobj, KOBJ_ADD);
+ sysfs_create_link(&p->kobj, &block_subsys.kset.kobj, "subsystem");
partition_sysfs_add_subdir(p);
disk->part[part-1] = p;
}
@@ -398,6 +400,7 @@ static void disk_sysfs_symlinks(struct gendisk *disk)
kfree(disk_name);
}
}
+ sysfs_create_link(&disk->kobj, &block_subsys.kset.kobj, "subsystem");
}
/* Not exported, helper to add_disk(). */
@@ -548,5 +551,6 @@ void del_gendisk(struct gendisk *disk)
put_device(disk->driverfs_dev);
disk->driverfs_dev = NULL;
}
+ sysfs_remove_link(&disk->kobj, "subsystem");
kobject_del(&disk->kobj);
}
diff --git a/fs/proc/base.c b/fs/proc/base.c
index 6cc77dc3f3f..6afff725a8c 100644
--- a/fs/proc/base.c
+++ b/fs/proc/base.c
@@ -1019,8 +1019,8 @@ static ssize_t proc_loginuid_write(struct file * file, const char __user * buf,
if (current != task)
return -EPERM;
- if (count > PAGE_SIZE)
- count = PAGE_SIZE;
+ if (count >= PAGE_SIZE)
+ count = PAGE_SIZE - 1;
if (*ppos != 0) {
/* No partial writes. */
@@ -1033,6 +1033,7 @@ static ssize_t proc_loginuid_write(struct file * file, const char __user * buf,
if (copy_from_user(page, buf, count))
goto out_free_page;
+ page[count] = '\0';
loginuid = simple_strtoul(page, &tmp, 10);
if (tmp == page) {
length = -EINVAL;
diff --git a/fs/super.c b/fs/super.c
index a66f66bb804..9d5c2add722 100644
--- a/fs/super.c
+++ b/fs/super.c
@@ -231,7 +231,7 @@ void generic_shutdown_super(struct super_block *sb)
if (root) {
sb->s_root = NULL;
shrink_dcache_parent(root);
- shrink_dcache_anon(&sb->s_anon);
+ shrink_dcache_anon(sb);
dput(root);
fsync_super(sb);
lock_super(sb);
diff --git a/fs/xattr.c b/fs/xattr.c
index e416190f5e9..c32f15b5f60 100644
--- a/fs/xattr.c
+++ b/fs/xattr.c
@@ -242,7 +242,7 @@ sys_fsetxattr(int fd, char __user *name, void __user *value,
if (!f)
return error;
dentry = f->f_dentry;
- audit_inode(NULL, dentry->d_inode, 0);
+ audit_inode(NULL, dentry->d_inode);
error = setxattr(dentry, name, value, size, flags);
fput(f);
return error;
@@ -469,7 +469,7 @@ sys_fremovexattr(int fd, char __user *name)
if (!f)
return error;
dentry = f->f_dentry;
- audit_inode(NULL, dentry->d_inode, 0);
+ audit_inode(NULL, dentry->d_inode);
error = removexattr(dentry, name);
fput(f);
return error;
diff --git a/fs/xfs/Kconfig b/fs/xfs/Kconfig
index bac27d66151..26b364c9d62 100644
--- a/fs/xfs/Kconfig
+++ b/fs/xfs/Kconfig
@@ -1,6 +1,5 @@
config XFS_FS
tristate "XFS filesystem support"
- select EXPORTFS if NFSD!=n
help
XFS is a high performance journaling filesystem which originated
on the SGI IRIX platform. It is completely multi-threaded, can
@@ -18,11 +17,6 @@ config XFS_FS
system of your root partition is compiled as a module, you'll need
to use an initial ramdisk (initrd) to boot.
-config XFS_EXPORT
- bool
- depends on XFS_FS && EXPORTFS
- default y
-
config XFS_QUOTA
bool "XFS Quota support"
depends on XFS_FS
@@ -65,18 +59,19 @@ config XFS_POSIX_ACL
If you don't know what Access Control Lists are, say N.
config XFS_RT
- bool "XFS Realtime support (EXPERIMENTAL)"
- depends on XFS_FS && EXPERIMENTAL
+ bool "XFS Realtime subvolume support"
+ depends on XFS_FS
help
If you say Y here you will be able to mount and use XFS filesystems
- which contain a realtime subvolume. The realtime subvolume is a
- separate area of disk space where only file data is stored. The
- realtime subvolume is designed to provide very deterministic
- data rates suitable for media streaming applications.
-
- See the xfs man page in section 5 for a bit more information.
+ which contain a realtime subvolume. The realtime subvolume is a
+ separate area of disk space where only file data is stored. It was
+ originally designed to provide deterministic data rates suitable
+ for media streaming applications, but is also useful as a generic
+ mechanism for ensuring data and metadata/log I/Os are completely
+ separated. Regular file I/Os are isolated to a separate device
+ from all other requests, and this can be done quite transparently
+ to applications via the inherit-realtime directory inode flag.
- This feature is unsupported at this time, is not yet fully
- functional, and may cause serious problems.
+ See the xfs man page in section 5 for additional information.
If unsure, say N.
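A minimal sketch of how the inherit-realtime directory flag mentioned in the help text could be set from userspace, assuming the xfsprogs header <xfs/xfs_fs.h> provides struct fsxattr, the XFS_IOC_FSGETXATTR/FSSETXATTR ioctls and the XFS_XFLAG_RTINHERIT bit (the function name is illustrative only):

	#include <fcntl.h>
	#include <unistd.h>
	#include <sys/ioctl.h>
	#include <xfs/xfs_fs.h>	/* struct fsxattr, XFS_IOC_FS[GS]ETXATTR */

	/* Mark a directory so files created inside it land on the realtime subvolume. */
	static int set_rtinherit(const char *dir)
	{
		struct fsxattr fsx;
		int ret = -1;
		int fd = open(dir, O_RDONLY);

		if (fd < 0)
			return -1;
		if (ioctl(fd, XFS_IOC_FSGETXATTR, &fsx) == 0) {
			fsx.fsx_xflags |= XFS_XFLAG_RTINHERIT;
			ret = ioctl(fd, XFS_IOC_FSSETXATTR, &fsx);
		}
		close(fd);
		return ret;
	}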
diff --git a/fs/xfs/Makefile-linux-2.6 b/fs/xfs/Makefile-linux-2.6
index 5d73eaa1971..9e7f85986d0 100644
--- a/fs/xfs/Makefile-linux-2.6
+++ b/fs/xfs/Makefile-linux-2.6
@@ -59,7 +59,6 @@ xfs-$(CONFIG_XFS_POSIX_ACL) += xfs_acl.o
xfs-$(CONFIG_PROC_FS) += $(XFS_LINUX)/xfs_stats.o
xfs-$(CONFIG_SYSCTL) += $(XFS_LINUX)/xfs_sysctl.o
xfs-$(CONFIG_COMPAT) += $(XFS_LINUX)/xfs_ioctl32.o
-xfs-$(CONFIG_XFS_EXPORT) += $(XFS_LINUX)/xfs_export.o
xfs-y += xfs_alloc.o \
@@ -73,14 +72,12 @@ xfs-y += xfs_alloc.o \
xfs_btree.o \
xfs_buf_item.o \
xfs_da_btree.o \
- xfs_dir.o \
xfs_dir2.o \
xfs_dir2_block.o \
xfs_dir2_data.o \
xfs_dir2_leaf.o \
xfs_dir2_node.o \
xfs_dir2_sf.o \
- xfs_dir_leaf.o \
xfs_error.o \
xfs_extfree_item.o \
xfs_fsops.o \
@@ -117,6 +114,7 @@ xfs-y += $(addprefix $(XFS_LINUX)/, \
kmem.o \
xfs_aops.o \
xfs_buf.o \
+ xfs_export.o \
xfs_file.o \
xfs_fs_subr.o \
xfs_globals.o \
diff --git a/fs/xfs/linux-2.6/kmem.h b/fs/xfs/linux-2.6/kmem.h
index 2cfd33d4d8a..939bd84bc7e 100644
--- a/fs/xfs/linux-2.6/kmem.h
+++ b/fs/xfs/linux-2.6/kmem.h
@@ -23,42 +23,6 @@
#include <linux/mm.h>
/*
- * Process flags handling
- */
-
-#define PFLAGS_TEST_NOIO() (current->flags & PF_NOIO)
-#define PFLAGS_TEST_FSTRANS() (current->flags & PF_FSTRANS)
-
-#define PFLAGS_SET_NOIO() do { \
- current->flags |= PF_NOIO; \
-} while (0)
-
-#define PFLAGS_CLEAR_NOIO() do { \
- current->flags &= ~PF_NOIO; \
-} while (0)
-
-/* these could be nested, so we save state */
-#define PFLAGS_SET_FSTRANS(STATEP) do { \
- *(STATEP) = current->flags; \
- current->flags |= PF_FSTRANS; \
-} while (0)
-
-#define PFLAGS_CLEAR_FSTRANS(STATEP) do { \
- *(STATEP) = current->flags; \
- current->flags &= ~PF_FSTRANS; \
-} while (0)
-
-/* Restore the PF_FSTRANS state to what was saved in STATEP */
-#define PFLAGS_RESTORE_FSTRANS(STATEP) do { \
- current->flags = ((current->flags & ~PF_FSTRANS) | \
- (*(STATEP) & PF_FSTRANS)); \
-} while (0)
-
-#define PFLAGS_DUP(OSTATEP, NSTATEP) do { \
- *(NSTATEP) = *(OSTATEP); \
-} while (0)
-
-/*
* General memory allocation interfaces
*/
@@ -83,7 +47,7 @@ kmem_flags_convert(unsigned int __nocast flags)
lflags = GFP_ATOMIC | __GFP_NOWARN;
} else {
lflags = GFP_KERNEL | __GFP_NOWARN;
- if (PFLAGS_TEST_FSTRANS() || (flags & KM_NOFS))
+ if ((current->flags & PF_FSTRANS) || (flags & KM_NOFS))
lflags &= ~__GFP_FS;
}
return lflags;
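With the hunk above, kmem_flags_convert() reads PF_FSTRANS straight off current->flags: a sleeping allocation loses __GFP_FS either when the caller passes KM_NOFS or when the task is already inside a transaction. A sketch of an assumed call site, not taken from the patch, to illustrate the intent:

	/*
	 * Inside a transaction PF_FSTRANS is set on the task, so even a plain
	 * KM_SLEEP allocation is mapped to GFP_KERNEL without __GFP_FS and
	 * cannot recurse back into the filesystem; a caller can also ask for
	 * this explicitly:
	 */
	void	*buffer = kmem_alloc(size, KM_SLEEP | KM_NOFS);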
diff --git a/fs/xfs/linux-2.6/mrlock.h b/fs/xfs/linux-2.6/mrlock.h
index 1b262b790d9..32e1ce0f04c 100644
--- a/fs/xfs/linux-2.6/mrlock.h
+++ b/fs/xfs/linux-2.6/mrlock.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2000-2005 Silicon Graphics, Inc.
+ * Copyright (c) 2000-2006 Silicon Graphics, Inc.
* All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
@@ -28,7 +28,7 @@ typedef struct {
} mrlock_t;
#define mrinit(mrp, name) \
- ( (mrp)->mr_writer = 0, init_rwsem(&(mrp)->mr_lock) )
+ do { (mrp)->mr_writer = 0; init_rwsem(&(mrp)->mr_lock); } while (0)
#define mrlock_init(mrp, t,n,s) mrinit(mrp, n)
#define mrfree(mrp) do { } while (0)
#define mraccess(mrp) mraccessf(mrp, 0)
diff --git a/fs/xfs/linux-2.6/sema.h b/fs/xfs/linux-2.6/sema.h
index 194a84490bd..b25090094cc 100644
--- a/fs/xfs/linux-2.6/sema.h
+++ b/fs/xfs/linux-2.6/sema.h
@@ -34,20 +34,21 @@ typedef struct semaphore sema_t;
#define initnsema(sp, val, name) sema_init(sp, val)
#define psema(sp, b) down(sp)
#define vsema(sp) up(sp)
-#define valusema(sp) (atomic_read(&(sp)->count))
-#define freesema(sema)
+#define freesema(sema) do { } while (0)
+
+static inline int issemalocked(sema_t *sp)
+{
+ return down_trylock(sp) || (up(sp), 0);
+}
/*
* Map cpsema (try to get the sema) to down_trylock. We need to switch
* the return values since cpsema returns 1 (acquired) 0 (failed) and
* down_trylock returns the reverse 0 (acquired) 1 (failed).
*/
-
-#define cpsema(sp) (down_trylock(sp) ? 0 : 1)
-
-/*
- * Didn't do cvsema(sp). Not sure how to map this to up/down/...
- * It does a vsema if the values is < 0 other wise nothing.
- */
+static inline int cpsema(sema_t *sp)
+{
+ return down_trylock(sp) ? 0 : 1;
+}
#endif /* __XFS_SUPPORT_SEMA_H__ */
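A short sketch of how the two new inlines behave, assuming a caller that owns a sema_t (names are illustrative only):

	sema_t	iolock;

	initnsema(&iolock, 1, "io");		/* starts unlocked                  */

	if (cpsema(&iolock)) {			/* 1 == acquired, 0 == already held */
		/*
		 * issemalocked(&iolock) is now nonzero: its embedded trylock
		 * fails while we hold the semaphore, so it reports "locked"
		 * without disturbing the count.  Useful only as a debug check.
		 */
		vsema(&iolock);			/* release                          */
	}
	freesema(&iolock);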
diff --git a/fs/xfs/linux-2.6/xfs_aops.c b/fs/xfs/linux-2.6/xfs_aops.c
index 4d191ef39b6..3e807b828e2 100644
--- a/fs/xfs/linux-2.6/xfs_aops.c
+++ b/fs/xfs/linux-2.6/xfs_aops.c
@@ -21,7 +21,6 @@
#include "xfs_inum.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
-#include "xfs_dir.h"
#include "xfs_dir2.h"
#include "xfs_trans.h"
#include "xfs_dmapi.h"
@@ -29,7 +28,6 @@
#include "xfs_bmap_btree.h"
#include "xfs_alloc_btree.h"
#include "xfs_ialloc_btree.h"
-#include "xfs_dir_sf.h"
#include "xfs_dir2_sf.h"
#include "xfs_attr_sf.h"
#include "xfs_dinode.h"
@@ -76,7 +74,7 @@ xfs_page_trace(
int mask)
{
xfs_inode_t *ip;
- vnode_t *vp = vn_from_inode(inode);
+ bhv_vnode_t *vp = vn_from_inode(inode);
loff_t isize = i_size_read(inode);
loff_t offset = page_offset(page);
int delalloc = -1, unmapped = -1, unwritten = -1;
@@ -136,9 +134,10 @@ xfs_destroy_ioend(
for (bh = ioend->io_buffer_head; bh; bh = next) {
next = bh->b_private;
- bh->b_end_io(bh, ioend->io_uptodate);
+ bh->b_end_io(bh, !ioend->io_error);
}
-
+ if (unlikely(ioend->io_error))
+ vn_ioerror(ioend->io_vnode, ioend->io_error, __FILE__,__LINE__);
vn_iowake(ioend->io_vnode);
mempool_free(ioend, xfs_ioend_pool);
}
@@ -180,13 +179,12 @@ xfs_end_bio_unwritten(
void *data)
{
xfs_ioend_t *ioend = data;
- vnode_t *vp = ioend->io_vnode;
+ bhv_vnode_t *vp = ioend->io_vnode;
xfs_off_t offset = ioend->io_offset;
size_t size = ioend->io_size;
- int error;
- if (ioend->io_uptodate)
- VOP_BMAP(vp, offset, size, BMAPI_UNWRITTEN, NULL, NULL, error);
+ if (likely(!ioend->io_error))
+ bhv_vop_bmap(vp, offset, size, BMAPI_UNWRITTEN, NULL, NULL);
xfs_destroy_ioend(ioend);
}
@@ -211,7 +209,7 @@ xfs_alloc_ioend(
* all the I/O from calling the completion routine too early.
*/
atomic_set(&ioend->io_remaining, 1);
- ioend->io_uptodate = 1; /* cleared if any I/O fails */
+ ioend->io_error = 0;
ioend->io_list = NULL;
ioend->io_type = type;
ioend->io_vnode = vn_from_inode(inode);
@@ -239,10 +237,10 @@ xfs_map_blocks(
xfs_iomap_t *mapp,
int flags)
{
- vnode_t *vp = vn_from_inode(inode);
+ bhv_vnode_t *vp = vn_from_inode(inode);
int error, nmaps = 1;
- VOP_BMAP(vp, offset, count, flags, mapp, &nmaps, error);
+ error = bhv_vop_bmap(vp, offset, count, flags, mapp, &nmaps);
if (!error && (flags & (BMAPI_WRITE|BMAPI_ALLOCATE)))
VMODIFY(vp);
return -error;
@@ -271,16 +269,14 @@ xfs_end_bio(
if (bio->bi_size)
return 1;
- ASSERT(ioend);
ASSERT(atomic_read(&bio->bi_cnt) >= 1);
+ ioend->io_error = test_bit(BIO_UPTODATE, &bio->bi_flags) ? 0 : error;
/* Toss bio and pass work off to an xfsdatad thread */
- if (!test_bit(BIO_UPTODATE, &bio->bi_flags))
- ioend->io_uptodate = 0;
bio->bi_private = NULL;
bio->bi_end_io = NULL;
-
bio_put(bio);
+
xfs_finish_ioend(ioend);
return 0;
}
@@ -1127,7 +1123,7 @@ xfs_vm_writepage(
* then mark the page dirty again and leave the page
* as is.
*/
- if (PFLAGS_TEST_FSTRANS() && need_trans)
+ if (current_test_flags(PF_FSTRANS) && need_trans)
goto out_fail;
/*
@@ -1158,6 +1154,18 @@ out_unlock:
return error;
}
+STATIC int
+xfs_vm_writepages(
+ struct address_space *mapping,
+ struct writeback_control *wbc)
+{
+ struct bhv_vnode *vp = vn_from_inode(mapping->host);
+
+ if (VN_TRUNC(vp))
+ VUNTRUNCATE(vp);
+ return generic_writepages(mapping, wbc);
+}
+
/*
* Called to move a page into cleanable state - and from there
* to be released. Possibly the page is already clean. We always
@@ -1204,7 +1212,7 @@ xfs_vm_releasepage(
/* If we are already inside a transaction or the thread cannot
* do I/O, we cannot release this page.
*/
- if (PFLAGS_TEST_FSTRANS())
+ if (current_test_flags(PF_FSTRANS))
return 0;
/*
@@ -1231,7 +1239,7 @@ __xfs_get_blocks(
int direct,
bmapi_flags_t flags)
{
- vnode_t *vp = vn_from_inode(inode);
+ bhv_vnode_t *vp = vn_from_inode(inode);
xfs_iomap_t iomap;
xfs_off_t offset;
ssize_t size;
@@ -1241,8 +1249,8 @@ __xfs_get_blocks(
offset = (xfs_off_t)iblock << inode->i_blkbits;
ASSERT(bh_result->b_size >= (1 << inode->i_blkbits));
size = bh_result->b_size;
- VOP_BMAP(vp, offset, size,
- create ? flags : BMAPI_READ, &iomap, &niomap, error);
+ error = bhv_vop_bmap(vp, offset, size,
+ create ? flags : BMAPI_READ, &iomap, &niomap);
if (error)
return -error;
if (niomap == 0)
@@ -1370,13 +1378,13 @@ xfs_vm_direct_IO(
{
struct file *file = iocb->ki_filp;
struct inode *inode = file->f_mapping->host;
- vnode_t *vp = vn_from_inode(inode);
+ bhv_vnode_t *vp = vn_from_inode(inode);
xfs_iomap_t iomap;
int maps = 1;
int error;
ssize_t ret;
- VOP_BMAP(vp, offset, 0, BMAPI_DEVICE, &iomap, &maps, error);
+ error = bhv_vop_bmap(vp, offset, 0, BMAPI_DEVICE, &iomap, &maps);
if (error)
return -error;
@@ -1409,14 +1417,12 @@ xfs_vm_bmap(
sector_t block)
{
struct inode *inode = (struct inode *)mapping->host;
- vnode_t *vp = vn_from_inode(inode);
- int error;
+ bhv_vnode_t *vp = vn_from_inode(inode);
vn_trace_entry(vp, __FUNCTION__, (inst_t *)__return_address);
-
- VOP_RWLOCK(vp, VRWLOCK_READ);
- VOP_FLUSH_PAGES(vp, (xfs_off_t)0, -1, 0, FI_REMAPF, error);
- VOP_RWUNLOCK(vp, VRWLOCK_READ);
+ bhv_vop_rwlock(vp, VRWLOCK_READ);
+ bhv_vop_flush_pages(vp, (xfs_off_t)0, -1, 0, FI_REMAPF);
+ bhv_vop_rwunlock(vp, VRWLOCK_READ);
return generic_block_bmap(mapping, block, xfs_get_blocks);
}
@@ -1452,6 +1458,7 @@ struct address_space_operations xfs_address_space_operations = {
.readpage = xfs_vm_readpage,
.readpages = xfs_vm_readpages,
.writepage = xfs_vm_writepage,
+ .writepages = xfs_vm_writepages,
.sync_page = block_sync_page,
.releasepage = xfs_vm_releasepage,
.invalidatepage = xfs_vm_invalidatepage,
diff --git a/fs/xfs/linux-2.6/xfs_aops.h b/fs/xfs/linux-2.6/xfs_aops.h
index 60716543c68..706d8c781b8 100644
--- a/fs/xfs/linux-2.6/xfs_aops.h
+++ b/fs/xfs/linux-2.6/xfs_aops.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2005 Silicon Graphics, Inc.
+ * Copyright (c) 2005-2006 Silicon Graphics, Inc.
* All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
@@ -30,9 +30,9 @@ typedef void (*xfs_ioend_func_t)(void *);
typedef struct xfs_ioend {
struct xfs_ioend *io_list; /* next ioend in chain */
unsigned int io_type; /* delalloc / unwritten */
- unsigned int io_uptodate; /* I/O status register */
+ int io_error; /* I/O error code */
atomic_t io_remaining; /* hold count */
- struct vnode *io_vnode; /* file being written to */
+ struct bhv_vnode *io_vnode; /* file being written to */
struct buffer_head *io_buffer_head;/* buffer linked list head */
struct buffer_head *io_buffer_tail;/* buffer linked list tail */
size_t io_size; /* size of the extent */
@@ -43,4 +43,4 @@ typedef struct xfs_ioend {
extern struct address_space_operations xfs_address_space_operations;
extern int xfs_get_blocks(struct inode *, sector_t, struct buffer_head *, int);
-#endif /* __XFS_IOPS_H__ */
+#endif /* __XFS_AOPS_H__ */
diff --git a/fs/xfs/linux-2.6/xfs_export.c b/fs/xfs/linux-2.6/xfs_export.c
index b768ea910bb..5fb75d9151f 100644
--- a/fs/xfs/linux-2.6/xfs_export.c
+++ b/fs/xfs/linux-2.6/xfs_export.c
@@ -21,7 +21,6 @@
#include "xfs_log.h"
#include "xfs_trans.h"
#include "xfs_sb.h"
-#include "xfs_dir.h"
#include "xfs_mount.h"
#include "xfs_export.h"
@@ -97,7 +96,7 @@ xfs_fs_encode_fh(
int len;
int is64 = 0;
#if XFS_BIG_INUMS
- vfs_t *vfs = vfs_from_sb(inode->i_sb);
+ bhv_vfs_t *vfs = vfs_from_sb(inode->i_sb);
if (!(vfs->vfs_flag & VFS_32BITINODES)) {
/* filesystem may contain 64bit inode numbers */
@@ -136,13 +135,13 @@ xfs_fs_get_dentry(
struct super_block *sb,
void *data)
{
- vnode_t *vp;
+ bhv_vnode_t *vp;
struct inode *inode;
struct dentry *result;
- vfs_t *vfsp = vfs_from_sb(sb);
+ bhv_vfs_t *vfsp = vfs_from_sb(sb);
int error;
- VFS_VGET(vfsp, &vp, (fid_t *)data, error);
+ error = bhv_vfs_vget(vfsp, &vp, (fid_t *)data);
if (error || vp == NULL)
return ERR_PTR(-ESTALE) ;
@@ -160,12 +159,12 @@ xfs_fs_get_parent(
struct dentry *child)
{
int error;
- vnode_t *vp, *cvp;
+ bhv_vnode_t *vp, *cvp;
struct dentry *parent;
cvp = NULL;
vp = vn_from_inode(child->d_inode);
- VOP_LOOKUP(vp, &dotdot, &cvp, 0, NULL, NULL, error);
+ error = bhv_vop_lookup(vp, &dotdot, &cvp, 0, NULL, NULL);
if (unlikely(error))
return ERR_PTR(-error);
diff --git a/fs/xfs/linux-2.6/xfs_file.c b/fs/xfs/linux-2.6/xfs_file.c
index c847416f6d1..70662371bb1 100644
--- a/fs/xfs/linux-2.6/xfs_file.c
+++ b/fs/xfs/linux-2.6/xfs_file.c
@@ -21,7 +21,6 @@
#include "xfs_inum.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
-#include "xfs_dir.h"
#include "xfs_dir2.h"
#include "xfs_trans.h"
#include "xfs_dmapi.h"
@@ -32,7 +31,6 @@
#include "xfs_alloc.h"
#include "xfs_btree.h"
#include "xfs_attr_sf.h"
-#include "xfs_dir_sf.h"
#include "xfs_dir2_sf.h"
#include "xfs_dinode.h"
#include "xfs_inode.h"
@@ -58,15 +56,12 @@ __xfs_file_read(
{
struct iovec iov = {buf, count};
struct file *file = iocb->ki_filp;
- vnode_t *vp = vn_from_inode(file->f_dentry->d_inode);
- ssize_t rval;
+ bhv_vnode_t *vp = vn_from_inode(file->f_dentry->d_inode);
BUG_ON(iocb->ki_pos != pos);
-
if (unlikely(file->f_flags & O_DIRECT))
ioflags |= IO_ISDIRECT;
- VOP_READ(vp, iocb, &iov, 1, &iocb->ki_pos, ioflags, NULL, rval);
- return rval;
+ return bhv_vop_read(vp, iocb, &iov, 1, &iocb->ki_pos, ioflags, NULL);
}
STATIC ssize_t
@@ -100,15 +95,12 @@ __xfs_file_write(
struct iovec iov = {(void __user *)buf, count};
struct file *file = iocb->ki_filp;
struct inode *inode = file->f_mapping->host;
- vnode_t *vp = vn_from_inode(inode);
- ssize_t rval;
+ bhv_vnode_t *vp = vn_from_inode(inode);
BUG_ON(iocb->ki_pos != pos);
if (unlikely(file->f_flags & O_DIRECT))
ioflags |= IO_ISDIRECT;
-
- VOP_WRITE(vp, iocb, &iov, 1, &iocb->ki_pos, ioflags, NULL, rval);
- return rval;
+ return bhv_vop_write(vp, iocb, &iov, 1, &iocb->ki_pos, ioflags, NULL);
}
STATIC ssize_t
@@ -140,7 +132,7 @@ __xfs_file_readv(
loff_t *ppos)
{
struct inode *inode = file->f_mapping->host;
- vnode_t *vp = vn_from_inode(inode);
+ bhv_vnode_t *vp = vn_from_inode(inode);
struct kiocb kiocb;
ssize_t rval;
@@ -149,7 +141,8 @@ __xfs_file_readv(
if (unlikely(file->f_flags & O_DIRECT))
ioflags |= IO_ISDIRECT;
- VOP_READ(vp, &kiocb, iov, nr_segs, &kiocb.ki_pos, ioflags, NULL, rval);
+ rval = bhv_vop_read(vp, &kiocb, iov, nr_segs,
+ &kiocb.ki_pos, ioflags, NULL);
*ppos = kiocb.ki_pos;
return rval;
@@ -184,7 +177,7 @@ __xfs_file_writev(
loff_t *ppos)
{
struct inode *inode = file->f_mapping->host;
- vnode_t *vp = vn_from_inode(inode);
+ bhv_vnode_t *vp = vn_from_inode(inode);
struct kiocb kiocb;
ssize_t rval;
@@ -193,7 +186,8 @@ __xfs_file_writev(
if (unlikely(file->f_flags & O_DIRECT))
ioflags |= IO_ISDIRECT;
- VOP_WRITE(vp, &kiocb, iov, nr_segs, &kiocb.ki_pos, ioflags, NULL, rval);
+ rval = bhv_vop_write(vp, &kiocb, iov, nr_segs,
+ &kiocb.ki_pos, ioflags, NULL);
*ppos = kiocb.ki_pos;
return rval;
@@ -227,11 +221,8 @@ xfs_file_sendfile(
read_actor_t actor,
void *target)
{
- vnode_t *vp = vn_from_inode(filp->f_dentry->d_inode);
- ssize_t rval;
-
- VOP_SENDFILE(vp, filp, pos, 0, count, actor, target, NULL, rval);
- return rval;
+ return bhv_vop_sendfile(vn_from_inode(filp->f_dentry->d_inode),
+ filp, pos, 0, count, actor, target, NULL);
}
STATIC ssize_t
@@ -242,11 +233,8 @@ xfs_file_sendfile_invis(
read_actor_t actor,
void *target)
{
- vnode_t *vp = vn_from_inode(filp->f_dentry->d_inode);
- ssize_t rval;
-
- VOP_SENDFILE(vp, filp, pos, IO_INVIS, count, actor, target, NULL, rval);
- return rval;
+ return bhv_vop_sendfile(vn_from_inode(filp->f_dentry->d_inode),
+ filp, pos, IO_INVIS, count, actor, target, NULL);
}
STATIC ssize_t
@@ -257,11 +245,8 @@ xfs_file_splice_read(
size_t len,
unsigned int flags)
{
- vnode_t *vp = vn_from_inode(infilp->f_dentry->d_inode);
- ssize_t rval;
-
- VOP_SPLICE_READ(vp, infilp, ppos, pipe, len, flags, 0, NULL, rval);
- return rval;
+ return bhv_vop_splice_read(vn_from_inode(infilp->f_dentry->d_inode),
+ infilp, ppos, pipe, len, flags, 0, NULL);
}
STATIC ssize_t
@@ -272,11 +257,9 @@ xfs_file_splice_read_invis(
size_t len,
unsigned int flags)
{
- vnode_t *vp = vn_from_inode(infilp->f_dentry->d_inode);
- ssize_t rval;
-
- VOP_SPLICE_READ(vp, infilp, ppos, pipe, len, flags, IO_INVIS, NULL, rval);
- return rval;
+ return bhv_vop_splice_read(vn_from_inode(infilp->f_dentry->d_inode),
+ infilp, ppos, pipe, len, flags, IO_INVIS,
+ NULL);
}
STATIC ssize_t
@@ -287,11 +270,8 @@ xfs_file_splice_write(
size_t len,
unsigned int flags)
{
- vnode_t *vp = vn_from_inode(outfilp->f_dentry->d_inode);
- ssize_t rval;
-
- VOP_SPLICE_WRITE(vp, pipe, outfilp, ppos, len, flags, 0, NULL, rval);
- return rval;
+ return bhv_vop_splice_write(vn_from_inode(outfilp->f_dentry->d_inode),
+ pipe, outfilp, ppos, len, flags, 0, NULL);
}
STATIC ssize_t
@@ -302,11 +282,9 @@ xfs_file_splice_write_invis(
size_t len,
unsigned int flags)
{
- vnode_t *vp = vn_from_inode(outfilp->f_dentry->d_inode);
- ssize_t rval;
-
- VOP_SPLICE_WRITE(vp, pipe, outfilp, ppos, len, flags, IO_INVIS, NULL, rval);
- return rval;
+ return bhv_vop_splice_write(vn_from_inode(outfilp->f_dentry->d_inode),
+ pipe, outfilp, ppos, len, flags, IO_INVIS,
+ NULL);
}
STATIC int
@@ -314,13 +292,17 @@ xfs_file_open(
struct inode *inode,
struct file *filp)
{
- vnode_t *vp = vn_from_inode(inode);
- int error;
-
if (!(filp->f_flags & O_LARGEFILE) && i_size_read(inode) > MAX_NON_LFS)
return -EFBIG;
- VOP_OPEN(vp, NULL, error);
- return -error;
+ return -bhv_vop_open(vn_from_inode(inode), NULL);
+}
+
+STATIC int
+xfs_file_close(
+ struct file *filp)
+{
+ return -bhv_vop_close(vn_from_inode(filp->f_dentry->d_inode), 0,
+ file_count(filp) > 1 ? L_FALSE : L_TRUE, NULL);
}
STATIC int
@@ -328,12 +310,11 @@ xfs_file_release(
struct inode *inode,
struct file *filp)
{
- vnode_t *vp = vn_from_inode(inode);
- int error = 0;
+ bhv_vnode_t *vp = vn_from_inode(inode);
if (vp)
- VOP_RELEASE(vp, error);
- return -error;
+ return -bhv_vop_release(vp);
+ return 0;
}
STATIC int
@@ -342,15 +323,14 @@ xfs_file_fsync(
struct dentry *dentry,
int datasync)
{
- struct inode *inode = dentry->d_inode;
- vnode_t *vp = vn_from_inode(inode);
- int error;
+ bhv_vnode_t *vp = vn_from_inode(dentry->d_inode);
int flags = FSYNC_WAIT;
if (datasync)
flags |= FSYNC_DATA;
- VOP_FSYNC(vp, flags, NULL, (xfs_off_t)0, (xfs_off_t)-1, error);
- return -error;
+ if (VN_TRUNC(vp))
+ VUNTRUNCATE(vp);
+ return -bhv_vop_fsync(vp, flags, NULL, (xfs_off_t)0, (xfs_off_t)-1);
}
#ifdef CONFIG_XFS_DMAPI
@@ -361,16 +341,11 @@ xfs_vm_nopage(
int *type)
{
struct inode *inode = area->vm_file->f_dentry->d_inode;
- vnode_t *vp = vn_from_inode(inode);
- xfs_mount_t *mp = XFS_VFSTOM(vp->v_vfsp);
- int error;
+ bhv_vnode_t *vp = vn_from_inode(inode);
ASSERT_ALWAYS(vp->v_vfsp->vfs_flag & VFS_DMI);
-
- error = XFS_SEND_MMAP(mp, area, 0);
- if (error)
+ if (XFS_SEND_MMAP(XFS_VFSTOM(vp->v_vfsp), area, 0))
return NULL;
-
return filemap_nopage(area, address, type);
}
#endif /* CONFIG_XFS_DMAPI */
@@ -382,7 +357,7 @@ xfs_file_readdir(
filldir_t filldir)
{
int error = 0;
- vnode_t *vp = vn_from_inode(filp->f_dentry->d_inode);
+ bhv_vnode_t *vp = vn_from_inode(filp->f_dentry->d_inode);
uio_t uio;
iovec_t iov;
int eof = 0;
@@ -417,7 +392,7 @@ xfs_file_readdir(
start_offset = uio.uio_offset;
- VOP_READDIR(vp, &uio, NULL, &eof, error);
+ error = bhv_vop_readdir(vp, &uio, NULL, &eof);
if ((uio.uio_offset == start_offset) || error) {
size = 0;
break;
@@ -456,38 +431,28 @@ xfs_file_mmap(
struct file *filp,
struct vm_area_struct *vma)
{
- struct inode *ip = filp->f_dentry->d_inode;
- vnode_t *vp = vn_from_inode(ip);
- vattr_t vattr;
- int error;
-
vma->vm_ops = &xfs_file_vm_ops;
#ifdef CONFIG_XFS_DMAPI
- if (vp->v_vfsp->vfs_flag & VFS_DMI) {
+ if (vn_from_inode(filp->f_dentry->d_inode)->v_vfsp->vfs_flag & VFS_DMI)
vma->vm_ops = &xfs_dmapi_file_vm_ops;
- }
#endif /* CONFIG_XFS_DMAPI */
- vattr.va_mask = XFS_AT_UPDATIME;
- VOP_SETATTR(vp, &vattr, XFS_AT_UPDATIME, NULL, error);
- if (likely(!error))
- __vn_revalidate(vp, &vattr); /* update flags */
+ file_accessed(filp);
return 0;
}
-
STATIC long
xfs_file_ioctl(
struct file *filp,
unsigned int cmd,
- unsigned long arg)
+ unsigned long p)
{
int error;
struct inode *inode = filp->f_dentry->d_inode;
- vnode_t *vp = vn_from_inode(inode);
+ bhv_vnode_t *vp = vn_from_inode(inode);
- VOP_IOCTL(vp, inode, filp, 0, cmd, (void __user *)arg, error);
+ error = bhv_vop_ioctl(vp, inode, filp, 0, cmd, (void __user *)p);
VMODIFY(vp);
/* NOTE: some of the ioctl's return positive #'s as a
@@ -503,13 +468,13 @@ STATIC long
xfs_file_ioctl_invis(
struct file *filp,
unsigned int cmd,
- unsigned long arg)
+ unsigned long p)
{
- struct inode *inode = filp->f_dentry->d_inode;
- vnode_t *vp = vn_from_inode(inode);
int error;
+ struct inode *inode = filp->f_dentry->d_inode;
+ bhv_vnode_t *vp = vn_from_inode(inode);
- VOP_IOCTL(vp, inode, filp, IO_INVIS, cmd, (void __user *)arg, error);
+ error = bhv_vop_ioctl(vp, inode, filp, IO_INVIS, cmd, (void __user *)p);
VMODIFY(vp);
/* NOTE: some of the ioctl's return positive #'s as a
@@ -528,7 +493,7 @@ xfs_vm_mprotect(
struct vm_area_struct *vma,
unsigned int newflags)
{
- vnode_t *vp = vn_from_inode(vma->vm_file->f_dentry->d_inode);
+ bhv_vnode_t *vp = vn_from_inode(vma->vm_file->f_dentry->d_inode);
int error = 0;
if (vp->v_vfsp->vfs_flag & VFS_DMI) {
@@ -554,24 +519,19 @@ STATIC int
xfs_file_open_exec(
struct inode *inode)
{
- vnode_t *vp = vn_from_inode(inode);
- xfs_mount_t *mp = XFS_VFSTOM(vp->v_vfsp);
- int error = 0;
- xfs_inode_t *ip;
+ bhv_vnode_t *vp = vn_from_inode(inode);
- if (vp->v_vfsp->vfs_flag & VFS_DMI) {
- ip = xfs_vtoi(vp);
- if (!ip) {
- error = -EINVAL;
- goto open_exec_out;
- }
- if (DM_EVENT_ENABLED(vp->v_vfsp, ip, DM_EVENT_READ)) {
- error = -XFS_SEND_DATA(mp, DM_EVENT_READ, vp,
+ if (unlikely(vp->v_vfsp->vfs_flag & VFS_DMI)) {
+ xfs_mount_t *mp = XFS_VFSTOM(vp->v_vfsp);
+ xfs_inode_t *ip = xfs_vtoi(vp);
+
+ if (!ip)
+ return -EINVAL;
+ if (DM_EVENT_ENABLED(vp->v_vfsp, ip, DM_EVENT_READ))
+ return -XFS_SEND_DATA(mp, DM_EVENT_READ, vp,
0, 0, 0, NULL);
- }
}
-open_exec_out:
- return error;
+ return 0;
}
#endif /* HAVE_FOP_OPEN_EXEC */
@@ -592,6 +552,7 @@ const struct file_operations xfs_file_operations = {
#endif
.mmap = xfs_file_mmap,
.open = xfs_file_open,
+ .flush = xfs_file_close,
.release = xfs_file_release,
.fsync = xfs_file_fsync,
#ifdef HAVE_FOP_OPEN_EXEC
@@ -616,6 +577,7 @@ const struct file_operations xfs_invis_file_operations = {
#endif
.mmap = xfs_file_mmap,
.open = xfs_file_open,
+ .flush = xfs_file_close,
.release = xfs_file_release,
.fsync = xfs_file_fsync,
};
diff --git a/fs/xfs/linux-2.6/xfs_fs_subr.c b/fs/xfs/linux-2.6/xfs_fs_subr.c
index 575f2a790f3..dc0562828e7 100644
--- a/fs/xfs/linux-2.6/xfs_fs_subr.c
+++ b/fs/xfs/linux-2.6/xfs_fs_subr.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2000-2002,2005 Silicon Graphics, Inc.
+ * Copyright (c) 2000-2002,2005-2006 Silicon Graphics, Inc.
* All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
@@ -15,40 +15,12 @@
* along with this program; if not, write the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*/
-
#include "xfs.h"
-/*
- * Stub for no-op vnode operations that return error status.
- */
-int
-fs_noerr(void)
-{
- return 0;
-}
+int fs_noerr(void) { return 0; }
+int fs_nosys(void) { return ENOSYS; }
+void fs_noval(void) { return; }
-/*
- * Operation unsupported under this file system.
- */
-int
-fs_nosys(void)
-{
- return ENOSYS;
-}
-
-/*
- * Stub for inactive, strategy, and read/write lock/unlock. Does nothing.
- */
-/* ARGSUSED */
-void
-fs_noval(void)
-{
-}
-
-/*
- * vnode pcache layer for vnode_tosspages.
- * 'last' parameter unused but left in for IRIX compatibility
- */
void
fs_tosspages(
bhv_desc_t *bdp,
@@ -56,18 +28,13 @@ fs_tosspages(
xfs_off_t last,
int fiopt)
{
- vnode_t *vp = BHV_TO_VNODE(bdp);
+ bhv_vnode_t *vp = BHV_TO_VNODE(bdp);
struct inode *ip = vn_to_inode(vp);
if (VN_CACHED(vp))
truncate_inode_pages(ip->i_mapping, first);
}
-
-/*
- * vnode pcache layer for vnode_flushinval_pages.
- * 'last' parameter unused but left in for IRIX compatibility
- */
void
fs_flushinval_pages(
bhv_desc_t *bdp,
@@ -75,20 +42,17 @@ fs_flushinval_pages(
xfs_off_t last,
int fiopt)
{
- vnode_t *vp = BHV_TO_VNODE(bdp);
+ bhv_vnode_t *vp = BHV_TO_VNODE(bdp);
struct inode *ip = vn_to_inode(vp);
if (VN_CACHED(vp)) {
+ if (VN_TRUNC(vp))
+ VUNTRUNCATE(vp);
filemap_write_and_wait(ip->i_mapping);
-
truncate_inode_pages(ip->i_mapping, first);
}
}
-/*
- * vnode pcache layer for vnode_flush_pages.
- * 'last' parameter unused but left in for IRIX compatibility
- */
int
fs_flush_pages(
bhv_desc_t *bdp,
@@ -97,15 +61,16 @@ fs_flush_pages(
uint64_t flags,
int fiopt)
{
- vnode_t *vp = BHV_TO_VNODE(bdp);
+ bhv_vnode_t *vp = BHV_TO_VNODE(bdp);
struct inode *ip = vn_to_inode(vp);
- if (VN_CACHED(vp)) {
+ if (VN_DIRTY(vp)) {
+ if (VN_TRUNC(vp))
+ VUNTRUNCATE(vp);
filemap_fdatawrite(ip->i_mapping);
if (flags & XFS_B_ASYNC)
return 0;
filemap_fdatawait(ip->i_mapping);
}
-
return 0;
}
diff --git a/fs/xfs/linux-2.6/xfs_globals.c b/fs/xfs/linux-2.6/xfs_globals.c
index 6e8085f3463..6c162c3dde7 100644
--- a/fs/xfs/linux-2.6/xfs_globals.c
+++ b/fs/xfs/linux-2.6/xfs_globals.c
@@ -45,6 +45,7 @@ xfs_param_t xfs_params = {
.xfs_buf_age = { 1*100, 15*100, 7200*100},
.inherit_nosym = { 0, 0, 1 },
.rotorstep = { 1, 1, 255 },
+ .inherit_nodfrg = { 0, 1, 1 },
};
/*
diff --git a/fs/xfs/linux-2.6/xfs_ioctl.c b/fs/xfs/linux-2.6/xfs_ioctl.c
index 84478491609..6e52a5dd38d 100644
--- a/fs/xfs/linux-2.6/xfs_ioctl.c
+++ b/fs/xfs/linux-2.6/xfs_ioctl.c
@@ -23,7 +23,6 @@
#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
-#include "xfs_dir.h"
#include "xfs_dir2.h"
#include "xfs_alloc.h"
#include "xfs_dmapi.h"
@@ -31,7 +30,6 @@
#include "xfs_bmap_btree.h"
#include "xfs_alloc_btree.h"
#include "xfs_ialloc_btree.h"
-#include "xfs_dir_sf.h"
#include "xfs_attr_sf.h"
#include "xfs_dir2_sf.h"
#include "xfs_dinode.h"
@@ -78,7 +76,7 @@ xfs_find_handle(
xfs_handle_t handle;
xfs_fsop_handlereq_t hreq;
struct inode *inode;
- struct vnode *vp;
+ bhv_vnode_t *vp;
if (copy_from_user(&hreq, arg, sizeof(hreq)))
return -XFS_ERROR(EFAULT);
@@ -192,7 +190,7 @@ xfs_vget_fsop_handlereq(
xfs_mount_t *mp,
struct inode *parinode, /* parent inode pointer */
xfs_fsop_handlereq_t *hreq,
- vnode_t **vp,
+ bhv_vnode_t **vp,
struct inode **inode)
{
void __user *hanp;
@@ -202,7 +200,7 @@ xfs_vget_fsop_handlereq(
xfs_handle_t handle;
xfs_inode_t *ip;
struct inode *inodep;
- vnode_t *vpp;
+ bhv_vnode_t *vpp;
xfs_ino_t ino;
__u32 igen;
int error;
@@ -277,7 +275,7 @@ xfs_open_by_handle(
struct file *filp;
struct inode *inode;
struct dentry *dentry;
- vnode_t *vp;
+ bhv_vnode_t *vp;
xfs_fsop_handlereq_t hreq;
if (!capable(CAP_SYS_ADMIN))
@@ -362,7 +360,7 @@ xfs_readlink_by_handle(
struct uio auio;
struct inode *inode;
xfs_fsop_handlereq_t hreq;
- vnode_t *vp;
+ bhv_vnode_t *vp;
__u32 olen;
if (!capable(CAP_SYS_ADMIN))
@@ -393,9 +391,11 @@ xfs_readlink_by_handle(
auio.uio_segflg = UIO_USERSPACE;
auio.uio_resid = olen;
- VOP_READLINK(vp, &auio, IO_INVIS, NULL, error);
-
+ error = bhv_vop_readlink(vp, &auio, IO_INVIS, NULL);
VN_RELE(vp);
+ if (error)
+ return -error;
+
return (olen - auio.uio_resid);
}
@@ -411,7 +411,7 @@ xfs_fssetdm_by_handle(
xfs_fsop_setdm_handlereq_t dmhreq;
struct inode *inode;
bhv_desc_t *bdp;
- vnode_t *vp;
+ bhv_vnode_t *vp;
if (!capable(CAP_MKNOD))
return -XFS_ERROR(EPERM);
@@ -452,7 +452,7 @@ xfs_attrlist_by_handle(
attrlist_cursor_kern_t *cursor;
xfs_fsop_attrlist_handlereq_t al_hreq;
struct inode *inode;
- vnode_t *vp;
+ bhv_vnode_t *vp;
char *kbuf;
if (!capable(CAP_SYS_ADMIN))
@@ -472,8 +472,8 @@ xfs_attrlist_by_handle(
goto out_vn_rele;
cursor = (attrlist_cursor_kern_t *)&al_hreq.pos;
- VOP_ATTR_LIST(vp, kbuf, al_hreq.buflen, al_hreq.flags,
- cursor, NULL, error);
+ error = bhv_vop_attr_list(vp, kbuf, al_hreq.buflen, al_hreq.flags,
+ cursor, NULL);
if (error)
goto out_kfree;
@@ -490,7 +490,7 @@ xfs_attrlist_by_handle(
STATIC int
xfs_attrmulti_attr_get(
- struct vnode *vp,
+ bhv_vnode_t *vp,
char *name,
char __user *ubuf,
__uint32_t *len,
@@ -505,7 +505,7 @@ xfs_attrmulti_attr_get(
if (!kbuf)
return ENOMEM;
- VOP_ATTR_GET(vp, name, kbuf, len, flags, NULL, error);
+ error = bhv_vop_attr_get(vp, name, kbuf, len, flags, NULL);
if (error)
goto out_kfree;
@@ -519,7 +519,7 @@ xfs_attrmulti_attr_get(
STATIC int
xfs_attrmulti_attr_set(
- struct vnode *vp,
+ bhv_vnode_t *vp,
char *name,
const char __user *ubuf,
__uint32_t len,
@@ -542,7 +542,7 @@ xfs_attrmulti_attr_set(
if (copy_from_user(kbuf, ubuf, len))
goto out_kfree;
- VOP_ATTR_SET(vp, name, kbuf, len, flags, NULL, error);
+ error = bhv_vop_attr_set(vp, name, kbuf, len, flags, NULL);
out_kfree:
kfree(kbuf);
@@ -551,20 +551,15 @@ xfs_attrmulti_attr_set(
STATIC int
xfs_attrmulti_attr_remove(
- struct vnode *vp,
+ bhv_vnode_t *vp,
char *name,
__uint32_t flags)
{
- int error;
-
-
if (IS_RDONLY(&vp->v_inode))
return -EROFS;
if (IS_IMMUTABLE(&vp->v_inode) || IS_APPEND(&vp->v_inode))
return EPERM;
-
- VOP_ATTR_REMOVE(vp, name, flags, NULL, error);
- return error;
+ return bhv_vop_attr_remove(vp, name, flags, NULL);
}
STATIC int
@@ -578,7 +573,7 @@ xfs_attrmulti_by_handle(
xfs_attr_multiop_t *ops;
xfs_fsop_attrmulti_handlereq_t am_hreq;
struct inode *inode;
- vnode_t *vp;
+ bhv_vnode_t *vp;
unsigned int i, size;
char *attr_name;
@@ -658,7 +653,7 @@ xfs_attrmulti_by_handle(
STATIC int
xfs_ioc_space(
bhv_desc_t *bdp,
- vnode_t *vp,
+ bhv_vnode_t *vp,
struct file *filp,
int flags,
unsigned int cmd,
@@ -682,7 +677,7 @@ xfs_ioc_fsgeometry(
STATIC int
xfs_ioc_xattr(
- vnode_t *vp,
+ bhv_vnode_t *vp,
xfs_inode_t *ip,
struct file *filp,
unsigned int cmd,
@@ -711,7 +706,7 @@ xfs_ioctl(
void __user *arg)
{
int error;
- vnode_t *vp;
+ bhv_vnode_t *vp;
xfs_inode_t *ip;
xfs_mount_t *mp;
@@ -962,7 +957,7 @@ xfs_ioctl(
STATIC int
xfs_ioc_space(
bhv_desc_t *bdp,
- vnode_t *vp,
+ bhv_vnode_t *vp,
struct file *filp,
int ioflags,
unsigned int cmd,
@@ -1153,14 +1148,14 @@ xfs_di2lxflags(
STATIC int
xfs_ioc_xattr(
- vnode_t *vp,
+ bhv_vnode_t *vp,
xfs_inode_t *ip,
struct file *filp,
unsigned int cmd,
void __user *arg)
{
struct fsxattr fa;
- struct vattr *vattr;
+ struct bhv_vattr *vattr;
int error = 0;
int attr_flags;
unsigned int flags;
@@ -1173,7 +1168,7 @@ xfs_ioc_xattr(
case XFS_IOC_FSGETXATTR: {
vattr->va_mask = XFS_AT_XFLAGS | XFS_AT_EXTSIZE | \
XFS_AT_NEXTENTS | XFS_AT_PROJID;
- VOP_GETATTR(vp, vattr, 0, NULL, error);
+ error = bhv_vop_getattr(vp, vattr, 0, NULL);
if (unlikely(error)) {
error = -error;
break;
@@ -1206,7 +1201,7 @@ xfs_ioc_xattr(
vattr->va_extsize = fa.fsx_extsize;
vattr->va_projid = fa.fsx_projid;
- VOP_SETATTR(vp, vattr, attr_flags, NULL, error);
+ error = bhv_vop_setattr(vp, vattr, attr_flags, NULL);
if (likely(!error))
__vn_revalidate(vp, vattr); /* update flags */
error = -error;
@@ -1216,7 +1211,7 @@ xfs_ioc_xattr(
case XFS_IOC_FSGETXATTRA: {
vattr->va_mask = XFS_AT_XFLAGS | XFS_AT_EXTSIZE | \
XFS_AT_ANEXTENTS | XFS_AT_PROJID;
- VOP_GETATTR(vp, vattr, 0, NULL, error);
+ error = bhv_vop_getattr(vp, vattr, 0, NULL);
if (unlikely(error)) {
error = -error;
break;
@@ -1262,7 +1257,7 @@ xfs_ioc_xattr(
vattr->va_xflags = xfs_merge_ioc_xflags(flags,
xfs_ip2xflags(ip));
- VOP_SETATTR(vp, vattr, attr_flags, NULL, error);
+ error = bhv_vop_setattr(vp, vattr, attr_flags, NULL);
if (likely(!error))
__vn_revalidate(vp, vattr); /* update flags */
error = -error;
diff --git a/fs/xfs/linux-2.6/xfs_ioctl32.c b/fs/xfs/linux-2.6/xfs_ioctl32.c
index 251bfe451a3..601f01c92f7 100644
--- a/fs/xfs/linux-2.6/xfs_ioctl32.c
+++ b/fs/xfs/linux-2.6/xfs_ioctl32.c
@@ -114,7 +114,7 @@ xfs_compat_ioctl(
unsigned long arg)
{
struct inode *inode = file->f_dentry->d_inode;
- vnode_t *vp = vn_from_inode(inode);
+ bhv_vnode_t *vp = vn_from_inode(inode);
int error;
switch (cmd) {
@@ -193,7 +193,7 @@ xfs_compat_ioctl(
return -ENOIOCTLCMD;
}
- VOP_IOCTL(vp, inode, file, mode, cmd, (void __user *)arg, error);
+ error = bhv_vop_ioctl(vp, inode, file, mode, cmd, (void __user *)arg);
VMODIFY(vp);
return error;
diff --git a/fs/xfs/linux-2.6/xfs_iops.c b/fs/xfs/linux-2.6/xfs_iops.c
index 2e2e275c786..12810baeb5d 100644
--- a/fs/xfs/linux-2.6/xfs_iops.c
+++ b/fs/xfs/linux-2.6/xfs_iops.c
@@ -23,7 +23,6 @@
#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
-#include "xfs_dir.h"
#include "xfs_dir2.h"
#include "xfs_alloc.h"
#include "xfs_dmapi.h"
@@ -32,7 +31,6 @@
#include "xfs_bmap_btree.h"
#include "xfs_alloc_btree.h"
#include "xfs_ialloc_btree.h"
-#include "xfs_dir_sf.h"
#include "xfs_dir2_sf.h"
#include "xfs_attr_sf.h"
#include "xfs_dinode.h"
@@ -61,7 +59,7 @@
*/
xfs_inode_t *
xfs_vtoi(
- struct vnode *vp)
+ bhv_vnode_t *vp)
{
bhv_desc_t *bdp;
@@ -80,7 +78,7 @@ void
xfs_synchronize_atime(
xfs_inode_t *ip)
{
- vnode_t *vp;
+ bhv_vnode_t *vp;
vp = XFS_ITOV_NULL(ip);
if (vp) {
@@ -200,14 +198,10 @@ xfs_ichgtime_fast(
STATIC void
xfs_validate_fields(
struct inode *ip,
- struct vattr *vattr)
+ bhv_vattr_t *vattr)
{
- vnode_t *vp = vn_from_inode(ip);
- int error;
-
vattr->va_mask = XFS_AT_NLINK|XFS_AT_SIZE|XFS_AT_NBLOCKS;
- VOP_GETATTR(vp, vattr, ATTR_LAZY, NULL, error);
- if (likely(!error)) {
+ if (!bhv_vop_getattr(vn_from_inode(ip), vattr, ATTR_LAZY, NULL)) {
ip->i_nlink = vattr->va_nlink;
ip->i_blocks = vattr->va_nblocks;
@@ -225,7 +219,7 @@ xfs_validate_fields(
*/
STATIC int
xfs_init_security(
- struct vnode *vp,
+ bhv_vnode_t *vp,
struct inode *dir)
{
struct inode *ip = vn_to_inode(vp);
@@ -241,7 +235,7 @@ xfs_init_security(
return -error;
}
- VOP_ATTR_SET(vp, name, value, length, ATTR_SECURE, NULL, error);
+ error = bhv_vop_attr_set(vp, name, value, length, ATTR_SECURE, NULL);
if (!error)
VMODIFY(vp);
@@ -264,13 +258,12 @@ xfs_has_fs_struct(struct task_struct *task)
STATIC inline void
xfs_cleanup_inode(
- vnode_t *dvp,
- vnode_t *vp,
+ bhv_vnode_t *dvp,
+ bhv_vnode_t *vp,
struct dentry *dentry,
int mode)
{
struct dentry teardown = {};
- int error;
/* Oh, the horror.
* If we can't add the ACL or we fail in
@@ -281,9 +274,9 @@ xfs_cleanup_inode(
teardown.d_name = dentry->d_name;
if (S_ISDIR(mode))
- VOP_RMDIR(dvp, &teardown, NULL, error);
+ bhv_vop_rmdir(dvp, &teardown, NULL);
else
- VOP_REMOVE(dvp, &teardown, NULL, error);
+ bhv_vop_remove(dvp, &teardown, NULL);
VN_RELE(vp);
}
@@ -295,8 +288,8 @@ xfs_vn_mknod(
dev_t rdev)
{
struct inode *ip;
- vattr_t vattr = { 0 };
- vnode_t *vp = NULL, *dvp = vn_from_inode(dir);
+ bhv_vattr_t vattr = { 0 };
+ bhv_vnode_t *vp = NULL, *dvp = vn_from_inode(dir);
xfs_acl_t *default_acl = NULL;
attrexists_t test_default_acl = _ACL_DEFAULT_EXISTS;
int error;
@@ -330,10 +323,10 @@ xfs_vn_mknod(
vattr.va_mask |= XFS_AT_RDEV;
/*FALLTHROUGH*/
case S_IFREG:
- VOP_CREATE(dvp, dentry, &vattr, &vp, NULL, error);
+ error = bhv_vop_create(dvp, dentry, &vattr, &vp, NULL);
break;
case S_IFDIR:
- VOP_MKDIR(dvp, dentry, &vattr, &vp, NULL, error);
+ error = bhv_vop_mkdir(dvp, dentry, &vattr, &vp, NULL);
break;
default:
error = EINVAL;
@@ -396,14 +389,14 @@ xfs_vn_lookup(
struct dentry *dentry,
struct nameidata *nd)
{
- struct vnode *vp = vn_from_inode(dir), *cvp;
+ bhv_vnode_t *vp = vn_from_inode(dir), *cvp;
int error;
if (dentry->d_name.len >= MAXNAMELEN)
return ERR_PTR(-ENAMETOOLONG);
- VOP_LOOKUP(vp, dentry, &cvp, 0, NULL, NULL, error);
- if (error) {
+ error = bhv_vop_lookup(vp, dentry, &cvp, 0, NULL, NULL);
+ if (unlikely(error)) {
if (unlikely(error != ENOENT))
return ERR_PTR(-error);
d_add(dentry, NULL);
@@ -420,9 +413,9 @@ xfs_vn_link(
struct dentry *dentry)
{
struct inode *ip; /* inode of guy being linked to */
- vnode_t *tdvp; /* target directory for new name/link */
- vnode_t *vp; /* vp of name being linked */
- vattr_t vattr;
+ bhv_vnode_t *tdvp; /* target directory for new name/link */
+ bhv_vnode_t *vp; /* vp of name being linked */
+ bhv_vattr_t vattr;
int error;
ip = old_dentry->d_inode; /* inode being linked to */
@@ -432,7 +425,7 @@ xfs_vn_link(
tdvp = vn_from_inode(dir);
vp = vn_from_inode(ip);
- VOP_LINK(tdvp, vp, dentry, NULL, error);
+ error = bhv_vop_link(tdvp, vp, dentry, NULL);
if (likely(!error)) {
VMODIFY(tdvp);
VN_HOLD(vp);
@@ -448,14 +441,14 @@ xfs_vn_unlink(
struct dentry *dentry)
{
struct inode *inode;
- vnode_t *dvp; /* directory containing name to remove */
- vattr_t vattr;
+ bhv_vnode_t *dvp; /* directory containing name to remove */
+ bhv_vattr_t vattr;
int error;
inode = dentry->d_inode;
dvp = vn_from_inode(dir);
- VOP_REMOVE(dvp, dentry, NULL, error);
+ error = bhv_vop_remove(dvp, dentry, NULL);
if (likely(!error)) {
xfs_validate_fields(dir, &vattr); /* size needs update */
xfs_validate_fields(inode, &vattr);
@@ -470,27 +463,26 @@ xfs_vn_symlink(
const char *symname)
{
struct inode *ip;
- vattr_t vattr = { 0 };
- vnode_t *dvp; /* directory containing name of symlink */
- vnode_t *cvp; /* used to lookup symlink to put in dentry */
+ bhv_vattr_t va = { 0 };
+ bhv_vnode_t *dvp; /* directory containing name of symlink */
+ bhv_vnode_t *cvp; /* used to lookup symlink to put in dentry */
int error;
dvp = vn_from_inode(dir);
cvp = NULL;
- vattr.va_mode = S_IFLNK |
+ va.va_mode = S_IFLNK |
(irix_symlink_mode ? 0777 & ~current->fs->umask : S_IRWXUGO);
- vattr.va_mask = XFS_AT_TYPE|XFS_AT_MODE;
+ va.va_mask = XFS_AT_TYPE|XFS_AT_MODE;
- error = 0;
- VOP_SYMLINK(dvp, dentry, &vattr, (char *)symname, &cvp, NULL, error);
+ error = bhv_vop_symlink(dvp, dentry, &va, (char *)symname, &cvp, NULL);
if (likely(!error && cvp)) {
error = xfs_init_security(cvp, dir);
if (likely(!error)) {
ip = vn_to_inode(cvp);
d_instantiate(dentry, ip);
- xfs_validate_fields(dir, &vattr);
- xfs_validate_fields(ip, &vattr);
+ xfs_validate_fields(dir, &va);
+ xfs_validate_fields(ip, &va);
} else {
xfs_cleanup_inode(dvp, cvp, dentry, 0);
}
@@ -504,11 +496,11 @@ xfs_vn_rmdir(
struct dentry *dentry)
{
struct inode *inode = dentry->d_inode;
- vnode_t *dvp = vn_from_inode(dir);
- vattr_t vattr;
+ bhv_vnode_t *dvp = vn_from_inode(dir);
+ bhv_vattr_t vattr;
int error;
- VOP_RMDIR(dvp, dentry, NULL, error);
+ error = bhv_vop_rmdir(dvp, dentry, NULL);
if (likely(!error)) {
xfs_validate_fields(inode, &vattr);
xfs_validate_fields(dir, &vattr);
@@ -524,15 +516,15 @@ xfs_vn_rename(
struct dentry *ndentry)
{
struct inode *new_inode = ndentry->d_inode;
- vnode_t *fvp; /* from directory */
- vnode_t *tvp; /* target directory */
- vattr_t vattr;
+ bhv_vnode_t *fvp; /* from directory */
+ bhv_vnode_t *tvp; /* target directory */
+ bhv_vattr_t vattr;
int error;
fvp = vn_from_inode(odir);
tvp = vn_from_inode(ndir);
- VOP_RENAME(fvp, odentry, tvp, ndentry, NULL, error);
+ error = bhv_vop_rename(fvp, odentry, tvp, ndentry, NULL);
if (likely(!error)) {
if (new_inode)
xfs_validate_fields(new_inode, &vattr);
@@ -553,7 +545,7 @@ xfs_vn_follow_link(
struct dentry *dentry,
struct nameidata *nd)
{
- vnode_t *vp;
+ bhv_vnode_t *vp;
uio_t *uio;
iovec_t iov;
int error;
@@ -586,8 +578,8 @@ xfs_vn_follow_link(
uio->uio_resid = MAXPATHLEN;
uio->uio_iovcnt = 1;
- VOP_READLINK(vp, uio, 0, NULL, error);
- if (error) {
+ error = bhv_vop_readlink(vp, uio, 0, NULL);
+ if (unlikely(error)) {
kfree(link);
link = ERR_PTR(-error);
} else {
@@ -618,12 +610,7 @@ xfs_vn_permission(
int mode,
struct nameidata *nd)
{
- vnode_t *vp = vn_from_inode(inode);
- int error;
-
- mode <<= 6; /* convert from linux to vnode access bits */
- VOP_ACCESS(vp, mode, NULL, error);
- return -error;
+ return -bhv_vop_access(vn_from_inode(inode), mode << 6, NULL);
}
#else
#define xfs_vn_permission NULL
@@ -636,14 +623,14 @@ xfs_vn_getattr(
struct kstat *stat)
{
struct inode *inode = dentry->d_inode;
- vnode_t *vp = vn_from_inode(inode);
+ bhv_vnode_t *vp = vn_from_inode(inode);
int error = 0;
if (unlikely(vp->v_flag & VMODIFIED))
error = vn_revalidate(vp);
if (!error)
generic_fillattr(inode, stat);
- return 0;
+ return -error;
}
STATIC int
@@ -653,8 +640,8 @@ xfs_vn_setattr(
{
struct inode *inode = dentry->d_inode;
unsigned int ia_valid = attr->ia_valid;
- vnode_t *vp = vn_from_inode(inode);
- vattr_t vattr = { 0 };
+ bhv_vnode_t *vp = vn_from_inode(inode);
+ bhv_vattr_t vattr = { 0 };
int flags = 0;
int error;
@@ -697,7 +684,7 @@ xfs_vn_setattr(
flags |= ATTR_NONBLOCK;
#endif
- VOP_SETATTR(vp, &vattr, flags, NULL, error);
+ error = bhv_vop_setattr(vp, &vattr, flags, NULL);
if (likely(!error))
__vn_revalidate(vp, &vattr);
return -error;
@@ -718,7 +705,7 @@ xfs_vn_setxattr(
size_t size,
int flags)
{
- vnode_t *vp = vn_from_inode(dentry->d_inode);
+ bhv_vnode_t *vp = vn_from_inode(dentry->d_inode);
char *attr = (char *)name;
attrnames_t *namesp;
int xflags = 0;
@@ -748,7 +735,7 @@ xfs_vn_getxattr(
void *data,
size_t size)
{
- vnode_t *vp = vn_from_inode(dentry->d_inode);
+ bhv_vnode_t *vp = vn_from_inode(dentry->d_inode);
char *attr = (char *)name;
attrnames_t *namesp;
int xflags = 0;
@@ -777,7 +764,7 @@ xfs_vn_listxattr(
char *data,
size_t size)
{
- vnode_t *vp = vn_from_inode(dentry->d_inode);
+ bhv_vnode_t *vp = vn_from_inode(dentry->d_inode);
int error, xflags = ATTR_KERNAMELS;
ssize_t result;
@@ -796,7 +783,7 @@ xfs_vn_removexattr(
struct dentry *dentry,
const char *name)
{
- vnode_t *vp = vn_from_inode(dentry->d_inode);
+ bhv_vnode_t *vp = vn_from_inode(dentry->d_inode);
char *attr = (char *)name;
attrnames_t *namesp;
int xflags = 0;
diff --git a/fs/xfs/linux-2.6/xfs_linux.h b/fs/xfs/linux-2.6/xfs_linux.h
index e9fe43d7476..aa26ab906c8 100644
--- a/fs/xfs/linux-2.6/xfs_linux.h
+++ b/fs/xfs/linux-2.6/xfs_linux.h
@@ -134,14 +134,21 @@ BUFFER_FNS(PrivateStart, unwritten);
#define xfs_buf_age_centisecs xfs_params.xfs_buf_age.val
#define xfs_inherit_nosymlinks xfs_params.inherit_nosym.val
#define xfs_rotorstep xfs_params.rotorstep.val
+#define xfs_inherit_nodefrag xfs_params.inherit_nodfrg.val
-#ifndef raw_smp_processor_id
-#define raw_smp_processor_id() smp_processor_id()
-#endif
-#define current_cpu() raw_smp_processor_id()
+#define current_cpu() (raw_smp_processor_id())
#define current_pid() (current->pid)
#define current_fsuid(cred) (current->fsuid)
#define current_fsgid(cred) (current->fsgid)
+#define current_set_flags(f) (current->flags |= (f))
+#define current_test_flags(f) (current->flags & (f))
+#define current_clear_flags(f) (current->flags & ~(f))
+#define current_set_flags_nested(sp, f) \
+ (*(sp) = current->flags, current->flags |= (f))
+#define current_clear_flags_nested(sp, f) \
+ (*(sp) = current->flags, current->flags &= ~(f))
+#define current_restore_flags_nested(sp, f) \
+ (current->flags = ((current->flags & ~(f)) | (*(sp) & (f))))
#define NBPP PAGE_SIZE
#define DPPSHFT (PAGE_SHIFT - 9)
@@ -187,25 +194,9 @@ BUFFER_FNS(PrivateStart, unwritten);
/* bytes to clicks */
#define btoc(x) (((__psunsigned_t)(x)+(NBPC-1))>>BPCSHIFT)
-#ifndef ENOATTR
#define ENOATTR ENODATA /* Attribute not found */
-#endif
-
-/* Note: EWRONGFS never visible outside the kernel */
-#define EWRONGFS EINVAL /* Mount with wrong filesystem type */
-
-/*
- * XXX EFSCORRUPTED needs a real value in errno.h. asm-i386/errno.h won't
- * return codes out of its known range in errno.
- * XXX Also note: needs to be < 1000 and fairly unique on Linux (mustn't
- * conflict with any code we use already or any code a driver may use)
- * XXX Some options (currently we do #2):
- * 1/ New error code ["Filesystem is corrupted", _after_ glibc updated]
- * 2/ 990 ["Unknown error 990"]
- * 3/ EUCLEAN ["Structure needs cleaning"]
- * 4/ Convert EFSCORRUPTED to EIO [just prior to return into userspace]
- */
-#define EFSCORRUPTED 990 /* Filesystem is corrupted */
+#define EWRONGFS EINVAL /* Mount with wrong filesystem type */
+#define EFSCORRUPTED EUCLEAN /* Filesystem is corrupted */
#define SYNCHRONIZE() barrier()
#define __return_address __builtin_return_address(0)
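The nested flag helpers above take over from the PFLAGS_* macros removed from kmem.h: they save the caller's flag word so transaction state is restored exactly even when contexts nest. A minimal usage sketch (the surrounding function and the unsigned long save slot are assumptions matching the task flag word of this era):

	unsigned long	pflags;

	current_set_flags_nested(&pflags, PF_FSTRANS);	/* save old state, mark in-transaction */
	/*
	 * ... allocation or writeback that must not re-enter the filesystem:
	 * kmem_flags_convert() sees PF_FSTRANS on the task and strips
	 * __GFP_FS from the allocation flags ...
	 */
	current_restore_flags_nested(&pflags, PF_FSTRANS);	/* put PF_FSTRANS back as saved */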
diff --git a/fs/xfs/linux-2.6/xfs_lrw.c b/fs/xfs/linux-2.6/xfs_lrw.c
index 67efe330898..5d9cfd91ad0 100644
--- a/fs/xfs/linux-2.6/xfs_lrw.c
+++ b/fs/xfs/linux-2.6/xfs_lrw.c
@@ -23,7 +23,6 @@
#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
-#include "xfs_dir.h"
#include "xfs_dir2.h"
#include "xfs_alloc.h"
#include "xfs_dmapi.h"
@@ -32,7 +31,6 @@
#include "xfs_bmap_btree.h"
#include "xfs_alloc_btree.h"
#include "xfs_ialloc_btree.h"
-#include "xfs_dir_sf.h"
#include "xfs_dir2_sf.h"
#include "xfs_attr_sf.h"
#include "xfs_dinode.h"
@@ -206,7 +204,7 @@ xfs_read(
xfs_fsize_t n;
xfs_inode_t *ip;
xfs_mount_t *mp;
- vnode_t *vp;
+ bhv_vnode_t *vp;
unsigned long seg;
ip = XFS_BHVTOI(bdp);
@@ -258,7 +256,7 @@ xfs_read(
if (DM_EVENT_ENABLED(vp->v_vfsp, ip, DM_EVENT_READ) &&
!(ioflags & IO_INVIS)) {
- vrwlock_t locktype = VRWLOCK_READ;
+ bhv_vrwlock_t locktype = VRWLOCK_READ;
int dmflags = FILP_DELAY_FLAG(file) | DM_SEM_FLAG_RD(ioflags);
ret = -XFS_SEND_DATA(mp, DM_EVENT_READ,
@@ -271,7 +269,7 @@ xfs_read(
}
if (unlikely((ioflags & IO_ISDIRECT) && VN_CACHED(vp)))
- VOP_FLUSHINVAL_PAGES(vp, ctooff(offtoct(*offset)),
+ bhv_vop_flushinval_pages(vp, ctooff(offtoct(*offset)),
-1, FI_REMAPF_LOCKED);
xfs_rw_enter_trace(XFS_READ_ENTER, &ip->i_iocore,
@@ -313,7 +311,7 @@ xfs_sendfile(
if (DM_EVENT_ENABLED(BHV_TO_VNODE(bdp)->v_vfsp, ip, DM_EVENT_READ) &&
(!(ioflags & IO_INVIS))) {
- vrwlock_t locktype = VRWLOCK_READ;
+ bhv_vrwlock_t locktype = VRWLOCK_READ;
int error;
error = XFS_SEND_DATA(mp, DM_EVENT_READ, BHV_TO_VNODE(bdp),
@@ -357,7 +355,7 @@ xfs_splice_read(
if (DM_EVENT_ENABLED(BHV_TO_VNODE(bdp)->v_vfsp, ip, DM_EVENT_READ) &&
(!(ioflags & IO_INVIS))) {
- vrwlock_t locktype = VRWLOCK_READ;
+ bhv_vrwlock_t locktype = VRWLOCK_READ;
int error;
error = XFS_SEND_DATA(mp, DM_EVENT_READ, BHV_TO_VNODE(bdp),
@@ -401,7 +399,7 @@ xfs_splice_write(
if (DM_EVENT_ENABLED(BHV_TO_VNODE(bdp)->v_vfsp, ip, DM_EVENT_WRITE) &&
(!(ioflags & IO_INVIS))) {
- vrwlock_t locktype = VRWLOCK_WRITE;
+ bhv_vrwlock_t locktype = VRWLOCK_WRITE;
int error;
error = XFS_SEND_DATA(mp, DM_EVENT_WRITE, BHV_TO_VNODE(bdp),
@@ -458,7 +456,7 @@ xfs_zero_last_block(
last_fsb = XFS_B_TO_FSBT(mp, isize);
nimaps = 1;
error = XFS_BMAPI(mp, NULL, io, last_fsb, 1, 0, NULL, 0, &imap,
- &nimaps, NULL);
+ &nimaps, NULL, NULL);
if (error) {
return error;
}
@@ -499,7 +497,7 @@ xfs_zero_last_block(
int /* error (positive) */
xfs_zero_eof(
- vnode_t *vp,
+ bhv_vnode_t *vp,
xfs_iocore_t *io,
xfs_off_t offset, /* starting I/O offset */
xfs_fsize_t isize, /* current inode size */
@@ -510,7 +508,6 @@ xfs_zero_eof(
xfs_fileoff_t end_zero_fsb;
xfs_fileoff_t zero_count_fsb;
xfs_fileoff_t last_fsb;
- xfs_extlen_t buf_len_fsb;
xfs_mount_t *mp = io->io_mount;
int nimaps;
int error = 0;
@@ -556,7 +553,7 @@ xfs_zero_eof(
nimaps = 1;
zero_count_fsb = end_zero_fsb - start_zero_fsb + 1;
error = XFS_BMAPI(mp, NULL, io, start_zero_fsb, zero_count_fsb,
- 0, NULL, 0, &imap, &nimaps, NULL);
+ 0, NULL, 0, &imap, &nimaps, NULL, NULL);
if (error) {
ASSERT(ismrlocked(io->io_lock, MR_UPDATE));
ASSERT(ismrlocked(io->io_iolock, MR_UPDATE));
@@ -579,16 +576,7 @@ xfs_zero_eof(
}
/*
- * There are blocks in the range requested.
- * Zero them a single write at a time. We actually
- * don't zero the entire range returned if it is
- * too big and simply loop around to get the rest.
- * That is not the most efficient thing to do, but it
- * is simple and this path should not be exercised often.
- */
- buf_len_fsb = XFS_FILBLKS_MIN(imap.br_blockcount,
- mp->m_writeio_blocks << 8);
- /*
+ * There are blocks we need to zero.
* Drop the inode lock while we're doing the I/O.
* We'll still have the iolock to protect us.
*/
@@ -596,14 +584,13 @@ xfs_zero_eof(
error = xfs_iozero(ip,
XFS_FSB_TO_B(mp, start_zero_fsb),
- XFS_FSB_TO_B(mp, buf_len_fsb),
+ XFS_FSB_TO_B(mp, imap.br_blockcount),
end_size);
-
if (error) {
goto out_lock;
}
- start_zero_fsb = imap.br_startoff + buf_len_fsb;
+ start_zero_fsb = imap.br_startoff + imap.br_blockcount;
ASSERT(start_zero_fsb <= (end_zero_fsb + 1));
XFS_ILOCK(mp, io, XFS_ILOCK_EXCL|XFS_EXTSIZE_RD);
@@ -637,11 +624,11 @@ xfs_write(
ssize_t ret = 0, error = 0;
xfs_fsize_t isize, new_size;
xfs_iocore_t *io;
- vnode_t *vp;
+ bhv_vnode_t *vp;
unsigned long seg;
int iolock;
int eventsent = 0;
- vrwlock_t locktype;
+ bhv_vrwlock_t locktype;
size_t ocount = 0, count;
loff_t pos;
int need_i_mutex = 1, need_flush = 0;
@@ -679,11 +666,11 @@ xfs_write(
io = &xip->i_iocore;
mp = io->io_mount;
+ vfs_wait_for_freeze(vp->v_vfsp, SB_FREEZE_WRITE);
+
if (XFS_FORCED_SHUTDOWN(mp))
return -EIO;
- fs_check_frozen(vp->v_vfsp, SB_FREEZE_WRITE);
-
if (ioflags & IO_ISDIRECT) {
xfs_buftarg_t *target =
(xip->i_d.di_flags & XFS_DIFLAG_REALTIME) ?
@@ -814,7 +801,7 @@ retry:
if (need_flush) {
xfs_inval_cached_trace(io, pos, -1,
ctooff(offtoct(pos)), -1);
- VOP_FLUSHINVAL_PAGES(vp, ctooff(offtoct(pos)),
+ bhv_vop_flushinval_pages(vp, ctooff(offtoct(pos)),
-1, FI_REMAPF_LOCKED);
}
@@ -903,79 +890,9 @@ retry:
/* Handle various SYNC-type writes */
if ((file->f_flags & O_SYNC) || IS_SYNC(inode)) {
- /*
- * If we're treating this as O_DSYNC and we have not updated the
- * size, force the log.
- */
- if (!(mp->m_flags & XFS_MOUNT_OSYNCISOSYNC) &&
- !(xip->i_update_size)) {
- xfs_inode_log_item_t *iip = xip->i_itemp;
-
- /*
- * If an allocation transaction occurred
- * without extending the size, then we have to force
- * the log up the proper point to ensure that the
- * allocation is permanent. We can't count on
- * the fact that buffered writes lock out direct I/O
- * writes - the direct I/O write could have extended
- * the size nontransactionally, then finished before
- * we started. xfs_write_file will think that the file
- * didn't grow but the update isn't safe unless the
- * size change is logged.
- *
- * Force the log if we've committed a transaction
- * against the inode or if someone else has and
- * the commit record hasn't gone to disk (e.g.
- * the inode is pinned). This guarantees that
- * all changes affecting the inode are permanent
- * when we return.
- */
- if (iip && iip->ili_last_lsn) {
- xfs_log_force(mp, iip->ili_last_lsn,
- XFS_LOG_FORCE | XFS_LOG_SYNC);
- } else if (xfs_ipincount(xip) > 0) {
- xfs_log_force(mp, (xfs_lsn_t)0,
- XFS_LOG_FORCE | XFS_LOG_SYNC);
- }
-
- } else {
- xfs_trans_t *tp;
-
- /*
- * O_SYNC or O_DSYNC _with_ a size update are handled
- * the same way.
- *
- * If the write was synchronous then we need to make
- * sure that the inode modification time is permanent.
- * We'll have updated the timestamp above, so here
- * we use a synchronous transaction to log the inode.
- * It's not fast, but it's necessary.
- *
- * If this a dsync write and the size got changed
- * non-transactionally, then we need to ensure that
- * the size change gets logged in a synchronous
- * transaction.
- */
-
- tp = xfs_trans_alloc(mp, XFS_TRANS_WRITE_SYNC);
- if ((error = xfs_trans_reserve(tp, 0,
- XFS_SWRITE_LOG_RES(mp),
- 0, 0, 0))) {
- /* Transaction reserve failed */
- xfs_trans_cancel(tp, 0);
- } else {
- /* Transaction reserve successful */
- xfs_ilock(xip, XFS_ILOCK_EXCL);
- xfs_trans_ijoin(tp, xip, XFS_ILOCK_EXCL);
- xfs_trans_ihold(tp, xip);
- xfs_trans_log_inode(tp, xip, XFS_ILOG_CORE);
- xfs_trans_set_sync(tp);
- error = xfs_trans_commit(tp, 0, NULL);
- xfs_iunlock(xip, XFS_ILOCK_EXCL);
- }
- if (error)
- goto out_unlock_internal;
- }
+ error = xfs_write_sync_logforce(mp, xip);
+ if (error)
+ goto out_unlock_internal;
xfs_rwunlock(bdp, locktype);
if (need_i_mutex)
diff --git a/fs/xfs/linux-2.6/xfs_lrw.h b/fs/xfs/linux-2.6/xfs_lrw.h
index 8f453995235..c77e62efb74 100644
--- a/fs/xfs/linux-2.6/xfs_lrw.h
+++ b/fs/xfs/linux-2.6/xfs_lrw.h
@@ -18,8 +18,8 @@
#ifndef __XFS_LRW_H__
#define __XFS_LRW_H__
-struct vnode;
struct bhv_desc;
+struct bhv_vnode;
struct xfs_mount;
struct xfs_iocore;
struct xfs_inode;
@@ -49,7 +49,7 @@ struct xfs_iomap;
#define XFS_CTRUNC4 14
#define XFS_CTRUNC5 15
#define XFS_CTRUNC6 16
-#define XFS_BUNMAPI 17
+#define XFS_BUNMAP 17
#define XFS_INVAL_CACHED 18
#define XFS_DIORD_ENTER 19
#define XFS_DIOWR_ENTER 20
@@ -82,7 +82,7 @@ extern int xfsbdstrat(struct xfs_mount *, struct xfs_buf *);
extern int xfs_bdstrat_cb(struct xfs_buf *);
extern int xfs_dev_is_read_only(struct xfs_mount *, char *);
-extern int xfs_zero_eof(struct vnode *, struct xfs_iocore *, xfs_off_t,
+extern int xfs_zero_eof(struct bhv_vnode *, struct xfs_iocore *, xfs_off_t,
xfs_fsize_t, xfs_fsize_t);
extern ssize_t xfs_read(struct bhv_desc *, struct kiocb *,
const struct iovec *, unsigned int,
diff --git a/fs/xfs/linux-2.6/xfs_super.c b/fs/xfs/linux-2.6/xfs_super.c
index 68f4793e8a1..f2a0778536f 100644
--- a/fs/xfs/linux-2.6/xfs_super.c
+++ b/fs/xfs/linux-2.6/xfs_super.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2000-2005 Silicon Graphics, Inc.
+ * Copyright (c) 2000-2006 Silicon Graphics, Inc.
* All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
@@ -23,7 +23,6 @@
#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
-#include "xfs_dir.h"
#include "xfs_dir2.h"
#include "xfs_alloc.h"
#include "xfs_dmapi.h"
@@ -32,7 +31,6 @@
#include "xfs_bmap_btree.h"
#include "xfs_alloc_btree.h"
#include "xfs_ialloc_btree.h"
-#include "xfs_dir_sf.h"
#include "xfs_dir2_sf.h"
#include "xfs_attr_sf.h"
#include "xfs_dinode.h"
@@ -151,7 +149,7 @@ xfs_set_inodeops(
STATIC __inline__ void
xfs_revalidate_inode(
xfs_mount_t *mp,
- vnode_t *vp,
+ bhv_vnode_t *vp,
xfs_inode_t *ip)
{
struct inode *inode = vn_to_inode(vp);
@@ -206,7 +204,7 @@ xfs_revalidate_inode(
void
xfs_initialize_vnode(
bhv_desc_t *bdp,
- vnode_t *vp,
+ bhv_vnode_t *vp,
bhv_desc_t *inode_bhv,
int unlock)
{
@@ -336,7 +334,7 @@ STATIC struct inode *
xfs_fs_alloc_inode(
struct super_block *sb)
{
- vnode_t *vp;
+ bhv_vnode_t *vp;
vp = kmem_zone_alloc(xfs_vnode_zone, KM_SLEEP);
if (unlikely(!vp))
@@ -359,13 +357,13 @@ xfs_fs_inode_init_once(
{
if ((flags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) ==
SLAB_CTOR_CONSTRUCTOR)
- inode_init_once(vn_to_inode((vnode_t *)vnode));
+ inode_init_once(vn_to_inode((bhv_vnode_t *)vnode));
}
STATIC int
xfs_init_zones(void)
{
- xfs_vnode_zone = kmem_zone_init_flags(sizeof(vnode_t), "xfs_vnode_t",
+ xfs_vnode_zone = kmem_zone_init_flags(sizeof(bhv_vnode_t), "xfs_vnode",
KM_ZONE_HWALIGN | KM_ZONE_RECLAIM |
KM_ZONE_SPREAD,
xfs_fs_inode_init_once);
@@ -409,22 +407,17 @@ xfs_fs_write_inode(
struct inode *inode,
int sync)
{
- vnode_t *vp = vn_from_inode(inode);
+ bhv_vnode_t *vp = vn_from_inode(inode);
int error = 0, flags = FLUSH_INODE;
if (vp) {
vn_trace_entry(vp, __FUNCTION__, (inst_t *)__return_address);
if (sync)
flags |= FLUSH_SYNC;
- VOP_IFLUSH(vp, flags, error);
- if (error == EAGAIN) {
- if (sync)
- VOP_IFLUSH(vp, flags | FLUSH_LOG, error);
- else
- error = 0;
- }
+ error = bhv_vop_iflush(vp, flags);
+ if (error == EAGAIN)
+ error = sync? bhv_vop_iflush(vp, flags | FLUSH_LOG) : 0;
}
-
return -error;
}
@@ -432,8 +425,7 @@ STATIC void
xfs_fs_clear_inode(
struct inode *inode)
{
- vnode_t *vp = vn_from_inode(inode);
- int error, cache;
+ bhv_vnode_t *vp = vn_from_inode(inode);
vn_trace_entry(vp, __FUNCTION__, (inst_t *)__return_address);
@@ -446,20 +438,18 @@ xfs_fs_clear_inode(
* This can happen because xfs_iget_core calls xfs_idestroy if we
* find an inode with di_mode == 0 but without IGET_CREATE set.
*/
- if (vp->v_fbhv)
- VOP_INACTIVE(vp, NULL, cache);
+ if (VNHEAD(vp))
+ bhv_vop_inactive(vp, NULL);
VN_LOCK(vp);
vp->v_flag &= ~VMODIFIED;
VN_UNLOCK(vp, 0);
- if (vp->v_fbhv) {
- VOP_RECLAIM(vp, error);
- if (error)
- panic("vn_purge: cannot reclaim");
- }
+ if (VNHEAD(vp))
+ if (bhv_vop_reclaim(vp))
+ panic("%s: cannot reclaim 0x%p\n", __FUNCTION__, vp);
- ASSERT(vp->v_fbhv == NULL);
+ ASSERT(VNHEAD(vp) == NULL);
#ifdef XFS_VNODE_TRACE
ktrace_free(vp->v_trace);
@@ -475,13 +465,13 @@ xfs_fs_clear_inode(
*/
STATIC void
xfs_syncd_queue_work(
- struct vfs *vfs,
+ struct bhv_vfs *vfs,
void *data,
- void (*syncer)(vfs_t *, void *))
+ void (*syncer)(bhv_vfs_t *, void *))
{
- vfs_sync_work_t *work;
+ struct bhv_vfs_sync_work *work;
- work = kmem_alloc(sizeof(struct vfs_sync_work), KM_SLEEP);
+ work = kmem_alloc(sizeof(struct bhv_vfs_sync_work), KM_SLEEP);
INIT_LIST_HEAD(&work->w_list);
work->w_syncer = syncer;
work->w_data = data;
@@ -500,7 +490,7 @@ xfs_syncd_queue_work(
*/
STATIC void
xfs_flush_inode_work(
- vfs_t *vfs,
+ bhv_vfs_t *vfs,
void *inode)
{
filemap_flush(((struct inode *)inode)->i_mapping);
@@ -512,7 +502,7 @@ xfs_flush_inode(
xfs_inode_t *ip)
{
struct inode *inode = vn_to_inode(XFS_ITOV(ip));
- struct vfs *vfs = XFS_MTOVFS(ip->i_mount);
+ struct bhv_vfs *vfs = XFS_MTOVFS(ip->i_mount);
igrab(inode);
xfs_syncd_queue_work(vfs, inode, xfs_flush_inode_work);
@@ -525,7 +515,7 @@ xfs_flush_inode(
*/
STATIC void
xfs_flush_device_work(
- vfs_t *vfs,
+ bhv_vfs_t *vfs,
void *inode)
{
sync_blockdev(vfs->vfs_super->s_bdev);
@@ -537,7 +527,7 @@ xfs_flush_device(
xfs_inode_t *ip)
{
struct inode *inode = vn_to_inode(XFS_ITOV(ip));
- struct vfs *vfs = XFS_MTOVFS(ip->i_mount);
+ struct bhv_vfs *vfs = XFS_MTOVFS(ip->i_mount);
igrab(inode);
xfs_syncd_queue_work(vfs, inode, xfs_flush_device_work);
@@ -545,16 +535,16 @@ xfs_flush_device(
xfs_log_force(ip->i_mount, (xfs_lsn_t)0, XFS_LOG_FORCE|XFS_LOG_SYNC);
}
-#define SYNCD_FLAGS (SYNC_FSDATA|SYNC_BDFLUSH|SYNC_ATTR|SYNC_REFCACHE)
STATIC void
vfs_sync_worker(
- vfs_t *vfsp,
+ bhv_vfs_t *vfsp,
void *unused)
{
int error;
if (!(vfsp->vfs_flag & VFS_RDONLY))
- VFS_SYNC(vfsp, SYNCD_FLAGS, NULL, error);
+ error = bhv_vfs_sync(vfsp, SYNC_FSDATA | SYNC_BDFLUSH | \
+ SYNC_ATTR | SYNC_REFCACHE, NULL);
vfsp->vfs_sync_seq++;
wmb();
wake_up(&vfsp->vfs_wait_single_sync_task);
@@ -565,8 +555,8 @@ xfssyncd(
void *arg)
{
long timeleft;
- vfs_t *vfsp = (vfs_t *) arg;
- struct vfs_sync_work *work, *n;
+ bhv_vfs_t *vfsp = (bhv_vfs_t *) arg;
+ bhv_vfs_sync_work_t *work, *n;
LIST_HEAD (tmp);
timeleft = xfs_syncd_centisecs * msecs_to_jiffies(10);
@@ -600,7 +590,7 @@ xfssyncd(
list_del(&work->w_list);
if (work == &vfsp->vfs_sync_work)
continue;
- kmem_free(work, sizeof(struct vfs_sync_work));
+ kmem_free(work, sizeof(struct bhv_vfs_sync_work));
}
}
@@ -609,7 +599,7 @@ xfssyncd(
STATIC int
xfs_fs_start_syncd(
- vfs_t *vfsp)
+ bhv_vfs_t *vfsp)
{
vfsp->vfs_sync_work.w_syncer = vfs_sync_worker;
vfsp->vfs_sync_work.w_vfs = vfsp;
@@ -621,7 +611,7 @@ xfs_fs_start_syncd(
STATIC void
xfs_fs_stop_syncd(
- vfs_t *vfsp)
+ bhv_vfs_t *vfsp)
{
kthread_stop(vfsp->vfs_sync_task);
}
@@ -630,35 +620,26 @@ STATIC void
xfs_fs_put_super(
struct super_block *sb)
{
- vfs_t *vfsp = vfs_from_sb(sb);
+ bhv_vfs_t *vfsp = vfs_from_sb(sb);
int error;
xfs_fs_stop_syncd(vfsp);
- VFS_SYNC(vfsp, SYNC_ATTR|SYNC_DELWRI, NULL, error);
- if (!error)
- VFS_UNMOUNT(vfsp, 0, NULL, error);
+ bhv_vfs_sync(vfsp, SYNC_ATTR | SYNC_DELWRI, NULL);
+ error = bhv_vfs_unmount(vfsp, 0, NULL);
if (error) {
- printk("XFS unmount got error %d\n", error);
- printk("%s: vfsp/0x%p left dangling!\n", __FUNCTION__, vfsp);
- return;
+ printk("XFS: unmount got error=%d\n", error);
+ printk("%s: vfs=0x%p left dangling!\n", __FUNCTION__, vfsp);
+ } else {
+ vfs_deallocate(vfsp);
}
-
- vfs_deallocate(vfsp);
}
STATIC void
xfs_fs_write_super(
struct super_block *sb)
{
- vfs_t *vfsp = vfs_from_sb(sb);
- int error;
-
- if (sb->s_flags & MS_RDONLY) {
- sb->s_dirt = 0; /* paranoia */
- return;
- }
- /* Push the log and superblock a little */
- VFS_SYNC(vfsp, SYNC_FSDATA, NULL, error);
+ if (!(sb->s_flags & MS_RDONLY))
+ bhv_vfs_sync(vfs_from_sb(sb), SYNC_FSDATA, NULL);
sb->s_dirt = 0;
}
@@ -667,16 +648,16 @@ xfs_fs_sync_super(
struct super_block *sb,
int wait)
{
- vfs_t *vfsp = vfs_from_sb(sb);
- int error;
- int flags = SYNC_FSDATA;
+ bhv_vfs_t *vfsp = vfs_from_sb(sb);
+ int error;
+ int flags;
if (unlikely(sb->s_frozen == SB_FREEZE_WRITE))
flags = SYNC_QUIESCE;
else
flags = SYNC_FSDATA | (wait ? SYNC_WAIT : 0);
- VFS_SYNC(vfsp, flags, NULL, error);
+ error = bhv_vfs_sync(vfsp, flags, NULL);
sb->s_dirt = 0;
if (unlikely(laptop_mode)) {
@@ -706,11 +687,7 @@ xfs_fs_statfs(
struct super_block *sb,
struct kstatfs *statp)
{
- vfs_t *vfsp = vfs_from_sb(sb);
- int error;
-
- VFS_STATVFS(vfsp, statp, NULL, error);
- return -error;
+ return -bhv_vfs_statvfs(vfs_from_sb(sb), statp, NULL);
}
STATIC int
@@ -719,13 +696,13 @@ xfs_fs_remount(
int *flags,
char *options)
{
- vfs_t *vfsp = vfs_from_sb(sb);
+ bhv_vfs_t *vfsp = vfs_from_sb(sb);
struct xfs_mount_args *args = xfs_args_allocate(sb, 0);
int error;
- VFS_PARSEARGS(vfsp, options, args, 1, error);
+ error = bhv_vfs_parseargs(vfsp, options, args, 1);
if (!error)
- VFS_MNTUPDATE(vfsp, flags, args, error);
+ error = bhv_vfs_mntupdate(vfsp, flags, args);
kmem_free(args, sizeof(*args));
return -error;
}
@@ -734,7 +711,7 @@ STATIC void
xfs_fs_lockfs(
struct super_block *sb)
{
- VFS_FREEZE(vfs_from_sb(sb));
+ bhv_vfs_freeze(vfs_from_sb(sb));
}
STATIC int
@@ -742,11 +719,7 @@ xfs_fs_show_options(
struct seq_file *m,
struct vfsmount *mnt)
{
- struct vfs *vfsp = vfs_from_sb(mnt->mnt_sb);
- int error;
-
- VFS_SHOWARGS(vfsp, m, error);
- return error;
+ return -bhv_vfs_showargs(vfs_from_sb(mnt->mnt_sb), m);
}
STATIC int
@@ -754,11 +727,7 @@ xfs_fs_quotasync(
struct super_block *sb,
int type)
{
- struct vfs *vfsp = vfs_from_sb(sb);
- int error;
-
- VFS_QUOTACTL(vfsp, Q_XQUOTASYNC, 0, (caddr_t)NULL, error);
- return -error;
+ return -bhv_vfs_quotactl(vfs_from_sb(sb), Q_XQUOTASYNC, 0, NULL);
}
STATIC int
@@ -766,11 +735,7 @@ xfs_fs_getxstate(
struct super_block *sb,
struct fs_quota_stat *fqs)
{
- struct vfs *vfsp = vfs_from_sb(sb);
- int error;
-
- VFS_QUOTACTL(vfsp, Q_XGETQSTAT, 0, (caddr_t)fqs, error);
- return -error;
+ return -bhv_vfs_quotactl(vfs_from_sb(sb), Q_XGETQSTAT, 0, (caddr_t)fqs);
}
STATIC int
@@ -779,11 +744,7 @@ xfs_fs_setxstate(
unsigned int flags,
int op)
{
- struct vfs *vfsp = vfs_from_sb(sb);
- int error;
-
- VFS_QUOTACTL(vfsp, op, 0, (caddr_t)&flags, error);
- return -error;
+ return -bhv_vfs_quotactl(vfs_from_sb(sb), op, 0, (caddr_t)&flags);
}
STATIC int
@@ -793,13 +754,10 @@ xfs_fs_getxquota(
qid_t id,
struct fs_disk_quota *fdq)
{
- struct vfs *vfsp = vfs_from_sb(sb);
- int error, getmode;
-
- getmode = (type == USRQUOTA) ? Q_XGETQUOTA :
- ((type == GRPQUOTA) ? Q_XGETGQUOTA : Q_XGETPQUOTA);
- VFS_QUOTACTL(vfsp, getmode, id, (caddr_t)fdq, error);
- return -error;
+ return -bhv_vfs_quotactl(vfs_from_sb(sb),
+ (type == USRQUOTA) ? Q_XGETQUOTA :
+ ((type == GRPQUOTA) ? Q_XGETGQUOTA :
+ Q_XGETPQUOTA), id, (caddr_t)fdq);
}
STATIC int
@@ -809,13 +767,10 @@ xfs_fs_setxquota(
qid_t id,
struct fs_disk_quota *fdq)
{
- struct vfs *vfsp = vfs_from_sb(sb);
- int error, setmode;
-
- setmode = (type == USRQUOTA) ? Q_XSETQLIM :
- ((type == GRPQUOTA) ? Q_XSETGQLIM : Q_XSETPQLIM);
- VFS_QUOTACTL(vfsp, setmode, id, (caddr_t)fdq, error);
- return -error;
+ return -bhv_vfs_quotactl(vfs_from_sb(sb),
+ (type == USRQUOTA) ? Q_XSETQLIM :
+ ((type == GRPQUOTA) ? Q_XSETGQLIM :
+ Q_XSETPQLIM), id, (caddr_t)fdq);
}
STATIC int
@@ -824,34 +779,32 @@ xfs_fs_fill_super(
void *data,
int silent)
{
- vnode_t *rootvp;
- struct vfs *vfsp = vfs_allocate(sb);
+ struct bhv_vnode *rootvp;
+ struct bhv_vfs *vfsp = vfs_allocate(sb);
struct xfs_mount_args *args = xfs_args_allocate(sb, silent);
struct kstatfs statvfs;
- int error, error2;
+ int error;
bhv_insert_all_vfsops(vfsp);
- VFS_PARSEARGS(vfsp, (char *)data, args, 0, error);
+ error = bhv_vfs_parseargs(vfsp, (char *)data, args, 0);
if (error) {
bhv_remove_all_vfsops(vfsp, 1);
goto fail_vfsop;
}
sb_min_blocksize(sb, BBSIZE);
-#ifdef CONFIG_XFS_EXPORT
sb->s_export_op = &xfs_export_operations;
-#endif
sb->s_qcop = &xfs_quotactl_operations;
sb->s_op = &xfs_super_operations;
- VFS_MOUNT(vfsp, args, NULL, error);
+ error = bhv_vfs_mount(vfsp, args, NULL);
if (error) {
bhv_remove_all_vfsops(vfsp, 1);
goto fail_vfsop;
}
- VFS_STATVFS(vfsp, &statvfs, NULL, error);
+ error = bhv_vfs_statvfs(vfsp, &statvfs, NULL);
if (error)
goto fail_unmount;
@@ -863,7 +816,7 @@ xfs_fs_fill_super(
sb->s_time_gran = 1;
set_posix_acl_flag(sb);
- VFS_ROOT(vfsp, &rootvp, error);
+ error = bhv_vfs_root(vfsp, &rootvp);
if (error)
goto fail_unmount;
@@ -892,7 +845,7 @@ fail_vnrele:
}
fail_unmount:
- VFS_UNMOUNT(vfsp, 0, NULL, error2);
+ bhv_vfs_unmount(vfsp, 0, NULL);
fail_vfsop:
vfs_deallocate(vfsp);
diff --git a/fs/xfs/linux-2.6/xfs_super.h b/fs/xfs/linux-2.6/xfs_super.h
index 376b96cb513..33dd1ca1324 100644
--- a/fs/xfs/linux-2.6/xfs_super.h
+++ b/fs/xfs/linux-2.6/xfs_super.h
@@ -105,7 +105,7 @@ struct block_device;
extern __uint64_t xfs_max_file_offset(unsigned int);
-extern void xfs_initialize_vnode(bhv_desc_t *, vnode_t *, bhv_desc_t *, int);
+extern void xfs_initialize_vnode(bhv_desc_t *, bhv_vnode_t *, bhv_desc_t *, int);
extern void xfs_flush_inode(struct xfs_inode *);
extern void xfs_flush_device(struct xfs_inode *);
diff --git a/fs/xfs/linux-2.6/xfs_sysctl.c b/fs/xfs/linux-2.6/xfs_sysctl.c
index 7079cc83721..4af97682bec 100644
--- a/fs/xfs/linux-2.6/xfs_sysctl.c
+++ b/fs/xfs/linux-2.6/xfs_sysctl.c
@@ -120,6 +120,11 @@ STATIC ctl_table xfs_table[] = {
&sysctl_intvec, NULL,
&xfs_params.rotorstep.min, &xfs_params.rotorstep.max},
+ {XFS_INHERIT_NODFRG, "inherit_nodefrag", &xfs_params.inherit_nodfrg.val,
+ sizeof(int), 0644, NULL, &proc_dointvec_minmax,
+ &sysctl_intvec, NULL,
+ &xfs_params.inherit_nodfrg.min, &xfs_params.inherit_nodfrg.max},
+
/* please keep this the last entry */
#ifdef CONFIG_PROC_FS
{XFS_STATS_CLEAR, "stats_clear", &xfs_params.stats_clear.val,
diff --git a/fs/xfs/linux-2.6/xfs_sysctl.h b/fs/xfs/linux-2.6/xfs_sysctl.h
index bc8c11f1372..a631fb8cc5a 100644
--- a/fs/xfs/linux-2.6/xfs_sysctl.h
+++ b/fs/xfs/linux-2.6/xfs_sysctl.h
@@ -46,6 +46,7 @@ typedef struct xfs_param {
xfs_sysctl_val_t xfs_buf_age; /* Metadata buffer age before flush. */
xfs_sysctl_val_t inherit_nosym; /* Inherit the "nosymlinks" flag. */
xfs_sysctl_val_t rotorstep; /* inode32 AG rotoring control knob */
+ xfs_sysctl_val_t inherit_nodfrg;/* Inherit the "nodefrag" inode flag. */
} xfs_param_t;
/*
@@ -84,6 +85,7 @@ enum {
/* XFS_IO_BYPASS = 18 */
XFS_INHERIT_NOSYM = 19,
XFS_ROTORSTEP = 20,
+ XFS_INHERIT_NODFRG = 21,
};
extern xfs_param_t xfs_params;
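
The two hunks above add the inherit_nodefrag knob to xfs_params and register it in xfs_table[]. Assuming it surfaces at /proc/sys/fs/xfs/inherit_nodefrag, which is where the neighbouring xfs_table[] entries conventionally appear rather than something this patch spells out, a trivial userspace reader is sketched below.

#include <stdio.h>

int main(void)
{
	/* Assumed path; derived from the usual fs/xfs sysctl layout. */
	FILE *f = fopen("/proc/sys/fs/xfs/inherit_nodefrag", "r");
	int val;

	if (!f) {
		perror("fopen");
		return 1;
	}
	if (fscanf(f, "%d", &val) == 1)
		printf("inherit_nodefrag = %d\n", val);
	fclose(f);
	return 0;
}
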
diff --git a/fs/xfs/linux-2.6/xfs_vfs.c b/fs/xfs/linux-2.6/xfs_vfs.c
index 6f7c9f7a862..6145e8bd0be 100644
--- a/fs/xfs/linux-2.6/xfs_vfs.c
+++ b/fs/xfs/linux-2.6/xfs_vfs.c
@@ -23,7 +23,6 @@
#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
-#include "xfs_dir.h"
#include "xfs_dir2.h"
#include "xfs_imap.h"
#include "xfs_alloc.h"
@@ -104,7 +103,7 @@ vfs_mntupdate(
int
vfs_root(
struct bhv_desc *bdp,
- struct vnode **vpp)
+ struct bhv_vnode **vpp)
{
struct bhv_desc *next = bdp;
@@ -117,15 +116,15 @@ vfs_root(
int
vfs_statvfs(
struct bhv_desc *bdp,
- xfs_statfs_t *sp,
- struct vnode *vp)
+ bhv_statvfs_t *statp,
+ struct bhv_vnode *vp)
{
struct bhv_desc *next = bdp;
ASSERT(next);
while (! (bhvtovfsops(next))->vfs_statvfs)
next = BHV_NEXT(next);
- return ((*bhvtovfsops(next)->vfs_statvfs)(next, sp, vp));
+ return ((*bhvtovfsops(next)->vfs_statvfs)(next, statp, vp));
}
int
@@ -145,7 +144,7 @@ vfs_sync(
int
vfs_vget(
struct bhv_desc *bdp,
- struct vnode **vpp,
+ struct bhv_vnode **vpp,
struct fid *fidp)
{
struct bhv_desc *next = bdp;
@@ -187,7 +186,7 @@ vfs_quotactl(
void
vfs_init_vnode(
struct bhv_desc *bdp,
- struct vnode *vp,
+ struct bhv_vnode *vp,
struct bhv_desc *bp,
int unlock)
{
@@ -226,13 +225,13 @@ vfs_freeze(
((*bhvtovfsops(next)->vfs_freeze)(next));
}
-vfs_t *
+bhv_vfs_t *
vfs_allocate(
struct super_block *sb)
{
- struct vfs *vfsp;
+ struct bhv_vfs *vfsp;
- vfsp = kmem_zalloc(sizeof(vfs_t), KM_SLEEP);
+ vfsp = kmem_zalloc(sizeof(bhv_vfs_t), KM_SLEEP);
bhv_head_init(VFS_BHVHEAD(vfsp), "vfs");
INIT_LIST_HEAD(&vfsp->vfs_sync_list);
spin_lock_init(&vfsp->vfs_sync_lock);
@@ -247,25 +246,25 @@ vfs_allocate(
return vfsp;
}
-vfs_t *
+bhv_vfs_t *
vfs_from_sb(
struct super_block *sb)
{
- return (vfs_t *)sb->s_fs_info;
+ return (bhv_vfs_t *)sb->s_fs_info;
}
void
vfs_deallocate(
- struct vfs *vfsp)
+ struct bhv_vfs *vfsp)
{
bhv_head_destroy(VFS_BHVHEAD(vfsp));
- kmem_free(vfsp, sizeof(vfs_t));
+ kmem_free(vfsp, sizeof(bhv_vfs_t));
}
void
vfs_insertops(
- struct vfs *vfsp,
- struct bhv_vfsops *vfsops)
+ struct bhv_vfs *vfsp,
+ struct bhv_module_vfsops *vfsops)
{
struct bhv_desc *bdp;
@@ -276,9 +275,9 @@ vfs_insertops(
void
vfs_insertbhv(
- struct vfs *vfsp,
+ struct bhv_vfs *vfsp,
struct bhv_desc *bdp,
- struct vfsops *vfsops,
+ struct bhv_vfsops *vfsops,
void *mount)
{
bhv_desc_init(bdp, mount, vfsp, vfsops);
@@ -287,7 +286,7 @@ vfs_insertbhv(
void
bhv_remove_vfsops(
- struct vfs *vfsp,
+ struct bhv_vfs *vfsp,
int pos)
{
struct bhv_desc *bhv;
@@ -301,7 +300,7 @@ bhv_remove_vfsops(
void
bhv_remove_all_vfsops(
- struct vfs *vfsp,
+ struct bhv_vfs *vfsp,
int freebase)
{
struct xfs_mount *mp;
@@ -317,7 +316,7 @@ bhv_remove_all_vfsops(
void
bhv_insert_all_vfsops(
- struct vfs *vfsp)
+ struct bhv_vfs *vfsp)
{
struct xfs_mount *mp;
diff --git a/fs/xfs/linux-2.6/xfs_vfs.h b/fs/xfs/linux-2.6/xfs_vfs.h
index 841200c0309..91fc2c4b335 100644
--- a/fs/xfs/linux-2.6/xfs_vfs.h
+++ b/fs/xfs/linux-2.6/xfs_vfs.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2000-2005 Silicon Graphics, Inc.
+ * Copyright (c) 2000-2006 Silicon Graphics, Inc.
* All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
@@ -21,42 +21,40 @@
#include <linux/vfs.h>
#include "xfs_fs.h"
+struct bhv_vfs;
+struct bhv_vnode;
+
struct fid;
-struct vfs;
struct cred;
-struct vnode;
-struct kstatfs;
struct seq_file;
struct super_block;
struct xfs_mount_args;
-typedef struct kstatfs xfs_statfs_t;
+typedef struct kstatfs bhv_statvfs_t;
-typedef struct vfs_sync_work {
+typedef struct bhv_vfs_sync_work {
struct list_head w_list;
- struct vfs *w_vfs;
+ struct bhv_vfs *w_vfs;
void *w_data; /* syncer routine argument */
- void (*w_syncer)(struct vfs *, void *);
-} vfs_sync_work_t;
+ void (*w_syncer)(struct bhv_vfs *, void *);
+} bhv_vfs_sync_work_t;
-typedef struct vfs {
+typedef struct bhv_vfs {
u_int vfs_flag; /* flags */
xfs_fsid_t vfs_fsid; /* file system ID */
xfs_fsid_t *vfs_altfsid; /* An ID fixed for life of FS */
bhv_head_t vfs_bh; /* head of vfs behavior chain */
struct super_block *vfs_super; /* generic superblock pointer */
struct task_struct *vfs_sync_task; /* generalised sync thread */
- vfs_sync_work_t vfs_sync_work; /* work item for VFS_SYNC */
+ bhv_vfs_sync_work_t vfs_sync_work; /* work item for VFS_SYNC */
struct list_head vfs_sync_list; /* sync thread work item list */
spinlock_t vfs_sync_lock; /* work item list lock */
- int vfs_sync_seq; /* sync thread generation no. */
+ int vfs_sync_seq; /* sync thread generation no. */
wait_queue_head_t vfs_wait_single_sync_task;
-} vfs_t;
-
-#define vfs_fbhv vfs_bh.bh_first /* 1st on vfs behavior chain */
+} bhv_vfs_t;
-#define bhvtovfs(bdp) ( (struct vfs *)BHV_VOBJ(bdp) )
-#define bhvtovfsops(bdp) ( (struct vfsops *)BHV_OPS(bdp) )
+#define bhvtovfs(bdp) ( (struct bhv_vfs *)BHV_VOBJ(bdp) )
+#define bhvtovfsops(bdp) ( (struct bhv_vfsops *)BHV_OPS(bdp) )
#define VFS_BHVHEAD(vfs) ( &(vfs)->vfs_bh )
#define VFS_REMOVEBHV(vfs, bdp) ( bhv_remove(VFS_BHVHEAD(vfs), bdp) )
@@ -71,7 +69,7 @@ typedef enum {
VFS_BHV_QM, /* quota manager */
VFS_BHV_IO, /* IO path */
VFS_BHV_END /* housekeeping end-of-range */
-} vfs_bhv_t;
+} bhv_vfs_type_t;
#define VFS_POSITION_XFS (BHV_POSITION_BASE)
#define VFS_POSITION_DM (VFS_POSITION_BASE+10)
@@ -81,8 +79,9 @@ typedef enum {
#define VFS_RDONLY 0x0001 /* read-only vfs */
#define VFS_GRPID 0x0002 /* group-ID assigned from directory */
#define VFS_DMI 0x0004 /* filesystem has the DMI enabled */
-#define VFS_32BITINODES 0x0008 /* do not use inums above 32 bits */
-#define VFS_END 0x0008 /* max flag */
+#define VFS_UMOUNT 0x0008 /* unmount in progress */
+#define VFS_32BITINODES 0x0010 /* do not use inums above 32 bits */
+#define VFS_END 0x0010 /* max flag */
#define SYNC_ATTR 0x0001 /* sync attributes */
#define SYNC_CLOSE 0x0002 /* close file system down */
@@ -92,7 +91,14 @@ typedef enum {
#define SYNC_FSDATA 0x0020 /* flush fs data (e.g. superblocks) */
#define SYNC_REFCACHE 0x0040 /* prune some of the nfs ref cache */
#define SYNC_REMOUNT 0x0080 /* remount readonly, no dummy LRs */
-#define SYNC_QUIESCE 0x0100 /* quiesce filesystem for a snapshot */
+#define SYNC_QUIESCE 0x0100 /* quiesce filesystem for a snapshot */
+
+#define SHUTDOWN_META_IO_ERROR 0x0001 /* write attempt to metadata failed */
+#define SHUTDOWN_LOG_IO_ERROR 0x0002 /* write attempt to the log failed */
+#define SHUTDOWN_FORCE_UMOUNT 0x0004 /* shutdown from a forced unmount */
+#define SHUTDOWN_CORRUPT_INCORE 0x0008 /* corrupt in-memory data structures */
+#define SHUTDOWN_REMOTE_REQ 0x0010 /* shutdown came from remote cell */
+#define SHUTDOWN_DEVICE_REQ 0x0020 /* failed all paths to the device */
typedef int (*vfs_mount_t)(bhv_desc_t *,
struct xfs_mount_args *, struct cred *);
@@ -102,18 +108,19 @@ typedef int (*vfs_showargs_t)(bhv_desc_t *, struct seq_file *);
typedef int (*vfs_unmount_t)(bhv_desc_t *, int, struct cred *);
typedef int (*vfs_mntupdate_t)(bhv_desc_t *, int *,
struct xfs_mount_args *);
-typedef int (*vfs_root_t)(bhv_desc_t *, struct vnode **);
-typedef int (*vfs_statvfs_t)(bhv_desc_t *, xfs_statfs_t *, struct vnode *);
+typedef int (*vfs_root_t)(bhv_desc_t *, struct bhv_vnode **);
+typedef int (*vfs_statvfs_t)(bhv_desc_t *, bhv_statvfs_t *,
+ struct bhv_vnode *);
typedef int (*vfs_sync_t)(bhv_desc_t *, int, struct cred *);
-typedef int (*vfs_vget_t)(bhv_desc_t *, struct vnode **, struct fid *);
+typedef int (*vfs_vget_t)(bhv_desc_t *, struct bhv_vnode **, struct fid *);
typedef int (*vfs_dmapiops_t)(bhv_desc_t *, caddr_t);
typedef int (*vfs_quotactl_t)(bhv_desc_t *, int, int, caddr_t);
typedef void (*vfs_init_vnode_t)(bhv_desc_t *,
- struct vnode *, bhv_desc_t *, int);
+ struct bhv_vnode *, bhv_desc_t *, int);
typedef void (*vfs_force_shutdown_t)(bhv_desc_t *, int, char *, int);
typedef void (*vfs_freeze_t)(bhv_desc_t *);
-typedef struct vfsops {
+typedef struct bhv_vfsops {
bhv_position_t vf_position; /* behavior chain position */
vfs_mount_t vfs_mount; /* mount file system */
vfs_parseargs_t vfs_parseargs; /* parse mount options */
@@ -129,82 +136,82 @@ typedef struct vfsops {
vfs_init_vnode_t vfs_init_vnode; /* initialize a new vnode */
vfs_force_shutdown_t vfs_force_shutdown; /* crash and burn */
vfs_freeze_t vfs_freeze; /* freeze fs for snapshot */
-} vfsops_t;
+} bhv_vfsops_t;
/*
- * VFS's. Operates on vfs structure pointers (starts at bhv head).
+ * Virtual filesystem operations, operating from head bhv.
*/
-#define VHEAD(v) ((v)->vfs_fbhv)
-#define VFS_MOUNT(v, ma,cr, rv) ((rv) = vfs_mount(VHEAD(v), ma,cr))
-#define VFS_PARSEARGS(v, o,ma,f, rv) ((rv) = vfs_parseargs(VHEAD(v), o,ma,f))
-#define VFS_SHOWARGS(v, m, rv) ((rv) = vfs_showargs(VHEAD(v), m))
-#define VFS_UNMOUNT(v, f, cr, rv) ((rv) = vfs_unmount(VHEAD(v), f,cr))
-#define VFS_MNTUPDATE(v, fl, args, rv) ((rv) = vfs_mntupdate(VHEAD(v), fl, args))
-#define VFS_ROOT(v, vpp, rv) ((rv) = vfs_root(VHEAD(v), vpp))
-#define VFS_STATVFS(v, sp,vp, rv) ((rv) = vfs_statvfs(VHEAD(v), sp,vp))
-#define VFS_SYNC(v, flag,cr, rv) ((rv) = vfs_sync(VHEAD(v), flag,cr))
-#define VFS_VGET(v, vpp,fidp, rv) ((rv) = vfs_vget(VHEAD(v), vpp,fidp))
-#define VFS_DMAPIOPS(v, p, rv) ((rv) = vfs_dmapiops(VHEAD(v), p))
-#define VFS_QUOTACTL(v, c,id,p, rv) ((rv) = vfs_quotactl(VHEAD(v), c,id,p))
-#define VFS_INIT_VNODE(v, vp,b,ul) ( vfs_init_vnode(VHEAD(v), vp,b,ul) )
-#define VFS_FORCE_SHUTDOWN(v, fl,f,l) ( vfs_force_shutdown(VHEAD(v), fl,f,l) )
-#define VFS_FREEZE(v) ( vfs_freeze(VHEAD(v)) )
+#define VFSHEAD(v) ((v)->vfs_bh.bh_first)
+#define bhv_vfs_mount(v, ma,cr) vfs_mount(VFSHEAD(v), ma,cr)
+#define bhv_vfs_parseargs(v, o,ma,f) vfs_parseargs(VFSHEAD(v), o,ma,f)
+#define bhv_vfs_showargs(v, m) vfs_showargs(VFSHEAD(v), m)
+#define bhv_vfs_unmount(v, f,cr) vfs_unmount(VFSHEAD(v), f,cr)
+#define bhv_vfs_mntupdate(v, fl,args) vfs_mntupdate(VFSHEAD(v), fl,args)
+#define bhv_vfs_root(v, vpp) vfs_root(VFSHEAD(v), vpp)
+#define bhv_vfs_statvfs(v, sp,vp) vfs_statvfs(VFSHEAD(v), sp,vp)
+#define bhv_vfs_sync(v, flag,cr) vfs_sync(VFSHEAD(v), flag,cr)
+#define bhv_vfs_vget(v, vpp,fidp) vfs_vget(VFSHEAD(v), vpp,fidp)
+#define bhv_vfs_dmapiops(v, p) vfs_dmapiops(VFSHEAD(v), p)
+#define bhv_vfs_quotactl(v, c,id,p) vfs_quotactl(VFSHEAD(v), c,id,p)
+#define bhv_vfs_init_vnode(v, vp,b,ul) vfs_init_vnode(VFSHEAD(v), vp,b,ul)
+#define bhv_vfs_force_shutdown(v,u,f,l) vfs_force_shutdown(VFSHEAD(v), u,f,l)
+#define bhv_vfs_freeze(v) vfs_freeze(VFSHEAD(v))
/*
- * PVFS's. Operates on behavior descriptor pointers.
+ * Virtual filesystem operations, operating from next bhv.
*/
-#define PVFS_MOUNT(b, ma,cr, rv) ((rv) = vfs_mount(b, ma,cr))
-#define PVFS_PARSEARGS(b, o,ma,f, rv) ((rv) = vfs_parseargs(b, o,ma,f))
-#define PVFS_SHOWARGS(b, m, rv) ((rv) = vfs_showargs(b, m))
-#define PVFS_UNMOUNT(b, f,cr, rv) ((rv) = vfs_unmount(b, f,cr))
-#define PVFS_MNTUPDATE(b, fl, args, rv) ((rv) = vfs_mntupdate(b, fl, args))
-#define PVFS_ROOT(b, vpp, rv) ((rv) = vfs_root(b, vpp))
-#define PVFS_STATVFS(b, sp,vp, rv) ((rv) = vfs_statvfs(b, sp,vp))
-#define PVFS_SYNC(b, flag,cr, rv) ((rv) = vfs_sync(b, flag,cr))
-#define PVFS_VGET(b, vpp,fidp, rv) ((rv) = vfs_vget(b, vpp,fidp))
-#define PVFS_DMAPIOPS(b, p, rv) ((rv) = vfs_dmapiops(b, p))
-#define PVFS_QUOTACTL(b, c,id,p, rv) ((rv) = vfs_quotactl(b, c,id,p))
-#define PVFS_INIT_VNODE(b, vp,b2,ul) ( vfs_init_vnode(b, vp,b2,ul) )
-#define PVFS_FORCE_SHUTDOWN(b, fl,f,l) ( vfs_force_shutdown(b, fl,f,l) )
-#define PVFS_FREEZE(b) ( vfs_freeze(b) )
+#define bhv_next_vfs_mount(b, ma,cr) vfs_mount(b, ma,cr)
+#define bhv_next_vfs_parseargs(b, o,ma,f) vfs_parseargs(b, o,ma,f)
+#define bhv_next_vfs_showargs(b, m) vfs_showargs(b, m)
+#define bhv_next_vfs_unmount(b, f,cr) vfs_unmount(b, f,cr)
+#define bhv_next_vfs_mntupdate(b, fl,args) vfs_mntupdate(b, fl, args)
+#define bhv_next_vfs_root(b, vpp) vfs_root(b, vpp)
+#define bhv_next_vfs_statvfs(b, sp,vp) vfs_statvfs(b, sp,vp)
+#define bhv_next_vfs_sync(b, flag,cr) vfs_sync(b, flag,cr)
+#define bhv_next_vfs_vget(b, vpp,fidp) vfs_vget(b, vpp,fidp)
+#define bhv_next_vfs_dmapiops(b, p) vfs_dmapiops(b, p)
+#define bhv_next_vfs_quotactl(b, c,id,p) vfs_quotactl(b, c,id,p)
+#define bhv_next_vfs_init_vnode(b, vp,b2,ul) vfs_init_vnode(b, vp,b2,ul)
+#define bhv_next_force_shutdown(b, fl,f,l) vfs_force_shutdown(b, fl,f,l)
+#define bhv_next_vfs_freeze(b) vfs_freeze(b)
extern int vfs_mount(bhv_desc_t *, struct xfs_mount_args *, struct cred *);
extern int vfs_parseargs(bhv_desc_t *, char *, struct xfs_mount_args *, int);
extern int vfs_showargs(bhv_desc_t *, struct seq_file *);
extern int vfs_unmount(bhv_desc_t *, int, struct cred *);
extern int vfs_mntupdate(bhv_desc_t *, int *, struct xfs_mount_args *);
-extern int vfs_root(bhv_desc_t *, struct vnode **);
-extern int vfs_statvfs(bhv_desc_t *, xfs_statfs_t *, struct vnode *);
+extern int vfs_root(bhv_desc_t *, struct bhv_vnode **);
+extern int vfs_statvfs(bhv_desc_t *, bhv_statvfs_t *, struct bhv_vnode *);
extern int vfs_sync(bhv_desc_t *, int, struct cred *);
-extern int vfs_vget(bhv_desc_t *, struct vnode **, struct fid *);
+extern int vfs_vget(bhv_desc_t *, struct bhv_vnode **, struct fid *);
extern int vfs_dmapiops(bhv_desc_t *, caddr_t);
extern int vfs_quotactl(bhv_desc_t *, int, int, caddr_t);
-extern void vfs_init_vnode(bhv_desc_t *, struct vnode *, bhv_desc_t *, int);
+extern void vfs_init_vnode(bhv_desc_t *, struct bhv_vnode *, bhv_desc_t *, int);
extern void vfs_force_shutdown(bhv_desc_t *, int, char *, int);
extern void vfs_freeze(bhv_desc_t *);
-typedef struct bhv_vfsops {
- struct vfsops bhv_common;
+#define vfs_test_for_freeze(vfs) ((vfs)->vfs_super->s_frozen)
+#define vfs_wait_for_freeze(vfs,l) vfs_check_frozen((vfs)->vfs_super, (l))
+
+typedef struct bhv_module_vfsops {
+ struct bhv_vfsops bhv_common;
void * bhv_custom;
-} bhv_vfsops_t;
+} bhv_module_vfsops_t;
-#define vfs_bhv_lookup(v, id) ( bhv_lookup_range(&(v)->vfs_bh, (id), (id)) )
-#define vfs_bhv_custom(b) ( ((bhv_vfsops_t *)BHV_OPS(b))->bhv_custom )
-#define vfs_bhv_set_custom(b,o) ( (b)->bhv_custom = (void *)(o))
-#define vfs_bhv_clr_custom(b) ( (b)->bhv_custom = NULL )
+#define vfs_bhv_lookup(v, id) (bhv_lookup_range(&(v)->vfs_bh, (id), (id)))
+#define vfs_bhv_custom(b) (((bhv_module_vfsops_t*)BHV_OPS(b))->bhv_custom)
+#define vfs_bhv_set_custom(b,o) ((b)->bhv_custom = (void *)(o))
+#define vfs_bhv_clr_custom(b) ((b)->bhv_custom = NULL)
-extern vfs_t *vfs_allocate(struct super_block *);
-extern vfs_t *vfs_from_sb(struct super_block *);
-extern void vfs_deallocate(vfs_t *);
-extern void vfs_insertops(vfs_t *, bhv_vfsops_t *);
-extern void vfs_insertbhv(vfs_t *, bhv_desc_t *, vfsops_t *, void *);
+extern bhv_vfs_t *vfs_allocate(struct super_block *);
+extern bhv_vfs_t *vfs_from_sb(struct super_block *);
+extern void vfs_deallocate(bhv_vfs_t *);
+extern void vfs_insertbhv(bhv_vfs_t *, bhv_desc_t *, bhv_vfsops_t *, void *);
-extern void bhv_insert_all_vfsops(struct vfs *);
-extern void bhv_remove_all_vfsops(struct vfs *, int);
-extern void bhv_remove_vfsops(struct vfs *, int);
+extern void vfs_insertops(bhv_vfs_t *, bhv_module_vfsops_t *);
-#define fs_frozen(vfsp) ((vfsp)->vfs_super->s_frozen)
-#define fs_check_frozen(vfsp, level) \
- vfs_check_frozen(vfsp->vfs_super, level);
+extern void bhv_insert_all_vfsops(struct bhv_vfs *);
+extern void bhv_remove_all_vfsops(struct bhv_vfs *, int);
+extern void bhv_remove_vfsops(struct bhv_vfs *, int);
#endif /* __XFS_VFS_H__ */
diff --git a/fs/xfs/linux-2.6/xfs_vnode.c b/fs/xfs/linux-2.6/xfs_vnode.c
index d27c25b27cc..6628d96b6fd 100644
--- a/fs/xfs/linux-2.6/xfs_vnode.c
+++ b/fs/xfs/linux-2.6/xfs_vnode.c
@@ -39,7 +39,7 @@ vn_init(void)
void
vn_iowait(
- struct vnode *vp)
+ bhv_vnode_t *vp)
{
wait_queue_head_t *wq = vptosync(vp);
@@ -48,17 +48,33 @@ vn_iowait(
void
vn_iowake(
- struct vnode *vp)
+ bhv_vnode_t *vp)
{
if (atomic_dec_and_test(&vp->v_iocount))
wake_up(vptosync(vp));
}
-struct vnode *
+/*
+ * Volume managers supporting multiple paths can send back ENODEV when the
+ * final path disappears. In this case continuing to fill the page cache
+ * with dirty data which cannot be written out is evil, so prevent that.
+ */
+void
+vn_ioerror(
+ bhv_vnode_t *vp,
+ int error,
+ char *f,
+ int l)
+{
+ if (unlikely(error == -ENODEV))
+ bhv_vfs_force_shutdown(vp->v_vfsp, SHUTDOWN_DEVICE_REQ, f, l);
+}
+
+bhv_vnode_t *
vn_initialize(
struct inode *inode)
{
- struct vnode *vp = vn_from_inode(inode);
+ bhv_vnode_t *vp = vn_from_inode(inode);
XFS_STATS_INC(vn_active);
XFS_STATS_INC(vn_alloc);
@@ -94,8 +110,8 @@ vn_initialize(
*/
void
vn_revalidate_core(
- struct vnode *vp,
- vattr_t *vap)
+ bhv_vnode_t *vp,
+ bhv_vattr_t *vap)
{
struct inode *inode = vn_to_inode(vp);
@@ -130,14 +146,14 @@ vn_revalidate_core(
*/
int
__vn_revalidate(
- struct vnode *vp,
- struct vattr *vattr)
+ bhv_vnode_t *vp,
+ bhv_vattr_t *vattr)
{
int error;
vn_trace_entry(vp, __FUNCTION__, (inst_t *)__return_address);
vattr->va_mask = XFS_AT_STAT | XFS_AT_XFLAGS;
- VOP_GETATTR(vp, vattr, 0, NULL, error);
+ error = bhv_vop_getattr(vp, vattr, 0, NULL);
if (likely(!error)) {
vn_revalidate_core(vp, vattr);
VUNMODIFY(vp);
@@ -147,9 +163,9 @@ __vn_revalidate(
int
vn_revalidate(
- struct vnode *vp)
+ bhv_vnode_t *vp)
{
- vattr_t vattr;
+ bhv_vattr_t vattr;
return __vn_revalidate(vp, &vattr);
}
@@ -157,9 +173,9 @@ vn_revalidate(
/*
* Add a reference to a referenced vnode.
*/
-struct vnode *
+bhv_vnode_t *
vn_hold(
- struct vnode *vp)
+ bhv_vnode_t *vp)
{
struct inode *inode;
@@ -192,31 +208,31 @@ vn_hold(
* Vnode tracing code.
*/
void
-vn_trace_entry(vnode_t *vp, const char *func, inst_t *ra)
+vn_trace_entry(bhv_vnode_t *vp, const char *func, inst_t *ra)
{
KTRACE_ENTER(vp, VNODE_KTRACE_ENTRY, func, 0, ra);
}
void
-vn_trace_exit(vnode_t *vp, const char *func, inst_t *ra)
+vn_trace_exit(bhv_vnode_t *vp, const char *func, inst_t *ra)
{
KTRACE_ENTER(vp, VNODE_KTRACE_EXIT, func, 0, ra);
}
void
-vn_trace_hold(vnode_t *vp, char *file, int line, inst_t *ra)
+vn_trace_hold(bhv_vnode_t *vp, char *file, int line, inst_t *ra)
{
KTRACE_ENTER(vp, VNODE_KTRACE_HOLD, file, line, ra);
}
void
-vn_trace_ref(vnode_t *vp, char *file, int line, inst_t *ra)
+vn_trace_ref(bhv_vnode_t *vp, char *file, int line, inst_t *ra)
{
KTRACE_ENTER(vp, VNODE_KTRACE_REF, file, line, ra);
}
void
-vn_trace_rele(vnode_t *vp, char *file, int line, inst_t *ra)
+vn_trace_rele(bhv_vnode_t *vp, char *file, int line, inst_t *ra)
{
KTRACE_ENTER(vp, VNODE_KTRACE_RELE, file, line, ra);
}
diff --git a/fs/xfs/linux-2.6/xfs_vnode.h b/fs/xfs/linux-2.6/xfs_vnode.h
index 2a8e16c2235..35c6a01963a 100644
--- a/fs/xfs/linux-2.6/xfs_vnode.h
+++ b/fs/xfs/linux-2.6/xfs_vnode.h
@@ -14,57 +14,35 @@
* You should have received a copy of the GNU General Public License
* along with this program; if not, write the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
- *
- * Portions Copyright (c) 1989, 1993
- * The Regents of the University of California. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * 3. Neither the name of the University nor the names of its contributors
- * may be used to endorse or promote products derived from this software
- * without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
- * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
- * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
- * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
- * SUCH DAMAGE.
*/
#ifndef __XFS_VNODE_H__
#define __XFS_VNODE_H__
struct uio;
struct file;
-struct vattr;
+struct bhv_vfs;
+struct bhv_vattr;
struct xfs_iomap;
struct attrlist_cursor_kern;
+typedef struct dentry bhv_vname_t;
+typedef __u64 bhv_vnumber_t;
-typedef xfs_ino_t vnumber_t;
-typedef struct dentry vname_t;
-typedef bhv_head_t vn_bhv_head_t;
+typedef enum bhv_vflags {
+ VMODIFIED = 0x08, /* XFS inode state possibly differs */
+					/* from the Linux inode state. */
+ VTRUNCATED = 0x40, /* truncated down so flush-on-close */
+} bhv_vflags_t;
/*
* MP locking protocols:
* v_flag, v_vfsp VN_LOCK/VN_UNLOCK
*/
-typedef struct vnode {
- __u32 v_flag; /* vnode flags (see below) */
- struct vfs *v_vfsp; /* ptr to containing VFS */
- vnumber_t v_number; /* in-core vnode number */
- vn_bhv_head_t v_bh; /* behavior head */
+typedef struct bhv_vnode {
+ bhv_vflags_t v_flag; /* vnode flags (see above) */
+ bhv_vfs_t *v_vfsp; /* ptr to containing VFS */
+ bhv_vnumber_t v_number; /* in-core vnode number */
+ bhv_head_t v_bh; /* behavior head */
spinlock_t v_lock; /* VN_LOCK/VN_UNLOCK */
atomic_t v_iocount; /* outstanding I/O count */
#ifdef XFS_VNODE_TRACE
@@ -72,7 +50,7 @@ typedef struct vnode {
#endif
struct inode v_inode; /* Linux inode */
/* inode MUST be last */
-} vnode_t;
+} bhv_vnode_t;
#define VN_ISLNK(vp) S_ISLNK((vp)->v_inode.i_mode)
#define VN_ISREG(vp) S_ISREG((vp)->v_inode.i_mode)
@@ -80,9 +58,6 @@ typedef struct vnode {
#define VN_ISCHR(vp) S_ISCHR((vp)->v_inode.i_mode)
#define VN_ISBLK(vp) S_ISBLK((vp)->v_inode.i_mode)
-#define v_fbhv v_bh.bh_first /* first behavior */
-#define v_fops v_bh.bh_first->bd_ops /* first behavior ops */
-
#define VNODE_POSITION_BASE BHV_POSITION_BASE /* chain bottom */
#define VNODE_POSITION_TOP BHV_POSITION_TOP /* chain top */
#define VNODE_POSITION_INVALID BHV_POSITION_INVALID /* invalid pos. num */
@@ -104,8 +79,8 @@ typedef enum {
/*
* Macros for dealing with the behavior descriptor inside of the vnode.
*/
-#define BHV_TO_VNODE(bdp) ((vnode_t *)BHV_VOBJ(bdp))
-#define BHV_TO_VNODE_NULL(bdp) ((vnode_t *)BHV_VOBJNULL(bdp))
+#define BHV_TO_VNODE(bdp) ((bhv_vnode_t *)BHV_VOBJ(bdp))
+#define BHV_TO_VNODE_NULL(bdp) ((bhv_vnode_t *)BHV_VOBJNULL(bdp))
#define VN_BHV_HEAD(vp) ((bhv_head_t *)(&((vp)->v_bh)))
#define vn_bhv_head_init(bhp,name) bhv_head_init(bhp,name)
@@ -116,35 +91,29 @@ typedef enum {
/*
* Vnode to Linux inode mapping.
*/
-static inline struct vnode *vn_from_inode(struct inode *inode)
+static inline struct bhv_vnode *vn_from_inode(struct inode *inode)
{
- return (vnode_t *)list_entry(inode, vnode_t, v_inode);
+ return (bhv_vnode_t *)list_entry(inode, bhv_vnode_t, v_inode);
}
-static inline struct inode *vn_to_inode(struct vnode *vnode)
+static inline struct inode *vn_to_inode(struct bhv_vnode *vnode)
{
return &vnode->v_inode;
}
/*
- * Vnode flags.
- */
-#define VMODIFIED 0x8 /* XFS inode state possibly differs */
- /* to the Linux inode state. */
-
-/*
- * Values for the VOP_RWLOCK and VOP_RWUNLOCK flags parameter.
+ * Values for the vop_rwlock/rwunlock flags parameter.
*/
-typedef enum vrwlock {
+typedef enum bhv_vrwlock {
VRWLOCK_NONE,
VRWLOCK_READ,
VRWLOCK_WRITE,
VRWLOCK_WRITE_DIRECT,
VRWLOCK_TRY_READ,
VRWLOCK_TRY_WRITE
-} vrwlock_t;
+} bhv_vrwlock_t;
/*
- * Return values for VOP_INACTIVE. A return value of
+ * Return values for bhv_vop_inactive. A return value of
* VN_INACTIVE_NOCACHE implies that the file system behavior
* has disassociated its state and bhv_desc_t from the vnode.
*/
@@ -152,18 +121,20 @@ typedef enum vrwlock {
#define VN_INACTIVE_NOCACHE 1
/*
- * Values for the cmd code given to VOP_VNODE_CHANGE.
+ * Values for the cmd code given to vop_vnode_change.
*/
-typedef enum vchange {
+typedef enum bhv_vchange {
VCHANGE_FLAGS_FRLOCKS = 0,
VCHANGE_FLAGS_ENF_LOCKING = 1,
VCHANGE_FLAGS_TRUNCATED = 2,
VCHANGE_FLAGS_PAGE_DIRTY = 3,
VCHANGE_FLAGS_IOEXCL_COUNT = 4
-} vchange_t;
+} bhv_vchange_t;
+typedef enum { L_FALSE, L_TRUE } lastclose_t;
typedef int (*vop_open_t)(bhv_desc_t *, struct cred *);
+typedef int (*vop_close_t)(bhv_desc_t *, int, lastclose_t, struct cred *);
typedef ssize_t (*vop_read_t)(bhv_desc_t *, struct kiocb *,
const struct iovec *, unsigned int,
loff_t *, int, struct cred *);
@@ -181,27 +152,27 @@ typedef ssize_t (*vop_splice_write_t)(bhv_desc_t *, struct pipe_inode_info *,
struct cred *);
typedef int (*vop_ioctl_t)(bhv_desc_t *, struct inode *, struct file *,
int, unsigned int, void __user *);
-typedef int (*vop_getattr_t)(bhv_desc_t *, struct vattr *, int,
+typedef int (*vop_getattr_t)(bhv_desc_t *, struct bhv_vattr *, int,
struct cred *);
-typedef int (*vop_setattr_t)(bhv_desc_t *, struct vattr *, int,
+typedef int (*vop_setattr_t)(bhv_desc_t *, struct bhv_vattr *, int,
struct cred *);
typedef int (*vop_access_t)(bhv_desc_t *, int, struct cred *);
-typedef int (*vop_lookup_t)(bhv_desc_t *, vname_t *, vnode_t **,
- int, vnode_t *, struct cred *);
-typedef int (*vop_create_t)(bhv_desc_t *, vname_t *, struct vattr *,
- vnode_t **, struct cred *);
-typedef int (*vop_remove_t)(bhv_desc_t *, vname_t *, struct cred *);
-typedef int (*vop_link_t)(bhv_desc_t *, vnode_t *, vname_t *,
- struct cred *);
-typedef int (*vop_rename_t)(bhv_desc_t *, vname_t *, vnode_t *, vname_t *,
+typedef int (*vop_lookup_t)(bhv_desc_t *, bhv_vname_t *, bhv_vnode_t **,
+ int, bhv_vnode_t *, struct cred *);
+typedef int (*vop_create_t)(bhv_desc_t *, bhv_vname_t *, struct bhv_vattr *,
+ bhv_vnode_t **, struct cred *);
+typedef int (*vop_remove_t)(bhv_desc_t *, bhv_vname_t *, struct cred *);
+typedef int (*vop_link_t)(bhv_desc_t *, bhv_vnode_t *, bhv_vname_t *,
struct cred *);
-typedef int (*vop_mkdir_t)(bhv_desc_t *, vname_t *, struct vattr *,
- vnode_t **, struct cred *);
-typedef int (*vop_rmdir_t)(bhv_desc_t *, vname_t *, struct cred *);
+typedef int (*vop_rename_t)(bhv_desc_t *, bhv_vname_t *, bhv_vnode_t *,
+ bhv_vname_t *, struct cred *);
+typedef int (*vop_mkdir_t)(bhv_desc_t *, bhv_vname_t *, struct bhv_vattr *,
+ bhv_vnode_t **, struct cred *);
+typedef int (*vop_rmdir_t)(bhv_desc_t *, bhv_vname_t *, struct cred *);
typedef int (*vop_readdir_t)(bhv_desc_t *, struct uio *, struct cred *,
int *);
-typedef int (*vop_symlink_t)(bhv_desc_t *, vname_t *, struct vattr *,
- char *, vnode_t **, struct cred *);
+typedef int (*vop_symlink_t)(bhv_desc_t *, bhv_vname_t *, struct bhv_vattr*,
+ char *, bhv_vnode_t **, struct cred *);
typedef int (*vop_readlink_t)(bhv_desc_t *, struct uio *, int,
struct cred *);
typedef int (*vop_fsync_t)(bhv_desc_t *, int, struct cred *,
@@ -209,8 +180,8 @@ typedef int (*vop_fsync_t)(bhv_desc_t *, int, struct cred *,
typedef int (*vop_inactive_t)(bhv_desc_t *, struct cred *);
typedef int (*vop_fid2_t)(bhv_desc_t *, struct fid *);
typedef int (*vop_release_t)(bhv_desc_t *);
-typedef int (*vop_rwlock_t)(bhv_desc_t *, vrwlock_t);
-typedef void (*vop_rwunlock_t)(bhv_desc_t *, vrwlock_t);
+typedef int (*vop_rwlock_t)(bhv_desc_t *, bhv_vrwlock_t);
+typedef void (*vop_rwunlock_t)(bhv_desc_t *, bhv_vrwlock_t);
typedef int (*vop_bmap_t)(bhv_desc_t *, xfs_off_t, ssize_t, int,
struct xfs_iomap *, int *);
typedef int (*vop_reclaim_t)(bhv_desc_t *);
@@ -222,8 +193,8 @@ typedef int (*vop_attr_remove_t)(bhv_desc_t *, const char *,
int, struct cred *);
typedef int (*vop_attr_list_t)(bhv_desc_t *, char *, int, int,
struct attrlist_cursor_kern *, struct cred *);
-typedef void (*vop_link_removed_t)(bhv_desc_t *, vnode_t *, int);
-typedef void (*vop_vnode_change_t)(bhv_desc_t *, vchange_t, __psint_t);
+typedef void (*vop_link_removed_t)(bhv_desc_t *, bhv_vnode_t *, int);
+typedef void (*vop_vnode_change_t)(bhv_desc_t *, bhv_vchange_t, __psint_t);
typedef void (*vop_ptossvp_t)(bhv_desc_t *, xfs_off_t, xfs_off_t, int);
typedef void (*vop_pflushinvalvp_t)(bhv_desc_t *, xfs_off_t, xfs_off_t, int);
typedef int (*vop_pflushvp_t)(bhv_desc_t *, xfs_off_t, xfs_off_t,
@@ -231,9 +202,10 @@ typedef int (*vop_pflushvp_t)(bhv_desc_t *, xfs_off_t, xfs_off_t,
typedef int (*vop_iflush_t)(bhv_desc_t *, int);
-typedef struct vnodeops {
+typedef struct bhv_vnodeops {
bhv_position_t vn_position; /* position within behavior chain */
vop_open_t vop_open;
+ vop_close_t vop_close;
vop_read_t vop_read;
vop_write_t vop_write;
vop_sendfile_t vop_sendfile;
@@ -271,103 +243,80 @@ typedef struct vnodeops {
vop_pflushvp_t vop_flush_pages;
vop_release_t vop_release;
vop_iflush_t vop_iflush;
-} vnodeops_t;
+} bhv_vnodeops_t;
/*
- * VOP's.
- */
-#define _VOP_(op, vp) (*((vnodeops_t *)(vp)->v_fops)->op)
-
-#define VOP_READ(vp,file,iov,segs,offset,ioflags,cr,rv) \
- rv = _VOP_(vop_read, vp)((vp)->v_fbhv,file,iov,segs,offset,ioflags,cr)
-#define VOP_WRITE(vp,file,iov,segs,offset,ioflags,cr,rv) \
- rv = _VOP_(vop_write, vp)((vp)->v_fbhv,file,iov,segs,offset,ioflags,cr)
-#define VOP_SENDFILE(vp,f,off,ioflags,cnt,act,targ,cr,rv) \
- rv = _VOP_(vop_sendfile, vp)((vp)->v_fbhv,f,off,ioflags,cnt,act,targ,cr)
-#define VOP_SPLICE_READ(vp,f,o,pipe,cnt,fl,iofl,cr,rv) \
- rv = _VOP_(vop_splice_read, vp)((vp)->v_fbhv,f,o,pipe,cnt,fl,iofl,cr)
-#define VOP_SPLICE_WRITE(vp,f,o,pipe,cnt,fl,iofl,cr,rv) \
- rv = _VOP_(vop_splice_write, vp)((vp)->v_fbhv,f,o,pipe,cnt,fl,iofl,cr)
-#define VOP_BMAP(vp,of,sz,rw,b,n,rv) \
- rv = _VOP_(vop_bmap, vp)((vp)->v_fbhv,of,sz,rw,b,n)
-#define VOP_OPEN(vp, cr, rv) \
- rv = _VOP_(vop_open, vp)((vp)->v_fbhv, cr)
-#define VOP_GETATTR(vp, vap, f, cr, rv) \
- rv = _VOP_(vop_getattr, vp)((vp)->v_fbhv, vap, f, cr)
-#define VOP_SETATTR(vp, vap, f, cr, rv) \
- rv = _VOP_(vop_setattr, vp)((vp)->v_fbhv, vap, f, cr)
-#define VOP_ACCESS(vp, mode, cr, rv) \
- rv = _VOP_(vop_access, vp)((vp)->v_fbhv, mode, cr)
-#define VOP_LOOKUP(vp,d,vpp,f,rdir,cr,rv) \
- rv = _VOP_(vop_lookup, vp)((vp)->v_fbhv,d,vpp,f,rdir,cr)
-#define VOP_CREATE(dvp,d,vap,vpp,cr,rv) \
- rv = _VOP_(vop_create, dvp)((dvp)->v_fbhv,d,vap,vpp,cr)
-#define VOP_REMOVE(dvp,d,cr,rv) \
- rv = _VOP_(vop_remove, dvp)((dvp)->v_fbhv,d,cr)
-#define VOP_LINK(tdvp,fvp,d,cr,rv) \
- rv = _VOP_(vop_link, tdvp)((tdvp)->v_fbhv,fvp,d,cr)
-#define VOP_RENAME(fvp,fnm,tdvp,tnm,cr,rv) \
- rv = _VOP_(vop_rename, fvp)((fvp)->v_fbhv,fnm,tdvp,tnm,cr)
-#define VOP_MKDIR(dp,d,vap,vpp,cr,rv) \
- rv = _VOP_(vop_mkdir, dp)((dp)->v_fbhv,d,vap,vpp,cr)
-#define VOP_RMDIR(dp,d,cr,rv) \
- rv = _VOP_(vop_rmdir, dp)((dp)->v_fbhv,d,cr)
-#define VOP_READDIR(vp,uiop,cr,eofp,rv) \
- rv = _VOP_(vop_readdir, vp)((vp)->v_fbhv,uiop,cr,eofp)
-#define VOP_SYMLINK(dvp,d,vap,tnm,vpp,cr,rv) \
- rv = _VOP_(vop_symlink, dvp) ((dvp)->v_fbhv,d,vap,tnm,vpp,cr)
-#define VOP_READLINK(vp,uiop,fl,cr,rv) \
- rv = _VOP_(vop_readlink, vp)((vp)->v_fbhv,uiop,fl,cr)
-#define VOP_FSYNC(vp,f,cr,b,e,rv) \
- rv = _VOP_(vop_fsync, vp)((vp)->v_fbhv,f,cr,b,e)
-#define VOP_INACTIVE(vp, cr, rv) \
- rv = _VOP_(vop_inactive, vp)((vp)->v_fbhv, cr)
-#define VOP_RELEASE(vp, rv) \
- rv = _VOP_(vop_release, vp)((vp)->v_fbhv)
-#define VOP_FID2(vp, fidp, rv) \
- rv = _VOP_(vop_fid2, vp)((vp)->v_fbhv, fidp)
-#define VOP_RWLOCK(vp,i) \
- (void)_VOP_(vop_rwlock, vp)((vp)->v_fbhv, i)
-#define VOP_RWLOCK_TRY(vp,i) \
- _VOP_(vop_rwlock, vp)((vp)->v_fbhv, i)
-#define VOP_RWUNLOCK(vp,i) \
- (void)_VOP_(vop_rwunlock, vp)((vp)->v_fbhv, i)
-#define VOP_FRLOCK(vp,c,fl,flags,offset,fr,rv) \
- rv = _VOP_(vop_frlock, vp)((vp)->v_fbhv,c,fl,flags,offset,fr)
-#define VOP_RECLAIM(vp, rv) \
- rv = _VOP_(vop_reclaim, vp)((vp)->v_fbhv)
-#define VOP_ATTR_GET(vp, name, val, vallenp, fl, cred, rv) \
- rv = _VOP_(vop_attr_get, vp)((vp)->v_fbhv,name,val,vallenp,fl,cred)
-#define VOP_ATTR_SET(vp, name, val, vallen, fl, cred, rv) \
- rv = _VOP_(vop_attr_set, vp)((vp)->v_fbhv,name,val,vallen,fl,cred)
-#define VOP_ATTR_REMOVE(vp, name, flags, cred, rv) \
- rv = _VOP_(vop_attr_remove, vp)((vp)->v_fbhv,name,flags,cred)
-#define VOP_ATTR_LIST(vp, buf, buflen, fl, cursor, cred, rv) \
- rv = _VOP_(vop_attr_list, vp)((vp)->v_fbhv,buf,buflen,fl,cursor,cred)
-#define VOP_LINK_REMOVED(vp, dvp, linkzero) \
- (void)_VOP_(vop_link_removed, vp)((vp)->v_fbhv, dvp, linkzero)
-#define VOP_VNODE_CHANGE(vp, cmd, val) \
- (void)_VOP_(vop_vnode_change, vp)((vp)->v_fbhv,cmd,val)
-/*
- * These are page cache functions that now go thru VOPs.
- * 'last' parameter is unused and left in for IRIX compatibility
+ * Virtual node operations, operating from head bhv.
*/
-#define VOP_TOSS_PAGES(vp, first, last, fiopt) \
- _VOP_(vop_tosspages, vp)((vp)->v_fbhv,first, last, fiopt)
-/*
- * 'last' parameter is unused and left in for IRIX compatibility
- */
-#define VOP_FLUSHINVAL_PAGES(vp, first, last, fiopt) \
- _VOP_(vop_flushinval_pages, vp)((vp)->v_fbhv,first,last,fiopt)
-/*
- * 'last' parameter is unused and left in for IRIX compatibility
- */
-#define VOP_FLUSH_PAGES(vp, first, last, flags, fiopt, rv) \
- rv = _VOP_(vop_flush_pages, vp)((vp)->v_fbhv,first,last,flags,fiopt)
-#define VOP_IOCTL(vp, inode, filp, fl, cmd, arg, rv) \
- rv = _VOP_(vop_ioctl, vp)((vp)->v_fbhv,inode,filp,fl,cmd,arg)
-#define VOP_IFLUSH(vp, flags, rv) \
- rv = _VOP_(vop_iflush, vp)((vp)->v_fbhv, flags)
+#define VNHEAD(vp) ((vp)->v_bh.bh_first)
+#define VOP(op, vp) (*((bhv_vnodeops_t *)VNHEAD(vp)->bd_ops)->op)
+#define bhv_vop_open(vp, cr) VOP(vop_open, vp)(VNHEAD(vp),cr)
+#define bhv_vop_close(vp, f,last,cr) VOP(vop_close, vp)(VNHEAD(vp),f,last,cr)
+#define bhv_vop_read(vp,file,iov,segs,offset,ioflags,cr) \
+ VOP(vop_read, vp)(VNHEAD(vp),file,iov,segs,offset,ioflags,cr)
+#define bhv_vop_write(vp,file,iov,segs,offset,ioflags,cr) \
+ VOP(vop_write, vp)(VNHEAD(vp),file,iov,segs,offset,ioflags,cr)
+#define bhv_vop_sendfile(vp,f,off,ioflags,cnt,act,targ,cr) \
+ VOP(vop_sendfile, vp)(VNHEAD(vp),f,off,ioflags,cnt,act,targ,cr)
+#define bhv_vop_splice_read(vp,f,o,pipe,cnt,fl,iofl,cr) \
+ VOP(vop_splice_read, vp)(VNHEAD(vp),f,o,pipe,cnt,fl,iofl,cr)
+#define bhv_vop_splice_write(vp,f,o,pipe,cnt,fl,iofl,cr) \
+ VOP(vop_splice_write, vp)(VNHEAD(vp),f,o,pipe,cnt,fl,iofl,cr)
+#define bhv_vop_bmap(vp,of,sz,rw,b,n) \
+ VOP(vop_bmap, vp)(VNHEAD(vp),of,sz,rw,b,n)
+#define bhv_vop_getattr(vp, vap,f,cr) \
+ VOP(vop_getattr, vp)(VNHEAD(vp), vap,f,cr)
+#define bhv_vop_setattr(vp, vap,f,cr) \
+ VOP(vop_setattr, vp)(VNHEAD(vp), vap,f,cr)
+#define bhv_vop_access(vp, mode,cr) VOP(vop_access, vp)(VNHEAD(vp), mode,cr)
+#define bhv_vop_lookup(vp,d,vpp,f,rdir,cr) \
+ VOP(vop_lookup, vp)(VNHEAD(vp),d,vpp,f,rdir,cr)
+#define bhv_vop_create(dvp,d,vap,vpp,cr) \
+ VOP(vop_create, dvp)(VNHEAD(dvp),d,vap,vpp,cr)
+#define bhv_vop_remove(dvp,d,cr) VOP(vop_remove, dvp)(VNHEAD(dvp),d,cr)
+#define bhv_vop_link(dvp,fvp,d,cr) VOP(vop_link, dvp)(VNHEAD(dvp),fvp,d,cr)
+#define bhv_vop_rename(fvp,fnm,tdvp,tnm,cr) \
+ VOP(vop_rename, fvp)(VNHEAD(fvp),fnm,tdvp,tnm,cr)
+#define bhv_vop_mkdir(dp,d,vap,vpp,cr) \
+ VOP(vop_mkdir, dp)(VNHEAD(dp),d,vap,vpp,cr)
+#define bhv_vop_rmdir(dp,d,cr) VOP(vop_rmdir, dp)(VNHEAD(dp),d,cr)
+#define bhv_vop_readdir(vp,uiop,cr,eofp) \
+ VOP(vop_readdir, vp)(VNHEAD(vp),uiop,cr,eofp)
+#define bhv_vop_symlink(dvp,d,vap,tnm,vpp,cr) \
+ VOP(vop_symlink, dvp)(VNHEAD(dvp),d,vap,tnm,vpp,cr)
+#define bhv_vop_readlink(vp,uiop,fl,cr) \
+ VOP(vop_readlink, vp)(VNHEAD(vp),uiop,fl,cr)
+#define bhv_vop_fsync(vp,f,cr,b,e) VOP(vop_fsync, vp)(VNHEAD(vp),f,cr,b,e)
+#define bhv_vop_inactive(vp,cr) VOP(vop_inactive, vp)(VNHEAD(vp),cr)
+#define bhv_vop_release(vp) VOP(vop_release, vp)(VNHEAD(vp))
+#define bhv_vop_fid2(vp,fidp) VOP(vop_fid2, vp)(VNHEAD(vp),fidp)
+#define bhv_vop_rwlock(vp,i) VOP(vop_rwlock, vp)(VNHEAD(vp),i)
+#define bhv_vop_rwlock_try(vp,i) VOP(vop_rwlock, vp)(VNHEAD(vp),i)
+#define bhv_vop_rwunlock(vp,i) VOP(vop_rwunlock, vp)(VNHEAD(vp),i)
+#define bhv_vop_frlock(vp,c,fl,flags,offset,fr) \
+ VOP(vop_frlock, vp)(VNHEAD(vp),c,fl,flags,offset,fr)
+#define bhv_vop_reclaim(vp) VOP(vop_reclaim, vp)(VNHEAD(vp))
+#define bhv_vop_attr_get(vp, name, val, vallenp, fl, cred) \
+ VOP(vop_attr_get, vp)(VNHEAD(vp),name,val,vallenp,fl,cred)
+#define bhv_vop_attr_set(vp, name, val, vallen, fl, cred) \
+ VOP(vop_attr_set, vp)(VNHEAD(vp),name,val,vallen,fl,cred)
+#define bhv_vop_attr_remove(vp, name, flags, cred) \
+ VOP(vop_attr_remove, vp)(VNHEAD(vp),name,flags,cred)
+#define bhv_vop_attr_list(vp, buf, buflen, fl, cursor, cred) \
+ VOP(vop_attr_list, vp)(VNHEAD(vp),buf,buflen,fl,cursor,cred)
+#define bhv_vop_link_removed(vp, dvp, linkzero) \
+ VOP(vop_link_removed, vp)(VNHEAD(vp), dvp, linkzero)
+#define bhv_vop_vnode_change(vp, cmd, val) \
+ VOP(vop_vnode_change, vp)(VNHEAD(vp), cmd, val)
+#define bhv_vop_toss_pages(vp, first, last, fiopt) \
+ VOP(vop_tosspages, vp)(VNHEAD(vp), first, last, fiopt)
+#define bhv_vop_flushinval_pages(vp, first, last, fiopt) \
+ VOP(vop_flushinval_pages, vp)(VNHEAD(vp),first,last,fiopt)
+#define bhv_vop_flush_pages(vp, first, last, flags, fiopt) \
+ VOP(vop_flush_pages, vp)(VNHEAD(vp),first,last,flags,fiopt)
+#define bhv_vop_ioctl(vp, inode, filp, fl, cmd, arg) \
+ VOP(vop_ioctl, vp)(VNHEAD(vp),inode,filp,fl,cmd,arg)
+#define bhv_vop_iflush(vp, flags) VOP(vop_iflush, vp)(VNHEAD(vp), flags)
/*
* Flags for read/write calls - same values as IRIX
@@ -377,7 +326,7 @@ typedef struct vnodeops {
#define IO_INVIS 0x00020 /* don't update inode timestamps */
/*
- * Flags for VOP_IFLUSH call
+ * Flags for vop_iflush call
*/
#define FLUSH_SYNC 1 /* wait for flush to complete */
#define FLUSH_INODE 2 /* flush the inode itself */
@@ -385,8 +334,7 @@ typedef struct vnodeops {
* this inode out to disk */
/*
- * Flush/Invalidate options for VOP_TOSS_PAGES, VOP_FLUSHINVAL_PAGES and
- * VOP_FLUSH_PAGES.
+ * Flush/Invalidate options for vop_toss/flush/flushinval_pages.
*/
#define FI_NONE 0 /* none */
#define FI_REMAPF 1 /* Do a remapf prior to the operation */
@@ -398,7 +346,7 @@ typedef struct vnodeops {
* Vnode attributes. va_mask indicates those attributes the caller
* wants to set or extract.
*/
-typedef struct vattr {
+typedef struct bhv_vattr {
int va_mask; /* bit-mask of attributes present */
mode_t va_mode; /* file access mode and type */
xfs_nlink_t va_nlink; /* number of references to file */
@@ -418,7 +366,7 @@ typedef struct vattr {
u_long va_nextents; /* number of extents in file */
u_long va_anextents; /* number of attr extents in file */
prid_t va_projid; /* project id */
-} vattr_t;
+} bhv_vattr_t;
/*
* setattr or getattr attributes
@@ -492,29 +440,17 @@ typedef struct vattr {
(VN_ISREG(vp) && ((mode) & (VSGID|(VEXEC>>3))) == VSGID)
extern void vn_init(void);
-extern vnode_t *vn_initialize(struct inode *);
-
-/*
- * vnode_map structures _must_ match vn_epoch and vnode structure sizes.
- */
-typedef struct vnode_map {
- vfs_t *v_vfsp;
- vnumber_t v_number; /* in-core vnode number */
- xfs_ino_t v_ino; /* inode # */
-} vmap_t;
-
-#define VMAP(vp, vmap) {(vmap).v_vfsp = (vp)->v_vfsp, \
- (vmap).v_number = (vp)->v_number, \
- (vmap).v_ino = (vp)->v_inode.i_ino; }
+extern bhv_vnode_t *vn_initialize(struct inode *);
+extern int vn_revalidate(struct bhv_vnode *);
+extern int __vn_revalidate(struct bhv_vnode *, bhv_vattr_t *);
+extern void vn_revalidate_core(struct bhv_vnode *, bhv_vattr_t *);
-extern int vn_revalidate(struct vnode *);
-extern int __vn_revalidate(struct vnode *, vattr_t *);
-extern void vn_revalidate_core(struct vnode *, vattr_t *);
+extern void vn_iowait(struct bhv_vnode *vp);
+extern void vn_iowake(struct bhv_vnode *vp);
-extern void vn_iowait(struct vnode *vp);
-extern void vn_iowake(struct vnode *vp);
+extern void vn_ioerror(struct bhv_vnode *vp, int error, char *f, int l);
-static inline int vn_count(struct vnode *vp)
+static inline int vn_count(struct bhv_vnode *vp)
{
return atomic_read(&vn_to_inode(vp)->i_count);
}
@@ -522,7 +458,7 @@ static inline int vn_count(struct vnode *vp)
/*
* Vnode reference counting functions (and macros for compatibility).
*/
-extern vnode_t *vn_hold(struct vnode *);
+extern bhv_vnode_t *vn_hold(struct bhv_vnode *);
#if defined(XFS_VNODE_TRACE)
#define VN_HOLD(vp) \
@@ -536,7 +472,7 @@ extern vnode_t *vn_hold(struct vnode *);
#define VN_RELE(vp) (iput(vn_to_inode(vp)))
#endif
-static inline struct vnode *vn_grab(struct vnode *vp)
+static inline struct bhv_vnode *vn_grab(struct bhv_vnode *vp)
{
struct inode *inode = igrab(vn_to_inode(vp));
return inode ? vn_from_inode(inode) : NULL;
@@ -554,32 +490,39 @@ static inline struct vnode *vn_grab(struct vnode *vp)
*/
#define VN_LOCK(vp) mutex_spinlock(&(vp)->v_lock)
#define VN_UNLOCK(vp, s) mutex_spinunlock(&(vp)->v_lock, s)
-#define VN_FLAGSET(vp,b) vn_flagset(vp,b)
-#define VN_FLAGCLR(vp,b) vn_flagclr(vp,b)
-static __inline__ void vn_flagset(struct vnode *vp, uint flag)
+static __inline__ void vn_flagset(struct bhv_vnode *vp, uint flag)
{
spin_lock(&vp->v_lock);
vp->v_flag |= flag;
spin_unlock(&vp->v_lock);
}
-static __inline__ void vn_flagclr(struct vnode *vp, uint flag)
+static __inline__ uint vn_flagclr(struct bhv_vnode *vp, uint flag)
{
+ uint cleared;
+
spin_lock(&vp->v_lock);
+ cleared = (vp->v_flag & flag);
vp->v_flag &= ~flag;
spin_unlock(&vp->v_lock);
+ return cleared;
}
+#define VMODIFY(vp) vn_flagset(vp, VMODIFIED)
+#define VUNMODIFY(vp) vn_flagclr(vp, VMODIFIED)
+#define VTRUNCATE(vp) vn_flagset(vp, VTRUNCATED)
+#define VUNTRUNCATE(vp) vn_flagclr(vp, VTRUNCATED)
+
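
vn_flagclr() now reports which bits it actually cleared, so wrappers such as VUNTRUNCATE() above can double as a test for whether the flag was pending. A minimal sketch of that idiom, with the spinlock stripped out and the flag values copied from the bhv_vflags enum earlier in this header:

#include <stdio.h>

#define VMODIFIED  0x08
#define VTRUNCATED 0x40

struct vnode_sketch {				/* stand-in for bhv_vnode_t */
	unsigned int v_flag;
};

static unsigned int vn_flagclr(struct vnode_sketch *vp, unsigned int flag)
{
	unsigned int cleared = vp->v_flag & flag;	/* bits that were set */

	vp->v_flag &= ~flag;
	return cleared;
}

int main(void)
{
	struct vnode_sketch vn = { .v_flag = VMODIFIED | VTRUNCATED };

	/* VUNTRUNCATE(vp): clear the flag and learn whether it was pending */
	if (vn_flagclr(&vn, VTRUNCATED))
		printf("truncate was pending: flush on close\n");
	return 0;
}
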
/*
* Dealing with bad inodes
*/
-static inline void vn_mark_bad(struct vnode *vp)
+static inline void vn_mark_bad(struct bhv_vnode *vp)
{
make_bad_inode(vn_to_inode(vp));
}
-static inline int VN_BAD(struct vnode *vp)
+static inline int VN_BAD(struct bhv_vnode *vp)
{
return is_bad_inode(vn_to_inode(vp));
}
@@ -587,18 +530,18 @@ static inline int VN_BAD(struct vnode *vp)
/*
* Extracting atime values in various formats
*/
-static inline void vn_atime_to_bstime(struct vnode *vp, xfs_bstime_t *bs_atime)
+static inline void vn_atime_to_bstime(bhv_vnode_t *vp, xfs_bstime_t *bs_atime)
{
bs_atime->tv_sec = vp->v_inode.i_atime.tv_sec;
bs_atime->tv_nsec = vp->v_inode.i_atime.tv_nsec;
}
-static inline void vn_atime_to_timespec(struct vnode *vp, struct timespec *ts)
+static inline void vn_atime_to_timespec(bhv_vnode_t *vp, struct timespec *ts)
{
*ts = vp->v_inode.i_atime;
}
-static inline void vn_atime_to_time_t(struct vnode *vp, time_t *tt)
+static inline void vn_atime_to_time_t(bhv_vnode_t *vp, time_t *tt)
{
*tt = vp->v_inode.i_atime.tv_sec;
}
@@ -610,11 +553,10 @@ static inline void vn_atime_to_time_t(struct vnode *vp, time_t *tt)
#define VN_CACHED(vp) (vn_to_inode(vp)->i_mapping->nrpages)
#define VN_DIRTY(vp) mapping_tagged(vn_to_inode(vp)->i_mapping, \
PAGECACHE_TAG_DIRTY)
-#define VMODIFY(vp) VN_FLAGSET(vp, VMODIFIED)
-#define VUNMODIFY(vp) VN_FLAGCLR(vp, VMODIFIED)
+#define VN_TRUNC(vp) ((vp)->v_flag & VTRUNCATED)
/*
- * Flags to VOP_SETATTR/VOP_GETATTR.
+ * Flags to vop_setattr/getattr.
*/
#define ATTR_UTIME 0x01 /* non-default utime(2) request */
#define ATTR_DMI 0x08 /* invocation from a DMI function */
@@ -624,7 +566,7 @@ static inline void vn_atime_to_time_t(struct vnode *vp, time_t *tt)
#define ATTR_NOSIZETOK 0x400 /* Don't get the SIZE token */
/*
- * Flags to VOP_FSYNC and VOP_RECLAIM.
+ * Flags to vop_fsync/reclaim.
*/
#define FSYNC_NOWAIT 0 /* asynchronous flush */
#define FSYNC_WAIT 0x1 /* synchronous fsync or forced reclaim */
@@ -643,11 +585,11 @@ static inline void vn_atime_to_time_t(struct vnode *vp, time_t *tt)
#define VNODE_KTRACE_REF 4
#define VNODE_KTRACE_RELE 5
-extern void vn_trace_entry(struct vnode *, const char *, inst_t *);
-extern void vn_trace_exit(struct vnode *, const char *, inst_t *);
-extern void vn_trace_hold(struct vnode *, char *, int, inst_t *);
-extern void vn_trace_ref(struct vnode *, char *, int, inst_t *);
-extern void vn_trace_rele(struct vnode *, char *, int, inst_t *);
+extern void vn_trace_entry(struct bhv_vnode *, const char *, inst_t *);
+extern void vn_trace_exit(struct bhv_vnode *, const char *, inst_t *);
+extern void vn_trace_hold(struct bhv_vnode *, char *, int, inst_t *);
+extern void vn_trace_ref(struct bhv_vnode *, char *, int, inst_t *);
+extern void vn_trace_rele(struct bhv_vnode *, char *, int, inst_t *);
#define VN_TRACE(vp) \
vn_trace_ref(vp, __FILE__, __LINE__, (inst_t *)__return_address)
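
The VFSHEAD()/bhv_vfs_*() and VNHEAD()/bhv_vop_*() macros introduced in the two headers above all dispatch the same way: take the first behavior descriptor on the object's chain, cast its ops pointer to the matching ops vector, and call through it. A self-contained sketch of that pattern with deliberately simplified stand-in structs (the real bhv_desc_t, bhv_vnodeops_t and chain management carry far more state):

#include <stdio.h>

struct bhv_desc;
typedef int (*vop_open_t)(struct bhv_desc *);

struct ops_sketch {				/* stand-in for bhv_vnodeops_t */
	vop_open_t	vop_open;
};

struct bhv_desc {				/* stand-in for bhv_desc_t */
	void		*bd_ops;		/* ops vector for this behavior */
	struct bhv_desc	*bd_next;		/* next behavior on the chain */
};

struct vnode_sketch {				/* stand-in for bhv_vnode_t */
	struct { struct bhv_desc *bh_first; } v_bh;
};

#define VNHEAD(vp)	((vp)->v_bh.bh_first)
#define VOP(op, vp)	(*((struct ops_sketch *)VNHEAD(vp)->bd_ops)->op)
#define bhv_vop_open(vp)	VOP(vop_open, vp)(VNHEAD(vp))

static int xfs_open_stub(struct bhv_desc *bdp)
{
	(void)bdp;
	return 0;				/* pretend the open succeeded */
}

int main(void)
{
	struct ops_sketch xfs_ops = { .vop_open = xfs_open_stub };
	struct bhv_desc desc = { .bd_ops = &xfs_ops, .bd_next = NULL };
	struct vnode_sketch vn = { .v_bh = { .bh_first = &desc } };

	printf("vop_open returned %d\n", bhv_vop_open(&vn));
	return 0;
}
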
diff --git a/fs/xfs/quota/xfs_dquot.c b/fs/xfs/quota/xfs_dquot.c
index 772ac48329e..3aa77153185 100644
--- a/fs/xfs/quota/xfs_dquot.c
+++ b/fs/xfs/quota/xfs_dquot.c
@@ -23,7 +23,6 @@
#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
-#include "xfs_dir.h"
#include "xfs_dir2.h"
#include "xfs_alloc.h"
#include "xfs_dmapi.h"
@@ -32,7 +31,6 @@
#include "xfs_bmap_btree.h"
#include "xfs_alloc_btree.h"
#include "xfs_ialloc_btree.h"
-#include "xfs_dir_sf.h"
#include "xfs_dir2_sf.h"
#include "xfs_attr_sf.h"
#include "xfs_dinode.h"
@@ -444,7 +442,7 @@ xfs_qm_dqalloc(
XFS_BMAPI_METADATA | XFS_BMAPI_WRITE,
&firstblock,
XFS_QM_DQALLOC_SPACE_RES(mp),
- &map, &nmaps, &flist))) {
+ &map, &nmaps, &flist, NULL))) {
goto error0;
}
ASSERT(map.br_blockcount == XFS_DQUOT_CLUSTER_SIZE_FSB);
@@ -559,7 +557,7 @@ xfs_qm_dqtobp(
error = xfs_bmapi(NULL, quotip, dqp->q_fileoffset,
XFS_DQUOT_CLUSTER_SIZE_FSB,
XFS_BMAPI_METADATA,
- NULL, 0, &map, &nmaps, NULL);
+ NULL, 0, &map, &nmaps, NULL, NULL);
xfs_iunlock(quotip, XFS_ILOCK_SHARED);
if (error)
@@ -1261,7 +1259,7 @@ xfs_qm_dqflush(
if (xfs_qm_dqcheck(&dqp->q_core, be32_to_cpu(ddqp->d_id),
0, XFS_QMOPT_DOWARN, "dqflush (incore copy)")) {
- xfs_force_shutdown(dqp->q_mount, XFS_CORRUPT_INCORE);
+ xfs_force_shutdown(dqp->q_mount, SHUTDOWN_CORRUPT_INCORE);
return XFS_ERROR(EIO);
}
diff --git a/fs/xfs/quota/xfs_dquot.h b/fs/xfs/quota/xfs_dquot.h
index c0c629663a5..78d3ab95c5f 100644
--- a/fs/xfs/quota/xfs_dquot.h
+++ b/fs/xfs/quota/xfs_dquot.h
@@ -119,7 +119,7 @@ XFS_DQ_IS_LOCKED(xfs_dquot_t *dqp)
*/
#define xfs_dqflock(dqp) { psema(&((dqp)->q_flock), PINOD | PRECALC);\
(dqp)->dq_flags |= XFS_DQ_FLOCKED; }
-#define xfs_dqfunlock(dqp) { ASSERT(valusema(&((dqp)->q_flock)) <= 0); \
+#define xfs_dqfunlock(dqp) { ASSERT(issemalocked(&((dqp)->q_flock))); \
vsema(&((dqp)->q_flock)); \
(dqp)->dq_flags &= ~(XFS_DQ_FLOCKED); }
@@ -128,7 +128,7 @@ XFS_DQ_IS_LOCKED(xfs_dquot_t *dqp)
#define XFS_DQ_PINUNLOCK(dqp, s) mutex_spinunlock( \
&(XFS_DQ_TO_QINF(dqp)->qi_pinlock), s)
-#define XFS_DQ_IS_FLUSH_LOCKED(dqp) (valusema(&((dqp)->q_flock)) <= 0)
+#define XFS_DQ_IS_FLUSH_LOCKED(dqp) (issemalocked(&((dqp)->q_flock)))
#define XFS_DQ_IS_ON_FREELIST(dqp) ((dqp)->dq_flnext != (dqp))
#define XFS_DQ_IS_DIRTY(dqp) ((dqp)->dq_flags & XFS_DQ_DIRTY)
#define XFS_QM_ISUDQ(dqp) ((dqp)->dq_flags & XFS_DQ_USER)
diff --git a/fs/xfs/quota/xfs_dquot_item.c b/fs/xfs/quota/xfs_dquot_item.c
index 546f48af882..5b2dcc58b24 100644
--- a/fs/xfs/quota/xfs_dquot_item.c
+++ b/fs/xfs/quota/xfs_dquot_item.c
@@ -23,7 +23,6 @@
#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
-#include "xfs_dir.h"
#include "xfs_dir2.h"
#include "xfs_alloc.h"
#include "xfs_dmapi.h"
@@ -32,7 +31,6 @@
#include "xfs_bmap_btree.h"
#include "xfs_alloc_btree.h"
#include "xfs_ialloc_btree.h"
-#include "xfs_dir_sf.h"
#include "xfs_dir2_sf.h"
#include "xfs_attr_sf.h"
#include "xfs_dinode.h"
@@ -248,7 +246,7 @@ xfs_qm_dquot_logitem_pushbuf(
* inode flush completed and the inode was taken off the AIL.
* So, just get out.
*/
- if ((valusema(&(dqp->q_flock)) > 0) ||
+ if (!issemalocked(&(dqp->q_flock)) ||
((qip->qli_item.li_flags & XFS_LI_IN_AIL) == 0)) {
qip->qli_pushbuf_flag = 0;
xfs_dqunlock(dqp);
@@ -261,7 +259,7 @@ xfs_qm_dquot_logitem_pushbuf(
if (bp != NULL) {
if (XFS_BUF_ISDELAYWRITE(bp)) {
dopush = ((qip->qli_item.li_flags & XFS_LI_IN_AIL) &&
- (valusema(&(dqp->q_flock)) <= 0));
+ issemalocked(&(dqp->q_flock)));
qip->qli_pushbuf_flag = 0;
xfs_dqunlock(dqp);
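The dquot hunks above replace the open-coded `valusema(&sem) <= 0` tests on the dquot flush lock with an `issemalocked()` predicate. A minimal sketch of such a predicate over the Linux semaphore API is shown below; the name and body are illustrative (try to take the semaphore, and hand it straight back if that succeeds), not necessarily the helper this series adds in fs/xfs/linux-2.6/sema.h, and like the call sites above it is only meaningful as an assertion-style check, since the answer can change the moment it is returned.

#include <linux/semaphore.h>	/* down_trylock()/up(); <asm/semaphore.h> on kernels of this era */

/* Sketch only: report whether a semaphore is currently held. */
static inline int issemalocked_sketch(struct semaphore *sp)
{
	if (down_trylock(sp))
		return 1;	/* could not take it, so somebody holds it */
	up(sp);			/* we took it ourselves; give it back */
	return 0;		/* it was free */
}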
diff --git a/fs/xfs/quota/xfs_qm.c b/fs/xfs/quota/xfs_qm.c
index 7fb5eca9bd5..e23e45535c4 100644
--- a/fs/xfs/quota/xfs_qm.c
+++ b/fs/xfs/quota/xfs_qm.c
@@ -24,7 +24,6 @@
#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
-#include "xfs_dir.h"
#include "xfs_dir2.h"
#include "xfs_alloc.h"
#include "xfs_dmapi.h"
@@ -33,7 +32,6 @@
#include "xfs_bmap_btree.h"
#include "xfs_alloc_btree.h"
#include "xfs_ialloc_btree.h"
-#include "xfs_dir_sf.h"
#include "xfs_dir2_sf.h"
#include "xfs_attr_sf.h"
#include "xfs_dinode.h"
@@ -1603,7 +1601,7 @@ xfs_qm_dqiterate(
maxlblkcnt - lblkno,
XFS_BMAPI_METADATA,
NULL,
- 0, map, &nmaps, NULL);
+ 0, map, &nmaps, NULL, NULL);
xfs_iunlock(qip, XFS_ILOCK_SHARED);
if (error)
break;
@@ -1905,9 +1903,7 @@ xfs_qm_quotacheck(
*/
if ((error = xfs_bulkstat(mp, &lastino, &count,
xfs_qm_dqusage_adjust, NULL,
- structsz, NULL,
- BULKSTAT_FG_IGET|BULKSTAT_FG_VFSLOCKED,
- &done)))
+ structsz, NULL, BULKSTAT_FG_IGET, &done)))
break;
} while (! done);
diff --git a/fs/xfs/quota/xfs_qm_bhv.c b/fs/xfs/quota/xfs_qm_bhv.c
index 6838b36d95a..e95e99f7168 100644
--- a/fs/xfs/quota/xfs_qm_bhv.c
+++ b/fs/xfs/quota/xfs_qm_bhv.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2000-2005 Silicon Graphics, Inc.
+ * Copyright (c) 2000-2006 Silicon Graphics, Inc.
* All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
@@ -24,7 +24,6 @@
#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
-#include "xfs_dir.h"
#include "xfs_dir2.h"
#include "xfs_alloc.h"
#include "xfs_dmapi.h"
@@ -33,7 +32,6 @@
#include "xfs_bmap_btree.h"
#include "xfs_alloc_btree.h"
#include "xfs_ialloc_btree.h"
-#include "xfs_dir_sf.h"
#include "xfs_dir2_sf.h"
#include "xfs_attr_sf.h"
#include "xfs_dinode.h"
@@ -129,7 +127,7 @@ xfs_qm_parseargs(
return XFS_ERROR(EINVAL);
}
- PVFS_PARSEARGS(BHV_NEXT(bhv), options, args, update, error);
+ error = bhv_next_vfs_parseargs(BHV_NEXT(bhv), options, args, update);
if (!error && !referenced)
bhv_remove_vfsops(bhvtovfs(bhv), VFS_POSITION_QM);
return error;
@@ -140,9 +138,8 @@ xfs_qm_showargs(
struct bhv_desc *bhv,
struct seq_file *m)
{
- struct vfs *vfsp = bhvtovfs(bhv);
+ struct bhv_vfs *vfsp = bhvtovfs(bhv);
struct xfs_mount *mp = XFS_VFSTOM(vfsp);
- int error;
if (mp->m_qflags & XFS_UQUOTA_ACCT) {
(mp->m_qflags & XFS_UQUOTA_ENFD) ?
@@ -165,8 +162,7 @@ xfs_qm_showargs(
if (!(mp->m_qflags & XFS_ALL_QUOTA_ACCT))
seq_puts(m, "," MNTOPT_NOQUOTA);
- PVFS_SHOWARGS(BHV_NEXT(bhv), m, error);
- return error;
+ return bhv_next_vfs_showargs(BHV_NEXT(bhv), m);
}
STATIC int
@@ -175,14 +171,67 @@ xfs_qm_mount(
struct xfs_mount_args *args,
struct cred *cr)
{
- struct vfs *vfsp = bhvtovfs(bhv);
+ struct bhv_vfs *vfsp = bhvtovfs(bhv);
struct xfs_mount *mp = XFS_VFSTOM(vfsp);
- int error;
if (args->flags & (XFSMNT_UQUOTA | XFSMNT_GQUOTA | XFSMNT_PQUOTA))
xfs_qm_mount_quotainit(mp, args->flags);
- PVFS_MOUNT(BHV_NEXT(bhv), args, cr, error);
- return error;
+ return bhv_next_vfs_mount(BHV_NEXT(bhv), args, cr);
+}
+
+/*
+ * Directory tree accounting is implemented using project quotas, where
+ * the project identifier is inherited from parent directories.
+ * A statvfs (df, etc.) of a directory that is using project quota should
+ * return a statvfs of the project, not the entire filesystem.
+ * This makes such trees appear as if they are filesystems in themselves.
+ */
+STATIC int
+xfs_qm_statvfs(
+ struct bhv_desc *bhv,
+ bhv_statvfs_t *statp,
+ struct bhv_vnode *vnode)
+{
+ xfs_mount_t *mp;
+ xfs_inode_t *ip;
+ xfs_dquot_t *dqp;
+ xfs_disk_dquot_t *dp;
+ __uint64_t limit;
+ int error;
+
+ error = bhv_next_vfs_statvfs(BHV_NEXT(bhv), statp, vnode);
+ if (error || !vnode)
+ return error;
+
+ mp = XFS_BHVTOM(bhv);
+ ip = xfs_vtoi(vnode);
+
+ if (!(ip->i_d.di_flags & XFS_DIFLAG_PROJINHERIT))
+ return 0;
+ if (!(mp->m_qflags & XFS_PQUOTA_ACCT))
+ return 0;
+ if (!(mp->m_qflags & XFS_OQUOTA_ENFD))
+ return 0;
+
+ if (xfs_qm_dqget(mp, NULL, ip->i_d.di_projid, XFS_DQ_PROJ, 0, &dqp))
+ return 0;
+ dp = &dqp->q_core;
+
+ limit = dp->d_blk_softlimit ? dp->d_blk_softlimit : dp->d_blk_hardlimit;
+ if (limit && statp->f_blocks > limit) {
+ statp->f_blocks = limit;
+ statp->f_bfree = (statp->f_blocks > dp->d_bcount) ?
+ (statp->f_blocks - dp->d_bcount) : 0;
+ }
+ limit = dp->d_ino_softlimit ? dp->d_ino_softlimit : dp->d_ino_hardlimit;
+ if (limit && statp->f_files > limit) {
+ statp->f_files = limit;
+ statp->f_ffree = (statp->f_files > dp->d_icount) ?
+ (statp->f_ffree - dp->d_icount) : 0;
+ }
+
+ xfs_qm_dqput(dqp);
+ return 0;
}
STATIC int
@@ -191,7 +240,7 @@ xfs_qm_syncall(
int flags,
cred_t *credp)
{
- struct vfs *vfsp = bhvtovfs(bhv);
+ struct bhv_vfs *vfsp = bhvtovfs(bhv);
struct xfs_mount *mp = XFS_VFSTOM(vfsp);
int error;
@@ -210,8 +259,7 @@ xfs_qm_syncall(
}
}
}
- PVFS_SYNC(BHV_NEXT(bhv), flags, credp, error);
- return error;
+ return bhv_next_vfs_sync(BHV_NEXT(bhv), flags, credp);
}
STATIC int
@@ -346,11 +394,12 @@ STATIC struct xfs_qmops xfs_qmcore_xfs = {
.xfs_dqtrxops = &xfs_trans_dquot_ops,
};
-struct bhv_vfsops xfs_qmops = { {
+struct bhv_module_vfsops xfs_qmops = { {
BHV_IDENTITY_INIT(VFS_BHV_QM, VFS_POSITION_QM),
.vfs_parseargs = xfs_qm_parseargs,
.vfs_showargs = xfs_qm_showargs,
.vfs_mount = xfs_qm_mount,
+ .vfs_statvfs = xfs_qm_statvfs,
.vfs_sync = xfs_qm_syncall,
.vfs_quotactl = xfs_qm_quotactl, },
};
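The new xfs_qm_statvfs() entry in the vfsops table above is what makes a statvfs (df, etc.) inside a project-quota directory tree report the project rather than the whole filesystem: total block and inode counts are clamped to the project's soft (else hard) limit, and the free counts are derived from what the project has already consumed. The fragment below restates that clamping arithmetic on plain integers so the intent is easier to follow; the struct and function names are illustrative stand-ins, and both free counts are computed here as limit minus usage, which is how the block branch above does it.

#include <stdint.h>

/* Illustrative mirror of the statvfs fields xfs_qm_statvfs() adjusts. */
struct pq_statvfs {
	uint64_t f_blocks;	/* total data blocks */
	uint64_t f_bfree;	/* free blocks */
	uint64_t f_files;	/* total inodes */
	uint64_t f_ffree;	/* free inodes */
};

/*
 * Clamp filesystem-wide numbers to a project's quota: "total" becomes
 * the soft (else hard) limit, "free" becomes whatever of that limit the
 * project has not used yet.
 */
static void pq_clamp_statvfs(struct pq_statvfs *st,
			     uint64_t blk_soft, uint64_t blk_hard, uint64_t blk_used,
			     uint64_t ino_soft, uint64_t ino_hard, uint64_t ino_used)
{
	uint64_t limit;

	limit = blk_soft ? blk_soft : blk_hard;
	if (limit && st->f_blocks > limit) {
		st->f_blocks = limit;
		st->f_bfree = (limit > blk_used) ? limit - blk_used : 0;
	}

	limit = ino_soft ? ino_soft : ino_hard;
	if (limit && st->f_files > limit) {
		st->f_files = limit;
		st->f_ffree = (limit > ino_used) ? limit - ino_used : 0;
	}
}

With this in place, a tree capped at, say, 10GB of blocks by its project quota looks like a 10GB filesystem to df and friends, which is exactly the behaviour the comment block above describes.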
diff --git a/fs/xfs/quota/xfs_qm_stats.c b/fs/xfs/quota/xfs_qm_stats.c
index 0570f773355..6f858fb81a3 100644
--- a/fs/xfs/quota/xfs_qm_stats.c
+++ b/fs/xfs/quota/xfs_qm_stats.c
@@ -23,7 +23,6 @@
#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
-#include "xfs_dir.h"
#include "xfs_dir2.h"
#include "xfs_alloc.h"
#include "xfs_dmapi.h"
@@ -32,7 +31,6 @@
#include "xfs_bmap_btree.h"
#include "xfs_alloc_btree.h"
#include "xfs_ialloc_btree.h"
-#include "xfs_dir_sf.h"
#include "xfs_dir2_sf.h"
#include "xfs_attr_sf.h"
#include "xfs_dinode.h"
diff --git a/fs/xfs/quota/xfs_qm_syscalls.c b/fs/xfs/quota/xfs_qm_syscalls.c
index c55db463bbf..ed620c4d159 100644
--- a/fs/xfs/quota/xfs_qm_syscalls.c
+++ b/fs/xfs/quota/xfs_qm_syscalls.c
@@ -26,7 +26,6 @@
#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
-#include "xfs_dir.h"
#include "xfs_dir2.h"
#include "xfs_alloc.h"
#include "xfs_dmapi.h"
@@ -35,7 +34,6 @@
#include "xfs_bmap_btree.h"
#include "xfs_alloc_btree.h"
#include "xfs_ialloc_btree.h"
-#include "xfs_dir_sf.h"
#include "xfs_dir2_sf.h"
#include "xfs_attr_sf.h"
#include "xfs_dinode.h"
@@ -91,8 +89,8 @@ xfs_qm_quotactl(
xfs_caddr_t addr)
{
xfs_mount_t *mp;
+ bhv_vfs_t *vfsp;
int error;
- struct vfs *vfsp;
vfsp = bhvtovfs(bdp);
mp = XFS_VFSTOM(vfsp);
@@ -1035,7 +1033,7 @@ xfs_qm_dqrele_all_inodes(
{
xfs_inode_t *ip, *topino;
uint ireclaims;
- vnode_t *vp;
+ bhv_vnode_t *vp;
boolean_t vnode_refd;
ASSERT(mp->m_quotainfo);
diff --git a/fs/xfs/quota/xfs_trans_dquot.c b/fs/xfs/quota/xfs_trans_dquot.c
index 9168918db25..0242e9666e8 100644
--- a/fs/xfs/quota/xfs_trans_dquot.c
+++ b/fs/xfs/quota/xfs_trans_dquot.c
@@ -23,7 +23,6 @@
#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
-#include "xfs_dir.h"
#include "xfs_dir2.h"
#include "xfs_alloc.h"
#include "xfs_dmapi.h"
@@ -33,7 +32,6 @@
#include "xfs_alloc_btree.h"
#include "xfs_ialloc_btree.h"
#include "xfs_attr_sf.h"
-#include "xfs_dir_sf.h"
#include "xfs_dir2_sf.h"
#include "xfs_dinode.h"
#include "xfs_inode.h"
diff --git a/fs/xfs/support/debug.c b/fs/xfs/support/debug.c
index b08b3d9345b..36fbeccdc72 100644
--- a/fs/xfs/support/debug.c
+++ b/fs/xfs/support/debug.c
@@ -47,7 +47,7 @@ cmn_err(register int level, char *fmt, ...)
va_start(ap, fmt);
if (*fmt == '!') fp++;
len = vsprintf(message, fp, ap);
- if (message[len-1] != '\n')
+ if (level != CE_DEBUG && message[len-1] != '\n')
strcat(message, "\n");
printk("%s%s", err_level[level], message);
va_end(ap);
@@ -68,7 +68,7 @@ icmn_err(register int level, char *fmt, va_list ap)
level = XFS_MAX_ERR_LEVEL;
spin_lock_irqsave(&xfs_err_lock,flags);
len = vsprintf(message, fmt, ap);
- if (message[len-1] != '\n')
+ if (level != CE_DEBUG && message[len-1] != '\n')
strcat(message, "\n");
spin_unlock_irqrestore(&xfs_err_lock,flags);
printk("%s%s", err_level[level], message);
diff --git a/fs/xfs/support/debug.h b/fs/xfs/support/debug.h
index e3bf58112e7..4f54dca662a 100644
--- a/fs/xfs/support/debug.h
+++ b/fs/xfs/support/debug.h
@@ -33,9 +33,6 @@ extern void cmn_err(int, char *, ...)
__attribute__ ((format (printf, 2, 3)));
extern void assfail(char *expr, char *f, int l);
-#define prdev(fmt,targ,args...) \
- printk("Device %s - " fmt "\n", XFS_BUFTARG_NAME(targ), ## args)
-
#define ASSERT_ALWAYS(expr) \
(unlikely((expr) != 0) ? (void)0 : assfail(#expr, __FILE__, __LINE__))
diff --git a/fs/xfs/xfs_acl.c b/fs/xfs/xfs_acl.c
index 2539af34eb6..4b0cb474be4 100644
--- a/fs/xfs/xfs_acl.c
+++ b/fs/xfs/xfs_acl.c
@@ -21,12 +21,10 @@
#include "xfs_bit.h"
#include "xfs_inum.h"
#include "xfs_ag.h"
-#include "xfs_dir.h"
#include "xfs_dir2.h"
#include "xfs_bmap_btree.h"
#include "xfs_alloc_btree.h"
#include "xfs_ialloc_btree.h"
-#include "xfs_dir_sf.h"
#include "xfs_dir2_sf.h"
#include "xfs_attr_sf.h"
#include "xfs_dinode.h"
@@ -39,15 +37,15 @@
#include <linux/capability.h>
#include <linux/posix_acl_xattr.h>
-STATIC int xfs_acl_setmode(vnode_t *, xfs_acl_t *, int *);
+STATIC int xfs_acl_setmode(bhv_vnode_t *, xfs_acl_t *, int *);
STATIC void xfs_acl_filter_mode(mode_t, xfs_acl_t *);
STATIC void xfs_acl_get_endian(xfs_acl_t *);
STATIC int xfs_acl_access(uid_t, gid_t, xfs_acl_t *, mode_t, cred_t *);
STATIC int xfs_acl_invalid(xfs_acl_t *);
STATIC void xfs_acl_sync_mode(mode_t, xfs_acl_t *);
-STATIC void xfs_acl_get_attr(vnode_t *, xfs_acl_t *, int, int, int *);
-STATIC void xfs_acl_set_attr(vnode_t *, xfs_acl_t *, int, int *);
-STATIC int xfs_acl_allow_set(vnode_t *, int);
+STATIC void xfs_acl_get_attr(bhv_vnode_t *, xfs_acl_t *, int, int, int *);
+STATIC void xfs_acl_set_attr(bhv_vnode_t *, xfs_acl_t *, int, int *);
+STATIC int xfs_acl_allow_set(bhv_vnode_t *, int);
kmem_zone_t *xfs_acl_zone;
@@ -57,7 +55,7 @@ kmem_zone_t *xfs_acl_zone;
*/
int
xfs_acl_vhasacl_access(
- vnode_t *vp)
+ bhv_vnode_t *vp)
{
int error;
@@ -70,7 +68,7 @@ xfs_acl_vhasacl_access(
*/
int
xfs_acl_vhasacl_default(
- vnode_t *vp)
+ bhv_vnode_t *vp)
{
int error;
@@ -209,7 +207,7 @@ posix_acl_xfs_to_xattr(
int
xfs_acl_vget(
- vnode_t *vp,
+ bhv_vnode_t *vp,
void *acl,
size_t size,
int kind)
@@ -241,10 +239,10 @@ xfs_acl_vget(
goto out;
}
if (kind == _ACL_TYPE_ACCESS) {
- vattr_t va;
+ bhv_vattr_t va;
va.va_mask = XFS_AT_MODE;
- VOP_GETATTR(vp, &va, 0, sys_cred, error);
+ error = bhv_vop_getattr(vp, &va, 0, sys_cred);
if (error)
goto out;
xfs_acl_sync_mode(va.va_mode, xfs_acl);
@@ -260,7 +258,7 @@ out:
int
xfs_acl_vremove(
- vnode_t *vp,
+ bhv_vnode_t *vp,
int kind)
{
int error;
@@ -268,9 +266,9 @@ xfs_acl_vremove(
VN_HOLD(vp);
error = xfs_acl_allow_set(vp, kind);
if (!error) {
- VOP_ATTR_REMOVE(vp, kind == _ACL_TYPE_DEFAULT?
- SGI_ACL_DEFAULT: SGI_ACL_FILE,
- ATTR_ROOT, sys_cred, error);
+ error = bhv_vop_attr_remove(vp, kind == _ACL_TYPE_DEFAULT?
+ SGI_ACL_DEFAULT: SGI_ACL_FILE,
+ ATTR_ROOT, sys_cred);
if (error == ENOATTR)
error = 0; /* 'scool */
}
@@ -280,7 +278,7 @@ xfs_acl_vremove(
int
xfs_acl_vset(
- vnode_t *vp,
+ bhv_vnode_t *vp,
void *acl,
size_t size,
int kind)
@@ -370,10 +368,10 @@ xfs_acl_iaccess(
STATIC int
xfs_acl_allow_set(
- vnode_t *vp,
+ bhv_vnode_t *vp,
int kind)
{
- vattr_t va;
+ bhv_vattr_t va;
int error;
if (vp->v_inode.i_flags & (S_IMMUTABLE|S_APPEND))
@@ -383,7 +381,7 @@ xfs_acl_allow_set(
if (vp->v_vfsp->vfs_flag & VFS_RDONLY)
return EROFS;
va.va_mask = XFS_AT_UID;
- VOP_GETATTR(vp, &va, 0, NULL, error);
+ error = bhv_vop_getattr(vp, &va, 0, NULL);
if (error)
return error;
if (va.va_uid != current->fsuid && !capable(CAP_FOWNER))
@@ -606,7 +604,7 @@ xfs_acl_get_endian(
*/
STATIC void
xfs_acl_get_attr(
- vnode_t *vp,
+ bhv_vnode_t *vp,
xfs_acl_t *aclp,
int kind,
int flags,
@@ -616,9 +614,9 @@ xfs_acl_get_attr(
ASSERT((flags & ATTR_KERNOVAL) ? (aclp == NULL) : 1);
flags |= ATTR_ROOT;
- VOP_ATTR_GET(vp,
- kind == _ACL_TYPE_ACCESS ? SGI_ACL_FILE : SGI_ACL_DEFAULT,
- (char *)aclp, &len, flags, sys_cred, *error);
+ *error = bhv_vop_attr_get(vp, kind == _ACL_TYPE_ACCESS ?
+ SGI_ACL_FILE : SGI_ACL_DEFAULT,
+ (char *)aclp, &len, flags, sys_cred);
if (*error || (flags & ATTR_KERNOVAL))
return;
xfs_acl_get_endian(aclp);
@@ -629,7 +627,7 @@ xfs_acl_get_attr(
*/
STATIC void
xfs_acl_set_attr(
- vnode_t *vp,
+ bhv_vnode_t *vp,
xfs_acl_t *aclp,
int kind,
int *error)
@@ -654,19 +652,19 @@ xfs_acl_set_attr(
INT_SET(newace->ae_perm, ARCH_CONVERT, ace->ae_perm);
}
INT_SET(newacl->acl_cnt, ARCH_CONVERT, aclp->acl_cnt);
- VOP_ATTR_SET(vp,
- kind == _ACL_TYPE_ACCESS ? SGI_ACL_FILE: SGI_ACL_DEFAULT,
- (char *)newacl, len, ATTR_ROOT, sys_cred, *error);
+ *error = bhv_vop_attr_set(vp, kind == _ACL_TYPE_ACCESS ?
+ SGI_ACL_FILE: SGI_ACL_DEFAULT,
+ (char *)newacl, len, ATTR_ROOT, sys_cred);
_ACL_FREE(newacl);
}
int
xfs_acl_vtoacl(
- vnode_t *vp,
+ bhv_vnode_t *vp,
xfs_acl_t *access_acl,
xfs_acl_t *default_acl)
{
- vattr_t va;
+ bhv_vattr_t va;
int error = 0;
if (access_acl) {
@@ -678,7 +676,7 @@ xfs_acl_vtoacl(
if (!error) {
/* Got the ACL, need the mode... */
va.va_mask = XFS_AT_MODE;
- VOP_GETATTR(vp, &va, 0, sys_cred, error);
+ error = bhv_vop_getattr(vp, &va, 0, sys_cred);
}
if (error)
@@ -701,8 +699,8 @@ xfs_acl_vtoacl(
*/
int
xfs_acl_inherit(
- vnode_t *vp,
- vattr_t *vap,
+ bhv_vnode_t *vp,
+ bhv_vattr_t *vap,
xfs_acl_t *pdaclp)
{
xfs_acl_t *cacl;
@@ -757,11 +755,11 @@ xfs_acl_inherit(
*/
STATIC int
xfs_acl_setmode(
- vnode_t *vp,
+ bhv_vnode_t *vp,
xfs_acl_t *acl,
int *basicperms)
{
- vattr_t va;
+ bhv_vattr_t va;
xfs_acl_entry_t *ap;
xfs_acl_entry_t *gap = NULL;
int i, error, nomask = 1;
@@ -776,7 +774,7 @@ xfs_acl_setmode(
* mode. The m:: bits take precedence over the g:: bits.
*/
va.va_mask = XFS_AT_MODE;
- VOP_GETATTR(vp, &va, 0, sys_cred, error);
+ error = bhv_vop_getattr(vp, &va, 0, sys_cred);
if (error)
return error;
@@ -810,8 +808,7 @@ xfs_acl_setmode(
if (gap && nomask)
va.va_mode |= gap->ae_perm << 3;
- VOP_SETATTR(vp, &va, 0, sys_cred, error);
- return error;
+ return bhv_vop_setattr(vp, &va, 0, sys_cred);
}
/*
diff --git a/fs/xfs/xfs_acl.h b/fs/xfs/xfs_acl.h
index 538d0d65b04..f853cf1a627 100644
--- a/fs/xfs/xfs_acl.h
+++ b/fs/xfs/xfs_acl.h
@@ -50,7 +50,7 @@ typedef struct xfs_acl {
#ifdef CONFIG_XFS_POSIX_ACL
struct vattr;
-struct vnode;
+struct bhv_vnode;
struct xfs_inode;
extern struct kmem_zone *xfs_acl_zone;
@@ -58,14 +58,14 @@ extern struct kmem_zone *xfs_acl_zone;
(zone) = kmem_zone_init(sizeof(xfs_acl_t), (name))
#define xfs_acl_zone_destroy(zone) kmem_zone_destroy(zone)
-extern int xfs_acl_inherit(struct vnode *, struct vattr *, xfs_acl_t *);
+extern int xfs_acl_inherit(struct bhv_vnode *, struct bhv_vattr *, xfs_acl_t *);
extern int xfs_acl_iaccess(struct xfs_inode *, mode_t, cred_t *);
-extern int xfs_acl_vtoacl(struct vnode *, xfs_acl_t *, xfs_acl_t *);
-extern int xfs_acl_vhasacl_access(struct vnode *);
-extern int xfs_acl_vhasacl_default(struct vnode *);
-extern int xfs_acl_vset(struct vnode *, void *, size_t, int);
-extern int xfs_acl_vget(struct vnode *, void *, size_t, int);
-extern int xfs_acl_vremove(struct vnode *vp, int);
+extern int xfs_acl_vtoacl(struct bhv_vnode *, xfs_acl_t *, xfs_acl_t *);
+extern int xfs_acl_vhasacl_access(struct bhv_vnode *);
+extern int xfs_acl_vhasacl_default(struct bhv_vnode *);
+extern int xfs_acl_vset(struct bhv_vnode *, void *, size_t, int);
+extern int xfs_acl_vget(struct bhv_vnode *, void *, size_t, int);
+extern int xfs_acl_vremove(struct bhv_vnode *, int);
#define _ACL_TYPE_ACCESS 1
#define _ACL_TYPE_DEFAULT 2
diff --git a/fs/xfs/xfs_alloc.c b/fs/xfs/xfs_alloc.c
index 8558226281c..eef6763f3a6 100644
--- a/fs/xfs/xfs_alloc.c
+++ b/fs/xfs/xfs_alloc.c
@@ -24,14 +24,12 @@
#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
-#include "xfs_dir.h"
#include "xfs_dir2.h"
#include "xfs_dmapi.h"
#include "xfs_mount.h"
#include "xfs_bmap_btree.h"
#include "xfs_alloc_btree.h"
#include "xfs_ialloc_btree.h"
-#include "xfs_dir_sf.h"
#include "xfs_dir2_sf.h"
#include "xfs_attr_sf.h"
#include "xfs_dinode.h"
@@ -1862,7 +1860,7 @@ xfs_alloc_fix_freelist(
(pag->pagf_longest - delta) :
(pag->pagf_flcount > 0 || pag->pagf_longest > 0);
if (args->minlen + args->alignment + args->minalignslop - 1 > longest ||
- (args->minleft &&
+ (!(flags & XFS_ALLOC_FLAG_FREEING) &&
(int)(pag->pagf_freeblks + pag->pagf_flcount -
need - args->total) <
(int)args->minleft)) {
@@ -1898,7 +1896,7 @@ xfs_alloc_fix_freelist(
longest = (longest > delta) ? (longest - delta) :
(be32_to_cpu(agf->agf_flcount) > 0 || longest > 0);
if (args->minlen + args->alignment + args->minalignslop - 1 > longest ||
- (args->minleft &&
+ (!(flags & XFS_ALLOC_FLAG_FREEING) &&
(int)(be32_to_cpu(agf->agf_freeblks) +
be32_to_cpu(agf->agf_flcount) - need - args->total) <
(int)args->minleft)) {
@@ -1951,8 +1949,14 @@ xfs_alloc_fix_freelist(
* the restrictions correctly. Can happen for free calls
* on a completely full ag.
*/
- if (targs.agbno == NULLAGBLOCK)
+ if (targs.agbno == NULLAGBLOCK) {
+ if (!(flags & XFS_ALLOC_FLAG_FREEING)) {
+ xfs_trans_brelse(tp, agflbp);
+ args->agbp = NULL;
+ return 0;
+ }
break;
+ }
/*
* Put each allocated block on the list.
*/
@@ -2360,8 +2364,19 @@ xfs_alloc_vextent(
if (args->agno == sagno &&
type == XFS_ALLOCTYPE_START_BNO)
args->type = XFS_ALLOCTYPE_THIS_AG;
- if (++(args->agno) == mp->m_sb.sb_agcount)
- args->agno = 0;
+ /*
+ * For the first allocation, we can try any AG to get
+ * space. However, if we already have allocated a
+ * block, we don't want to try AGs whose number is below
+ * sagno. Otherwise, we may end up with out-of-order
+ * locking of AGF, which might cause deadlock.
+ */
+ if (++(args->agno) == mp->m_sb.sb_agcount) {
+ if (args->firstblock != NULLFSBLOCK)
+ args->agno = sagno;
+ else
+ args->agno = 0;
+ }
/*
* Reached the starting a.g., must either be done
* or switch to non-trylock mode.
@@ -2443,7 +2458,7 @@ xfs_free_extent(
args.minlen = args.minleft = args.minalignslop = 0;
down_read(&args.mp->m_peraglock);
args.pag = &args.mp->m_perag[args.agno];
- if ((error = xfs_alloc_fix_freelist(&args, 0)))
+ if ((error = xfs_alloc_fix_freelist(&args, XFS_ALLOC_FLAG_FREEING)))
goto error0;
#ifdef DEBUG
ASSERT(args.agbp != NULL);
diff --git a/fs/xfs/xfs_alloc.h b/fs/xfs/xfs_alloc.h
index 2d1f8928b26..650591f999a 100644
--- a/fs/xfs/xfs_alloc.h
+++ b/fs/xfs/xfs_alloc.h
@@ -41,6 +41,7 @@ typedef enum xfs_alloctype
* Flags for xfs_alloc_fix_freelist.
*/
#define XFS_ALLOC_FLAG_TRYLOCK 0x00000001 /* use trylock for buffer locking */
+#define XFS_ALLOC_FLAG_FREEING 0x00000002 /* indicate caller is freeing extents */
/*
* Argument structure for xfs_alloc routines.
@@ -70,6 +71,7 @@ typedef struct xfs_alloc_arg {
char wasfromfl; /* set if allocation is from freelist */
char isfl; /* set if is freelist blocks - !acctg */
char userdata; /* set if this is user data */
+ xfs_fsblock_t firstblock; /* io first block allocated */
} xfs_alloc_arg_t;
/*
diff --git a/fs/xfs/xfs_alloc_btree.c b/fs/xfs/xfs_alloc_btree.c
index a1d92da86cc..7446556e802 100644
--- a/fs/xfs/xfs_alloc_btree.c
+++ b/fs/xfs/xfs_alloc_btree.c
@@ -24,14 +24,12 @@
#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
-#include "xfs_dir.h"
#include "xfs_dir2.h"
#include "xfs_dmapi.h"
#include "xfs_mount.h"
#include "xfs_bmap_btree.h"
#include "xfs_alloc_btree.h"
#include "xfs_ialloc_btree.h"
-#include "xfs_dir_sf.h"
#include "xfs_dir2_sf.h"
#include "xfs_attr_sf.h"
#include "xfs_dinode.h"
diff --git a/fs/xfs/xfs_attr.c b/fs/xfs/xfs_attr.c
index b6e1e02bbb2..1a210104327 100644
--- a/fs/xfs/xfs_attr.c
+++ b/fs/xfs/xfs_attr.c
@@ -27,7 +27,6 @@
#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
-#include "xfs_dir.h"
#include "xfs_dir2.h"
#include "xfs_dmapi.h"
#include "xfs_mount.h"
@@ -35,7 +34,6 @@
#include "xfs_bmap_btree.h"
#include "xfs_alloc_btree.h"
#include "xfs_ialloc_btree.h"
-#include "xfs_dir_sf.h"
#include "xfs_dir2_sf.h"
#include "xfs_attr_sf.h"
#include "xfs_dinode.h"
@@ -1910,7 +1908,7 @@ xfs_attr_rmtval_get(xfs_da_args_t *args)
error = xfs_bmapi(args->trans, args->dp, (xfs_fileoff_t)lblkno,
args->rmtblkcnt,
XFS_BMAPI_ATTRFORK | XFS_BMAPI_METADATA,
- NULL, 0, map, &nmap, NULL);
+ NULL, 0, map, &nmap, NULL, NULL);
if (error)
return(error);
ASSERT(nmap >= 1);
@@ -1988,7 +1986,7 @@ xfs_attr_rmtval_set(xfs_da_args_t *args)
XFS_BMAPI_ATTRFORK | XFS_BMAPI_METADATA |
XFS_BMAPI_WRITE,
args->firstblock, args->total, &map, &nmap,
- args->flist);
+ args->flist, NULL);
if (!error) {
error = xfs_bmap_finish(&args->trans, args->flist,
*args->firstblock, &committed);
@@ -2039,7 +2037,8 @@ xfs_attr_rmtval_set(xfs_da_args_t *args)
error = xfs_bmapi(NULL, dp, (xfs_fileoff_t)lblkno,
args->rmtblkcnt,
XFS_BMAPI_ATTRFORK | XFS_BMAPI_METADATA,
- args->firstblock, 0, &map, &nmap, NULL);
+ args->firstblock, 0, &map, &nmap,
+ NULL, NULL);
if (error) {
return(error);
}
@@ -2104,7 +2103,7 @@ xfs_attr_rmtval_remove(xfs_da_args_t *args)
args->rmtblkcnt,
XFS_BMAPI_ATTRFORK | XFS_BMAPI_METADATA,
args->firstblock, 0, &map, &nmap,
- args->flist);
+ args->flist, NULL);
if (error) {
return(error);
}
@@ -2142,7 +2141,8 @@ xfs_attr_rmtval_remove(xfs_da_args_t *args)
XFS_BMAP_INIT(args->flist, args->firstblock);
error = xfs_bunmapi(args->trans, args->dp, lblkno, blkcnt,
XFS_BMAPI_ATTRFORK | XFS_BMAPI_METADATA,
- 1, args->firstblock, args->flist, &done);
+ 1, args->firstblock, args->flist,
+ NULL, &done);
if (!error) {
error = xfs_bmap_finish(&args->trans, args->flist,
*args->firstblock, &committed);
@@ -2322,56 +2322,56 @@ xfs_attr_trace_enter(int type, char *where,
STATIC int
posix_acl_access_set(
- vnode_t *vp, char *name, void *data, size_t size, int xflags)
+ bhv_vnode_t *vp, char *name, void *data, size_t size, int xflags)
{
return xfs_acl_vset(vp, data, size, _ACL_TYPE_ACCESS);
}
STATIC int
posix_acl_access_remove(
- struct vnode *vp, char *name, int xflags)
+ bhv_vnode_t *vp, char *name, int xflags)
{
return xfs_acl_vremove(vp, _ACL_TYPE_ACCESS);
}
STATIC int
posix_acl_access_get(
- vnode_t *vp, char *name, void *data, size_t size, int xflags)
+ bhv_vnode_t *vp, char *name, void *data, size_t size, int xflags)
{
return xfs_acl_vget(vp, data, size, _ACL_TYPE_ACCESS);
}
STATIC int
posix_acl_access_exists(
- vnode_t *vp)
+ bhv_vnode_t *vp)
{
return xfs_acl_vhasacl_access(vp);
}
STATIC int
posix_acl_default_set(
- vnode_t *vp, char *name, void *data, size_t size, int xflags)
+ bhv_vnode_t *vp, char *name, void *data, size_t size, int xflags)
{
return xfs_acl_vset(vp, data, size, _ACL_TYPE_DEFAULT);
}
STATIC int
posix_acl_default_get(
- vnode_t *vp, char *name, void *data, size_t size, int xflags)
+ bhv_vnode_t *vp, char *name, void *data, size_t size, int xflags)
{
return xfs_acl_vget(vp, data, size, _ACL_TYPE_DEFAULT);
}
STATIC int
posix_acl_default_remove(
- struct vnode *vp, char *name, int xflags)
+ bhv_vnode_t *vp, char *name, int xflags)
{
return xfs_acl_vremove(vp, _ACL_TYPE_DEFAULT);
}
STATIC int
posix_acl_default_exists(
- vnode_t *vp)
+ bhv_vnode_t *vp)
{
return xfs_acl_vhasacl_default(vp);
}
@@ -2404,21 +2404,18 @@ STATIC struct attrnames *attr_system_names[] =
STATIC int
attr_generic_set(
- struct vnode *vp, char *name, void *data, size_t size, int xflags)
+ bhv_vnode_t *vp, char *name, void *data, size_t size, int xflags)
{
- int error;
-
- VOP_ATTR_SET(vp, name, data, size, xflags, NULL, error);
- return -error;
+ return -bhv_vop_attr_set(vp, name, data, size, xflags, NULL);
}
STATIC int
attr_generic_get(
- struct vnode *vp, char *name, void *data, size_t size, int xflags)
+ bhv_vnode_t *vp, char *name, void *data, size_t size, int xflags)
{
int error, asize = size;
- VOP_ATTR_GET(vp, name, data, &asize, xflags, NULL, error);
+ error = bhv_vop_attr_get(vp, name, data, &asize, xflags, NULL);
if (!error)
return asize;
return -error;
@@ -2426,12 +2423,9 @@ attr_generic_get(
STATIC int
attr_generic_remove(
- struct vnode *vp, char *name, int xflags)
+ bhv_vnode_t *vp, char *name, int xflags)
{
- int error;
-
- VOP_ATTR_REMOVE(vp, name, xflags, NULL, error);
- return -error;
+ return -bhv_vop_attr_remove(vp, name, xflags, NULL);
}
STATIC int
@@ -2459,7 +2453,7 @@ attr_generic_listadd(
STATIC int
attr_system_list(
- struct vnode *vp,
+ bhv_vnode_t *vp,
void *data,
size_t size,
ssize_t *result)
@@ -2481,12 +2475,12 @@ attr_system_list(
int
attr_generic_list(
- struct vnode *vp, void *data, size_t size, int xflags, ssize_t *result)
+ bhv_vnode_t *vp, void *data, size_t size, int xflags, ssize_t *result)
{
attrlist_cursor_kern_t cursor = { 0 };
int error;
- VOP_ATTR_LIST(vp, data, size, xflags, &cursor, NULL, error);
+ error = bhv_vop_attr_list(vp, data, size, xflags, &cursor, NULL);
if (error > 0)
return -error;
*result = -error;
@@ -2514,7 +2508,7 @@ attr_lookup_namespace(
*/
STATIC int
attr_user_capable(
- struct vnode *vp,
+ bhv_vnode_t *vp,
cred_t *cred)
{
struct inode *inode = vn_to_inode(vp);
@@ -2532,7 +2526,7 @@ attr_user_capable(
STATIC int
attr_trusted_capable(
- struct vnode *vp,
+ bhv_vnode_t *vp,
cred_t *cred)
{
struct inode *inode = vn_to_inode(vp);
@@ -2546,7 +2540,7 @@ attr_trusted_capable(
STATIC int
attr_secure_capable(
- struct vnode *vp,
+ bhv_vnode_t *vp,
cred_t *cred)
{
return -ENOSECURITY;
@@ -2554,7 +2548,7 @@ attr_secure_capable(
STATIC int
attr_system_set(
- struct vnode *vp, char *name, void *data, size_t size, int xflags)
+ bhv_vnode_t *vp, char *name, void *data, size_t size, int xflags)
{
attrnames_t *namesp;
int error;
@@ -2573,7 +2567,7 @@ attr_system_set(
STATIC int
attr_system_get(
- struct vnode *vp, char *name, void *data, size_t size, int xflags)
+ bhv_vnode_t *vp, char *name, void *data, size_t size, int xflags)
{
attrnames_t *namesp;
@@ -2585,7 +2579,7 @@ attr_system_get(
STATIC int
attr_system_remove(
- struct vnode *vp, char *name, int xflags)
+ bhv_vnode_t *vp, char *name, int xflags)
{
attrnames_t *namesp;
diff --git a/fs/xfs/xfs_attr.h b/fs/xfs/xfs_attr.h
index b2c7b9fcded..981633f6c07 100644
--- a/fs/xfs/xfs_attr.h
+++ b/fs/xfs/xfs_attr.h
@@ -36,13 +36,13 @@
*========================================================================*/
struct cred;
-struct vnode;
+struct bhv_vnode;
-typedef int (*attrset_t)(struct vnode *, char *, void *, size_t, int);
-typedef int (*attrget_t)(struct vnode *, char *, void *, size_t, int);
-typedef int (*attrremove_t)(struct vnode *, char *, int);
-typedef int (*attrexists_t)(struct vnode *);
-typedef int (*attrcapable_t)(struct vnode *, struct cred *);
+typedef int (*attrset_t)(struct bhv_vnode *, char *, void *, size_t, int);
+typedef int (*attrget_t)(struct bhv_vnode *, char *, void *, size_t, int);
+typedef int (*attrremove_t)(struct bhv_vnode *, char *, int);
+typedef int (*attrexists_t)(struct bhv_vnode *);
+typedef int (*attrcapable_t)(struct bhv_vnode *, struct cred *);
typedef struct attrnames {
char * attr_name;
@@ -63,7 +63,7 @@ extern struct attrnames attr_trusted;
extern struct attrnames *attr_namespaces[ATTR_NAMECOUNT];
extern attrnames_t *attr_lookup_namespace(char *, attrnames_t **, int);
-extern int attr_generic_list(struct vnode *, void *, size_t, int, ssize_t *);
+extern int attr_generic_list(struct bhv_vnode *, void *, size_t, int, ssize_t *);
#define ATTR_DONTFOLLOW 0x0001 /* -- unused, from IRIX -- */
#define ATTR_ROOT 0x0002 /* use attrs in root (trusted) namespace */
diff --git a/fs/xfs/xfs_attr_leaf.c b/fs/xfs/xfs_attr_leaf.c
index 9462be86aa1..9455051f012 100644
--- a/fs/xfs/xfs_attr_leaf.c
+++ b/fs/xfs/xfs_attr_leaf.c
@@ -24,7 +24,6 @@
#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
-#include "xfs_dir.h"
#include "xfs_dir2.h"
#include "xfs_dmapi.h"
#include "xfs_mount.h"
@@ -34,7 +33,6 @@
#include "xfs_ialloc_btree.h"
#include "xfs_alloc.h"
#include "xfs_btree.h"
-#include "xfs_dir_sf.h"
#include "xfs_dir2_sf.h"
#include "xfs_attr_sf.h"
#include "xfs_dinode.h"
@@ -2990,7 +2988,7 @@ xfs_attr_leaf_freextent(xfs_trans_t **trans, xfs_inode_t *dp,
nmap = 1;
error = xfs_bmapi(*trans, dp, (xfs_fileoff_t)tblkno, tblkcnt,
XFS_BMAPI_ATTRFORK | XFS_BMAPI_METADATA,
- NULL, 0, &map, &nmap, NULL);
+ NULL, 0, &map, &nmap, NULL, NULL);
if (error) {
return(error);
}
diff --git a/fs/xfs/xfs_bmap.c b/fs/xfs/xfs_bmap.c
index 26939d364bc..3a613753906 100644
--- a/fs/xfs/xfs_bmap.c
+++ b/fs/xfs/xfs_bmap.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2000-2005 Silicon Graphics, Inc.
+ * Copyright (c) 2000-2006 Silicon Graphics, Inc.
* All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
@@ -24,13 +24,11 @@
#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
-#include "xfs_dir.h"
#include "xfs_dir2.h"
#include "xfs_da_btree.h"
#include "xfs_bmap_btree.h"
#include "xfs_alloc_btree.h"
#include "xfs_ialloc_btree.h"
-#include "xfs_dir_sf.h"
#include "xfs_dir2_sf.h"
#include "xfs_attr_sf.h"
#include "xfs_dinode.h"
@@ -40,13 +38,15 @@
#include "xfs_mount.h"
#include "xfs_ialloc.h"
#include "xfs_itable.h"
+#include "xfs_dir2_data.h"
+#include "xfs_dir2_leaf.h"
+#include "xfs_dir2_block.h"
#include "xfs_inode_item.h"
#include "xfs_extfree_item.h"
#include "xfs_alloc.h"
#include "xfs_bmap.h"
#include "xfs_rtalloc.h"
#include "xfs_error.h"
-#include "xfs_dir_leaf.h"
#include "xfs_attr_leaf.h"
#include "xfs_rw.h"
#include "xfs_quota.h"
@@ -101,6 +101,7 @@ xfs_bmap_add_extent(
xfs_fsblock_t *first, /* pointer to firstblock variable */
xfs_bmap_free_t *flist, /* list of extents to be freed */
int *logflagsp, /* inode logging flags */
+ xfs_extdelta_t *delta, /* Change made to incore extents */
int whichfork, /* data or attr fork */
int rsvd); /* OK to allocate reserved blocks */
@@ -118,6 +119,7 @@ xfs_bmap_add_extent_delay_real(
xfs_fsblock_t *first, /* pointer to firstblock variable */
xfs_bmap_free_t *flist, /* list of extents to be freed */
int *logflagsp, /* inode logging flags */
+ xfs_extdelta_t *delta, /* Change made to incore extents */
int rsvd); /* OK to allocate reserved blocks */
/*
@@ -131,6 +133,7 @@ xfs_bmap_add_extent_hole_delay(
xfs_btree_cur_t *cur, /* if null, not a btree */
xfs_bmbt_irec_t *new, /* new data to add to file extents */
int *logflagsp,/* inode logging flags */
+ xfs_extdelta_t *delta, /* Change made to incore extents */
int rsvd); /* OK to allocate reserved blocks */
/*
@@ -144,6 +147,7 @@ xfs_bmap_add_extent_hole_real(
xfs_btree_cur_t *cur, /* if null, not a btree */
xfs_bmbt_irec_t *new, /* new data to add to file extents */
int *logflagsp, /* inode logging flags */
+ xfs_extdelta_t *delta, /* Change made to incore extents */
int whichfork); /* data or attr fork */
/*
@@ -156,7 +160,8 @@ xfs_bmap_add_extent_unwritten_real(
xfs_extnum_t idx, /* extent number to update/insert */
xfs_btree_cur_t **curp, /* if *curp is null, not a btree */
xfs_bmbt_irec_t *new, /* new data to add to file extents */
- int *logflagsp); /* inode logging flags */
+ int *logflagsp, /* inode logging flags */
+ xfs_extdelta_t *delta); /* Change made to incore extents */
/*
* xfs_bmap_alloc is called by xfs_bmapi to allocate an extent for a file.
@@ -203,6 +208,7 @@ xfs_bmap_del_extent(
xfs_btree_cur_t *cur, /* if null, not a btree */
xfs_bmbt_irec_t *new, /* new data to add to file extents */
int *logflagsp,/* inode logging flags */
+ xfs_extdelta_t *delta, /* Change made to incore extents */
int whichfork, /* data or attr fork */
int rsvd); /* OK to allocate reserved blocks */
@@ -510,7 +516,7 @@ xfs_bmap_add_attrfork_local(
dargs.total = mp->m_dirblkfsbs;
dargs.whichfork = XFS_DATA_FORK;
dargs.trans = tp;
- error = XFS_DIR_SHORTFORM_TO_SINGLE(mp, &dargs);
+ error = xfs_dir2_sf_to_block(&dargs);
} else
error = xfs_bmap_local_to_extents(tp, ip, firstblock, 1, flags,
XFS_DATA_FORK);
@@ -530,6 +536,7 @@ xfs_bmap_add_extent(
xfs_fsblock_t *first, /* pointer to firstblock variable */
xfs_bmap_free_t *flist, /* list of extents to be freed */
int *logflagsp, /* inode logging flags */
+ xfs_extdelta_t *delta, /* Change made to incore extents */
int whichfork, /* data or attr fork */
int rsvd) /* OK to use reserved data blocks */
{
@@ -567,6 +574,15 @@ xfs_bmap_add_extent(
logflags = XFS_ILOG_CORE | XFS_ILOG_FEXT(whichfork);
} else
logflags = 0;
+ /* DELTA: single new extent */
+ if (delta) {
+ if (delta->xed_startoff > new->br_startoff)
+ delta->xed_startoff = new->br_startoff;
+ if (delta->xed_blockcount <
+ new->br_startoff + new->br_blockcount)
+ delta->xed_blockcount = new->br_startoff +
+ new->br_blockcount;
+ }
}
/*
* Any kind of new delayed allocation goes here.
@@ -576,7 +592,7 @@ xfs_bmap_add_extent(
ASSERT((cur->bc_private.b.flags &
XFS_BTCUR_BPRV_WASDEL) == 0);
if ((error = xfs_bmap_add_extent_hole_delay(ip, idx, cur, new,
- &logflags, rsvd)))
+ &logflags, delta, rsvd)))
goto done;
}
/*
@@ -587,7 +603,7 @@ xfs_bmap_add_extent(
ASSERT((cur->bc_private.b.flags &
XFS_BTCUR_BPRV_WASDEL) == 0);
if ((error = xfs_bmap_add_extent_hole_real(ip, idx, cur, new,
- &logflags, whichfork)))
+ &logflags, delta, whichfork)))
goto done;
} else {
xfs_bmbt_irec_t prev; /* old extent at offset idx */
@@ -612,17 +628,17 @@ xfs_bmap_add_extent(
XFS_BTCUR_BPRV_WASDEL);
if ((error = xfs_bmap_add_extent_delay_real(ip,
idx, &cur, new, &da_new, first, flist,
- &logflags, rsvd)))
+ &logflags, delta, rsvd)))
goto done;
} else if (new->br_state == XFS_EXT_NORM) {
ASSERT(new->br_state == XFS_EXT_NORM);
if ((error = xfs_bmap_add_extent_unwritten_real(
- ip, idx, &cur, new, &logflags)))
+ ip, idx, &cur, new, &logflags, delta)))
goto done;
} else {
ASSERT(new->br_state == XFS_EXT_UNWRITTEN);
if ((error = xfs_bmap_add_extent_unwritten_real(
- ip, idx, &cur, new, &logflags)))
+ ip, idx, &cur, new, &logflags, delta)))
goto done;
}
ASSERT(*curp == cur || *curp == NULL);
@@ -635,7 +651,7 @@ xfs_bmap_add_extent(
ASSERT((cur->bc_private.b.flags &
XFS_BTCUR_BPRV_WASDEL) == 0);
if ((error = xfs_bmap_add_extent_hole_real(ip, idx, cur,
- new, &logflags, whichfork)))
+ new, &logflags, delta, whichfork)))
goto done;
}
}
@@ -700,6 +716,7 @@ xfs_bmap_add_extent_delay_real(
xfs_fsblock_t *first, /* pointer to firstblock variable */
xfs_bmap_free_t *flist, /* list of extents to be freed */
int *logflagsp, /* inode logging flags */
+ xfs_extdelta_t *delta, /* Change made to incore extents */
int rsvd) /* OK to use reserved data block allocation */
{
xfs_btree_cur_t *cur; /* btree cursor */
@@ -716,8 +733,8 @@ xfs_bmap_add_extent_delay_real(
/* left is 0, right is 1, prev is 2 */
int rval=0; /* return value (logging flags) */
int state = 0;/* state bits, accessed thru macros */
- xfs_filblks_t temp; /* value for dnew calculations */
- xfs_filblks_t temp2; /* value for dnew calculations */
+ xfs_filblks_t temp=0; /* value for dnew calculations */
+ xfs_filblks_t temp2=0;/* value for dnew calculations */
int tmp_rval; /* partial logging flags */
enum { /* bit number definitions for state */
LEFT_CONTIG, RIGHT_CONTIG,
@@ -839,6 +856,11 @@ xfs_bmap_add_extent_delay_real(
goto done;
}
*dnew = 0;
+ /* DELTA: Three in-core extents are replaced by one. */
+ temp = LEFT.br_startoff;
+ temp2 = LEFT.br_blockcount +
+ PREV.br_blockcount +
+ RIGHT.br_blockcount;
break;
case MASK3(LEFT_FILLING, RIGHT_FILLING, LEFT_CONTIG):
@@ -872,6 +894,10 @@ xfs_bmap_add_extent_delay_real(
goto done;
}
*dnew = 0;
+ /* DELTA: Two in-core extents are replaced by one. */
+ temp = LEFT.br_startoff;
+ temp2 = LEFT.br_blockcount +
+ PREV.br_blockcount;
break;
case MASK3(LEFT_FILLING, RIGHT_FILLING, RIGHT_CONTIG):
@@ -906,6 +932,10 @@ xfs_bmap_add_extent_delay_real(
goto done;
}
*dnew = 0;
+ /* DELTA: Two in-core extents are replaced by one. */
+ temp = PREV.br_startoff;
+ temp2 = PREV.br_blockcount +
+ RIGHT.br_blockcount;
break;
case MASK2(LEFT_FILLING, RIGHT_FILLING):
@@ -936,6 +966,9 @@ xfs_bmap_add_extent_delay_real(
ASSERT(i == 1);
}
*dnew = 0;
+ /* DELTA: The in-core extent described by new changed type. */
+ temp = new->br_startoff;
+ temp2 = new->br_blockcount;
break;
case MASK2(LEFT_FILLING, LEFT_CONTIG):
@@ -978,6 +1011,10 @@ xfs_bmap_add_extent_delay_real(
xfs_bmap_trace_post_update(fname, "LF|LC", ip, idx,
XFS_DATA_FORK);
*dnew = temp;
+ /* DELTA: The boundary between two in-core extents moved. */
+ temp = LEFT.br_startoff;
+ temp2 = LEFT.br_blockcount +
+ PREV.br_blockcount;
break;
case MASK(LEFT_FILLING):
@@ -1025,6 +1062,9 @@ xfs_bmap_add_extent_delay_real(
xfs_bmap_trace_post_update(fname, "LF", ip, idx + 1,
XFS_DATA_FORK);
*dnew = temp;
+ /* DELTA: One in-core extent is split in two. */
+ temp = PREV.br_startoff;
+ temp2 = PREV.br_blockcount;
break;
case MASK2(RIGHT_FILLING, RIGHT_CONTIG):
@@ -1067,6 +1107,10 @@ xfs_bmap_add_extent_delay_real(
xfs_bmap_trace_post_update(fname, "RF|RC", ip, idx,
XFS_DATA_FORK);
*dnew = temp;
+ /* DELTA: The boundary between two in-core extents moved. */
+ temp = PREV.br_startoff;
+ temp2 = PREV.br_blockcount +
+ RIGHT.br_blockcount;
break;
case MASK(RIGHT_FILLING):
@@ -1112,6 +1156,9 @@ xfs_bmap_add_extent_delay_real(
xfs_bmbt_set_startblock(ep, NULLSTARTBLOCK((int)temp));
xfs_bmap_trace_post_update(fname, "RF", ip, idx, XFS_DATA_FORK);
*dnew = temp;
+ /* DELTA: One in-core extent is split in two. */
+ temp = PREV.br_startoff;
+ temp2 = PREV.br_blockcount;
break;
case 0:
@@ -1194,6 +1241,9 @@ xfs_bmap_add_extent_delay_real(
xfs_bmap_trace_post_update(fname, "0", ip, idx + 2,
XFS_DATA_FORK);
*dnew = temp + temp2;
+ /* DELTA: One in-core extent is split in three. */
+ temp = PREV.br_startoff;
+ temp2 = PREV.br_blockcount;
break;
case MASK3(LEFT_FILLING, LEFT_CONTIG, RIGHT_CONTIG):
@@ -1209,6 +1259,13 @@ xfs_bmap_add_extent_delay_real(
ASSERT(0);
}
*curp = cur;
+ if (delta) {
+ temp2 += temp;
+ if (delta->xed_startoff > temp)
+ delta->xed_startoff = temp;
+ if (delta->xed_blockcount < temp2)
+ delta->xed_blockcount = temp2;
+ }
done:
*logflagsp = rval;
return error;
@@ -1235,7 +1292,8 @@ xfs_bmap_add_extent_unwritten_real(
xfs_extnum_t idx, /* extent number to update/insert */
xfs_btree_cur_t **curp, /* if *curp is null, not a btree */
xfs_bmbt_irec_t *new, /* new data to add to file extents */
- int *logflagsp) /* inode logging flags */
+ int *logflagsp, /* inode logging flags */
+ xfs_extdelta_t *delta) /* Change made to incore extents */
{
xfs_btree_cur_t *cur; /* btree cursor */
xfs_bmbt_rec_t *ep; /* extent entry for idx */
@@ -1252,6 +1310,8 @@ xfs_bmap_add_extent_unwritten_real(
/* left is 0, right is 1, prev is 2 */
int rval=0; /* return value (logging flags) */
int state = 0;/* state bits, accessed thru macros */
+ xfs_filblks_t temp=0;
+ xfs_filblks_t temp2=0;
enum { /* bit number definitions for state */
LEFT_CONTIG, RIGHT_CONTIG,
LEFT_FILLING, RIGHT_FILLING,
@@ -1380,6 +1440,11 @@ xfs_bmap_add_extent_unwritten_real(
RIGHT.br_blockcount, LEFT.br_state)))
goto done;
}
+ /* DELTA: Three in-core extents are replaced by one. */
+ temp = LEFT.br_startoff;
+ temp2 = LEFT.br_blockcount +
+ PREV.br_blockcount +
+ RIGHT.br_blockcount;
break;
case MASK3(LEFT_FILLING, RIGHT_FILLING, LEFT_CONTIG):
@@ -1419,6 +1484,10 @@ xfs_bmap_add_extent_unwritten_real(
LEFT.br_state)))
goto done;
}
+ /* DELTA: Two in-core extents are replaced by one. */
+ temp = LEFT.br_startoff;
+ temp2 = LEFT.br_blockcount +
+ PREV.br_blockcount;
break;
case MASK3(LEFT_FILLING, RIGHT_FILLING, RIGHT_CONTIG):
@@ -1459,6 +1528,10 @@ xfs_bmap_add_extent_unwritten_real(
newext)))
goto done;
}
+ /* DELTA: Two in-core extents are replaced by one. */
+ temp = PREV.br_startoff;
+ temp2 = PREV.br_blockcount +
+ RIGHT.br_blockcount;
break;
case MASK2(LEFT_FILLING, RIGHT_FILLING):
@@ -1487,6 +1560,9 @@ xfs_bmap_add_extent_unwritten_real(
newext)))
goto done;
}
+ /* DELTA: The in-core extent described by new changed type. */
+ temp = new->br_startoff;
+ temp2 = new->br_blockcount;
break;
case MASK2(LEFT_FILLING, LEFT_CONTIG):
@@ -1534,6 +1610,10 @@ xfs_bmap_add_extent_unwritten_real(
LEFT.br_state))
goto done;
}
+ /* DELTA: The boundary between two in-core extents moved. */
+ temp = LEFT.br_startoff;
+ temp2 = LEFT.br_blockcount +
+ PREV.br_blockcount;
break;
case MASK(LEFT_FILLING):
@@ -1574,6 +1654,9 @@ xfs_bmap_add_extent_unwritten_real(
goto done;
ASSERT(i == 1);
}
+ /* DELTA: One in-core extent is split in two. */
+ temp = PREV.br_startoff;
+ temp2 = PREV.br_blockcount;
break;
case MASK2(RIGHT_FILLING, RIGHT_CONTIG):
@@ -1617,6 +1700,10 @@ xfs_bmap_add_extent_unwritten_real(
newext)))
goto done;
}
+ /* DELTA: The boundary between two in-core extents moved. */
+ temp = PREV.br_startoff;
+ temp2 = PREV.br_blockcount +
+ RIGHT.br_blockcount;
break;
case MASK(RIGHT_FILLING):
@@ -1657,6 +1744,9 @@ xfs_bmap_add_extent_unwritten_real(
goto done;
ASSERT(i == 1);
}
+ /* DELTA: One in-core extent is split in two. */
+ temp = PREV.br_startoff;
+ temp2 = PREV.br_blockcount;
break;
case 0:
@@ -1710,6 +1800,9 @@ xfs_bmap_add_extent_unwritten_real(
goto done;
ASSERT(i == 1);
}
+ /* DELTA: One in-core extent is split in three. */
+ temp = PREV.br_startoff;
+ temp2 = PREV.br_blockcount;
break;
case MASK3(LEFT_FILLING, LEFT_CONTIG, RIGHT_CONTIG):
@@ -1725,6 +1818,13 @@ xfs_bmap_add_extent_unwritten_real(
ASSERT(0);
}
*curp = cur;
+ if (delta) {
+ temp2 += temp;
+ if (delta->xed_startoff > temp)
+ delta->xed_startoff = temp;
+ if (delta->xed_blockcount < temp2)
+ delta->xed_blockcount = temp2;
+ }
done:
*logflagsp = rval;
return error;
@@ -1753,6 +1853,7 @@ xfs_bmap_add_extent_hole_delay(
xfs_btree_cur_t *cur, /* if null, not a btree */
xfs_bmbt_irec_t *new, /* new data to add to file extents */
int *logflagsp, /* inode logging flags */
+ xfs_extdelta_t *delta, /* Change made to incore extents */
int rsvd) /* OK to allocate reserved blocks */
{
xfs_bmbt_rec_t *ep; /* extent record for idx */
@@ -1765,7 +1866,8 @@ xfs_bmap_add_extent_hole_delay(
xfs_filblks_t oldlen=0; /* old indirect size */
xfs_bmbt_irec_t right; /* right neighbor extent entry */
int state; /* state bits, accessed thru macros */
- xfs_filblks_t temp; /* temp for indirect calculations */
+ xfs_filblks_t temp=0; /* temp for indirect calculations */
+ xfs_filblks_t temp2=0;
enum { /* bit number definitions for state */
LEFT_CONTIG, RIGHT_CONTIG,
LEFT_DELAY, RIGHT_DELAY,
@@ -1844,6 +1946,9 @@ xfs_bmap_add_extent_hole_delay(
XFS_DATA_FORK);
xfs_iext_remove(ifp, idx, 1);
ip->i_df.if_lastex = idx - 1;
+ /* DELTA: Two in-core extents were replaced by one. */
+ temp2 = temp;
+ temp = left.br_startoff;
break;
case MASK(LEFT_CONTIG):
@@ -1864,6 +1969,9 @@ xfs_bmap_add_extent_hole_delay(
xfs_bmap_trace_post_update(fname, "LC", ip, idx - 1,
XFS_DATA_FORK);
ip->i_df.if_lastex = idx - 1;
+ /* DELTA: One in-core extent grew into a hole. */
+ temp2 = temp;
+ temp = left.br_startoff;
break;
case MASK(RIGHT_CONTIG):
@@ -1881,6 +1989,9 @@ xfs_bmap_add_extent_hole_delay(
NULLSTARTBLOCK((int)newlen), temp, right.br_state);
xfs_bmap_trace_post_update(fname, "RC", ip, idx, XFS_DATA_FORK);
ip->i_df.if_lastex = idx;
+ /* DELTA: One in-core extent grew into a hole. */
+ temp2 = temp;
+ temp = new->br_startoff;
break;
case 0:
@@ -1894,6 +2005,9 @@ xfs_bmap_add_extent_hole_delay(
XFS_DATA_FORK);
xfs_iext_insert(ifp, idx, 1, new);
ip->i_df.if_lastex = idx;
+ /* DELTA: A new in-core extent was added in a hole. */
+ temp2 = new->br_blockcount;
+ temp = new->br_startoff;
break;
}
if (oldlen != newlen) {
@@ -1904,6 +2018,13 @@ xfs_bmap_add_extent_hole_delay(
* Nothing to do for disk quota accounting here.
*/
}
+ if (delta) {
+ temp2 += temp;
+ if (delta->xed_startoff > temp)
+ delta->xed_startoff = temp;
+ if (delta->xed_blockcount < temp2)
+ delta->xed_blockcount = temp2;
+ }
*logflagsp = 0;
return 0;
#undef MASK
@@ -1925,6 +2046,7 @@ xfs_bmap_add_extent_hole_real(
xfs_btree_cur_t *cur, /* if null, not a btree */
xfs_bmbt_irec_t *new, /* new data to add to file extents */
int *logflagsp, /* inode logging flags */
+ xfs_extdelta_t *delta, /* Change made to incore extents */
int whichfork) /* data or attr fork */
{
xfs_bmbt_rec_t *ep; /* pointer to extent entry ins. point */
@@ -1936,7 +2058,10 @@ xfs_bmap_add_extent_hole_real(
xfs_ifork_t *ifp; /* inode fork pointer */
xfs_bmbt_irec_t left; /* left neighbor extent entry */
xfs_bmbt_irec_t right; /* right neighbor extent entry */
+ int rval=0; /* return value (logging flags) */
int state; /* state bits, accessed thru macros */
+ xfs_filblks_t temp=0;
+ xfs_filblks_t temp2=0;
enum { /* bit number definitions for state */
LEFT_CONTIG, RIGHT_CONTIG,
LEFT_DELAY, RIGHT_DELAY,
@@ -1993,6 +2118,7 @@ xfs_bmap_add_extent_hole_real(
left.br_blockcount + new->br_blockcount +
right.br_blockcount <= MAXEXTLEN));
+ error = 0;
/*
* Select which case we're in here, and implement it.
*/
@@ -2018,25 +2144,35 @@ xfs_bmap_add_extent_hole_real(
XFS_IFORK_NEXT_SET(ip, whichfork,
XFS_IFORK_NEXTENTS(ip, whichfork) - 1);
if (cur == NULL) {
- *logflagsp = XFS_ILOG_CORE | XFS_ILOG_FEXT(whichfork);
- return 0;
+ rval = XFS_ILOG_CORE | XFS_ILOG_FEXT(whichfork);
+ } else {
+ rval = XFS_ILOG_CORE;
+ if ((error = xfs_bmbt_lookup_eq(cur,
+ right.br_startoff,
+ right.br_startblock,
+ right.br_blockcount, &i)))
+ goto done;
+ ASSERT(i == 1);
+ if ((error = xfs_bmbt_delete(cur, &i)))
+ goto done;
+ ASSERT(i == 1);
+ if ((error = xfs_bmbt_decrement(cur, 0, &i)))
+ goto done;
+ ASSERT(i == 1);
+ if ((error = xfs_bmbt_update(cur, left.br_startoff,
+ left.br_startblock,
+ left.br_blockcount +
+ new->br_blockcount +
+ right.br_blockcount,
+ left.br_state)))
+ goto done;
}
- *logflagsp = XFS_ILOG_CORE;
- if ((error = xfs_bmbt_lookup_eq(cur, right.br_startoff,
- right.br_startblock, right.br_blockcount, &i)))
- return error;
- ASSERT(i == 1);
- if ((error = xfs_bmbt_delete(cur, &i)))
- return error;
- ASSERT(i == 1);
- if ((error = xfs_bmbt_decrement(cur, 0, &i)))
- return error;
- ASSERT(i == 1);
- error = xfs_bmbt_update(cur, left.br_startoff,
- left.br_startblock,
- left.br_blockcount + new->br_blockcount +
- right.br_blockcount, left.br_state);
- return error;
+ /* DELTA: Two in-core extents were replaced by one. */
+ temp = left.br_startoff;
+ temp2 = left.br_blockcount +
+ new->br_blockcount +
+ right.br_blockcount;
+ break;
case MASK(LEFT_CONTIG):
/*
@@ -2050,19 +2186,27 @@ xfs_bmap_add_extent_hole_real(
xfs_bmap_trace_post_update(fname, "LC", ip, idx - 1, whichfork);
ifp->if_lastex = idx - 1;
if (cur == NULL) {
- *logflagsp = XFS_ILOG_FEXT(whichfork);
- return 0;
+ rval = XFS_ILOG_FEXT(whichfork);
+ } else {
+ rval = 0;
+ if ((error = xfs_bmbt_lookup_eq(cur,
+ left.br_startoff,
+ left.br_startblock,
+ left.br_blockcount, &i)))
+ goto done;
+ ASSERT(i == 1);
+ if ((error = xfs_bmbt_update(cur, left.br_startoff,
+ left.br_startblock,
+ left.br_blockcount +
+ new->br_blockcount,
+ left.br_state)))
+ goto done;
}
- *logflagsp = 0;
- if ((error = xfs_bmbt_lookup_eq(cur, left.br_startoff,
- left.br_startblock, left.br_blockcount, &i)))
- return error;
- ASSERT(i == 1);
- error = xfs_bmbt_update(cur, left.br_startoff,
- left.br_startblock,
- left.br_blockcount + new->br_blockcount,
- left.br_state);
- return error;
+ /* DELTA: One in-core extent grew. */
+ temp = left.br_startoff;
+ temp2 = left.br_blockcount +
+ new->br_blockcount;
+ break;
case MASK(RIGHT_CONTIG):
/*
@@ -2077,19 +2221,27 @@ xfs_bmap_add_extent_hole_real(
xfs_bmap_trace_post_update(fname, "RC", ip, idx, whichfork);
ifp->if_lastex = idx;
if (cur == NULL) {
- *logflagsp = XFS_ILOG_FEXT(whichfork);
- return 0;
+ rval = XFS_ILOG_FEXT(whichfork);
+ } else {
+ rval = 0;
+ if ((error = xfs_bmbt_lookup_eq(cur,
+ right.br_startoff,
+ right.br_startblock,
+ right.br_blockcount, &i)))
+ goto done;
+ ASSERT(i == 1);
+ if ((error = xfs_bmbt_update(cur, new->br_startoff,
+ new->br_startblock,
+ new->br_blockcount +
+ right.br_blockcount,
+ right.br_state)))
+ goto done;
}
- *logflagsp = 0;
- if ((error = xfs_bmbt_lookup_eq(cur, right.br_startoff,
- right.br_startblock, right.br_blockcount, &i)))
- return error;
- ASSERT(i == 1);
- error = xfs_bmbt_update(cur, new->br_startoff,
- new->br_startblock,
- new->br_blockcount + right.br_blockcount,
- right.br_state);
- return error;
+ /* DELTA: One in-core extent grew. */
+ temp = new->br_startoff;
+ temp2 = new->br_blockcount +
+ right.br_blockcount;
+ break;
case 0:
/*
@@ -2104,29 +2256,41 @@ xfs_bmap_add_extent_hole_real(
XFS_IFORK_NEXT_SET(ip, whichfork,
XFS_IFORK_NEXTENTS(ip, whichfork) + 1);
if (cur == NULL) {
- *logflagsp = XFS_ILOG_CORE | XFS_ILOG_FEXT(whichfork);
- return 0;
+ rval = XFS_ILOG_CORE | XFS_ILOG_FEXT(whichfork);
+ } else {
+ rval = XFS_ILOG_CORE;
+ if ((error = xfs_bmbt_lookup_eq(cur,
+ new->br_startoff,
+ new->br_startblock,
+ new->br_blockcount, &i)))
+ goto done;
+ ASSERT(i == 0);
+ cur->bc_rec.b.br_state = new->br_state;
+ if ((error = xfs_bmbt_insert(cur, &i)))
+ goto done;
+ ASSERT(i == 1);
}
- *logflagsp = XFS_ILOG_CORE;
- if ((error = xfs_bmbt_lookup_eq(cur, new->br_startoff,
- new->br_startblock, new->br_blockcount, &i)))
- return error;
- ASSERT(i == 0);
- cur->bc_rec.b.br_state = new->br_state;
- if ((error = xfs_bmbt_insert(cur, &i)))
- return error;
- ASSERT(i == 1);
- return 0;
+ /* DELTA: A new extent was added in a hole. */
+ temp = new->br_startoff;
+ temp2 = new->br_blockcount;
+ break;
+ }
+ if (delta) {
+ temp2 += temp;
+ if (delta->xed_startoff > temp)
+ delta->xed_startoff = temp;
+ if (delta->xed_blockcount < temp2)
+ delta->xed_blockcount = temp2;
}
+done:
+ *logflagsp = rval;
+ return error;
#undef MASK
#undef MASK2
#undef STATE_SET
#undef STATE_TEST
#undef STATE_SET_TEST
#undef SWITCH_STATE
- /* NOTREACHED */
- ASSERT(0);
- return 0; /* keep gcc quite */
}
/*
@@ -2598,6 +2762,7 @@ xfs_bmap_btalloc(
args.mp = mp;
args.fsbno = ap->rval;
args.maxlen = MIN(ap->alen, mp->m_sb.sb_agblocks);
+ args.firstblock = ap->firstblock;
blen = 0;
if (nullfb) {
args.type = XFS_ALLOCTYPE_START_BNO;
@@ -2657,7 +2822,7 @@ xfs_bmap_btalloc(
else
args.minlen = ap->alen;
} else if (ap->low) {
- args.type = XFS_ALLOCTYPE_FIRST_AG;
+ args.type = XFS_ALLOCTYPE_START_BNO;
args.total = args.minlen = ap->minlen;
} else {
args.type = XFS_ALLOCTYPE_NEAR_BNO;
@@ -2669,7 +2834,7 @@ xfs_bmap_btalloc(
args.prod = ap->ip->i_d.di_extsize;
if ((args.mod = (xfs_extlen_t)do_mod(ap->off, args.prod)))
args.mod = (xfs_extlen_t)(args.prod - args.mod);
- } else if (unlikely(mp->m_sb.sb_blocksize >= NBPP)) {
+ } else if (mp->m_sb.sb_blocksize >= NBPP) {
args.prod = 1;
args.mod = 0;
} else {
@@ -2885,6 +3050,7 @@ xfs_bmap_del_extent(
xfs_btree_cur_t *cur, /* if null, not a btree */
xfs_bmbt_irec_t *del, /* data to remove from extents */
int *logflagsp, /* inode logging flags */
+ xfs_extdelta_t *delta, /* Change made to incore extents */
int whichfork, /* data or attr fork */
int rsvd) /* OK to allocate reserved blocks */
{
@@ -3193,6 +3359,14 @@ xfs_bmap_del_extent(
if (da_old > da_new)
xfs_mod_incore_sb(mp, XFS_SBS_FDBLOCKS, (int)(da_old - da_new),
rsvd);
+ if (delta) {
+ /* DELTA: report the original extent. */
+ if (delta->xed_startoff > got.br_startoff)
+ delta->xed_startoff = got.br_startoff;
+ if (delta->xed_blockcount < got.br_startoff+got.br_blockcount)
+ delta->xed_blockcount = got.br_startoff +
+ got.br_blockcount;
+ }
done:
*logflagsp = flags;
return error;
@@ -3279,6 +3453,7 @@ xfs_bmap_extents_to_btree(
XFS_IFORK_FMT_SET(ip, whichfork, XFS_DINODE_FMT_BTREE);
args.tp = tp;
args.mp = mp;
+ args.firstblock = *firstblock;
if (*firstblock == NULLFSBLOCK) {
args.type = XFS_ALLOCTYPE_START_BNO;
args.fsbno = XFS_INO_TO_FSB(mp, ip->i_ino);
@@ -3414,6 +3589,7 @@ xfs_bmap_local_to_extents(
args.tp = tp;
args.mp = ip->i_mount;
+ args.firstblock = *firstblock;
ASSERT((ifp->if_flags &
(XFS_IFINLINE|XFS_IFEXTENTS|XFS_IFEXTIREC)) == XFS_IFINLINE);
/*
@@ -3753,7 +3929,7 @@ xfs_bunmap_trace(
if (ip->i_rwtrace == NULL)
return;
ktrace_enter(ip->i_rwtrace,
- (void *)(__psint_t)XFS_BUNMAPI,
+ (void *)(__psint_t)XFS_BUNMAP,
(void *)ip,
(void *)(__psint_t)((ip->i_d.di_size >> 32) & 0xffffffff),
(void *)(__psint_t)(ip->i_d.di_size & 0xffffffff),
@@ -4087,8 +4263,8 @@ xfs_bmap_finish(
if (!XFS_FORCED_SHUTDOWN(mp))
xfs_force_shutdown(mp,
(error == EFSCORRUPTED) ?
- XFS_CORRUPT_INCORE :
- XFS_METADATA_IO_ERROR);
+ SHUTDOWN_CORRUPT_INCORE :
+ SHUTDOWN_META_IO_ERROR);
return error;
}
xfs_trans_log_efd_extent(ntp, efd, free->xbfi_startblock,
@@ -4538,7 +4714,8 @@ xfs_bmapi(
xfs_extlen_t total, /* total blocks needed */
xfs_bmbt_irec_t *mval, /* output: map values */
int *nmap, /* i/o: mval size/count */
- xfs_bmap_free_t *flist) /* i/o: list extents to free */
+ xfs_bmap_free_t *flist, /* i/o: list extents to free */
+ xfs_extdelta_t *delta) /* o: change made to incore extents */
{
xfs_fsblock_t abno; /* allocated block number */
xfs_extlen_t alen; /* allocated extent length */
@@ -4650,6 +4827,10 @@ xfs_bmapi(
end = bno + len;
obno = bno;
bma.ip = NULL;
+ if (delta) {
+ delta->xed_startoff = NULLFILEOFF;
+ delta->xed_blockcount = 0;
+ }
while (bno < end && n < *nmap) {
/*
* Reading past eof, act as though there's a hole
@@ -4886,8 +5067,8 @@ xfs_bmapi(
got.br_state = XFS_EXT_UNWRITTEN;
}
error = xfs_bmap_add_extent(ip, lastx, &cur, &got,
- firstblock, flist, &tmp_logflags, whichfork,
- (flags & XFS_BMAPI_RSVBLOCKS));
+ firstblock, flist, &tmp_logflags, delta,
+ whichfork, (flags & XFS_BMAPI_RSVBLOCKS));
logflags |= tmp_logflags;
if (error)
goto error0;
@@ -4983,8 +5164,8 @@ xfs_bmapi(
}
mval->br_state = XFS_EXT_NORM;
error = xfs_bmap_add_extent(ip, lastx, &cur, mval,
- firstblock, flist, &tmp_logflags, whichfork,
- (flags & XFS_BMAPI_RSVBLOCKS));
+ firstblock, flist, &tmp_logflags, delta,
+ whichfork, (flags & XFS_BMAPI_RSVBLOCKS));
logflags |= tmp_logflags;
if (error)
goto error0;
@@ -5073,7 +5254,14 @@ xfs_bmapi(
ASSERT(XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE ||
XFS_IFORK_NEXTENTS(ip, whichfork) > ifp->if_ext_max);
error = 0;
-
+ if (delta && delta->xed_startoff != NULLFILEOFF) {
+ /* A change was actually made.
+ * Note that delta->xed_blockcount is an offset at this
+ * point and needs to be converted to a block count.
+ */
+ ASSERT(delta->xed_blockcount > delta->xed_startoff);
+ delta->xed_blockcount -= delta->xed_startoff;
+ }
error0:
/*
* Log everything. Do this after conversion, there's no point in
@@ -5185,6 +5373,8 @@ xfs_bunmapi(
xfs_fsblock_t *firstblock, /* first allocated block
controls a.g. for allocs */
xfs_bmap_free_t *flist, /* i/o: list extents to free */
+ xfs_extdelta_t *delta, /* o: change made to incore
+ extents */
int *done) /* set if not done yet */
{
xfs_btree_cur_t *cur; /* bmap btree cursor */
@@ -5242,6 +5432,10 @@ xfs_bunmapi(
bno = start + len - 1;
ep = xfs_bmap_search_extents(ip, bno, whichfork, &eof, &lastx, &got,
&prev);
+ if (delta) {
+ delta->xed_startoff = NULLFILEOFF;
+ delta->xed_blockcount = 0;
+ }
/*
* Check to see if the given block number is past the end of the
* file, back up to the last block if so...
@@ -5340,7 +5534,8 @@ xfs_bunmapi(
}
del.br_state = XFS_EXT_UNWRITTEN;
error = xfs_bmap_add_extent(ip, lastx, &cur, &del,
- firstblock, flist, &logflags, XFS_DATA_FORK, 0);
+ firstblock, flist, &logflags, delta,
+ XFS_DATA_FORK, 0);
if (error)
goto error0;
goto nodelete;
@@ -5394,7 +5589,7 @@ xfs_bunmapi(
prev.br_state = XFS_EXT_UNWRITTEN;
error = xfs_bmap_add_extent(ip, lastx - 1, &cur,
&prev, firstblock, flist, &logflags,
- XFS_DATA_FORK, 0);
+ delta, XFS_DATA_FORK, 0);
if (error)
goto error0;
goto nodelete;
@@ -5403,7 +5598,7 @@ xfs_bunmapi(
del.br_state = XFS_EXT_UNWRITTEN;
error = xfs_bmap_add_extent(ip, lastx, &cur,
&del, firstblock, flist, &logflags,
- XFS_DATA_FORK, 0);
+ delta, XFS_DATA_FORK, 0);
if (error)
goto error0;
goto nodelete;
@@ -5456,7 +5651,7 @@ xfs_bunmapi(
goto error0;
}
error = xfs_bmap_del_extent(ip, tp, lastx, flist, cur, &del,
- &tmp_logflags, whichfork, rsvd);
+ &tmp_logflags, delta, whichfork, rsvd);
logflags |= tmp_logflags;
if (error)
goto error0;
@@ -5513,6 +5708,14 @@ nodelete:
ASSERT(ifp->if_ext_max ==
XFS_IFORK_SIZE(ip, whichfork) / (uint)sizeof(xfs_bmbt_rec_t));
error = 0;
+ if (delta && delta->xed_startoff != NULLFILEOFF) {
+ /* A change was actually made.
+ * Note that delta->xed_blockcount is an offset at this
+ * point and needs to be converted to a block count.
+ */
+ ASSERT(delta->xed_blockcount > delta->xed_startoff);
+ delta->xed_blockcount -= delta->xed_startoff;
+ }
error0:
/*
* Log everything. Do this after conversion, there's no point in
@@ -5556,7 +5759,7 @@ xfs_getbmap(
__int64_t fixlen; /* length for -1 case */
int i; /* extent number */
xfs_inode_t *ip; /* xfs incore inode pointer */
- vnode_t *vp; /* corresponding vnode */
+ bhv_vnode_t *vp; /* corresponding vnode */
int lock; /* lock state */
xfs_bmbt_irec_t *map; /* buffer for user's data */
xfs_mount_t *mp; /* file system mount point */
@@ -5653,7 +5856,7 @@ xfs_getbmap(
if (whichfork == XFS_DATA_FORK && ip->i_delayed_blks) {
/* xfs_fsize_t last_byte = xfs_file_last_byte(ip); */
- VOP_FLUSH_PAGES(vp, (xfs_off_t)0, -1, 0, FI_REMAPF, error);
+ error = bhv_vop_flush_pages(vp, (xfs_off_t)0, -1, 0, FI_REMAPF);
}
ASSERT(whichfork == XFS_ATTR_FORK || ip->i_delayed_blks == 0);
@@ -5689,7 +5892,8 @@ xfs_getbmap(
nmap = (nexleft > subnex) ? subnex : nexleft;
error = xfs_bmapi(NULL, ip, XFS_BB_TO_FSBT(mp, bmv->bmv_offset),
XFS_BB_TO_FSB(mp, bmv->bmv_length),
- bmapi_flags, NULL, 0, map, &nmap, NULL);
+ bmapi_flags, NULL, 0, map, &nmap,
+ NULL, NULL);
if (error)
goto unlock_and_return;
ASSERT(nmap <= subnex);
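The new trailing argument is optional: callers that have no use for the incore-extent delta simply pass NULL, exactly as the updated xfs_getbmap() call above does. As a rough sketch under the new signature (illustrative only; ip, offset_fsb and count_fsb are assumed to already exist in the caller), a read-only mapping request would look like:

	xfs_bmbt_irec_t	imap;		/* single mapping returned */
	int		nmap = 1;	/* in: size of imap, out: entries filled */
	int		error;

	error = xfs_bmapi(NULL, ip, offset_fsb, count_fsb,
			  0,		/* no allocation flags, read-only lookup */
			  NULL,		/* firstblock: not allocating */
			  0,		/* total blocks needed */
			  &imap, &nmap,
			  NULL,		/* flist: nothing to free */
			  NULL);	/* delta: caller does not track changes */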
diff --git a/fs/xfs/xfs_bmap.h b/fs/xfs/xfs_bmap.h
index 8e0d73d9ccc..80e93409b78 100644
--- a/fs/xfs/xfs_bmap.h
+++ b/fs/xfs/xfs_bmap.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2000-2003,2005 Silicon Graphics, Inc.
+ * Copyright (c) 2000-2006 Silicon Graphics, Inc.
* All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
@@ -26,6 +26,20 @@ struct xfs_mount;
struct xfs_trans;
/*
+ * DELTA: describe a change to the in-core extent list.
+ *
+ * Internally the use of xed_blockcount is somewhat funky:
+ * xed_blockcount contains an offset for much of the time because this
+ * makes merging changes easier, and it is only converted to a real
+ * block count when the operation completes. (xfs_fileoff_t and
+ * xfs_filblks_t are the same underlying type).
+ */
+typedef struct xfs_extdelta
+{
+ xfs_fileoff_t xed_startoff; /* offset of range */
+ xfs_filblks_t xed_blockcount; /* blocks in range */
+} xfs_extdelta_t;
+
+/*
* List of extents to be free "later".
* The list is kept sorted on xbf_startblock.
*/
@@ -275,7 +289,9 @@ xfs_bmapi(
xfs_extlen_t total, /* total blocks needed */
struct xfs_bmbt_irec *mval, /* output: map values */
int *nmap, /* i/o: mval size/count */
- xfs_bmap_free_t *flist); /* i/o: list extents to free */
+ xfs_bmap_free_t *flist, /* i/o: list extents to free */
+ xfs_extdelta_t *delta); /* o: change made to incore
+ extents */
/*
* Map file blocks to filesystem blocks, simple version.
@@ -309,6 +325,8 @@ xfs_bunmapi(
xfs_fsblock_t *firstblock, /* first allocated block
controls a.g. for allocs */
xfs_bmap_free_t *flist, /* i/o: list extents to free */
+ xfs_extdelta_t *delta, /* o: change made to incore
+ extents */
int *done); /* set if not done yet */
/*
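To make the comment above concrete, here is a minimal sketch of the accumulate-then-convert scheme; it is not part of the patch, and the helper names are invented for illustration. While an operation runs, xed_startoff tracks the lowest changed offset and xed_blockcount the highest end offset; only at the end is xed_blockcount turned into a real count, which is exactly what the blocks added to xfs_bmapi() and xfs_bunmapi() above do.

static void
sketch_delta_note_change(xfs_extdelta_t *delta, xfs_fileoff_t off,
			 xfs_filblks_t len)
{
	if (delta == NULL)
		return;
	if (delta->xed_startoff == NULLFILEOFF || off < delta->xed_startoff)
		delta->xed_startoff = off;
	/* during the operation xed_blockcount holds an end offset */
	if (off + len > delta->xed_blockcount)
		delta->xed_blockcount = off + len;
}

static void
sketch_delta_finish(xfs_extdelta_t *delta)
{
	if (delta && delta->xed_startoff != NULLFILEOFF) {
		/* convert the stored end offset into a block count */
		ASSERT(delta->xed_blockcount > delta->xed_startoff);
		delta->xed_blockcount -= delta->xed_startoff;
	}
}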
diff --git a/fs/xfs/xfs_bmap_btree.c b/fs/xfs/xfs_bmap_btree.c
index bea44709afb..18fb7385d71 100644
--- a/fs/xfs/xfs_bmap_btree.c
+++ b/fs/xfs/xfs_bmap_btree.c
@@ -24,14 +24,12 @@
#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
-#include "xfs_dir.h"
#include "xfs_dir2.h"
#include "xfs_dmapi.h"
#include "xfs_mount.h"
#include "xfs_bmap_btree.h"
#include "xfs_alloc_btree.h"
#include "xfs_ialloc_btree.h"
-#include "xfs_dir_sf.h"
#include "xfs_dir2_sf.h"
#include "xfs_attr_sf.h"
#include "xfs_dinode.h"
@@ -1569,12 +1567,11 @@ xfs_bmbt_split(
lbno = XFS_DADDR_TO_FSB(args.mp, XFS_BUF_ADDR(lbp));
left = XFS_BUF_TO_BMBT_BLOCK(lbp);
args.fsbno = cur->bc_private.b.firstblock;
+ args.firstblock = args.fsbno;
if (args.fsbno == NULLFSBLOCK) {
args.fsbno = lbno;
args.type = XFS_ALLOCTYPE_START_BNO;
- } else if (cur->bc_private.b.flist->xbf_low)
- args.type = XFS_ALLOCTYPE_FIRST_AG;
- else
+ } else
args.type = XFS_ALLOCTYPE_NEAR_BNO;
args.mod = args.minleft = args.alignment = args.total = args.isfl =
args.userdata = args.minalignslop = 0;
@@ -2356,6 +2353,7 @@ xfs_bmbt_newroot(
args.userdata = args.minalignslop = 0;
args.minlen = args.maxlen = args.prod = 1;
args.wasdel = cur->bc_private.b.flags & XFS_BTCUR_BPRV_WASDEL;
+ args.firstblock = args.fsbno;
if (args.fsbno == NULLFSBLOCK) {
#ifdef DEBUG
if ((error = xfs_btree_check_lptr(cur, INT_GET(*pp, ARCH_CONVERT), level))) {
@@ -2365,9 +2363,7 @@ xfs_bmbt_newroot(
#endif
args.fsbno = INT_GET(*pp, ARCH_CONVERT);
args.type = XFS_ALLOCTYPE_START_BNO;
- } else if (args.wasdel)
- args.type = XFS_ALLOCTYPE_FIRST_AG;
- else
+ } else
args.type = XFS_ALLOCTYPE_NEAR_BNO;
if ((error = xfs_alloc_vextent(&args))) {
XFS_BMBT_TRACE_CURSOR(cur, ERROR);
diff --git a/fs/xfs/xfs_btree.c b/fs/xfs/xfs_btree.c
index 52d5d095fc3..ee2255bd656 100644
--- a/fs/xfs/xfs_btree.c
+++ b/fs/xfs/xfs_btree.c
@@ -24,14 +24,12 @@
#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
-#include "xfs_dir.h"
#include "xfs_dir2.h"
#include "xfs_dmapi.h"
#include "xfs_mount.h"
#include "xfs_bmap_btree.h"
#include "xfs_alloc_btree.h"
#include "xfs_ialloc_btree.h"
-#include "xfs_dir_sf.h"
#include "xfs_dir2_sf.h"
#include "xfs_attr_sf.h"
#include "xfs_dinode.h"
diff --git a/fs/xfs/xfs_buf_item.c b/fs/xfs/xfs_buf_item.c
index 5fed15682dd..a4aa53974f7 100644
--- a/fs/xfs/xfs_buf_item.c
+++ b/fs/xfs/xfs_buf_item.c
@@ -23,7 +23,6 @@
#include "xfs_inum.h"
#include "xfs_trans.h"
#include "xfs_sb.h"
-#include "xfs_dir.h"
#include "xfs_dmapi.h"
#include "xfs_mount.h"
#include "xfs_buf_item.h"
@@ -1030,9 +1029,9 @@ xfs_buf_iodone_callbacks(
if ((XFS_BUF_TARGET(bp) != lasttarg) ||
(time_after(jiffies, (lasttime + 5*HZ)))) {
lasttime = jiffies;
- prdev("XFS write error in file system meta-data "
- "block 0x%llx in %s",
- XFS_BUF_TARGET(bp),
+ cmn_err(CE_ALERT, "Device %s, XFS metadata write error"
+ " block 0x%llx in %s",
+ XFS_BUFTARG_NAME(XFS_BUF_TARGET(bp)),
(__uint64_t)XFS_BUF_ADDR(bp), mp->m_fsname);
}
lasttarg = XFS_BUF_TARGET(bp);
@@ -1108,7 +1107,7 @@ xfs_buf_error_relse(
XFS_BUF_ERROR(bp,0);
xfs_buftrace("BUF_ERROR_RELSE", bp);
if (! XFS_FORCED_SHUTDOWN(mp))
- xfs_force_shutdown(mp, XFS_METADATA_IO_ERROR);
+ xfs_force_shutdown(mp, SHUTDOWN_META_IO_ERROR);
/*
* We have to unpin the pinned buffers so do the
* callbacks.
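The surrounding context keeps the alert rate limited: the message is emitted only when the buffer target changes or at most once every five seconds. The time-based half of that idiom, sketched stand-alone with hypothetical names (relies on the standard jiffies/time_after helpers):

static unsigned long sketch_lasttime;	/* jiffies when we last printed */

static int
sketch_ratelimit_expired(void)
{
	if (time_after(jiffies, sketch_lasttime + 5 * HZ)) {
		sketch_lasttime = jiffies;
		return 1;		/* ok to print again */
	}
	return 0;			/* still inside the 5 second window */
}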
diff --git a/fs/xfs/xfs_cap.h b/fs/xfs/xfs_cap.h
index d0035c6e951..7a0e482dd43 100644
--- a/fs/xfs/xfs_cap.h
+++ b/fs/xfs/xfs_cap.h
@@ -49,12 +49,12 @@ typedef struct xfs_cap_set {
#include <linux/posix_cap_xattr.h>
-struct vnode;
+struct bhv_vnode;
-extern int xfs_cap_vhascap(struct vnode *);
-extern int xfs_cap_vset(struct vnode *, void *, size_t);
-extern int xfs_cap_vget(struct vnode *, void *, size_t);
-extern int xfs_cap_vremove(struct vnode *vp);
+extern int xfs_cap_vhascap(struct bhv_vnode *);
+extern int xfs_cap_vset(struct bhv_vnode *, void *, size_t);
+extern int xfs_cap_vget(struct bhv_vnode *, void *, size_t);
+extern int xfs_cap_vremove(struct bhv_vnode *);
#define _CAP_EXISTS xfs_cap_vhascap
diff --git a/fs/xfs/xfs_da_btree.c b/fs/xfs/xfs_da_btree.c
index 8988b905117..32ab61d17ac 100644
--- a/fs/xfs/xfs_da_btree.c
+++ b/fs/xfs/xfs_da_btree.c
@@ -24,7 +24,6 @@
#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
-#include "xfs_dir.h"
#include "xfs_dir2.h"
#include "xfs_dmapi.h"
#include "xfs_mount.h"
@@ -32,7 +31,6 @@
#include "xfs_bmap_btree.h"
#include "xfs_alloc_btree.h"
#include "xfs_ialloc_btree.h"
-#include "xfs_dir_sf.h"
#include "xfs_dir2_sf.h"
#include "xfs_attr_sf.h"
#include "xfs_dinode.h"
@@ -43,7 +41,6 @@
#include "xfs_bmap.h"
#include "xfs_attr.h"
#include "xfs_attr_leaf.h"
-#include "xfs_dir_leaf.h"
#include "xfs_dir2_data.h"
#include "xfs_dir2_leaf.h"
#include "xfs_dir2_block.h"
@@ -159,7 +156,7 @@ xfs_da_split(xfs_da_state_t *state)
max = state->path.active - 1;
ASSERT((max >= 0) && (max < XFS_DA_NODE_MAXDEPTH));
ASSERT(state->path.blk[max].magic == XFS_ATTR_LEAF_MAGIC ||
- state->path.blk[max].magic == XFS_DIRX_LEAF_MAGIC(state->mp));
+ state->path.blk[max].magic == XFS_DIR2_LEAFN_MAGIC);
addblk = &state->path.blk[max]; /* initial dummy value */
for (i = max; (i >= 0) && addblk; state->path.active--, i--) {
@@ -199,38 +196,7 @@ xfs_da_split(xfs_da_state_t *state)
return(error); /* GROT: attr inconsistent */
addblk = newblk;
break;
- case XFS_DIR_LEAF_MAGIC:
- ASSERT(XFS_DIR_IS_V1(state->mp));
- error = xfs_dir_leaf_split(state, oldblk, newblk);
- if ((error != 0) && (error != ENOSPC)) {
- return(error); /* GROT: dir is inconsistent */
- }
- if (!error) {
- addblk = newblk;
- break;
- }
- /*
- * Entry wouldn't fit, split the leaf again.
- */
- state->extravalid = 1;
- if (state->inleaf) {
- state->extraafter = 0; /* before newblk */
- error = xfs_dir_leaf_split(state, oldblk,
- &state->extrablk);
- if (error)
- return(error); /* GROT: dir incon. */
- addblk = newblk;
- } else {
- state->extraafter = 1; /* after newblk */
- error = xfs_dir_leaf_split(state, newblk,
- &state->extrablk);
- if (error)
- return(error); /* GROT: dir incon. */
- addblk = newblk;
- }
- break;
case XFS_DIR2_LEAFN_MAGIC:
- ASSERT(XFS_DIR_IS_V2(state->mp));
error = xfs_dir2_leafn_split(state, oldblk, newblk);
if (error)
return error;
@@ -363,7 +329,6 @@ xfs_da_root_split(xfs_da_state_t *state, xfs_da_state_blk_t *blk1,
size = (int)((char *)&oldroot->btree[be16_to_cpu(oldroot->hdr.count)] -
(char *)oldroot);
} else {
- ASSERT(XFS_DIR_IS_V2(mp));
ASSERT(be16_to_cpu(oldroot->hdr.info.magic) == XFS_DIR2_LEAFN_MAGIC);
leaf = (xfs_dir2_leaf_t *)oldroot;
size = (int)((char *)&leaf->ents[be16_to_cpu(leaf->hdr.count)] -
@@ -379,8 +344,7 @@ xfs_da_root_split(xfs_da_state_t *state, xfs_da_state_blk_t *blk1,
* Set up the new root node.
*/
error = xfs_da_node_create(args,
- args->whichfork == XFS_DATA_FORK &&
- XFS_DIR_IS_V2(mp) ? mp->m_dirleafblk : 0,
+ (args->whichfork == XFS_DATA_FORK) ? mp->m_dirleafblk : 0,
be16_to_cpu(node->hdr.level) + 1, &bp, args->whichfork);
if (error)
return(error);
@@ -427,10 +391,9 @@ xfs_da_node_split(xfs_da_state_t *state, xfs_da_state_blk_t *oldblk,
ASSERT(be16_to_cpu(node->hdr.info.magic) == XFS_DA_NODE_MAGIC);
/*
- * With V2 the extra block is data or freespace.
+ * With V2 dirs the extra block is data or freespace.
*/
- useextra = state->extravalid && (XFS_DIR_IS_V1(state->mp) ||
- state->args->whichfork == XFS_ATTR_FORK);
+ useextra = state->extravalid && state->args->whichfork == XFS_ATTR_FORK;
newcount = 1 + useextra;
/*
* Do we have to split the node?
@@ -624,7 +587,7 @@ xfs_da_node_add(xfs_da_state_t *state, xfs_da_state_blk_t *oldblk,
ASSERT(be16_to_cpu(node->hdr.info.magic) == XFS_DA_NODE_MAGIC);
ASSERT((oldblk->index >= 0) && (oldblk->index <= be16_to_cpu(node->hdr.count)));
ASSERT(newblk->blkno != 0);
- if (state->args->whichfork == XFS_DATA_FORK && XFS_DIR_IS_V2(mp))
+ if (state->args->whichfork == XFS_DATA_FORK)
ASSERT(newblk->blkno >= mp->m_dirleafblk &&
newblk->blkno < mp->m_dirfreeblk);
@@ -670,7 +633,7 @@ xfs_da_join(xfs_da_state_t *state)
save_blk = &state->altpath.blk[ state->path.active-1 ];
ASSERT(state->path.blk[0].magic == XFS_DA_NODE_MAGIC);
ASSERT(drop_blk->magic == XFS_ATTR_LEAF_MAGIC ||
- drop_blk->magic == XFS_DIRX_LEAF_MAGIC(state->mp));
+ drop_blk->magic == XFS_DIR2_LEAFN_MAGIC);
/*
* Walk back up the tree joining/deallocating as necessary.
@@ -693,17 +656,7 @@ xfs_da_join(xfs_da_state_t *state)
return(0);
xfs_attr_leaf_unbalance(state, drop_blk, save_blk);
break;
- case XFS_DIR_LEAF_MAGIC:
- ASSERT(XFS_DIR_IS_V1(state->mp));
- error = xfs_dir_leaf_toosmall(state, &action);
- if (error)
- return(error);
- if (action == 0)
- return(0);
- xfs_dir_leaf_unbalance(state, drop_blk, save_blk);
- break;
case XFS_DIR2_LEAFN_MAGIC:
- ASSERT(XFS_DIR_IS_V2(state->mp));
error = xfs_dir2_leafn_toosmall(state, &action);
if (error)
return error;
@@ -790,7 +743,7 @@ xfs_da_root_join(xfs_da_state_t *state, xfs_da_state_blk_t *root_blk)
ASSERT(bp != NULL);
blkinfo = bp->data;
if (be16_to_cpu(oldroot->hdr.level) == 1) {
- ASSERT(be16_to_cpu(blkinfo->magic) == XFS_DIRX_LEAF_MAGIC(state->mp) ||
+ ASSERT(be16_to_cpu(blkinfo->magic) == XFS_DIR2_LEAFN_MAGIC ||
be16_to_cpu(blkinfo->magic) == XFS_ATTR_LEAF_MAGIC);
} else {
ASSERT(be16_to_cpu(blkinfo->magic) == XFS_DA_NODE_MAGIC);
@@ -951,14 +904,7 @@ xfs_da_fixhashpath(xfs_da_state_t *state, xfs_da_state_path_t *path)
if (count == 0)
return;
break;
- case XFS_DIR_LEAF_MAGIC:
- ASSERT(XFS_DIR_IS_V1(state->mp));
- lasthash = xfs_dir_leaf_lasthash(blk->bp, &count);
- if (count == 0)
- return;
- break;
case XFS_DIR2_LEAFN_MAGIC:
- ASSERT(XFS_DIR_IS_V2(state->mp));
lasthash = xfs_dir2_leafn_lasthash(blk->bp, &count);
if (count == 0)
return;
@@ -1117,10 +1063,7 @@ xfs_da_node_lookup_int(xfs_da_state_t *state, int *result)
* Descend thru the B-tree searching each level for the right
* node to use, until the right hashval is found.
*/
- if (args->whichfork == XFS_DATA_FORK && XFS_DIR_IS_V2(state->mp))
- blkno = state->mp->m_dirleafblk;
- else
- blkno = 0;
+ blkno = (args->whichfork == XFS_DATA_FORK)? state->mp->m_dirleafblk : 0;
for (blk = &state->path.blk[0], state->path.active = 1;
state->path.active <= XFS_DA_NODE_MAXDEPTH;
blk++, state->path.active++) {
@@ -1137,7 +1080,7 @@ xfs_da_node_lookup_int(xfs_da_state_t *state, int *result)
}
curr = blk->bp->data;
ASSERT(be16_to_cpu(curr->magic) == XFS_DA_NODE_MAGIC ||
- be16_to_cpu(curr->magic) == XFS_DIRX_LEAF_MAGIC(state->mp) ||
+ be16_to_cpu(curr->magic) == XFS_DIR2_LEAFN_MAGIC ||
be16_to_cpu(curr->magic) == XFS_ATTR_LEAF_MAGIC);
/*
@@ -1190,16 +1133,10 @@ xfs_da_node_lookup_int(xfs_da_state_t *state, int *result)
blk->index = probe;
blkno = be32_to_cpu(btree->before);
}
- }
- else if (be16_to_cpu(curr->magic) == XFS_ATTR_LEAF_MAGIC) {
+ } else if (be16_to_cpu(curr->magic) == XFS_ATTR_LEAF_MAGIC) {
blk->hashval = xfs_attr_leaf_lasthash(blk->bp, NULL);
break;
- }
- else if (be16_to_cpu(curr->magic) == XFS_DIR_LEAF_MAGIC) {
- blk->hashval = xfs_dir_leaf_lasthash(blk->bp, NULL);
- break;
- }
- else if (be16_to_cpu(curr->magic) == XFS_DIR2_LEAFN_MAGIC) {
+ } else if (be16_to_cpu(curr->magic) == XFS_DIR2_LEAFN_MAGIC) {
blk->hashval = xfs_dir2_leafn_lasthash(blk->bp, NULL);
break;
}
@@ -1212,12 +1149,7 @@ xfs_da_node_lookup_int(xfs_da_state_t *state, int *result)
* next leaf and keep searching.
*/
for (;;) {
- if (blk->magic == XFS_DIR_LEAF_MAGIC) {
- ASSERT(XFS_DIR_IS_V1(state->mp));
- retval = xfs_dir_leaf_lookup_int(blk->bp, args,
- &blk->index);
- } else if (blk->magic == XFS_DIR2_LEAFN_MAGIC) {
- ASSERT(XFS_DIR_IS_V2(state->mp));
+ if (blk->magic == XFS_DIR2_LEAFN_MAGIC) {
retval = xfs_dir2_leafn_lookup_int(blk->bp, args,
&blk->index, state);
}
@@ -1270,7 +1202,7 @@ xfs_da_blk_link(xfs_da_state_t *state, xfs_da_state_blk_t *old_blk,
old_info = old_blk->bp->data;
new_info = new_blk->bp->data;
ASSERT(old_blk->magic == XFS_DA_NODE_MAGIC ||
- old_blk->magic == XFS_DIRX_LEAF_MAGIC(state->mp) ||
+ old_blk->magic == XFS_DIR2_LEAFN_MAGIC ||
old_blk->magic == XFS_ATTR_LEAF_MAGIC);
ASSERT(old_blk->magic == be16_to_cpu(old_info->magic));
ASSERT(new_blk->magic == be16_to_cpu(new_info->magic));
@@ -1280,12 +1212,7 @@ xfs_da_blk_link(xfs_da_state_t *state, xfs_da_state_blk_t *old_blk,
case XFS_ATTR_LEAF_MAGIC:
before = xfs_attr_leaf_order(old_blk->bp, new_blk->bp);
break;
- case XFS_DIR_LEAF_MAGIC:
- ASSERT(XFS_DIR_IS_V1(state->mp));
- before = xfs_dir_leaf_order(old_blk->bp, new_blk->bp);
- break;
case XFS_DIR2_LEAFN_MAGIC:
- ASSERT(XFS_DIR_IS_V2(state->mp));
before = xfs_dir2_leafn_order(old_blk->bp, new_blk->bp);
break;
case XFS_DA_NODE_MAGIC:
@@ -1404,7 +1331,7 @@ xfs_da_blk_unlink(xfs_da_state_t *state, xfs_da_state_blk_t *drop_blk,
save_info = save_blk->bp->data;
drop_info = drop_blk->bp->data;
ASSERT(save_blk->magic == XFS_DA_NODE_MAGIC ||
- save_blk->magic == XFS_DIRX_LEAF_MAGIC(state->mp) ||
+ save_blk->magic == XFS_DIR2_LEAFN_MAGIC ||
save_blk->magic == XFS_ATTR_LEAF_MAGIC);
ASSERT(save_blk->magic == be16_to_cpu(save_info->magic));
ASSERT(drop_blk->magic == be16_to_cpu(drop_info->magic));
@@ -1529,7 +1456,7 @@ xfs_da_path_shift(xfs_da_state_t *state, xfs_da_state_path_t *path,
ASSERT(blk->bp != NULL);
info = blk->bp->data;
ASSERT(be16_to_cpu(info->magic) == XFS_DA_NODE_MAGIC ||
- be16_to_cpu(info->magic) == XFS_DIRX_LEAF_MAGIC(state->mp) ||
+ be16_to_cpu(info->magic) == XFS_DIR2_LEAFN_MAGIC ||
be16_to_cpu(info->magic) == XFS_ATTR_LEAF_MAGIC);
blk->magic = be16_to_cpu(info->magic);
if (blk->magic == XFS_DA_NODE_MAGIC) {
@@ -1548,20 +1475,13 @@ xfs_da_path_shift(xfs_da_state_t *state, xfs_da_state_path_t *path,
blk->hashval = xfs_attr_leaf_lasthash(blk->bp,
NULL);
break;
- case XFS_DIR_LEAF_MAGIC:
- ASSERT(XFS_DIR_IS_V1(state->mp));
- blk->hashval = xfs_dir_leaf_lasthash(blk->bp,
- NULL);
- break;
case XFS_DIR2_LEAFN_MAGIC:
- ASSERT(XFS_DIR_IS_V2(state->mp));
blk->hashval = xfs_dir2_leafn_lasthash(blk->bp,
NULL);
break;
default:
ASSERT(blk->magic == XFS_ATTR_LEAF_MAGIC ||
- blk->magic ==
- XFS_DIRX_LEAF_MAGIC(state->mp));
+ blk->magic == XFS_DIR2_LEAFN_MAGIC);
break;
}
}
@@ -1620,7 +1540,6 @@ xfs_da_grow_inode(xfs_da_args_t *args, xfs_dablk_t *new_blkno)
xfs_bmbt_irec_t *mapp;
xfs_inode_t *dp;
int nmap, error, w, count, c, got, i, mapi;
- xfs_fsize_t size;
xfs_trans_t *tp;
xfs_mount_t *mp;
@@ -1631,7 +1550,7 @@ xfs_da_grow_inode(xfs_da_args_t *args, xfs_dablk_t *new_blkno)
/*
* For new directories adjust the file offset and block count.
*/
- if (w == XFS_DATA_FORK && XFS_DIR_IS_V2(mp)) {
+ if (w == XFS_DATA_FORK) {
bno = mp->m_dirleafblk;
count = mp->m_dirblkfsbs;
} else {
@@ -1641,10 +1560,9 @@ xfs_da_grow_inode(xfs_da_args_t *args, xfs_dablk_t *new_blkno)
/*
* Find a spot in the file space to put the new block.
*/
- if ((error = xfs_bmap_first_unused(tp, dp, count, &bno, w))) {
+ if ((error = xfs_bmap_first_unused(tp, dp, count, &bno, w)))
return error;
- }
- if (w == XFS_DATA_FORK && XFS_DIR_IS_V2(mp))
+ if (w == XFS_DATA_FORK)
ASSERT(bno >= mp->m_dirleafblk && bno < mp->m_dirfreeblk);
/*
* Try mapping it in one filesystem block.
@@ -1655,7 +1573,7 @@ xfs_da_grow_inode(xfs_da_args_t *args, xfs_dablk_t *new_blkno)
XFS_BMAPI_AFLAG(w)|XFS_BMAPI_WRITE|XFS_BMAPI_METADATA|
XFS_BMAPI_CONTIG,
args->firstblock, args->total, &map, &nmap,
- args->flist))) {
+ args->flist, NULL))) {
return error;
}
ASSERT(nmap <= 1);
@@ -1676,7 +1594,8 @@ xfs_da_grow_inode(xfs_da_args_t *args, xfs_dablk_t *new_blkno)
XFS_BMAPI_AFLAG(w)|XFS_BMAPI_WRITE|
XFS_BMAPI_METADATA,
args->firstblock, args->total,
- &mapp[mapi], &nmap, args->flist))) {
+ &mapp[mapi], &nmap, args->flist,
+ NULL))) {
kmem_free(mapp, sizeof(*mapp) * count);
return error;
}
@@ -1705,19 +1624,6 @@ xfs_da_grow_inode(xfs_da_args_t *args, xfs_dablk_t *new_blkno)
if (mapp != &map)
kmem_free(mapp, sizeof(*mapp) * count);
*new_blkno = (xfs_dablk_t)bno;
- /*
- * For version 1 directories, adjust the file size if it changed.
- */
- if (w == XFS_DATA_FORK && XFS_DIR_IS_V1(mp)) {
- ASSERT(mapi == 1);
- if ((error = xfs_bmap_last_offset(tp, dp, &bno, w)))
- return error;
- size = XFS_FSB_TO_B(mp, bno);
- if (size != dp->i_d.di_size) {
- dp->i_d.di_size = size;
- xfs_trans_log_inode(tp, dp, XFS_ILOG_CORE);
- }
- }
return 0;
}
@@ -1742,7 +1648,6 @@ xfs_da_swap_lastblock(xfs_da_args_t *args, xfs_dablk_t *dead_blknop,
int error, w, entno, level, dead_level;
xfs_da_blkinfo_t *dead_info, *sib_info;
xfs_da_intnode_t *par_node, *dead_node;
- xfs_dir_leafblock_t *dead_leaf;
xfs_dir2_leaf_t *dead_leaf2;
xfs_dahash_t dead_hash;
@@ -1753,11 +1658,8 @@ xfs_da_swap_lastblock(xfs_da_args_t *args, xfs_dablk_t *dead_blknop,
w = args->whichfork;
ASSERT(w == XFS_DATA_FORK);
mp = ip->i_mount;
- if (XFS_DIR_IS_V2(mp)) {
- lastoff = mp->m_dirfreeblk;
- error = xfs_bmap_last_before(tp, ip, &lastoff, w);
- } else
- error = xfs_bmap_last_offset(tp, ip, &lastoff, w);
+ lastoff = mp->m_dirfreeblk;
+ error = xfs_bmap_last_before(tp, ip, &lastoff, w);
if (error)
return error;
if (unlikely(lastoff == 0)) {
@@ -1780,14 +1682,7 @@ xfs_da_swap_lastblock(xfs_da_args_t *args, xfs_dablk_t *dead_blknop,
/*
* Get values from the moved block.
*/
- if (be16_to_cpu(dead_info->magic) == XFS_DIR_LEAF_MAGIC) {
- ASSERT(XFS_DIR_IS_V1(mp));
- dead_leaf = (xfs_dir_leafblock_t *)dead_info;
- dead_level = 0;
- dead_hash =
- INT_GET(dead_leaf->entries[INT_GET(dead_leaf->hdr.count, ARCH_CONVERT) - 1].hashval, ARCH_CONVERT);
- } else if (be16_to_cpu(dead_info->magic) == XFS_DIR2_LEAFN_MAGIC) {
- ASSERT(XFS_DIR_IS_V2(mp));
+ if (be16_to_cpu(dead_info->magic) == XFS_DIR2_LEAFN_MAGIC) {
dead_leaf2 = (xfs_dir2_leaf_t *)dead_info;
dead_level = 0;
dead_hash = be32_to_cpu(dead_leaf2->ents[be16_to_cpu(dead_leaf2->hdr.count) - 1].hashval);
@@ -1842,7 +1737,7 @@ xfs_da_swap_lastblock(xfs_da_args_t *args, xfs_dablk_t *dead_blknop,
xfs_da_buf_done(sib_buf);
sib_buf = NULL;
}
- par_blkno = XFS_DIR_IS_V1(mp) ? 0 : mp->m_dirleafblk;
+ par_blkno = mp->m_dirleafblk;
level = -1;
/*
* Walk down the tree looking for the parent of the moved block.
@@ -1941,8 +1836,6 @@ xfs_da_shrink_inode(xfs_da_args_t *args, xfs_dablk_t dead_blkno,
{
xfs_inode_t *dp;
int done, error, w, count;
- xfs_fileoff_t bno;
- xfs_fsize_t size;
xfs_trans_t *tp;
xfs_mount_t *mp;
@@ -1950,7 +1843,7 @@ xfs_da_shrink_inode(xfs_da_args_t *args, xfs_dablk_t dead_blkno,
w = args->whichfork;
tp = args->trans;
mp = dp->i_mount;
- if (w == XFS_DATA_FORK && XFS_DIR_IS_V2(mp))
+ if (w == XFS_DATA_FORK)
count = mp->m_dirblkfsbs;
else
count = 1;
@@ -1961,34 +1854,17 @@ xfs_da_shrink_inode(xfs_da_args_t *args, xfs_dablk_t dead_blkno,
*/
if ((error = xfs_bunmapi(tp, dp, dead_blkno, count,
XFS_BMAPI_AFLAG(w)|XFS_BMAPI_METADATA,
- 0, args->firstblock, args->flist,
+ 0, args->firstblock, args->flist, NULL,
&done)) == ENOSPC) {
if (w != XFS_DATA_FORK)
- goto done;
+ break;
if ((error = xfs_da_swap_lastblock(args, &dead_blkno,
&dead_buf)))
- goto done;
- } else if (error)
- goto done;
- else
+ break;
+ } else {
break;
- }
- ASSERT(done);
- xfs_da_binval(tp, dead_buf);
- /*
- * Adjust the directory size for version 1.
- */
- if (w == XFS_DATA_FORK && XFS_DIR_IS_V1(mp)) {
- if ((error = xfs_bmap_last_offset(tp, dp, &bno, w)))
- return error;
- size = XFS_FSB_TO_B(dp->i_mount, bno);
- if (size != dp->i_d.di_size) {
- dp->i_d.di_size = size;
- xfs_trans_log_inode(tp, dp, XFS_ILOG_CORE);
}
}
- return 0;
-done:
xfs_da_binval(tp, dead_buf);
return error;
}
@@ -2049,10 +1925,7 @@ xfs_da_do_buf(
xfs_dabuf_t *rbp;
mp = dp->i_mount;
- if (whichfork == XFS_DATA_FORK && XFS_DIR_IS_V2(mp))
- nfsb = mp->m_dirblkfsbs;
- else
- nfsb = 1;
+ nfsb = (whichfork == XFS_DATA_FORK) ? mp->m_dirblkfsbs : 1;
mappedbno = *mappedbnop;
/*
* Caller doesn't have a mapping. -2 means don't complain
@@ -2086,7 +1959,7 @@ xfs_da_do_buf(
nfsb,
XFS_BMAPI_METADATA |
XFS_BMAPI_AFLAG(whichfork),
- NULL, 0, mapp, &nmap, NULL)))
+ NULL, 0, mapp, &nmap, NULL, NULL)))
goto exit0;
}
} else {
@@ -2198,7 +2071,6 @@ xfs_da_do_buf(
magic1 = be32_to_cpu(data->hdr.magic);
if (unlikely(
XFS_TEST_ERROR((magic != XFS_DA_NODE_MAGIC) &&
- (magic != XFS_DIR_LEAF_MAGIC) &&
(magic != XFS_ATTR_LEAF_MAGIC) &&
(magic != XFS_DIR2_LEAF1_MAGIC) &&
(magic != XFS_DIR2_LEAFN_MAGIC) &&
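The xfs_da_shrink_inode() hunk above replaces the old goto-based exits with breaks out of a retry loop. Condensed to its control flow as reconstructed from the hunk (declarations and the enclosing function omitted): if unmapping the dead block fails with ENOSPC on the data fork, the dead block is swapped with the directory's last block and the unmap is retried; any other outcome ends the loop, and the buffer is invalidated either way.

	for (;;) {
		error = xfs_bunmapi(tp, dp, dead_blkno, count,
				XFS_BMAPI_AFLAG(w) | XFS_BMAPI_METADATA,
				0, args->firstblock, args->flist, NULL,
				&done);
		if (error == ENOSPC) {
			if (w != XFS_DATA_FORK)
				break;
			/* swap the dead block with the last one and retry */
			error = xfs_da_swap_lastblock(args, &dead_blkno,
					&dead_buf);
			if (error)
				break;
		} else {
			break;		/* success or a hard error */
		}
	}
	xfs_da_binval(tp, dead_buf);
	return error;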
diff --git a/fs/xfs/xfs_da_btree.h b/fs/xfs/xfs_da_btree.h
index 243a730d5ec..4ab865ec8b8 100644
--- a/fs/xfs/xfs_da_btree.h
+++ b/fs/xfs/xfs_da_btree.h
@@ -36,14 +36,10 @@ struct zone;
* level in the Btree, and to identify which type of block this is.
*/
#define XFS_DA_NODE_MAGIC 0xfebe /* magic number: non-leaf blocks */
-#define XFS_DIR_LEAF_MAGIC 0xfeeb /* magic number: directory leaf blks */
#define XFS_ATTR_LEAF_MAGIC 0xfbee /* magic number: attribute leaf blks */
#define XFS_DIR2_LEAF1_MAGIC 0xd2f1 /* magic number: v2 dirlf single blks */
#define XFS_DIR2_LEAFN_MAGIC 0xd2ff /* magic number: v2 dirlf multi blks */
-#define XFS_DIRX_LEAF_MAGIC(mp) \
- (XFS_DIR_IS_V1(mp) ? XFS_DIR_LEAF_MAGIC : XFS_DIR2_LEAFN_MAGIC)
-
typedef struct xfs_da_blkinfo {
__be32 forw; /* previous block in list */
__be32 back; /* following block in list */
diff --git a/fs/xfs/xfs_dfrag.c b/fs/xfs/xfs_dfrag.c
index 4968a6358e6..80562b60fb9 100644
--- a/fs/xfs/xfs_dfrag.c
+++ b/fs/xfs/xfs_dfrag.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2000-2002,2005 Silicon Graphics, Inc.
+ * Copyright (c) 2000-2006 Silicon Graphics, Inc.
* All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
@@ -24,14 +24,12 @@
#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
-#include "xfs_dir.h"
#include "xfs_dir2.h"
#include "xfs_dmapi.h"
#include "xfs_mount.h"
#include "xfs_bmap_btree.h"
#include "xfs_alloc_btree.h"
#include "xfs_ialloc_btree.h"
-#include "xfs_dir_sf.h"
#include "xfs_dir2_sf.h"
#include "xfs_attr_sf.h"
#include "xfs_dinode.h"
@@ -54,24 +52,14 @@ xfs_swapext(
xfs_swapext_t __user *sxu)
{
xfs_swapext_t *sxp;
- xfs_inode_t *ip=NULL, *tip=NULL, *ips[2];
- xfs_trans_t *tp;
+ xfs_inode_t *ip=NULL, *tip=NULL;
xfs_mount_t *mp;
- xfs_bstat_t *sbp;
struct file *fp = NULL, *tfp = NULL;
- vnode_t *vp, *tvp;
- static uint lock_flags = XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL;
- int ilf_fields, tilf_fields;
+ bhv_vnode_t *vp, *tvp;
int error = 0;
- xfs_ifork_t *tempifp, *ifp, *tifp;
- __uint64_t tmp;
- int aforkblks = 0;
- int taforkblks = 0;
- char locked = 0;
sxp = kmem_alloc(sizeof(xfs_swapext_t), KM_MAYFAIL);
- tempifp = kmem_alloc(sizeof(xfs_ifork_t), KM_MAYFAIL);
- if (!sxp || !tempifp) {
+ if (!sxp) {
error = XFS_ERROR(ENOMEM);
goto error0;
}
@@ -118,14 +106,56 @@ xfs_swapext(
mp = ip->i_mount;
- sbp = &sxp->sx_stat;
-
if (XFS_FORCED_SHUTDOWN(mp)) {
error = XFS_ERROR(EIO);
goto error0;
}
- locked = 1;
+ error = XFS_SWAP_EXTENTS(mp, &ip->i_iocore, &tip->i_iocore, sxp);
+
+ error0:
+ if (fp != NULL)
+ fput(fp);
+ if (tfp != NULL)
+ fput(tfp);
+
+ if (sxp != NULL)
+ kmem_free(sxp, sizeof(xfs_swapext_t));
+
+ return error;
+}
+
+int
+xfs_swap_extents(
+ xfs_inode_t *ip,
+ xfs_inode_t *tip,
+ xfs_swapext_t *sxp)
+{
+ xfs_mount_t *mp;
+ xfs_inode_t *ips[2];
+ xfs_trans_t *tp;
+ xfs_bstat_t *sbp = &sxp->sx_stat;
+ bhv_vnode_t *vp, *tvp;
+ xfs_ifork_t *tempifp, *ifp, *tifp;
+ int ilf_fields, tilf_fields;
+ static uint lock_flags = XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL;
+ int error = 0;
+ int aforkblks = 0;
+ int taforkblks = 0;
+ __uint64_t tmp;
+ char locked = 0;
+
+ mp = ip->i_mount;
+
+ tempifp = kmem_alloc(sizeof(xfs_ifork_t), KM_MAYFAIL);
+ if (!tempifp) {
+ error = XFS_ERROR(ENOMEM);
+ goto error0;
+ }
+
+ sbp = &sxp->sx_stat;
+ vp = XFS_ITOV(ip);
+ tvp = XFS_ITOV(tip);
/* Lock in i_ino order */
if (ip->i_ino < tip->i_ino) {
@@ -137,6 +167,7 @@ xfs_swapext(
}
xfs_lock_inodes(ips, 2, 0, lock_flags);
+ locked = 1;
/* Check permissions */
error = xfs_iaccess(ip, S_IWUSR, NULL);
@@ -169,7 +200,7 @@ xfs_swapext(
if (VN_CACHED(tvp) != 0) {
xfs_inval_cached_trace(&tip->i_iocore, 0, -1, 0, -1);
- VOP_FLUSHINVAL_PAGES(tvp, 0, -1, FI_REMAPF_LOCKED);
+ bhv_vop_flushinval_pages(tvp, 0, -1, FI_REMAPF_LOCKED);
}
/* Verify O_DIRECT for ftmp */
@@ -214,7 +245,7 @@ xfs_swapext(
/* We need to fail if the file is memory mapped. Once we have tossed
* all existing pages, the page fault will have no option
* but to go to the filesystem for pages. By making the page fault call
- * VOP_READ (or write in the case of autogrow) they block on the iolock
+ * vop_read (or write in the case of autogrow) they block on the iolock
* until we have switched the extents.
*/
if (VN_MAPPED(vp)) {
@@ -233,7 +264,7 @@ xfs_swapext(
* fields change.
*/
- VOP_TOSS_PAGES(vp, 0, -1, FI_REMAPF);
+ bhv_vop_toss_pages(vp, 0, -1, FI_REMAPF);
tp = xfs_trans_alloc(mp, XFS_TRANS_SWAPEXT);
if ((error = xfs_trans_reserve(tp, 0,
@@ -360,16 +391,7 @@ xfs_swapext(
xfs_iunlock(ip, lock_flags);
xfs_iunlock(tip, lock_flags);
}
-
- if (fp != NULL)
- fput(fp);
- if (tfp != NULL)
- fput(tfp);
-
- if (sxp != NULL)
- kmem_free(sxp, sizeof(xfs_swapext_t));
if (tempifp != NULL)
kmem_free(tempifp, sizeof(xfs_ifork_t));
-
return error;
}
diff --git a/fs/xfs/xfs_dfrag.h b/fs/xfs/xfs_dfrag.h
index f678559abc4..da178205be6 100644
--- a/fs/xfs/xfs_dfrag.h
+++ b/fs/xfs/xfs_dfrag.h
@@ -48,6 +48,9 @@ typedef struct xfs_swapext
*/
int xfs_swapext(struct xfs_swapext __user *sx);
+int xfs_swap_extents(struct xfs_inode *ip, struct xfs_inode *tip,
+ struct xfs_swapext *sxp);
+
#endif /* __KERNEL__ */
#endif /* __XFS_DFRAG_H__ */
diff --git a/fs/xfs/xfs_dinode.h b/fs/xfs/xfs_dinode.h
index 79d0d9e1fba..b33826961c4 100644
--- a/fs/xfs/xfs_dinode.h
+++ b/fs/xfs/xfs_dinode.h
@@ -85,7 +85,6 @@ typedef struct xfs_dinode
union {
xfs_bmdr_block_t di_bmbt; /* btree root block */
xfs_bmbt_rec_32_t di_bmx[1]; /* extent list */
- xfs_dir_shortform_t di_dirsf; /* shortform directory */
xfs_dir2_sf_t di_dir2sf; /* shortform directory v2 */
char di_c[1]; /* local contents */
xfs_dev_t di_dev; /* device for S_IFCHR/S_IFBLK */
@@ -257,6 +256,7 @@ typedef enum xfs_dinode_fmt
#define XFS_DIFLAG_NOSYMLINKS_BIT 10 /* disallow symlink creation */
#define XFS_DIFLAG_EXTSIZE_BIT 11 /* inode extent size allocator hint */
#define XFS_DIFLAG_EXTSZINHERIT_BIT 12 /* inherit inode extent size */
+#define XFS_DIFLAG_NODEFRAG_BIT 13 /* do not reorganize/defragment */
#define XFS_DIFLAG_REALTIME (1 << XFS_DIFLAG_REALTIME_BIT)
#define XFS_DIFLAG_PREALLOC (1 << XFS_DIFLAG_PREALLOC_BIT)
#define XFS_DIFLAG_NEWRTBM (1 << XFS_DIFLAG_NEWRTBM_BIT)
@@ -270,12 +270,13 @@ typedef enum xfs_dinode_fmt
#define XFS_DIFLAG_NOSYMLINKS (1 << XFS_DIFLAG_NOSYMLINKS_BIT)
#define XFS_DIFLAG_EXTSIZE (1 << XFS_DIFLAG_EXTSIZE_BIT)
#define XFS_DIFLAG_EXTSZINHERIT (1 << XFS_DIFLAG_EXTSZINHERIT_BIT)
+#define XFS_DIFLAG_NODEFRAG (1 << XFS_DIFLAG_NODEFRAG_BIT)
#define XFS_DIFLAG_ANY \
(XFS_DIFLAG_REALTIME | XFS_DIFLAG_PREALLOC | XFS_DIFLAG_NEWRTBM | \
XFS_DIFLAG_IMMUTABLE | XFS_DIFLAG_APPEND | XFS_DIFLAG_SYNC | \
XFS_DIFLAG_NOATIME | XFS_DIFLAG_NODUMP | XFS_DIFLAG_RTINHERIT | \
XFS_DIFLAG_PROJINHERIT | XFS_DIFLAG_NOSYMLINKS | XFS_DIFLAG_EXTSIZE | \
- XFS_DIFLAG_EXTSZINHERIT)
+ XFS_DIFLAG_EXTSZINHERIT | XFS_DIFLAG_NODEFRAG)
#endif /* __XFS_DINODE_H__ */
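Like the other DIFLAG bits, the new flag would be tested against the flags word of the incore inode. A hypothetical helper, for illustration only (the incore ip->i_d copy is kept in host byte order, so no endian conversion is needed):

static inline int
sketch_inode_is_nodefrag(struct xfs_inode *ip)
{
	return (ip->i_d.di_flags & XFS_DIFLAG_NODEFRAG) != 0;
}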
diff --git a/fs/xfs/xfs_dir.c b/fs/xfs/xfs_dir.c
deleted file mode 100644
index 9cc702a839a..00000000000
--- a/fs/xfs/xfs_dir.c
+++ /dev/null
@@ -1,1217 +0,0 @@
-/*
- * Copyright (c) 2000-2001,2005 Silicon Graphics, Inc.
- * All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it would be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
- */
-#include "xfs.h"
-#include "xfs_fs.h"
-#include "xfs_types.h"
-#include "xfs_log.h"
-#include "xfs_inum.h"
-#include "xfs_trans.h"
-#include "xfs_sb.h"
-#include "xfs_dir.h"
-#include "xfs_dir2.h"
-#include "xfs_dmapi.h"
-#include "xfs_mount.h"
-#include "xfs_da_btree.h"
-#include "xfs_bmap_btree.h"
-#include "xfs_alloc_btree.h"
-#include "xfs_ialloc_btree.h"
-#include "xfs_alloc.h"
-#include "xfs_btree.h"
-#include "xfs_dir_sf.h"
-#include "xfs_dir2_sf.h"
-#include "xfs_attr_sf.h"
-#include "xfs_dinode.h"
-#include "xfs_inode.h"
-#include "xfs_bmap.h"
-#include "xfs_dir_leaf.h"
-#include "xfs_error.h"
-
-/*
- * xfs_dir.c
- *
- * Provide the external interfaces to manage directories.
- */
-
-/*========================================================================
- * Function prototypes for the kernel.
- *========================================================================*/
-
-/*
- * Functions for the dirops interfaces.
- */
-static void xfs_dir_mount(struct xfs_mount *mp);
-
-static int xfs_dir_isempty(struct xfs_inode *dp);
-
-static int xfs_dir_init(struct xfs_trans *trans,
- struct xfs_inode *dir,
- struct xfs_inode *parent_dir);
-
-static int xfs_dir_createname(struct xfs_trans *trans,
- struct xfs_inode *dp,
- char *name_string,
- int name_len,
- xfs_ino_t inode_number,
- xfs_fsblock_t *firstblock,
- xfs_bmap_free_t *flist,
- xfs_extlen_t total);
-
-static int xfs_dir_lookup(struct xfs_trans *tp,
- struct xfs_inode *dp,
- char *name_string,
- int name_length,
- xfs_ino_t *inode_number);
-
-static int xfs_dir_removename(struct xfs_trans *trans,
- struct xfs_inode *dp,
- char *name_string,
- int name_length,
- xfs_ino_t ino,
- xfs_fsblock_t *firstblock,
- xfs_bmap_free_t *flist,
- xfs_extlen_t total);
-
-static int xfs_dir_getdents(struct xfs_trans *tp,
- struct xfs_inode *dp,
- struct uio *uiop,
- int *eofp);
-
-static int xfs_dir_replace(struct xfs_trans *tp,
- struct xfs_inode *dp,
- char *name_string,
- int name_length,
- xfs_ino_t inode_number,
- xfs_fsblock_t *firstblock,
- xfs_bmap_free_t *flist,
- xfs_extlen_t total);
-
-static int xfs_dir_canenter(struct xfs_trans *tp,
- struct xfs_inode *dp,
- char *name_string,
- int name_length);
-
-static int xfs_dir_shortform_validate_ondisk(xfs_mount_t *mp,
- xfs_dinode_t *dip);
-
-xfs_dirops_t xfsv1_dirops = {
- .xd_mount = xfs_dir_mount,
- .xd_isempty = xfs_dir_isempty,
- .xd_init = xfs_dir_init,
- .xd_createname = xfs_dir_createname,
- .xd_lookup = xfs_dir_lookup,
- .xd_removename = xfs_dir_removename,
- .xd_getdents = xfs_dir_getdents,
- .xd_replace = xfs_dir_replace,
- .xd_canenter = xfs_dir_canenter,
- .xd_shortform_validate_ondisk = xfs_dir_shortform_validate_ondisk,
- .xd_shortform_to_single = xfs_dir_shortform_to_leaf,
-};
-
-/*
- * Internal routines when dirsize == XFS_LBSIZE(mp).
- */
-STATIC int xfs_dir_leaf_lookup(xfs_da_args_t *args);
-STATIC int xfs_dir_leaf_removename(xfs_da_args_t *args, int *number_entries,
- int *total_namebytes);
-STATIC int xfs_dir_leaf_getdents(xfs_trans_t *trans, xfs_inode_t *dp,
- uio_t *uio, int *eofp,
- xfs_dirent_t *dbp,
- xfs_dir_put_t put);
-STATIC int xfs_dir_leaf_replace(xfs_da_args_t *args);
-
-/*
- * Internal routines when dirsize > XFS_LBSIZE(mp).
- */
-STATIC int xfs_dir_node_addname(xfs_da_args_t *args);
-STATIC int xfs_dir_node_lookup(xfs_da_args_t *args);
-STATIC int xfs_dir_node_removename(xfs_da_args_t *args);
-STATIC int xfs_dir_node_getdents(xfs_trans_t *trans, xfs_inode_t *dp,
- uio_t *uio, int *eofp,
- xfs_dirent_t *dbp,
- xfs_dir_put_t put);
-STATIC int xfs_dir_node_replace(xfs_da_args_t *args);
-
-#if defined(XFS_DIR_TRACE)
-ktrace_t *xfs_dir_trace_buf;
-#endif
-
-
-/*========================================================================
- * Overall external interface routines.
- *========================================================================*/
-
-xfs_dahash_t xfs_dir_hash_dot, xfs_dir_hash_dotdot;
-
-/*
- * One-time startup routine called from xfs_init().
- */
-void
-xfs_dir_startup(void)
-{
- xfs_dir_hash_dot = xfs_da_hashname(".", 1);
- xfs_dir_hash_dotdot = xfs_da_hashname("..", 2);
-}
-
-/*
- * Initialize directory-related fields in the mount structure.
- */
-static void
-xfs_dir_mount(xfs_mount_t *mp)
-{
- uint shortcount, leafcount, count;
-
- mp->m_dirversion = 1;
- if (!(mp->m_flags & XFS_MOUNT_ATTR2)) {
- shortcount = (mp->m_attroffset -
- (uint)sizeof(xfs_dir_sf_hdr_t)) /
- (uint)sizeof(xfs_dir_sf_entry_t);
- leafcount = (XFS_LBSIZE(mp) -
- (uint)sizeof(xfs_dir_leaf_hdr_t)) /
- ((uint)sizeof(xfs_dir_leaf_entry_t) +
- (uint)sizeof(xfs_dir_leaf_name_t));
- } else {
- shortcount = (XFS_BMDR_SPACE_CALC(MINABTPTRS) -
- (uint)sizeof(xfs_dir_sf_hdr_t)) /
- (uint)sizeof(xfs_dir_sf_entry_t);
- leafcount = (XFS_LBSIZE(mp) -
- (uint)sizeof(xfs_dir_leaf_hdr_t)) /
- ((uint)sizeof(xfs_dir_leaf_entry_t) +
- (uint)sizeof(xfs_dir_leaf_name_t));
- }
- count = shortcount > leafcount ? shortcount : leafcount;
- mp->m_dircook_elog = xfs_da_log2_roundup(count + 1);
- ASSERT(mp->m_dircook_elog <= mp->m_sb.sb_blocklog);
- mp->m_dir_node_ents = mp->m_attr_node_ents =
- (XFS_LBSIZE(mp) - (uint)sizeof(xfs_da_node_hdr_t)) /
- (uint)sizeof(xfs_da_node_entry_t);
- mp->m_dir_magicpct = (XFS_LBSIZE(mp) * 37) / 100;
- mp->m_dirblksize = mp->m_sb.sb_blocksize;
- mp->m_dirblkfsbs = 1;
-}
-
-/*
- * Return 1 if directory contains only "." and "..".
- */
-static int
-xfs_dir_isempty(xfs_inode_t *dp)
-{
- xfs_dir_sf_hdr_t *hdr;
-
- ASSERT((dp->i_d.di_mode & S_IFMT) == S_IFDIR);
- if (dp->i_d.di_size == 0)
- return(1);
- if (dp->i_d.di_size > XFS_IFORK_DSIZE(dp))
- return(0);
- hdr = (xfs_dir_sf_hdr_t *)dp->i_df.if_u1.if_data;
- return(hdr->count == 0);
-}
-
-/*
- * Initialize a directory with its "." and ".." entries.
- */
-static int
-xfs_dir_init(xfs_trans_t *trans, xfs_inode_t *dir, xfs_inode_t *parent_dir)
-{
- xfs_da_args_t args;
- int error;
-
- memset((char *)&args, 0, sizeof(args));
- args.dp = dir;
- args.trans = trans;
-
- ASSERT((dir->i_d.di_mode & S_IFMT) == S_IFDIR);
- if ((error = xfs_dir_ino_validate(trans->t_mountp, parent_dir->i_ino)))
- return error;
-
- return(xfs_dir_shortform_create(&args, parent_dir->i_ino));
-}
-
-/*
- * Generic handler routine to add a name to a directory.
- * Transitions directory from shortform to Btree as necessary.
- */
-static int /* error */
-xfs_dir_createname(xfs_trans_t *trans, xfs_inode_t *dp, char *name,
- int namelen, xfs_ino_t inum, xfs_fsblock_t *firstblock,
- xfs_bmap_free_t *flist, xfs_extlen_t total)
-{
- xfs_da_args_t args;
- int retval, newsize, done;
-
- ASSERT((dp->i_d.di_mode & S_IFMT) == S_IFDIR);
-
- if ((retval = xfs_dir_ino_validate(trans->t_mountp, inum)))
- return (retval);
-
- XFS_STATS_INC(xs_dir_create);
- /*
- * Fill in the arg structure for this request.
- */
- args.name = name;
- args.namelen = namelen;
- args.hashval = xfs_da_hashname(name, namelen);
- args.inumber = inum;
- args.dp = dp;
- args.firstblock = firstblock;
- args.flist = flist;
- args.total = total;
- args.whichfork = XFS_DATA_FORK;
- args.trans = trans;
- args.justcheck = 0;
- args.addname = args.oknoent = 1;
-
- /*
- * Decide on what work routines to call based on the inode size.
- */
- done = 0;
- if (dp->i_d.di_format == XFS_DINODE_FMT_LOCAL) {
- newsize = XFS_DIR_SF_ENTSIZE_BYNAME(args.namelen);
- if ((dp->i_d.di_size + newsize) <= XFS_IFORK_DSIZE(dp)) {
- retval = xfs_dir_shortform_addname(&args);
- done = 1;
- } else {
- if (total == 0)
- return XFS_ERROR(ENOSPC);
- retval = xfs_dir_shortform_to_leaf(&args);
- done = retval != 0;
- }
- }
- if (!done && xfs_bmap_one_block(dp, XFS_DATA_FORK)) {
- retval = xfs_dir_leaf_addname(&args);
- done = retval != ENOSPC;
- if (!done) {
- if (total == 0)
- return XFS_ERROR(ENOSPC);
- retval = xfs_dir_leaf_to_node(&args);
- done = retval != 0;
- }
- }
- if (!done) {
- retval = xfs_dir_node_addname(&args);
- }
- return(retval);
-}
-
-/*
- * Generic handler routine to check if a name can be added to a directory,
- * without adding any blocks to the directory.
- */
-static int /* error */
-xfs_dir_canenter(xfs_trans_t *trans, xfs_inode_t *dp, char *name, int namelen)
-{
- xfs_da_args_t args;
- int retval, newsize;
-
- ASSERT((dp->i_d.di_mode & S_IFMT) == S_IFDIR);
- /*
- * Fill in the arg structure for this request.
- */
- args.name = name;
- args.namelen = namelen;
- args.hashval = xfs_da_hashname(name, namelen);
- args.inumber = 0;
- args.dp = dp;
- args.firstblock = NULL;
- args.flist = NULL;
- args.total = 0;
- args.whichfork = XFS_DATA_FORK;
- args.trans = trans;
- args.justcheck = args.addname = args.oknoent = 1;
-
- /*
- * Decide on what work routines to call based on the inode size.
- */
- if (dp->i_d.di_format == XFS_DINODE_FMT_LOCAL) {
- newsize = XFS_DIR_SF_ENTSIZE_BYNAME(args.namelen);
- if ((dp->i_d.di_size + newsize) <= XFS_IFORK_DSIZE(dp))
- retval = 0;
- else
- retval = XFS_ERROR(ENOSPC);
- } else if (xfs_bmap_one_block(dp, XFS_DATA_FORK)) {
- retval = xfs_dir_leaf_addname(&args);
- } else {
- retval = xfs_dir_node_addname(&args);
- }
- return(retval);
-}
-
-/*
- * Generic handler routine to remove a name from a directory.
- * Transitions directory from Btree to shortform as necessary.
- */
-static int /* error */
-xfs_dir_removename(xfs_trans_t *trans, xfs_inode_t *dp, char *name,
- int namelen, xfs_ino_t ino, xfs_fsblock_t *firstblock,
- xfs_bmap_free_t *flist, xfs_extlen_t total)
-{
- xfs_da_args_t args;
- int count, totallen, newsize, retval;
-
- ASSERT((dp->i_d.di_mode & S_IFMT) == S_IFDIR);
- XFS_STATS_INC(xs_dir_remove);
- /*
- * Fill in the arg structure for this request.
- */
- args.name = name;
- args.namelen = namelen;
- args.hashval = xfs_da_hashname(name, namelen);
- args.inumber = ino;
- args.dp = dp;
- args.firstblock = firstblock;
- args.flist = flist;
- args.total = total;
- args.whichfork = XFS_DATA_FORK;
- args.trans = trans;
- args.justcheck = args.addname = args.oknoent = 0;
-
- /*
- * Decide on what work routines to call based on the inode size.
- */
- if (dp->i_d.di_format == XFS_DINODE_FMT_LOCAL) {
- retval = xfs_dir_shortform_removename(&args);
- } else if (xfs_bmap_one_block(dp, XFS_DATA_FORK)) {
- retval = xfs_dir_leaf_removename(&args, &count, &totallen);
- if (retval == 0) {
- newsize = XFS_DIR_SF_ALLFIT(count, totallen);
- if (newsize <= XFS_IFORK_DSIZE(dp)) {
- retval = xfs_dir_leaf_to_shortform(&args);
- }
- }
- } else {
- retval = xfs_dir_node_removename(&args);
- }
- return(retval);
-}
-
-static int /* error */
-xfs_dir_lookup(xfs_trans_t *trans, xfs_inode_t *dp, char *name, int namelen,
- xfs_ino_t *inum)
-{
- xfs_da_args_t args;
- int retval;
-
- ASSERT((dp->i_d.di_mode & S_IFMT) == S_IFDIR);
-
- XFS_STATS_INC(xs_dir_lookup);
- /*
- * Fill in the arg structure for this request.
- */
- args.name = name;
- args.namelen = namelen;
- args.hashval = xfs_da_hashname(name, namelen);
- args.inumber = 0;
- args.dp = dp;
- args.firstblock = NULL;
- args.flist = NULL;
- args.total = 0;
- args.whichfork = XFS_DATA_FORK;
- args.trans = trans;
- args.justcheck = args.addname = 0;
- args.oknoent = 1;
-
- /*
- * Decide on what work routines to call based on the inode size.
- */
- if (dp->i_d.di_format == XFS_DINODE_FMT_LOCAL) {
- retval = xfs_dir_shortform_lookup(&args);
- } else if (xfs_bmap_one_block(dp, XFS_DATA_FORK)) {
- retval = xfs_dir_leaf_lookup(&args);
- } else {
- retval = xfs_dir_node_lookup(&args);
- }
- if (retval == EEXIST)
- retval = 0;
- *inum = args.inumber;
- return(retval);
-}
-
-/*
- * Implement readdir.
- */
-static int /* error */
-xfs_dir_getdents(xfs_trans_t *trans, xfs_inode_t *dp, uio_t *uio, int *eofp)
-{
- xfs_dirent_t *dbp;
- int alignment, retval;
- xfs_dir_put_t put;
-
- XFS_STATS_INC(xs_dir_getdents);
- ASSERT((dp->i_d.di_mode & S_IFMT) == S_IFDIR);
-
- /*
- * If our caller has given us a single contiguous memory buffer,
- * just work directly within that buffer. If it's in user memory,
- * lock it down first.
- */
- alignment = sizeof(xfs_off_t) - 1;
- if ((uio->uio_iovcnt == 1) &&
- (((__psint_t)uio->uio_iov[0].iov_base & alignment) == 0) &&
- ((uio->uio_iov[0].iov_len & alignment) == 0)) {
- dbp = NULL;
- put = xfs_dir_put_dirent64_direct;
- } else {
- dbp = kmem_alloc(sizeof(*dbp) + MAXNAMELEN, KM_SLEEP);
- put = xfs_dir_put_dirent64_uio;
- }
-
- /*
- * Decide on what work routines to call based on the inode size.
- */
- *eofp = 0;
-
- if (dp->i_d.di_format == XFS_DINODE_FMT_LOCAL) {
- retval = xfs_dir_shortform_getdents(dp, uio, eofp, dbp, put);
- } else if (xfs_bmap_one_block(dp, XFS_DATA_FORK)) {
- retval = xfs_dir_leaf_getdents(trans, dp, uio, eofp, dbp, put);
- } else {
- retval = xfs_dir_node_getdents(trans, dp, uio, eofp, dbp, put);
- }
- if (dbp != NULL)
- kmem_free(dbp, sizeof(*dbp) + MAXNAMELEN);
-
- return(retval);
-}
-
-static int /* error */
-xfs_dir_replace(xfs_trans_t *trans, xfs_inode_t *dp, char *name, int namelen,
- xfs_ino_t inum, xfs_fsblock_t *firstblock,
- xfs_bmap_free_t *flist, xfs_extlen_t total)
-{
- xfs_da_args_t args;
- int retval;
-
- ASSERT((dp->i_d.di_mode & S_IFMT) == S_IFDIR);
-
- if ((retval = xfs_dir_ino_validate(trans->t_mountp, inum)))
- return retval;
-
- /*
- * Fill in the arg structure for this request.
- */
- args.name = name;
- args.namelen = namelen;
- args.hashval = xfs_da_hashname(name, namelen);
- args.inumber = inum;
- args.dp = dp;
- args.firstblock = firstblock;
- args.flist = flist;
- args.total = total;
- args.whichfork = XFS_DATA_FORK;
- args.trans = trans;
- args.justcheck = args.addname = args.oknoent = 0;
-
- /*
- * Decide on what work routines to call based on the inode size.
- */
- if (dp->i_d.di_format == XFS_DINODE_FMT_LOCAL) {
- retval = xfs_dir_shortform_replace(&args);
- } else if (xfs_bmap_one_block(dp, XFS_DATA_FORK)) {
- retval = xfs_dir_leaf_replace(&args);
- } else {
- retval = xfs_dir_node_replace(&args);
- }
-
- return(retval);
-}
-
-static int
-xfs_dir_shortform_validate_ondisk(xfs_mount_t *mp, xfs_dinode_t *dp)
-{
- xfs_ino_t ino;
- int namelen_sum;
- int count;
- xfs_dir_shortform_t *sf;
- xfs_dir_sf_entry_t *sfe;
- int i;
-
-
-
- if ((INT_GET(dp->di_core.di_mode, ARCH_CONVERT) & S_IFMT) != S_IFDIR) {
- return 0;
- }
- if (INT_GET(dp->di_core.di_format, ARCH_CONVERT) != XFS_DINODE_FMT_LOCAL) {
- return 0;
- }
- if (INT_GET(dp->di_core.di_size, ARCH_CONVERT) < sizeof(sf->hdr)) {
- xfs_fs_cmn_err(CE_WARN, mp, "Invalid shortform size: dp 0x%p",
- dp);
- return 1;
- }
- sf = (xfs_dir_shortform_t *)(&dp->di_u.di_dirsf);
- ino = XFS_GET_DIR_INO8(sf->hdr.parent);
- if (xfs_dir_ino_validate(mp, ino))
- return 1;
-
- count = sf->hdr.count;
- if ((count < 0) || ((count * 10) > XFS_LITINO(mp))) {
- xfs_fs_cmn_err(CE_WARN, mp,
- "Invalid shortform count: dp 0x%p", dp);
- return(1);
- }
-
- if (count == 0) {
- return 0;
- }
-
- namelen_sum = 0;
- sfe = &sf->list[0];
- for (i = sf->hdr.count - 1; i >= 0; i--) {
- ino = XFS_GET_DIR_INO8(sfe->inumber);
- xfs_dir_ino_validate(mp, ino);
- if (sfe->namelen >= XFS_LITINO(mp)) {
- xfs_fs_cmn_err(CE_WARN, mp,
- "Invalid shortform namelen: dp 0x%p", dp);
- return 1;
- }
- namelen_sum += sfe->namelen;
- sfe = XFS_DIR_SF_NEXTENTRY(sfe);
- }
- if (namelen_sum >= XFS_LITINO(mp)) {
- xfs_fs_cmn_err(CE_WARN, mp,
- "Invalid shortform namelen: dp 0x%p", dp);
- return 1;
- }
-
- return 0;
-}
-
-/*========================================================================
- * External routines when dirsize == XFS_LBSIZE(dp->i_mount).
- *========================================================================*/
-
-/*
- * Add a name to the leaf directory structure
- * This is the external routine.
- */
-int
-xfs_dir_leaf_addname(xfs_da_args_t *args)
-{
- int index, retval;
- xfs_dabuf_t *bp;
-
- retval = xfs_da_read_buf(args->trans, args->dp, 0, -1, &bp,
- XFS_DATA_FORK);
- if (retval)
- return(retval);
- ASSERT(bp != NULL);
-
- retval = xfs_dir_leaf_lookup_int(bp, args, &index);
- if (retval == ENOENT)
- retval = xfs_dir_leaf_add(bp, args, index);
- xfs_da_buf_done(bp);
- return(retval);
-}
-
-/*
- * Remove a name from the leaf directory structure
- * This is the external routine.
- */
-STATIC int
-xfs_dir_leaf_removename(xfs_da_args_t *args, int *count, int *totallen)
-{
- xfs_dir_leafblock_t *leaf;
- int index, retval;
- xfs_dabuf_t *bp;
-
- retval = xfs_da_read_buf(args->trans, args->dp, 0, -1, &bp,
- XFS_DATA_FORK);
- if (retval)
- return(retval);
- ASSERT(bp != NULL);
- leaf = bp->data;
- ASSERT(be16_to_cpu(leaf->hdr.info.magic) == XFS_DIR_LEAF_MAGIC);
- retval = xfs_dir_leaf_lookup_int(bp, args, &index);
- if (retval == EEXIST) {
- (void)xfs_dir_leaf_remove(args->trans, bp, index);
- *count = INT_GET(leaf->hdr.count, ARCH_CONVERT);
- *totallen = INT_GET(leaf->hdr.namebytes, ARCH_CONVERT);
- retval = 0;
- }
- xfs_da_buf_done(bp);
- return(retval);
-}
-
-/*
- * Look up a name in a leaf directory structure.
- * This is the external routine.
- */
-STATIC int
-xfs_dir_leaf_lookup(xfs_da_args_t *args)
-{
- int index, retval;
- xfs_dabuf_t *bp;
-
- retval = xfs_da_read_buf(args->trans, args->dp, 0, -1, &bp,
- XFS_DATA_FORK);
- if (retval)
- return(retval);
- ASSERT(bp != NULL);
- retval = xfs_dir_leaf_lookup_int(bp, args, &index);
- xfs_da_brelse(args->trans, bp);
- return(retval);
-}
-
-/*
- * Copy out directory entries for getdents(), for leaf directories.
- */
-STATIC int
-xfs_dir_leaf_getdents(xfs_trans_t *trans, xfs_inode_t *dp, uio_t *uio,
- int *eofp, xfs_dirent_t *dbp, xfs_dir_put_t put)
-{
- xfs_dabuf_t *bp;
- int retval, eob;
-
- retval = xfs_da_read_buf(dp->i_transp, dp, 0, -1, &bp, XFS_DATA_FORK);
- if (retval)
- return(retval);
- ASSERT(bp != NULL);
- retval = xfs_dir_leaf_getdents_int(bp, dp, 0, uio, &eob, dbp, put, -1);
- xfs_da_brelse(trans, bp);
- *eofp = (eob == 0);
- return(retval);
-}
-
-/*
- * Look up a name in a leaf directory structure, replace the inode number.
- * This is the external routine.
- */
-STATIC int
-xfs_dir_leaf_replace(xfs_da_args_t *args)
-{
- int index, retval;
- xfs_dabuf_t *bp;
- xfs_ino_t inum;
- xfs_dir_leafblock_t *leaf;
- xfs_dir_leaf_entry_t *entry;
- xfs_dir_leaf_name_t *namest;
-
- inum = args->inumber;
- retval = xfs_da_read_buf(args->trans, args->dp, 0, -1, &bp,
- XFS_DATA_FORK);
- if (retval)
- return(retval);
- ASSERT(bp != NULL);
- retval = xfs_dir_leaf_lookup_int(bp, args, &index);
- if (retval == EEXIST) {
- leaf = bp->data;
- entry = &leaf->entries[index];
- namest = XFS_DIR_LEAF_NAMESTRUCT(leaf, INT_GET(entry->nameidx, ARCH_CONVERT));
- /* XXX - replace assert? */
- XFS_DIR_SF_PUT_DIRINO(&inum, &namest->inumber);
- xfs_da_log_buf(args->trans, bp,
- XFS_DA_LOGRANGE(leaf, namest, sizeof(namest->inumber)));
- xfs_da_buf_done(bp);
- retval = 0;
- } else
- xfs_da_brelse(args->trans, bp);
- return(retval);
-}
-
-
-/*========================================================================
- * External routines when dirsize > XFS_LBSIZE(mp).
- *========================================================================*/
-
-/*
- * Add a name to a Btree-format directory.
- *
- * This will involve walking down the Btree, and may involve splitting
- * leaf nodes and even splitting intermediate nodes up to and including
- * the root node (a special case of an intermediate node).
- */
-STATIC int
-xfs_dir_node_addname(xfs_da_args_t *args)
-{
- xfs_da_state_t *state;
- xfs_da_state_blk_t *blk;
- int retval, error;
-
- /*
- * Fill in bucket of arguments/results/context to carry around.
- */
- state = xfs_da_state_alloc();
- state->args = args;
- state->mp = args->dp->i_mount;
- state->blocksize = state->mp->m_sb.sb_blocksize;
- state->node_ents = state->mp->m_dir_node_ents;
-
- /*
- * Search to see if name already exists, and get back a pointer
- * to where it should go.
- */
- error = xfs_da_node_lookup_int(state, &retval);
- if (error)
- retval = error;
- if (retval != ENOENT)
- goto error;
- blk = &state->path.blk[ state->path.active-1 ];
- ASSERT(blk->magic == XFS_DIR_LEAF_MAGIC);
- retval = xfs_dir_leaf_add(blk->bp, args, blk->index);
- if (retval == 0) {
- /*
- * Addition succeeded, update Btree hashvals.
- */
- if (!args->justcheck)
- xfs_da_fixhashpath(state, &state->path);
- } else {
- /*
- * Addition failed, split as many Btree elements as required.
- */
- if (args->total == 0) {
- ASSERT(retval == ENOSPC);
- goto error;
- }
- retval = xfs_da_split(state);
- }
-error:
- xfs_da_state_free(state);
-
- return(retval);
-}
-
-/*
- * Remove a name from a B-tree directory.
- *
- * This will involve walking down the Btree, and may involve joining
- * leaf nodes and even joining intermediate nodes up to and including
- * the root node (a special case of an intermediate node).
- */
-STATIC int
-xfs_dir_node_removename(xfs_da_args_t *args)
-{
- xfs_da_state_t *state;
- xfs_da_state_blk_t *blk;
- int retval, error;
-
- state = xfs_da_state_alloc();
- state->args = args;
- state->mp = args->dp->i_mount;
- state->blocksize = state->mp->m_sb.sb_blocksize;
- state->node_ents = state->mp->m_dir_node_ents;
-
- /*
- * Search to see if name exists, and get back a pointer to it.
- */
- error = xfs_da_node_lookup_int(state, &retval);
- if (error)
- retval = error;
- if (retval != EEXIST) {
- xfs_da_state_free(state);
- return(retval);
- }
-
- /*
- * Remove the name and update the hashvals in the tree.
- */
- blk = &state->path.blk[ state->path.active-1 ];
- ASSERT(blk->magic == XFS_DIR_LEAF_MAGIC);
- retval = xfs_dir_leaf_remove(args->trans, blk->bp, blk->index);
- xfs_da_fixhashpath(state, &state->path);
-
- /*
- * Check to see if the tree needs to be collapsed.
- */
- error = 0;
- if (retval) {
- error = xfs_da_join(state);
- }
-
- xfs_da_state_free(state);
- if (error)
- return(error);
- return(0);
-}
-
-/*
- * Look up a filename in a int directory.
- * Use an internal routine to actually do all the work.
- */
-STATIC int
-xfs_dir_node_lookup(xfs_da_args_t *args)
-{
- xfs_da_state_t *state;
- int retval, error, i;
-
- state = xfs_da_state_alloc();
- state->args = args;
- state->mp = args->dp->i_mount;
- state->blocksize = state->mp->m_sb.sb_blocksize;
- state->node_ents = state->mp->m_dir_node_ents;
-
- /*
- * Search to see if name exists,
- * and get back a pointer to it.
- */
- error = xfs_da_node_lookup_int(state, &retval);
- if (error) {
- retval = error;
- }
-
- /*
- * If not in a transaction, we have to release all the buffers.
- */
- for (i = 0; i < state->path.active; i++) {
- xfs_da_brelse(args->trans, state->path.blk[i].bp);
- state->path.blk[i].bp = NULL;
- }
-
- xfs_da_state_free(state);
- return(retval);
-}
-
-STATIC int
-xfs_dir_node_getdents(xfs_trans_t *trans, xfs_inode_t *dp, uio_t *uio,
- int *eofp, xfs_dirent_t *dbp, xfs_dir_put_t put)
-{
- xfs_da_intnode_t *node;
- xfs_da_node_entry_t *btree;
- xfs_dir_leafblock_t *leaf = NULL;
- xfs_dablk_t bno, nextbno;
- xfs_dahash_t cookhash;
- xfs_mount_t *mp;
- int error, eob, i;
- xfs_dabuf_t *bp;
- xfs_daddr_t nextda;
-
- /*
- * Pick up our context.
- */
- mp = dp->i_mount;
- bp = NULL;
- bno = XFS_DA_COOKIE_BNO(mp, uio->uio_offset);
- cookhash = XFS_DA_COOKIE_HASH(mp, uio->uio_offset);
-
- xfs_dir_trace_g_du("node: start", dp, uio);
-
- /*
- * Re-find our place, even if we're confused about what our place is.
- *
- * First we check the block number from the magic cookie, it is a
- * cache of where we ended last time. If we find a leaf block, and
- * the starting hashval in that block is less than our desired
- * hashval, then we run with it.
- */
- if (bno > 0) {
- error = xfs_da_read_buf(trans, dp, bno, -2, &bp, XFS_DATA_FORK);
- if ((error != 0) && (error != EFSCORRUPTED))
- return(error);
- if (bp)
- leaf = bp->data;
- if (bp && be16_to_cpu(leaf->hdr.info.magic) != XFS_DIR_LEAF_MAGIC) {
- xfs_dir_trace_g_dub("node: block not a leaf",
- dp, uio, bno);
- xfs_da_brelse(trans, bp);
- bp = NULL;
- }
- if (bp && INT_GET(leaf->entries[0].hashval, ARCH_CONVERT) > cookhash) {
- xfs_dir_trace_g_dub("node: leaf hash too large",
- dp, uio, bno);
- xfs_da_brelse(trans, bp);
- bp = NULL;
- }
- if (bp &&
- cookhash > INT_GET(leaf->entries[INT_GET(leaf->hdr.count, ARCH_CONVERT) - 1].hashval, ARCH_CONVERT)) {
- xfs_dir_trace_g_dub("node: leaf hash too small",
- dp, uio, bno);
- xfs_da_brelse(trans, bp);
- bp = NULL;
- }
- }
-
- /*
- * If we did not find a leaf block from the blockno in the cookie,
- * or we there was no blockno in the cookie (eg: first time thru),
- * the we start at the top of the Btree and re-find our hashval.
- */
- if (bp == NULL) {
- xfs_dir_trace_g_du("node: start at root" , dp, uio);
- bno = 0;
- for (;;) {
- error = xfs_da_read_buf(trans, dp, bno, -1, &bp,
- XFS_DATA_FORK);
- if (error)
- return(error);
- if (bp == NULL)
- return(XFS_ERROR(EFSCORRUPTED));
- node = bp->data;
- if (be16_to_cpu(node->hdr.info.magic) != XFS_DA_NODE_MAGIC)
- break;
- btree = &node->btree[0];
- xfs_dir_trace_g_dun("node: node detail", dp, uio, node);
- for (i = 0; i < be16_to_cpu(node->hdr.count); btree++, i++) {
- if (be32_to_cpu(btree->hashval) >= cookhash) {
- bno = be32_to_cpu(btree->before);
- break;
- }
- }
- if (i == be16_to_cpu(node->hdr.count)) {
- xfs_da_brelse(trans, bp);
- xfs_dir_trace_g_du("node: hash beyond EOF",
- dp, uio);
- uio->uio_offset = XFS_DA_MAKE_COOKIE(mp, 0, 0,
- XFS_DA_MAXHASH);
- *eofp = 1;
- return(0);
- }
- xfs_dir_trace_g_dub("node: going to block",
- dp, uio, bno);
- xfs_da_brelse(trans, bp);
- }
- }
- ASSERT(cookhash != XFS_DA_MAXHASH);
-
- /*
- * We've dropped down to the (first) leaf block that contains the
- * hashval we are interested in. Continue rolling upward thru the
- * leaf blocks until we fill up our buffer.
- */
- for (;;) {
- leaf = bp->data;
- if (unlikely(be16_to_cpu(leaf->hdr.info.magic) != XFS_DIR_LEAF_MAGIC)) {
- xfs_dir_trace_g_dul("node: not a leaf", dp, uio, leaf);
- xfs_da_brelse(trans, bp);
- XFS_CORRUPTION_ERROR("xfs_dir_node_getdents(1)",
- XFS_ERRLEVEL_LOW, mp, leaf);
- return XFS_ERROR(EFSCORRUPTED);
- }
- xfs_dir_trace_g_dul("node: leaf detail", dp, uio, leaf);
- if ((nextbno = be32_to_cpu(leaf->hdr.info.forw))) {
- nextda = xfs_da_reada_buf(trans, dp, nextbno,
- XFS_DATA_FORK);
- } else
- nextda = -1;
- error = xfs_dir_leaf_getdents_int(bp, dp, bno, uio, &eob, dbp,
- put, nextda);
- xfs_da_brelse(trans, bp);
- bno = nextbno;
- if (eob) {
- xfs_dir_trace_g_dub("node: E-O-B", dp, uio, bno);
- *eofp = 0;
- return(error);
- }
- if (bno == 0)
- break;
- error = xfs_da_read_buf(trans, dp, bno, nextda, &bp,
- XFS_DATA_FORK);
- if (error)
- return(error);
- if (unlikely(bp == NULL)) {
- XFS_ERROR_REPORT("xfs_dir_node_getdents(2)",
- XFS_ERRLEVEL_LOW, mp);
- return(XFS_ERROR(EFSCORRUPTED));
- }
- }
- *eofp = 1;
- xfs_dir_trace_g_du("node: E-O-F", dp, uio);
- return(0);
-}
-
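The getdents path deleted above re-finds its place from the "magic cookie" stashed in uio_offset: the cookie caches a (block number, entry, hashval) triple, so a later readdir call can seek straight back to the cached leaf block and only falls back to a full Btree descent when that block no longer matches. A minimal sketch of the packing idea, with hypothetical field widths rather than the real XFS_DA_MAKE_COOKIE/XFS_DA_COOKIE_* layout:

#include <stdint.h>

/* Hypothetical 64-bit cookie: 22 bits of block, 10 bits of entry, 32 bits
 * of hashval.  Illustrative only; not the kernel macros. */
static uint64_t make_cookie(uint32_t bno, uint32_t entry, uint32_t hash)
{
	return ((uint64_t)(bno & 0x3fffff) << 42) |
	       ((uint64_t)(entry & 0x3ff) << 32) | hash;
}

static uint32_t cookie_bno(uint64_t cookie)   { return (uint32_t)(cookie >> 42); }
static uint32_t cookie_entry(uint64_t cookie) { return (uint32_t)(cookie >> 32) & 0x3ff; }
static uint32_t cookie_hash(uint64_t cookie)  { return (uint32_t)cookie; }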
-/*
- * Look up a filename in an int directory, replace the inode number.
- * Use an internal routine to actually do the lookup.
- */
-STATIC int
-xfs_dir_node_replace(xfs_da_args_t *args)
-{
- xfs_da_state_t *state;
- xfs_da_state_blk_t *blk;
- xfs_dir_leafblock_t *leaf;
- xfs_dir_leaf_entry_t *entry;
- xfs_dir_leaf_name_t *namest;
- xfs_ino_t inum;
- int retval, error, i;
- xfs_dabuf_t *bp;
-
- state = xfs_da_state_alloc();
- state->args = args;
- state->mp = args->dp->i_mount;
- state->blocksize = state->mp->m_sb.sb_blocksize;
- state->node_ents = state->mp->m_dir_node_ents;
- inum = args->inumber;
-
- /*
- * Search to see if name exists,
- * and get back a pointer to it.
- */
- error = xfs_da_node_lookup_int(state, &retval);
- if (error) {
- retval = error;
- }
-
- if (retval == EEXIST) {
- blk = &state->path.blk[state->path.active - 1];
- ASSERT(blk->magic == XFS_DIR_LEAF_MAGIC);
- bp = blk->bp;
- leaf = bp->data;
- entry = &leaf->entries[blk->index];
- namest = XFS_DIR_LEAF_NAMESTRUCT(leaf, INT_GET(entry->nameidx, ARCH_CONVERT));
- /* XXX - replace assert ? */
- XFS_DIR_SF_PUT_DIRINO(&inum, &namest->inumber);
- xfs_da_log_buf(args->trans, bp,
- XFS_DA_LOGRANGE(leaf, namest, sizeof(namest->inumber)));
- xfs_da_buf_done(bp);
- blk->bp = NULL;
- retval = 0;
- } else {
- i = state->path.active - 1;
- xfs_da_brelse(args->trans, state->path.blk[i].bp);
- state->path.blk[i].bp = NULL;
- }
- for (i = 0; i < state->path.active - 1; i++) {
- xfs_da_brelse(args->trans, state->path.blk[i].bp);
- state->path.blk[i].bp = NULL;
- }
-
- xfs_da_state_free(state);
- return(retval);
-}
-
-#if defined(XFS_DIR_TRACE)
-/*
- * Add a trace buffer entry for an inode and a uio.
- */
-void
-xfs_dir_trace_g_du(char *where, xfs_inode_t *dp, uio_t *uio)
-{
- xfs_dir_trace_enter(XFS_DIR_KTRACE_G_DU, where,
- (void *)dp, (void *)dp->i_mount,
- (void *)((unsigned long)(uio->uio_offset >> 32)),
- (void *)((unsigned long)(uio->uio_offset & 0xFFFFFFFF)),
- (void *)(unsigned long)uio->uio_resid,
- NULL, NULL, NULL, NULL, NULL, NULL, NULL);
-}
-
-/*
- * Add a trace buffer entry for an inode and a uio.
- */
-void
-xfs_dir_trace_g_dub(char *where, xfs_inode_t *dp, uio_t *uio, xfs_dablk_t bno)
-{
- xfs_dir_trace_enter(XFS_DIR_KTRACE_G_DUB, where,
- (void *)dp, (void *)dp->i_mount,
- (void *)((unsigned long)(uio->uio_offset >> 32)),
- (void *)((unsigned long)(uio->uio_offset & 0xFFFFFFFF)),
- (void *)(unsigned long)uio->uio_resid,
- (void *)(unsigned long)bno,
- NULL, NULL, NULL, NULL, NULL, NULL);
-}
-
-/*
- * Add a trace buffer entry for an inode and a uio.
- */
-void
-xfs_dir_trace_g_dun(char *where, xfs_inode_t *dp, uio_t *uio,
- xfs_da_intnode_t *node)
-{
- int last = be16_to_cpu(node->hdr.count) - 1;
-
- xfs_dir_trace_enter(XFS_DIR_KTRACE_G_DUN, where,
- (void *)dp, (void *)dp->i_mount,
- (void *)((unsigned long)(uio->uio_offset >> 32)),
- (void *)((unsigned long)(uio->uio_offset & 0xFFFFFFFF)),
- (void *)(unsigned long)uio->uio_resid,
- (void *)(unsigned long)be32_to_cpu(node->hdr.info.forw),
- (void *)(unsigned long)
- be16_to_cpu(node->hdr.count),
- (void *)(unsigned long)
- be32_to_cpu(node->btree[0].hashval),
- (void *)(unsigned long)
- be32_to_cpu(node->btree[last].hashval),
- NULL, NULL, NULL);
-}
-
-/*
- * Add a trace buffer entry for an inode and a uio.
- */
-void
-xfs_dir_trace_g_dul(char *where, xfs_inode_t *dp, uio_t *uio,
- xfs_dir_leafblock_t *leaf)
-{
- int last = INT_GET(leaf->hdr.count, ARCH_CONVERT) - 1;
-
- xfs_dir_trace_enter(XFS_DIR_KTRACE_G_DUL, where,
- (void *)dp, (void *)dp->i_mount,
- (void *)((unsigned long)(uio->uio_offset >> 32)),
- (void *)((unsigned long)(uio->uio_offset & 0xFFFFFFFF)),
- (void *)(unsigned long)uio->uio_resid,
- (void *)(unsigned long)be32_to_cpu(leaf->hdr.info.forw),
- (void *)(unsigned long)
- INT_GET(leaf->hdr.count, ARCH_CONVERT),
- (void *)(unsigned long)
- INT_GET(leaf->entries[0].hashval, ARCH_CONVERT),
- (void *)(unsigned long)
- INT_GET(leaf->entries[last].hashval, ARCH_CONVERT),
- NULL, NULL, NULL);
-}
-
-/*
- * Add a trace buffer entry for an inode and a uio.
- */
-void
-xfs_dir_trace_g_due(char *where, xfs_inode_t *dp, uio_t *uio,
- xfs_dir_leaf_entry_t *entry)
-{
- xfs_dir_trace_enter(XFS_DIR_KTRACE_G_DUE, where,
- (void *)dp, (void *)dp->i_mount,
- (void *)((unsigned long)(uio->uio_offset >> 32)),
- (void *)((unsigned long)(uio->uio_offset & 0xFFFFFFFF)),
- (void *)(unsigned long)uio->uio_resid,
- (void *)(unsigned long)
- INT_GET(entry->hashval, ARCH_CONVERT),
- NULL, NULL, NULL, NULL, NULL, NULL);
-}
-
-/*
- * Add a trace buffer entry for an inode and a uio.
- */
-void
-xfs_dir_trace_g_duc(char *where, xfs_inode_t *dp, uio_t *uio, xfs_off_t cookie)
-{
- xfs_dir_trace_enter(XFS_DIR_KTRACE_G_DUC, where,
- (void *)dp, (void *)dp->i_mount,
- (void *)((unsigned long)(uio->uio_offset >> 32)),
- (void *)((unsigned long)(uio->uio_offset & 0xFFFFFFFF)),
- (void *)(unsigned long)uio->uio_resid,
- (void *)((unsigned long)(cookie >> 32)),
- (void *)((unsigned long)(cookie & 0xFFFFFFFF)),
- NULL, NULL, NULL, NULL, NULL);
-}
-
-/*
- * Add a trace buffer entry for the arguments given to the routine,
- * generic form.
- */
-void
-xfs_dir_trace_enter(int type, char *where,
- void * a0, void * a1,
- void * a2, void * a3,
- void * a4, void * a5,
- void * a6, void * a7,
- void * a8, void * a9,
- void * a10, void * a11)
-{
- ASSERT(xfs_dir_trace_buf);
- ktrace_enter(xfs_dir_trace_buf, (void *)(unsigned long)type,
- (void *)where,
- (void *)a0, (void *)a1, (void *)a2,
- (void *)a3, (void *)a4, (void *)a5,
- (void *)a6, (void *)a7, (void *)a8,
- (void *)a9, (void *)a10, (void *)a11,
- NULL, NULL);
-}
-#endif /* XFS_DIR_TRACE */
diff --git a/fs/xfs/xfs_dir.h b/fs/xfs/xfs_dir.h
deleted file mode 100644
index 8cc8afb9f6c..00000000000
--- a/fs/xfs/xfs_dir.h
+++ /dev/null
@@ -1,142 +0,0 @@
-/*
- * Copyright (c) 2000,2005 Silicon Graphics, Inc.
- * All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it would be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
- */
-#ifndef __XFS_DIR_H__
-#define __XFS_DIR_H__
-
-/*
- * Large directories are structured around Btrees where all the data
- * elements are in the leaf nodes. Filenames are hashed into an int,
- * then that int is used as the index into the Btree. Since the hashval
- * of a filename may not be unique, we may have duplicate keys. The
- * internal links in the Btree are logical block offsets into the file.
- *
- * Small directories use a different format and are packed as tightly
- * as possible so as to fit into the literal area of the inode.
- */
-
-/*========================================================================
- * Function prototypes for the kernel.
- *========================================================================*/
-
-struct uio;
-struct xfs_bmap_free;
-struct xfs_da_args;
-struct xfs_dinode;
-struct xfs_inode;
-struct xfs_mount;
-struct xfs_trans;
-
-/*
- * Directory function types.
- * Put in structures (xfs_dirops_t) for v1 and v2 directories.
- */
-typedef void (*xfs_dir_mount_t)(struct xfs_mount *mp);
-typedef int (*xfs_dir_isempty_t)(struct xfs_inode *dp);
-typedef int (*xfs_dir_init_t)(struct xfs_trans *tp,
- struct xfs_inode *dp,
- struct xfs_inode *pdp);
-typedef int (*xfs_dir_createname_t)(struct xfs_trans *tp,
- struct xfs_inode *dp,
- char *name,
- int namelen,
- xfs_ino_t inum,
- xfs_fsblock_t *first,
- struct xfs_bmap_free *flist,
- xfs_extlen_t total);
-typedef int (*xfs_dir_lookup_t)(struct xfs_trans *tp,
- struct xfs_inode *dp,
- char *name,
- int namelen,
- xfs_ino_t *inum);
-typedef int (*xfs_dir_removename_t)(struct xfs_trans *tp,
- struct xfs_inode *dp,
- char *name,
- int namelen,
- xfs_ino_t ino,
- xfs_fsblock_t *first,
- struct xfs_bmap_free *flist,
- xfs_extlen_t total);
-typedef int (*xfs_dir_getdents_t)(struct xfs_trans *tp,
- struct xfs_inode *dp,
- struct uio *uio,
- int *eofp);
-typedef int (*xfs_dir_replace_t)(struct xfs_trans *tp,
- struct xfs_inode *dp,
- char *name,
- int namelen,
- xfs_ino_t inum,
- xfs_fsblock_t *first,
- struct xfs_bmap_free *flist,
- xfs_extlen_t total);
-typedef int (*xfs_dir_canenter_t)(struct xfs_trans *tp,
- struct xfs_inode *dp,
- char *name,
- int namelen);
-typedef int (*xfs_dir_shortform_validate_ondisk_t)(struct xfs_mount *mp,
- struct xfs_dinode *dip);
-typedef int (*xfs_dir_shortform_to_single_t)(struct xfs_da_args *args);
-
-typedef struct xfs_dirops {
- xfs_dir_mount_t xd_mount;
- xfs_dir_isempty_t xd_isempty;
- xfs_dir_init_t xd_init;
- xfs_dir_createname_t xd_createname;
- xfs_dir_lookup_t xd_lookup;
- xfs_dir_removename_t xd_removename;
- xfs_dir_getdents_t xd_getdents;
- xfs_dir_replace_t xd_replace;
- xfs_dir_canenter_t xd_canenter;
- xfs_dir_shortform_validate_ondisk_t xd_shortform_validate_ondisk;
- xfs_dir_shortform_to_single_t xd_shortform_to_single;
-} xfs_dirops_t;
-
-/*
- * Overall external interface routines.
- */
-void xfs_dir_startup(void); /* called exactly once */
-
-#define XFS_DIR_MOUNT(mp) \
- ((mp)->m_dirops.xd_mount(mp))
-#define XFS_DIR_ISEMPTY(mp,dp) \
- ((mp)->m_dirops.xd_isempty(dp))
-#define XFS_DIR_INIT(mp,tp,dp,pdp) \
- ((mp)->m_dirops.xd_init(tp,dp,pdp))
-#define XFS_DIR_CREATENAME(mp,tp,dp,name,namelen,inum,first,flist,total) \
- ((mp)->m_dirops.xd_createname(tp,dp,name,namelen,inum,first,flist,\
- total))
-#define XFS_DIR_LOOKUP(mp,tp,dp,name,namelen,inum) \
- ((mp)->m_dirops.xd_lookup(tp,dp,name,namelen,inum))
-#define XFS_DIR_REMOVENAME(mp,tp,dp,name,namelen,ino,first,flist,total) \
- ((mp)->m_dirops.xd_removename(tp,dp,name,namelen,ino,first,flist,total))
-#define XFS_DIR_GETDENTS(mp,tp,dp,uio,eofp) \
- ((mp)->m_dirops.xd_getdents(tp,dp,uio,eofp))
-#define XFS_DIR_REPLACE(mp,tp,dp,name,namelen,inum,first,flist,total) \
- ((mp)->m_dirops.xd_replace(tp,dp,name,namelen,inum,first,flist,total))
-#define XFS_DIR_CANENTER(mp,tp,dp,name,namelen) \
- ((mp)->m_dirops.xd_canenter(tp,dp,name,namelen))
-#define XFS_DIR_SHORTFORM_VALIDATE_ONDISK(mp,dip) \
- ((mp)->m_dirops.xd_shortform_validate_ondisk(mp,dip))
-#define XFS_DIR_SHORTFORM_TO_SINGLE(mp,args) \
- ((mp)->m_dirops.xd_shortform_to_single(args))
-
-#define XFS_DIR_IS_V1(mp) ((mp)->m_dirversion == 1)
-#define XFS_DIR_IS_V2(mp) ((mp)->m_dirversion == 2)
-extern xfs_dirops_t xfsv1_dirops;
-extern xfs_dirops_t xfsv2_dirops;
-
-#endif /* __XFS_DIR_H__ */
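The header removed above defined xfs_dirops_t, a per-mount table of function pointers through which the XFS_DIR_* macros dispatched every directory operation to either the v1 or v2 implementation. With the v1 code gone, callers can use the xfs_dir_* routines directly. A minimal ops-vector sketch in the same spirit, using hypothetical names rather than the kernel types:

struct dir;				/* opaque directory handle */

struct dirops {
	int (*isempty)(struct dir *dp);
	int (*lookup)(struct dir *dp, const char *name, int namelen,
		      unsigned long long *inum);
};

struct mount {
	const struct dirops *m_dirops;	/* chosen once at mount time */
};

static inline int dir_lookup(struct mount *mp, struct dir *dp,
			     const char *name, int namelen,
			     unsigned long long *inum)
{
	return mp->m_dirops->lookup(dp, name, namelen, inum);
}

Replacing the vector with direct calls drops one pointer indirection per operation and lets the now-unreachable v1 routines be deleted outright, which is what the rest of this patch does.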
diff --git a/fs/xfs/xfs_dir2.c b/fs/xfs/xfs_dir2.c
index 022c8398ab6..8edbe1adb95 100644
--- a/fs/xfs/xfs_dir2.c
+++ b/fs/xfs/xfs_dir2.c
@@ -24,21 +24,18 @@
#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
-#include "xfs_dir.h"
#include "xfs_dir2.h"
#include "xfs_dmapi.h"
#include "xfs_mount.h"
#include "xfs_da_btree.h"
#include "xfs_bmap_btree.h"
#include "xfs_alloc_btree.h"
-#include "xfs_dir_sf.h"
#include "xfs_dir2_sf.h"
#include "xfs_attr_sf.h"
#include "xfs_dinode.h"
#include "xfs_inode.h"
#include "xfs_inode_item.h"
#include "xfs_bmap.h"
-#include "xfs_dir_leaf.h"
#include "xfs_dir2_data.h"
#include "xfs_dir2_leaf.h"
#include "xfs_dir2_block.h"
@@ -46,69 +43,14 @@
#include "xfs_dir2_trace.h"
#include "xfs_error.h"
-/*
- * Declarations for interface routines.
- */
-static void xfs_dir2_mount(xfs_mount_t *mp);
-static int xfs_dir2_isempty(xfs_inode_t *dp);
-static int xfs_dir2_init(xfs_trans_t *tp, xfs_inode_t *dp,
- xfs_inode_t *pdp);
-static int xfs_dir2_createname(xfs_trans_t *tp, xfs_inode_t *dp,
- char *name, int namelen, xfs_ino_t inum,
- xfs_fsblock_t *first,
- xfs_bmap_free_t *flist, xfs_extlen_t total);
-static int xfs_dir2_lookup(xfs_trans_t *tp, xfs_inode_t *dp, char *name,
- int namelen, xfs_ino_t *inum);
-static int xfs_dir2_removename(xfs_trans_t *tp, xfs_inode_t *dp,
- char *name, int namelen, xfs_ino_t ino,
- xfs_fsblock_t *first,
- xfs_bmap_free_t *flist, xfs_extlen_t total);
-static int xfs_dir2_getdents(xfs_trans_t *tp, xfs_inode_t *dp, uio_t *uio,
- int *eofp);
-static int xfs_dir2_replace(xfs_trans_t *tp, xfs_inode_t *dp, char *name,
- int namelen, xfs_ino_t inum,
- xfs_fsblock_t *first, xfs_bmap_free_t *flist,
- xfs_extlen_t total);
-static int xfs_dir2_canenter(xfs_trans_t *tp, xfs_inode_t *dp, char *name,
- int namelen);
-static int xfs_dir2_shortform_validate_ondisk(xfs_mount_t *mp,
- xfs_dinode_t *dip);
-
-/*
- * Utility routine declarations.
- */
static int xfs_dir2_put_dirent64_direct(xfs_dir2_put_args_t *pa);
static int xfs_dir2_put_dirent64_uio(xfs_dir2_put_args_t *pa);
-/*
- * Directory operations vector.
- */
-xfs_dirops_t xfsv2_dirops = {
- .xd_mount = xfs_dir2_mount,
- .xd_isempty = xfs_dir2_isempty,
- .xd_init = xfs_dir2_init,
- .xd_createname = xfs_dir2_createname,
- .xd_lookup = xfs_dir2_lookup,
- .xd_removename = xfs_dir2_removename,
- .xd_getdents = xfs_dir2_getdents,
- .xd_replace = xfs_dir2_replace,
- .xd_canenter = xfs_dir2_canenter,
- .xd_shortform_validate_ondisk = xfs_dir2_shortform_validate_ondisk,
- .xd_shortform_to_single = xfs_dir2_sf_to_block,
-};
-
-/*
- * Interface routines.
- */
-
-/*
- * Initialize directory-related fields in the mount structure.
- */
-static void
-xfs_dir2_mount(
- xfs_mount_t *mp) /* filesystem mount point */
+void
+xfs_dir_mount(
+ xfs_mount_t *mp)
{
- mp->m_dirversion = 2;
+ ASSERT(XFS_SB_VERSION_HASDIRV2(&mp->m_sb));
ASSERT((1 << (mp->m_sb.sb_blocklog + mp->m_sb.sb_dirblklog)) <=
XFS_MAX_BLOCKSIZE);
mp->m_dirblksize = 1 << (mp->m_sb.sb_blocklog + mp->m_sb.sb_dirblklog);
@@ -128,19 +70,15 @@ xfs_dir2_mount(
/*
* Return 1 if directory contains only "." and "..".
*/
-static int /* return code */
-xfs_dir2_isempty(
- xfs_inode_t *dp) /* incore inode structure */
+int
+xfs_dir_isempty(
+ xfs_inode_t *dp)
{
- xfs_dir2_sf_t *sfp; /* shortform directory structure */
+ xfs_dir2_sf_t *sfp;
ASSERT((dp->i_d.di_mode & S_IFMT) == S_IFDIR);
- /*
- * Might happen during shutdown.
- */
- if (dp->i_d.di_size == 0) {
+ if (dp->i_d.di_size == 0) /* might happen during shutdown. */
return 1;
- }
if (dp->i_d.di_size > XFS_IFORK_DSIZE(dp))
return 0;
sfp = (xfs_dir2_sf_t *)dp->i_df.if_u1.if_data;
@@ -148,53 +86,83 @@ xfs_dir2_isempty(
}
/*
+ * Validate a given inode number.
+ */
+int
+xfs_dir_ino_validate(
+ xfs_mount_t *mp,
+ xfs_ino_t ino)
+{
+ xfs_agblock_t agblkno;
+ xfs_agino_t agino;
+ xfs_agnumber_t agno;
+ int ino_ok;
+ int ioff;
+
+ agno = XFS_INO_TO_AGNO(mp, ino);
+ agblkno = XFS_INO_TO_AGBNO(mp, ino);
+ ioff = XFS_INO_TO_OFFSET(mp, ino);
+ agino = XFS_OFFBNO_TO_AGINO(mp, agblkno, ioff);
+ ino_ok =
+ agno < mp->m_sb.sb_agcount &&
+ agblkno < mp->m_sb.sb_agblocks &&
+ agblkno != 0 &&
+ ioff < (1 << mp->m_sb.sb_inopblog) &&
+ XFS_AGINO_TO_INO(mp, agno, agino) == ino;
+ if (unlikely(XFS_TEST_ERROR(!ino_ok, mp, XFS_ERRTAG_DIR_INO_VALIDATE,
+ XFS_RANDOM_DIR_INO_VALIDATE))) {
+ xfs_fs_cmn_err(CE_WARN, mp, "Invalid inode number 0x%Lx",
+ (unsigned long long) ino);
+ XFS_ERROR_REPORT("xfs_dir_ino_validate", XFS_ERRLEVEL_LOW, mp);
+ return XFS_ERROR(EFSCORRUPTED);
+ }
+ return 0;
+}
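The xfs_dir_ino_validate() added here (carried over from the deleted xfs_dir_leaf.c) splits an inode number into its (AG number, AG block, offset) parts, bounds each against the superblock geometry, and requires the parts to reassemble into the original value. A standalone sketch of the same round-trip check, with made-up fixed field widths in place of the real superblock shifts:

#include <stdbool.h>
#include <stdint.h>

/* Hypothetical layout: 8 offset bits, 24 AG-block bits, 16 AG-number bits;
 * any stray bits above bit 47 make the round-trip fail. */
#define OFF_BITS	8
#define AGBNO_BITS	24
#define AGNO_BITS	16

static bool ino_is_plausible(uint64_t ino, uint32_t agcount, uint32_t agblocks)
{
	uint32_t off   = ino & ((1u << OFF_BITS) - 1);
	uint32_t agbno = (ino >> OFF_BITS) & ((1u << AGBNO_BITS) - 1);
	uint32_t agno  = (ino >> (OFF_BITS + AGBNO_BITS)) & ((1u << AGNO_BITS) - 1);
	uint64_t again = ((uint64_t)agno << (OFF_BITS + AGBNO_BITS)) |
			 ((uint64_t)agbno << OFF_BITS) | off;

	return agno < agcount && agbno != 0 && agbno < agblocks && again == ino;
}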
+
+/*
* Initialize a directory with its "." and ".." entries.
*/
-static int /* error */
-xfs_dir2_init(
- xfs_trans_t *tp, /* transaction pointer */
- xfs_inode_t *dp, /* incore directory inode */
- xfs_inode_t *pdp) /* incore parent directory inode */
+int
+xfs_dir_init(
+ xfs_trans_t *tp,
+ xfs_inode_t *dp,
+ xfs_inode_t *pdp)
{
- xfs_da_args_t args; /* operation arguments */
- int error; /* error return value */
+ xfs_da_args_t args;
+ int error;
memset((char *)&args, 0, sizeof(args));
args.dp = dp;
args.trans = tp;
ASSERT((dp->i_d.di_mode & S_IFMT) == S_IFDIR);
- if ((error = xfs_dir_ino_validate(tp->t_mountp, pdp->i_ino))) {
+ if ((error = xfs_dir_ino_validate(tp->t_mountp, pdp->i_ino)))
return error;
- }
return xfs_dir2_sf_create(&args, pdp->i_ino);
}
/*
 * Enter a name in a directory.
*/
-static int /* error */
-xfs_dir2_createname(
- xfs_trans_t *tp, /* transaction pointer */
- xfs_inode_t *dp, /* incore directory inode */
- char *name, /* new entry name */
- int namelen, /* new entry name length */
+int
+xfs_dir_createname(
+ xfs_trans_t *tp,
+ xfs_inode_t *dp,
+ char *name,
+ int namelen,
xfs_ino_t inum, /* new entry inode number */
xfs_fsblock_t *first, /* bmap's firstblock */
xfs_bmap_free_t *flist, /* bmap's freeblock list */
xfs_extlen_t total) /* bmap's total block count */
{
- xfs_da_args_t args; /* operation arguments */
- int rval; /* return value */
+ xfs_da_args_t args;
+ int rval;
int v; /* type-checking value */
ASSERT((dp->i_d.di_mode & S_IFMT) == S_IFDIR);
- if ((rval = xfs_dir_ino_validate(tp->t_mountp, inum))) {
+ if ((rval = xfs_dir_ino_validate(tp->t_mountp, inum)))
return rval;
- }
XFS_STATS_INC(xs_dir_create);
- /*
- * Fill in the arg structure for this request.
- */
+
args.name = name;
args.namelen = namelen;
args.hashval = xfs_da_hashname(name, namelen);
@@ -207,18 +175,16 @@ xfs_dir2_createname(
args.trans = tp;
args.justcheck = 0;
args.addname = args.oknoent = 1;
- /*
- * Decide on what work routines to call based on the inode size.
- */
+
if (dp->i_d.di_format == XFS_DINODE_FMT_LOCAL)
rval = xfs_dir2_sf_addname(&args);
- else if ((rval = xfs_dir2_isblock(tp, dp, &v))) {
+ else if ((rval = xfs_dir2_isblock(tp, dp, &v)))
return rval;
- } else if (v)
+ else if (v)
rval = xfs_dir2_block_addname(&args);
- else if ((rval = xfs_dir2_isleaf(tp, dp, &v))) {
+ else if ((rval = xfs_dir2_isleaf(tp, dp, &v)))
return rval;
- } else if (v)
+ else if (v)
rval = xfs_dir2_leaf_addname(&args);
else
rval = xfs_dir2_node_addname(&args);
@@ -228,24 +194,21 @@ xfs_dir2_createname(
/*
* Lookup a name in a directory, give back the inode number.
*/
-static int /* error */
-xfs_dir2_lookup(
- xfs_trans_t *tp, /* transaction pointer */
- xfs_inode_t *dp, /* incore directory inode */
- char *name, /* lookup name */
- int namelen, /* lookup name length */
+int
+xfs_dir_lookup(
+ xfs_trans_t *tp,
+ xfs_inode_t *dp,
+ char *name,
+ int namelen,
xfs_ino_t *inum) /* out: inode number */
{
- xfs_da_args_t args; /* operation arguments */
- int rval; /* return value */
+ xfs_da_args_t args;
+ int rval;
int v; /* type-checking value */
ASSERT((dp->i_d.di_mode & S_IFMT) == S_IFDIR);
XFS_STATS_INC(xs_dir_lookup);
- /*
- * Fill in the arg structure for this request.
- */
args.name = name;
args.namelen = namelen;
args.hashval = xfs_da_hashname(name, namelen);
@@ -258,18 +221,16 @@ xfs_dir2_lookup(
args.trans = tp;
args.justcheck = args.addname = 0;
args.oknoent = 1;
- /*
- * Decide on what work routines to call based on the inode size.
- */
+
if (dp->i_d.di_format == XFS_DINODE_FMT_LOCAL)
rval = xfs_dir2_sf_lookup(&args);
- else if ((rval = xfs_dir2_isblock(tp, dp, &v))) {
+ else if ((rval = xfs_dir2_isblock(tp, dp, &v)))
return rval;
- } else if (v)
+ else if (v)
rval = xfs_dir2_block_lookup(&args);
- else if ((rval = xfs_dir2_isleaf(tp, dp, &v))) {
+ else if ((rval = xfs_dir2_isleaf(tp, dp, &v)))
return rval;
- } else if (v)
+ else if (v)
rval = xfs_dir2_leaf_lookup(&args);
else
rval = xfs_dir2_node_lookup(&args);
@@ -283,26 +244,24 @@ xfs_dir2_lookup(
/*
* Remove an entry from a directory.
*/
-static int /* error */
-xfs_dir2_removename(
- xfs_trans_t *tp, /* transaction pointer */
- xfs_inode_t *dp, /* incore directory inode */
- char *name, /* name of entry to remove */
- int namelen, /* name length of entry to remove */
- xfs_ino_t ino, /* inode number of entry to remove */
+int
+xfs_dir_removename(
+ xfs_trans_t *tp,
+ xfs_inode_t *dp,
+ char *name,
+ int namelen,
+ xfs_ino_t ino,
xfs_fsblock_t *first, /* bmap's firstblock */
xfs_bmap_free_t *flist, /* bmap's freeblock list */
xfs_extlen_t total) /* bmap's total block count */
{
- xfs_da_args_t args; /* operation arguments */
- int rval; /* return value */
+ xfs_da_args_t args;
+ int rval;
int v; /* type-checking value */
ASSERT((dp->i_d.di_mode & S_IFMT) == S_IFDIR);
XFS_STATS_INC(xs_dir_remove);
- /*
- * Fill in the arg structure for this request.
- */
+
args.name = name;
args.namelen = namelen;
args.hashval = xfs_da_hashname(name, namelen);
@@ -314,18 +273,16 @@ xfs_dir2_removename(
args.whichfork = XFS_DATA_FORK;
args.trans = tp;
args.justcheck = args.addname = args.oknoent = 0;
- /*
- * Decide on what work routines to call based on the inode size.
- */
+
if (dp->i_d.di_format == XFS_DINODE_FMT_LOCAL)
rval = xfs_dir2_sf_removename(&args);
- else if ((rval = xfs_dir2_isblock(tp, dp, &v))) {
+ else if ((rval = xfs_dir2_isblock(tp, dp, &v)))
return rval;
- } else if (v)
+ else if (v)
rval = xfs_dir2_block_removename(&args);
- else if ((rval = xfs_dir2_isleaf(tp, dp, &v))) {
+ else if ((rval = xfs_dir2_isleaf(tp, dp, &v)))
return rval;
- } else if (v)
+ else if (v)
rval = xfs_dir2_leaf_removename(&args);
else
rval = xfs_dir2_node_removename(&args);
@@ -335,10 +292,10 @@ xfs_dir2_removename(
/*
* Read a directory.
*/
-static int /* error */
-xfs_dir2_getdents(
- xfs_trans_t *tp, /* transaction pointer */
- xfs_inode_t *dp, /* incore directory inode */
+int
+xfs_dir_getdents(
+ xfs_trans_t *tp,
+ xfs_inode_t *dp,
uio_t *uio, /* caller's buffer control */
int *eofp) /* out: eof reached */
{
@@ -367,14 +324,11 @@ xfs_dir2_getdents(
}
*eofp = 0;
- /*
- * Decide on what work routines to call based on the inode size.
- */
if (dp->i_d.di_format == XFS_DINODE_FMT_LOCAL)
rval = xfs_dir2_sf_getdents(dp, uio, eofp, dbp, put);
- else if ((rval = xfs_dir2_isblock(tp, dp, &v))) {
+ else if ((rval = xfs_dir2_isblock(tp, dp, &v)))
;
- } else if (v)
+ else if (v)
rval = xfs_dir2_block_getdents(tp, dp, uio, eofp, dbp, put);
else
rval = xfs_dir2_leaf_getdents(tp, dp, uio, eofp, dbp, put);
@@ -386,29 +340,26 @@ xfs_dir2_getdents(
/*
* Replace the inode number of a directory entry.
*/
-static int /* error */
-xfs_dir2_replace(
- xfs_trans_t *tp, /* transaction pointer */
- xfs_inode_t *dp, /* incore directory inode */
+int
+xfs_dir_replace(
+ xfs_trans_t *tp,
+ xfs_inode_t *dp,
char *name, /* name of entry to replace */
- int namelen, /* name length of entry to replace */
+ int namelen,
xfs_ino_t inum, /* new inode number */
xfs_fsblock_t *first, /* bmap's firstblock */
xfs_bmap_free_t *flist, /* bmap's freeblock list */
xfs_extlen_t total) /* bmap's total block count */
{
- xfs_da_args_t args; /* operation arguments */
- int rval; /* return value */
+ xfs_da_args_t args;
+ int rval;
int v; /* type-checking value */
ASSERT((dp->i_d.di_mode & S_IFMT) == S_IFDIR);
- if ((rval = xfs_dir_ino_validate(tp->t_mountp, inum))) {
+ if ((rval = xfs_dir_ino_validate(tp->t_mountp, inum)))
return rval;
- }
- /*
- * Fill in the arg structure for this request.
- */
+
args.name = name;
args.namelen = namelen;
args.hashval = xfs_da_hashname(name, namelen);
@@ -420,18 +371,16 @@ xfs_dir2_replace(
args.whichfork = XFS_DATA_FORK;
args.trans = tp;
args.justcheck = args.addname = args.oknoent = 0;
- /*
- * Decide on what work routines to call based on the inode size.
- */
+
if (dp->i_d.di_format == XFS_DINODE_FMT_LOCAL)
rval = xfs_dir2_sf_replace(&args);
- else if ((rval = xfs_dir2_isblock(tp, dp, &v))) {
+ else if ((rval = xfs_dir2_isblock(tp, dp, &v)))
return rval;
- } else if (v)
+ else if (v)
rval = xfs_dir2_block_replace(&args);
- else if ((rval = xfs_dir2_isleaf(tp, dp, &v))) {
+ else if ((rval = xfs_dir2_isleaf(tp, dp, &v)))
return rval;
- } else if (v)
+ else if (v)
rval = xfs_dir2_leaf_replace(&args);
else
rval = xfs_dir2_node_replace(&args);
@@ -441,21 +390,19 @@ xfs_dir2_replace(
/*
* See if this entry can be added to the directory without allocating space.
*/
-static int /* error */
-xfs_dir2_canenter(
- xfs_trans_t *tp, /* transaction pointer */
- xfs_inode_t *dp, /* incore directory inode */
+int
+xfs_dir_canenter(
+ xfs_trans_t *tp,
+ xfs_inode_t *dp,
char *name, /* name of entry to add */
- int namelen) /* name length of entry to add */
+ int namelen)
{
- xfs_da_args_t args; /* operation arguments */
- int rval; /* return value */
+ xfs_da_args_t args;
+ int rval;
int v; /* type-checking value */
ASSERT((dp->i_d.di_mode & S_IFMT) == S_IFDIR);
- /*
- * Fill in the arg structure for this request.
- */
+
args.name = name;
args.namelen = namelen;
args.hashval = xfs_da_hashname(name, namelen);
@@ -467,18 +414,16 @@ xfs_dir2_canenter(
args.whichfork = XFS_DATA_FORK;
args.trans = tp;
args.justcheck = args.addname = args.oknoent = 1;
- /*
- * Decide on what work routines to call based on the inode size.
- */
+
if (dp->i_d.di_format == XFS_DINODE_FMT_LOCAL)
rval = xfs_dir2_sf_addname(&args);
- else if ((rval = xfs_dir2_isblock(tp, dp, &v))) {
+ else if ((rval = xfs_dir2_isblock(tp, dp, &v)))
return rval;
- } else if (v)
+ else if (v)
rval = xfs_dir2_block_addname(&args);
- else if ((rval = xfs_dir2_isleaf(tp, dp, &v))) {
+ else if ((rval = xfs_dir2_isleaf(tp, dp, &v)))
return rval;
- } else if (v)
+ else if (v)
rval = xfs_dir2_leaf_addname(&args);
else
rval = xfs_dir2_node_addname(&args);
@@ -486,19 +431,6 @@ xfs_dir2_canenter(
}
/*
- * Dummy routine for shortform inode validation.
- * Can't really do this.
- */
-/* ARGSUSED */
-static int /* error */
-xfs_dir2_shortform_validate_ondisk(
- xfs_mount_t *mp, /* filesystem mount point */
- xfs_dinode_t *dip) /* ondisk inode */
-{
- return 0;
-}
-
-/*
* Utility routines.
*/
@@ -507,24 +439,24 @@ xfs_dir2_shortform_validate_ondisk(
* This routine is for data and free blocks, not leaf/node blocks
* which are handled by xfs_da_grow_inode.
*/
-int /* error */
+int
xfs_dir2_grow_inode(
- xfs_da_args_t *args, /* operation arguments */
+ xfs_da_args_t *args,
int space, /* v2 dir's space XFS_DIR2_xxx_SPACE */
xfs_dir2_db_t *dbp) /* out: block number added */
{
xfs_fileoff_t bno; /* directory offset of new block */
int count; /* count of filesystem blocks */
xfs_inode_t *dp; /* incore directory inode */
- int error; /* error return value */
+ int error;
int got; /* blocks actually mapped */
- int i; /* temp mapping index */
+ int i;
xfs_bmbt_irec_t map; /* single structure for bmap */
int mapi; /* mapping index */
xfs_bmbt_irec_t *mapp; /* bmap mapping structure(s) */
- xfs_mount_t *mp; /* filesystem mount point */
+ xfs_mount_t *mp;
int nmap; /* number of bmap entries */
- xfs_trans_t *tp; /* transaction pointer */
+ xfs_trans_t *tp;
xfs_dir2_trace_args_s("grow_inode", args, space);
dp = args->dp;
@@ -538,9 +470,8 @@ xfs_dir2_grow_inode(
/*
* Find the first hole for our block.
*/
- if ((error = xfs_bmap_first_unused(tp, dp, count, &bno, XFS_DATA_FORK))) {
+ if ((error = xfs_bmap_first_unused(tp, dp, count, &bno, XFS_DATA_FORK)))
return error;
- }
nmap = 1;
ASSERT(args->firstblock != NULL);
/*
@@ -549,13 +480,9 @@ xfs_dir2_grow_inode(
if ((error = xfs_bmapi(tp, dp, bno, count,
XFS_BMAPI_WRITE|XFS_BMAPI_METADATA|XFS_BMAPI_CONTIG,
args->firstblock, args->total, &map, &nmap,
- args->flist))) {
+ args->flist, NULL)))
return error;
- }
ASSERT(nmap <= 1);
- /*
- * Got it in 1.
- */
if (nmap == 1) {
mapp = &map;
mapi = 1;
@@ -585,7 +512,8 @@ xfs_dir2_grow_inode(
if ((error = xfs_bmapi(tp, dp, b, c,
XFS_BMAPI_WRITE|XFS_BMAPI_METADATA,
args->firstblock, args->total,
- &mapp[mapi], &nmap, args->flist))) {
+ &mapp[mapi], &nmap, args->flist,
+ NULL))) {
kmem_free(mapp, sizeof(*mapp) * count);
return error;
}
@@ -645,20 +573,19 @@ xfs_dir2_grow_inode(
/*
* See if the directory is a single-block form directory.
*/
-int /* error */
+int
xfs_dir2_isblock(
- xfs_trans_t *tp, /* transaction pointer */
- xfs_inode_t *dp, /* incore directory inode */
+ xfs_trans_t *tp,
+ xfs_inode_t *dp,
int *vp) /* out: 1 is block, 0 is not block */
{
xfs_fileoff_t last; /* last file offset */
- xfs_mount_t *mp; /* filesystem mount point */
- int rval; /* return value */
+ xfs_mount_t *mp;
+ int rval;
mp = dp->i_mount;
- if ((rval = xfs_bmap_last_offset(tp, dp, &last, XFS_DATA_FORK))) {
+ if ((rval = xfs_bmap_last_offset(tp, dp, &last, XFS_DATA_FORK)))
return rval;
- }
rval = XFS_FSB_TO_B(mp, last) == mp->m_dirblksize;
ASSERT(rval == 0 || dp->i_d.di_size == mp->m_dirblksize);
*vp = rval;
@@ -668,20 +595,19 @@ xfs_dir2_isblock(
/*
* See if the directory is a single-leaf form directory.
*/
-int /* error */
+int
xfs_dir2_isleaf(
- xfs_trans_t *tp, /* transaction pointer */
- xfs_inode_t *dp, /* incore directory inode */
+ xfs_trans_t *tp,
+ xfs_inode_t *dp,
int *vp) /* out: 1 is leaf, 0 is not leaf */
{
xfs_fileoff_t last; /* last file offset */
- xfs_mount_t *mp; /* filesystem mount point */
- int rval; /* return value */
+ xfs_mount_t *mp;
+ int rval;
mp = dp->i_mount;
- if ((rval = xfs_bmap_last_offset(tp, dp, &last, XFS_DATA_FORK))) {
+ if ((rval = xfs_bmap_last_offset(tp, dp, &last, XFS_DATA_FORK)))
return rval;
- }
*vp = last == mp->m_dirleafblk + (1 << mp->m_sb.sb_dirblklog);
return 0;
}
@@ -689,9 +615,9 @@ xfs_dir2_isleaf(
/*
* Getdents put routine for 64-bit ABI, direct form.
*/
-static int /* error */
+static int
xfs_dir2_put_dirent64_direct(
- xfs_dir2_put_args_t *pa) /* argument bundle */
+ xfs_dir2_put_args_t *pa)
{
xfs_dirent_t *idbp; /* dirent pointer */
iovec_t *iovp; /* io vector */
@@ -726,9 +652,9 @@ xfs_dir2_put_dirent64_direct(
/*
* Getdents put routine for 64-bit ABI, uio form.
*/
-static int /* error */
+static int
xfs_dir2_put_dirent64_uio(
- xfs_dir2_put_args_t *pa) /* argument bundle */
+ xfs_dir2_put_args_t *pa)
{
xfs_dirent_t *idbp; /* dirent pointer */
int namelen; /* entry name length */
@@ -764,17 +690,17 @@ xfs_dir2_put_dirent64_uio(
*/
int
xfs_dir2_shrink_inode(
- xfs_da_args_t *args, /* operation arguments */
- xfs_dir2_db_t db, /* directory block number */
- xfs_dabuf_t *bp) /* block's buffer */
+ xfs_da_args_t *args,
+ xfs_dir2_db_t db,
+ xfs_dabuf_t *bp)
{
xfs_fileoff_t bno; /* directory file offset */
xfs_dablk_t da; /* directory file offset */
int done; /* bunmap is finished */
- xfs_inode_t *dp; /* incore directory inode */
- int error; /* error return value */
- xfs_mount_t *mp; /* filesystem mount point */
- xfs_trans_t *tp; /* transaction pointer */
+ xfs_inode_t *dp;
+ int error;
+ xfs_mount_t *mp;
+ xfs_trans_t *tp;
xfs_dir2_trace_args_db("shrink_inode", args, db, bp);
dp = args->dp;
@@ -786,7 +712,7 @@ xfs_dir2_shrink_inode(
*/
if ((error = xfs_bunmapi(tp, dp, da, mp->m_dirblkfsbs,
XFS_BMAPI_METADATA, 0, args->firstblock, args->flist,
- &done))) {
+ NULL, &done))) {
/*
* ENOSPC actually can happen if we're in a removename with
* no space reservation, and the resulting block removal
diff --git a/fs/xfs/xfs_dir2.h b/fs/xfs/xfs_dir2.h
index 7dd364b1e03..86560b6f794 100644
--- a/fs/xfs/xfs_dir2.h
+++ b/fs/xfs/xfs_dir2.h
@@ -22,7 +22,9 @@ struct uio;
struct xfs_dabuf;
struct xfs_da_args;
struct xfs_dir2_put_args;
+struct xfs_bmap_free;
struct xfs_inode;
+struct xfs_mount;
struct xfs_trans;
/*
@@ -73,7 +75,35 @@ typedef struct xfs_dir2_put_args {
} xfs_dir2_put_args_t;
/*
- * Other interfaces used by the rest of the dir v2 code.
+ * Generic directory interface routines
+ */
+extern void xfs_dir_startup(void);
+extern void xfs_dir_mount(struct xfs_mount *mp);
+extern int xfs_dir_isempty(struct xfs_inode *dp);
+extern int xfs_dir_init(struct xfs_trans *tp, struct xfs_inode *dp,
+ struct xfs_inode *pdp);
+extern int xfs_dir_createname(struct xfs_trans *tp, struct xfs_inode *dp,
+ char *name, int namelen, xfs_ino_t inum,
+ xfs_fsblock_t *first,
+ struct xfs_bmap_free *flist, xfs_extlen_t tot);
+extern int xfs_dir_lookup(struct xfs_trans *tp, struct xfs_inode *dp,
+ char *name, int namelen, xfs_ino_t *inum);
+extern int xfs_dir_removename(struct xfs_trans *tp, struct xfs_inode *dp,
+ char *name, int namelen, xfs_ino_t ino,
+ xfs_fsblock_t *first,
+ struct xfs_bmap_free *flist, xfs_extlen_t tot);
+extern int xfs_dir_getdents(struct xfs_trans *tp, struct xfs_inode *dp,
+ uio_t *uio, int *eofp);
+extern int xfs_dir_replace(struct xfs_trans *tp, struct xfs_inode *dp,
+ char *name, int namelen, xfs_ino_t inum,
+ xfs_fsblock_t *first,
+ struct xfs_bmap_free *flist, xfs_extlen_t tot);
+extern int xfs_dir_canenter(struct xfs_trans *tp, struct xfs_inode *dp,
+ char *name, int namelen);
+extern int xfs_dir_ino_validate(struct xfs_mount *mp, xfs_ino_t ino);
+
+/*
+ * Utility routines for v2 directories.
*/
extern int xfs_dir2_grow_inode(struct xfs_da_args *args, int space,
xfs_dir2_db_t *dbp);
diff --git a/fs/xfs/xfs_dir2_block.c b/fs/xfs/xfs_dir2_block.c
index 972ded59547..9d7438bba30 100644
--- a/fs/xfs/xfs_dir2_block.c
+++ b/fs/xfs/xfs_dir2_block.c
@@ -22,19 +22,16 @@
#include "xfs_inum.h"
#include "xfs_trans.h"
#include "xfs_sb.h"
-#include "xfs_dir.h"
#include "xfs_dir2.h"
#include "xfs_dmapi.h"
#include "xfs_mount.h"
#include "xfs_da_btree.h"
#include "xfs_bmap_btree.h"
-#include "xfs_dir_sf.h"
#include "xfs_dir2_sf.h"
#include "xfs_attr_sf.h"
#include "xfs_dinode.h"
#include "xfs_inode.h"
#include "xfs_inode_item.h"
-#include "xfs_dir_leaf.h"
#include "xfs_dir2_data.h"
#include "xfs_dir2_leaf.h"
#include "xfs_dir2_block.h"
@@ -51,6 +48,18 @@ static int xfs_dir2_block_lookup_int(xfs_da_args_t *args, xfs_dabuf_t **bpp,
int *entno);
static int xfs_dir2_block_sort(const void *a, const void *b);
+static xfs_dahash_t xfs_dir_hash_dot, xfs_dir_hash_dotdot;
+
+/*
+ * One-time startup routine called from xfs_init().
+ */
+void
+xfs_dir_startup(void)
+{
+ xfs_dir_hash_dot = xfs_da_hashname(".", 1);
+ xfs_dir_hash_dotdot = xfs_da_hashname("..", 2);
+}
+
/*
* Add an entry to a block directory.
*/
@@ -400,7 +409,7 @@ xfs_dir2_block_addname(
/*
* Create the new data entry.
*/
- INT_SET(dep->inumber, ARCH_CONVERT, args->inumber);
+ dep->inumber = cpu_to_be64(args->inumber);
dep->namelen = args->namelen;
memcpy(dep->name, args->name, args->namelen);
tagp = XFS_DIR2_DATA_ENTRY_TAG_P(dep);
@@ -508,7 +517,7 @@ xfs_dir2_block_getdents(
p.cook = XFS_DIR2_DB_OFF_TO_DATAPTR(mp, mp->m_dirdatablk,
ptr - (char *)block);
- p.ino = INT_GET(dep->inumber, ARCH_CONVERT);
+ p.ino = be64_to_cpu(dep->inumber);
#if XFS_BIG_INUMS
p.ino += mp->m_inoadd;
#endif
@@ -626,7 +635,7 @@ xfs_dir2_block_lookup(
/*
* Fill in inode number, release the block.
*/
- args->inumber = INT_GET(dep->inumber, ARCH_CONVERT);
+ args->inumber = be64_to_cpu(dep->inumber);
xfs_da_brelse(args->trans, bp);
return XFS_ERROR(EEXIST);
}
@@ -844,11 +853,11 @@ xfs_dir2_block_replace(
*/
dep = (xfs_dir2_data_entry_t *)
((char *)block + XFS_DIR2_DATAPTR_TO_OFF(mp, be32_to_cpu(blp[ent].address)));
- ASSERT(INT_GET(dep->inumber, ARCH_CONVERT) != args->inumber);
+ ASSERT(be64_to_cpu(dep->inumber) != args->inumber);
/*
* Change the inode number to the new value.
*/
- INT_SET(dep->inumber, ARCH_CONVERT, args->inumber);
+ dep->inumber = cpu_to_be64(args->inumber);
xfs_dir2_data_log_entry(args->trans, bp, dep);
xfs_dir2_data_check(dp, bp);
xfs_da_buf_done(bp);
@@ -1130,7 +1139,7 @@ xfs_dir2_sf_to_block(
*/
dep = (xfs_dir2_data_entry_t *)
((char *)block + XFS_DIR2_DATA_DOT_OFFSET);
- INT_SET(dep->inumber, ARCH_CONVERT, dp->i_ino);
+ dep->inumber = cpu_to_be64(dp->i_ino);
dep->namelen = 1;
dep->name[0] = '.';
tagp = XFS_DIR2_DATA_ENTRY_TAG_P(dep);
@@ -1144,7 +1153,7 @@ xfs_dir2_sf_to_block(
*/
dep = (xfs_dir2_data_entry_t *)
((char *)block + XFS_DIR2_DATA_DOTDOT_OFFSET);
- INT_SET(dep->inumber, ARCH_CONVERT, XFS_DIR2_SF_GET_INUMBER(sfp, &sfp->hdr.parent));
+ dep->inumber = cpu_to_be64(XFS_DIR2_SF_GET_INUMBER(sfp, &sfp->hdr.parent));
dep->namelen = 2;
dep->name[0] = dep->name[1] = '.';
tagp = XFS_DIR2_DATA_ENTRY_TAG_P(dep);
@@ -1193,7 +1202,7 @@ xfs_dir2_sf_to_block(
* Copy a real entry.
*/
dep = (xfs_dir2_data_entry_t *)((char *)block + newoffset);
- INT_SET(dep->inumber, ARCH_CONVERT, XFS_DIR2_SF_GET_INUMBER(sfp,
+ dep->inumber = cpu_to_be64(XFS_DIR2_SF_GET_INUMBER(sfp,
XFS_DIR2_SF_INUMBERP(sfep)));
dep->namelen = sfep->namelen;
memcpy(dep->name, sfep->name, dep->namelen);
diff --git a/fs/xfs/xfs_dir2_data.c b/fs/xfs/xfs_dir2_data.c
index bb3d03ff002..f7c79921707 100644
--- a/fs/xfs/xfs_dir2_data.c
+++ b/fs/xfs/xfs_dir2_data.c
@@ -22,18 +22,15 @@
#include "xfs_inum.h"
#include "xfs_trans.h"
#include "xfs_sb.h"
-#include "xfs_dir.h"
#include "xfs_dir2.h"
#include "xfs_dmapi.h"
#include "xfs_mount.h"
#include "xfs_da_btree.h"
#include "xfs_bmap_btree.h"
-#include "xfs_dir_sf.h"
#include "xfs_dir2_sf.h"
#include "xfs_attr_sf.h"
#include "xfs_dinode.h"
#include "xfs_inode.h"
-#include "xfs_dir_leaf.h"
#include "xfs_dir2_data.h"
#include "xfs_dir2_leaf.h"
#include "xfs_dir2_block.h"
@@ -133,7 +130,7 @@ xfs_dir2_data_check(
*/
dep = (xfs_dir2_data_entry_t *)p;
ASSERT(dep->namelen != 0);
- ASSERT(xfs_dir_ino_validate(mp, INT_GET(dep->inumber, ARCH_CONVERT)) == 0);
+ ASSERT(xfs_dir_ino_validate(mp, be64_to_cpu(dep->inumber)) == 0);
ASSERT(be16_to_cpu(*XFS_DIR2_DATA_ENTRY_TAG_P(dep)) ==
(char *)dep - (char *)d);
count++;
diff --git a/fs/xfs/xfs_dir2_data.h b/fs/xfs/xfs_dir2_data.h
index 0847cbb53e1..a6ae2d21c40 100644
--- a/fs/xfs/xfs_dir2_data.h
+++ b/fs/xfs/xfs_dir2_data.h
@@ -85,11 +85,11 @@ typedef struct xfs_dir2_data_hdr {
* Tag appears as the last 2 bytes.
*/
typedef struct xfs_dir2_data_entry {
- xfs_ino_t inumber; /* inode number */
- __uint8_t namelen; /* name length */
- __uint8_t name[1]; /* name bytes, no null */
+ __be64 inumber; /* inode number */
+ __u8 namelen; /* name length */
+ __u8 name[1]; /* name bytes, no null */
/* variable offset */
- xfs_dir2_data_off_t tag; /* starting offset of us */
+ __be16 tag; /* starting offset of us */
} xfs_dir2_data_entry_t;
/*
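With the entry's inumber now declared __be64, every access above goes through cpu_to_be64()/be64_to_cpu() instead of the older INT_SET/INT_GET(..., ARCH_CONVERT) wrappers, so sparse can type-check that on-disk fields are never used in CPU byte order by mistake. A minimal userspace analogue of the same discipline, using the glibc <endian.h> helpers rather than the kernel ones:

#include <endian.h>
#include <stdint.h>

/* Hypothetical on-disk record: inumber is stored big-endian. */
struct disk_entry {
	uint64_t inumber_be;		/* big-endian on disk and in memory */
	uint8_t  namelen;
	uint8_t  name[1];
};

static void set_inumber(struct disk_entry *de, uint64_t ino)
{
	de->inumber_be = htobe64(ino);	/* CPU order -> disk order */
}

static uint64_t get_inumber(const struct disk_entry *de)
{
	return be64toh(de->inumber_be);	/* disk order -> CPU order */
}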
diff --git a/fs/xfs/xfs_dir2_leaf.c b/fs/xfs/xfs_dir2_leaf.c
index 0f5e2f2ce6e..b1cf1fbf423 100644
--- a/fs/xfs/xfs_dir2_leaf.c
+++ b/fs/xfs/xfs_dir2_leaf.c
@@ -24,14 +24,12 @@
#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
-#include "xfs_dir.h"
#include "xfs_dir2.h"
#include "xfs_dmapi.h"
#include "xfs_mount.h"
#include "xfs_da_btree.h"
#include "xfs_bmap_btree.h"
#include "xfs_attr_sf.h"
-#include "xfs_dir_sf.h"
#include "xfs_dir2_sf.h"
#include "xfs_dinode.h"
#include "xfs_inode.h"
@@ -407,7 +405,7 @@ xfs_dir2_leaf_addname(
* Initialize our new entry (at last).
*/
dep = (xfs_dir2_data_entry_t *)dup;
- INT_SET(dep->inumber, ARCH_CONVERT, args->inumber);
+ dep->inumber = cpu_to_be64(args->inumber);
dep->namelen = args->namelen;
memcpy(dep->name, args->name, dep->namelen);
tagp = XFS_DIR2_DATA_ENTRY_TAG_P(dep);
@@ -884,7 +882,7 @@ xfs_dir2_leaf_getdents(
XFS_DIR2_BYTE_TO_DA(mp,
XFS_DIR2_LEAF_OFFSET) - map_off,
XFS_BMAPI_METADATA, NULL, 0,
- &map[map_valid], &nmap, NULL);
+ &map[map_valid], &nmap, NULL, NULL);
/*
* Don't know if we should ignore this or
* try to return an error.
@@ -1098,7 +1096,7 @@ xfs_dir2_leaf_getdents(
p->cook = XFS_DIR2_BYTE_TO_DATAPTR(mp, curoff + length);
- p->ino = INT_GET(dep->inumber, ARCH_CONVERT);
+ p->ino = be64_to_cpu(dep->inumber);
#if XFS_BIG_INUMS
p->ino += mp->m_inoadd;
#endif
@@ -1319,7 +1317,7 @@ xfs_dir2_leaf_lookup(
/*
* Return the found inode number.
*/
- args->inumber = INT_GET(dep->inumber, ARCH_CONVERT);
+ args->inumber = be64_to_cpu(dep->inumber);
xfs_da_brelse(tp, dbp);
xfs_da_brelse(tp, lbp);
return XFS_ERROR(EEXIST);
@@ -1606,11 +1604,11 @@ xfs_dir2_leaf_replace(
dep = (xfs_dir2_data_entry_t *)
((char *)dbp->data +
XFS_DIR2_DATAPTR_TO_OFF(dp->i_mount, be32_to_cpu(lep->address)));
- ASSERT(args->inumber != INT_GET(dep->inumber, ARCH_CONVERT));
+ ASSERT(args->inumber != be64_to_cpu(dep->inumber));
/*
* Put the new inode number in, log it.
*/
- INT_SET(dep->inumber, ARCH_CONVERT, args->inumber);
+ dep->inumber = cpu_to_be64(args->inumber);
tp = args->trans;
xfs_dir2_data_log_entry(tp, dbp, dep);
xfs_da_buf_done(dbp);
diff --git a/fs/xfs/xfs_dir2_node.c b/fs/xfs/xfs_dir2_node.c
index ac511ab9c52..9ca71719b68 100644
--- a/fs/xfs/xfs_dir2_node.c
+++ b/fs/xfs/xfs_dir2_node.c
@@ -22,13 +22,11 @@
#include "xfs_inum.h"
#include "xfs_trans.h"
#include "xfs_sb.h"
-#include "xfs_dir.h"
#include "xfs_dir2.h"
#include "xfs_dmapi.h"
#include "xfs_mount.h"
#include "xfs_da_btree.h"
#include "xfs_bmap_btree.h"
-#include "xfs_dir_sf.h"
#include "xfs_dir2_sf.h"
#include "xfs_attr_sf.h"
#include "xfs_dinode.h"
@@ -505,7 +503,6 @@ xfs_dir2_leafn_lookup_int(
XFS_DATA_FORK))) {
return error;
}
- curfdb = newfdb;
free = curbp->data;
ASSERT(be32_to_cpu(free->hdr.magic) ==
XFS_DIR2_FREE_MAGIC);
@@ -527,8 +524,11 @@ xfs_dir2_leafn_lookup_int(
if (unlikely(be16_to_cpu(free->bests[fi]) == NULLDATAOFF)) {
XFS_ERROR_REPORT("xfs_dir2_leafn_lookup_int",
XFS_ERRLEVEL_LOW, mp);
+ if (curfdb != newfdb)
+ xfs_da_brelse(tp, curbp);
return XFS_ERROR(EFSCORRUPTED);
}
+ curfdb = newfdb;
if (be16_to_cpu(free->bests[fi]) >= length) {
*indexp = index;
state->extravalid = 1;
@@ -580,7 +580,7 @@ xfs_dir2_leafn_lookup_int(
if (dep->namelen == args->namelen &&
dep->name[0] == args->name[0] &&
memcmp(dep->name, args->name, args->namelen) == 0) {
- args->inumber = INT_GET(dep->inumber, ARCH_CONVERT);
+ args->inumber = be64_to_cpu(dep->inumber);
*indexp = index;
state->extravalid = 1;
state->extrablk.bp = curbp;
@@ -970,7 +970,7 @@ xfs_dir2_leafn_remove(
/*
* One less used entry in the free table.
*/
- free->hdr.nused = cpu_to_be32(-1);
+ be32_add(&free->hdr.nused, -1);
xfs_dir2_free_log_header(tp, fbp);
/*
* If this was the last entry in the table, we can
@@ -1695,7 +1695,7 @@ xfs_dir2_node_addname_int(
* Fill in the new entry and log it.
*/
dep = (xfs_dir2_data_entry_t *)dup;
- INT_SET(dep->inumber, ARCH_CONVERT, args->inumber);
+ dep->inumber = cpu_to_be64(args->inumber);
dep->namelen = args->namelen;
memcpy(dep->name, args->name, dep->namelen);
tagp = XFS_DIR2_DATA_ENTRY_TAG_P(dep);
@@ -1905,11 +1905,11 @@ xfs_dir2_node_replace(
dep = (xfs_dir2_data_entry_t *)
((char *)data +
XFS_DIR2_DATAPTR_TO_OFF(state->mp, be32_to_cpu(lep->address)));
- ASSERT(inum != INT_GET(dep->inumber, ARCH_CONVERT));
+ ASSERT(inum != be64_to_cpu(dep->inumber));
/*
* Fill in the new inode number and log the entry.
*/
- INT_SET(dep->inumber, ARCH_CONVERT, inum);
+ dep->inumber = cpu_to_be64(inum);
xfs_dir2_data_log_entry(args->trans, state->extrablk.bp, dep);
rval = 0;
}
diff --git a/fs/xfs/xfs_dir2_sf.c b/fs/xfs/xfs_dir2_sf.c
index d98a41d1fe6..0cd77b17bf9 100644
--- a/fs/xfs/xfs_dir2_sf.c
+++ b/fs/xfs/xfs_dir2_sf.c
@@ -22,19 +22,16 @@
#include "xfs_inum.h"
#include "xfs_trans.h"
#include "xfs_sb.h"
-#include "xfs_dir.h"
#include "xfs_dir2.h"
#include "xfs_dmapi.h"
#include "xfs_mount.h"
#include "xfs_da_btree.h"
#include "xfs_bmap_btree.h"
-#include "xfs_dir_sf.h"
#include "xfs_dir2_sf.h"
#include "xfs_attr_sf.h"
#include "xfs_dinode.h"
#include "xfs_inode.h"
#include "xfs_inode_item.h"
-#include "xfs_dir_leaf.h"
#include "xfs_error.h"
#include "xfs_dir2_data.h"
#include "xfs_dir2_leaf.h"
@@ -117,13 +114,13 @@ xfs_dir2_block_sfsize(
dep->name[0] == '.' && dep->name[1] == '.';
#if XFS_BIG_INUMS
if (!isdot)
- i8count += INT_GET(dep->inumber, ARCH_CONVERT) > XFS_DIR2_MAX_SHORT_INUM;
+ i8count += be64_to_cpu(dep->inumber) > XFS_DIR2_MAX_SHORT_INUM;
#endif
if (!isdot && !isdotdot) {
count++;
namelen += dep->namelen;
} else if (isdotdot)
- parent = INT_GET(dep->inumber, ARCH_CONVERT);
+ parent = be64_to_cpu(dep->inumber);
/*
* Calculate the new size, see if we should give up yet.
*/
@@ -229,13 +226,13 @@ xfs_dir2_block_to_sf(
* Skip .
*/
if (dep->namelen == 1 && dep->name[0] == '.')
- ASSERT(INT_GET(dep->inumber, ARCH_CONVERT) == dp->i_ino);
+ ASSERT(be64_to_cpu(dep->inumber) == dp->i_ino);
/*
* Skip .., but make sure the inode number is right.
*/
else if (dep->namelen == 2 &&
dep->name[0] == '.' && dep->name[1] == '.')
- ASSERT(INT_GET(dep->inumber, ARCH_CONVERT) ==
+ ASSERT(be64_to_cpu(dep->inumber) ==
XFS_DIR2_SF_GET_INUMBER(sfp, &sfp->hdr.parent));
/*
* Normal entry, copy it into shortform.
@@ -246,7 +243,7 @@ xfs_dir2_block_to_sf(
(xfs_dir2_data_aoff_t)
((char *)dep - (char *)block));
memcpy(sfep->name, dep->name, dep->namelen);
- temp=INT_GET(dep->inumber, ARCH_CONVERT);
+ temp = be64_to_cpu(dep->inumber);
XFS_DIR2_SF_PUT_INUMBER(sfp, &temp,
XFS_DIR2_SF_INUMBERP(sfep));
sfep = XFS_DIR2_SF_NEXTENTRY(sfp, sfep);
diff --git a/fs/xfs/xfs_dir2_trace.c b/fs/xfs/xfs_dir2_trace.c
index c626943b411..f3fb2ffd6f5 100644
--- a/fs/xfs/xfs_dir2_trace.c
+++ b/fs/xfs/xfs_dir2_trace.c
@@ -19,11 +19,9 @@
#include "xfs_fs.h"
#include "xfs_types.h"
#include "xfs_inum.h"
-#include "xfs_dir.h"
#include "xfs_dir2.h"
#include "xfs_da_btree.h"
#include "xfs_bmap_btree.h"
-#include "xfs_dir_sf.h"
#include "xfs_dir2_sf.h"
#include "xfs_attr_sf.h"
#include "xfs_dinode.h"
diff --git a/fs/xfs/xfs_dir_leaf.c b/fs/xfs/xfs_dir_leaf.c
deleted file mode 100644
index 6d711869262..00000000000
--- a/fs/xfs/xfs_dir_leaf.c
+++ /dev/null
@@ -1,2213 +0,0 @@
-/*
- * Copyright (c) 2000-2003,2005 Silicon Graphics, Inc.
- * All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it would be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
- */
-#include "xfs.h"
-#include "xfs_fs.h"
-#include "xfs_types.h"
-#include "xfs_log.h"
-#include "xfs_inum.h"
-#include "xfs_trans.h"
-#include "xfs_sb.h"
-#include "xfs_dir.h"
-#include "xfs_dir2.h"
-#include "xfs_dmapi.h"
-#include "xfs_mount.h"
-#include "xfs_da_btree.h"
-#include "xfs_bmap_btree.h"
-#include "xfs_alloc_btree.h"
-#include "xfs_ialloc_btree.h"
-#include "xfs_dir_sf.h"
-#include "xfs_dir2_sf.h"
-#include "xfs_attr_sf.h"
-#include "xfs_dinode.h"
-#include "xfs_inode.h"
-#include "xfs_inode_item.h"
-#include "xfs_alloc.h"
-#include "xfs_btree.h"
-#include "xfs_bmap.h"
-#include "xfs_dir_leaf.h"
-#include "xfs_error.h"
-
-/*
- * xfs_dir_leaf.c
- *
- * Routines to implement leaf blocks of directories as Btrees of hashed names.
- */
-
-/*========================================================================
- * Function prototypes for the kernel.
- *========================================================================*/
-
-/*
- * Routines used for growing the Btree.
- */
-STATIC void xfs_dir_leaf_add_work(xfs_dabuf_t *leaf_buffer, xfs_da_args_t *args,
- int insertion_index,
- int freemap_index);
-STATIC int xfs_dir_leaf_compact(xfs_trans_t *trans, xfs_dabuf_t *leaf_buffer,
- int musthave, int justcheck);
-STATIC void xfs_dir_leaf_rebalance(xfs_da_state_t *state,
- xfs_da_state_blk_t *blk1,
- xfs_da_state_blk_t *blk2);
-STATIC int xfs_dir_leaf_figure_balance(xfs_da_state_t *state,
- xfs_da_state_blk_t *leaf_blk_1,
- xfs_da_state_blk_t *leaf_blk_2,
- int *number_entries_in_blk1,
- int *number_namebytes_in_blk1);
-
-STATIC int xfs_dir_leaf_create(struct xfs_da_args *args,
- xfs_dablk_t which_block,
- struct xfs_dabuf **bpp);
-
-/*
- * Utility routines.
- */
-STATIC void xfs_dir_leaf_moveents(xfs_dir_leafblock_t *src_leaf,
- int src_start,
- xfs_dir_leafblock_t *dst_leaf,
- int dst_start, int move_count,
- xfs_mount_t *mp);
-
-
-/*========================================================================
- * External routines when dirsize < XFS_IFORK_DSIZE(dp).
- *========================================================================*/
-
-
-/*
- * Validate a given inode number.
- */
-int
-xfs_dir_ino_validate(xfs_mount_t *mp, xfs_ino_t ino)
-{
- xfs_agblock_t agblkno;
- xfs_agino_t agino;
- xfs_agnumber_t agno;
- int ino_ok;
- int ioff;
-
- agno = XFS_INO_TO_AGNO(mp, ino);
- agblkno = XFS_INO_TO_AGBNO(mp, ino);
- ioff = XFS_INO_TO_OFFSET(mp, ino);
- agino = XFS_OFFBNO_TO_AGINO(mp, agblkno, ioff);
- ino_ok =
- agno < mp->m_sb.sb_agcount &&
- agblkno < mp->m_sb.sb_agblocks &&
- agblkno != 0 &&
- ioff < (1 << mp->m_sb.sb_inopblog) &&
- XFS_AGINO_TO_INO(mp, agno, agino) == ino;
- if (unlikely(XFS_TEST_ERROR(!ino_ok, mp, XFS_ERRTAG_DIR_INO_VALIDATE,
- XFS_RANDOM_DIR_INO_VALIDATE))) {
- xfs_fs_cmn_err(CE_WARN, mp, "Invalid inode number 0x%Lx",
- (unsigned long long) ino);
- XFS_ERROR_REPORT("xfs_dir_ino_validate", XFS_ERRLEVEL_LOW, mp);
- return XFS_ERROR(EFSCORRUPTED);
- }
- return 0;
-}
-
-/*
- * Create the initial contents of a shortform directory.
- */
-int
-xfs_dir_shortform_create(xfs_da_args_t *args, xfs_ino_t parent)
-{
- xfs_dir_sf_hdr_t *hdr;
- xfs_inode_t *dp;
-
- dp = args->dp;
- ASSERT(dp != NULL);
- ASSERT(dp->i_d.di_size == 0);
- if (dp->i_d.di_format == XFS_DINODE_FMT_EXTENTS) {
- dp->i_df.if_flags &= ~XFS_IFEXTENTS; /* just in case */
- dp->i_d.di_format = XFS_DINODE_FMT_LOCAL;
- xfs_trans_log_inode(args->trans, dp, XFS_ILOG_CORE);
- dp->i_df.if_flags |= XFS_IFINLINE;
- }
- ASSERT(dp->i_df.if_flags & XFS_IFINLINE);
- ASSERT(dp->i_df.if_bytes == 0);
- xfs_idata_realloc(dp, sizeof(*hdr), XFS_DATA_FORK);
- hdr = (xfs_dir_sf_hdr_t *)dp->i_df.if_u1.if_data;
- XFS_DIR_SF_PUT_DIRINO(&parent, &hdr->parent);
-
- hdr->count = 0;
- dp->i_d.di_size = sizeof(*hdr);
- xfs_trans_log_inode(args->trans, dp, XFS_ILOG_CORE | XFS_ILOG_DDATA);
- return 0;
-}
-
-/*
- * Add a name to the shortform directory structure.
- * Overflow from the inode has already been checked for.
- */
-int
-xfs_dir_shortform_addname(xfs_da_args_t *args)
-{
- xfs_dir_shortform_t *sf;
- xfs_dir_sf_entry_t *sfe;
- int i, offset, size;
- xfs_inode_t *dp;
-
- dp = args->dp;
- ASSERT(dp->i_df.if_flags & XFS_IFINLINE);
- /*
- * Catch the case where the conversion from shortform to leaf
- * failed part way through.
- */
- if (dp->i_d.di_size < sizeof(xfs_dir_sf_hdr_t)) {
- ASSERT(XFS_FORCED_SHUTDOWN(dp->i_mount));
- return XFS_ERROR(EIO);
- }
- ASSERT(dp->i_df.if_bytes == dp->i_d.di_size);
- ASSERT(dp->i_df.if_u1.if_data != NULL);
- sf = (xfs_dir_shortform_t *)dp->i_df.if_u1.if_data;
- sfe = &sf->list[0];
- for (i = sf->hdr.count-1; i >= 0; i--) {
- if (sfe->namelen == args->namelen &&
- args->name[0] == sfe->name[0] &&
- memcmp(args->name, sfe->name, args->namelen) == 0)
- return XFS_ERROR(EEXIST);
- sfe = XFS_DIR_SF_NEXTENTRY(sfe);
- }
-
- offset = (int)((char *)sfe - (char *)sf);
- size = XFS_DIR_SF_ENTSIZE_BYNAME(args->namelen);
- xfs_idata_realloc(dp, size, XFS_DATA_FORK);
- sf = (xfs_dir_shortform_t *)dp->i_df.if_u1.if_data;
- sfe = (xfs_dir_sf_entry_t *)((char *)sf + offset);
-
- XFS_DIR_SF_PUT_DIRINO(&args->inumber, &sfe->inumber);
- sfe->namelen = args->namelen;
- memcpy(sfe->name, args->name, sfe->namelen);
- sf->hdr.count++;
-
- dp->i_d.di_size += size;
- xfs_trans_log_inode(args->trans, dp, XFS_ILOG_CORE | XFS_ILOG_DDATA);
-
- return 0;
-}
-
-/*
- * Remove a name from the shortform directory structure.
- */
-int
-xfs_dir_shortform_removename(xfs_da_args_t *args)
-{
- xfs_dir_shortform_t *sf;
- xfs_dir_sf_entry_t *sfe;
- int base, size = 0, i;
- xfs_inode_t *dp;
-
- dp = args->dp;
- ASSERT(dp->i_df.if_flags & XFS_IFINLINE);
- /*
- * Catch the case where the conversion from shortform to leaf
- * failed part way through.
- */
- if (dp->i_d.di_size < sizeof(xfs_dir_sf_hdr_t)) {
- ASSERT(XFS_FORCED_SHUTDOWN(dp->i_mount));
- return XFS_ERROR(EIO);
- }
- ASSERT(dp->i_df.if_bytes == dp->i_d.di_size);
- ASSERT(dp->i_df.if_u1.if_data != NULL);
- base = sizeof(xfs_dir_sf_hdr_t);
- sf = (xfs_dir_shortform_t *)dp->i_df.if_u1.if_data;
- sfe = &sf->list[0];
- for (i = sf->hdr.count-1; i >= 0; i--) {
- size = XFS_DIR_SF_ENTSIZE_BYENTRY(sfe);
- if (sfe->namelen == args->namelen &&
- sfe->name[0] == args->name[0] &&
- memcmp(sfe->name, args->name, args->namelen) == 0)
- break;
- base += size;
- sfe = XFS_DIR_SF_NEXTENTRY(sfe);
- }
- if (i < 0) {
- ASSERT(args->oknoent);
- return XFS_ERROR(ENOENT);
- }
-
- if ((base + size) != dp->i_d.di_size) {
- memmove(&((char *)sf)[base], &((char *)sf)[base+size],
- dp->i_d.di_size - (base+size));
- }
- sf->hdr.count--;
-
- xfs_idata_realloc(dp, -size, XFS_DATA_FORK);
- dp->i_d.di_size -= size;
- xfs_trans_log_inode(args->trans, dp, XFS_ILOG_CORE | XFS_ILOG_DDATA);
-
- return 0;
-}
-
-/*
- * Look up a name in a shortform directory structure.
- */
-int
-xfs_dir_shortform_lookup(xfs_da_args_t *args)
-{
- xfs_dir_shortform_t *sf;
- xfs_dir_sf_entry_t *sfe;
- int i;
- xfs_inode_t *dp;
-
- dp = args->dp;
- ASSERT(dp->i_df.if_flags & XFS_IFINLINE);
- /*
- * Catch the case where the conversion from shortform to leaf
- * failed part way through.
- */
- if (dp->i_d.di_size < sizeof(xfs_dir_sf_hdr_t)) {
- ASSERT(XFS_FORCED_SHUTDOWN(dp->i_mount));
- return XFS_ERROR(EIO);
- }
- ASSERT(dp->i_df.if_bytes == dp->i_d.di_size);
- ASSERT(dp->i_df.if_u1.if_data != NULL);
- sf = (xfs_dir_shortform_t *)dp->i_df.if_u1.if_data;
- if (args->namelen == 2 &&
- args->name[0] == '.' && args->name[1] == '.') {
- XFS_DIR_SF_GET_DIRINO(&sf->hdr.parent, &args->inumber);
- return(XFS_ERROR(EEXIST));
- }
- if (args->namelen == 1 && args->name[0] == '.') {
- args->inumber = dp->i_ino;
- return(XFS_ERROR(EEXIST));
- }
- sfe = &sf->list[0];
- for (i = sf->hdr.count-1; i >= 0; i--) {
- if (sfe->namelen == args->namelen &&
- sfe->name[0] == args->name[0] &&
- memcmp(args->name, sfe->name, args->namelen) == 0) {
- XFS_DIR_SF_GET_DIRINO(&sfe->inumber, &args->inumber);
- return(XFS_ERROR(EEXIST));
- }
- sfe = XFS_DIR_SF_NEXTENTRY(sfe);
- }
- ASSERT(args->oknoent);
- return(XFS_ERROR(ENOENT));
-}
-
-/*
- * Convert from using the shortform to the leaf.
- */
-int
-xfs_dir_shortform_to_leaf(xfs_da_args_t *iargs)
-{
- xfs_inode_t *dp;
- xfs_dir_shortform_t *sf;
- xfs_dir_sf_entry_t *sfe;
- xfs_da_args_t args;
- xfs_ino_t inumber;
- char *tmpbuffer;
- int retval, i, size;
- xfs_dablk_t blkno;
- xfs_dabuf_t *bp;
-
- dp = iargs->dp;
- /*
- * Catch the case where the conversion from shortform to leaf
- * failed part way through.
- */
- if (dp->i_d.di_size < sizeof(xfs_dir_sf_hdr_t)) {
- ASSERT(XFS_FORCED_SHUTDOWN(dp->i_mount));
- return XFS_ERROR(EIO);
- }
- ASSERT(dp->i_df.if_bytes == dp->i_d.di_size);
- ASSERT(dp->i_df.if_u1.if_data != NULL);
- size = dp->i_df.if_bytes;
- tmpbuffer = kmem_alloc(size, KM_SLEEP);
- ASSERT(tmpbuffer != NULL);
-
- memcpy(tmpbuffer, dp->i_df.if_u1.if_data, size);
-
- sf = (xfs_dir_shortform_t *)tmpbuffer;
- XFS_DIR_SF_GET_DIRINO(&sf->hdr.parent, &inumber);
-
- xfs_idata_realloc(dp, -size, XFS_DATA_FORK);
- dp->i_d.di_size = 0;
- xfs_trans_log_inode(iargs->trans, dp, XFS_ILOG_CORE);
- retval = xfs_da_grow_inode(iargs, &blkno);
- if (retval)
- goto out;
-
- ASSERT(blkno == 0);
- retval = xfs_dir_leaf_create(iargs, blkno, &bp);
- if (retval)
- goto out;
- xfs_da_buf_done(bp);
-
- args.name = ".";
- args.namelen = 1;
- args.hashval = xfs_dir_hash_dot;
- args.inumber = dp->i_ino;
- args.dp = dp;
- args.firstblock = iargs->firstblock;
- args.flist = iargs->flist;
- args.total = iargs->total;
- args.whichfork = XFS_DATA_FORK;
- args.trans = iargs->trans;
- args.justcheck = 0;
- args.addname = args.oknoent = 1;
- retval = xfs_dir_leaf_addname(&args);
- if (retval)
- goto out;
-
- args.name = "..";
- args.namelen = 2;
- args.hashval = xfs_dir_hash_dotdot;
- args.inumber = inumber;
- retval = xfs_dir_leaf_addname(&args);
- if (retval)
- goto out;
-
- sfe = &sf->list[0];
- for (i = 0; i < sf->hdr.count; i++) {
- args.name = (char *)(sfe->name);
- args.namelen = sfe->namelen;
- args.hashval = xfs_da_hashname((char *)(sfe->name),
- sfe->namelen);
- XFS_DIR_SF_GET_DIRINO(&sfe->inumber, &args.inumber);
- retval = xfs_dir_leaf_addname(&args);
- if (retval)
- goto out;
- sfe = XFS_DIR_SF_NEXTENTRY(sfe);
- }
- retval = 0;
-
-out:
- kmem_free(tmpbuffer, size);
- return retval;
-}
-
-STATIC int
-xfs_dir_shortform_compare(const void *a, const void *b)
-{
- xfs_dir_sf_sort_t *sa, *sb;
-
- sa = (xfs_dir_sf_sort_t *)a;
- sb = (xfs_dir_sf_sort_t *)b;
- if (sa->hash < sb->hash)
- return -1;
- else if (sa->hash > sb->hash)
- return 1;
- else
- return sa->entno - sb->entno;
-}
-
-/*
- * Copy out directory entries for getdents(), for shortform directories.
- */
-/*ARGSUSED*/
-int
-xfs_dir_shortform_getdents(xfs_inode_t *dp, uio_t *uio, int *eofp,
- xfs_dirent_t *dbp, xfs_dir_put_t put)
-{
- xfs_dir_shortform_t *sf;
- xfs_dir_sf_entry_t *sfe;
- int retval, i, sbsize, nsbuf, lastresid=0, want_entno;
- xfs_mount_t *mp;
- xfs_dahash_t cookhash, hash;
- xfs_dir_put_args_t p;
- xfs_dir_sf_sort_t *sbuf, *sbp;
-
- mp = dp->i_mount;
- sf = (xfs_dir_shortform_t *)dp->i_df.if_u1.if_data;
- cookhash = XFS_DA_COOKIE_HASH(mp, uio->uio_offset);
- want_entno = XFS_DA_COOKIE_ENTRY(mp, uio->uio_offset);
- nsbuf = sf->hdr.count + 2;
- sbsize = (nsbuf + 1) * sizeof(*sbuf);
- sbp = sbuf = kmem_alloc(sbsize, KM_SLEEP);
-
- xfs_dir_trace_g_du("sf: start", dp, uio);
-
- /*
- * Collect all the entries into the buffer.
- * Entry 0 is .
- */
- sbp->entno = 0;
- sbp->seqno = 0;
- sbp->hash = xfs_dir_hash_dot;
- sbp->ino = dp->i_ino;
- sbp->name = ".";
- sbp->namelen = 1;
- sbp++;
-
- /*
- * Entry 1 is ..
- */
- sbp->entno = 1;
- sbp->seqno = 0;
- sbp->hash = xfs_dir_hash_dotdot;
- sbp->ino = XFS_GET_DIR_INO8(sf->hdr.parent);
- sbp->name = "..";
- sbp->namelen = 2;
- sbp++;
-
- /*
- * Scan the directory data for the rest of the entries.
- */
- for (i = 0, sfe = &sf->list[0]; i < sf->hdr.count; i++) {
-
- if (unlikely(
- ((char *)sfe < (char *)sf) ||
- ((char *)sfe >= ((char *)sf + dp->i_df.if_bytes)))) {
- xfs_dir_trace_g_du("sf: corrupted", dp, uio);
- XFS_CORRUPTION_ERROR("xfs_dir_shortform_getdents",
- XFS_ERRLEVEL_LOW, mp, sfe);
- kmem_free(sbuf, sbsize);
- return XFS_ERROR(EFSCORRUPTED);
- }
-
- sbp->entno = i + 2;
- sbp->seqno = 0;
- sbp->hash = xfs_da_hashname((char *)sfe->name, sfe->namelen);
- sbp->ino = XFS_GET_DIR_INO8(sfe->inumber);
- sbp->name = (char *)sfe->name;
- sbp->namelen = sfe->namelen;
- sfe = XFS_DIR_SF_NEXTENTRY(sfe);
- sbp++;
- }
-
- /*
- * Sort the entries on hash then entno.
- */
- xfs_sort(sbuf, nsbuf, sizeof(*sbuf), xfs_dir_shortform_compare);
- /*
- * Stuff in last entry.
- */
- sbp->entno = nsbuf;
- sbp->hash = XFS_DA_MAXHASH;
- sbp->seqno = 0;
- /*
- * Figure out the sequence numbers in case there's a hash duplicate.
- */
- for (hash = sbuf->hash, sbp = sbuf + 1;
- sbp < &sbuf[nsbuf + 1]; sbp++) {
- if (sbp->hash == hash)
- sbp->seqno = sbp[-1].seqno + 1;
- else
- hash = sbp->hash;
- }
-
- /*
- * Set up put routine.
- */
- p.dbp = dbp;
- p.put = put;
- p.uio = uio;
-
- /*
- * Find our place.
- */
- for (sbp = sbuf; sbp < &sbuf[nsbuf + 1]; sbp++) {
- if (sbp->hash > cookhash ||
- (sbp->hash == cookhash && sbp->seqno >= want_entno))
- break;
- }
-
- /*
- * Did we fail to find anything? We stop at the last entry,
- * the one we put maxhash into.
- */
- if (sbp == &sbuf[nsbuf]) {
- kmem_free(sbuf, sbsize);
- xfs_dir_trace_g_du("sf: hash beyond end", dp, uio);
- uio->uio_offset = XFS_DA_MAKE_COOKIE(mp, 0, 0, XFS_DA_MAXHASH);
- *eofp = 1;
- return 0;
- }
-
- /*
- * Loop putting entries into the user buffer.
- */
- while (sbp < &sbuf[nsbuf]) {
- /*
- * Save the first resid in a run of equal-hashval entries
- * so that we can back them out if they don't all fit.
- */
- if (sbp->seqno == 0 || sbp == sbuf)
- lastresid = uio->uio_resid;
- XFS_PUT_COOKIE(p.cook, mp, 0, sbp[1].seqno, sbp[1].hash);
- p.ino = sbp->ino;
-#if XFS_BIG_INUMS
- p.ino += mp->m_inoadd;
-#endif
- p.name = sbp->name;
- p.namelen = sbp->namelen;
- retval = p.put(&p);
- if (!p.done) {
- uio->uio_offset =
- XFS_DA_MAKE_COOKIE(mp, 0, 0, sbp->hash);
- kmem_free(sbuf, sbsize);
- uio->uio_resid = lastresid;
- xfs_dir_trace_g_du("sf: E-O-B", dp, uio);
- return retval;
- }
- sbp++;
- }
- kmem_free(sbuf, sbsize);
- uio->uio_offset = p.cook.o;
- *eofp = 1;
- xfs_dir_trace_g_du("sf: E-O-F", dp, uio);
- return 0;
-}
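
The shortform getdents path above sorts the collected entries by (hash, entno) and then gives each member of a run of equal hash values a sequence number, so that a (hash, seqno) pair can serve as a restartable cookie even when several names collide on the same hash. A minimal standalone sketch of that numbering pass, using a hypothetical struct rather than the kernel's xfs_dir_sf_sort_t:

#include <stddef.h>
#include <stdint.h>

struct sort_ent {
	uint32_t hash;		/* name hash; array already sorted by hash */
	int	 seqno;		/* position within a run of equal hashes */
};

/* Number duplicates: the first entry of each hash run gets seqno 0. */
static void assign_seqnos(struct sort_ent *e, size_t n)
{
	size_t i;

	for (i = 0; i < n; i++) {
		if (i > 0 && e[i].hash == e[i - 1].hash)
			e[i].seqno = e[i - 1].seqno + 1;
		else
			e[i].seqno = 0;
	}
}
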
-
-/*
- * Look up a name in a shortform directory structure, replace the inode number.
- */
-int
-xfs_dir_shortform_replace(xfs_da_args_t *args)
-{
- xfs_dir_shortform_t *sf;
- xfs_dir_sf_entry_t *sfe;
- xfs_inode_t *dp;
- int i;
-
- dp = args->dp;
- ASSERT(dp->i_df.if_flags & XFS_IFINLINE);
- /*
- * Catch the case where the conversion from shortform to leaf
- * failed part way through.
- */
- if (dp->i_d.di_size < sizeof(xfs_dir_sf_hdr_t)) {
- ASSERT(XFS_FORCED_SHUTDOWN(dp->i_mount));
- return XFS_ERROR(EIO);
- }
- ASSERT(dp->i_df.if_bytes == dp->i_d.di_size);
- ASSERT(dp->i_df.if_u1.if_data != NULL);
- sf = (xfs_dir_shortform_t *)dp->i_df.if_u1.if_data;
- if (args->namelen == 2 &&
- args->name[0] == '.' && args->name[1] == '.') {
- /* XXX - replace assert? */
- XFS_DIR_SF_PUT_DIRINO(&args->inumber, &sf->hdr.parent);
- xfs_trans_log_inode(args->trans, dp, XFS_ILOG_DDATA);
- return 0;
- }
- ASSERT(args->namelen != 1 || args->name[0] != '.');
- sfe = &sf->list[0];
- for (i = sf->hdr.count-1; i >= 0; i--) {
- if (sfe->namelen == args->namelen &&
- sfe->name[0] == args->name[0] &&
- memcmp(args->name, sfe->name, args->namelen) == 0) {
- ASSERT(memcmp((char *)&args->inumber,
- (char *)&sfe->inumber, sizeof(xfs_ino_t)));
- XFS_DIR_SF_PUT_DIRINO(&args->inumber, &sfe->inumber);
- xfs_trans_log_inode(args->trans, dp, XFS_ILOG_DDATA);
- return 0;
- }
- sfe = XFS_DIR_SF_NEXTENTRY(sfe);
- }
- ASSERT(args->oknoent);
- return XFS_ERROR(ENOENT);
-}
-
-/*
- * Convert a leaf directory to the shortform structure.
- */
-int
-xfs_dir_leaf_to_shortform(xfs_da_args_t *iargs)
-{
- xfs_dir_leafblock_t *leaf;
- xfs_dir_leaf_hdr_t *hdr;
- xfs_dir_leaf_entry_t *entry;
- xfs_dir_leaf_name_t *namest;
- xfs_da_args_t args;
- xfs_inode_t *dp;
- xfs_ino_t parent = 0;
- char *tmpbuffer;
- int retval, i;
- xfs_dabuf_t *bp;
-
- dp = iargs->dp;
- tmpbuffer = kmem_alloc(XFS_LBSIZE(dp->i_mount), KM_SLEEP);
- ASSERT(tmpbuffer != NULL);
-
- retval = xfs_da_read_buf(iargs->trans, iargs->dp, 0, -1, &bp,
- XFS_DATA_FORK);
- if (retval)
- goto out;
- ASSERT(bp != NULL);
- memcpy(tmpbuffer, bp->data, XFS_LBSIZE(dp->i_mount));
- leaf = (xfs_dir_leafblock_t *)tmpbuffer;
- ASSERT(be16_to_cpu(leaf->hdr.info.magic) == XFS_DIR_LEAF_MAGIC);
- memset(bp->data, 0, XFS_LBSIZE(dp->i_mount));
-
- /*
- * Find and special case the parent inode number
- */
- hdr = &leaf->hdr;
- entry = &leaf->entries[0];
- for (i = INT_GET(hdr->count, ARCH_CONVERT)-1; i >= 0; entry++, i--) {
- namest = XFS_DIR_LEAF_NAMESTRUCT(leaf, INT_GET(entry->nameidx, ARCH_CONVERT));
- if ((entry->namelen == 2) &&
- (namest->name[0] == '.') &&
- (namest->name[1] == '.')) {
- XFS_DIR_SF_GET_DIRINO(&namest->inumber, &parent);
- entry->nameidx = 0;
- } else if ((entry->namelen == 1) && (namest->name[0] == '.')) {
- entry->nameidx = 0;
- }
- }
- retval = xfs_da_shrink_inode(iargs, 0, bp);
- if (retval)
- goto out;
- retval = xfs_dir_shortform_create(iargs, parent);
- if (retval)
- goto out;
-
- /*
- * Copy the rest of the filenames
- */
- entry = &leaf->entries[0];
- args.dp = dp;
- args.firstblock = iargs->firstblock;
- args.flist = iargs->flist;
- args.total = iargs->total;
- args.whichfork = XFS_DATA_FORK;
- args.trans = iargs->trans;
- args.justcheck = 0;
- args.addname = args.oknoent = 1;
- for (i = 0; i < INT_GET(hdr->count, ARCH_CONVERT); entry++, i++) {
- if (!entry->nameidx)
- continue;
- namest = XFS_DIR_LEAF_NAMESTRUCT(leaf, INT_GET(entry->nameidx, ARCH_CONVERT));
- args.name = (char *)(namest->name);
- args.namelen = entry->namelen;
- args.hashval = INT_GET(entry->hashval, ARCH_CONVERT);
- XFS_DIR_SF_GET_DIRINO(&namest->inumber, &args.inumber);
- xfs_dir_shortform_addname(&args);
- }
-
-out:
- kmem_free(tmpbuffer, XFS_LBSIZE(dp->i_mount));
- return retval;
-}
-
-/*
- * Convert from using a single leaf to a root node and a leaf.
- */
-int
-xfs_dir_leaf_to_node(xfs_da_args_t *args)
-{
- xfs_dir_leafblock_t *leaf;
- xfs_da_intnode_t *node;
- xfs_inode_t *dp;
- xfs_dabuf_t *bp1, *bp2;
- xfs_dablk_t blkno;
- int retval;
-
- dp = args->dp;
- retval = xfs_da_grow_inode(args, &blkno);
- ASSERT(blkno == 1);
- if (retval)
- return retval;
- retval = xfs_da_read_buf(args->trans, args->dp, 0, -1, &bp1,
- XFS_DATA_FORK);
- if (retval)
- return retval;
- ASSERT(bp1 != NULL);
- retval = xfs_da_get_buf(args->trans, args->dp, 1, -1, &bp2,
- XFS_DATA_FORK);
- if (retval) {
- xfs_da_buf_done(bp1);
- return retval;
- }
- ASSERT(bp2 != NULL);
- memcpy(bp2->data, bp1->data, XFS_LBSIZE(dp->i_mount));
- xfs_da_buf_done(bp1);
- xfs_da_log_buf(args->trans, bp2, 0, XFS_LBSIZE(dp->i_mount) - 1);
-
- /*
- * Set up the new root node.
- */
- retval = xfs_da_node_create(args, 0, 1, &bp1, XFS_DATA_FORK);
- if (retval) {
- xfs_da_buf_done(bp2);
- return retval;
- }
- node = bp1->data;
- leaf = bp2->data;
- ASSERT(be16_to_cpu(leaf->hdr.info.magic) == XFS_DIR_LEAF_MAGIC);
- node->btree[0].hashval = cpu_to_be32(
- INT_GET(leaf->entries[
- INT_GET(leaf->hdr.count, ARCH_CONVERT)-1].hashval, ARCH_CONVERT));
- xfs_da_buf_done(bp2);
- node->btree[0].before = cpu_to_be32(blkno);
- node->hdr.count = cpu_to_be16(1);
- xfs_da_log_buf(args->trans, bp1,
- XFS_DA_LOGRANGE(node, &node->btree[0], sizeof(node->btree[0])));
- xfs_da_buf_done(bp1);
-
- return retval;
-}
-
-
-/*========================================================================
- * Routines used for growing the Btree.
- *========================================================================*/
-
-/*
- * Create the initial contents of a leaf directory
- * or a leaf in a node directory.
- */
-STATIC int
-xfs_dir_leaf_create(xfs_da_args_t *args, xfs_dablk_t blkno, xfs_dabuf_t **bpp)
-{
- xfs_dir_leafblock_t *leaf;
- xfs_dir_leaf_hdr_t *hdr;
- xfs_inode_t *dp;
- xfs_dabuf_t *bp;
- int retval;
-
- dp = args->dp;
- ASSERT(dp != NULL);
- retval = xfs_da_get_buf(args->trans, dp, blkno, -1, &bp, XFS_DATA_FORK);
- if (retval)
- return retval;
- ASSERT(bp != NULL);
- leaf = bp->data;
- memset((char *)leaf, 0, XFS_LBSIZE(dp->i_mount));
- hdr = &leaf->hdr;
- hdr->info.magic = cpu_to_be16(XFS_DIR_LEAF_MAGIC);
- INT_SET(hdr->firstused, ARCH_CONVERT, XFS_LBSIZE(dp->i_mount));
- if (!hdr->firstused)
- INT_SET(hdr->firstused, ARCH_CONVERT, XFS_LBSIZE(dp->i_mount) - 1);
- INT_SET(hdr->freemap[0].base, ARCH_CONVERT, sizeof(xfs_dir_leaf_hdr_t));
- INT_SET(hdr->freemap[0].size, ARCH_CONVERT, INT_GET(hdr->firstused, ARCH_CONVERT) - INT_GET(hdr->freemap[0].base, ARCH_CONVERT));
-
- xfs_da_log_buf(args->trans, bp, 0, XFS_LBSIZE(dp->i_mount) - 1);
-
- *bpp = bp;
- return 0;
-}
-
-/*
- * Split the leaf node, rebalance, then add the new entry.
- */
-int
-xfs_dir_leaf_split(xfs_da_state_t *state, xfs_da_state_blk_t *oldblk,
- xfs_da_state_blk_t *newblk)
-{
- xfs_dablk_t blkno;
- xfs_da_args_t *args;
- int error;
-
- /*
- * Allocate space for a new leaf node.
- */
- args = state->args;
- ASSERT(args != NULL);
- ASSERT(oldblk->magic == XFS_DIR_LEAF_MAGIC);
- error = xfs_da_grow_inode(args, &blkno);
- if (error)
- return error;
- error = xfs_dir_leaf_create(args, blkno, &newblk->bp);
- if (error)
- return error;
- newblk->blkno = blkno;
- newblk->magic = XFS_DIR_LEAF_MAGIC;
-
- /*
- * Rebalance the entries across the two leaves.
- */
- xfs_dir_leaf_rebalance(state, oldblk, newblk);
- error = xfs_da_blk_link(state, oldblk, newblk);
- if (error)
- return error;
-
- /*
- * Insert the new entry in the correct block.
- */
- if (state->inleaf) {
- error = xfs_dir_leaf_add(oldblk->bp, args, oldblk->index);
- } else {
- error = xfs_dir_leaf_add(newblk->bp, args, newblk->index);
- }
-
- /*
- * Update last hashval in each block since we added the name.
- */
- oldblk->hashval = xfs_dir_leaf_lasthash(oldblk->bp, NULL);
- newblk->hashval = xfs_dir_leaf_lasthash(newblk->bp, NULL);
- return error;
-}
-
-/*
- * Add a name to the leaf directory structure.
- *
- * Must take into account fragmented leaves and leaves where the spacemap
- * has lost some freespace information (i.e. holes).
- */
-int
-xfs_dir_leaf_add(xfs_dabuf_t *bp, xfs_da_args_t *args, int index)
-{
- xfs_dir_leafblock_t *leaf;
- xfs_dir_leaf_hdr_t *hdr;
- xfs_dir_leaf_map_t *map;
- int tablesize, entsize, sum, i, tmp, error;
-
- leaf = bp->data;
- ASSERT(be16_to_cpu(leaf->hdr.info.magic) == XFS_DIR_LEAF_MAGIC);
- ASSERT((index >= 0) && (index <= INT_GET(leaf->hdr.count, ARCH_CONVERT)));
- hdr = &leaf->hdr;
- entsize = XFS_DIR_LEAF_ENTSIZE_BYNAME(args->namelen);
-
- /*
- * Search through freemap for first-fit on new name length.
- * (may need to figure in size of entry struct too)
- */
- tablesize = (INT_GET(hdr->count, ARCH_CONVERT) + 1) * (uint)sizeof(xfs_dir_leaf_entry_t)
- + (uint)sizeof(xfs_dir_leaf_hdr_t);
- map = &hdr->freemap[XFS_DIR_LEAF_MAPSIZE-1];
- for (sum = 0, i = XFS_DIR_LEAF_MAPSIZE-1; i >= 0; map--, i--) {
- if (tablesize > INT_GET(hdr->firstused, ARCH_CONVERT)) {
- sum += INT_GET(map->size, ARCH_CONVERT);
- continue;
- }
- if (!map->size)
- continue; /* no space in this map */
- tmp = entsize;
- if (INT_GET(map->base, ARCH_CONVERT) < INT_GET(hdr->firstused, ARCH_CONVERT))
- tmp += (uint)sizeof(xfs_dir_leaf_entry_t);
- if (INT_GET(map->size, ARCH_CONVERT) >= tmp) {
- if (!args->justcheck)
- xfs_dir_leaf_add_work(bp, args, index, i);
- return 0;
- }
- sum += INT_GET(map->size, ARCH_CONVERT);
- }
-
- /*
- * If there are no holes in the address space of the block,
- * and we don't have enough freespace, then compaction will do us
- * no good and we should just give up.
- */
- if (!hdr->holes && (sum < entsize))
- return XFS_ERROR(ENOSPC);
-
- /*
- * Compact the entries to coalesce free space.
- * Pass the justcheck flag so the checking pass can return
- * an error, without changing anything, if it won't fit.
- */
- error = xfs_dir_leaf_compact(args->trans, bp,
- args->total == 0 ?
- entsize +
- (uint)sizeof(xfs_dir_leaf_entry_t) : 0,
- args->justcheck);
- if (error)
- return error;
- /*
- * After compaction, the block is guaranteed to have only one
- * free region, in freemap[0]. If it is not big enough, give up.
- */
- if (INT_GET(hdr->freemap[0].size, ARCH_CONVERT) <
- (entsize + (uint)sizeof(xfs_dir_leaf_entry_t)))
- return XFS_ERROR(ENOSPC);
-
- if (!args->justcheck)
- xfs_dir_leaf_add_work(bp, args, index, 0);
- return 0;
-}
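
xfs_dir_leaf_add above does a first-fit scan of the small freemap and only falls back to compaction when no single run is big enough but the summed free space might still cover the new entry. A simplified sketch of that decision, with hypothetical types rather than the on-disk xfs_dir_leaf_map_t:

#include <stdint.h>

#define NFREE	3			/* mirrors XFS_DIR_LEAF_MAPSIZE */

struct freerun { uint16_t base, size; };

/*
 * Return the index of a free run large enough for 'need' bytes, or -1.
 * When -1 is returned, *try_compact says whether the total free space
 * could still hold the entry after a compaction pass.
 */
static int first_fit(const struct freerun map[NFREE], uint16_t need,
		     int *try_compact)
{
	unsigned int sum = 0;
	int i;

	for (i = 0; i < NFREE; i++) {
		if (map[i].size >= need)
			return i;		/* fits as-is */
		sum += map[i].size;
	}
	*try_compact = (sum >= need);
	return -1;
}
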
-
-/*
- * Add a name to a leaf directory structure.
- */
-STATIC void
-xfs_dir_leaf_add_work(xfs_dabuf_t *bp, xfs_da_args_t *args, int index,
- int mapindex)
-{
- xfs_dir_leafblock_t *leaf;
- xfs_dir_leaf_hdr_t *hdr;
- xfs_dir_leaf_entry_t *entry;
- xfs_dir_leaf_name_t *namest;
- xfs_dir_leaf_map_t *map;
- /* REFERENCED */
- xfs_mount_t *mp;
- int tmp, i;
-
- leaf = bp->data;
- ASSERT(be16_to_cpu(leaf->hdr.info.magic) == XFS_DIR_LEAF_MAGIC);
- hdr = &leaf->hdr;
- ASSERT((mapindex >= 0) && (mapindex < XFS_DIR_LEAF_MAPSIZE));
- ASSERT((index >= 0) && (index <= INT_GET(hdr->count, ARCH_CONVERT)));
-
- /*
- * Force open some space in the entry array and fill it in.
- */
- entry = &leaf->entries[index];
- if (index < INT_GET(hdr->count, ARCH_CONVERT)) {
- tmp = INT_GET(hdr->count, ARCH_CONVERT) - index;
- tmp *= (uint)sizeof(xfs_dir_leaf_entry_t);
- memmove(entry + 1, entry, tmp);
- xfs_da_log_buf(args->trans, bp,
- XFS_DA_LOGRANGE(leaf, entry, tmp + (uint)sizeof(*entry)));
- }
- INT_MOD(hdr->count, ARCH_CONVERT, +1);
-
- /*
- * Allocate space for the new string (at the end of the run).
- */
- map = &hdr->freemap[mapindex];
- mp = args->trans->t_mountp;
- ASSERT(INT_GET(map->base, ARCH_CONVERT) < XFS_LBSIZE(mp));
- ASSERT(INT_GET(map->size, ARCH_CONVERT) >= XFS_DIR_LEAF_ENTSIZE_BYNAME(args->namelen));
- ASSERT(INT_GET(map->size, ARCH_CONVERT) < XFS_LBSIZE(mp));
- INT_MOD(map->size, ARCH_CONVERT, -(XFS_DIR_LEAF_ENTSIZE_BYNAME(args->namelen)));
- INT_SET(entry->nameidx, ARCH_CONVERT, INT_GET(map->base, ARCH_CONVERT) + INT_GET(map->size, ARCH_CONVERT));
- INT_SET(entry->hashval, ARCH_CONVERT, args->hashval);
- entry->namelen = args->namelen;
- xfs_da_log_buf(args->trans, bp,
- XFS_DA_LOGRANGE(leaf, entry, sizeof(*entry)));
-
- /*
- * Copy the string and inode number into the new space.
- */
- namest = XFS_DIR_LEAF_NAMESTRUCT(leaf, INT_GET(entry->nameidx, ARCH_CONVERT));
- XFS_DIR_SF_PUT_DIRINO(&args->inumber, &namest->inumber);
- memcpy(namest->name, args->name, args->namelen);
- xfs_da_log_buf(args->trans, bp,
- XFS_DA_LOGRANGE(leaf, namest, XFS_DIR_LEAF_ENTSIZE_BYENTRY(entry)));
-
- /*
- * Update the control info for this leaf node
- */
- if (INT_GET(entry->nameidx, ARCH_CONVERT) < INT_GET(hdr->firstused, ARCH_CONVERT))
- INT_COPY(hdr->firstused, entry->nameidx, ARCH_CONVERT);
- ASSERT(INT_GET(hdr->firstused, ARCH_CONVERT) >= ((INT_GET(hdr->count, ARCH_CONVERT)*sizeof(*entry))+sizeof(*hdr)));
- tmp = (INT_GET(hdr->count, ARCH_CONVERT)-1) * (uint)sizeof(xfs_dir_leaf_entry_t)
- + (uint)sizeof(xfs_dir_leaf_hdr_t);
- map = &hdr->freemap[0];
- for (i = 0; i < XFS_DIR_LEAF_MAPSIZE; map++, i++) {
- if (INT_GET(map->base, ARCH_CONVERT) == tmp) {
- INT_MOD(map->base, ARCH_CONVERT, (uint)sizeof(xfs_dir_leaf_entry_t));
- INT_MOD(map->size, ARCH_CONVERT, -((uint)sizeof(xfs_dir_leaf_entry_t)));
- }
- }
- INT_MOD(hdr->namebytes, ARCH_CONVERT, args->namelen);
- xfs_da_log_buf(args->trans, bp,
- XFS_DA_LOGRANGE(leaf, hdr, sizeof(*hdr)));
-}
-
-/*
- * Garbage collect a leaf directory block by copying it to a new buffer.
- */
-STATIC int
-xfs_dir_leaf_compact(xfs_trans_t *trans, xfs_dabuf_t *bp, int musthave,
- int justcheck)
-{
- xfs_dir_leafblock_t *leaf_s, *leaf_d;
- xfs_dir_leaf_hdr_t *hdr_s, *hdr_d;
- xfs_mount_t *mp;
- char *tmpbuffer;
- char *tmpbuffer2=NULL;
- int rval;
- int lbsize;
-
- mp = trans->t_mountp;
- lbsize = XFS_LBSIZE(mp);
- tmpbuffer = kmem_alloc(lbsize, KM_SLEEP);
- ASSERT(tmpbuffer != NULL);
- memcpy(tmpbuffer, bp->data, lbsize);
-
- /*
- * Make a second copy in case xfs_dir_leaf_moveents()
- * below destroys the original.
- */
- if (musthave || justcheck) {
- tmpbuffer2 = kmem_alloc(lbsize, KM_SLEEP);
- memcpy(tmpbuffer2, bp->data, lbsize);
- }
- memset(bp->data, 0, lbsize);
-
- /*
- * Copy basic information
- */
- leaf_s = (xfs_dir_leafblock_t *)tmpbuffer;
- leaf_d = bp->data;
- hdr_s = &leaf_s->hdr;
- hdr_d = &leaf_d->hdr;
- hdr_d->info = hdr_s->info; /* struct copy */
- INT_SET(hdr_d->firstused, ARCH_CONVERT, lbsize);
- if (!hdr_d->firstused)
- INT_SET(hdr_d->firstused, ARCH_CONVERT, lbsize - 1);
- hdr_d->namebytes = 0;
- hdr_d->count = 0;
- hdr_d->holes = 0;
- INT_SET(hdr_d->freemap[0].base, ARCH_CONVERT, sizeof(xfs_dir_leaf_hdr_t));
- INT_SET(hdr_d->freemap[0].size, ARCH_CONVERT, INT_GET(hdr_d->firstused, ARCH_CONVERT) - INT_GET(hdr_d->freemap[0].base, ARCH_CONVERT));
-
- /*
-	 * Copy all entries in the same (sorted) order,
- * but allocate filenames packed and in sequence.
- * This changes the source (leaf_s) as well.
- */
- xfs_dir_leaf_moveents(leaf_s, 0, leaf_d, 0, (int)INT_GET(hdr_s->count, ARCH_CONVERT), mp);
-
- if (musthave && INT_GET(hdr_d->freemap[0].size, ARCH_CONVERT) < musthave)
- rval = XFS_ERROR(ENOSPC);
- else
- rval = 0;
-
- if (justcheck || rval == ENOSPC) {
- ASSERT(tmpbuffer2);
- memcpy(bp->data, tmpbuffer2, lbsize);
- } else {
- xfs_da_log_buf(trans, bp, 0, lbsize - 1);
- }
-
- kmem_free(tmpbuffer, lbsize);
- if (musthave || justcheck)
- kmem_free(tmpbuffer2, lbsize);
- return rval;
-}
-
-/*
- * Redistribute the directory entries between two leaf nodes,
- * taking into account the size of the new entry.
- *
- * NOTE: if new block is empty, then it will get the upper half of old block.
- */
-STATIC void
-xfs_dir_leaf_rebalance(xfs_da_state_t *state, xfs_da_state_blk_t *blk1,
- xfs_da_state_blk_t *blk2)
-{
- xfs_da_state_blk_t *tmp_blk;
- xfs_dir_leafblock_t *leaf1, *leaf2;
- xfs_dir_leaf_hdr_t *hdr1, *hdr2;
- int count, totallen, max, space, swap;
-
- /*
- * Set up environment.
- */
- ASSERT(blk1->magic == XFS_DIR_LEAF_MAGIC);
- ASSERT(blk2->magic == XFS_DIR_LEAF_MAGIC);
- leaf1 = blk1->bp->data;
- leaf2 = blk2->bp->data;
- ASSERT(be16_to_cpu(leaf1->hdr.info.magic) == XFS_DIR_LEAF_MAGIC);
- ASSERT(be16_to_cpu(leaf2->hdr.info.magic) == XFS_DIR_LEAF_MAGIC);
-
- /*
- * Check ordering of blocks, reverse if it makes things simpler.
- */
- swap = 0;
- if (xfs_dir_leaf_order(blk1->bp, blk2->bp)) {
- tmp_blk = blk1;
- blk1 = blk2;
- blk2 = tmp_blk;
- leaf1 = blk1->bp->data;
- leaf2 = blk2->bp->data;
- swap = 1;
- }
- hdr1 = &leaf1->hdr;
- hdr2 = &leaf2->hdr;
-
- /*
- * Examine entries until we reduce the absolute difference in
- * byte usage between the two blocks to a minimum. Then get
- * the direction to copy and the number of elements to move.
- */
- state->inleaf = xfs_dir_leaf_figure_balance(state, blk1, blk2,
- &count, &totallen);
- if (swap)
- state->inleaf = !state->inleaf;
-
- /*
- * Move any entries required from leaf to leaf:
- */
- if (count < INT_GET(hdr1->count, ARCH_CONVERT)) {
- /*
- * Figure the total bytes to be added to the destination leaf.
- */
- count = INT_GET(hdr1->count, ARCH_CONVERT) - count; /* number entries being moved */
- space = INT_GET(hdr1->namebytes, ARCH_CONVERT) - totallen;
- space += count * ((uint)sizeof(xfs_dir_leaf_name_t)-1);
- space += count * (uint)sizeof(xfs_dir_leaf_entry_t);
-
- /*
- * leaf2 is the destination, compact it if it looks tight.
- */
- max = INT_GET(hdr2->firstused, ARCH_CONVERT) - (uint)sizeof(xfs_dir_leaf_hdr_t);
- max -= INT_GET(hdr2->count, ARCH_CONVERT) * (uint)sizeof(xfs_dir_leaf_entry_t);
- if (space > max) {
- xfs_dir_leaf_compact(state->args->trans, blk2->bp,
- 0, 0);
- }
-
- /*
- * Move high entries from leaf1 to low end of leaf2.
- */
- xfs_dir_leaf_moveents(leaf1, INT_GET(hdr1->count, ARCH_CONVERT) - count,
- leaf2, 0, count, state->mp);
-
- xfs_da_log_buf(state->args->trans, blk1->bp, 0,
- state->blocksize-1);
- xfs_da_log_buf(state->args->trans, blk2->bp, 0,
- state->blocksize-1);
-
- } else if (count > INT_GET(hdr1->count, ARCH_CONVERT)) {
- /*
- * Figure the total bytes to be added to the destination leaf.
- */
- count -= INT_GET(hdr1->count, ARCH_CONVERT); /* number entries being moved */
- space = totallen - INT_GET(hdr1->namebytes, ARCH_CONVERT);
- space += count * ((uint)sizeof(xfs_dir_leaf_name_t)-1);
- space += count * (uint)sizeof(xfs_dir_leaf_entry_t);
-
- /*
- * leaf1 is the destination, compact it if it looks tight.
- */
- max = INT_GET(hdr1->firstused, ARCH_CONVERT) - (uint)sizeof(xfs_dir_leaf_hdr_t);
- max -= INT_GET(hdr1->count, ARCH_CONVERT) * (uint)sizeof(xfs_dir_leaf_entry_t);
- if (space > max) {
- xfs_dir_leaf_compact(state->args->trans, blk1->bp,
- 0, 0);
- }
-
- /*
- * Move low entries from leaf2 to high end of leaf1.
- */
- xfs_dir_leaf_moveents(leaf2, 0, leaf1, (int)INT_GET(hdr1->count, ARCH_CONVERT),
- count, state->mp);
-
- xfs_da_log_buf(state->args->trans, blk1->bp, 0,
- state->blocksize-1);
- xfs_da_log_buf(state->args->trans, blk2->bp, 0,
- state->blocksize-1);
- }
-
- /*
- * Copy out last hashval in each block for B-tree code.
- */
- blk1->hashval = INT_GET(leaf1->entries[ INT_GET(leaf1->hdr.count, ARCH_CONVERT)-1 ].hashval, ARCH_CONVERT);
- blk2->hashval = INT_GET(leaf2->entries[ INT_GET(leaf2->hdr.count, ARCH_CONVERT)-1 ].hashval, ARCH_CONVERT);
-
- /*
- * Adjust the expected index for insertion.
- * GROT: this doesn't work unless blk2 was originally empty.
- */
- if (!state->inleaf) {
- blk2->index = blk1->index - INT_GET(leaf1->hdr.count, ARCH_CONVERT);
- }
-}
-
-/*
- * Examine entries until we reduce the absolute difference in
- * byte usage between the two blocks to a minimum.
- * GROT: Is this really necessary? With other than a 512 byte blocksize,
- * GROT: there will always be enough room in either block for a new entry.
- * GROT: Do a double-split for this case?
- */
-STATIC int
-xfs_dir_leaf_figure_balance(xfs_da_state_t *state,
- xfs_da_state_blk_t *blk1,
- xfs_da_state_blk_t *blk2,
- int *countarg, int *namebytesarg)
-{
- xfs_dir_leafblock_t *leaf1, *leaf2;
- xfs_dir_leaf_hdr_t *hdr1, *hdr2;
- xfs_dir_leaf_entry_t *entry;
- int count, max, totallen, half;
- int lastdelta, foundit, tmp;
-
- /*
- * Set up environment.
- */
- leaf1 = blk1->bp->data;
- leaf2 = blk2->bp->data;
- hdr1 = &leaf1->hdr;
- hdr2 = &leaf2->hdr;
- foundit = 0;
- totallen = 0;
-
- /*
- * Examine entries until we reduce the absolute difference in
- * byte usage between the two blocks to a minimum.
- */
- max = INT_GET(hdr1->count, ARCH_CONVERT) + INT_GET(hdr2->count, ARCH_CONVERT);
- half = (max+1) * (uint)(sizeof(*entry)+sizeof(xfs_dir_leaf_entry_t)-1);
- half += INT_GET(hdr1->namebytes, ARCH_CONVERT) + INT_GET(hdr2->namebytes, ARCH_CONVERT) + state->args->namelen;
- half /= 2;
- lastdelta = state->blocksize;
- entry = &leaf1->entries[0];
- for (count = 0; count < max; entry++, count++) {
-
-#define XFS_DIR_ABS(A) (((A) < 0) ? -(A) : (A))
- /*
- * The new entry is in the first block, account for it.
- */
- if (count == blk1->index) {
- tmp = totallen + (uint)sizeof(*entry)
- + XFS_DIR_LEAF_ENTSIZE_BYNAME(state->args->namelen);
- if (XFS_DIR_ABS(half - tmp) > lastdelta)
- break;
- lastdelta = XFS_DIR_ABS(half - tmp);
- totallen = tmp;
- foundit = 1;
- }
-
- /*
- * Wrap around into the second block if necessary.
- */
- if (count == INT_GET(hdr1->count, ARCH_CONVERT)) {
- leaf1 = leaf2;
- entry = &leaf1->entries[0];
- }
-
- /*
- * Figure out if next leaf entry would be too much.
- */
- tmp = totallen + (uint)sizeof(*entry)
- + XFS_DIR_LEAF_ENTSIZE_BYENTRY(entry);
- if (XFS_DIR_ABS(half - tmp) > lastdelta)
- break;
- lastdelta = XFS_DIR_ABS(half - tmp);
- totallen = tmp;
-#undef XFS_DIR_ABS
- }
-
- /*
- * Calculate the number of namebytes that will end up in lower block.
- * If new entry not in lower block, fix up the count.
- */
- totallen -=
- count * (uint)(sizeof(*entry)+sizeof(xfs_dir_leaf_entry_t)-1);
- if (foundit) {
- totallen -= (sizeof(*entry)+sizeof(xfs_dir_leaf_entry_t)-1) +
- state->args->namelen;
- }
-
- *countarg = count;
- *namebytesarg = totallen;
- return foundit;
-}
-
-/*========================================================================
- * Routines used for shrinking the Btree.
- *========================================================================*/
-
-/*
- * Check a leaf block and its neighbors to see if the block should be
- * collapsed into one or the other neighbor. Always keep the block
- * with the smaller block number.
- * If the current block is over 50% full, don't try to join it, return 0.
- * If the block is empty, fill in the state structure and return 2.
- * If it can be collapsed, fill in the state structure and return 1.
- * If nothing can be done, return 0.
- */
-int
-xfs_dir_leaf_toosmall(xfs_da_state_t *state, int *action)
-{
- xfs_dir_leafblock_t *leaf;
- xfs_da_state_blk_t *blk;
- xfs_da_blkinfo_t *info;
- int count, bytes, forward, error, retval, i;
- xfs_dablk_t blkno;
- xfs_dabuf_t *bp;
-
- /*
- * Check for the degenerate case of the block being over 50% full.
- * If so, it's not worth even looking to see if we might be able
- * to coalesce with a sibling.
- */
- blk = &state->path.blk[ state->path.active-1 ];
- info = blk->bp->data;
- ASSERT(be16_to_cpu(info->magic) == XFS_DIR_LEAF_MAGIC);
- leaf = (xfs_dir_leafblock_t *)info;
- count = INT_GET(leaf->hdr.count, ARCH_CONVERT);
- bytes = (uint)sizeof(xfs_dir_leaf_hdr_t) +
- count * (uint)sizeof(xfs_dir_leaf_entry_t) +
- count * ((uint)sizeof(xfs_dir_leaf_name_t)-1) +
- INT_GET(leaf->hdr.namebytes, ARCH_CONVERT);
- if (bytes > (state->blocksize >> 1)) {
- *action = 0; /* blk over 50%, don't try to join */
- return 0;
- }
-
- /*
- * Check for the degenerate case of the block being empty.
- * If the block is empty, we'll simply delete it, no need to
- * coalesce it with a sibling block. We choose (arbitrarily)
- * to merge with the forward block unless it is NULL.
- */
- if (count == 0) {
- /*
- * Make altpath point to the block we want to keep and
- * path point to the block we want to drop (this one).
- */
- forward = (info->forw != 0);
- memcpy(&state->altpath, &state->path, sizeof(state->path));
- error = xfs_da_path_shift(state, &state->altpath, forward,
- 0, &retval);
- if (error)
- return error;
- if (retval) {
- *action = 0;
- } else {
- *action = 2;
- }
- return 0;
- }
-
- /*
- * Examine each sibling block to see if we can coalesce with
- * at least 25% free space to spare. We need to figure out
- * whether to merge with the forward or the backward block.
- * We prefer coalescing with the lower numbered sibling so as
- * to shrink a directory over time.
- */
- forward = (be32_to_cpu(info->forw) < be32_to_cpu(info->back)); /* start with smaller blk num */
- for (i = 0; i < 2; forward = !forward, i++) {
- if (forward)
- blkno = be32_to_cpu(info->forw);
- else
- blkno = be32_to_cpu(info->back);
- if (blkno == 0)
- continue;
- error = xfs_da_read_buf(state->args->trans, state->args->dp,
- blkno, -1, &bp,
- XFS_DATA_FORK);
- if (error)
- return error;
- ASSERT(bp != NULL);
-
- leaf = (xfs_dir_leafblock_t *)info;
- count = INT_GET(leaf->hdr.count, ARCH_CONVERT);
- bytes = state->blocksize - (state->blocksize>>2);
- bytes -= INT_GET(leaf->hdr.namebytes, ARCH_CONVERT);
- leaf = bp->data;
- ASSERT(be16_to_cpu(leaf->hdr.info.magic) == XFS_DIR_LEAF_MAGIC);
- count += INT_GET(leaf->hdr.count, ARCH_CONVERT);
- bytes -= INT_GET(leaf->hdr.namebytes, ARCH_CONVERT);
- bytes -= count * ((uint)sizeof(xfs_dir_leaf_name_t) - 1);
- bytes -= count * (uint)sizeof(xfs_dir_leaf_entry_t);
- bytes -= (uint)sizeof(xfs_dir_leaf_hdr_t);
- if (bytes >= 0)
- break; /* fits with at least 25% to spare */
-
- xfs_da_brelse(state->args->trans, bp);
- }
- if (i >= 2) {
- *action = 0;
- return 0;
- }
- xfs_da_buf_done(bp);
-
- /*
- * Make altpath point to the block we want to keep (the lower
- * numbered block) and path point to the block we want to drop.
- */
- memcpy(&state->altpath, &state->path, sizeof(state->path));
- if (blkno < blk->blkno) {
- error = xfs_da_path_shift(state, &state->altpath, forward,
- 0, &retval);
- } else {
- error = xfs_da_path_shift(state, &state->path, forward,
- 0, &retval);
- }
- if (error)
- return error;
- if (retval) {
- *action = 0;
- } else {
- *action = 1;
- }
- return 0;
-}
-
-/*
- * Remove a name from the leaf directory structure.
- *
- * Return 1 if leaf is less than 37% full, 0 if >= 37% full.
- * If two leaves are 37% full, when combined they will leave 25% free.
- */
-int
-xfs_dir_leaf_remove(xfs_trans_t *trans, xfs_dabuf_t *bp, int index)
-{
- xfs_dir_leafblock_t *leaf;
- xfs_dir_leaf_hdr_t *hdr;
- xfs_dir_leaf_map_t *map;
- xfs_dir_leaf_entry_t *entry;
- xfs_dir_leaf_name_t *namest;
- int before, after, smallest, entsize;
- int tablesize, tmp, i;
- xfs_mount_t *mp;
-
- leaf = bp->data;
- ASSERT(be16_to_cpu(leaf->hdr.info.magic) == XFS_DIR_LEAF_MAGIC);
- hdr = &leaf->hdr;
- mp = trans->t_mountp;
- ASSERT((INT_GET(hdr->count, ARCH_CONVERT) > 0) && (INT_GET(hdr->count, ARCH_CONVERT) < (XFS_LBSIZE(mp)/8)));
- ASSERT((index >= 0) && (index < INT_GET(hdr->count, ARCH_CONVERT)));
- ASSERT(INT_GET(hdr->firstused, ARCH_CONVERT) >= ((INT_GET(hdr->count, ARCH_CONVERT)*sizeof(*entry))+sizeof(*hdr)));
- entry = &leaf->entries[index];
- ASSERT(INT_GET(entry->nameidx, ARCH_CONVERT) >= INT_GET(hdr->firstused, ARCH_CONVERT));
- ASSERT(INT_GET(entry->nameidx, ARCH_CONVERT) < XFS_LBSIZE(mp));
-
- /*
- * Scan through free region table:
- * check for adjacency of free'd entry with an existing one,
- * find smallest free region in case we need to replace it,
- * adjust any map that borders the entry table,
- */
- tablesize = INT_GET(hdr->count, ARCH_CONVERT) * (uint)sizeof(xfs_dir_leaf_entry_t)
- + (uint)sizeof(xfs_dir_leaf_hdr_t);
- map = &hdr->freemap[0];
- tmp = INT_GET(map->size, ARCH_CONVERT);
- before = after = -1;
- smallest = XFS_DIR_LEAF_MAPSIZE - 1;
- entsize = XFS_DIR_LEAF_ENTSIZE_BYENTRY(entry);
- for (i = 0; i < XFS_DIR_LEAF_MAPSIZE; map++, i++) {
- ASSERT(INT_GET(map->base, ARCH_CONVERT) < XFS_LBSIZE(mp));
- ASSERT(INT_GET(map->size, ARCH_CONVERT) < XFS_LBSIZE(mp));
- if (INT_GET(map->base, ARCH_CONVERT) == tablesize) {
- INT_MOD(map->base, ARCH_CONVERT, -((uint)sizeof(xfs_dir_leaf_entry_t)));
- INT_MOD(map->size, ARCH_CONVERT, (uint)sizeof(xfs_dir_leaf_entry_t));
- }
-
- if ((INT_GET(map->base, ARCH_CONVERT) + INT_GET(map->size, ARCH_CONVERT)) == INT_GET(entry->nameidx, ARCH_CONVERT)) {
- before = i;
- } else if (INT_GET(map->base, ARCH_CONVERT) == (INT_GET(entry->nameidx, ARCH_CONVERT) + entsize)) {
- after = i;
- } else if (INT_GET(map->size, ARCH_CONVERT) < tmp) {
- tmp = INT_GET(map->size, ARCH_CONVERT);
- smallest = i;
- }
- }
-
- /*
- * Coalesce adjacent freemap regions,
- * or replace the smallest region.
- */
- if ((before >= 0) || (after >= 0)) {
- if ((before >= 0) && (after >= 0)) {
- map = &hdr->freemap[before];
- INT_MOD(map->size, ARCH_CONVERT, entsize);
- INT_MOD(map->size, ARCH_CONVERT, INT_GET(hdr->freemap[after].size, ARCH_CONVERT));
- hdr->freemap[after].base = 0;
- hdr->freemap[after].size = 0;
- } else if (before >= 0) {
- map = &hdr->freemap[before];
- INT_MOD(map->size, ARCH_CONVERT, entsize);
- } else {
- map = &hdr->freemap[after];
- INT_COPY(map->base, entry->nameidx, ARCH_CONVERT);
- INT_MOD(map->size, ARCH_CONVERT, entsize);
- }
- } else {
- /*
- * Replace smallest region (if it is smaller than free'd entry)
- */
- map = &hdr->freemap[smallest];
- if (INT_GET(map->size, ARCH_CONVERT) < entsize) {
- INT_COPY(map->base, entry->nameidx, ARCH_CONVERT);
- INT_SET(map->size, ARCH_CONVERT, entsize);
- }
- }
-
- /*
- * Did we remove the first entry?
- */
- if (INT_GET(entry->nameidx, ARCH_CONVERT) == INT_GET(hdr->firstused, ARCH_CONVERT))
- smallest = 1;
- else
- smallest = 0;
-
- /*
- * Compress the remaining entries and zero out the removed stuff.
- */
- namest = XFS_DIR_LEAF_NAMESTRUCT(leaf, INT_GET(entry->nameidx, ARCH_CONVERT));
- memset((char *)namest, 0, entsize);
- xfs_da_log_buf(trans, bp, XFS_DA_LOGRANGE(leaf, namest, entsize));
-
- INT_MOD(hdr->namebytes, ARCH_CONVERT, -(entry->namelen));
- tmp = (INT_GET(hdr->count, ARCH_CONVERT) - index) * (uint)sizeof(xfs_dir_leaf_entry_t);
- memmove(entry, entry + 1, tmp);
- INT_MOD(hdr->count, ARCH_CONVERT, -1);
- xfs_da_log_buf(trans, bp,
- XFS_DA_LOGRANGE(leaf, entry, tmp + (uint)sizeof(*entry)));
- entry = &leaf->entries[INT_GET(hdr->count, ARCH_CONVERT)];
- memset((char *)entry, 0, sizeof(xfs_dir_leaf_entry_t));
-
- /*
- * If we removed the first entry, re-find the first used byte
- * in the name area. Note that if the entry was the "firstused",
- * then we don't have a "hole" in our block resulting from
- * removing the name.
- */
- if (smallest) {
- tmp = XFS_LBSIZE(mp);
- entry = &leaf->entries[0];
- for (i = INT_GET(hdr->count, ARCH_CONVERT)-1; i >= 0; entry++, i--) {
- ASSERT(INT_GET(entry->nameidx, ARCH_CONVERT) >= INT_GET(hdr->firstused, ARCH_CONVERT));
- ASSERT(INT_GET(entry->nameidx, ARCH_CONVERT) < XFS_LBSIZE(mp));
- if (INT_GET(entry->nameidx, ARCH_CONVERT) < tmp)
- tmp = INT_GET(entry->nameidx, ARCH_CONVERT);
- }
- INT_SET(hdr->firstused, ARCH_CONVERT, tmp);
- if (!hdr->firstused)
- INT_SET(hdr->firstused, ARCH_CONVERT, tmp - 1);
- } else {
- hdr->holes = 1; /* mark as needing compaction */
- }
-
- xfs_da_log_buf(trans, bp, XFS_DA_LOGRANGE(leaf, hdr, sizeof(*hdr)));
-
- /*
-	 * Check if the leaf is less than 37% full; if so, the caller
-	 * may want to "join" the leaf with a sibling.
- */
- tmp = (uint)sizeof(xfs_dir_leaf_hdr_t);
- tmp += INT_GET(leaf->hdr.count, ARCH_CONVERT) * (uint)sizeof(xfs_dir_leaf_entry_t);
- tmp += INT_GET(leaf->hdr.count, ARCH_CONVERT) * ((uint)sizeof(xfs_dir_leaf_name_t) - 1);
- tmp += INT_GET(leaf->hdr.namebytes, ARCH_CONVERT);
- if (tmp < mp->m_dir_magicpct)
- return 1; /* leaf is < 37% full */
- return 0;
-}
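
The freemap maintenance in xfs_dir_leaf_remove merges the freed name bytes into an adjacent free run when they touch, and otherwise displaces the smallest recorded run if the new one is larger. The same bookkeeping in a standalone form (illustrative only; the types are hypothetical):

#include <stdint.h>

#define NFREE	3			/* mirrors XFS_DIR_LEAF_MAPSIZE */

struct freerun { uint16_t base, size; };

/*
 * Fold a freed region [base, base + size) into a tiny RLE free-space
 * table: coalesce with a neighbouring run when possible, otherwise
 * replace the smallest run if the freed region is bigger.
 */
static void free_region(struct freerun map[NFREE], uint16_t base, uint16_t size)
{
	int i, before = -1, after = -1, smallest = 0;

	for (i = 0; i < NFREE; i++) {
		if (map[i].base + map[i].size == base)
			before = i;		/* run ends where we start */
		else if (map[i].base == base + size)
			after = i;		/* run starts where we end */
		if (map[i].size < map[smallest].size)
			smallest = i;
	}
	if (before >= 0 && after >= 0) {
		map[before].size += size + map[after].size;
		map[after].base = map[after].size = 0;
	} else if (before >= 0) {
		map[before].size += size;
	} else if (after >= 0) {
		map[after].base = base;
		map[after].size += size;
	} else if (map[smallest].size < size) {
		map[smallest].base = base;
		map[smallest].size = size;
	}
}
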
-
-/*
- * Move all the directory entries from drop_leaf into save_leaf.
- */
-void
-xfs_dir_leaf_unbalance(xfs_da_state_t *state, xfs_da_state_blk_t *drop_blk,
- xfs_da_state_blk_t *save_blk)
-{
- xfs_dir_leafblock_t *drop_leaf, *save_leaf, *tmp_leaf;
- xfs_dir_leaf_hdr_t *drop_hdr, *save_hdr, *tmp_hdr;
- xfs_mount_t *mp;
- char *tmpbuffer;
-
- /*
- * Set up environment.
- */
- mp = state->mp;
- ASSERT(drop_blk->magic == XFS_DIR_LEAF_MAGIC);
- ASSERT(save_blk->magic == XFS_DIR_LEAF_MAGIC);
- drop_leaf = drop_blk->bp->data;
- save_leaf = save_blk->bp->data;
- ASSERT(be16_to_cpu(drop_leaf->hdr.info.magic) == XFS_DIR_LEAF_MAGIC);
- ASSERT(be16_to_cpu(save_leaf->hdr.info.magic) == XFS_DIR_LEAF_MAGIC);
- drop_hdr = &drop_leaf->hdr;
- save_hdr = &save_leaf->hdr;
-
- /*
- * Save last hashval from dying block for later Btree fixup.
- */
- drop_blk->hashval = INT_GET(drop_leaf->entries[ drop_leaf->hdr.count-1 ].hashval, ARCH_CONVERT);
-
- /*
-	 * Check whether we need a temp buffer, or can do it in place.
-	 * Note that we don't check "leaf" for holes because we will
-	 * always be dropping it; toosmall() decided that for us already.
- */
- if (save_hdr->holes == 0) {
- /*
- * dest leaf has no holes, so we add there. May need
- * to make some room in the entry array.
- */
- if (xfs_dir_leaf_order(save_blk->bp, drop_blk->bp)) {
- xfs_dir_leaf_moveents(drop_leaf, 0, save_leaf, 0,
- (int)INT_GET(drop_hdr->count, ARCH_CONVERT), mp);
- } else {
- xfs_dir_leaf_moveents(drop_leaf, 0,
- save_leaf, INT_GET(save_hdr->count, ARCH_CONVERT),
- (int)INT_GET(drop_hdr->count, ARCH_CONVERT), mp);
- }
- } else {
- /*
- * Destination has holes, so we make a temporary copy
- * of the leaf and add them both to that.
- */
- tmpbuffer = kmem_alloc(state->blocksize, KM_SLEEP);
- ASSERT(tmpbuffer != NULL);
- memset(tmpbuffer, 0, state->blocksize);
- tmp_leaf = (xfs_dir_leafblock_t *)tmpbuffer;
- tmp_hdr = &tmp_leaf->hdr;
- tmp_hdr->info = save_hdr->info; /* struct copy */
- tmp_hdr->count = 0;
- INT_SET(tmp_hdr->firstused, ARCH_CONVERT, state->blocksize);
- if (!tmp_hdr->firstused)
- INT_SET(tmp_hdr->firstused, ARCH_CONVERT, state->blocksize - 1);
- tmp_hdr->namebytes = 0;
- if (xfs_dir_leaf_order(save_blk->bp, drop_blk->bp)) {
- xfs_dir_leaf_moveents(drop_leaf, 0, tmp_leaf, 0,
- (int)INT_GET(drop_hdr->count, ARCH_CONVERT), mp);
- xfs_dir_leaf_moveents(save_leaf, 0,
- tmp_leaf, INT_GET(tmp_leaf->hdr.count, ARCH_CONVERT),
- (int)INT_GET(save_hdr->count, ARCH_CONVERT), mp);
- } else {
- xfs_dir_leaf_moveents(save_leaf, 0, tmp_leaf, 0,
- (int)INT_GET(save_hdr->count, ARCH_CONVERT), mp);
- xfs_dir_leaf_moveents(drop_leaf, 0,
- tmp_leaf, INT_GET(tmp_leaf->hdr.count, ARCH_CONVERT),
- (int)INT_GET(drop_hdr->count, ARCH_CONVERT), mp);
- }
- memcpy(save_leaf, tmp_leaf, state->blocksize);
- kmem_free(tmpbuffer, state->blocksize);
- }
-
- xfs_da_log_buf(state->args->trans, save_blk->bp, 0,
- state->blocksize - 1);
-
- /*
- * Copy out last hashval in each block for B-tree code.
- */
- save_blk->hashval = INT_GET(save_leaf->entries[ INT_GET(save_leaf->hdr.count, ARCH_CONVERT)-1 ].hashval, ARCH_CONVERT);
-}
-
-/*========================================================================
- * Routines used for finding things in the Btree.
- *========================================================================*/
-
-/*
- * Look up a name in a leaf directory structure.
- * This is the internal routine, it uses the caller's buffer.
- *
- * Note that duplicate keys are allowed, but only check within the
- * current leaf node. The Btree code must check in adjacent leaf nodes.
- *
- * Return in *index the index into the entry[] array of either the found
- * entry, or where the entry should have been (insert before that entry).
- *
- * Don't change the args->inumber unless we find the filename.
- */
-int
-xfs_dir_leaf_lookup_int(xfs_dabuf_t *bp, xfs_da_args_t *args, int *index)
-{
- xfs_dir_leafblock_t *leaf;
- xfs_dir_leaf_entry_t *entry;
- xfs_dir_leaf_name_t *namest;
- int probe, span;
- xfs_dahash_t hashval;
-
- leaf = bp->data;
- ASSERT(be16_to_cpu(leaf->hdr.info.magic) == XFS_DIR_LEAF_MAGIC);
- ASSERT(INT_GET(leaf->hdr.count, ARCH_CONVERT) < (XFS_LBSIZE(args->dp->i_mount)/8));
-
- /*
- * Binary search. (note: small blocks will skip this loop)
- */
- hashval = args->hashval;
- probe = span = INT_GET(leaf->hdr.count, ARCH_CONVERT) / 2;
- for (entry = &leaf->entries[probe]; span > 4;
- entry = &leaf->entries[probe]) {
- span /= 2;
- if (INT_GET(entry->hashval, ARCH_CONVERT) < hashval)
- probe += span;
- else if (INT_GET(entry->hashval, ARCH_CONVERT) > hashval)
- probe -= span;
- else
- break;
- }
- ASSERT((probe >= 0) && \
- ((!leaf->hdr.count) || (probe < INT_GET(leaf->hdr.count, ARCH_CONVERT))));
- ASSERT((span <= 4) || (INT_GET(entry->hashval, ARCH_CONVERT) == hashval));
-
- /*
-	 * Since we may have duplicate hashvals, find the first matching
- * hashval in the leaf.
- */
- while ((probe > 0) && (INT_GET(entry->hashval, ARCH_CONVERT) >= hashval)) {
- entry--;
- probe--;
- }
- while ((probe < INT_GET(leaf->hdr.count, ARCH_CONVERT)) && (INT_GET(entry->hashval, ARCH_CONVERT) < hashval)) {
- entry++;
- probe++;
- }
- if ((probe == INT_GET(leaf->hdr.count, ARCH_CONVERT)) || (INT_GET(entry->hashval, ARCH_CONVERT) != hashval)) {
- *index = probe;
- ASSERT(args->oknoent);
- return XFS_ERROR(ENOENT);
- }
-
- /*
- * Duplicate keys may be present, so search all of them for a match.
- */
- while ((probe < INT_GET(leaf->hdr.count, ARCH_CONVERT)) && (INT_GET(entry->hashval, ARCH_CONVERT) == hashval)) {
- namest = XFS_DIR_LEAF_NAMESTRUCT(leaf, INT_GET(entry->nameidx, ARCH_CONVERT));
- if (entry->namelen == args->namelen &&
- namest->name[0] == args->name[0] &&
- memcmp(args->name, namest->name, args->namelen) == 0) {
- XFS_DIR_SF_GET_DIRINO(&namest->inumber, &args->inumber);
- *index = probe;
- return XFS_ERROR(EEXIST);
- }
- entry++;
- probe++;
- }
- *index = probe;
- ASSERT(probe == INT_GET(leaf->hdr.count, ARCH_CONVERT) || args->oknoent);
- return XFS_ERROR(ENOENT);
-}
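
The lookup above binary-searches until the span is small, then backs up to the first entry carrying the target hashval before walking forward through the duplicates and comparing names. The "leftmost match" part of that pattern in a standalone sketch, over a plain sorted array of keys:

#include <stddef.h>
#include <stdint.h>

/*
 * Return the index of the first element in the sorted array 'a' whose
 * value is >= 'key' (the leftmost possible duplicate), or 'n' if every
 * element is smaller.  The caller still compares names among equal keys.
 */
static size_t first_at_or_above(const uint32_t *a, size_t n, uint32_t key)
{
	size_t lo = 0, hi = n;

	while (lo < hi) {
		size_t mid = lo + (hi - lo) / 2;

		if (a[mid] < key)
			lo = mid + 1;
		else
			hi = mid;
	}
	return lo;
}
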
-
-/*========================================================================
- * Utility routines.
- *========================================================================*/
-
-/*
- * Move the indicated entries from one leaf to another.
- * NOTE: this routine modifies both source and destination leaves.
- */
-/* ARGSUSED */
-STATIC void
-xfs_dir_leaf_moveents(xfs_dir_leafblock_t *leaf_s, int start_s,
- xfs_dir_leafblock_t *leaf_d, int start_d,
- int count, xfs_mount_t *mp)
-{
- xfs_dir_leaf_hdr_t *hdr_s, *hdr_d;
- xfs_dir_leaf_entry_t *entry_s, *entry_d;
- int tmp, i;
-
- /*
- * Check for nothing to do.
- */
- if (count == 0)
- return;
-
- /*
- * Set up environment.
- */
- ASSERT(be16_to_cpu(leaf_s->hdr.info.magic) == XFS_DIR_LEAF_MAGIC);
- ASSERT(be16_to_cpu(leaf_d->hdr.info.magic) == XFS_DIR_LEAF_MAGIC);
- hdr_s = &leaf_s->hdr;
- hdr_d = &leaf_d->hdr;
- ASSERT((INT_GET(hdr_s->count, ARCH_CONVERT) > 0) && (INT_GET(hdr_s->count, ARCH_CONVERT) < (XFS_LBSIZE(mp)/8)));
- ASSERT(INT_GET(hdr_s->firstused, ARCH_CONVERT) >=
- ((INT_GET(hdr_s->count, ARCH_CONVERT)*sizeof(*entry_s))+sizeof(*hdr_s)));
- ASSERT(INT_GET(hdr_d->count, ARCH_CONVERT) < (XFS_LBSIZE(mp)/8));
- ASSERT(INT_GET(hdr_d->firstused, ARCH_CONVERT) >=
- ((INT_GET(hdr_d->count, ARCH_CONVERT)*sizeof(*entry_d))+sizeof(*hdr_d)));
-
- ASSERT(start_s < INT_GET(hdr_s->count, ARCH_CONVERT));
- ASSERT(start_d <= INT_GET(hdr_d->count, ARCH_CONVERT));
- ASSERT(count <= INT_GET(hdr_s->count, ARCH_CONVERT));
-
- /*
- * Move the entries in the destination leaf up to make a hole?
- */
- if (start_d < INT_GET(hdr_d->count, ARCH_CONVERT)) {
- tmp = INT_GET(hdr_d->count, ARCH_CONVERT) - start_d;
- tmp *= (uint)sizeof(xfs_dir_leaf_entry_t);
- entry_s = &leaf_d->entries[start_d];
- entry_d = &leaf_d->entries[start_d + count];
- memcpy(entry_d, entry_s, tmp);
- }
-
- /*
-	 * Copy all entries in the same (sorted) order,
- * but allocate filenames packed and in sequence.
- */
- entry_s = &leaf_s->entries[start_s];
- entry_d = &leaf_d->entries[start_d];
- for (i = 0; i < count; entry_s++, entry_d++, i++) {
- ASSERT(INT_GET(entry_s->nameidx, ARCH_CONVERT) >= INT_GET(hdr_s->firstused, ARCH_CONVERT));
- tmp = XFS_DIR_LEAF_ENTSIZE_BYENTRY(entry_s);
- INT_MOD(hdr_d->firstused, ARCH_CONVERT, -(tmp));
- entry_d->hashval = entry_s->hashval; /* INT_: direct copy */
- INT_COPY(entry_d->nameidx, hdr_d->firstused, ARCH_CONVERT);
- entry_d->namelen = entry_s->namelen;
- ASSERT(INT_GET(entry_d->nameidx, ARCH_CONVERT) + tmp <= XFS_LBSIZE(mp));
- memcpy(XFS_DIR_LEAF_NAMESTRUCT(leaf_d, INT_GET(entry_d->nameidx, ARCH_CONVERT)),
- XFS_DIR_LEAF_NAMESTRUCT(leaf_s, INT_GET(entry_s->nameidx, ARCH_CONVERT)), tmp);
- ASSERT(INT_GET(entry_s->nameidx, ARCH_CONVERT) + tmp <= XFS_LBSIZE(mp));
- memset((char *)XFS_DIR_LEAF_NAMESTRUCT(leaf_s, INT_GET(entry_s->nameidx, ARCH_CONVERT)),
- 0, tmp);
- INT_MOD(hdr_s->namebytes, ARCH_CONVERT, -(entry_d->namelen));
- INT_MOD(hdr_d->namebytes, ARCH_CONVERT, entry_d->namelen);
- INT_MOD(hdr_s->count, ARCH_CONVERT, -1);
- INT_MOD(hdr_d->count, ARCH_CONVERT, +1);
- tmp = INT_GET(hdr_d->count, ARCH_CONVERT) * (uint)sizeof(xfs_dir_leaf_entry_t)
- + (uint)sizeof(xfs_dir_leaf_hdr_t);
- ASSERT(INT_GET(hdr_d->firstused, ARCH_CONVERT) >= tmp);
-
- }
-
- /*
- * Zero out the entries we just copied.
- */
- if (start_s == INT_GET(hdr_s->count, ARCH_CONVERT)) {
- tmp = count * (uint)sizeof(xfs_dir_leaf_entry_t);
- entry_s = &leaf_s->entries[start_s];
- ASSERT((char *)entry_s + tmp <= (char *)leaf_s + XFS_LBSIZE(mp));
- memset((char *)entry_s, 0, tmp);
- } else {
- /*
- * Move the remaining entries down to fill the hole,
- * then zero the entries at the top.
- */
- tmp = INT_GET(hdr_s->count, ARCH_CONVERT) - count;
- tmp *= (uint)sizeof(xfs_dir_leaf_entry_t);
- entry_s = &leaf_s->entries[start_s + count];
- entry_d = &leaf_s->entries[start_s];
- memcpy(entry_d, entry_s, tmp);
-
- tmp = count * (uint)sizeof(xfs_dir_leaf_entry_t);
- entry_s = &leaf_s->entries[INT_GET(hdr_s->count, ARCH_CONVERT)];
- ASSERT((char *)entry_s + tmp <= (char *)leaf_s + XFS_LBSIZE(mp));
- memset((char *)entry_s, 0, tmp);
- }
-
- /*
- * Fill in the freemap information
- */
- INT_SET(hdr_d->freemap[0].base, ARCH_CONVERT, (uint)sizeof(xfs_dir_leaf_hdr_t));
- INT_MOD(hdr_d->freemap[0].base, ARCH_CONVERT, INT_GET(hdr_d->count, ARCH_CONVERT) * (uint)sizeof(xfs_dir_leaf_entry_t));
- INT_SET(hdr_d->freemap[0].size, ARCH_CONVERT, INT_GET(hdr_d->firstused, ARCH_CONVERT) - INT_GET(hdr_d->freemap[0].base, ARCH_CONVERT));
- INT_SET(hdr_d->freemap[1].base, ARCH_CONVERT, (hdr_d->freemap[2].base = 0));
- INT_SET(hdr_d->freemap[1].size, ARCH_CONVERT, (hdr_d->freemap[2].size = 0));
- hdr_s->holes = 1; /* leaf may not be compact */
-}
-
-/*
- * Compare the hashval ordering of two leaf blocks; return 1 if the
- * entries in leaf2 sort before those in leaf1.
- */
-int
-xfs_dir_leaf_order(xfs_dabuf_t *leaf1_bp, xfs_dabuf_t *leaf2_bp)
-{
- xfs_dir_leafblock_t *leaf1, *leaf2;
-
- leaf1 = leaf1_bp->data;
- leaf2 = leaf2_bp->data;
- ASSERT((be16_to_cpu(leaf1->hdr.info.magic) == XFS_DIR_LEAF_MAGIC) &&
- (be16_to_cpu(leaf2->hdr.info.magic) == XFS_DIR_LEAF_MAGIC));
- if ((INT_GET(leaf1->hdr.count, ARCH_CONVERT) > 0) && (INT_GET(leaf2->hdr.count, ARCH_CONVERT) > 0) &&
- ((INT_GET(leaf2->entries[ 0 ].hashval, ARCH_CONVERT) <
- INT_GET(leaf1->entries[ 0 ].hashval, ARCH_CONVERT)) ||
- (INT_GET(leaf2->entries[ INT_GET(leaf2->hdr.count, ARCH_CONVERT)-1 ].hashval, ARCH_CONVERT) <
- INT_GET(leaf1->entries[ INT_GET(leaf1->hdr.count, ARCH_CONVERT)-1 ].hashval, ARCH_CONVERT)))) {
- return 1;
- }
- return 0;
-}
-
-/*
- * Pick up the last hashvalue from a leaf block.
- */
-xfs_dahash_t
-xfs_dir_leaf_lasthash(xfs_dabuf_t *bp, int *count)
-{
- xfs_dir_leafblock_t *leaf;
-
- leaf = bp->data;
- ASSERT(be16_to_cpu(leaf->hdr.info.magic) == XFS_DIR_LEAF_MAGIC);
- if (count)
- *count = INT_GET(leaf->hdr.count, ARCH_CONVERT);
- if (!leaf->hdr.count)
- return(0);
- return(INT_GET(leaf->entries[ INT_GET(leaf->hdr.count, ARCH_CONVERT)-1 ].hashval, ARCH_CONVERT));
-}
-
-/*
- * Copy out directory entries for getdents(), for leaf directories.
- */
-int
-xfs_dir_leaf_getdents_int(
- xfs_dabuf_t *bp,
- xfs_inode_t *dp,
- xfs_dablk_t bno,
- uio_t *uio,
- int *eobp,
- xfs_dirent_t *dbp,
- xfs_dir_put_t put,
- xfs_daddr_t nextda)
-{
- xfs_dir_leafblock_t *leaf;
- xfs_dir_leaf_entry_t *entry;
- xfs_dir_leaf_name_t *namest;
- int entno, want_entno, i, nextentno;
- xfs_mount_t *mp;
- xfs_dahash_t cookhash;
- xfs_dahash_t nexthash = 0;
-#if (BITS_PER_LONG == 32)
- xfs_dahash_t lasthash = XFS_DA_MAXHASH;
-#endif
- xfs_dir_put_args_t p;
-
- mp = dp->i_mount;
- leaf = bp->data;
- if (be16_to_cpu(leaf->hdr.info.magic) != XFS_DIR_LEAF_MAGIC) {
- *eobp = 1;
- return XFS_ERROR(ENOENT); /* XXX wrong code */
- }
-
- want_entno = XFS_DA_COOKIE_ENTRY(mp, uio->uio_offset);
-
- cookhash = XFS_DA_COOKIE_HASH(mp, uio->uio_offset);
-
- xfs_dir_trace_g_dul("leaf: start", dp, uio, leaf);
-
- /*
- * Re-find our place.
- */
- for (i = entno = 0, entry = &leaf->entries[0];
- i < INT_GET(leaf->hdr.count, ARCH_CONVERT);
- entry++, i++) {
-
- namest = XFS_DIR_LEAF_NAMESTRUCT(leaf,
- INT_GET(entry->nameidx, ARCH_CONVERT));
-
- if (unlikely(
- ((char *)namest < (char *)leaf) ||
- ((char *)namest >= (char *)leaf + XFS_LBSIZE(mp)))) {
- XFS_CORRUPTION_ERROR("xfs_dir_leaf_getdents_int(1)",
- XFS_ERRLEVEL_LOW, mp, leaf);
- xfs_dir_trace_g_du("leaf: corrupted", dp, uio);
- return XFS_ERROR(EFSCORRUPTED);
- }
- if (INT_GET(entry->hashval, ARCH_CONVERT) >= cookhash) {
- if ( entno < want_entno
- && INT_GET(entry->hashval, ARCH_CONVERT)
- == cookhash) {
- /*
- * Trying to get to a particular offset in a
- * run of equal-hashval entries.
- */
- entno++;
- } else if ( want_entno > 0
- && entno == want_entno
- && INT_GET(entry->hashval, ARCH_CONVERT)
- == cookhash) {
- break;
- } else {
- entno = 0;
- break;
- }
- }
- }
-
- if (i == INT_GET(leaf->hdr.count, ARCH_CONVERT)) {
- xfs_dir_trace_g_du("leaf: hash not found", dp, uio);
- if (!leaf->hdr.info.forw)
- uio->uio_offset =
- XFS_DA_MAKE_COOKIE(mp, 0, 0, XFS_DA_MAXHASH);
- /*
- * Don't set uio_offset if there's another block:
- * the node code will be setting uio_offset anyway.
- */
- *eobp = 0;
- return 0;
- }
- xfs_dir_trace_g_due("leaf: hash found", dp, uio, entry);
-
- p.dbp = dbp;
- p.put = put;
- p.uio = uio;
-
- /*
- * We're synchronized, start copying entries out to the user.
- */
- for (; entno >= 0 && i < INT_GET(leaf->hdr.count, ARCH_CONVERT);
- entry++, i++, (entno = nextentno)) {
- int lastresid=0, retval;
- xfs_dircook_t lastoffset;
- xfs_dahash_t thishash;
-
- /*
- * Check for a damaged directory leaf block and pick up
- * the inode number from this entry.
- */
- namest = XFS_DIR_LEAF_NAMESTRUCT(leaf,
- INT_GET(entry->nameidx, ARCH_CONVERT));
-
- if (unlikely(
- ((char *)namest < (char *)leaf) ||
- ((char *)namest >= (char *)leaf + XFS_LBSIZE(mp)))) {
- XFS_CORRUPTION_ERROR("xfs_dir_leaf_getdents_int(2)",
- XFS_ERRLEVEL_LOW, mp, leaf);
- xfs_dir_trace_g_du("leaf: corrupted", dp, uio);
- return XFS_ERROR(EFSCORRUPTED);
- }
-
- xfs_dir_trace_g_duc("leaf: middle cookie ",
- dp, uio, p.cook.o);
-
- if (i < (INT_GET(leaf->hdr.count, ARCH_CONVERT) - 1)) {
- nexthash = INT_GET(entry[1].hashval, ARCH_CONVERT);
-
- if (nexthash == INT_GET(entry->hashval, ARCH_CONVERT))
- nextentno = entno + 1;
- else
- nextentno = 0;
- XFS_PUT_COOKIE(p.cook, mp, bno, nextentno, nexthash);
- xfs_dir_trace_g_duc("leaf: middle cookie ",
- dp, uio, p.cook.o);
-
- } else if ((thishash = be32_to_cpu(leaf->hdr.info.forw))) {
- xfs_dabuf_t *bp2;
- xfs_dir_leafblock_t *leaf2;
-
- ASSERT(nextda != -1);
-
- retval = xfs_da_read_buf(dp->i_transp, dp, thishash,
- nextda, &bp2, XFS_DATA_FORK);
- if (retval)
- return retval;
-
- ASSERT(bp2 != NULL);
-
- leaf2 = bp2->data;
-
- if (unlikely(
- (be16_to_cpu(leaf2->hdr.info.magic)
- != XFS_DIR_LEAF_MAGIC)
- || (be32_to_cpu(leaf2->hdr.info.back)
- != bno))) { /* GROT */
- XFS_CORRUPTION_ERROR("xfs_dir_leaf_getdents_int(3)",
- XFS_ERRLEVEL_LOW, mp,
- leaf2);
- xfs_da_brelse(dp->i_transp, bp2);
-
- return XFS_ERROR(EFSCORRUPTED);
- }
-
- nexthash = INT_GET(leaf2->entries[0].hashval,
- ARCH_CONVERT);
- nextentno = -1;
- XFS_PUT_COOKIE(p.cook, mp, thishash, 0, nexthash);
- xfs_da_brelse(dp->i_transp, bp2);
- xfs_dir_trace_g_duc("leaf: next blk cookie",
- dp, uio, p.cook.o);
- } else {
- nextentno = -1;
- XFS_PUT_COOKIE(p.cook, mp, 0, 0, XFS_DA_MAXHASH);
- }
-
- /*
- * Save off the cookie so we can fall back should the
-		 * 'put' into the outgoing buffer fail. To handle a run
- * of equal-hashvals, the off_t structure on 64bit
- * builds has entno built into the cookie to ID the
- * entry. On 32bit builds, we only have space for the
- * hashval so we can't ID specific entries within a group
- * of same hashval entries. For this, lastoffset is set
- * to the first in the run of equal hashvals so we don't
- * include any entries unless we can include all entries
- * that share the same hashval. Hopefully the buffer
- * provided is big enough to handle it (see pv763517).
- */
-#if (BITS_PER_LONG == 32)
- if ((thishash = INT_GET(entry->hashval, ARCH_CONVERT))
- != lasthash) {
- XFS_PUT_COOKIE(lastoffset, mp, bno, entno, thishash);
- lastresid = uio->uio_resid;
- lasthash = thishash;
- } else {
- xfs_dir_trace_g_duc("leaf: DUP COOKIES, skipped",
- dp, uio, p.cook.o);
- }
-#else
- thishash = INT_GET(entry->hashval, ARCH_CONVERT);
- XFS_PUT_COOKIE(lastoffset, mp, bno, entno, thishash);
- lastresid = uio->uio_resid;
-#endif /* BITS_PER_LONG == 32 */
-
- /*
- * Put the current entry into the outgoing buffer. If we fail
- * then restore the UIO to the first entry in the current
-		 * run of equal-hashval entries (probably only 1 entry long).
- */
- p.ino = XFS_GET_DIR_INO8(namest->inumber);
-#if XFS_BIG_INUMS
- p.ino += mp->m_inoadd;
-#endif
- p.name = (char *)namest->name;
- p.namelen = entry->namelen;
-
- retval = p.put(&p);
-
- if (!p.done) {
- uio->uio_offset = lastoffset.o;
- uio->uio_resid = lastresid;
-
- *eobp = 1;
-
- xfs_dir_trace_g_du("leaf: E-O-B", dp, uio);
-
- return retval;
- }
- }
-
- uio->uio_offset = p.cook.o;
-
- *eobp = 0;
-
- xfs_dir_trace_g_du("leaf: E-O-F", dp, uio);
-
- return 0;
-}
-
-/*
- * Format a dirent64 structure and copy it out to the user's buffer.
- */
-int
-xfs_dir_put_dirent64_direct(xfs_dir_put_args_t *pa)
-{
- iovec_t *iovp;
- int reclen, namelen;
- xfs_dirent_t *idbp;
- uio_t *uio;
-
- namelen = pa->namelen;
- reclen = DIRENTSIZE(namelen);
- uio = pa->uio;
- if (reclen > uio->uio_resid) {
- pa->done = 0;
- return 0;
- }
- iovp = uio->uio_iov;
- idbp = (xfs_dirent_t *)iovp->iov_base;
- iovp->iov_base = (char *)idbp + reclen;
- iovp->iov_len -= reclen;
- uio->uio_resid -= reclen;
- idbp->d_reclen = reclen;
- idbp->d_ino = pa->ino;
- idbp->d_off = pa->cook.o;
- idbp->d_name[namelen] = '\0';
- pa->done = 1;
- memcpy(idbp->d_name, pa->name, namelen);
- return 0;
-}
-
-/*
- * Format a dirent64 structure and copy it out to the user's buffer.
- */
-int
-xfs_dir_put_dirent64_uio(xfs_dir_put_args_t *pa)
-{
- int retval, reclen, namelen;
- xfs_dirent_t *idbp;
- uio_t *uio;
-
- namelen = pa->namelen;
- reclen = DIRENTSIZE(namelen);
- uio = pa->uio;
- if (reclen > uio->uio_resid) {
- pa->done = 0;
- return 0;
- }
- idbp = pa->dbp;
- idbp->d_reclen = reclen;
- idbp->d_ino = pa->ino;
- idbp->d_off = pa->cook.o;
- idbp->d_name[namelen] = '\0';
- memcpy(idbp->d_name, pa->name, namelen);
- retval = uio_read((caddr_t)idbp, reclen, uio);
- pa->done = (retval == 0);
- return retval;
-}
diff --git a/fs/xfs/xfs_dir_leaf.h b/fs/xfs/xfs_dir_leaf.h
deleted file mode 100644
index eb8cd9a4667..00000000000
--- a/fs/xfs/xfs_dir_leaf.h
+++ /dev/null
@@ -1,231 +0,0 @@
-/*
- * Copyright (c) 2000-2001,2005 Silicon Graphics, Inc.
- * All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it would be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
- */
-#ifndef __XFS_DIR_LEAF_H__
-#define __XFS_DIR_LEAF_H__
-
-/*
- * Directory layout, internal structure, access macros, etc.
- *
- * Large directories are structured around Btrees where all the data
- * elements are in the leaf nodes. Filenames are hashed into an int,
- * then that int is used as the index into the Btree. Since the hashval
- * of a filename may not be unique, we may have duplicate keys. The
- * internal links in the Btree are logical block offsets into the file.
- */
-
-struct uio;
-struct xfs_bmap_free;
-struct xfs_dabuf;
-struct xfs_da_args;
-struct xfs_da_state;
-struct xfs_da_state_blk;
-struct xfs_dir_put_args;
-struct xfs_inode;
-struct xfs_mount;
-struct xfs_trans;
-
-/*========================================================================
- * Directory Structure when equal to XFS_LBSIZE(mp) bytes.
- *========================================================================*/
-
-/*
- * This is the structure of the leaf nodes in the Btree.
- *
- * The leaf_entry structs are packed from the top. Names grow from the
- * bottom but are not packed. The freemap contains run-length-encoded
- * entries for the free bytes after the leaf_entry array, but only the N
- * largest such runs; smaller runs are dropped. When the freemap doesn't
- * show enough space for an allocation, we compact the namelist area and
- * try again. If we still don't have enough space, we have to split the block.
- *
- * Since we have duplicate hash keys, for each key that matches, compare
- * the actual string. The root and intermediate node search always takes
- * the first-in-the-block key match found, so we should only have to work
- * "forw"ard. If none matches, continue with the "forw"ard leaf nodes
- * until the hash key changes or the filename is found.
- *
- * The parent directory and the self-pointer are explicitly represented
- * (ie: there are entries for "." and "..").
- *
- * Note that the count being a __uint16_t limits us to something like a
- * blocksize of 1.3MB in the face of worst case (short) filenames.
- */
-#define XFS_DIR_LEAF_MAPSIZE 3 /* how many freespace slots */
-
-typedef struct xfs_dir_leaf_map { /* RLE map of free bytes */
- __uint16_t base; /* base of free region */
- __uint16_t size; /* run length of free region */
-} xfs_dir_leaf_map_t;
-
-typedef struct xfs_dir_leaf_hdr { /* constant-structure header block */
- xfs_da_blkinfo_t info; /* block type, links, etc. */
- __uint16_t count; /* count of active leaf_entry's */
- __uint16_t namebytes; /* num bytes of name strings stored */
- __uint16_t firstused; /* first used byte in name area */
- __uint8_t holes; /* != 0 if blk needs compaction */
- __uint8_t pad1;
- xfs_dir_leaf_map_t freemap[XFS_DIR_LEAF_MAPSIZE];
-} xfs_dir_leaf_hdr_t;
-
-typedef struct xfs_dir_leaf_entry { /* sorted on key, not name */
- xfs_dahash_t hashval; /* hash value of name */
- __uint16_t nameidx; /* index into buffer of name */
- __uint8_t namelen; /* length of name string */
- __uint8_t pad2;
-} xfs_dir_leaf_entry_t;
-
-typedef struct xfs_dir_leaf_name {
- xfs_dir_ino_t inumber; /* inode number for this key */
- __uint8_t name[1]; /* name string itself */
-} xfs_dir_leaf_name_t;
-
-typedef struct xfs_dir_leafblock {
- xfs_dir_leaf_hdr_t hdr; /* constant-structure header block */
- xfs_dir_leaf_entry_t entries[1]; /* var sized array */
- xfs_dir_leaf_name_t namelist[1]; /* grows from bottom of buf */
-} xfs_dir_leafblock_t;
-
-/*
- * Length of name for which a 512-byte block filesystem
- * can get a double split.
- */
-#define XFS_DIR_LEAF_CAN_DOUBLE_SPLIT_LEN \
- (512 - (uint)sizeof(xfs_dir_leaf_hdr_t) - \
- (uint)sizeof(xfs_dir_leaf_entry_t) * 2 - \
- (uint)sizeof(xfs_dir_leaf_name_t) * 2 - (MAXNAMELEN - 2) + 1 + 1)
-
-typedef int (*xfs_dir_put_t)(struct xfs_dir_put_args *pa);
-
-typedef union {
- xfs_off_t o; /* offset (cookie) */
- /*
- * Watch the order here (endian-ness dependent).
- */
- struct {
-#ifndef XFS_NATIVE_HOST
- xfs_dahash_t h; /* hash value */
- __uint32_t be; /* block and entry */
-#else
- __uint32_t be; /* block and entry */
- xfs_dahash_t h; /* hash value */
-#endif /* XFS_NATIVE_HOST */
- } s;
-} xfs_dircook_t;
-
-#define XFS_PUT_COOKIE(c,mp,bno,entry,hash) \
- ((c).s.be = XFS_DA_MAKE_BNOENTRY(mp, bno, entry), (c).s.h = (hash))
-
-typedef struct xfs_dir_put_args {
- xfs_dircook_t cook; /* cookie of (next) entry */
- xfs_intino_t ino; /* inode number */
- struct xfs_dirent *dbp; /* buffer pointer */
- char *name; /* directory entry name */
- int namelen; /* length of name */
- int done; /* output: set if value was stored */
- xfs_dir_put_t put; /* put function ptr (i/o) */
- struct uio *uio; /* uio control structure */
-} xfs_dir_put_args_t;
-
-#define XFS_DIR_LEAF_ENTSIZE_BYNAME(len) \
- xfs_dir_leaf_entsize_byname(len)
-static inline int xfs_dir_leaf_entsize_byname(int len)
-{
- return (uint)sizeof(xfs_dir_leaf_name_t)-1 + len;
-}
-
-#define XFS_DIR_LEAF_ENTSIZE_BYENTRY(entry) \
- xfs_dir_leaf_entsize_byentry(entry)
-static inline int xfs_dir_leaf_entsize_byentry(xfs_dir_leaf_entry_t *entry)
-{
- return (uint)sizeof(xfs_dir_leaf_name_t)-1 + (entry)->namelen;
-}
-
-#define XFS_DIR_LEAF_NAMESTRUCT(leafp,offset) \
- xfs_dir_leaf_namestruct(leafp,offset)
-static inline xfs_dir_leaf_name_t *
-xfs_dir_leaf_namestruct(xfs_dir_leafblock_t *leafp, int offset)
-{
- return (xfs_dir_leaf_name_t *)&((char *)(leafp))[offset];
-}
-
-/*========================================================================
- * Function prototypes for the kernel.
- *========================================================================*/
-
-/*
- * Internal routines when dirsize < XFS_LITINO(mp).
- */
-int xfs_dir_shortform_create(struct xfs_da_args *args, xfs_ino_t parent);
-int xfs_dir_shortform_addname(struct xfs_da_args *args);
-int xfs_dir_shortform_lookup(struct xfs_da_args *args);
-int xfs_dir_shortform_to_leaf(struct xfs_da_args *args);
-int xfs_dir_shortform_removename(struct xfs_da_args *args);
-int xfs_dir_shortform_getdents(struct xfs_inode *dp, struct uio *uio, int *eofp,
- struct xfs_dirent *dbp, xfs_dir_put_t put);
-int xfs_dir_shortform_replace(struct xfs_da_args *args);
-
-/*
- * Internal routines when dirsize == XFS_LBSIZE(mp).
- */
-int xfs_dir_leaf_to_node(struct xfs_da_args *args);
-int xfs_dir_leaf_to_shortform(struct xfs_da_args *args);
-
-/*
- * Routines used for growing the Btree.
- */
-int xfs_dir_leaf_split(struct xfs_da_state *state,
- struct xfs_da_state_blk *oldblk,
- struct xfs_da_state_blk *newblk);
-int xfs_dir_leaf_add(struct xfs_dabuf *leaf_buffer,
- struct xfs_da_args *args, int insertion_index);
-int xfs_dir_leaf_addname(struct xfs_da_args *args);
-int xfs_dir_leaf_lookup_int(struct xfs_dabuf *leaf_buffer,
- struct xfs_da_args *args,
- int *index_found_at);
-int xfs_dir_leaf_remove(struct xfs_trans *trans,
- struct xfs_dabuf *leaf_buffer,
- int index_to_remove);
-int xfs_dir_leaf_getdents_int(struct xfs_dabuf *bp, struct xfs_inode *dp,
- xfs_dablk_t bno, struct uio *uio,
- int *eobp, struct xfs_dirent *dbp,
- xfs_dir_put_t put, xfs_daddr_t nextda);
-
-/*
- * Routines used for shrinking the Btree.
- */
-int xfs_dir_leaf_toosmall(struct xfs_da_state *state, int *retval);
-void xfs_dir_leaf_unbalance(struct xfs_da_state *state,
- struct xfs_da_state_blk *drop_blk,
- struct xfs_da_state_blk *save_blk);
-
-/*
- * Utility routines.
- */
-uint xfs_dir_leaf_lasthash(struct xfs_dabuf *bp, int *count);
-int xfs_dir_leaf_order(struct xfs_dabuf *leaf1_bp,
- struct xfs_dabuf *leaf2_bp);
-int xfs_dir_put_dirent64_direct(xfs_dir_put_args_t *pa);
-int xfs_dir_put_dirent64_uio(xfs_dir_put_args_t *pa);
-int xfs_dir_ino_validate(struct xfs_mount *mp, xfs_ino_t ino);
-
-/*
- * Global data.
- */
-extern xfs_dahash_t xfs_dir_hash_dot, xfs_dir_hash_dotdot;
-
-#endif /* __XFS_DIR_LEAF_H__ */
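The XFS_DIR_LEAF_ENTSIZE_* helpers above size an on-disk name record as sizeof(xfs_dir_leaf_name_t) - 1 + namelen, relying on the old C idiom of declaring a one-element trailing array and subtracting its single declared byte back out. A tiny host-side sketch of the same arithmetic, using a hypothetical record type:

#include <stdio.h>
#include <stdint.h>
#include <string.h>

/* hypothetical analogue of xfs_dir_leaf_name_t: 8-byte inumber + name[1] */
typedef struct demo_leaf_name {
        uint8_t inumber[8];
        uint8_t name[1];        /* really the entry's namelen bytes */
} demo_leaf_name_t;

static int demo_entsize_byname(int len)
{
        /* sizeof() already counts one name byte, so subtract it back out */
        return (int)sizeof(demo_leaf_name_t) - 1 + len;
}

int main(void)
{
        /* a 3-character name costs 8 (inumber) + 3 (name) = 11 bytes */
        printf("entsize(\"foo\") = %d\n", demo_entsize_byname((int)strlen("foo")));
        return 0;
}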
diff --git a/fs/xfs/xfs_dir_sf.h b/fs/xfs/xfs_dir_sf.h
deleted file mode 100644
index 5b20b4d3f57..00000000000
--- a/fs/xfs/xfs_dir_sf.h
+++ /dev/null
@@ -1,155 +0,0 @@
-/*
- * Copyright (c) 2000,2005 Silicon Graphics, Inc.
- * All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it would be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
- */
-#ifndef __XFS_DIR_SF_H__
-#define __XFS_DIR_SF_H__
-
-/*
- * Directory layout when stored internal to an inode.
- *
- * Small directories are packed as tightly as possible so as to
- * fit into the literal area of the inode.
- */
-
-typedef struct { __uint8_t i[sizeof(xfs_ino_t)]; } xfs_dir_ino_t;
-
-/*
- * The parent directory has a dedicated field, and the self-pointer must
- * be calculated on the fly.
- *
- * Entries are packed toward the top as tight as possible. The header
- * and the elements must be memcpy'd out into a work area to get correct
- * alignment for the inode number fields.
- */
-typedef struct xfs_dir_sf_hdr { /* constant-structure header block */
- xfs_dir_ino_t parent; /* parent dir inode number */
- __uint8_t count; /* count of active entries */
-} xfs_dir_sf_hdr_t;
-
-typedef struct xfs_dir_sf_entry {
- xfs_dir_ino_t inumber; /* referenced inode number */
- __uint8_t namelen; /* actual length of name (no NULL) */
- __uint8_t name[1]; /* name */
-} xfs_dir_sf_entry_t;
-
-typedef struct xfs_dir_shortform {
- xfs_dir_sf_hdr_t hdr;
- xfs_dir_sf_entry_t list[1]; /* variable sized array */
-} xfs_dir_shortform_t;
-
-/*
- * We generate this then sort it, so that readdirs are returned in
- * hash-order. Else seekdir won't work.
- */
-typedef struct xfs_dir_sf_sort {
- __uint8_t entno; /* .=0, ..=1, else entry# + 2 */
- __uint8_t seqno; /* sequence # with same hash value */
- __uint8_t namelen; /* length of name value (no null) */
- xfs_dahash_t hash; /* this entry's hash value */
- xfs_intino_t ino; /* this entry's inode number */
- char *name; /* name value, pointer into buffer */
-} xfs_dir_sf_sort_t;
-
-#define XFS_DIR_SF_GET_DIRINO(from,to) xfs_dir_sf_get_dirino(from, to)
-static inline void xfs_dir_sf_get_dirino(xfs_dir_ino_t *from, xfs_ino_t *to)
-{
- *(to) = XFS_GET_DIR_INO8(*from);
-}
-
-#define XFS_DIR_SF_PUT_DIRINO(from,to) xfs_dir_sf_put_dirino(from, to)
-static inline void xfs_dir_sf_put_dirino(xfs_ino_t *from, xfs_dir_ino_t *to)
-{
- XFS_PUT_DIR_INO8(*(from), *(to));
-}
-
-#define XFS_DIR_SF_ENTSIZE_BYNAME(len) xfs_dir_sf_entsize_byname(len)
-static inline int xfs_dir_sf_entsize_byname(int len)
-{
- return (uint)sizeof(xfs_dir_sf_entry_t)-1 + (len);
-}
-
-#define XFS_DIR_SF_ENTSIZE_BYENTRY(sfep) xfs_dir_sf_entsize_byentry(sfep)
-static inline int xfs_dir_sf_entsize_byentry(xfs_dir_sf_entry_t *sfep)
-{
- return (uint)sizeof(xfs_dir_sf_entry_t)-1 + (sfep)->namelen;
-}
-
-#define XFS_DIR_SF_NEXTENTRY(sfep) xfs_dir_sf_nextentry(sfep)
-static inline xfs_dir_sf_entry_t *xfs_dir_sf_nextentry(xfs_dir_sf_entry_t *sfep)
-{
- return (xfs_dir_sf_entry_t *) \
- ((char *)(sfep) + XFS_DIR_SF_ENTSIZE_BYENTRY(sfep));
-}
-
-#define XFS_DIR_SF_ALLFIT(count,totallen) \
- xfs_dir_sf_allfit(count,totallen)
-static inline int xfs_dir_sf_allfit(int count, int totallen)
-{
- return ((uint)sizeof(xfs_dir_sf_hdr_t) + \
- ((uint)sizeof(xfs_dir_sf_entry_t)-1)*(count) + (totallen));
-}
-
-#if defined(XFS_DIR_TRACE)
-
-/*
- * Kernel tracing support for directories.
- */
-struct uio;
-struct xfs_inode;
-struct xfs_da_intnode;
-struct xfs_dinode;
-struct xfs_dir_leafblock;
-struct xfs_dir_leaf_entry;
-
-#define XFS_DIR_TRACE_SIZE 4096 /* size of global trace buffer */
-extern ktrace_t *xfs_dir_trace_buf;
-
-/*
- * Trace record types.
- */
-#define XFS_DIR_KTRACE_G_DU 1 /* dp, uio */
-#define XFS_DIR_KTRACE_G_DUB 2 /* dp, uio, bno */
-#define XFS_DIR_KTRACE_G_DUN 3 /* dp, uio, node */
-#define XFS_DIR_KTRACE_G_DUL 4 /* dp, uio, leaf */
-#define XFS_DIR_KTRACE_G_DUE 5 /* dp, uio, leaf entry */
-#define XFS_DIR_KTRACE_G_DUC 6 /* dp, uio, cookie */
-
-void xfs_dir_trace_g_du(char *where, struct xfs_inode *dp, struct uio *uio);
-void xfs_dir_trace_g_dub(char *where, struct xfs_inode *dp, struct uio *uio,
- xfs_dablk_t bno);
-void xfs_dir_trace_g_dun(char *where, struct xfs_inode *dp, struct uio *uio,
- struct xfs_da_intnode *node);
-void xfs_dir_trace_g_dul(char *where, struct xfs_inode *dp, struct uio *uio,
- struct xfs_dir_leafblock *leaf);
-void xfs_dir_trace_g_due(char *where, struct xfs_inode *dp, struct uio *uio,
- struct xfs_dir_leaf_entry *entry);
-void xfs_dir_trace_g_duc(char *where, struct xfs_inode *dp, struct uio *uio,
- xfs_off_t cookie);
-void xfs_dir_trace_enter(int type, char *where,
- void *a0, void *a1, void *a2, void *a3,
- void *a4, void *a5, void *a6, void *a7,
- void *a8, void *a9, void *a10, void *a11);
-#else
-#define xfs_dir_trace_g_du(w,d,u)
-#define xfs_dir_trace_g_dub(w,d,u,b)
-#define xfs_dir_trace_g_dun(w,d,u,n)
-#define xfs_dir_trace_g_dul(w,d,u,l)
-#define xfs_dir_trace_g_due(w,d,u,e)
-#define xfs_dir_trace_g_duc(w,d,u,c)
-#endif /* DEBUG */
-
-#endif /* __XFS_DIR_SF_H__ */
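Shortform entries are variable length, so they cannot be indexed as a normal array; walkers advance from one entry to the next by adding that entry's on-disk size, which is all XFS_DIR_SF_NEXTENTRY does. A user-space sketch of such a walk over hypothetical stand-in structures (byte-packed, like the real ones):

#include <stdio.h>
#include <stdint.h>
#include <string.h>

typedef struct demo_sf_hdr {
        uint8_t parent[8];      /* packed parent inode number */
        uint8_t count;          /* number of entries that follow */
} demo_sf_hdr_t;

typedef struct demo_sf_entry {
        uint8_t inumber[8];     /* packed inode number */
        uint8_t namelen;        /* no trailing NUL on disk */
        uint8_t name[1];        /* really namelen bytes */
} demo_sf_entry_t;

static size_t demo_entsize(const demo_sf_entry_t *sfep)
{
        return sizeof(demo_sf_entry_t) - 1 + sfep->namelen;
}

static demo_sf_entry_t *demo_nextentry(demo_sf_entry_t *sfep)
{
        return (demo_sf_entry_t *)((char *)sfep + demo_entsize(sfep));
}

/* print every name in a packed shortform buffer */
static void demo_walk(uint8_t *buf)
{
        demo_sf_hdr_t *hdr = (demo_sf_hdr_t *)buf;
        demo_sf_entry_t *sfep = (demo_sf_entry_t *)(buf + sizeof(*hdr));
        int i;

        for (i = 0; i < hdr->count; i++) {
                printf("%.*s\n", (int)sfep->namelen, (const char *)sfep->name);
                sfep = demo_nextentry(sfep);
        }
}

int main(void)
{
        uint8_t buf[64] = { 0 };                /* two hand-packed entries */
        demo_sf_hdr_t *hdr = (demo_sf_hdr_t *)buf;
        demo_sf_entry_t *e = (demo_sf_entry_t *)(buf + sizeof(*hdr));

        hdr->count = 2;
        e->namelen = 1; memcpy(e->name, "a", 1);
        e = demo_nextentry(e);
        e->namelen = 2; memcpy(e->name, "bc", 2);
        demo_walk(buf);
        return 0;
}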
diff --git a/fs/xfs/xfs_dmapi.h b/fs/xfs/xfs_dmapi.h
index 00b1540f810..4e7865ad6f0 100644
--- a/fs/xfs/xfs_dmapi.h
+++ b/fs/xfs/xfs_dmapi.h
@@ -189,6 +189,6 @@ typedef enum {
#define AT_DELAY_FLAG(f) ((f&ATTR_NONBLOCK) ? DM_FLAGS_NDELAY : 0)
-extern struct bhv_vfsops xfs_dmops;
+extern struct bhv_module_vfsops xfs_dmops;
#endif /* __XFS_DMAPI_H__ */
diff --git a/fs/xfs/xfs_dmops.c b/fs/xfs/xfs_dmops.c
index 629795b3b3d..1e4a35ddf7f 100644
--- a/fs/xfs/xfs_dmops.c
+++ b/fs/xfs/xfs_dmops.c
@@ -23,7 +23,6 @@
#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
-#include "xfs_dir.h"
#include "xfs_dir2.h"
#include "xfs_dmapi.h"
#include "xfs_mount.h"
diff --git a/fs/xfs/xfs_error.c b/fs/xfs/xfs_error.c
index 2a21c502401..b95681b03d8 100644
--- a/fs/xfs/xfs_error.c
+++ b/fs/xfs/xfs_error.c
@@ -22,12 +22,10 @@
#include "xfs_inum.h"
#include "xfs_trans.h"
#include "xfs_sb.h"
-#include "xfs_dir.h"
#include "xfs_dir2.h"
#include "xfs_dmapi.h"
#include "xfs_mount.h"
#include "xfs_bmap_btree.h"
-#include "xfs_dir_sf.h"
#include "xfs_dir2_sf.h"
#include "xfs_attr_sf.h"
#include "xfs_dinode.h"
diff --git a/fs/xfs/xfs_extfree_item.c b/fs/xfs/xfs_extfree_item.c
index f19282ec854..6cf6d8769b9 100644
--- a/fs/xfs/xfs_extfree_item.c
+++ b/fs/xfs/xfs_extfree_item.c
@@ -23,7 +23,6 @@
#include "xfs_trans.h"
#include "xfs_buf_item.h"
#include "xfs_sb.h"
-#include "xfs_dir.h"
#include "xfs_dmapi.h"
#include "xfs_mount.h"
#include "xfs_trans_priv.h"
@@ -294,6 +293,62 @@ xfs_efi_init(xfs_mount_t *mp,
}
/*
+ * Copy an EFI format buffer from the given buf into the destination
+ * EFI format structure.
+ * The given buffer can be in 32-bit or 64-bit form (which have different
+ * padding), one of which will be the native format for this kernel.
+ * The routine converts between the formats when necessary.
+ */
+int
+xfs_efi_copy_format(xfs_log_iovec_t *buf, xfs_efi_log_format_t *dst_efi_fmt)
+{
+ xfs_efi_log_format_t *src_efi_fmt = (xfs_efi_log_format_t *)buf->i_addr;
+ uint i;
+ uint len = sizeof(xfs_efi_log_format_t) +
+ (src_efi_fmt->efi_nextents - 1) * sizeof(xfs_extent_t);
+ uint len32 = sizeof(xfs_efi_log_format_32_t) +
+ (src_efi_fmt->efi_nextents - 1) * sizeof(xfs_extent_32_t);
+ uint len64 = sizeof(xfs_efi_log_format_64_t) +
+ (src_efi_fmt->efi_nextents - 1) * sizeof(xfs_extent_64_t);
+
+ if (buf->i_len == len) {
+ memcpy((char *)dst_efi_fmt, (char*)src_efi_fmt, len);
+ return 0;
+ } else if (buf->i_len == len32) {
+ xfs_efi_log_format_32_t *src_efi_fmt_32 =
+ (xfs_efi_log_format_32_t *)buf->i_addr;
+
+ dst_efi_fmt->efi_type = src_efi_fmt_32->efi_type;
+ dst_efi_fmt->efi_size = src_efi_fmt_32->efi_size;
+ dst_efi_fmt->efi_nextents = src_efi_fmt_32->efi_nextents;
+ dst_efi_fmt->efi_id = src_efi_fmt_32->efi_id;
+ for (i = 0; i < dst_efi_fmt->efi_nextents; i++) {
+ dst_efi_fmt->efi_extents[i].ext_start =
+ src_efi_fmt_32->efi_extents[i].ext_start;
+ dst_efi_fmt->efi_extents[i].ext_len =
+ src_efi_fmt_32->efi_extents[i].ext_len;
+ }
+ return 0;
+ } else if (buf->i_len == len64) {
+ xfs_efi_log_format_64_t *src_efi_fmt_64 =
+ (xfs_efi_log_format_64_t *)buf->i_addr;
+
+ dst_efi_fmt->efi_type = src_efi_fmt_64->efi_type;
+ dst_efi_fmt->efi_size = src_efi_fmt_64->efi_size;
+ dst_efi_fmt->efi_nextents = src_efi_fmt_64->efi_nextents;
+ dst_efi_fmt->efi_id = src_efi_fmt_64->efi_id;
+ for (i = 0; i < dst_efi_fmt->efi_nextents; i++) {
+ dst_efi_fmt->efi_extents[i].ext_start =
+ src_efi_fmt_64->efi_extents[i].ext_start;
+ dst_efi_fmt->efi_extents[i].ext_len =
+ src_efi_fmt_64->efi_extents[i].ext_len;
+ }
+ return 0;
+ }
+ return EFSCORRUPTED;
+}
+
+/*
* This is called by the efd item code below to release references to
* the given efi item. Each efd calls this with the number of
* extents that it has logged, and when the sum of these reaches
diff --git a/fs/xfs/xfs_extfree_item.h b/fs/xfs/xfs_extfree_item.h
index 5bf681708fe..0ea45edaab0 100644
--- a/fs/xfs/xfs_extfree_item.h
+++ b/fs/xfs/xfs_extfree_item.h
@@ -27,6 +27,24 @@ typedef struct xfs_extent {
} xfs_extent_t;
/*
+ * Since an xfs_extent_t has mixed field widths (start: 64 bits, len: 32 bits),
+ * its alignment and padding differ between 32-bit and 64-bit kernels.
+ * So we provide the two explicit variants below for use by a
+ * conversion routine.
+ */
+
+typedef struct xfs_extent_32 {
+ xfs_dfsbno_t ext_start;
+ xfs_extlen_t ext_len;
+} __attribute__((packed)) xfs_extent_32_t;
+
+typedef struct xfs_extent_64 {
+ xfs_dfsbno_t ext_start;
+ xfs_extlen_t ext_len;
+ __uint32_t ext_pad;
+} xfs_extent_64_t;
+
+/*
* This is the structure used to lay out an efi log item in the
* log. The efi_extents field is a variable size array whose
* size is given by efi_nextents.
@@ -39,6 +57,22 @@ typedef struct xfs_efi_log_format {
xfs_extent_t efi_extents[1]; /* array of extents to free */
} xfs_efi_log_format_t;
+typedef struct xfs_efi_log_format_32 {
+ unsigned short efi_type; /* efi log item type */
+ unsigned short efi_size; /* size of this item */
+ uint efi_nextents; /* # extents to free */
+ __uint64_t efi_id; /* efi identifier */
+ xfs_extent_32_t efi_extents[1]; /* array of extents to free */
+} __attribute__((packed)) xfs_efi_log_format_32_t;
+
+typedef struct xfs_efi_log_format_64 {
+ unsigned short efi_type; /* efi log item type */
+ unsigned short efi_size; /* size of this item */
+ uint efi_nextents; /* # extents to free */
+ __uint64_t efi_id; /* efi identifier */
+ xfs_extent_64_t efi_extents[1]; /* array of extents to free */
+} xfs_efi_log_format_64_t;
+
/*
* This is the structure used to lay out an efd log item in the
* log. The efd_extents array is a variable size array whose
@@ -52,6 +86,22 @@ typedef struct xfs_efd_log_format {
xfs_extent_t efd_extents[1]; /* array of extents freed */
} xfs_efd_log_format_t;
+typedef struct xfs_efd_log_format_32 {
+ unsigned short efd_type; /* efd log item type */
+ unsigned short efd_size; /* size of this item */
+ uint efd_nextents; /* # of extents freed */
+ __uint64_t efd_efi_id; /* id of corresponding efi */
+ xfs_extent_32_t efd_extents[1]; /* array of extents freed */
+} __attribute__((packed)) xfs_efd_log_format_32_t;
+
+typedef struct xfs_efd_log_format_64 {
+ unsigned short efd_type; /* efd log item type */
+ unsigned short efd_size; /* size of this item */
+ uint efd_nextents; /* # of extents freed */
+ __uint64_t efd_efi_id; /* id of corresponding efi */
+ xfs_extent_64_t efd_extents[1]; /* array of extents freed */
+} xfs_efd_log_format_64_t;
+
#ifdef __KERNEL__
@@ -103,7 +153,8 @@ extern struct kmem_zone *xfs_efd_zone;
xfs_efi_log_item_t *xfs_efi_init(struct xfs_mount *, uint);
xfs_efd_log_item_t *xfs_efd_init(struct xfs_mount *, xfs_efi_log_item_t *,
uint);
-
+int xfs_efi_copy_format(xfs_log_iovec_t *buf,
+ xfs_efi_log_format_t *dst_efi_fmt);
void xfs_efi_item_free(xfs_efi_log_item_t *);
#endif /* __KERNEL__ */
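The point of the packed 32-bit variants above is that they occupy fewer bytes than the padded 64-bit ones, so log recovery can tell which layout wrote a record purely from the logged buffer length, as xfs_efi_copy_format() does in the previous file. A small host-side sketch that mirrors the idea and prints the two candidate lengths for a given extent count; the structures here are illustrative stand-ins, not the authoritative definitions:

#include <stdio.h>
#include <stdint.h>

typedef struct demo_extent_32 {
        uint64_t ext_start;
        uint32_t ext_len;
} __attribute__((packed)) demo_extent_32_t;     /* 12 bytes, no padding */

typedef struct demo_extent_64 {
        uint64_t ext_start;
        uint32_t ext_len;
        uint32_t ext_pad;
} demo_extent_64_t;                             /* 16 bytes, explicit pad */

typedef struct demo_efi_32 {
        unsigned short   efi_type;
        unsigned short   efi_size;
        unsigned int     efi_nextents;
        uint64_t         efi_id;
        demo_extent_32_t efi_extents[1];
} __attribute__((packed)) demo_efi_32_t;

typedef struct demo_efi_64 {
        unsigned short   efi_type;
        unsigned short   efi_size;
        unsigned int     efi_nextents;
        uint64_t         efi_id;
        demo_extent_64_t efi_extents[1];
} demo_efi_64_t;

int main(void)
{
        unsigned int nextents = 2;
        size_t len32 = sizeof(demo_efi_32_t) +
                       (nextents - 1) * sizeof(demo_extent_32_t);
        size_t len64 = sizeof(demo_efi_64_t) +
                       (nextents - 1) * sizeof(demo_extent_64_t);

        /* recovery compares the logged buffer length against both candidates;
         * on a typical 32-bit x86 build the native layout matches the packed
         * one, on a 64-bit build it matches the padded one */
        printf("len32 = %zu, len64 = %zu\n", len32, len64);
        return 0;
}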
diff --git a/fs/xfs/xfs_fs.h b/fs/xfs/xfs_fs.h
index 14010f1fa82..0f0ad153595 100644
--- a/fs/xfs/xfs_fs.h
+++ b/fs/xfs/xfs_fs.h
@@ -67,14 +67,15 @@ struct fsxattr {
#define XFS_XFLAG_NOSYMLINKS 0x00000400 /* disallow symlink creation */
#define XFS_XFLAG_EXTSIZE 0x00000800 /* extent size allocator hint */
#define XFS_XFLAG_EXTSZINHERIT 0x00001000 /* inherit inode extent size */
+#define XFS_XFLAG_NODEFRAG 0x00002000 /* do not defragment */
#define XFS_XFLAG_HASATTR 0x80000000 /* no DIFLAG for this */
/*
* Structure for XFS_IOC_GETBMAP.
* On input, fill in bmv_offset and bmv_length of the first structure
- * to indicate the area of interest in the file, and bmv_entry with the
- * number of array elements given. The first structure is updated on
- * return to give the offset and length for the next call.
+ * to indicate the area of interest in the file, and bmv_entries with
+ * the number of array elements given back. The first structure is
+ * updated on return to give the offset and length for the next call.
*/
#ifndef HAVE_GETBMAP
struct getbmap {
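Put another way, the first getbmap element is a control slot: the caller fills bmv_offset, bmv_length and bmv_count, the kernel reports in bmv_entries how many of the following slots it filled, and it advances the control slot for the next call. A user-space sketch of driving XFS_IOC_GETBMAP this way (a hedged example assuming the xfsprogs headers are installed; error handling trimmed):

#include <stdio.h>
#include <string.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <xfs/xfs.h>             /* XFS_IOC_GETBMAP, struct getbmap */

int main(int argc, char **argv)
{
        struct getbmap map[17];  /* map[0] is the control slot, 16 result slots */
        int fd, i;

        if (argc < 2 || (fd = open(argv[1], O_RDONLY)) < 0)
                return 1;

        memset(map, 0, sizeof(map));
        map[0].bmv_offset = 0;   /* start of file, in 512-byte units */
        map[0].bmv_length = -1;  /* -1 means "through end of file" */
        map[0].bmv_count = 17;   /* total elements, including the control slot */

        do {
                if (ioctl(fd, XFS_IOC_GETBMAP, map) < 0)
                        break;
                for (i = 1; i <= map[0].bmv_entries; i++)
                        printf("offset %lld len %lld block %lld\n",
                               (long long)map[i].bmv_offset,
                               (long long)map[i].bmv_length,
                               (long long)map[i].bmv_block);
        } while (map[0].bmv_entries > 0);

        close(fd);
        return 0;
}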
diff --git a/fs/xfs/xfs_fsops.c b/fs/xfs/xfs_fsops.c
index dfa3527b20a..077629bab53 100644
--- a/fs/xfs/xfs_fsops.c
+++ b/fs/xfs/xfs_fsops.c
@@ -24,14 +24,12 @@
#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
-#include "xfs_dir.h"
#include "xfs_dir2.h"
#include "xfs_dmapi.h"
#include "xfs_mount.h"
#include "xfs_bmap_btree.h"
#include "xfs_alloc_btree.h"
#include "xfs_ialloc_btree.h"
-#include "xfs_dir_sf.h"
#include "xfs_dir2_sf.h"
#include "xfs_attr_sf.h"
#include "xfs_dinode.h"
@@ -542,14 +540,13 @@ xfs_reserve_blocks(
}
void
-xfs_fs_log_dummy(xfs_mount_t *mp)
+xfs_fs_log_dummy(
+ xfs_mount_t *mp)
{
- xfs_trans_t *tp;
- xfs_inode_t *ip;
-
+ xfs_trans_t *tp;
+ xfs_inode_t *ip;
tp = _xfs_trans_alloc(mp, XFS_TRANS_DUMMY1);
- atomic_inc(&mp->m_active_trans);
if (xfs_trans_reserve(tp, 0, XFS_ICHANGE_LOG_RES(mp), 0, 0, 0)) {
xfs_trans_cancel(tp, 0);
return;
@@ -574,21 +571,22 @@ xfs_fs_goingdown(
{
switch (inflags) {
case XFS_FSOP_GOING_FLAGS_DEFAULT: {
- struct vfs *vfsp = XFS_MTOVFS(mp);
+ struct bhv_vfs *vfsp = XFS_MTOVFS(mp);
struct super_block *sb = freeze_bdev(vfsp->vfs_super->s_bdev);
if (sb && !IS_ERR(sb)) {
- xfs_force_shutdown(mp, XFS_FORCE_UMOUNT);
+ xfs_force_shutdown(mp, SHUTDOWN_FORCE_UMOUNT);
thaw_bdev(sb->s_bdev, sb);
}
break;
}
case XFS_FSOP_GOING_FLAGS_LOGFLUSH:
- xfs_force_shutdown(mp, XFS_FORCE_UMOUNT);
+ xfs_force_shutdown(mp, SHUTDOWN_FORCE_UMOUNT);
break;
case XFS_FSOP_GOING_FLAGS_NOLOGFLUSH:
- xfs_force_shutdown(mp, XFS_FORCE_UMOUNT|XFS_LOG_IO_ERROR);
+ xfs_force_shutdown(mp,
+ SHUTDOWN_FORCE_UMOUNT | SHUTDOWN_LOG_IO_ERROR);
break;
default:
return XFS_ERROR(EINVAL);
diff --git a/fs/xfs/xfs_ialloc.c b/fs/xfs/xfs_ialloc.c
index deddbd03c16..33164a85aa9 100644
--- a/fs/xfs/xfs_ialloc.c
+++ b/fs/xfs/xfs_ialloc.c
@@ -24,14 +24,12 @@
#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
-#include "xfs_dir.h"
#include "xfs_dir2.h"
#include "xfs_dmapi.h"
#include "xfs_mount.h"
#include "xfs_bmap_btree.h"
#include "xfs_alloc_btree.h"
#include "xfs_ialloc_btree.h"
-#include "xfs_dir_sf.h"
#include "xfs_dir2_sf.h"
#include "xfs_attr_sf.h"
#include "xfs_dinode.h"
@@ -1174,6 +1172,9 @@ xfs_dilocate(
if (agno >= mp->m_sb.sb_agcount || agbno >= mp->m_sb.sb_agblocks ||
ino != XFS_AGINO_TO_INO(mp, agno, agino)) {
#ifdef DEBUG
+ /* no diagnostics for bulkstat, ino comes from userspace */
+ if (flags & XFS_IMAP_BULKSTAT)
+ return XFS_ERROR(EINVAL);
if (agno >= mp->m_sb.sb_agcount) {
xfs_fs_cmn_err(CE_ALERT, mp,
"xfs_dilocate: agno (%d) >= "
diff --git a/fs/xfs/xfs_ialloc_btree.c b/fs/xfs/xfs_ialloc_btree.c
index 60c65683462..616eeeb6953 100644
--- a/fs/xfs/xfs_ialloc_btree.c
+++ b/fs/xfs/xfs_ialloc_btree.c
@@ -24,14 +24,12 @@
#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
-#include "xfs_dir.h"
#include "xfs_dir2.h"
#include "xfs_dmapi.h"
#include "xfs_mount.h"
#include "xfs_bmap_btree.h"
#include "xfs_alloc_btree.h"
#include "xfs_ialloc_btree.h"
-#include "xfs_dir_sf.h"
#include "xfs_dir2_sf.h"
#include "xfs_attr_sf.h"
#include "xfs_dinode.h"
diff --git a/fs/xfs/xfs_iget.c b/fs/xfs/xfs_iget.c
index b5385432526..0724df7fabb 100644
--- a/fs/xfs/xfs_iget.c
+++ b/fs/xfs/xfs_iget.c
@@ -24,14 +24,12 @@
#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
-#include "xfs_dir.h"
#include "xfs_dir2.h"
#include "xfs_dmapi.h"
#include "xfs_mount.h"
#include "xfs_bmap_btree.h"
#include "xfs_alloc_btree.h"
#include "xfs_ialloc_btree.h"
-#include "xfs_dir_sf.h"
#include "xfs_dir2_sf.h"
#include "xfs_attr_sf.h"
#include "xfs_dinode.h"
@@ -186,7 +184,7 @@ xfs_ihash_promote(
*/
STATIC int
xfs_iget_core(
- vnode_t *vp,
+ bhv_vnode_t *vp,
xfs_mount_t *mp,
xfs_trans_t *tp,
xfs_ino_t ino,
@@ -198,7 +196,7 @@ xfs_iget_core(
xfs_ihash_t *ih;
xfs_inode_t *ip;
xfs_inode_t *iq;
- vnode_t *inode_vp;
+ bhv_vnode_t *inode_vp;
ulong version;
int error;
/* REFERENCED */
@@ -468,7 +466,7 @@ finish_inode:
* If we have a real type for an on-disk inode, we can set ops(&unlock)
* now. If it's a new inode being created, xfs_ialloc will handle it.
*/
- VFS_INIT_VNODE(XFS_MTOVFS(mp), vp, XFS_ITOBHV(ip), 1);
+ bhv_vfs_init_vnode(XFS_MTOVFS(mp), vp, XFS_ITOBHV(ip), 1);
return 0;
}
@@ -489,7 +487,7 @@ xfs_iget(
xfs_daddr_t bno)
{
struct inode *inode;
- vnode_t *vp = NULL;
+ bhv_vnode_t *vp = NULL;
int error;
XFS_STATS_INC(xs_ig_attempts);
@@ -543,7 +541,7 @@ retry:
void
xfs_inode_lock_init(
xfs_inode_t *ip,
- vnode_t *vp)
+ bhv_vnode_t *vp)
{
mrlock_init(&ip->i_lock, MRLOCK_ALLOW_EQUAL_PRI|MRLOCK_BARRIER,
"xfsino", (long)vp->v_number);
@@ -603,12 +601,10 @@ void
xfs_iput(xfs_inode_t *ip,
uint lock_flags)
{
- vnode_t *vp = XFS_ITOV(ip);
+ bhv_vnode_t *vp = XFS_ITOV(ip);
vn_trace_entry(vp, "xfs_iput", (inst_t *)__return_address);
-
xfs_iunlock(ip, lock_flags);
-
VN_RELE(vp);
}
@@ -619,7 +615,7 @@ void
xfs_iput_new(xfs_inode_t *ip,
uint lock_flags)
{
- vnode_t *vp = XFS_ITOV(ip);
+ bhv_vnode_t *vp = XFS_ITOV(ip);
struct inode *inode = vn_to_inode(vp);
vn_trace_entry(vp, "xfs_iput_new", (inst_t *)__return_address);
@@ -645,7 +641,7 @@ xfs_iput_new(xfs_inode_t *ip,
void
xfs_ireclaim(xfs_inode_t *ip)
{
- vnode_t *vp;
+ bhv_vnode_t *vp;
/*
* Remove from old hash list and mount list.
@@ -1033,6 +1029,6 @@ xfs_iflock_nowait(xfs_inode_t *ip)
void
xfs_ifunlock(xfs_inode_t *ip)
{
- ASSERT(valusema(&(ip->i_flock)) <= 0);
+ ASSERT(issemalocked(&(ip->i_flock)));
vsema(&(ip->i_flock));
}
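valusema() relied on reading a semaphore's counter, which the Linux semaphore implementation does not reliably expose, so the valusema(&sem) <= 0 tests throughout this series become issemalocked(&sem), asking the question directly. The real helper lives in fs/xfs/linux-2.6/sema.h; a plausible reconstruction on top of the 2.6 semaphore primitives looks like this (illustrative, not the verbatim definition):

/* illustrative reconstruction -- see fs/xfs/linux-2.6/sema.h for the real one */
#include <asm/semaphore.h>      /* struct semaphore, down_trylock(), up() */

typedef struct semaphore sema_t;

static inline int demo_issemalocked(sema_t *sp)
{
        if (down_trylock(sp))
                return 1;       /* could not take it, so someone holds it */
        up(sp);                 /* we briefly took it; give it straight back */
        return 0;               /* it was free, i.e. not locked */
}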
diff --git a/fs/xfs/xfs_inode.c b/fs/xfs/xfs_inode.c
index 94b60dd0380..5fa0adb7e17 100644
--- a/fs/xfs/xfs_inode.c
+++ b/fs/xfs/xfs_inode.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2000-2003,2005 Silicon Graphics, Inc.
+ * Copyright (c) 2000-2006 Silicon Graphics, Inc.
* All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
@@ -26,14 +26,12 @@
#include "xfs_trans_priv.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
-#include "xfs_dir.h"
#include "xfs_dir2.h"
#include "xfs_dmapi.h"
#include "xfs_mount.h"
#include "xfs_bmap_btree.h"
#include "xfs_alloc_btree.h"
#include "xfs_ialloc_btree.h"
-#include "xfs_dir_sf.h"
#include "xfs_dir2_sf.h"
#include "xfs_attr_sf.h"
#include "xfs_dinode.h"
@@ -256,13 +254,11 @@ xfs_itobp(
xfs_daddr_t bno,
uint imap_flags)
{
+ xfs_imap_t imap;
xfs_buf_t *bp;
int error;
- xfs_imap_t imap;
-#ifdef __KERNEL__
int i;
int ni;
-#endif
if (ip->i_blkno == (xfs_daddr_t)0) {
/*
@@ -319,7 +315,6 @@ xfs_itobp(
*/
error = xfs_trans_read_buf(mp, tp, mp->m_ddev_targp, imap.im_blkno,
(int)imap.im_len, XFS_BUF_LOCK, &bp);
-
if (error) {
#ifdef DEBUG
xfs_fs_cmn_err(CE_ALERT, mp, "xfs_itobp: "
@@ -330,17 +325,21 @@ xfs_itobp(
#endif /* DEBUG */
return error;
}
-#ifdef __KERNEL__
+
/*
* Validate the magic number and version of every inode in the buffer
* (if DEBUG kernel) or the first inode in the buffer, otherwise.
+ * No validation is done here in userspace (xfs_repair).
*/
-#ifdef DEBUG
+#if !defined(__KERNEL__)
+ ni = 0;
+#elif defined(DEBUG)
ni = (imap_flags & XFS_IMAP_BULKSTAT) ? 0 :
(BBTOB(imap.im_len) >> mp->m_sb.sb_inodelog);
-#else
+#else /* usual case */
ni = (imap_flags & XFS_IMAP_BULKSTAT) ? 0 : 1;
#endif
+
for (i = 0; i < ni; i++) {
int di_ok;
xfs_dinode_t *dip;
@@ -352,8 +351,11 @@ xfs_itobp(
if (unlikely(XFS_TEST_ERROR(!di_ok, mp, XFS_ERRTAG_ITOBP_INOTOBP,
XFS_RANDOM_ITOBP_INOTOBP))) {
#ifdef DEBUG
- prdev("bad inode magic/vsn daddr %lld #%d (magic=%x)",
- mp->m_ddev_targp,
+ if (!(imap_flags & XFS_IMAP_BULKSTAT))
+ cmn_err(CE_ALERT,
+ "Device %s - bad inode magic/vsn "
+ "daddr %lld #%d (magic=%x)",
+ XFS_BUFTARG_NAME(mp->m_ddev_targp),
(unsigned long long)imap.im_blkno, i,
INT_GET(dip->di_core.di_magic, ARCH_CONVERT));
#endif
@@ -363,7 +365,6 @@ xfs_itobp(
return XFS_ERROR(EFSCORRUPTED);
}
}
-#endif /* __KERNEL__ */
xfs_inobp_check(mp, bp);
@@ -782,7 +783,6 @@ xfs_xlate_dinode_core(
STATIC uint
_xfs_dic2xflags(
- xfs_dinode_core_t *dic,
__uint16_t di_flags)
{
uint flags = 0;
@@ -812,6 +812,8 @@ _xfs_dic2xflags(
flags |= XFS_XFLAG_EXTSIZE;
if (di_flags & XFS_DIFLAG_EXTSZINHERIT)
flags |= XFS_XFLAG_EXTSZINHERIT;
+ if (di_flags & XFS_DIFLAG_NODEFRAG)
+ flags |= XFS_XFLAG_NODEFRAG;
}
return flags;
@@ -823,16 +825,16 @@ xfs_ip2xflags(
{
xfs_dinode_core_t *dic = &ip->i_d;
- return _xfs_dic2xflags(dic, dic->di_flags) |
- (XFS_CFORK_Q(dic) ? XFS_XFLAG_HASATTR : 0);
+ return _xfs_dic2xflags(dic->di_flags) |
+ (XFS_CFORK_Q(dic) ? XFS_XFLAG_HASATTR : 0);
}
uint
xfs_dic2xflags(
xfs_dinode_core_t *dic)
{
- return _xfs_dic2xflags(dic, INT_GET(dic->di_flags, ARCH_CONVERT)) |
- (XFS_CFORK_Q_DISK(dic) ? XFS_XFLAG_HASATTR : 0);
+ return _xfs_dic2xflags(INT_GET(dic->di_flags, ARCH_CONVERT)) |
+ (XFS_CFORK_Q_DISK(dic) ? XFS_XFLAG_HASATTR : 0);
}
/*
@@ -1083,7 +1085,7 @@ xfs_ialloc(
{
xfs_ino_t ino;
xfs_inode_t *ip;
- vnode_t *vp;
+ bhv_vnode_t *vp;
uint flags;
int error;
@@ -1221,6 +1223,9 @@ xfs_ialloc(
di_flags |= XFS_DIFLAG_NOSYMLINKS;
if (pip->i_d.di_flags & XFS_DIFLAG_PROJINHERIT)
di_flags |= XFS_DIFLAG_PROJINHERIT;
+ if ((pip->i_d.di_flags & XFS_DIFLAG_NODEFRAG) &&
+ xfs_inherit_nodefrag)
+ di_flags |= XFS_DIFLAG_NODEFRAG;
ip->i_d.di_flags |= di_flags;
}
/* FALLTHROUGH */
@@ -1244,8 +1249,8 @@ xfs_ialloc(
*/
xfs_trans_log_inode(tp, ip, flags);
- /* now that we have an i_mode we can set Linux inode ops (& unlock) */
- VFS_INIT_VNODE(XFS_MTOVFS(tp->t_mountp), vp, XFS_ITOBHV(ip), 1);
+ /* now that we have an i_mode we can setup inode ops and unlock */
+ bhv_vfs_init_vnode(XFS_MTOVFS(tp->t_mountp), vp, XFS_ITOBHV(ip), 1);
*ipp = ip;
return 0;
@@ -1285,7 +1290,7 @@ xfs_isize_check(
(xfs_ufsize_t)XFS_MAXIOFFSET(mp)) -
map_first),
XFS_BMAPI_ENTIRE, NULL, 0, imaps, &nimaps,
- NULL))
+ NULL, NULL))
return;
ASSERT(nimaps == 1);
ASSERT(imaps[0].br_startblock == HOLESTARTBLOCK);
@@ -1421,7 +1426,7 @@ xfs_itruncate_start(
xfs_fsize_t last_byte;
xfs_off_t toss_start;
xfs_mount_t *mp;
- vnode_t *vp;
+ bhv_vnode_t *vp;
ASSERT(ismrlocked(&ip->i_iolock, MR_UPDATE) != 0);
ASSERT((new_size == 0) || (new_size <= ip->i_d.di_size));
@@ -1434,9 +1439,9 @@ xfs_itruncate_start(
vn_iowait(vp); /* wait for the completion of any pending DIOs */
/*
- * Call VOP_TOSS_PAGES() or VOP_FLUSHINVAL_PAGES() to get rid of pages and buffers
+ * Call toss_pages or flushinval_pages to get rid of pages
* overlapping the region being removed. We have to use
- * the less efficient VOP_FLUSHINVAL_PAGES() in the case that the
+ * the less efficient flushinval_pages in the case that the
* caller may not be able to finish the truncate without
* dropping the inode's I/O lock. Make sure
* to catch any pages brought in by buffers overlapping
@@ -1445,10 +1450,10 @@ xfs_itruncate_start(
* so that we don't toss things on the same block as
* new_size but before it.
*
- * Before calling VOP_TOSS_PAGES() or VOP_FLUSHINVAL_PAGES(), make sure to
+ * Before calling toss_page or flushinval_pages, make sure to
* call remapf() over the same region if the file is mapped.
* This frees up mapped file references to the pages in the
- * given range and for the VOP_FLUSHINVAL_PAGES() case it ensures
+ * given range and for the flushinval_pages case it ensures
* that we get the latest mapped changes flushed out.
*/
toss_start = XFS_B_TO_FSB(mp, (xfs_ufsize_t)new_size);
@@ -1466,9 +1471,9 @@ xfs_itruncate_start(
last_byte);
if (last_byte > toss_start) {
if (flags & XFS_ITRUNC_DEFINITE) {
- VOP_TOSS_PAGES(vp, toss_start, -1, FI_REMAPF_LOCKED);
+ bhv_vop_toss_pages(vp, toss_start, -1, FI_REMAPF_LOCKED);
} else {
- VOP_FLUSHINVAL_PAGES(vp, toss_start, -1, FI_REMAPF_LOCKED);
+ bhv_vop_flushinval_pages(vp, toss_start, -1, FI_REMAPF_LOCKED);
}
}
@@ -1666,12 +1671,13 @@ xfs_itruncate_finish(
* runs.
*/
XFS_BMAP_INIT(&free_list, &first_block);
- error = xfs_bunmapi(ntp, ip, first_unmap_block,
- unmap_len,
+ error = XFS_BUNMAPI(mp, ntp, &ip->i_iocore,
+ first_unmap_block, unmap_len,
XFS_BMAPI_AFLAG(fork) |
(sync ? 0 : XFS_BMAPI_ASYNC),
XFS_ITRUNC_MAX_EXTENTS,
- &first_block, &free_list, &done);
+ &first_block, &free_list,
+ NULL, &done);
if (error) {
/*
* If the bunmapi call encounters an error,
@@ -2745,13 +2751,14 @@ xfs_iunpin(
* the inode to become unpinned.
*/
if (!(ip->i_flags & (XFS_IRECLAIM|XFS_IRECLAIMABLE))) {
- vnode_t *vp = XFS_ITOV_NULL(ip);
+ bhv_vnode_t *vp = XFS_ITOV_NULL(ip);
/* make sync come back and flush this inode */
if (vp) {
struct inode *inode = vn_to_inode(vp);
- if (!(inode->i_state & I_NEW))
+ if (!(inode->i_state &
+ (I_NEW|I_FREEING|I_CLEAR)))
mark_inode_dirty_sync(inode);
}
}
@@ -2916,13 +2923,6 @@ xfs_iflush_fork(
ASSERT(ifp->if_bytes <= XFS_IFORK_SIZE(ip, whichfork));
memcpy(cp, ifp->if_u1.if_data, ifp->if_bytes);
}
- if (whichfork == XFS_DATA_FORK) {
- if (unlikely(XFS_DIR_SHORTFORM_VALIDATE_ONDISK(mp, dip))) {
- XFS_ERROR_REPORT("xfs_iflush_fork",
- XFS_ERRLEVEL_LOW, mp);
- return XFS_ERROR(EFSCORRUPTED);
- }
- }
break;
case XFS_DINODE_FMT_EXTENTS:
@@ -3006,7 +3006,7 @@ xfs_iflush(
XFS_STATS_INC(xs_iflush_count);
ASSERT(ismrlocked(&ip->i_lock, MR_UPDATE|MR_ACCESS));
- ASSERT(valusema(&ip->i_flock) <= 0);
+ ASSERT(issemalocked(&(ip->i_flock)));
ASSERT(ip->i_d.di_format != XFS_DINODE_FMT_BTREE ||
ip->i_d.di_nextents > ip->i_df.if_ext_max);
@@ -3199,7 +3199,7 @@ xfs_iflush(
corrupt_out:
xfs_buf_relse(bp);
- xfs_force_shutdown(mp, XFS_CORRUPT_INCORE);
+ xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
xfs_iflush_abort(ip);
/*
* Unlocks the flush lock
@@ -3221,7 +3221,7 @@ cluster_corrupt_out:
xfs_buf_relse(bp);
}
- xfs_force_shutdown(mp, XFS_CORRUPT_INCORE);
+ xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
if(!bufwasdelwri) {
/*
@@ -3264,7 +3264,7 @@ xfs_iflush_int(
SPLDECL(s);
ASSERT(ismrlocked(&ip->i_lock, MR_UPDATE|MR_ACCESS));
- ASSERT(valusema(&ip->i_flock) <= 0);
+ ASSERT(issemalocked(&(ip->i_flock)));
ASSERT(ip->i_d.di_format != XFS_DINODE_FMT_BTREE ||
ip->i_d.di_nextents > ip->i_df.if_ext_max);
@@ -3504,7 +3504,7 @@ xfs_iflush_all(
xfs_mount_t *mp)
{
xfs_inode_t *ip;
- vnode_t *vp;
+ bhv_vnode_t *vp;
again:
XFS_MOUNT_ILOCK(mp);
@@ -4180,7 +4180,7 @@ xfs_iext_direct_to_inline(
*/
memcpy(ifp->if_u2.if_inline_ext, ifp->if_u1.if_extents,
nextents * sizeof(xfs_bmbt_rec_t));
- kmem_free(ifp->if_u1.if_extents, KM_SLEEP);
+ kmem_free(ifp->if_u1.if_extents, ifp->if_real_bytes);
ifp->if_u1.if_extents = ifp->if_u2.if_inline_ext;
ifp->if_real_bytes = 0;
}
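The _xfs_dic2xflags() change above drops the now-unused dinode-core argument and adds the NODEFRAG translation; the function itself is a straight bit-for-bit mapping from on-disk di_flags to the user-visible XFS_XFLAG_* values. The same mapping can be written table-driven, as in this user-space sketch (the XFS_XFLAG_* values mirror the ones shown earlier in xfs_fs.h; the DIFLAG bit positions are illustrative assumptions):

#include <stdio.h>
#include <stddef.h>
#include <stdint.h>

/* assumed on-disk bit positions, for illustration only */
#define DEMO_DIFLAG_EXTSIZE   (1u << 11)
#define DEMO_DIFLAG_NODEFRAG  (1u << 13)
/* user-visible values as in xfs_fs.h */
#define DEMO_XFLAG_EXTSIZE    0x00000800u
#define DEMO_XFLAG_NODEFRAG   0x00002000u

static const struct { uint16_t di; uint32_t x; } demo_flagmap[] = {
        { DEMO_DIFLAG_EXTSIZE,  DEMO_XFLAG_EXTSIZE  },
        { DEMO_DIFLAG_NODEFRAG, DEMO_XFLAG_NODEFRAG },
        /* ... one row per translated flag ... */
};

static uint32_t demo_dic2xflags(uint16_t di_flags)
{
        uint32_t flags = 0;
        size_t i;

        for (i = 0; i < sizeof(demo_flagmap) / sizeof(demo_flagmap[0]); i++)
                if (di_flags & demo_flagmap[i].di)
                        flags |= demo_flagmap[i].x;
        return flags;
}

int main(void)
{
        printf("0x%x\n", demo_dic2xflags(DEMO_DIFLAG_EXTSIZE | DEMO_DIFLAG_NODEFRAG));
        return 0;
}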
diff --git a/fs/xfs/xfs_inode.h b/fs/xfs/xfs_inode.h
index 3b544db1790..d10b76ed1e5 100644
--- a/fs/xfs/xfs_inode.h
+++ b/fs/xfs/xfs_inode.h
@@ -102,9 +102,9 @@ typedef struct xfs_ifork {
#ifdef __KERNEL__
struct bhv_desc;
+struct bhv_vnode;
struct cred;
struct ktrace;
-struct vnode;
struct xfs_buf;
struct xfs_bmap_free;
struct xfs_bmbt_irec;
@@ -400,7 +400,7 @@ void xfs_chash_init(struct xfs_mount *);
void xfs_chash_free(struct xfs_mount *);
xfs_inode_t *xfs_inode_incore(struct xfs_mount *, xfs_ino_t,
struct xfs_trans *);
-void xfs_inode_lock_init(xfs_inode_t *, struct vnode *);
+void xfs_inode_lock_init(xfs_inode_t *, struct bhv_vnode *);
int xfs_iget(struct xfs_mount *, struct xfs_trans *, xfs_ino_t,
uint, uint, xfs_inode_t **, xfs_daddr_t);
void xfs_iput(xfs_inode_t *, uint);
@@ -461,7 +461,7 @@ void xfs_ichgtime(xfs_inode_t *, int);
xfs_fsize_t xfs_file_last_byte(xfs_inode_t *);
void xfs_lock_inodes(xfs_inode_t **, int, int, uint);
-xfs_inode_t *xfs_vtoi(struct vnode *vp);
+xfs_inode_t *xfs_vtoi(struct bhv_vnode *vp);
void xfs_synchronize_atime(xfs_inode_t *);
@@ -509,7 +509,6 @@ extern struct kmem_zone *xfs_chashlist_zone;
extern struct kmem_zone *xfs_ifork_zone;
extern struct kmem_zone *xfs_inode_zone;
extern struct kmem_zone *xfs_ili_zone;
-extern struct vnodeops xfs_vnodeops;
#endif /* __KERNEL__ */
diff --git a/fs/xfs/xfs_inode_item.c b/fs/xfs/xfs_inode_item.c
index 7497a481b2f..f8e80d8e723 100644
--- a/fs/xfs/xfs_inode_item.c
+++ b/fs/xfs/xfs_inode_item.c
@@ -25,7 +25,6 @@
#include "xfs_buf_item.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
-#include "xfs_dir.h"
#include "xfs_dir2.h"
#include "xfs_dmapi.h"
#include "xfs_mount.h"
@@ -33,7 +32,6 @@
#include "xfs_bmap_btree.h"
#include "xfs_alloc_btree.h"
#include "xfs_ialloc_btree.h"
-#include "xfs_dir_sf.h"
#include "xfs_dir2_sf.h"
#include "xfs_attr_sf.h"
#include "xfs_dinode.h"
@@ -794,7 +792,7 @@ xfs_inode_item_pushbuf(
* inode flush completed and the inode was taken off the AIL.
* So, just get out.
*/
- if ((valusema(&(ip->i_flock)) > 0) ||
+ if (!issemalocked(&(ip->i_flock)) ||
((iip->ili_item.li_flags & XFS_LI_IN_AIL) == 0)) {
iip->ili_pushbuf_flag = 0;
xfs_iunlock(ip, XFS_ILOCK_SHARED);
@@ -816,7 +814,7 @@ xfs_inode_item_pushbuf(
* If not, we can flush it async.
*/
dopush = ((iip->ili_item.li_flags & XFS_LI_IN_AIL) &&
- (valusema(&(ip->i_flock)) <= 0));
+ issemalocked(&(ip->i_flock)));
iip->ili_pushbuf_flag = 0;
xfs_iunlock(ip, XFS_ILOCK_SHARED);
xfs_buftrace("INODE ITEM PUSH", bp);
@@ -864,7 +862,7 @@ xfs_inode_item_push(
ip = iip->ili_inode;
ASSERT(ismrlocked(&(ip->i_lock), MR_ACCESS));
- ASSERT(valusema(&(ip->i_flock)) <= 0);
+ ASSERT(issemalocked(&(ip->i_flock)));
/*
* Since we were able to lock the inode's flush lock and
* we found it on the AIL, the inode must be dirty. This
@@ -1084,3 +1082,52 @@ xfs_istale_done(
{
xfs_iflush_abort(iip->ili_inode);
}
+
+/*
+ * Convert an xfs_inode_log_format struct from either the 32-bit or the 64-bit
+ * version (which can have different field alignments) to the native version.
+ */
+int
+xfs_inode_item_format_convert(
+ xfs_log_iovec_t *buf,
+ xfs_inode_log_format_t *in_f)
+{
+ if (buf->i_len == sizeof(xfs_inode_log_format_32_t)) {
+ xfs_inode_log_format_32_t *in_f32;
+
+ in_f32 = (xfs_inode_log_format_32_t *)buf->i_addr;
+ in_f->ilf_type = in_f32->ilf_type;
+ in_f->ilf_size = in_f32->ilf_size;
+ in_f->ilf_fields = in_f32->ilf_fields;
+ in_f->ilf_asize = in_f32->ilf_asize;
+ in_f->ilf_dsize = in_f32->ilf_dsize;
+ in_f->ilf_ino = in_f32->ilf_ino;
+ /* copy biggest field of ilf_u */
+ memcpy(in_f->ilf_u.ilfu_uuid.__u_bits,
+ in_f32->ilf_u.ilfu_uuid.__u_bits,
+ sizeof(uuid_t));
+ in_f->ilf_blkno = in_f32->ilf_blkno;
+ in_f->ilf_len = in_f32->ilf_len;
+ in_f->ilf_boffset = in_f32->ilf_boffset;
+ return 0;
+ } else if (buf->i_len == sizeof(xfs_inode_log_format_64_t)){
+ xfs_inode_log_format_64_t *in_f64;
+
+ in_f64 = (xfs_inode_log_format_64_t *)buf->i_addr;
+ in_f->ilf_type = in_f64->ilf_type;
+ in_f->ilf_size = in_f64->ilf_size;
+ in_f->ilf_fields = in_f64->ilf_fields;
+ in_f->ilf_asize = in_f64->ilf_asize;
+ in_f->ilf_dsize = in_f64->ilf_dsize;
+ in_f->ilf_ino = in_f64->ilf_ino;
+ /* copy biggest field of ilf_u */
+ memcpy(in_f->ilf_u.ilfu_uuid.__u_bits,
+ in_f64->ilf_u.ilfu_uuid.__u_bits,
+ sizeof(uuid_t));
+ in_f->ilf_blkno = in_f64->ilf_blkno;
+ in_f->ilf_len = in_f64->ilf_len;
+ in_f->ilf_boffset = in_f64->ilf_boffset;
+ return 0;
+ }
+ return EFSCORRUPTED;
+}
diff --git a/fs/xfs/xfs_inode_item.h b/fs/xfs/xfs_inode_item.h
index c5dbf93b666..5db6cd1b4cf 100644
--- a/fs/xfs/xfs_inode_item.h
+++ b/fs/xfs/xfs_inode_item.h
@@ -23,25 +23,6 @@
* log. The size of the inline data/extents/b-tree root to be logged
* (if any) is indicated in the ilf_dsize field. Changes to this structure
* must be added on to the end.
- *
- * Convention for naming inode log item versions : The current version
- * is always named XFS_LI_INODE. When an inode log item gets superseded,
- * add the latest version of IRIX that will generate logs with that item
- * to the version name.
- *
- * -Version 1 of this structure (XFS_LI_5_3_INODE) included up to the first
- * union (ilf_u) field. This was released with IRIX 5.3-XFS.
- * -Version 2 of this structure (XFS_LI_6_1_INODE) is currently the entire
- * structure. This was released with IRIX 6.0.1-XFS and IRIX 6.1.
- * -Version 3 of this structure (XFS_LI_INODE) is the same as version 2
- * so a new structure definition wasn't necessary. However, we had
- * to add a new type because the inode cluster size changed from 4K
- * to 8K and the version number had to be rev'ved to keep older kernels
- * from trying to recover logs with the 8K buffers in them. The logging
- * code can handle recovery on different-sized clusters now so hopefully
- * this'll be the last time we need to change the inode log item just
- * for a change in the inode cluster size. This new version was
- * released with IRIX 6.2.
*/
typedef struct xfs_inode_log_format {
unsigned short ilf_type; /* inode log item type */
@@ -59,18 +40,38 @@ typedef struct xfs_inode_log_format {
int ilf_boffset; /* off of inode in buffer */
} xfs_inode_log_format_t;
-/* Initial version shipped with IRIX 5.3-XFS */
-typedef struct xfs_inode_log_format_v1 {
- unsigned short ilf_type; /* inode log item type */
- unsigned short ilf_size; /* size of this item */
- uint ilf_fields; /* flags for fields logged */
- uint ilf_dsize; /* size of data/ext/root */
- xfs_ino_t ilf_ino; /* inode number */
+typedef struct xfs_inode_log_format_32 {
+ unsigned short ilf_type; /* 16: inode log item type */
+ unsigned short ilf_size; /* 16: size of this item */
+ uint ilf_fields; /* 32: flags for fields logged */
+ ushort ilf_asize; /* 32: size of attr d/ext/root */
+ ushort ilf_dsize; /* 32: size of data/ext/root */
+ xfs_ino_t ilf_ino; /* 64: inode number */
union {
- xfs_dev_t ilfu_rdev; /* rdev value for dev inode*/
- uuid_t ilfu_uuid; /* mount point value */
+ xfs_dev_t ilfu_rdev; /* 32: rdev value for dev inode*/
+ uuid_t ilfu_uuid; /* 128: mount point value */
+ } ilf_u;
+ __int64_t ilf_blkno; /* 64: blkno of inode buffer */
+ int ilf_len; /* 32: len of inode buffer */
+ int ilf_boffset; /* 32: off of inode in buffer */
+} __attribute__((packed)) xfs_inode_log_format_32_t;
+
+typedef struct xfs_inode_log_format_64 {
+ unsigned short ilf_type; /* 16: inode log item type */
+ unsigned short ilf_size; /* 16: size of this item */
+ uint ilf_fields; /* 32: flags for fields logged */
+ ushort ilf_asize; /* 32: size of attr d/ext/root */
+ ushort ilf_dsize; /* 32: size of data/ext/root */
+ __uint32_t ilf_pad; /* 32: pad for 64 bit boundary */
+ xfs_ino_t ilf_ino; /* 64: inode number */
+ union {
+ xfs_dev_t ilfu_rdev; /* 32: rdev value for dev inode*/
+ uuid_t ilfu_uuid; /* 128: mount point value */
} ilf_u;
-} xfs_inode_log_format_t_v1;
+ __int64_t ilf_blkno; /* 64: blkno of inode buffer */
+ int ilf_len; /* 32: len of inode buffer */
+ int ilf_boffset; /* 32: off of inode in buffer */
+} xfs_inode_log_format_64_t;
/*
* Flags for xfs_trans_log_inode flags field.
@@ -172,6 +173,8 @@ extern void xfs_inode_item_destroy(struct xfs_inode *);
extern void xfs_iflush_done(struct xfs_buf *, xfs_inode_log_item_t *);
extern void xfs_istale_done(struct xfs_buf *, xfs_inode_log_item_t *);
extern void xfs_iflush_abort(struct xfs_inode *);
+extern int xfs_inode_item_format_convert(xfs_log_iovec_t *,
+ xfs_inode_log_format_t *);
#endif /* __KERNEL__ */
diff --git a/fs/xfs/xfs_iocore.c b/fs/xfs/xfs_iocore.c
index a07815661a8..06d710c9ce4 100644
--- a/fs/xfs/xfs_iocore.c
+++ b/fs/xfs/xfs_iocore.c
@@ -24,14 +24,13 @@
#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
-#include "xfs_dir.h"
#include "xfs_dir2.h"
+#include "xfs_dfrag.h"
#include "xfs_dmapi.h"
#include "xfs_mount.h"
#include "xfs_bmap_btree.h"
#include "xfs_alloc_btree.h"
#include "xfs_ialloc_btree.h"
-#include "xfs_dir_sf.h"
#include "xfs_dir2_sf.h"
#include "xfs_attr_sf.h"
#include "xfs_dinode.h"
@@ -58,7 +57,7 @@ xfs_size_fn(
STATIC int
xfs_ioinit(
- struct vfs *vfsp,
+ struct bhv_vfs *vfsp,
struct xfs_mount_args *mntargs,
int flags)
{
@@ -68,6 +67,7 @@ xfs_ioinit(
xfs_ioops_t xfs_iocore_xfs = {
.xfs_ioinit = (xfs_ioinit_t) xfs_ioinit,
.xfs_bmapi_func = (xfs_bmapi_t) xfs_bmapi,
+ .xfs_bunmapi_func = (xfs_bunmapi_t) xfs_bunmapi,
.xfs_bmap_eof_func = (xfs_bmap_eof_t) xfs_bmap_eof,
.xfs_iomap_write_direct =
(xfs_iomap_write_direct_t) xfs_iomap_write_direct,
@@ -84,6 +84,7 @@ xfs_ioops_t xfs_iocore_xfs = {
.xfs_unlock = (xfs_unlk_t) xfs_iunlock,
.xfs_size_func = (xfs_size_t) xfs_size_fn,
.xfs_iodone = (xfs_iodone_t) fs_noerr,
+ .xfs_swap_extents_func = (xfs_swap_extents_t) xfs_swap_extents,
};
void
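xfs_iocore_xfs is the per-mount I/O operations table; wrapper macros such as XFS_BMAPI() and the new XFS_BUNMAPI() dispatch through a table like this rather than calling xfs_bmapi()/xfs_bunmapi() directly, which is why so many call sites in this series switch to the uppercase forms. A stripped-down sketch of the pattern with hypothetical names (not the real xfs_ioops_t layout):

#include <stdio.h>

/* hypothetical ops table, in the spirit of xfs_ioops_t */
typedef int (*demo_bmapi_fn)(int nblocks);
typedef int (*demo_bunmapi_fn)(int nblocks);

struct demo_ioops {
        demo_bmapi_fn   bmapi_func;
        demo_bunmapi_fn bunmapi_func;
};

static int demo_bmapi(int nblocks)   { printf("map %d blocks\n", nblocks); return 0; }
static int demo_bunmapi(int nblocks) { printf("unmap %d blocks\n", nblocks); return 0; }

static const struct demo_ioops demo_iocore = {
        .bmapi_func   = demo_bmapi,
        .bunmapi_func = demo_bunmapi,
};

/* callers always go through the table, like XFS_BMAPI()/XFS_BUNMAPI() */
#define DEMO_BMAPI(ops, n)   ((ops)->bmapi_func(n))
#define DEMO_BUNMAPI(ops, n) ((ops)->bunmapi_func(n))

int main(void)
{
        DEMO_BMAPI(&demo_iocore, 8);
        DEMO_BUNMAPI(&demo_iocore, 8);
        return 0;
}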
diff --git a/fs/xfs/xfs_iomap.c b/fs/xfs/xfs_iomap.c
index d5dfedcb892..f1949c16df1 100644
--- a/fs/xfs/xfs_iomap.c
+++ b/fs/xfs/xfs_iomap.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2000-2005 Silicon Graphics, Inc.
+ * Copyright (c) 2000-2006 Silicon Graphics, Inc.
* All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
@@ -23,7 +23,6 @@
#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
-#include "xfs_dir.h"
#include "xfs_dir2.h"
#include "xfs_alloc.h"
#include "xfs_dmapi.h"
@@ -32,7 +31,6 @@
#include "xfs_bmap_btree.h"
#include "xfs_alloc_btree.h"
#include "xfs_ialloc_btree.h"
-#include "xfs_dir_sf.h"
#include "xfs_dir2_sf.h"
#include "xfs_attr_sf.h"
#include "xfs_dinode.h"
@@ -252,7 +250,7 @@ xfs_iomap(
error = XFS_BMAPI(mp, NULL, io, offset_fsb,
(xfs_filblks_t)(end_fsb - offset_fsb),
bmapi_flags, NULL, 0, &imap,
- &nimaps, NULL);
+ &nimaps, NULL, NULL);
if (error)
goto out;
@@ -519,8 +517,8 @@ xfs_iomap_write_direct(
*/
XFS_BMAP_INIT(&free_list, &firstfsb);
nimaps = 1;
- error = xfs_bmapi(tp, ip, offset_fsb, count_fsb,
- bmapi_flag, &firstfsb, 0, &imap, &nimaps, &free_list);
+ error = XFS_BMAPI(mp, tp, io, offset_fsb, count_fsb, bmapi_flag,
+ &firstfsb, 0, &imap, &nimaps, &free_list, NULL);
if (error)
goto error0;
@@ -610,8 +608,8 @@ xfs_iomap_eof_want_preallocate(
while (count_fsb > 0) {
imaps = nimaps;
firstblock = NULLFSBLOCK;
- error = XFS_BMAPI(mp, NULL, io, start_fsb, count_fsb,
- 0, &firstblock, 0, imap, &imaps, NULL);
+ error = XFS_BMAPI(mp, NULL, io, start_fsb, count_fsb, 0,
+ &firstblock, 0, imap, &imaps, NULL, NULL);
if (error)
return error;
for (n = 0; n < imaps; n++) {
@@ -695,11 +693,11 @@ retry:
nimaps = XFS_WRITE_IMAPS;
firstblock = NULLFSBLOCK;
- error = xfs_bmapi(NULL, ip, offset_fsb,
+ error = XFS_BMAPI(mp, NULL, io, offset_fsb,
(xfs_filblks_t)(last_fsb - offset_fsb),
XFS_BMAPI_DELAY | XFS_BMAPI_WRITE |
XFS_BMAPI_ENTIRE, &firstblock, 1, imap,
- &nimaps, NULL);
+ &nimaps, NULL, NULL);
if (error && (error != ENOSPC))
return XFS_ERROR(error);
@@ -832,9 +830,9 @@ xfs_iomap_write_allocate(
}
/* Go get the actual blocks */
- error = xfs_bmapi(tp, ip, map_start_fsb, count_fsb,
+ error = XFS_BMAPI(mp, tp, io, map_start_fsb, count_fsb,
XFS_BMAPI_WRITE, &first_block, 1,
- imap, &nimaps, &free_list);
+ imap, &nimaps, &free_list, NULL);
if (error)
goto trans_cancel;
@@ -955,9 +953,9 @@ xfs_iomap_write_unwritten(
*/
XFS_BMAP_INIT(&free_list, &firstfsb);
nimaps = 1;
- error = xfs_bmapi(tp, ip, offset_fsb, count_fsb,
+ error = XFS_BMAPI(mp, tp, io, offset_fsb, count_fsb,
XFS_BMAPI_WRITE|XFS_BMAPI_CONVERT, &firstfsb,
- 1, &imap, &nimaps, &free_list);
+ 1, &imap, &nimaps, &free_list, NULL);
if (error)
goto error_on_bmapi_transaction;
diff --git a/fs/xfs/xfs_itable.c b/fs/xfs/xfs_itable.c
index 94068d014f2..46249e4d1fe 100644
--- a/fs/xfs/xfs_itable.c
+++ b/fs/xfs/xfs_itable.c
@@ -24,14 +24,12 @@
#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
-#include "xfs_dir.h"
#include "xfs_dir2.h"
#include "xfs_dmapi.h"
#include "xfs_mount.h"
#include "xfs_bmap_btree.h"
#include "xfs_alloc_btree.h"
#include "xfs_ialloc_btree.h"
-#include "xfs_dir_sf.h"
#include "xfs_dir2_sf.h"
#include "xfs_attr_sf.h"
#include "xfs_dinode.h"
@@ -41,11 +39,6 @@
#include "xfs_error.h"
#include "xfs_btree.h"
-#ifndef HAVE_USERACC
-#define useracc(ubuffer, size, flags, foo) (0)
-#define unuseracc(ubuffer, size, flags)
-#endif
-
STATIC int
xfs_bulkstat_one_iget(
xfs_mount_t *mp, /* mount point for filesystem */
@@ -56,7 +49,7 @@ xfs_bulkstat_one_iget(
{
xfs_dinode_core_t *dic; /* dinode core info pointer */
xfs_inode_t *ip; /* incore inode pointer */
- vnode_t *vp;
+ bhv_vnode_t *vp;
int error;
error = xfs_iget(mp, NULL, ino, 0, XFS_ILOCK_SHARED, &ip, bno);
@@ -336,15 +329,6 @@ xfs_bulkstat(
nimask = ~(nicluster - 1);
nbcluster = nicluster >> mp->m_sb.sb_inopblog;
/*
- * Lock down the user's buffer. If a buffer was not sent, as in the case
- * disk quota code calls here, we skip this.
- */
- if (ubuffer &&
- (error = useracc(ubuffer, ubcount * statstruct_size,
- (B_READ|B_PHYS), NULL))) {
- return error;
- }
- /*
* Allocate a page-sized buffer for inode btree records.
* We could try allocating something smaller, but for normal
* calls we'll always (potentially) need the whole page.
@@ -650,8 +634,6 @@ xfs_bulkstat(
* Done, we're either out of filesystem or space to put the data.
*/
kmem_free(irbuf, NBPC);
- if (ubuffer)
- unuseracc(ubuffer, ubcount * statstruct_size, (B_READ|B_PHYS));
*ubcountp = ubelem;
if (agno >= mp->m_sb.sb_agcount) {
/*
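With the useracc()/unuseracc() pinning removed, bulkstat only touches the user buffer through the normal per-record copy-out done by its formatter callbacks, so there is nothing left to lock down up front. A minimal kernel-style sketch of what such a formatter looks like (hypothetical names; the real ones are xfs_bulkstat_one and friends):

#include <asm/uaccess.h>        /* copy_to_user() */
#include <linux/errno.h>

struct demo_bstat {             /* stand-in for xfs_bstat_t */
        unsigned long long bs_ino;
        unsigned int       bs_mode;
        /* ... */
};

/* copy one formatted record out; returns 0 or EFAULT for the caller to map */
static int demo_bulkstat_format(void __user *ubuffer,
                                const struct demo_bstat *buf)
{
        if (copy_to_user(ubuffer, buf, sizeof(*buf)))
                return EFAULT;  /* positive errno, per the XFS convention */
        return 0;
}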
diff --git a/fs/xfs/xfs_itable.h b/fs/xfs/xfs_itable.h
index 11eb4e1b18c..be5f12e07d2 100644
--- a/fs/xfs/xfs_itable.h
+++ b/fs/xfs/xfs_itable.h
@@ -45,7 +45,6 @@ typedef int (*bulkstat_one_pf)(struct xfs_mount *mp,
*/
#define BULKSTAT_FG_IGET 0x1 /* Go through the buffer cache */
#define BULKSTAT_FG_QUICK 0x2 /* No iget, walk the dinode cluster */
-#define BULKSTAT_FG_VFSLOCKED 0x4 /* Already have vfs lock */
/*
* Return stat information in bulk (by-inode) for the filesystem.
diff --git a/fs/xfs/xfs_log.c b/fs/xfs/xfs_log.c
index 32e841d2f26..d8f5d4cbe8b 100644
--- a/fs/xfs/xfs_log.c
+++ b/fs/xfs/xfs_log.c
@@ -24,7 +24,6 @@
#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
-#include "xfs_dir.h"
#include "xfs_dir2.h"
#include "xfs_dmapi.h"
#include "xfs_mount.h"
@@ -36,7 +35,6 @@
#include "xfs_ialloc_btree.h"
#include "xfs_log_recover.h"
#include "xfs_trans_priv.h"
-#include "xfs_dir_sf.h"
#include "xfs_dir2_sf.h"
#include "xfs_attr_sf.h"
#include "xfs_dinode.h"
@@ -402,7 +400,7 @@ xfs_log_release_iclog(xfs_mount_t *mp,
xlog_in_core_t *iclog = (xlog_in_core_t *)iclog_hndl;
if (xlog_state_release_iclog(log, iclog)) {
- xfs_force_shutdown(mp, XFS_LOG_IO_ERROR);
+ xfs_force_shutdown(mp, SHUTDOWN_LOG_IO_ERROR);
return EIO;
}
@@ -498,9 +496,8 @@ xfs_log_mount(xfs_mount_t *mp,
* just worked.
*/
if (!(mp->m_flags & XFS_MOUNT_NORECOVERY)) {
- int error;
- vfs_t *vfsp = XFS_MTOVFS(mp);
- int readonly = (vfsp->vfs_flag & VFS_RDONLY);
+ bhv_vfs_t *vfsp = XFS_MTOVFS(mp);
+ int error, readonly = (vfsp->vfs_flag & VFS_RDONLY);
if (readonly)
vfsp->vfs_flag &= ~VFS_RDONLY;
@@ -726,7 +723,7 @@ xfs_log_write(xfs_mount_t * mp,
return XFS_ERROR(EIO);
if ((error = xlog_write(mp, reg, nentries, tic, start_lsn, NULL, 0))) {
- xfs_force_shutdown(mp, XFS_LOG_IO_ERROR);
+ xfs_force_shutdown(mp, SHUTDOWN_LOG_IO_ERROR);
}
return error;
} /* xfs_log_write */
@@ -816,9 +813,9 @@ xfs_log_need_covered(xfs_mount_t *mp)
SPLDECL(s);
int needed = 0, gen;
xlog_t *log = mp->m_log;
- vfs_t *vfsp = XFS_MTOVFS(mp);
+ bhv_vfs_t *vfsp = XFS_MTOVFS(mp);
- if (fs_frozen(vfsp) || XFS_FORCED_SHUTDOWN(mp) ||
+ if (vfs_test_for_freeze(vfsp) || XFS_FORCED_SHUTDOWN(mp) ||
(vfsp->vfs_flag & VFS_RDONLY))
return 0;
@@ -956,7 +953,7 @@ xlog_iodone(xfs_buf_t *bp)
XFS_ERRTAG_IODONE_IOERR, XFS_RANDOM_IODONE_IOERR)) {
xfs_ioerror_alert("xlog_iodone", l->l_mp, bp, XFS_BUF_ADDR(bp));
XFS_BUF_STALE(bp);
- xfs_force_shutdown(l->l_mp, XFS_LOG_IO_ERROR);
+ xfs_force_shutdown(l->l_mp, SHUTDOWN_LOG_IO_ERROR);
/*
* This flag will be propagated to the trans-committed
* callback routines to let them know that the log-commit
@@ -1261,7 +1258,7 @@ xlog_commit_record(xfs_mount_t *mp,
ASSERT_ALWAYS(iclog);
if ((error = xlog_write(mp, reg, 1, ticket, commitlsnp,
iclog, XLOG_COMMIT_TRANS))) {
- xfs_force_shutdown(mp, XFS_LOG_IO_ERROR);
+ xfs_force_shutdown(mp, SHUTDOWN_LOG_IO_ERROR);
}
return error;
} /* xlog_commit_record */
@@ -1790,7 +1787,7 @@ xlog_write(xfs_mount_t * mp,
xfs_cmn_err(XFS_PTAG_LOGRES, CE_ALERT, mp,
"xfs_log_write: reservation ran out. Need to up reservation");
/* If we did not panic, shutdown the filesystem */
- xfs_force_shutdown(mp, XFS_CORRUPT_INCORE);
+ xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
#endif
} else
ticket->t_curr_res -= len;
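This file is also part of the wholesale renaming of the forced-shutdown reason flags; the substitutions visible in this and the surrounding files boil down to the mapping below, written as an illustrative compatibility shim (the patch itself does not add one):

/* illustrative mapping only -- not part of the patch */
#define XFS_FORCE_UMOUNT        SHUTDOWN_FORCE_UMOUNT
#define XFS_LOG_IO_ERROR        SHUTDOWN_LOG_IO_ERROR
#define XFS_CORRUPT_INCORE      SHUTDOWN_CORRUPT_INCORE
#define XFS_METADATA_IO_ERROR   SHUTDOWN_META_IO_ERROR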
diff --git a/fs/xfs/xfs_log_recover.c b/fs/xfs/xfs_log_recover.c
index 1f0016b0b4e..55b4237c215 100644
--- a/fs/xfs/xfs_log_recover.c
+++ b/fs/xfs/xfs_log_recover.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2000-2003,2005 Silicon Graphics, Inc.
+ * Copyright (c) 2000-2006 Silicon Graphics, Inc.
* All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
@@ -24,7 +24,6 @@
#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
-#include "xfs_dir.h"
#include "xfs_dir2.h"
#include "xfs_dmapi.h"
#include "xfs_mount.h"
@@ -32,7 +31,6 @@
#include "xfs_bmap_btree.h"
#include "xfs_alloc_btree.h"
#include "xfs_ialloc_btree.h"
-#include "xfs_dir_sf.h"
#include "xfs_dir2_sf.h"
#include "xfs_attr_sf.h"
#include "xfs_dinode.h"
@@ -193,14 +191,14 @@ xlog_header_check_dump(
{
int b;
- printk("%s: SB : uuid = ", __FUNCTION__);
+ cmn_err(CE_DEBUG, "%s: SB : uuid = ", __FUNCTION__);
for (b = 0; b < 16; b++)
- printk("%02x",((unsigned char *)&mp->m_sb.sb_uuid)[b]);
- printk(", fmt = %d\n", XLOG_FMT);
- printk(" log : uuid = ");
+ cmn_err(CE_DEBUG, "%02x", ((uchar_t *)&mp->m_sb.sb_uuid)[b]);
+ cmn_err(CE_DEBUG, ", fmt = %d\n", XLOG_FMT);
+ cmn_err(CE_DEBUG, " log : uuid = ");
for (b = 0; b < 16; b++)
- printk("%02x",((unsigned char *)&head->h_fs_uuid)[b]);
- printk(", fmt = %d\n", INT_GET(head->h_fmt, ARCH_CONVERT));
+ cmn_err(CE_DEBUG, "%02x",((uchar_t *)&head->h_fs_uuid)[b]);
+ cmn_err(CE_DEBUG, ", fmt = %d\n", INT_GET(head->h_fmt, ARCH_CONVERT));
}
#else
#define xlog_header_check_dump(mp, head)
@@ -282,7 +280,7 @@ xlog_recover_iodone(
mp = XFS_BUF_FSPRIVATE(bp, xfs_mount_t *);
xfs_ioerror_alert("xlog_recover_iodone",
mp, bp, XFS_BUF_ADDR(bp));
- xfs_force_shutdown(mp, XFS_METADATA_IO_ERROR);
+ xfs_force_shutdown(mp, SHUTDOWN_META_IO_ERROR);
}
XFS_BUF_SET_FSPRIVATE(bp, NULL);
XFS_BUF_CLR_IODONE_FUNC(bp);
@@ -1889,7 +1887,7 @@ xlog_recover_do_inode_buffer(
buffer_nextp = (xfs_agino_t *)xfs_buf_offset(bp,
next_unlinked_offset);
- INT_SET(*buffer_nextp, ARCH_CONVERT, *logged_nextp);
+ *buffer_nextp = *logged_nextp;
}
return 0;
@@ -2292,12 +2290,22 @@ xlog_recover_do_inode_trans(
int attr_index;
uint fields;
xfs_dinode_core_t *dicp;
+ int need_free = 0;
if (pass == XLOG_RECOVER_PASS1) {
return 0;
}
- in_f = (xfs_inode_log_format_t *)item->ri_buf[0].i_addr;
+ if (item->ri_buf[0].i_len == sizeof(xfs_inode_log_format_t)) {
+ in_f = (xfs_inode_log_format_t *)item->ri_buf[0].i_addr;
+ } else {
+ in_f = (xfs_inode_log_format_t *)kmem_alloc(
+ sizeof(xfs_inode_log_format_t), KM_SLEEP);
+ need_free = 1;
+ error = xfs_inode_item_format_convert(&item->ri_buf[0], in_f);
+ if (error)
+ goto error;
+ }
ino = in_f->ilf_ino;
mp = log->l_mp;
if (ITEM_TYPE(item) == XFS_LI_INODE) {
@@ -2323,8 +2331,10 @@ xlog_recover_do_inode_trans(
* Inode buffers can be freed, look out for it,
* and do not replay the inode.
*/
- if (xlog_check_buffer_cancelled(log, imap.im_blkno, imap.im_len, 0))
- return 0;
+ if (xlog_check_buffer_cancelled(log, imap.im_blkno, imap.im_len, 0)) {
+ error = 0;
+ goto error;
+ }
bp = xfs_buf_read_flags(mp->m_ddev_targp, imap.im_blkno, imap.im_len,
XFS_BUF_LOCK);
@@ -2333,7 +2343,7 @@ xlog_recover_do_inode_trans(
bp, imap.im_blkno);
error = XFS_BUF_GETERROR(bp);
xfs_buf_relse(bp);
- return error;
+ goto error;
}
error = 0;
ASSERT(in_f->ilf_fields & XFS_ILOG_CORE);
@@ -2350,7 +2360,8 @@ xlog_recover_do_inode_trans(
dip, bp, ino);
XFS_ERROR_REPORT("xlog_recover_do_inode_trans(1)",
XFS_ERRLEVEL_LOW, mp);
- return XFS_ERROR(EFSCORRUPTED);
+ error = EFSCORRUPTED;
+ goto error;
}
dicp = (xfs_dinode_core_t*)(item->ri_buf[1].i_addr);
if (unlikely(dicp->di_magic != XFS_DINODE_MAGIC)) {
@@ -2360,7 +2371,8 @@ xlog_recover_do_inode_trans(
item, ino);
XFS_ERROR_REPORT("xlog_recover_do_inode_trans(2)",
XFS_ERRLEVEL_LOW, mp);
- return XFS_ERROR(EFSCORRUPTED);
+ error = EFSCORRUPTED;
+ goto error;
}
/* Skip replay when the on disk inode is newer than the log one */
@@ -2376,7 +2388,8 @@ xlog_recover_do_inode_trans(
/* do nothing */
} else {
xfs_buf_relse(bp);
- return 0;
+ error = 0;
+ goto error;
}
}
/* Take the opportunity to reset the flush iteration count */
@@ -2391,7 +2404,8 @@ xlog_recover_do_inode_trans(
xfs_fs_cmn_err(CE_ALERT, mp,
"xfs_inode_recover: Bad regular inode log record, rec ptr 0x%p, ino ptr = 0x%p, ino bp = 0x%p, ino %Ld",
item, dip, bp, ino);
- return XFS_ERROR(EFSCORRUPTED);
+ error = EFSCORRUPTED;
+ goto error;
}
} else if (unlikely((dicp->di_mode & S_IFMT) == S_IFDIR)) {
if ((dicp->di_format != XFS_DINODE_FMT_EXTENTS) &&
@@ -2403,7 +2417,8 @@ xlog_recover_do_inode_trans(
xfs_fs_cmn_err(CE_ALERT, mp,
"xfs_inode_recover: Bad dir inode log record, rec ptr 0x%p, ino ptr = 0x%p, ino bp = 0x%p, ino %Ld",
item, dip, bp, ino);
- return XFS_ERROR(EFSCORRUPTED);
+ error = EFSCORRUPTED;
+ goto error;
}
}
if (unlikely(dicp->di_nextents + dicp->di_anextents > dicp->di_nblocks)){
@@ -2415,7 +2430,8 @@ xlog_recover_do_inode_trans(
item, dip, bp, ino,
dicp->di_nextents + dicp->di_anextents,
dicp->di_nblocks);
- return XFS_ERROR(EFSCORRUPTED);
+ error = EFSCORRUPTED;
+ goto error;
}
if (unlikely(dicp->di_forkoff > mp->m_sb.sb_inodesize)) {
XFS_CORRUPTION_ERROR("xlog_recover_do_inode_trans(6)",
@@ -2424,7 +2440,8 @@ xlog_recover_do_inode_trans(
xfs_fs_cmn_err(CE_ALERT, mp,
"xfs_inode_recover: Bad inode log rec ptr 0x%p, dino ptr 0x%p, dino bp 0x%p, ino %Ld, forkoff 0x%x",
item, dip, bp, ino, dicp->di_forkoff);
- return XFS_ERROR(EFSCORRUPTED);
+ error = EFSCORRUPTED;
+ goto error;
}
if (unlikely(item->ri_buf[1].i_len > sizeof(xfs_dinode_core_t))) {
XFS_CORRUPTION_ERROR("xlog_recover_do_inode_trans(7)",
@@ -2433,7 +2450,8 @@ xlog_recover_do_inode_trans(
xfs_fs_cmn_err(CE_ALERT, mp,
"xfs_inode_recover: Bad inode log record length %d, rec ptr 0x%p",
item->ri_buf[1].i_len, item);
- return XFS_ERROR(EFSCORRUPTED);
+ error = EFSCORRUPTED;
+ goto error;
}
/* The core is in in-core format */
@@ -2521,7 +2539,8 @@ xlog_recover_do_inode_trans(
xlog_warn("XFS: xlog_recover_do_inode_trans: Invalid flag");
ASSERT(0);
xfs_buf_relse(bp);
- return XFS_ERROR(EIO);
+ error = EIO;
+ goto error;
}
}
@@ -2537,7 +2556,10 @@ write_inode_buffer:
error = xfs_bwrite(mp, bp);
}
- return (error);
+error:
+ if (need_free)
+ kmem_free(in_f, sizeof(*in_f));
+ return XFS_ERROR(error);
}
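The hunks above route every early exit of xlog_recover_do_inode_trans() through a single error: label so the optionally allocated conversion buffer is always freed. A minimal standalone sketch of that cleanup pattern; the record names and helpers below are illustrative only, not taken from XFS:

#include <errno.h>
#include <stdlib.h>
#include <string.h>

struct record { long id; long payload; };

/* Stand-ins for the real "convert old on-disk layout" and "replay" steps. */
static int record_convert(const void *buf, size_t len, struct record *out)
{
	if (len < sizeof(long))
		return EINVAL;
	memset(out, 0, sizeof(*out));
	memcpy(&out->id, buf, sizeof(long));
	return 0;
}

static int record_apply(const struct record *rec)
{
	return rec->id < 0 ? EINVAL : 0;
}

static int parse_record(void *buf, size_t len)
{
	struct record *rec = buf;
	int need_free = 0, error = 0;

	if (len != sizeof(*rec)) {
		/* Older layout: convert into a private, heap-allocated copy. */
		rec = malloc(sizeof(*rec));
		if (!rec)
			return ENOMEM;
		need_free = 1;
		error = record_convert(buf, len, rec);
		if (error)
			goto out;		/* still frees rec below */
	}
	error = record_apply(rec);		/* any failure falls through to cleanup */
out:
	if (need_free)
		free(rec);
	return error;
}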
/*
@@ -2674,32 +2696,32 @@ xlog_recover_do_dquot_trans(
* structure into it, and adds the efi to the AIL with the given
* LSN.
*/
-STATIC void
+STATIC int
xlog_recover_do_efi_trans(
xlog_t *log,
xlog_recover_item_t *item,
xfs_lsn_t lsn,
int pass)
{
+ int error;
xfs_mount_t *mp;
xfs_efi_log_item_t *efip;
xfs_efi_log_format_t *efi_formatp;
SPLDECL(s);
if (pass == XLOG_RECOVER_PASS1) {
- return;
+ return 0;
}
efi_formatp = (xfs_efi_log_format_t *)item->ri_buf[0].i_addr;
- ASSERT(item->ri_buf[0].i_len ==
- (sizeof(xfs_efi_log_format_t) +
- ((efi_formatp->efi_nextents - 1) * sizeof(xfs_extent_t))));
mp = log->l_mp;
efip = xfs_efi_init(mp, efi_formatp->efi_nextents);
- memcpy((char *)&(efip->efi_format), (char *)efi_formatp,
- sizeof(xfs_efi_log_format_t) +
- ((efi_formatp->efi_nextents - 1) * sizeof(xfs_extent_t)));
+ if ((error = xfs_efi_copy_format(&(item->ri_buf[0]),
+ &(efip->efi_format)))) {
+ xfs_efi_item_free(efip);
+ return error;
+ }
efip->efi_next_extent = efi_formatp->efi_nextents;
efip->efi_flags |= XFS_EFI_COMMITTED;
@@ -2708,6 +2730,7 @@ xlog_recover_do_efi_trans(
* xfs_trans_update_ail() drops the AIL lock.
*/
xfs_trans_update_ail(mp, (xfs_log_item_t *)efip, lsn, s);
+ return 0;
}
@@ -2738,9 +2761,10 @@ xlog_recover_do_efd_trans(
}
efd_formatp = (xfs_efd_log_format_t *)item->ri_buf[0].i_addr;
- ASSERT(item->ri_buf[0].i_len ==
- (sizeof(xfs_efd_log_format_t) +
- ((efd_formatp->efd_nextents - 1) * sizeof(xfs_extent_t))));
+ ASSERT((item->ri_buf[0].i_len == (sizeof(xfs_efd_log_format_32_t) +
+ ((efd_formatp->efd_nextents - 1) * sizeof(xfs_extent_32_t)))) ||
+ (item->ri_buf[0].i_len == (sizeof(xfs_efd_log_format_64_t) +
+ ((efd_formatp->efd_nextents - 1) * sizeof(xfs_extent_64_t)))));
efi_id = efd_formatp->efd_efi_id;
/*
@@ -2810,15 +2834,14 @@ xlog_recover_do_trans(
if ((error = xlog_recover_do_buffer_trans(log, item,
pass)))
break;
- } else if ((ITEM_TYPE(item) == XFS_LI_INODE) ||
- (ITEM_TYPE(item) == XFS_LI_6_1_INODE) ||
- (ITEM_TYPE(item) == XFS_LI_5_3_INODE)) {
+ } else if ((ITEM_TYPE(item) == XFS_LI_INODE)) {
if ((error = xlog_recover_do_inode_trans(log, item,
pass)))
break;
} else if (ITEM_TYPE(item) == XFS_LI_EFI) {
- xlog_recover_do_efi_trans(log, item, trans->r_lsn,
- pass);
+ if ((error = xlog_recover_do_efi_trans(log, item, trans->r_lsn,
+ pass)))
+ break;
} else if (ITEM_TYPE(item) == XFS_LI_EFD) {
xlog_recover_do_efd_trans(log, item, pass);
} else if (ITEM_TYPE(item) == XFS_LI_DQUOT) {
@@ -3419,13 +3442,13 @@ xlog_unpack_data_checksum(
if (rhead->h_chksum ||
((log->l_flags & XLOG_CHKSUM_MISMATCH) == 0)) {
cmn_err(CE_DEBUG,
- "XFS: LogR chksum mismatch: was (0x%x) is (0x%x)",
+ "XFS: LogR chksum mismatch: was (0x%x) is (0x%x)\n",
INT_GET(rhead->h_chksum, ARCH_CONVERT), chksum);
cmn_err(CE_DEBUG,
"XFS: Disregard message if filesystem was created with non-DEBUG kernel");
if (XFS_SB_VERSION_HASLOGV2(&log->l_mp->m_sb)) {
cmn_err(CE_DEBUG,
- "XFS: LogR this is a LogV2 filesystem");
+ "XFS: LogR this is a LogV2 filesystem\n");
}
log->l_flags |= XLOG_CHKSUM_MISMATCH;
}
@@ -3798,7 +3821,7 @@ xlog_do_log_recovery(
error = xlog_do_recovery_pass(log, head_blk, tail_blk,
XLOG_RECOVER_PASS2);
#ifdef DEBUG
- {
+ if (!error) {
int i;
for (i = 0; i < XLOG_BC_TABLE_SIZE; i++)
@@ -3974,7 +3997,7 @@ xlog_recover_finish(
log->l_flags &= ~XLOG_RECOVERY_NEEDED;
} else {
cmn_err(CE_DEBUG,
- "!Ending clean XFS mount for filesystem: %s",
+ "!Ending clean XFS mount for filesystem: %s\n",
log->l_mp->m_fsname);
}
return 0;
diff --git a/fs/xfs/xfs_mount.c b/fs/xfs/xfs_mount.c
index c0b1c290688..10dbf203c62 100644
--- a/fs/xfs/xfs_mount.c
+++ b/fs/xfs/xfs_mount.c
@@ -24,14 +24,12 @@
#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
-#include "xfs_dir.h"
#include "xfs_dir2.h"
#include "xfs_dmapi.h"
#include "xfs_mount.h"
#include "xfs_bmap_btree.h"
#include "xfs_alloc_btree.h"
#include "xfs_ialloc_btree.h"
-#include "xfs_dir_sf.h"
#include "xfs_dir2_sf.h"
#include "xfs_attr_sf.h"
#include "xfs_dinode.h"
@@ -196,7 +194,7 @@ xfs_mount_free(
kmem_free(mp->m_logname, strlen(mp->m_logname) + 1);
if (remove_bhv) {
- struct vfs *vfsp = XFS_MTOVFS(mp);
+ struct bhv_vfs *vfsp = XFS_MTOVFS(mp);
bhv_remove_all_vfsops(vfsp, 0);
VFS_REMOVEBHV(vfsp, &mp->m_bhv);
@@ -337,7 +335,7 @@ xfs_mount_validate_sb(
xfs_agnumber_t
xfs_initialize_perag(
- struct vfs *vfs,
+ bhv_vfs_t *vfs,
xfs_mount_t *mp,
xfs_agnumber_t agcount)
{
@@ -651,14 +649,14 @@ xfs_mount_common(xfs_mount_t *mp, xfs_sb_t *sbp)
*/
int
xfs_mountfs(
- vfs_t *vfsp,
+ bhv_vfs_t *vfsp,
xfs_mount_t *mp,
int mfsi_flags)
{
xfs_buf_t *bp;
xfs_sb_t *sbp = &(mp->m_sb);
xfs_inode_t *rip;
- vnode_t *rvp = NULL;
+ bhv_vnode_t *rvp = NULL;
int readio_log, writeio_log;
xfs_daddr_t d;
__uint64_t ret64;
@@ -934,18 +932,7 @@ xfs_mountfs(
vfsp->vfs_altfsid = (xfs_fsid_t *)mp->m_fixedfsid;
mp->m_dmevmask = 0; /* not persistent; set after each mount */
- /*
- * Select the right directory manager.
- */
- mp->m_dirops =
- XFS_SB_VERSION_HASDIRV2(&mp->m_sb) ?
- xfsv2_dirops :
- xfsv1_dirops;
-
- /*
- * Initialize directory manager's entries.
- */
- XFS_DIR_MOUNT(mp);
+ xfs_dir_mount(mp);
/*
* Initialize the attribute manager's entries.
@@ -1006,8 +993,9 @@ xfs_mountfs(
if (unlikely((rip->i_d.di_mode & S_IFMT) != S_IFDIR)) {
cmn_err(CE_WARN, "XFS: corrupted root inode");
- prdev("Root inode %llu is not a directory",
- mp->m_ddev_targp, (unsigned long long)rip->i_ino);
+ cmn_err(CE_WARN, "Device %s - root %llu is not a directory",
+ XFS_BUFTARG_NAME(mp->m_ddev_targp),
+ (unsigned long long)rip->i_ino);
xfs_iunlock(rip, XFS_ILOCK_EXCL);
XFS_ERROR_REPORT("xfs_mountfs_int(2)", XFS_ERRLEVEL_LOW,
mp);
@@ -1094,7 +1082,7 @@ xfs_mountfs(
int
xfs_unmountfs(xfs_mount_t *mp, struct cred *cr)
{
- struct vfs *vfsp = XFS_MTOVFS(mp);
+ struct bhv_vfs *vfsp = XFS_MTOVFS(mp);
#if defined(DEBUG) || defined(INDUCE_IO_ERROR)
int64_t fsid;
#endif
@@ -1254,6 +1242,26 @@ xfs_mod_sb(xfs_trans_t *tp, __int64_t fields)
xfs_trans_log_buf(tp, bp, first, last);
}
+
+/*
+ * In order to avoid ENOSPC-related deadlock caused by
+ * out-of-order locking of AGF buffer (PV 947395), we place
+ * constraints on the relationship among actual allocations for
+ * data blocks, freelist blocks, and potential file data bmap
+ * btree blocks. However, these restrictions may result in no
+ * actual space allocated for a delayed extent, for example, a data
+ * block in a certain AG is allocated but there is no additional
+ * block for the additional bmap btree block due to a split of the
+ * bmap btree of the file. The result of this may lead to an
+ * infinite loop in xfssyncd when the file gets flushed to disk and
+ * all delayed extents need to be actually allocated. To get around
+ * this, we explicitly set aside a few blocks which will not be
+ * reserved in delayed allocation. Considering the minimum number of
+ * needed freelist blocks is 4 fsbs, a potential split of file's bmap
+ * btree requires 1 fsb, so we set the number of set-aside blocks to 8.
+ */
+#define SET_ASIDE_BLOCKS 8
+
/*
* xfs_mod_incore_sb_unlocked() is a utility routine commonly used to apply
* a delta to a specified field in the in-core superblock. Simply
@@ -1298,7 +1306,7 @@ xfs_mod_incore_sb_unlocked(xfs_mount_t *mp, xfs_sb_field_t field,
return 0;
case XFS_SBS_FDBLOCKS:
- lcounter = (long long)mp->m_sb.sb_fdblocks;
+ lcounter = (long long)mp->m_sb.sb_fdblocks - SET_ASIDE_BLOCKS;
res_used = (long long)(mp->m_resblks - mp->m_resblks_avail);
if (delta > 0) { /* Putting blocks back */
@@ -1332,7 +1340,7 @@ xfs_mod_incore_sb_unlocked(xfs_mount_t *mp, xfs_sb_field_t field,
}
}
- mp->m_sb.sb_fdblocks = lcounter;
+ mp->m_sb.sb_fdblocks = lcounter + SET_ASIDE_BLOCKS;
return 0;
case XFS_SBS_FREXTENTS:
lcounter = (long long)mp->m_sb.sb_frextents;
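The SET_ASIDE_BLOCKS arithmetic above works on (sb_fdblocks - SET_ASIDE_BLOCKS), rejects any delta that would go negative, and stores the result back with the set-aside added again, so those 8 blocks are never handed to delayed allocation. A minimal sketch of the same accounting, assuming a plain long long counter rather than the in-core superblock and ignoring the reserved-block pool handling:

#include <errno.h>

#define SET_ASIDE_BLOCKS 8	/* blocks held back from delayed allocation */

/* Apply a signed block delta while protecting the set-aside pool. */
static int mod_free_blocks(long long *fdblocks, long long delta)
{
	long long lcounter = *fdblocks - SET_ASIDE_BLOCKS;

	lcounter += delta;
	if (lcounter < 0)
		return ENOSPC;			/* would dip into the reserve */
	*fdblocks = lcounter + SET_ASIDE_BLOCKS;
	return 0;
}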
diff --git a/fs/xfs/xfs_mount.h b/fs/xfs/xfs_mount.h
index 668ad23fd37..b2bd4be4200 100644
--- a/fs/xfs/xfs_mount.h
+++ b/fs/xfs/xfs_mount.h
@@ -53,8 +53,8 @@ typedef struct xfs_trans_reservations {
#else
struct cred;
struct log;
-struct vfs;
-struct vnode;
+struct bhv_vfs;
+struct bhv_vnode;
struct xfs_mount_args;
struct xfs_ihash;
struct xfs_chash;
@@ -63,9 +63,11 @@ struct xfs_perag;
struct xfs_iocore;
struct xfs_bmbt_irec;
struct xfs_bmap_free;
+struct xfs_extdelta;
+struct xfs_swapext;
-extern struct vfsops xfs_vfsops;
-extern struct vnodeops xfs_vnodeops;
+extern struct bhv_vfsops xfs_vfsops;
+extern struct bhv_vnodeops xfs_vnodeops;
#define AIL_LOCK_T lock_t
#define AIL_LOCKINIT(x,y) spinlock_init(x,y)
@@ -78,15 +80,15 @@ extern struct vnodeops xfs_vnodeops;
* Prototypes and functions for the Data Migration subsystem.
*/
-typedef int (*xfs_send_data_t)(int, struct vnode *,
- xfs_off_t, size_t, int, vrwlock_t *);
+typedef int (*xfs_send_data_t)(int, struct bhv_vnode *,
+ xfs_off_t, size_t, int, bhv_vrwlock_t *);
typedef int (*xfs_send_mmap_t)(struct vm_area_struct *, uint);
-typedef int (*xfs_send_destroy_t)(struct vnode *, dm_right_t);
-typedef int (*xfs_send_namesp_t)(dm_eventtype_t, struct vfs *,
- struct vnode *,
- dm_right_t, struct vnode *, dm_right_t,
+typedef int (*xfs_send_destroy_t)(struct bhv_vnode *, dm_right_t);
+typedef int (*xfs_send_namesp_t)(dm_eventtype_t, struct bhv_vfs *,
+ struct bhv_vnode *,
+ dm_right_t, struct bhv_vnode *, dm_right_t,
char *, char *, mode_t, int, int);
-typedef void (*xfs_send_unmount_t)(struct vfs *, struct vnode *,
+typedef void (*xfs_send_unmount_t)(struct bhv_vfs *, struct bhv_vnode *,
dm_right_t, mode_t, int, int);
typedef struct xfs_dmops {
@@ -188,13 +190,18 @@ typedef struct xfs_qmops {
* Prototypes and functions for I/O core modularization.
*/
-typedef int (*xfs_ioinit_t)(struct vfs *,
+typedef int (*xfs_ioinit_t)(struct bhv_vfs *,
struct xfs_mount_args *, int);
typedef int (*xfs_bmapi_t)(struct xfs_trans *, void *,
xfs_fileoff_t, xfs_filblks_t, int,
xfs_fsblock_t *, xfs_extlen_t,
struct xfs_bmbt_irec *, int *,
- struct xfs_bmap_free *);
+ struct xfs_bmap_free *, struct xfs_extdelta *);
+typedef int (*xfs_bunmapi_t)(struct xfs_trans *,
+ void *, xfs_fileoff_t,
+ xfs_filblks_t, int, xfs_extnum_t,
+ xfs_fsblock_t *, struct xfs_bmap_free *,
+ struct xfs_extdelta *, int *);
typedef int (*xfs_bmap_eof_t)(void *, xfs_fileoff_t, int, int *);
typedef int (*xfs_iomap_write_direct_t)(
void *, xfs_off_t, size_t, int,
@@ -213,11 +220,14 @@ typedef void (*xfs_lock_demote_t)(void *, uint);
typedef int (*xfs_lock_nowait_t)(void *, uint);
typedef void (*xfs_unlk_t)(void *, unsigned int);
typedef xfs_fsize_t (*xfs_size_t)(void *);
-typedef xfs_fsize_t (*xfs_iodone_t)(struct vfs *);
+typedef xfs_fsize_t (*xfs_iodone_t)(struct bhv_vfs *);
+typedef int (*xfs_swap_extents_t)(void *, void *,
+ struct xfs_swapext*);
typedef struct xfs_ioops {
xfs_ioinit_t xfs_ioinit;
xfs_bmapi_t xfs_bmapi_func;
+ xfs_bunmapi_t xfs_bunmapi_func;
xfs_bmap_eof_t xfs_bmap_eof_func;
xfs_iomap_write_direct_t xfs_iomap_write_direct;
xfs_iomap_write_delay_t xfs_iomap_write_delay;
@@ -230,13 +240,17 @@ typedef struct xfs_ioops {
xfs_unlk_t xfs_unlock;
xfs_size_t xfs_size_func;
xfs_iodone_t xfs_iodone;
+ xfs_swap_extents_t xfs_swap_extents_func;
} xfs_ioops_t;
#define XFS_IOINIT(vfsp, args, flags) \
(*(mp)->m_io_ops.xfs_ioinit)(vfsp, args, flags)
-#define XFS_BMAPI(mp, trans,io,bno,len,f,first,tot,mval,nmap,flist) \
+#define XFS_BMAPI(mp, trans,io,bno,len,f,first,tot,mval,nmap,flist,delta) \
(*(mp)->m_io_ops.xfs_bmapi_func) \
- (trans,(io)->io_obj,bno,len,f,first,tot,mval,nmap,flist)
+ (trans,(io)->io_obj,bno,len,f,first,tot,mval,nmap,flist,delta)
+#define XFS_BUNMAPI(mp, trans,io,bno,len,f,nexts,first,flist,delta,done) \
+ (*(mp)->m_io_ops.xfs_bunmapi_func) \
+ (trans,(io)->io_obj,bno,len,f,nexts,first,flist,delta,done)
#define XFS_BMAP_EOF(mp, io, endoff, whichfork, eof) \
(*(mp)->m_io_ops.xfs_bmap_eof_func) \
((io)->io_obj, endoff, whichfork, eof)
@@ -266,6 +280,9 @@ typedef struct xfs_ioops {
(*(mp)->m_io_ops.xfs_size_func)((io)->io_obj)
#define XFS_IODONE(vfsp) \
(*(mp)->m_io_ops.xfs_iodone)(vfsp)
+#define XFS_SWAP_EXTENTS(mp, io, tio, sxp) \
+ (*(mp)->m_io_ops.xfs_swap_extents_func) \
+ ((io)->io_obj, (tio)->io_obj, sxp)
#ifdef HAVE_PERCPU_SB
@@ -386,8 +403,6 @@ typedef struct xfs_mount {
__uint8_t m_inode_quiesce;/* call quiesce on new inodes.
field governed by m_ilock */
__uint8_t m_sectbb_log; /* sectlog - BBSHIFT */
- __uint8_t m_dirversion; /* 1 or 2 */
- xfs_dirops_t m_dirops; /* table of dir funcs */
int m_dirblksize; /* directory block sz--bytes */
int m_dirblkfsbs; /* directory block sz--fsbs */
xfs_dablk_t m_dirdatablk; /* blockno of dir data v2 */
@@ -494,16 +509,7 @@ xfs_preferred_iosize(xfs_mount_t *mp)
#define XFS_FORCED_SHUTDOWN(mp) ((mp)->m_flags & XFS_MOUNT_FS_SHUTDOWN)
#define xfs_force_shutdown(m,f) \
- VFS_FORCE_SHUTDOWN((XFS_MTOVFS(m)), f, __FILE__, __LINE__)
-
-/*
- * Flags sent to xfs_force_shutdown.
- */
-#define XFS_METADATA_IO_ERROR 0x1
-#define XFS_LOG_IO_ERROR 0x2
-#define XFS_FORCE_UMOUNT 0x4
-#define XFS_CORRUPT_INCORE 0x8 /* Corrupt in-memory data structures */
-#define XFS_SHUTDOWN_REMOTE_REQ 0x10 /* Shutdown came from remote cell */
+ bhv_vfs_force_shutdown((XFS_MTOVFS(m)), f, __FILE__, __LINE__)
/*
* Flags for xfs_mountfs
@@ -521,7 +527,7 @@ xfs_preferred_iosize(xfs_mount_t *mp)
* Macros for getting from mount to vfs and back.
*/
#define XFS_MTOVFS(mp) xfs_mtovfs(mp)
-static inline struct vfs *xfs_mtovfs(xfs_mount_t *mp)
+static inline struct bhv_vfs *xfs_mtovfs(xfs_mount_t *mp)
{
return bhvtovfs(&mp->m_bhv);
}
@@ -533,7 +539,7 @@ static inline xfs_mount_t *xfs_bhvtom(bhv_desc_t *bdp)
}
#define XFS_VFSTOM(vfs) xfs_vfstom(vfs)
-static inline xfs_mount_t *xfs_vfstom(vfs_t *vfs)
+static inline xfs_mount_t *xfs_vfstom(bhv_vfs_t *vfs)
{
return XFS_BHVTOM(bhv_lookup(VFS_BHVHEAD(vfs), &xfs_vfsops));
}
@@ -571,7 +577,7 @@ typedef struct xfs_mod_sb {
extern xfs_mount_t *xfs_mount_init(void);
extern void xfs_mod_sb(xfs_trans_t *, __int64_t);
extern void xfs_mount_free(xfs_mount_t *mp, int remove_bhv);
-extern int xfs_mountfs(struct vfs *, xfs_mount_t *mp, int);
+extern int xfs_mountfs(struct bhv_vfs *, xfs_mount_t *mp, int);
extern void xfs_mountfs_check_barriers(xfs_mount_t *mp);
extern int xfs_unmountfs(xfs_mount_t *, struct cred *);
@@ -589,7 +595,7 @@ extern void xfs_freesb(xfs_mount_t *);
extern void xfs_do_force_shutdown(bhv_desc_t *, int, char *, int);
extern int xfs_syncsub(xfs_mount_t *, int, int, int *);
extern int xfs_sync_inodes(xfs_mount_t *, int, int, int *);
-extern xfs_agnumber_t xfs_initialize_perag(struct vfs *, xfs_mount_t *,
+extern xfs_agnumber_t xfs_initialize_perag(struct bhv_vfs *, xfs_mount_t *,
xfs_agnumber_t);
extern void xfs_xlatesb(void *, struct xfs_sb *, int, __int64_t);
diff --git a/fs/xfs/xfs_qmops.c b/fs/xfs/xfs_qmops.c
index 1408a32eef8..320d63ff9ca 100644
--- a/fs/xfs/xfs_qmops.c
+++ b/fs/xfs/xfs_qmops.c
@@ -23,7 +23,6 @@
#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
-#include "xfs_dir.h"
#include "xfs_dir2.h"
#include "xfs_dmapi.h"
#include "xfs_mount.h"
diff --git a/fs/xfs/xfs_quota.h b/fs/xfs/xfs_quota.h
index 7fbef974bce..acb853b33eb 100644
--- a/fs/xfs/xfs_quota.h
+++ b/fs/xfs/xfs_quota.h
@@ -365,7 +365,7 @@ typedef struct xfs_dqtrxops {
extern int xfs_qm_dqcheck(xfs_disk_dquot_t *, xfs_dqid_t, uint, uint, char *);
extern int xfs_mount_reset_sbqflags(struct xfs_mount *);
-extern struct bhv_vfsops xfs_qmops;
+extern struct bhv_module_vfsops xfs_qmops;
#endif /* __KERNEL__ */
diff --git a/fs/xfs/xfs_rename.c b/fs/xfs/xfs_rename.c
index 1f148762eb2..d98171deaa1 100644
--- a/fs/xfs/xfs_rename.c
+++ b/fs/xfs/xfs_rename.c
@@ -22,13 +22,11 @@
#include "xfs_inum.h"
#include "xfs_trans.h"
#include "xfs_sb.h"
-#include "xfs_dir.h"
#include "xfs_dir2.h"
#include "xfs_dmapi.h"
#include "xfs_mount.h"
#include "xfs_da_btree.h"
#include "xfs_bmap_btree.h"
-#include "xfs_dir_sf.h"
#include "xfs_dir2_sf.h"
#include "xfs_attr_sf.h"
#include "xfs_dinode.h"
@@ -40,7 +38,6 @@
#include "xfs_refcache.h"
#include "xfs_utils.h"
#include "xfs_trans_space.h"
-#include "xfs_dir_leaf.h"
/*
@@ -87,8 +84,8 @@ STATIC int
xfs_lock_for_rename(
xfs_inode_t *dp1, /* old (source) directory inode */
xfs_inode_t *dp2, /* new (target) directory inode */
- vname_t *vname1,/* old entry name */
- vname_t *vname2,/* new entry name */
+ bhv_vname_t *vname1,/* old entry name */
+ bhv_vname_t *vname2,/* new entry name */
xfs_inode_t **ipp1, /* inode of old entry */
xfs_inode_t **ipp2, /* inode of new entry, if it
already exists, NULL otherwise. */
@@ -225,9 +222,9 @@ xfs_lock_for_rename(
int
xfs_rename(
bhv_desc_t *src_dir_bdp,
- vname_t *src_vname,
- vnode_t *target_dir_vp,
- vname_t *target_vname,
+ bhv_vname_t *src_vname,
+ bhv_vnode_t *target_dir_vp,
+ bhv_vname_t *target_vname,
cred_t *credp)
{
xfs_trans_t *tp;
@@ -242,7 +239,7 @@ xfs_rename(
int committed;
xfs_inode_t *inodes[4];
int target_ip_dropped = 0; /* dropped target_ip link? */
- vnode_t *src_dir_vp;
+ bhv_vnode_t *src_dir_vp;
int spaceres;
int target_link_zero = 0;
int num_inodes;
@@ -398,34 +395,29 @@ xfs_rename(
* fit before actually inserting it.
*/
if (spaceres == 0 &&
- (error = XFS_DIR_CANENTER(mp, tp, target_dp, target_name,
- target_namelen))) {
+ (error = xfs_dir_canenter(tp, target_dp, target_name,
+ target_namelen)))
goto error_return;
- }
/*
* If target does not exist and the rename crosses
* directories, adjust the target directory link count
* to account for the ".." reference from the new entry.
*/
- error = XFS_DIR_CREATENAME(mp, tp, target_dp, target_name,
+ error = xfs_dir_createname(tp, target_dp, target_name,
target_namelen, src_ip->i_ino,
&first_block, &free_list, spaceres);
- if (error == ENOSPC) {
+ if (error == ENOSPC)
goto error_return;
- }
- if (error) {
+ if (error)
goto abort_return;
- }
xfs_ichgtime(target_dp, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
if (new_parent && src_is_directory) {
error = xfs_bumplink(tp, target_dp);
- if (error) {
+ if (error)
goto abort_return;
- }
}
} else { /* target_ip != NULL */
-
/*
* If target exists and it's a directory, check that both
* target and source are directories and that target can be
@@ -435,7 +427,7 @@ xfs_rename(
/*
* Make sure target dir is empty.
*/
- if (!(XFS_DIR_ISEMPTY(target_ip->i_mount, target_ip)) ||
+ if (!(xfs_dir_isempty(target_ip)) ||
(target_ip->i_d.di_nlink > 2)) {
error = XFS_ERROR(EEXIST);
goto error_return;
@@ -451,12 +443,11 @@ xfs_rename(
* In case there is already an entry with the same
* name at the destination directory, remove it first.
*/
- error = XFS_DIR_REPLACE(mp, tp, target_dp, target_name,
- target_namelen, src_ip->i_ino, &first_block,
- &free_list, spaceres);
- if (error) {
+ error = xfs_dir_replace(tp, target_dp, target_name,
+ target_namelen, src_ip->i_ino,
+ &first_block, &free_list, spaceres);
+ if (error)
goto abort_return;
- }
xfs_ichgtime(target_dp, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
/*
@@ -464,9 +455,8 @@ xfs_rename(
* dir no longer points to it.
*/
error = xfs_droplink(tp, target_ip);
- if (error) {
+ if (error)
goto abort_return;
- }
target_ip_dropped = 1;
if (src_is_directory) {
@@ -474,9 +464,8 @@ xfs_rename(
* Drop the link from the old "." entry.
*/
error = xfs_droplink(tp, target_ip);
- if (error) {
+ if (error)
goto abort_return;
- }
}
/* Do this test while we still hold the locks */
@@ -488,18 +477,15 @@ xfs_rename(
* Remove the source.
*/
if (new_parent && src_is_directory) {
-
/*
* Rewrite the ".." entry to point to the new
* directory.
*/
- error = XFS_DIR_REPLACE(mp, tp, src_ip, "..", 2,
- target_dp->i_ino, &first_block,
- &free_list, spaceres);
+ error = xfs_dir_replace(tp, src_ip, "..", 2, target_dp->i_ino,
+ &first_block, &free_list, spaceres);
ASSERT(error != EEXIST);
- if (error) {
+ if (error)
goto abort_return;
- }
xfs_ichgtime(src_ip, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
} else {
@@ -527,16 +513,14 @@ xfs_rename(
* entry that's moved no longer points to it.
*/
error = xfs_droplink(tp, src_dp);
- if (error) {
+ if (error)
goto abort_return;
- }
}
- error = XFS_DIR_REMOVENAME(mp, tp, src_dp, src_name, src_namelen,
+ error = xfs_dir_removename(tp, src_dp, src_name, src_namelen,
src_ip->i_ino, &first_block, &free_list, spaceres);
- if (error) {
+ if (error)
goto abort_return;
- }
xfs_ichgtime(src_dp, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
/*
@@ -609,7 +593,7 @@ xfs_rename(
* Let interposed file systems know about removed links.
*/
if (target_ip_dropped) {
- VOP_LINK_REMOVED(XFS_ITOV(target_ip), target_dir_vp,
+ bhv_vop_link_removed(XFS_ITOV(target_ip), target_dir_vp,
target_link_zero);
IRELE(target_ip);
}
diff --git a/fs/xfs/xfs_rtalloc.c b/fs/xfs/xfs_rtalloc.c
index 5b413946b1c..0c1e42b037e 100644
--- a/fs/xfs/xfs_rtalloc.c
+++ b/fs/xfs/xfs_rtalloc.c
@@ -24,14 +24,12 @@
#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
-#include "xfs_dir.h"
#include "xfs_dir2.h"
#include "xfs_dmapi.h"
#include "xfs_mount.h"
#include "xfs_bmap_btree.h"
#include "xfs_alloc_btree.h"
#include "xfs_ialloc_btree.h"
-#include "xfs_dir_sf.h"
#include "xfs_dir2_sf.h"
#include "xfs_attr_sf.h"
#include "xfs_dinode.h"
@@ -141,7 +139,7 @@ xfs_growfs_rt_alloc(
cancelflags |= XFS_TRANS_ABORT;
error = xfs_bmapi(tp, ip, oblocks, nblocks - oblocks,
XFS_BMAPI_WRITE | XFS_BMAPI_METADATA, &firstblock,
- resblks, &map, &nmap, &flist);
+ resblks, &map, &nmap, &flist, NULL);
if (!error && nmap < 1)
error = XFS_ERROR(ENOSPC);
if (error)
@@ -2404,10 +2402,10 @@ xfs_rtprint_range(
{
xfs_extlen_t i; /* block number in the extent */
- printk("%Ld: ", (long long)start);
+ cmn_err(CE_DEBUG, "%Ld: ", (long long)start);
for (i = 0; i < len; i++)
- printk("%d", xfs_rtcheck_bit(mp, tp, start + i, 1));
- printk("\n");
+ cmn_err(CE_DEBUG, "%d", xfs_rtcheck_bit(mp, tp, start + i, 1));
+ cmn_err(CE_DEBUG, "\n");
}
/*
@@ -2431,17 +2429,17 @@ xfs_rtprint_summary(
(void)xfs_rtget_summary(mp, tp, l, i, &sumbp, &sb, &c);
if (c) {
if (!p) {
- printk("%Ld-%Ld:", 1LL << l,
+ cmn_err(CE_DEBUG, "%Ld-%Ld:", 1LL << l,
XFS_RTMIN((1LL << l) +
((1LL << l) - 1LL),
mp->m_sb.sb_rextents));
p = 1;
}
- printk(" %Ld:%d", (long long)i, c);
+ cmn_err(CE_DEBUG, " %Ld:%d", (long long)i, c);
}
}
if (p)
- printk("\n");
+ cmn_err(CE_DEBUG, "\n");
}
if (sumbp)
xfs_trans_brelse(tp, sumbp);
diff --git a/fs/xfs/xfs_rw.c b/fs/xfs/xfs_rw.c
index a59c102cf21..defb2febaaf 100644
--- a/fs/xfs/xfs_rw.c
+++ b/fs/xfs/xfs_rw.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2000-2003,2005 Silicon Graphics, Inc.
+ * Copyright (c) 2000-2006 Silicon Graphics, Inc.
* All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
@@ -24,14 +24,12 @@
#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
-#include "xfs_dir.h"
#include "xfs_dir2.h"
#include "xfs_dmapi.h"
#include "xfs_mount.h"
#include "xfs_bmap_btree.h"
#include "xfs_alloc_btree.h"
#include "xfs_ialloc_btree.h"
-#include "xfs_dir_sf.h"
#include "xfs_dir2_sf.h"
#include "xfs_attr_sf.h"
#include "xfs_dinode.h"
@@ -92,6 +90,90 @@ xfs_write_clear_setuid(
}
/*
+ * Handle logging requirements of various synchronous types of write.
+ */
+int
+xfs_write_sync_logforce(
+ xfs_mount_t *mp,
+ xfs_inode_t *ip)
+{
+ int error = 0;
+
+ /*
+ * If we're treating this as O_DSYNC and we have not updated the
+ * size, force the log.
+ */
+ if (!(mp->m_flags & XFS_MOUNT_OSYNCISOSYNC) &&
+ !(ip->i_update_size)) {
+ xfs_inode_log_item_t *iip = ip->i_itemp;
+
+ /*
+ * If an allocation transaction occurred
+ * without extending the size, then we have to force
+ * the log up to the proper point to ensure that the
+ * allocation is permanent. We can't count on
+ * the fact that buffered writes lock out direct I/O
+ * writes - the direct I/O write could have extended
+ * the size nontransactionally, then finished before
+ * we started. xfs_write_file will think that the file
+ * didn't grow but the update isn't safe unless the
+ * size change is logged.
+ *
+ * Force the log if we've committed a transaction
+ * against the inode or if someone else has and
+ * the commit record hasn't gone to disk (e.g.
+ * the inode is pinned). This guarantees that
+ * all changes affecting the inode are permanent
+ * when we return.
+ */
+ if (iip && iip->ili_last_lsn) {
+ xfs_log_force(mp, iip->ili_last_lsn,
+ XFS_LOG_FORCE | XFS_LOG_SYNC);
+ } else if (xfs_ipincount(ip) > 0) {
+ xfs_log_force(mp, (xfs_lsn_t)0,
+ XFS_LOG_FORCE | XFS_LOG_SYNC);
+ }
+
+ } else {
+ xfs_trans_t *tp;
+
+ /*
+ * O_SYNC or O_DSYNC _with_ a size update are handled
+ * the same way.
+ *
+ * If the write was synchronous then we need to make
+ * sure that the inode modification time is permanent.
+ * We'll have updated the timestamp above, so here
+ * we use a synchronous transaction to log the inode.
+ * It's not fast, but it's necessary.
+ *
+ * If this is a dsync write and the size got changed
+ * non-transactionally, then we need to ensure that
+ * the size change gets logged in a synchronous
+ * transaction.
+ */
+ tp = xfs_trans_alloc(mp, XFS_TRANS_WRITE_SYNC);
+ if ((error = xfs_trans_reserve(tp, 0,
+ XFS_SWRITE_LOG_RES(mp),
+ 0, 0, 0))) {
+ /* Transaction reserve failed */
+ xfs_trans_cancel(tp, 0);
+ } else {
+ /* Transaction reserve successful */
+ xfs_ilock(ip, XFS_ILOCK_EXCL);
+ xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
+ xfs_trans_ihold(tp, ip);
+ xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
+ xfs_trans_set_sync(tp);
+ error = xfs_trans_commit(tp, 0, NULL);
+ xfs_iunlock(ip, XFS_ILOCK_EXCL);
+ }
+ }
+
+ return error;
+}
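A hedged sketch of how a synchronous write path might consume the new helper once the data copy has succeeded; the wrapper and its IO_SYNC_WRITE flag below are illustrative assumptions, not code from this patch:

/* Illustrative only: finish a write the caller opened with O_SYNC/O_DSYNC. */
STATIC ssize_t
example_sync_write_tail(
	xfs_mount_t	*mp,
	xfs_inode_t	*ip,
	int		ioflags,	/* IO_SYNC_WRITE is a placeholder bit */
	ssize_t		ret)
{
	if (ret > 0 && (ioflags & IO_SYNC_WRITE)) {
		int	error = xfs_write_sync_logforce(mp, ip);

		if (error)
			ret = -error;	/* XFS errors are positive; return negated */
	}
	return ret;
}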
+
+/*
* Force a shutdown of the filesystem instantly while keeping
* the filesystem consistent. We don't do an unmount here; just shutdown
* the shop, make sure that absolutely nothing persistent happens to
@@ -109,12 +191,12 @@ xfs_do_force_shutdown(
xfs_mount_t *mp;
mp = XFS_BHVTOM(bdp);
- logerror = flags & XFS_LOG_IO_ERROR;
+ logerror = flags & SHUTDOWN_LOG_IO_ERROR;
- if (!(flags & XFS_FORCE_UMOUNT)) {
- cmn_err(CE_NOTE,
- "xfs_force_shutdown(%s,0x%x) called from line %d of file %s. Return address = 0x%p",
- mp->m_fsname,flags,lnnum,fname,__return_address);
+ if (!(flags & SHUTDOWN_FORCE_UMOUNT)) {
+ cmn_err(CE_NOTE, "xfs_force_shutdown(%s,0x%x) called from "
+ "line %d of file %s. Return address = 0x%p",
+ mp->m_fsname, flags, lnnum, fname, __return_address);
}
/*
* No need to duplicate efforts.
@@ -125,33 +207,37 @@ xfs_do_force_shutdown(
/*
* This flags XFS_MOUNT_FS_SHUTDOWN, makes sure that we don't
* queue up anybody new on the log reservations, and wakes up
- * everybody who's sleeping on log reservations and tells
- * them the bad news.
+ * everybody who's sleeping on log reservations to tell them
+ * the bad news.
*/
if (xfs_log_force_umount(mp, logerror))
return;
- if (flags & XFS_CORRUPT_INCORE) {
+ if (flags & SHUTDOWN_CORRUPT_INCORE) {
xfs_cmn_err(XFS_PTAG_SHUTDOWN_CORRUPT, CE_ALERT, mp,
"Corruption of in-memory data detected. Shutting down filesystem: %s",
mp->m_fsname);
if (XFS_ERRLEVEL_HIGH <= xfs_error_level) {
xfs_stack_trace();
}
- } else if (!(flags & XFS_FORCE_UMOUNT)) {
+ } else if (!(flags & SHUTDOWN_FORCE_UMOUNT)) {
if (logerror) {
xfs_cmn_err(XFS_PTAG_SHUTDOWN_LOGERROR, CE_ALERT, mp,
- "Log I/O Error Detected. Shutting down filesystem: %s",
+ "Log I/O Error Detected. Shutting down filesystem: %s",
+ mp->m_fsname);
+ } else if (flags & SHUTDOWN_DEVICE_REQ) {
+ xfs_cmn_err(XFS_PTAG_SHUTDOWN_IOERROR, CE_ALERT, mp,
+ "All device paths lost. Shutting down filesystem: %s",
mp->m_fsname);
- } else if (!(flags & XFS_SHUTDOWN_REMOTE_REQ)) {
+ } else if (!(flags & SHUTDOWN_REMOTE_REQ)) {
xfs_cmn_err(XFS_PTAG_SHUTDOWN_IOERROR, CE_ALERT, mp,
- "I/O Error Detected. Shutting down filesystem: %s",
+ "I/O Error Detected. Shutting down filesystem: %s",
mp->m_fsname);
}
}
- if (!(flags & XFS_FORCE_UMOUNT)) {
- cmn_err(CE_ALERT,
- "Please umount the filesystem, and rectify the problem(s)");
+ if (!(flags & SHUTDOWN_FORCE_UMOUNT)) {
+ cmn_err(CE_ALERT, "Please umount the filesystem, "
+ "and rectify the problem(s)");
}
}
@@ -335,7 +421,7 @@ xfs_bwrite(
* from bwrite and we could be tracing a buffer that has
* been reused.
*/
- xfs_force_shutdown(mp, XFS_METADATA_IO_ERROR);
+ xfs_force_shutdown(mp, SHUTDOWN_META_IO_ERROR);
}
return (error);
}
diff --git a/fs/xfs/xfs_rw.h b/fs/xfs/xfs_rw.h
index e6379564447..188b296ff50 100644
--- a/fs/xfs/xfs_rw.h
+++ b/fs/xfs/xfs_rw.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2000-2003,2005 Silicon Graphics, Inc.
+ * Copyright (c) 2000-2006 Silicon Graphics, Inc.
* All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
@@ -75,6 +75,7 @@ xfs_fsb_to_db_io(struct xfs_iocore *io, xfs_fsblock_t fsb)
* Prototypes for functions in xfs_rw.c.
*/
extern int xfs_write_clear_setuid(struct xfs_inode *ip);
+extern int xfs_write_sync_logforce(struct xfs_mount *mp, struct xfs_inode *ip);
extern int xfs_bwrite(struct xfs_mount *mp, struct xfs_buf *bp);
extern int xfs_bioerror(struct xfs_buf *bp);
extern int xfs_bioerror_relse(struct xfs_buf *bp);
@@ -87,9 +88,10 @@ extern void xfs_ioerror_alert(char *func, struct xfs_mount *mp,
/*
* Prototypes for functions in xfs_vnodeops.c.
*/
-extern int xfs_rwlock(bhv_desc_t *bdp, vrwlock_t write_lock);
-extern void xfs_rwunlock(bhv_desc_t *bdp, vrwlock_t write_lock);
-extern int xfs_setattr(bhv_desc_t *bdp, vattr_t *vap, int flags, cred_t *credp);
+extern int xfs_rwlock(bhv_desc_t *bdp, bhv_vrwlock_t write_lock);
+extern void xfs_rwunlock(bhv_desc_t *bdp, bhv_vrwlock_t write_lock);
+extern int xfs_setattr(bhv_desc_t *, bhv_vattr_t *vap, int flags,
+ cred_t *credp);
extern int xfs_change_file_space(bhv_desc_t *bdp, int cmd, xfs_flock64_t *bf,
xfs_off_t offset, cred_t *credp, int flags);
extern int xfs_set_dmattrs(bhv_desc_t *bdp, u_int evmask, u_int16_t state,
diff --git a/fs/xfs/xfs_trans.c b/fs/xfs/xfs_trans.c
index 8d056cef5d1..ee2721e0de4 100644
--- a/fs/xfs/xfs_trans.c
+++ b/fs/xfs/xfs_trans.c
@@ -24,7 +24,6 @@
#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
-#include "xfs_dir.h"
#include "xfs_dir2.h"
#include "xfs_dmapi.h"
#include "xfs_mount.h"
@@ -33,7 +32,6 @@
#include "xfs_bmap_btree.h"
#include "xfs_alloc_btree.h"
#include "xfs_ialloc_btree.h"
-#include "xfs_dir_sf.h"
#include "xfs_dir2_sf.h"
#include "xfs_attr_sf.h"
#include "xfs_dinode.h"
@@ -236,11 +234,8 @@ xfs_trans_alloc(
xfs_mount_t *mp,
uint type)
{
- fs_check_frozen(XFS_MTOVFS(mp), SB_FREEZE_TRANS);
- atomic_inc(&mp->m_active_trans);
-
- return (_xfs_trans_alloc(mp, type));
-
+ vfs_wait_for_freeze(XFS_MTOVFS(mp), SB_FREEZE_TRANS);
+ return _xfs_trans_alloc(mp, type);
}
xfs_trans_t *
@@ -250,12 +245,9 @@ _xfs_trans_alloc(
{
xfs_trans_t *tp;
- ASSERT(xfs_trans_zone != NULL);
- tp = kmem_zone_zalloc(xfs_trans_zone, KM_SLEEP);
+ atomic_inc(&mp->m_active_trans);
- /*
- * Initialize the transaction structure.
- */
+ tp = kmem_zone_zalloc(xfs_trans_zone, KM_SLEEP);
tp->t_magic = XFS_TRANS_MAGIC;
tp->t_type = type;
tp->t_mountp = mp;
@@ -263,8 +255,7 @@ _xfs_trans_alloc(
tp->t_busy_free = XFS_LBC_NUM_SLOTS;
XFS_LIC_INIT(&(tp->t_items));
XFS_LBC_INIT(&(tp->t_busy));
-
- return (tp);
+ return tp;
}
/*
@@ -303,7 +294,7 @@ xfs_trans_dup(
tp->t_blk_res = tp->t_blk_res_used;
ntp->t_rtx_res = tp->t_rtx_res - tp->t_rtx_res_used;
tp->t_rtx_res = tp->t_rtx_res_used;
- PFLAGS_DUP(&tp->t_pflags, &ntp->t_pflags);
+ ntp->t_pflags = tp->t_pflags;
XFS_TRANS_DUP_DQINFO(tp->t_mountp, tp, ntp);
@@ -335,14 +326,11 @@ xfs_trans_reserve(
uint logcount)
{
int log_flags;
- int error;
- int rsvd;
-
- error = 0;
- rsvd = (tp->t_flags & XFS_TRANS_RESERVE) != 0;
+ int error = 0;
+ int rsvd = (tp->t_flags & XFS_TRANS_RESERVE) != 0;
/* Mark this thread as being in a transaction */
- PFLAGS_SET_FSTRANS(&tp->t_pflags);
+ current_set_flags_nested(&tp->t_pflags, PF_FSTRANS);
/*
* Attempt to reserve the needed disk blocks by decrementing
@@ -353,7 +341,7 @@ xfs_trans_reserve(
error = xfs_mod_incore_sb(tp->t_mountp, XFS_SBS_FDBLOCKS,
-blocks, rsvd);
if (error != 0) {
- PFLAGS_RESTORE_FSTRANS(&tp->t_pflags);
+ current_restore_flags_nested(&tp->t_pflags, PF_FSTRANS);
return (XFS_ERROR(ENOSPC));
}
tp->t_blk_res += blocks;
@@ -426,9 +414,9 @@ undo_blocks:
tp->t_blk_res = 0;
}
- PFLAGS_RESTORE_FSTRANS(&tp->t_pflags);
+ current_restore_flags_nested(&tp->t_pflags, PF_FSTRANS);
- return (error);
+ return error;
}
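The PFLAGS_* wrappers above give way to current_set_flags_nested()/current_restore_flags_nested(), which remember the prior PF_FSTRANS state in the transaction so nested or duplicated transactions restore it correctly. A small userspace analogue of that save-and-restore idea, with illustrative names only:

/* Remember the prior state of the flag word in *saved, then set "bits". */
static void set_flags_nested(unsigned long *saved, unsigned long *flags,
			     unsigned long bits)
{
	*saved = *flags;
	*flags |= bits;
}

/* Clear "bits", then put back whichever of them were set beforehand. */
static void restore_flags_nested(unsigned long *saved, unsigned long *flags,
				 unsigned long bits)
{
	*flags = (*flags & ~bits) | (*saved & bits);
}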
@@ -819,7 +807,7 @@ shut_us_down:
if (commit_lsn == -1 && !shutdown)
shutdown = XFS_ERROR(EIO);
}
- PFLAGS_RESTORE_FSTRANS(&tp->t_pflags);
+ current_restore_flags_nested(&tp->t_pflags, PF_FSTRANS);
xfs_trans_free_items(tp, shutdown? XFS_TRANS_ABORT : 0);
xfs_trans_free_busy(tp);
xfs_trans_free(tp);
@@ -846,7 +834,7 @@ shut_us_down:
*/
nvec = xfs_trans_count_vecs(tp);
if (nvec == 0) {
- xfs_force_shutdown(mp, XFS_LOG_IO_ERROR);
+ xfs_force_shutdown(mp, SHUTDOWN_LOG_IO_ERROR);
goto shut_us_down;
} else if (nvec <= XFS_TRANS_LOGVEC_COUNT) {
log_vector = log_vector_fast;
@@ -884,7 +872,7 @@ shut_us_down:
* had pinned, clean up, free trans structure, and return error.
*/
if (error || commit_lsn == -1) {
- PFLAGS_RESTORE_FSTRANS(&tp->t_pflags);
+ current_restore_flags_nested(&tp->t_pflags, PF_FSTRANS);
xfs_trans_uncommit(tp, flags|XFS_TRANS_ABORT);
return XFS_ERROR(EIO);
}
@@ -926,7 +914,7 @@ shut_us_down:
/*
* Mark this thread as no longer being in a transaction
*/
- PFLAGS_RESTORE_FSTRANS(&tp->t_pflags);
+ current_restore_flags_nested(&tp->t_pflags, PF_FSTRANS);
/*
* Once all the items of the transaction have been copied
@@ -1148,7 +1136,7 @@ xfs_trans_cancel(
*/
if ((tp->t_flags & XFS_TRANS_DIRTY) && !XFS_FORCED_SHUTDOWN(mp)) {
XFS_ERROR_REPORT("xfs_trans_cancel", XFS_ERRLEVEL_LOW, mp);
- xfs_force_shutdown(mp, XFS_CORRUPT_INCORE);
+ xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
}
#ifdef DEBUG
if (!(flags & XFS_TRANS_ABORT)) {
@@ -1182,7 +1170,7 @@ xfs_trans_cancel(
}
/* mark this thread as no longer being in a transaction */
- PFLAGS_RESTORE_FSTRANS(&tp->t_pflags);
+ current_restore_flags_nested(&tp->t_pflags, PF_FSTRANS);
xfs_trans_free_items(tp, flags);
xfs_trans_free_busy(tp);
diff --git a/fs/xfs/xfs_trans.h b/fs/xfs/xfs_trans.h
index 100d9a4b38e..cb65c3a603f 100644
--- a/fs/xfs/xfs_trans.h
+++ b/fs/xfs/xfs_trans.h
@@ -805,12 +805,9 @@ typedef struct xfs_trans {
((mp)->m_sb.sb_inodesize + \
(mp)->m_sb.sb_sectsize * 2 + \
(mp)->m_dirblksize + \
- (XFS_DIR_IS_V1(mp) ? 0 : \
- XFS_FSB_TO_B(mp, (XFS_DAENTER_BMAP1B(mp, XFS_DATA_FORK) + 1))) + \
+ XFS_FSB_TO_B(mp, (XFS_DAENTER_BMAP1B(mp, XFS_DATA_FORK) + 1)) + \
XFS_ALLOCFREE_LOG_RES(mp, 1) + \
- (128 * (4 + \
- (XFS_DIR_IS_V1(mp) ? 0 : \
- XFS_DAENTER_BMAP1B(mp, XFS_DATA_FORK) + 1) + \
+ (128 * (4 + (XFS_DAENTER_BMAP1B(mp, XFS_DATA_FORK) + 1) + \
XFS_ALLOCFREE_LOG_COUNT(mp, 1))))
#define XFS_ADDAFORK_LOG_RES(mp) ((mp)->m_reservations.tr_addafork)
diff --git a/fs/xfs/xfs_trans_ail.c b/fs/xfs/xfs_trans_ail.c
index 19ab24af1c1..558c87ff0c4 100644
--- a/fs/xfs/xfs_trans_ail.c
+++ b/fs/xfs/xfs_trans_ail.c
@@ -22,7 +22,6 @@
#include "xfs_inum.h"
#include "xfs_trans.h"
#include "xfs_sb.h"
-#include "xfs_dir.h"
#include "xfs_dmapi.h"
#include "xfs_mount.h"
#include "xfs_trans_priv.h"
@@ -363,9 +362,10 @@ xfs_trans_delete_ail(
AIL_UNLOCK(mp, s);
else {
xfs_cmn_err(XFS_PTAG_AILDELETE, CE_ALERT, mp,
- "xfs_trans_delete_ail: attempting to delete a log item that is not in the AIL");
+ "%s: attempting to delete a log item that is not in the AIL",
+ __FUNCTION__);
AIL_UNLOCK(mp, s);
- xfs_force_shutdown(mp, XFS_CORRUPT_INCORE);
+ xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
}
}
}
diff --git a/fs/xfs/xfs_trans_buf.c b/fs/xfs/xfs_trans_buf.c
index c74c31ebc81..60b6b898022 100644
--- a/fs/xfs/xfs_trans_buf.c
+++ b/fs/xfs/xfs_trans_buf.c
@@ -24,14 +24,12 @@
#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
-#include "xfs_dir.h"
#include "xfs_dir2.h"
#include "xfs_dmapi.h"
#include "xfs_mount.h"
#include "xfs_bmap_btree.h"
#include "xfs_alloc_btree.h"
#include "xfs_ialloc_btree.h"
-#include "xfs_dir_sf.h"
#include "xfs_dir2_sf.h"
#include "xfs_attr_sf.h"
#include "xfs_dinode.h"
@@ -320,7 +318,7 @@ xfs_trans_read_buf(
if (xfs_error_target == target) {
if (((xfs_req_num++) % xfs_error_mod) == 0) {
xfs_buf_relse(bp);
- printk("Returning error!\n");
+ cmn_err(CE_DEBUG, "Returning error!\n");
return XFS_ERROR(EIO);
}
}
@@ -369,7 +367,7 @@ xfs_trans_read_buf(
*/
if (tp->t_flags & XFS_TRANS_DIRTY)
xfs_force_shutdown(tp->t_mountp,
- XFS_METADATA_IO_ERROR);
+ SHUTDOWN_META_IO_ERROR);
return error;
}
}
@@ -414,7 +412,7 @@ xfs_trans_read_buf(
xfs_ioerror_alert("xfs_trans_read_buf", mp,
bp, blkno);
if (tp->t_flags & XFS_TRANS_DIRTY)
- xfs_force_shutdown(tp->t_mountp, XFS_METADATA_IO_ERROR);
+ xfs_force_shutdown(tp->t_mountp, SHUTDOWN_META_IO_ERROR);
xfs_buf_relse(bp);
return error;
}
@@ -423,9 +421,9 @@ xfs_trans_read_buf(
if (xfs_error_target == target) {
if (((xfs_req_num++) % xfs_error_mod) == 0) {
xfs_force_shutdown(tp->t_mountp,
- XFS_METADATA_IO_ERROR);
+ SHUTDOWN_META_IO_ERROR);
xfs_buf_relse(bp);
- printk("Returning error in trans!\n");
+ cmn_err(CE_DEBUG, "Returning trans error!\n");
return XFS_ERROR(EIO);
}
}
diff --git a/fs/xfs/xfs_trans_extfree.c b/fs/xfs/xfs_trans_extfree.c
index 7d7d627f25d..b290270dd4a 100644
--- a/fs/xfs/xfs_trans_extfree.c
+++ b/fs/xfs/xfs_trans_extfree.c
@@ -22,7 +22,6 @@
#include "xfs_inum.h"
#include "xfs_trans.h"
#include "xfs_sb.h"
-#include "xfs_dir.h"
#include "xfs_dmapi.h"
#include "xfs_mount.h"
#include "xfs_trans_priv.h"
diff --git a/fs/xfs/xfs_trans_inode.c b/fs/xfs/xfs_trans_inode.c
index 7c5894d59f8..b8db1d5cde5 100644
--- a/fs/xfs/xfs_trans_inode.c
+++ b/fs/xfs/xfs_trans_inode.c
@@ -24,14 +24,12 @@
#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
-#include "xfs_dir.h"
#include "xfs_dir2.h"
#include "xfs_dmapi.h"
#include "xfs_mount.h"
#include "xfs_bmap_btree.h"
#include "xfs_alloc_btree.h"
#include "xfs_ialloc_btree.h"
-#include "xfs_dir_sf.h"
#include "xfs_dir2_sf.h"
#include "xfs_attr_sf.h"
#include "xfs_dinode.h"
diff --git a/fs/xfs/xfs_trans_item.c b/fs/xfs/xfs_trans_item.c
index 1117d600d74..2912aac07c7 100644
--- a/fs/xfs/xfs_trans_item.c
+++ b/fs/xfs/xfs_trans_item.c
@@ -493,7 +493,7 @@ xfs_trans_add_busy(xfs_trans_t *tp, xfs_agnumber_t ag, xfs_extlen_t idx)
break;
} else {
/* out-of-order vacancy */
- printk("OOO vacancy lbcp 0x%p\n", lbcp);
+ cmn_err(CE_DEBUG, "OOO vacancy lbcp 0x%p\n", lbcp);
ASSERT(0);
}
}
diff --git a/fs/xfs/xfs_trans_space.h b/fs/xfs/xfs_trans_space.h
index 7fe3792b18d..4ea2e5074bd 100644
--- a/fs/xfs/xfs_trans_space.h
+++ b/fs/xfs/xfs_trans_space.h
@@ -30,8 +30,7 @@
XFS_EXTENTADD_SPACE_RES(mp,w))
#define XFS_DAENTER_1B(mp,w) ((w) == XFS_DATA_FORK ? (mp)->m_dirblkfsbs : 1)
#define XFS_DAENTER_DBS(mp,w) \
- (XFS_DA_NODE_MAXDEPTH + \
- ((XFS_DIR_IS_V2(mp) && (w) == XFS_DATA_FORK) ? 2 : 0))
+ (XFS_DA_NODE_MAXDEPTH + (((w) == XFS_DATA_FORK) ? 2 : 0))
#define XFS_DAENTER_BLOCKS(mp,w) \
(XFS_DAENTER_1B(mp,w) * XFS_DAENTER_DBS(mp,w))
#define XFS_DAENTER_BMAP1B(mp,w) \
@@ -41,10 +40,7 @@
#define XFS_DAENTER_SPACE_RES(mp,w) \
(XFS_DAENTER_BLOCKS(mp,w) + XFS_DAENTER_BMAPS(mp,w))
#define XFS_DAREMOVE_SPACE_RES(mp,w) XFS_DAENTER_BMAPS(mp,w)
-#define XFS_DIRENTER_MAX_SPLIT(mp,nl) \
- (((mp)->m_sb.sb_blocksize == 512 && \
- XFS_DIR_IS_V1(mp) && \
- (nl) >= XFS_DIR_LEAF_CAN_DOUBLE_SPLIT_LEN) ? 2 : 1)
+#define XFS_DIRENTER_MAX_SPLIT(mp,nl) 1
#define XFS_DIRENTER_SPACE_RES(mp,nl) \
(XFS_DAENTER_SPACE_RES(mp, XFS_DATA_FORK) * \
XFS_DIRENTER_MAX_SPLIT(mp,nl))
@@ -57,8 +53,7 @@
* Space reservation values for various transactions.
*/
#define XFS_ADDAFORK_SPACE_RES(mp) \
- ((mp)->m_dirblkfsbs + \
- (XFS_DIR_IS_V1(mp) ? 0 : XFS_DAENTER_BMAP1B(mp, XFS_DATA_FORK)))
+ ((mp)->m_dirblkfsbs + XFS_DAENTER_BMAP1B(mp, XFS_DATA_FORK))
#define XFS_ATTRRM_SPACE_RES(mp) \
XFS_DAREMOVE_SPACE_RES(mp, XFS_ATTR_FORK)
/* This macro is not used - see inline code in xfs_attr_set */
diff --git a/fs/xfs/xfs_utils.c b/fs/xfs/xfs_utils.c
index 34654ec6ae1..9014d7e4448 100644
--- a/fs/xfs/xfs_utils.c
+++ b/fs/xfs/xfs_utils.c
@@ -24,12 +24,10 @@
#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
-#include "xfs_dir.h"
#include "xfs_dir2.h"
#include "xfs_dmapi.h"
#include "xfs_mount.h"
#include "xfs_bmap_btree.h"
-#include "xfs_dir_sf.h"
#include "xfs_dir2_sf.h"
#include "xfs_attr_sf.h"
#include "xfs_dinode.h"
@@ -51,10 +49,10 @@
*/
int
xfs_get_dir_entry(
- vname_t *dentry,
+ bhv_vname_t *dentry,
xfs_inode_t **ipp)
{
- vnode_t *vp;
+ bhv_vnode_t *vp;
vp = VNAME_TO_VNODE(dentry);
@@ -69,11 +67,11 @@ int
xfs_dir_lookup_int(
bhv_desc_t *dir_bdp,
uint lock_mode,
- vname_t *dentry,
+ bhv_vname_t *dentry,
xfs_ino_t *inum,
xfs_inode_t **ipp)
{
- vnode_t *dir_vp;
+ bhv_vnode_t *dir_vp;
xfs_inode_t *dp;
int error;
@@ -82,8 +80,7 @@ xfs_dir_lookup_int(
dp = XFS_BHVTOI(dir_bdp);
- error = XFS_DIR_LOOKUP(dp->i_mount, NULL, dp,
- VNAME(dentry), VNAMELEN(dentry), inum);
+ error = xfs_dir_lookup(NULL, dp, VNAME(dentry), VNAMELEN(dentry), inum);
if (!error) {
/*
* Unlock the directory. We do this because we can't
diff --git a/fs/xfs/xfs_utils.h b/fs/xfs/xfs_utils.h
index 472661a3b6d..fe953e98afa 100644
--- a/fs/xfs/xfs_utils.h
+++ b/fs/xfs/xfs_utils.h
@@ -23,9 +23,10 @@
#define ITRACE(ip) vn_trace_ref(XFS_ITOV(ip), __FILE__, __LINE__, \
(inst_t *)__return_address)
-extern int xfs_rename (bhv_desc_t *, vname_t *, vnode_t *, vname_t *, cred_t *);
-extern int xfs_get_dir_entry (vname_t *, xfs_inode_t **);
-extern int xfs_dir_lookup_int (bhv_desc_t *, uint, vname_t *, xfs_ino_t *,
+extern int xfs_rename (bhv_desc_t *, bhv_vname_t *, bhv_vnode_t *,
+ bhv_vname_t *, cred_t *);
+extern int xfs_get_dir_entry (bhv_vname_t *, xfs_inode_t **);
+extern int xfs_dir_lookup_int (bhv_desc_t *, uint, bhv_vname_t *, xfs_ino_t *,
xfs_inode_t **);
extern int xfs_truncate_file (xfs_mount_t *, xfs_inode_t *);
extern int xfs_dir_ialloc (xfs_trans_t **, xfs_inode_t *, mode_t, xfs_nlink_t,
diff --git a/fs/xfs/xfs_vfsops.c b/fs/xfs/xfs_vfsops.c
index 36ea1b2094f..6c96391f3f1 100644
--- a/fs/xfs/xfs_vfsops.c
+++ b/fs/xfs/xfs_vfsops.c
@@ -24,7 +24,6 @@
#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
-#include "xfs_dir.h"
#include "xfs_dir2.h"
#include "xfs_dmapi.h"
#include "xfs_mount.h"
@@ -32,7 +31,6 @@
#include "xfs_bmap_btree.h"
#include "xfs_ialloc_btree.h"
#include "xfs_alloc_btree.h"
-#include "xfs_dir_sf.h"
#include "xfs_dir2_sf.h"
#include "xfs_attr_sf.h"
#include "xfs_dinode.h"
@@ -131,9 +129,6 @@ xfs_init(void)
#ifdef XFS_BMBT_TRACE
xfs_bmbt_trace_buf = ktrace_alloc(XFS_BMBT_TRACE_SIZE, KM_SLEEP);
#endif
-#ifdef XFS_DIR_TRACE
- xfs_dir_trace_buf = ktrace_alloc(XFS_DIR_TRACE_SIZE, KM_SLEEP);
-#endif
#ifdef XFS_ATTR_TRACE
xfs_attr_trace_buf = ktrace_alloc(XFS_ATTR_TRACE_SIZE, KM_SLEEP);
#endif
@@ -177,9 +172,6 @@ xfs_cleanup(void)
#ifdef XFS_ATTR_TRACE
ktrace_free(xfs_attr_trace_buf);
#endif
-#ifdef XFS_DIR_TRACE
- ktrace_free(xfs_dir_trace_buf);
-#endif
#ifdef XFS_BMBT_TRACE
ktrace_free(xfs_bmbt_trace_buf);
#endif
@@ -212,7 +204,7 @@ xfs_cleanup(void)
*/
STATIC int
xfs_start_flags(
- struct vfs *vfs,
+ struct bhv_vfs *vfs,
struct xfs_mount_args *ap,
struct xfs_mount *mp)
{
@@ -337,7 +329,7 @@ xfs_start_flags(
*/
STATIC int
xfs_finish_flags(
- struct vfs *vfs,
+ struct bhv_vfs *vfs,
struct xfs_mount_args *ap,
struct xfs_mount *mp)
{
@@ -423,7 +415,7 @@ xfs_mount(
struct xfs_mount_args *args,
cred_t *credp)
{
- struct vfs *vfsp = bhvtovfs(bhvp);
+ struct bhv_vfs *vfsp = bhvtovfs(bhvp);
struct bhv_desc *p;
struct xfs_mount *mp = XFS_BHVTOM(bhvp);
struct block_device *ddev, *logdev, *rtdev;
@@ -552,10 +544,10 @@ xfs_unmount(
int flags,
cred_t *credp)
{
- struct vfs *vfsp = bhvtovfs(bdp);
+ bhv_vfs_t *vfsp = bhvtovfs(bdp);
xfs_mount_t *mp = XFS_BHVTOM(bdp);
xfs_inode_t *rip;
- vnode_t *rvp;
+ bhv_vnode_t *rvp;
int unmount_event_wanted = 0;
int unmount_event_flags = 0;
int xfs_unmountfs_needed = 0;
@@ -665,9 +657,8 @@ xfs_mntupdate(
int *flags,
struct xfs_mount_args *args)
{
- struct vfs *vfsp = bhvtovfs(bdp);
+ bhv_vfs_t *vfsp = bhvtovfs(bdp);
xfs_mount_t *mp = XFS_BHVTOM(bdp);
- int error;
if (!(*flags & MS_RDONLY)) { /* rw/ro -> rw */
if (vfsp->vfs_flag & VFS_RDONLY)
@@ -679,7 +670,7 @@ xfs_mntupdate(
mp->m_flags &= ~XFS_MOUNT_BARRIER;
}
} else if (!(vfsp->vfs_flag & VFS_RDONLY)) { /* rw -> ro */
- VFS_SYNC(vfsp, SYNC_FSDATA|SYNC_BDFLUSH|SYNC_ATTR, NULL, error);
+ bhv_vfs_sync(vfsp, SYNC_FSDATA|SYNC_BDFLUSH|SYNC_ATTR, NULL);
xfs_quiesce_fs(mp);
xfs_log_unmount_write(mp);
xfs_unmountfs_writesb(mp);
@@ -702,7 +693,7 @@ xfs_unmount_flush(
xfs_inode_t *rip = mp->m_rootip;
xfs_inode_t *rbmip;
xfs_inode_t *rsumip = NULL;
- vnode_t *rvp = XFS_ITOV(rip);
+ bhv_vnode_t *rvp = XFS_ITOV(rip);
int error;
xfs_ilock(rip, XFS_ILOCK_EXCL);
@@ -781,9 +772,9 @@ fscorrupt_out2:
STATIC int
xfs_root(
bhv_desc_t *bdp,
- vnode_t **vpp)
+ bhv_vnode_t **vpp)
{
- vnode_t *vp;
+ bhv_vnode_t *vp;
vp = XFS_ITOV((XFS_BHVTOM(bdp))->m_rootip);
VN_HOLD(vp);
@@ -801,8 +792,8 @@ xfs_root(
STATIC int
xfs_statvfs(
bhv_desc_t *bdp,
- xfs_statfs_t *statp,
- vnode_t *vp)
+ bhv_statvfs_t *statp,
+ bhv_vnode_t *vp)
{
__uint64_t fakeinos;
xfs_extlen_t lsize;
@@ -900,7 +891,7 @@ xfs_sync(
/*
* xfs sync routine for internal use
*
- * This routine supports all of the flags defined for the generic VFS_SYNC
+ * This routine supports all of the flags defined for the generic vfs_sync
* interface as explained above under xfs_sync. In the interests of not
* changing interfaces within the 6.5 family, additional internally-
* required functions are specified within a separate xflags parameter,
@@ -917,7 +908,7 @@ xfs_sync_inodes(
xfs_inode_t *ip = NULL;
xfs_inode_t *ip_next;
xfs_buf_t *bp;
- vnode_t *vp = NULL;
+ bhv_vnode_t *vp = NULL;
int error;
int last_error;
uint64_t fflag;
@@ -1156,9 +1147,9 @@ xfs_sync_inodes(
xfs_iunlock(ip, XFS_ILOCK_SHARED);
if (XFS_FORCED_SHUTDOWN(mp)) {
- VOP_TOSS_PAGES(vp, 0, -1, FI_REMAPF);
+ bhv_vop_toss_pages(vp, 0, -1, FI_REMAPF);
} else {
- VOP_FLUSHINVAL_PAGES(vp, 0, -1, FI_REMAPF);
+ bhv_vop_flushinval_pages(vp, 0, -1, FI_REMAPF);
}
xfs_ilock(ip, XFS_ILOCK_SHARED);
@@ -1178,8 +1169,8 @@ xfs_sync_inodes(
* across calls to the buffer cache.
*/
xfs_iunlock(ip, XFS_ILOCK_SHARED);
- VOP_FLUSH_PAGES(vp, (xfs_off_t)0, -1,
- fflag, FI_NONE, error);
+ error = bhv_vop_flush_pages(vp, (xfs_off_t)0,
+ -1, fflag, FI_NONE);
xfs_ilock(ip, XFS_ILOCK_SHARED);
}
@@ -1231,9 +1222,7 @@ xfs_sync_inodes(
* marker and free it.
*/
XFS_MOUNT_ILOCK(mp);
-
IPOINTER_REMOVE(ip, mp);
-
XFS_MOUNT_IUNLOCK(mp);
ASSERT(!(lock_flags &
@@ -1421,7 +1410,7 @@ xfs_sync_inodes(
/*
* xfs sync routine for internal use
*
- * This routine supports all of the flags defined for the generic VFS_SYNC
+ * This routine supports all of the flags defined for the generic vfs_sync
* interface as explained above under xfs_sync. In the interests of not
* changing interfaces within the 6.5 family, additional internally-
* required functions are specified within a separate xflags parameter,
@@ -1574,7 +1563,7 @@ xfs_syncsub(
STATIC int
xfs_vget(
bhv_desc_t *bdp,
- vnode_t **vpp,
+ bhv_vnode_t **vpp,
fid_t *fidp)
{
xfs_mount_t *mp = XFS_BHVTOM(bdp);
@@ -1657,10 +1646,10 @@ xfs_vget(
#define MNTOPT_NOATTR2 "noattr2" /* do not use attr2 attribute format */
STATIC unsigned long
-suffix_strtoul(const char *cp, char **endp, unsigned int base)
+suffix_strtoul(char *s, char **endp, unsigned int base)
{
int last, shift_left_factor = 0;
- char *value = (char *)cp;
+ char *value = s;
last = strlen(value) - 1;
if (value[last] == 'K' || value[last] == 'k') {
@@ -1676,7 +1665,7 @@ suffix_strtoul(const char *cp, char **endp, unsigned int base)
value[last] = '\0';
}
- return simple_strtoul(cp, endp, base) << shift_left_factor;
+ return simple_strtoul((const char *)s, endp, base) << shift_left_factor;
}
STATIC int
@@ -1686,7 +1675,7 @@ xfs_parseargs(
struct xfs_mount_args *args,
int update)
{
- struct vfs *vfsp = bhvtovfs(bhv);
+ bhv_vfs_t *vfsp = bhvtovfs(bhv);
char *this_char, *value, *eov;
int dsunit, dswidth, vol_dsunit, vol_dswidth;
int iosize;
@@ -1708,42 +1697,48 @@ xfs_parseargs(
if (!strcmp(this_char, MNTOPT_LOGBUFS)) {
if (!value || !*value) {
- printk("XFS: %s option requires an argument\n",
+ cmn_err(CE_WARN,
+ "XFS: %s option requires an argument",
this_char);
return EINVAL;
}
args->logbufs = simple_strtoul(value, &eov, 10);
} else if (!strcmp(this_char, MNTOPT_LOGBSIZE)) {
if (!value || !*value) {
- printk("XFS: %s option requires an argument\n",
+ cmn_err(CE_WARN,
+ "XFS: %s option requires an argument",
this_char);
return EINVAL;
}
args->logbufsize = suffix_strtoul(value, &eov, 10);
} else if (!strcmp(this_char, MNTOPT_LOGDEV)) {
if (!value || !*value) {
- printk("XFS: %s option requires an argument\n",
+ cmn_err(CE_WARN,
+ "XFS: %s option requires an argument",
this_char);
return EINVAL;
}
strncpy(args->logname, value, MAXNAMELEN);
} else if (!strcmp(this_char, MNTOPT_MTPT)) {
if (!value || !*value) {
- printk("XFS: %s option requires an argument\n",
+ cmn_err(CE_WARN,
+ "XFS: %s option requires an argument",
this_char);
return EINVAL;
}
strncpy(args->mtpt, value, MAXNAMELEN);
} else if (!strcmp(this_char, MNTOPT_RTDEV)) {
if (!value || !*value) {
- printk("XFS: %s option requires an argument\n",
+ cmn_err(CE_WARN,
+ "XFS: %s option requires an argument",
this_char);
return EINVAL;
}
strncpy(args->rtname, value, MAXNAMELEN);
} else if (!strcmp(this_char, MNTOPT_BIOSIZE)) {
if (!value || !*value) {
- printk("XFS: %s option requires an argument\n",
+ cmn_err(CE_WARN,
+ "XFS: %s option requires an argument",
this_char);
return EINVAL;
}
@@ -1752,7 +1747,8 @@ xfs_parseargs(
args->iosizelog = (uint8_t) iosize;
} else if (!strcmp(this_char, MNTOPT_ALLOCSIZE)) {
if (!value || !*value) {
- printk("XFS: %s option requires an argument\n",
+ cmn_err(CE_WARN,
+ "XFS: %s option requires an argument",
this_char);
return EINVAL;
}
@@ -1761,7 +1757,8 @@ xfs_parseargs(
args->iosizelog = ffs(iosize) - 1;
} else if (!strcmp(this_char, MNTOPT_IHASHSIZE)) {
if (!value || !*value) {
- printk("XFS: %s option requires an argument\n",
+ cmn_err(CE_WARN,
+ "XFS: %s option requires an argument",
this_char);
return EINVAL;
}
@@ -1782,7 +1779,8 @@ xfs_parseargs(
} else if (!strcmp(this_char, MNTOPT_INO64)) {
args->flags |= XFSMNT_INO64;
#if !XFS_BIG_INUMS
- printk("XFS: %s option not allowed on this system\n",
+ cmn_err(CE_WARN,
+ "XFS: %s option not allowed on this system",
this_char);
return EINVAL;
#endif
@@ -1792,14 +1790,16 @@ xfs_parseargs(
args->flags |= XFSMNT_SWALLOC;
} else if (!strcmp(this_char, MNTOPT_SUNIT)) {
if (!value || !*value) {
- printk("XFS: %s option requires an argument\n",
+ cmn_err(CE_WARN,
+ "XFS: %s option requires an argument",
this_char);
return EINVAL;
}
dsunit = simple_strtoul(value, &eov, 10);
} else if (!strcmp(this_char, MNTOPT_SWIDTH)) {
if (!value || !*value) {
- printk("XFS: %s option requires an argument\n",
+ cmn_err(CE_WARN,
+ "XFS: %s option requires an argument",
this_char);
return EINVAL;
}
@@ -1807,7 +1807,8 @@ xfs_parseargs(
} else if (!strcmp(this_char, MNTOPT_64BITINODE)) {
args->flags &= ~XFSMNT_32BITINODES;
#if !XFS_BIG_INUMS
- printk("XFS: %s option not allowed on this system\n",
+ cmn_err(CE_WARN,
+ "XFS: %s option not allowed on this system",
this_char);
return EINVAL;
#endif
@@ -1831,36 +1832,41 @@ xfs_parseargs(
args->flags &= ~XFSMNT_ATTR2;
} else if (!strcmp(this_char, "osyncisdsync")) {
/* no-op, this is now the default */
-printk("XFS: osyncisdsync is now the default, option is deprecated.\n");
+ cmn_err(CE_WARN,
+ "XFS: osyncisdsync is now the default, option is deprecated.");
} else if (!strcmp(this_char, "irixsgid")) {
-printk("XFS: irixsgid is now a sysctl(2) variable, option is deprecated.\n");
+ cmn_err(CE_WARN,
+ "XFS: irixsgid is now a sysctl(2) variable, option is deprecated.");
} else {
- printk("XFS: unknown mount option [%s].\n", this_char);
+ cmn_err(CE_WARN,
+ "XFS: unknown mount option [%s].", this_char);
return EINVAL;
}
}
if (args->flags & XFSMNT_NORECOVERY) {
if ((vfsp->vfs_flag & VFS_RDONLY) == 0) {
- printk("XFS: no-recovery mounts must be read-only.\n");
+ cmn_err(CE_WARN,
+ "XFS: no-recovery mounts must be read-only.");
return EINVAL;
}
}
if ((args->flags & XFSMNT_NOALIGN) && (dsunit || dswidth)) {
- printk(
- "XFS: sunit and swidth options incompatible with the noalign option\n");
+ cmn_err(CE_WARN,
+ "XFS: sunit and swidth options incompatible with the noalign option");
return EINVAL;
}
if ((dsunit && !dswidth) || (!dsunit && dswidth)) {
- printk("XFS: sunit and swidth must be specified together\n");
+ cmn_err(CE_WARN,
+ "XFS: sunit and swidth must be specified together");
return EINVAL;
}
if (dsunit && (dswidth % dsunit != 0)) {
- printk(
- "XFS: stripe width (%d) must be a multiple of the stripe unit (%d)\n",
+ cmn_err(CE_WARN,
+ "XFS: stripe width (%d) must be a multiple of the stripe unit (%d)",
dswidth, dsunit);
return EINVAL;
}
@@ -1907,7 +1913,7 @@ xfs_showargs(
};
struct proc_xfs_info *xfs_infop;
struct xfs_mount *mp = XFS_BHVTOM(bhv);
- struct vfs *vfsp = XFS_MTOVFS(mp);
+ struct bhv_vfs *vfsp = XFS_MTOVFS(mp);
for (xfs_infop = xfs_info; xfs_infop->flag; xfs_infop++) {
if (mp->m_flags & xfs_infop->flag)
@@ -1967,7 +1973,7 @@ xfs_freeze(
}
-vfsops_t xfs_vfsops = {
+bhv_vfsops_t xfs_vfsops = {
BHV_IDENTITY_INIT(VFS_BHV_XFS,VFS_POSITION_XFS),
.vfs_parseargs = xfs_parseargs,
.vfs_showargs = xfs_showargs,
diff --git a/fs/xfs/xfs_vnodeops.c b/fs/xfs/xfs_vnodeops.c
index 7027ae68ee3..00a6b7dc24a 100644
--- a/fs/xfs/xfs_vnodeops.c
+++ b/fs/xfs/xfs_vnodeops.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2000-2005 Silicon Graphics, Inc.
+ * Copyright (c) 2000-2006 Silicon Graphics, Inc.
* All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
@@ -16,8 +16,6 @@
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*/
-#include <linux/capability.h>
-
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_types.h"
@@ -27,7 +25,6 @@
#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
-#include "xfs_dir.h"
#include "xfs_dir2.h"
#include "xfs_dmapi.h"
#include "xfs_mount.h"
@@ -35,13 +32,11 @@
#include "xfs_bmap_btree.h"
#include "xfs_alloc_btree.h"
#include "xfs_ialloc_btree.h"
-#include "xfs_dir_sf.h"
#include "xfs_dir2_sf.h"
#include "xfs_attr_sf.h"
#include "xfs_dinode.h"
#include "xfs_inode.h"
#include "xfs_inode_item.h"
-#include "xfs_dir_leaf.h"
#include "xfs_itable.h"
#include "xfs_btree.h"
#include "xfs_ialloc.h"
@@ -58,32 +53,14 @@
#include "xfs_log_priv.h"
#include "xfs_mac.h"
-
-/*
- * The maximum pathlen is 1024 bytes. Since the minimum file system
- * blocksize is 512 bytes, we can get a max of 2 extents back from
- * bmapi.
- */
-#define SYMLINK_MAPS 2
-
-/*
- * For xfs, we check that the file isn't too big to be opened by this kernel.
- * No other open action is required for regular files. Devices are handled
- * through the specfs file system, pipes through fifofs. Device and
- * fifo vnodes are "wrapped" by specfs and fifofs vnodes, respectively,
- * when a new vnode is first looked up or created.
- */
STATIC int
xfs_open(
bhv_desc_t *bdp,
cred_t *credp)
{
int mode;
- vnode_t *vp;
- xfs_inode_t *ip;
-
- vp = BHV_TO_VNODE(bdp);
- ip = XFS_BHVTOI(bdp);
+ bhv_vnode_t *vp = BHV_TO_VNODE(bdp);
+ xfs_inode_t *ip = XFS_BHVTOI(bdp);
if (XFS_FORCED_SHUTDOWN(ip->i_mount))
return XFS_ERROR(EIO);
@@ -101,6 +78,35 @@ xfs_open(
return 0;
}
+STATIC int
+xfs_close(
+ bhv_desc_t *bdp,
+ int flags,
+ lastclose_t lastclose,
+ cred_t *credp)
+{
+ bhv_vnode_t *vp = BHV_TO_VNODE(bdp);
+ xfs_inode_t *ip = XFS_BHVTOI(bdp);
+
+ if (XFS_FORCED_SHUTDOWN(ip->i_mount))
+ return XFS_ERROR(EIO);
+
+ if (lastclose != L_TRUE || !VN_ISREG(vp))
+ return 0;
+
+ /*
+ * If we previously truncated this file and removed old data in
+ * the process, we want to initiate "early" writeout on the last
+ * close. This is an attempt to combat the notorious NULL files
+ * problem which is particularly noticeable from a truncate down,
+ * buffered (re-)write (delalloc), followed by a crash. What we
+ * are effectively doing here is significantly reducing the time
+ * window where we'd otherwise be exposed to that problem.
+ */
+ if (VUNTRUNCATE(vp) && VN_DIRTY(vp) && ip->i_delayed_blks > 0)
+ return bhv_vop_flush_pages(vp, 0, -1, XFS_B_ASYNC, FI_NONE);
+ return 0;
+}
/*
* xfs_getattr
@@ -108,13 +114,13 @@ xfs_open(
STATIC int
xfs_getattr(
bhv_desc_t *bdp,
- vattr_t *vap,
+ bhv_vattr_t *vap,
int flags,
cred_t *credp)
{
xfs_inode_t *ip;
xfs_mount_t *mp;
- vnode_t *vp;
+ bhv_vnode_t *vp;
vp = BHV_TO_VNODE(bdp);
vn_trace_entry(vp, __FUNCTION__, (inst_t *)__return_address);
@@ -241,7 +247,7 @@ xfs_getattr(
int
xfs_setattr(
bhv_desc_t *bdp,
- vattr_t *vap,
+ bhv_vattr_t *vap,
int flags,
cred_t *credp)
{
@@ -255,7 +261,7 @@ xfs_setattr(
uid_t uid=0, iuid=0;
gid_t gid=0, igid=0;
int timeflags = 0;
- vnode_t *vp;
+ bhv_vnode_t *vp;
xfs_prid_t projid=0, iprojid=0;
int mandlock_before, mandlock_after;
struct xfs_dquot *udqp, *gdqp, *olddquot1, *olddquot2;
@@ -347,7 +353,6 @@ xfs_setattr(
*/
tp = NULL;
lock_flags = XFS_ILOCK_EXCL;
- ASSERT(flags & ATTR_NOLOCK ? flags & ATTR_DMI : 1);
if (flags & ATTR_NOLOCK)
need_iolock = 0;
if (!(mask & XFS_AT_SIZE)) {
@@ -666,9 +671,17 @@ xfs_setattr(
((ip->i_d.di_nlink != 0 ||
!(mp->m_flags & XFS_MOUNT_WSYNC))
? 1 : 0));
- if (code) {
+ if (code)
goto abort_return;
- }
+ /*
+ * Truncated "down", so we're removing references
+ * to old data here - if we now delay flushing for
+ * a long time, we expose ourselves unduly to the
+ * notorious NULL files problem. So, we mark this
+ * vnode and flush it when the file is closed, and
+ * do not wait the usual (long) time for writeout.
+ */
+ VTRUNCATE(vp);
}
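/*
 * Minimal sketch of the flag handshake described above, using
 * hypothetical names (my_inode, MYFL_UNTRUNCATE, start_async_writeback)
 * rather than the real vnode/XFS interfaces: the truncate path only
 * records that old data was removed, and the last-close path checks the
 * flag plus remaining dirty delayed-allocation data before starting a
 * non-blocking flush.
 */
struct my_inode {
	unsigned int	flags;		/* MYFL_* bits */
	unsigned long	dirty_blocks;	/* delayed-allocation blocks still unwritten */
};
#define MYFL_UNTRUNCATE	0x1		/* truncated down; old blocks were freed */

/* Stand-in for the real async flush (bhv_vop_flush_pages with XFS_B_ASYNC). */
static int start_async_writeback(struct my_inode *ip)
{
	(void)ip;	/* a real implementation would queue writeback here */
	return 0;
}

static void my_truncate_down(struct my_inode *ip)
{
	/* ...blocks beyond the new size are freed here... */
	ip->flags |= MYFL_UNTRUNCATE;	/* remember this for the eventual last close */
}

static int my_last_close(struct my_inode *ip)
{
	if ((ip->flags & MYFL_UNTRUNCATE) && ip->dirty_blocks > 0)
		return start_async_writeback(ip);	/* start writeout, do not wait */
	return 0;
}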
/*
* Have to do this even if the file's size doesn't change.
@@ -800,6 +813,8 @@ xfs_setattr(
di_flags |= XFS_DIFLAG_NODUMP;
if (vap->va_xflags & XFS_XFLAG_PROJINHERIT)
di_flags |= XFS_DIFLAG_PROJINHERIT;
+ if (vap->va_xflags & XFS_XFLAG_NODEFRAG)
+ di_flags |= XFS_DIFLAG_NODEFRAG;
if ((ip->i_d.di_mode & S_IFMT) == S_IFDIR) {
if (vap->va_xflags & XFS_XFLAG_RTINHERIT)
di_flags |= XFS_DIFLAG_RTINHERIT;
@@ -869,7 +884,7 @@ xfs_setattr(
*/
mandlock_after = MANDLOCK(vp, ip->i_d.di_mode);
if (mandlock_before != mandlock_after) {
- VOP_VNODE_CHANGE(vp, VCHANGE_FLAGS_ENF_LOCKING,
+ bhv_vop_vnode_change(vp, VCHANGE_FLAGS_ENF_LOCKING,
mandlock_after);
}
@@ -936,6 +951,13 @@ xfs_access(
/*
+ * The maximum pathlen is 1024 bytes. Since the minimum file system
+ * blocksize is 512 bytes, we can get a max of 2 extents back from
+ * bmapi.
+ */
+#define SYMLINK_MAPS 2
+
+/*
* xfs_readlink
*
*/
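/*
 * The arithmetic behind SYMLINK_MAPS, written out as a standalone,
 * compile-checked sketch (MAXPATHLEN_ASSUMED and MIN_BLOCKSIZE_ASSUMED
 * are illustrative names, not the kernel's own symbols): a 1024-byte
 * symlink target occupies at most ceil(1024 / 512) = 2 blocks on a
 * 512-byte-block filesystem, so bmapi can return at most 2 extents.
 */
#include <assert.h>

#define MAXPATHLEN_ASSUMED	1024	/* longest symlink target considered here */
#define MIN_BLOCKSIZE_ASSUMED	512	/* smallest supported filesystem block size */
#define SYMLINK_MAPS_SKETCH \
	((MAXPATHLEN_ASSUMED + MIN_BLOCKSIZE_ASSUMED - 1) / MIN_BLOCKSIZE_ASSUMED)

static_assert(SYMLINK_MAPS_SKETCH == 2, "symlink target fits in two bmapi extents");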
@@ -950,7 +972,7 @@ xfs_readlink(
int count;
xfs_off_t offset;
int pathlen;
- vnode_t *vp;
+ bhv_vnode_t *vp;
int error = 0;
xfs_mount_t *mp;
int nmaps;
@@ -1000,7 +1022,7 @@ xfs_readlink(
nmaps = SYMLINK_MAPS;
error = xfs_bmapi(NULL, ip, 0, XFS_B_TO_FSB(mp, pathlen),
- 0, NULL, 0, mval, &nmaps, NULL);
+ 0, NULL, 0, mval, &nmaps, NULL, NULL);
if (error) {
goto error_return;
@@ -1208,8 +1230,8 @@ xfs_inactive_free_eofblocks(
nimaps = 1;
xfs_ilock(ip, XFS_ILOCK_SHARED);
- error = xfs_bmapi(NULL, ip, end_fsb, map_len, 0,
- NULL, 0, &imap, &nimaps, NULL);
+ error = XFS_BMAPI(mp, NULL, &ip->i_iocore, end_fsb, map_len, 0,
+ NULL, 0, &imap, &nimaps, NULL, NULL);
xfs_iunlock(ip, XFS_ILOCK_SHARED);
if (!error && (nimaps != 0) &&
@@ -1338,7 +1360,7 @@ xfs_inactive_symlink_rmt(
nmaps = ARRAY_SIZE(mval);
if ((error = xfs_bmapi(tp, ip, 0, XFS_B_TO_FSB(mp, size),
XFS_BMAPI_METADATA, &first_block, 0, mval, &nmaps,
- &free_list)))
+ &free_list, NULL)))
goto error0;
/*
* Invalidate the block(s).
@@ -1353,7 +1375,7 @@ xfs_inactive_symlink_rmt(
* Unmap the dead block(s) to the free_list.
*/
if ((error = xfs_bunmapi(tp, ip, 0, size, XFS_BMAPI_METADATA, nmaps,
- &first_block, &free_list, &done)))
+ &first_block, &free_list, NULL, &done)))
goto error1;
ASSERT(done);
/*
@@ -1469,9 +1491,6 @@ xfs_inactive_symlink_local(
return 0;
}
-/*
- *
- */
STATIC int
xfs_inactive_attrs(
xfs_inode_t *ip,
@@ -1524,16 +1543,16 @@ xfs_release(
bhv_desc_t *bdp)
{
xfs_inode_t *ip;
- vnode_t *vp;
+ bhv_vnode_t *vp;
xfs_mount_t *mp;
int error;
vp = BHV_TO_VNODE(bdp);
ip = XFS_BHVTOI(bdp);
+ mp = ip->i_mount;
- if (!VN_ISREG(vp) || (ip->i_d.di_mode == 0)) {
+ if (!VN_ISREG(vp) || (ip->i_d.di_mode == 0))
return 0;
- }
/* If this is a read-only mount, don't do this (would generate I/O) */
if (vp->v_vfsp->vfs_flag & VFS_RDONLY)
@@ -1545,8 +1564,6 @@ xfs_release(
return 0;
#endif
- mp = ip->i_mount;
-
if (ip->i_d.di_nlink != 0) {
if ((((ip->i_d.di_mode & S_IFMT) == S_IFREG) &&
((ip->i_d.di_size > 0) || (VN_CACHED(vp) > 0 ||
@@ -1579,8 +1596,8 @@ xfs_inactive(
cred_t *credp)
{
xfs_inode_t *ip;
- vnode_t *vp;
- xfs_bmap_free_t free_list;
+ bhv_vnode_t *vp;
+ xfs_bmap_free_t free_list;
xfs_fsblock_t first_block;
int committed;
xfs_trans_t *tp;
@@ -1760,7 +1777,7 @@ xfs_inactive(
cmn_err(CE_NOTE,
"xfs_inactive: xfs_ifree() returned an error = %d on %s",
error, mp->m_fsname);
- xfs_force_shutdown(mp, XFS_METADATA_IO_ERROR);
+ xfs_force_shutdown(mp, SHUTDOWN_META_IO_ERROR);
}
xfs_trans_cancel(tp, XFS_TRANS_RELEASE_LOG_RES|XFS_TRANS_ABORT);
} else {
@@ -1795,17 +1812,17 @@ xfs_inactive(
STATIC int
xfs_lookup(
bhv_desc_t *dir_bdp,
- vname_t *dentry,
- vnode_t **vpp,
+ bhv_vname_t *dentry,
+ bhv_vnode_t **vpp,
int flags,
- vnode_t *rdir,
+ bhv_vnode_t *rdir,
cred_t *credp)
{
xfs_inode_t *dp, *ip;
xfs_ino_t e_inum;
int error;
uint lock_mode;
- vnode_t *dir_vp;
+ bhv_vnode_t *dir_vp;
dir_vp = BHV_TO_VNODE(dir_bdp);
vn_trace_entry(dir_vp, __FUNCTION__, (inst_t *)__return_address);
@@ -1832,15 +1849,15 @@ xfs_lookup(
STATIC int
xfs_create(
bhv_desc_t *dir_bdp,
- vname_t *dentry,
- vattr_t *vap,
- vnode_t **vpp,
+ bhv_vname_t *dentry,
+ bhv_vattr_t *vap,
+ bhv_vnode_t **vpp,
cred_t *credp)
{
char *name = VNAME(dentry);
- vnode_t *dir_vp;
+ bhv_vnode_t *dir_vp;
xfs_inode_t *dp, *ip;
- vnode_t *vp=NULL;
+ bhv_vnode_t *vp = NULL;
xfs_trans_t *tp;
xfs_mount_t *mp;
xfs_dev_t rdev;
@@ -1938,8 +1955,7 @@ xfs_create(
if (error)
goto error_return;
- if (resblks == 0 &&
- (error = XFS_DIR_CANENTER(mp, tp, dp, name, namelen)))
+ if (resblks == 0 && (error = xfs_dir_canenter(tp, dp, name, namelen)))
goto error_return;
rdev = (vap->va_mask & XFS_AT_RDEV) ? vap->va_rdev : 0;
error = xfs_dir_ialloc(&tp, dp, vap->va_mode, 1,
@@ -1970,9 +1986,9 @@ xfs_create(
xfs_trans_ijoin(tp, dp, XFS_ILOCK_EXCL);
dp_joined_to_trans = B_TRUE;
- error = XFS_DIR_CREATENAME(mp, tp, dp, name, namelen, ip->i_ino,
- &first_block, &free_list,
- resblks ? resblks - XFS_IALLOC_SPACE_RES(mp) : 0);
+ error = xfs_dir_createname(tp, dp, name, namelen, ip->i_ino,
+ &first_block, &free_list, resblks ?
+ resblks - XFS_IALLOC_SPACE_RES(mp) : 0);
if (error) {
ASSERT(error != ENOSPC);
goto abort_return;
@@ -2026,7 +2042,7 @@ xfs_create(
* Propagate the fact that the vnode changed after the
* xfs_inode locks have been released.
*/
- VOP_VNODE_CHANGE(vp, VCHANGE_FLAGS_TRUNCATED, 3);
+ bhv_vop_vnode_change(vp, VCHANGE_FLAGS_TRUNCATED, 3);
*vpp = vp;
@@ -2107,7 +2123,7 @@ int xfs_rm_attempts;
STATIC int
xfs_lock_dir_and_entry(
xfs_inode_t *dp,
- vname_t *dentry,
+ bhv_vname_t *dentry,
xfs_inode_t *ip) /* inode of entry 'name' */
{
int attempts;
@@ -2321,10 +2337,10 @@ int remove_which_error_return = 0;
STATIC int
xfs_remove(
bhv_desc_t *dir_bdp,
- vname_t *dentry,
+ bhv_vname_t *dentry,
cred_t *credp)
{
- vnode_t *dir_vp;
+ bhv_vnode_t *dir_vp;
char *name = VNAME(dentry);
xfs_inode_t *dp, *ip;
xfs_trans_t *tp = NULL;
@@ -2448,8 +2464,8 @@ xfs_remove(
* Entry must exist since we did a lookup in xfs_lock_dir_and_entry.
*/
XFS_BMAP_INIT(&free_list, &first_block);
- error = XFS_DIR_REMOVENAME(mp, tp, dp, name, namelen, ip->i_ino,
- &first_block, &free_list, 0);
+ error = xfs_dir_removename(tp, dp, name, namelen, ip->i_ino,
+ &first_block, &free_list, 0);
if (error) {
ASSERT(error != ENOENT);
REMOVE_DEBUG_TRACE(__LINE__);
@@ -2511,7 +2527,7 @@ xfs_remove(
/*
* Let interposed file systems know about removed links.
*/
- VOP_LINK_REMOVED(XFS_ITOV(ip), dir_vp, link_zero);
+ bhv_vop_link_removed(XFS_ITOV(ip), dir_vp, link_zero);
IRELE(ip);
@@ -2564,8 +2580,8 @@ xfs_remove(
STATIC int
xfs_link(
bhv_desc_t *target_dir_bdp,
- vnode_t *src_vp,
- vname_t *dentry,
+ bhv_vnode_t *src_vp,
+ bhv_vname_t *dentry,
cred_t *credp)
{
xfs_inode_t *tdp, *sip;
@@ -2577,7 +2593,7 @@ xfs_link(
xfs_fsblock_t first_block;
int cancel_flags;
int committed;
- vnode_t *target_dir_vp;
+ bhv_vnode_t *target_dir_vp;
int resblks;
char *target_name = VNAME(dentry);
int target_namelen;
@@ -2668,13 +2684,12 @@ xfs_link(
}
if (resblks == 0 &&
- (error = XFS_DIR_CANENTER(mp, tp, tdp, target_name,
- target_namelen)))
+ (error = xfs_dir_canenter(tp, tdp, target_name, target_namelen)))
goto error_return;
XFS_BMAP_INIT(&free_list, &first_block);
- error = XFS_DIR_CREATENAME(mp, tp, tdp, target_name, target_namelen,
+ error = xfs_dir_createname(tp, tdp, target_name, target_namelen,
sip->i_ino, &first_block, &free_list,
resblks);
if (error)
@@ -2734,15 +2749,15 @@ std_return:
STATIC int
xfs_mkdir(
bhv_desc_t *dir_bdp,
- vname_t *dentry,
- vattr_t *vap,
- vnode_t **vpp,
+ bhv_vname_t *dentry,
+ bhv_vattr_t *vap,
+ bhv_vnode_t **vpp,
cred_t *credp)
{
char *dir_name = VNAME(dentry);
xfs_inode_t *dp;
xfs_inode_t *cdp; /* inode of created dir */
- vnode_t *cvp; /* vnode of created dir */
+ bhv_vnode_t *cvp; /* vnode of created dir */
xfs_trans_t *tp;
xfs_mount_t *mp;
int cancel_flags;
@@ -2750,7 +2765,7 @@ xfs_mkdir(
int committed;
xfs_bmap_free_t free_list;
xfs_fsblock_t first_block;
- vnode_t *dir_vp;
+ bhv_vnode_t *dir_vp;
boolean_t dp_joined_to_trans;
boolean_t created = B_FALSE;
int dm_event_sent = 0;
@@ -2840,7 +2855,7 @@ xfs_mkdir(
goto error_return;
if (resblks == 0 &&
- (error = XFS_DIR_CANENTER(mp, tp, dp, dir_name, dir_namelen)))
+ (error = xfs_dir_canenter(tp, dp, dir_name, dir_namelen)))
goto error_return;
/*
* create the directory inode.
@@ -2867,9 +2882,9 @@ xfs_mkdir(
XFS_BMAP_INIT(&free_list, &first_block);
- error = XFS_DIR_CREATENAME(mp, tp, dp, dir_name, dir_namelen,
- cdp->i_ino, &first_block, &free_list,
- resblks ? resblks - XFS_IALLOC_SPACE_RES(mp) : 0);
+ error = xfs_dir_createname(tp, dp, dir_name, dir_namelen, cdp->i_ino,
+ &first_block, &free_list, resblks ?
+ resblks - XFS_IALLOC_SPACE_RES(mp) : 0);
if (error) {
ASSERT(error != ENOSPC);
goto error1;
@@ -2883,16 +2898,14 @@ xfs_mkdir(
*/
dp->i_gen++;
- error = XFS_DIR_INIT(mp, tp, cdp, dp);
- if (error) {
+ error = xfs_dir_init(tp, cdp, dp);
+ if (error)
goto error2;
- }
cdp->i_gen = 1;
error = xfs_bumplink(tp, dp);
- if (error) {
+ if (error)
goto error2;
- }
cvp = XFS_ITOV(cdp);
@@ -2969,7 +2982,7 @@ std_return:
STATIC int
xfs_rmdir(
bhv_desc_t *dir_bdp,
- vname_t *dentry,
+ bhv_vname_t *dentry,
cred_t *credp)
{
char *name = VNAME(dentry);
@@ -2982,7 +2995,7 @@ xfs_rmdir(
xfs_fsblock_t first_block;
int cancel_flags;
int committed;
- vnode_t *dir_vp;
+ bhv_vnode_t *dir_vp;
int dm_di_mode = 0;
int last_cdp_link;
int namelen;
@@ -3101,16 +3114,15 @@ xfs_rmdir(
error = XFS_ERROR(ENOTEMPTY);
goto error_return;
}
- if (!XFS_DIR_ISEMPTY(mp, cdp)) {
+ if (!xfs_dir_isempty(cdp)) {
error = XFS_ERROR(ENOTEMPTY);
goto error_return;
}
- error = XFS_DIR_REMOVENAME(mp, tp, dp, name, namelen, cdp->i_ino,
- &first_block, &free_list, resblks);
- if (error) {
+ error = xfs_dir_removename(tp, dp, name, namelen, cdp->i_ino,
+ &first_block, &free_list, resblks);
+ if (error)
goto error1;
- }
xfs_ichgtime(dp, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
@@ -3181,7 +3193,7 @@ xfs_rmdir(
/*
* Let interposed file systems know about removed links.
*/
- VOP_LINK_REMOVED(XFS_ITOV(cdp), dir_vp, last_cdp_link);
+ bhv_vop_link_removed(XFS_ITOV(cdp), dir_vp, last_cdp_link);
IRELE(cdp);
@@ -3209,8 +3221,6 @@ xfs_rmdir(
/*
- * xfs_readdir
- *
* Read dp's entries starting at uiop->uio_offset and translate them into
* bufsize bytes worth of struct dirents starting at bufbase.
*/
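/*
 * Userspace analogue of that contract (illustrative only; dump_from and
 * its use of seekdir/telldir are this example's, not kernel code):
 * resume reading a directory at a previously saved offset cookie and
 * emit each entry, the way xfs_readdir resumes at uiop->uio_offset and
 * fills the caller's dirent buffer.
 */
#include <dirent.h>
#include <stdio.h>

int dump_from(const char *path, long start_cookie)
{
	DIR *d = opendir(path);
	struct dirent *de;

	if (!d)
		return -1;
	seekdir(d, start_cookie);	/* start_cookie must come from an earlier telldir(d) */
	while ((de = readdir(d)) != NULL)
		printf("%ld\t%s\n", telldir(d), de->d_name);
	closedir(d);
	return 0;
}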
@@ -3230,28 +3240,23 @@ xfs_readdir(
(inst_t *)__return_address);
dp = XFS_BHVTOI(dir_bdp);
- if (XFS_FORCED_SHUTDOWN(dp->i_mount)) {
+ if (XFS_FORCED_SHUTDOWN(dp->i_mount))
return XFS_ERROR(EIO);
- }
lock_mode = xfs_ilock_map_shared(dp);
- error = XFS_DIR_GETDENTS(dp->i_mount, tp, dp, uiop, eofp);
+ error = xfs_dir_getdents(tp, dp, uiop, eofp);
xfs_iunlock_map_shared(dp, lock_mode);
return error;
}
-/*
- * xfs_symlink
- *
- */
STATIC int
xfs_symlink(
bhv_desc_t *dir_bdp,
- vname_t *dentry,
- vattr_t *vap,
+ bhv_vname_t *dentry,
+ bhv_vattr_t *vap,
char *target_path,
- vnode_t **vpp,
+ bhv_vnode_t **vpp,
cred_t *credp)
{
xfs_trans_t *tp;
@@ -3263,7 +3268,7 @@ xfs_symlink(
xfs_bmap_free_t free_list;
xfs_fsblock_t first_block;
boolean_t dp_joined_to_trans;
- vnode_t *dir_vp;
+ bhv_vnode_t *dir_vp;
uint cancel_flags;
int committed;
xfs_fileoff_t first_fsb;
@@ -3308,7 +3313,7 @@ xfs_symlink(
int len, total;
char *path;
- for(total = 0, path = target_path; total < pathlen;) {
+ for (total = 0, path = target_path; total < pathlen;) {
/*
* Skip any slashes.
*/
@@ -3402,7 +3407,7 @@ xfs_symlink(
* Check for ability to enter directory entry, if no space reserved.
*/
if (resblks == 0 &&
- (error = XFS_DIR_CANENTER(mp, tp, dp, link_name, link_namelen)))
+ (error = xfs_dir_canenter(tp, dp, link_name, link_namelen)))
goto error_return;
/*
* Initialize the bmap freelist prior to calling either
@@ -3457,7 +3462,7 @@ xfs_symlink(
error = xfs_bmapi(tp, ip, first_fsb, fs_blocks,
XFS_BMAPI_WRITE | XFS_BMAPI_METADATA,
&first_block, resblks, mval, &nmaps,
- &free_list);
+ &free_list, NULL);
if (error) {
goto error1;
}
@@ -3489,11 +3494,10 @@ xfs_symlink(
/*
* Create the directory entry for the symlink.
*/
- error = XFS_DIR_CREATENAME(mp, tp, dp, link_name, link_namelen,
- ip->i_ino, &first_block, &free_list, resblks);
- if (error) {
+ error = xfs_dir_createname(tp, dp, link_name, link_namelen, ip->i_ino,
+ &first_block, &free_list, resblks);
+ if (error)
goto error1;
- }
xfs_ichgtime(dp, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
xfs_trans_log_inode(tp, dp, XFS_ILOG_CORE);
@@ -3541,7 +3545,7 @@ std_return:
}
if (!error) {
- vnode_t *vp;
+ bhv_vnode_t *vp;
ASSERT(ip);
vp = XFS_ITOV(ip);
@@ -3606,10 +3610,10 @@ xfs_fid2(
int
xfs_rwlock(
bhv_desc_t *bdp,
- vrwlock_t locktype)
+ bhv_vrwlock_t locktype)
{
xfs_inode_t *ip;
- vnode_t *vp;
+ bhv_vnode_t *vp;
vp = BHV_TO_VNODE(bdp);
if (VN_ISDIR(vp))
@@ -3637,10 +3641,10 @@ xfs_rwlock(
void
xfs_rwunlock(
bhv_desc_t *bdp,
- vrwlock_t locktype)
+ bhv_vrwlock_t locktype)
{
xfs_inode_t *ip;
- vnode_t *vp;
+ bhv_vnode_t *vp;
vp = BHV_TO_VNODE(bdp);
if (VN_ISDIR(vp))
@@ -3744,7 +3748,6 @@ xfs_inode_flush(
return error;
}
-
int
xfs_set_dmattrs (
bhv_desc_t *bdp,
@@ -3785,16 +3788,12 @@ xfs_set_dmattrs (
return error;
}
-
-/*
- * xfs_reclaim
- */
STATIC int
xfs_reclaim(
bhv_desc_t *bdp)
{
xfs_inode_t *ip;
- vnode_t *vp;
+ bhv_vnode_t *vp;
vp = BHV_TO_VNODE(bdp);
ip = XFS_BHVTOI(bdp);
@@ -3849,7 +3848,7 @@ xfs_finish_reclaim(
int sync_mode)
{
xfs_ihash_t *ih = ip->i_hash;
- vnode_t *vp = XFS_ITOV_NULL(ip);
+ bhv_vnode_t *vp = XFS_ITOV_NULL(ip);
int error;
if (vp && VN_BAD(vp))
@@ -4116,10 +4115,10 @@ retry:
* Issue the xfs_bmapi() call to allocate the blocks
*/
XFS_BMAP_INIT(&free_list, &firstfsb);
- error = xfs_bmapi(tp, ip, startoffset_fsb,
+ error = XFS_BMAPI(mp, tp, &ip->i_iocore, startoffset_fsb,
allocatesize_fsb, bmapi_flag,
&firstfsb, 0, imapp, &nimaps,
- &free_list);
+ &free_list, NULL);
if (error) {
goto error0;
}
@@ -4199,8 +4198,8 @@ xfs_zero_remaining_bytes(
for (offset = startoff; offset <= endoff; offset = lastoffset + 1) {
offset_fsb = XFS_B_TO_FSBT(mp, offset);
nimap = 1;
- error = xfs_bmapi(NULL, ip, offset_fsb, 1, 0, NULL, 0, &imap,
- &nimap, NULL);
+ error = XFS_BMAPI(mp, NULL, &ip->i_iocore, offset_fsb, 1, 0,
+ NULL, 0, &imap, &nimap, NULL, NULL);
if (error || nimap < 1)
break;
ASSERT(imap.br_blockcount >= 1);
@@ -4259,7 +4258,7 @@ xfs_free_file_space(
xfs_off_t len,
int attr_flags)
{
- vnode_t *vp;
+ bhv_vnode_t *vp;
int committed;
int done;
xfs_off_t end_dmi_offset;
@@ -4308,7 +4307,6 @@ xfs_free_file_space(
return error;
}
- ASSERT(attr_flags & ATTR_NOLOCK ? attr_flags & ATTR_DMI : 1);
if (attr_flags & ATTR_NOLOCK)
need_iolock = 0;
if (need_iolock) {
@@ -4326,7 +4324,7 @@ xfs_free_file_space(
if (VN_CACHED(vp) != 0) {
xfs_inval_cached_trace(&ip->i_iocore, ioffset, -1,
ctooff(offtoct(ioffset)), -1);
- VOP_FLUSHINVAL_PAGES(vp, ctooff(offtoct(ioffset)),
+ bhv_vop_flushinval_pages(vp, ctooff(offtoct(ioffset)),
-1, FI_REMAPF_LOCKED);
}
@@ -4338,8 +4336,8 @@ xfs_free_file_space(
*/
if (rt && !XFS_SB_VERSION_HASEXTFLGBIT(&mp->m_sb)) {
nimap = 1;
- error = xfs_bmapi(NULL, ip, startoffset_fsb, 1, 0, NULL, 0,
- &imap, &nimap, NULL);
+ error = XFS_BMAPI(mp, NULL, &ip->i_iocore, startoffset_fsb,
+ 1, 0, NULL, 0, &imap, &nimap, NULL, NULL);
if (error)
goto out_unlock_iolock;
ASSERT(nimap == 0 || nimap == 1);
@@ -4353,8 +4351,8 @@ xfs_free_file_space(
startoffset_fsb += mp->m_sb.sb_rextsize - mod;
}
nimap = 1;
- error = xfs_bmapi(NULL, ip, endoffset_fsb - 1, 1, 0, NULL, 0,
- &imap, &nimap, NULL);
+ error = XFS_BMAPI(mp, NULL, &ip->i_iocore, endoffset_fsb - 1,
+ 1, 0, NULL, 0, &imap, &nimap, NULL, NULL);
if (error)
goto out_unlock_iolock;
ASSERT(nimap == 0 || nimap == 1);
@@ -4426,9 +4424,9 @@ xfs_free_file_space(
* issue the bunmapi() call to free the blocks
*/
XFS_BMAP_INIT(&free_list, &firstfsb);
- error = xfs_bunmapi(tp, ip, startoffset_fsb,
+ error = XFS_BUNMAPI(mp, tp, &ip->i_iocore, startoffset_fsb,
endoffset_fsb - startoffset_fsb,
- 0, 2, &firstfsb, &free_list, &done);
+ 0, 2, &firstfsb, &free_list, NULL, &done);
if (error) {
goto error0;
}
@@ -4488,8 +4486,8 @@ xfs_change_file_space(
xfs_off_t startoffset;
xfs_off_t llen;
xfs_trans_t *tp;
- vattr_t va;
- vnode_t *vp;
+ bhv_vattr_t va;
+ bhv_vnode_t *vp;
vp = BHV_TO_VNODE(bdp);
vn_trace_entry(vp, __FUNCTION__, (inst_t *)__return_address);
@@ -4642,9 +4640,10 @@ xfs_change_file_space(
return error;
}
-vnodeops_t xfs_vnodeops = {
+bhv_vnodeops_t xfs_vnodeops = {
BHV_IDENTITY_INIT(VN_BHV_XFS,VNODE_POSITION_XFS),
.vop_open = xfs_open,
+ .vop_close = xfs_close,
.vop_read = xfs_read,
#ifdef HAVE_SENDFILE
.vop_sendfile = xfs_sendfile,