Diffstat (limited to 'fs')
-rw-r--r--  fs/9p/vfs_super.c  7
-rw-r--r--  fs/Kconfig  180
-rw-r--r--  fs/binfmt_elf.c  23
-rw-r--r--  fs/binfmt_misc.c  18
-rw-r--r--  fs/binfmt_som.c  10
-rw-r--r--  fs/cifs/CHANGES  3
-rw-r--r--  fs/cifs/README  9
-rw-r--r--  fs/cifs/cifs_dfs_ref.c  31
-rw-r--r--  fs/cifs/cifsacl.c  14
-rw-r--r--  fs/cifs/cifsacl.h  1
-rw-r--r--  fs/cifs/cifsfs.c  10
-rw-r--r--  fs/cifs/cifsfs.h  2
-rw-r--r--  fs/cifs/cifspdu.h  121
-rw-r--r--  fs/cifs/cifsproto.h  9
-rw-r--r--  fs/cifs/cifssmb.c  32
-rw-r--r--  fs/cifs/connect.c  1
-rw-r--r--  fs/cifs/inode.c  15
-rw-r--r--  fs/cifs/transport.c  18
-rw-r--r--  fs/dcache.c  114
-rw-r--r--  fs/dlm/Makefile  1
-rw-r--r--  fs/dlm/config.c  50
-rw-r--r--  fs/dlm/config.h  3
-rw-r--r--  fs/dlm/dlm_internal.h  8
-rw-r--r--  fs/dlm/lock.c  5
-rw-r--r--  fs/dlm/lock.h  1
-rw-r--r--  fs/dlm/main.c  7
-rw-r--r--  fs/dlm/member.c  34
-rw-r--r--  fs/dlm/plock.c (renamed from fs/gfs2/locking/dlm/plock.c)  169
-rw-r--r--  fs/dlm/recoverd.c  1
-rw-r--r--  fs/exec.c  28
-rw-r--r--  fs/fcntl.c  40
-rw-r--r--  fs/fuse/inode.c  5
-rw-r--r--  fs/gfs2/locking/dlm/Makefile  2
-rw-r--r--  fs/gfs2/locking/dlm/lock_dlm.h  12
-rw-r--r--  fs/gfs2/locking/dlm/main.c  8
-rw-r--r--  fs/gfs2/locking/dlm/mount.c  21
-rw-r--r--  fs/internal.h  11
-rw-r--r--  fs/jffs2/README.Locking  22
-rw-r--r--  fs/jffs2/build.c  1
-rw-r--r--  fs/jffs2/debug.c  164
-rw-r--r--  fs/jffs2/debug.h  6
-rw-r--r--  fs/jffs2/dir.c  58
-rw-r--r--  fs/jffs2/erase.c  80
-rw-r--r--  fs/jffs2/file.c  16
-rw-r--r--  fs/jffs2/fs.c  42
-rw-r--r--  fs/jffs2/gc.c  42
-rw-r--r--  fs/jffs2/ioctl.c  1
-rw-r--r--  fs/jffs2/jffs2_fs_i.h  4
-rw-r--r--  fs/jffs2/jffs2_fs_sb.h  7
-rw-r--r--  fs/jffs2/nodelist.h  2
-rw-r--r--  fs/jffs2/nodemgmt.c  24
-rw-r--r--  fs/jffs2/readinode.c  38
-rw-r--r--  fs/jffs2/super.c  14
-rw-r--r--  fs/jffs2/wbuf.c  28
-rw-r--r--  fs/jffs2/write.c  52
-rw-r--r--  fs/lockd/clntproc.c  184
-rw-r--r--  fs/lockd/host.c  93
-rw-r--r--  fs/lockd/mon.c  113
-rw-r--r--  fs/lockd/svc.c  162
-rw-r--r--  fs/lockd/svclock.c  8
-rw-r--r--  fs/lockd/svcshare.c  3
-rw-r--r--  fs/lockd/svcsubs.c  69
-rw-r--r--  fs/locks.c  33
-rw-r--r--  fs/namespace.c  340
-rw-r--r--  fs/nfs/Makefile  3
-rw-r--r--  fs/nfs/callback.c  93
-rw-r--r--  fs/nfs/client.c  23
-rw-r--r--  fs/nfs/dir.c  2
-rw-r--r--  fs/nfs/direct.c  88
-rw-r--r--  fs/nfs/file.c  18
-rw-r--r--  fs/nfs/inode.c  45
-rw-r--r--  fs/nfs/internal.h  13
-rw-r--r--  fs/nfs/namespace.c  2
-rw-r--r--  fs/nfs/nfs2xdr.c  113
-rw-r--r--  fs/nfs/nfs3xdr.c  71
-rw-r--r--  fs/nfs/nfs4proc.c  39
-rw-r--r--  fs/nfs/nfs4state.c  49
-rw-r--r--  fs/nfs/nfs4xdr.c  147
-rw-r--r--  fs/nfs/read.c  94
-rw-r--r--  fs/nfs/super.c  155
-rw-r--r--  fs/nfs/symlink.c  1
-rw-r--r--  fs/nfs/unlink.c  2
-rw-r--r--  fs/nfs/write.c  207
-rw-r--r--  fs/nfsd/auth.c  1
-rw-r--r--  fs/nfsd/export.c  9
-rw-r--r--  fs/nfsd/nfs4callback.c  28
-rw-r--r--  fs/nfsd/nfs4idmap.c  2
-rw-r--r--  fs/nfsd/nfs4state.c  74
-rw-r--r--  fs/nfsd/nfs4xdr.c  27
-rw-r--r--  fs/nfsd/nfsctl.c  87
-rw-r--r--  fs/nfsd/nfsfh.c  228
-rw-r--r--  fs/nfsd/nfssvc.c  2
-rw-r--r--  fs/nfsd/vfs.c  35
-rw-r--r--  fs/pipe.c  3
-rw-r--r--  fs/pnode.c  60
-rw-r--r--  fs/pnode.h  2
-rw-r--r--  fs/proc/base.c  125
-rw-r--r--  fs/read_write.c  6
-rw-r--r--  fs/seq_file.c  113
-rw-r--r--  fs/super.c  1
-rw-r--r--  fs/udf/Makefile  2
-rw-r--r--  fs/udf/balloc.c  13
-rw-r--r--  fs/udf/crc.c  172
-rw-r--r--  fs/udf/dir.c  83
-rw-r--r--  fs/udf/ecma_167.h  13
-rw-r--r--  fs/udf/file.c  47
-rw-r--r--  fs/udf/ialloc.c  13
-rw-r--r--  fs/udf/inode.c  208
-rw-r--r--  fs/udf/lowlevel.c  1
-rw-r--r--  fs/udf/misc.c  26
-rw-r--r--  fs/udf/namei.c  218
-rw-r--r--  fs/udf/partition.c  67
-rw-r--r--  fs/udf/super.c  1262
-rw-r--r--  fs/udf/symlink.c  1
-rw-r--r--  fs/udf/truncate.c  81
-rw-r--r--  fs/udf/udf_i.h  30
-rw-r--r--  fs/udf/udf_sb.h  109
-rw-r--r--  fs/udf/udfdecl.h  67
-rw-r--r--  fs/udf/udfend.h  22
-rw-r--r--  fs/udf/udftime.c  35
-rw-r--r--  fs/udf/unicode.c  62
-rw-r--r--  fs/xattr.c  1
122 files changed, 4127 insertions, 3004 deletions
diff --git a/fs/9p/vfs_super.c b/fs/9p/vfs_super.c
index 678c02f1ae2..a452ac67fc9 100644
--- a/fs/9p/vfs_super.c
+++ b/fs/9p/vfs_super.c
@@ -224,12 +224,11 @@ static int v9fs_show_options(struct seq_file *m, struct vfsmount *mnt)
}
static void
-v9fs_umount_begin(struct vfsmount *vfsmnt, int flags)
+v9fs_umount_begin(struct super_block *sb)
{
- struct v9fs_session_info *v9ses = vfsmnt->mnt_sb->s_fs_info;
+ struct v9fs_session_info *v9ses = sb->s_fs_info;
- if (flags & MNT_FORCE)
- v9fs_session_cancel(v9ses);
+ v9fs_session_cancel(v9ses);
}
static const struct super_operations v9fs_super_ops = {
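[Editor's note] The hunk above reflects the interface change where ->umount_begin now receives only the super_block, with the MNT_FORCE check evidently moved out of the individual filesystems and into the VFS caller. A minimal sketch of how an unrelated filesystem might wire the new hook (the examplefs names and helper below are hypothetical, not part of this patch):

static void examplefs_umount_begin(struct super_block *sb)
{
	struct examplefs_sb_info *sbi = sb->s_fs_info;	/* hypothetical private data */

	/* abort outstanding requests so a forced unmount can proceed */
	examplefs_abort_requests(sbi);			/* hypothetical helper */
}

static const struct super_operations examplefs_super_ops = {
	.umount_begin	= examplefs_umount_begin,
};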
diff --git a/fs/Kconfig b/fs/Kconfig
index 028ae38ecc5..2e43d46f65d 100644
--- a/fs/Kconfig
+++ b/fs/Kconfig
@@ -411,7 +411,7 @@ config JFS_STATISTICS
to be made available to the user in the /proc/fs/jfs/ directory.
config FS_POSIX_ACL
-# Posix ACL utility routines (for now, only ext2/ext3/jfs/reiserfs)
+# Posix ACL utility routines (for now, only ext2/ext3/jfs/reiserfs/nfs4)
#
# NOTE: you can implement Posix ACLs without these helpers (XFS does).
# Never use this symbol for ifdefs.
@@ -689,6 +689,7 @@ config ZISOFS
config UDF_FS
tristate "UDF file system support"
+ select CRC_ITU_T
help
This is the new file system used on some CD-ROMs and DVDs. Say Y if
you intend to mount DVD discs or CDRW's written in packet mode, or
@@ -1663,105 +1664,86 @@ config NFS_V4
If unsure, say N.
-config NFS_DIRECTIO
- bool "Allow direct I/O on NFS files"
- depends on NFS_FS
- help
- This option enables applications to perform uncached I/O on files
- in NFS file systems using the O_DIRECT open() flag. When O_DIRECT
- is set for a file, its data is not cached in the system's page
- cache. Data is moved to and from user-level application buffers
- directly. Unlike local disk-based file systems, NFS O_DIRECT has
- no alignment restrictions.
-
- Unless your program is designed to use O_DIRECT properly, you are
- much better off allowing the NFS client to manage data caching for
- you. Misusing O_DIRECT can cause poor server performance or network
- storms. This kernel build option defaults OFF to avoid exposing
- system administrators unwittingly to a potentially hazardous
- feature.
-
- For more details on NFS O_DIRECT, see fs/nfs/direct.c.
-
- If unsure, say N. This reduces the size of the NFS client, and
- causes open() to return EINVAL if a file residing in NFS is
- opened with the O_DIRECT flag.
-
config NFSD
tristate "NFS server support"
depends on INET
select LOCKD
select SUNRPC
select EXPORTFS
- select NFSD_V2_ACL if NFSD_V3_ACL
select NFS_ACL_SUPPORT if NFSD_V2_ACL
- select NFSD_TCP if NFSD_V4
- select CRYPTO_MD5 if NFSD_V4
- select CRYPTO if NFSD_V4
- select FS_POSIX_ACL if NFSD_V4
- select PROC_FS if NFSD_V4
- select PROC_FS if SUNRPC_GSS
- help
- If you want your Linux box to act as an NFS *server*, so that other
- computers on your local network which support NFS can access certain
- directories on your box transparently, you have two options: you can
- use the self-contained user space program nfsd, in which case you
- should say N here, or you can say Y and use the kernel based NFS
- server. The advantage of the kernel based solution is that it is
- faster.
-
- In either case, you will need support software; the respective
- locations are given in the file <file:Documentation/Changes> in the
- NFS section.
-
- If you say Y here, you will get support for version 2 of the NFS
- protocol (NFSv2). If you also want NFSv3, say Y to the next question
- as well.
-
- Please read the NFS-HOWTO, available from
- <http://www.tldp.org/docs.html#howto>.
-
- To compile the NFS server support as a module, choose M here: the
- module will be called nfsd. If unsure, say N.
+ help
+ Choose Y here if you want to allow other computers to access
+ files residing on this system using Sun's Network File System
+ protocol. To compile the NFS server support as a module,
+ choose M here: the module will be called nfsd.
+
+ You may choose to use a user-space NFS server instead, in which
+ case you can choose N here.
+
+ To export local file systems using NFS, you also need to install
+ user space programs which can be found in the Linux nfs-utils
+ package, available from http://linux-nfs.org/. More detail about
+ the Linux NFS server implementation is available via the
+ exports(5) man page.
+
+ Below you can choose which versions of the NFS protocol are
+ available to clients mounting the NFS server on this system.
+ Support for NFS version 2 (RFC 1094) is always available when
+ CONFIG_NFSD is selected.
+
+ If unsure, say N.
config NFSD_V2_ACL
bool
depends on NFSD
config NFSD_V3
- bool "Provide NFSv3 server support"
+ bool "NFS server support for NFS version 3"
depends on NFSD
help
- If you would like to include the NFSv3 server as well as the NFSv2
- server, say Y here. If unsure, say Y.
+ This option enables support in your system's NFS server for
+ version 3 of the NFS protocol (RFC 1813).
+
+ If unsure, say Y.
config NFSD_V3_ACL
- bool "Provide server support for the NFSv3 ACL protocol extension"
+ bool "NFS server support for the NFSv3 ACL protocol extension"
depends on NFSD_V3
+ select NFSD_V2_ACL
help
- Implement the NFSv3 ACL protocol extension for manipulating POSIX
- Access Control Lists on exported file systems. NFS clients should
- be compiled with the NFSv3 ACL protocol extension; see the
- CONFIG_NFS_V3_ACL option. If unsure, say N.
+ Solaris NFS servers support an auxiliary NFSv3 ACL protocol that
+ never became an official part of the NFS version 3 protocol.
+ This protocol extension allows applications on NFS clients to
+ manipulate POSIX Access Control Lists on files residing on NFS
+ servers. NFS servers enforce POSIX ACLs on local files whether
+ this protocol is available or not.
+
+ This option enables support in your system's NFS server for the
+ NFSv3 ACL protocol extension allowing NFS clients to manipulate
+ POSIX ACLs on files exported by your system's NFS server. NFS
+ clients which support the Solaris NFSv3 ACL protocol can then
+ access and modify ACLs on your NFS server.
+
+ To store ACLs on your NFS server, you also need to enable ACL-
+ related CONFIG options for your local file systems of choice.
+
+ If unsure, say N.
config NFSD_V4
- bool "Provide NFSv4 server support (EXPERIMENTAL)"
- depends on NFSD && NFSD_V3 && EXPERIMENTAL
+ bool "NFS server support for NFS version 4 (EXPERIMENTAL)"
+ depends on NFSD && PROC_FS && EXPERIMENTAL
+ select NFSD_V3
+ select FS_POSIX_ACL
select RPCSEC_GSS_KRB5
help
- If you would like to include the NFSv4 server as well as the NFSv2
- and NFSv3 servers, say Y here. This feature is experimental, and
- should only be used if you are interested in helping to test NFSv4.
- If unsure, say N.
+ This option enables support in your system's NFS server for
+ version 4 of the NFS protocol (RFC 3530).
-config NFSD_TCP
- bool "Provide NFS server over TCP support"
- depends on NFSD
- default y
- help
- If you want your NFS server to support TCP connections, say Y here.
- TCP connections usually perform better than the default UDP when
- the network is lossy or congested. If unsure, say Y.
+ To export files using NFSv4, you need to install additional user
+ space programs which can be found in the Linux nfs-utils package,
+ available from http://linux-nfs.org/.
+
+ If unsure, say N.
config ROOT_NFS
bool "Root file system on NFS"
@@ -1807,15 +1789,33 @@ config SUNRPC_XPRT_RDMA
tristate
depends on SUNRPC && INFINIBAND && EXPERIMENTAL
default SUNRPC && INFINIBAND
+ help
+ This option enables an RPC client transport capability that
+ allows the NFS client to mount servers via an RDMA-enabled
+ transport.
+
+ To compile RPC client RDMA transport support as a module,
+ choose M here: the module will be called xprtrdma.
+
+ If unsure, say N.
config SUNRPC_BIND34
bool "Support for rpcbind versions 3 & 4 (EXPERIMENTAL)"
depends on SUNRPC && EXPERIMENTAL
+ default n
help
- Provides kernel support for querying rpcbind servers via versions 3
- and 4 of the rpcbind protocol. The kernel automatically falls back
- to version 2 if a remote rpcbind service does not support versions
- 3 or 4.
+ RPC requests over IPv6 networks require support for larger
+ addresses when performing an RPC bind. Sun added support for
+ IPv6 addressing by creating two new versions of the rpcbind
+ protocol (RFC 1833).
+
+ This option enables support in the kernel RPC client for
+ querying rpcbind servers via versions 3 and 4 of the rpcbind
+ protocol. The kernel automatically falls back to version 2
+ if a remote rpcbind service does not support versions 3 or 4.
+ By themselves, these new versions do not provide support for
+ RPC over IPv6, but the new protocol versions are necessary to
+ support it.
If unsure, say N to get traditional behavior (version 2 rpcbind
requests only).
@@ -1829,12 +1829,13 @@ config RPCSEC_GSS_KRB5
select CRYPTO_DES
select CRYPTO_CBC
help
- Provides for secure RPC calls by means of a gss-api
- mechanism based on Kerberos V5. This is required for
- NFSv4.
+ Choose Y here to enable Secure RPC using the Kerberos version 5
+ GSS-API mechanism (RFC 1964).
- Note: Requires an auxiliary userspace daemon which may be found on
- http://www.citi.umich.edu/projects/nfsv4/
+ Secure RPC calls with Kerberos require an auxiliary user-space
+ daemon which may be found in the Linux nfs-utils package
+ available from http://linux-nfs.org/. In addition, user-space
+ Kerberos support should be installed.
If unsure, say N.
@@ -1848,11 +1849,12 @@ config RPCSEC_GSS_SPKM3
select CRYPTO_CAST5
select CRYPTO_CBC
help
- Provides for secure RPC calls by means of a gss-api
- mechanism based on the SPKM3 public-key mechanism.
+ Choose Y here to enable Secure RPC using the SPKM3 public key
+ GSS-API mechanism (RFC 2025).
- Note: Requires an auxiliary userspace daemon which may be found on
- http://www.citi.umich.edu/projects/nfsv4/
+ Secure RPC calls with SPKM3 require an auxiliary userspace
+ daemon which may be found in the Linux nfs-utils package
+ available from http://linux-nfs.org/.
If unsure, say N.
diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c
index 5e1a4fb5cac..9924581df6f 100644
--- a/fs/binfmt_elf.c
+++ b/fs/binfmt_elf.c
@@ -543,7 +543,6 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
unsigned long interp_load_addr = 0;
unsigned long start_code, end_code, start_data, end_data;
unsigned long reloc_func_desc = 0;
- struct files_struct *files;
int executable_stack = EXSTACK_DEFAULT;
unsigned long def_flags = 0;
struct {
@@ -593,20 +592,9 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
goto out_free_ph;
}
- files = current->files; /* Refcounted so ok */
- retval = unshare_files();
- if (retval < 0)
- goto out_free_ph;
- if (files == current->files) {
- put_files_struct(files);
- files = NULL;
- }
-
- /* exec will make our files private anyway, but for the a.out
- loader stuff we need to do it earlier */
retval = get_unused_fd();
if (retval < 0)
- goto out_free_fh;
+ goto out_free_ph;
get_file(bprm->file);
fd_install(elf_exec_fileno = retval, bprm->file);
@@ -728,12 +716,6 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
if (retval)
goto out_free_dentry;
- /* Discard our unneeded old files struct */
- if (files) {
- put_files_struct(files);
- files = NULL;
- }
-
/* OK, This is the point of no return */
current->flags &= ~PF_FORKNOEXEC;
current->mm->def_flags = def_flags;
@@ -1016,9 +998,6 @@ out_free_interp:
kfree(elf_interpreter);
out_free_file:
sys_close(elf_exec_fileno);
-out_free_fh:
- if (files)
- reset_files_struct(current, files);
out_free_ph:
kfree(elf_phdata);
goto out;
diff --git a/fs/binfmt_misc.c b/fs/binfmt_misc.c
index b53c7e5f41b..dbf0ac0523d 100644
--- a/fs/binfmt_misc.c
+++ b/fs/binfmt_misc.c
@@ -110,7 +110,6 @@ static int load_misc_binary(struct linux_binprm *bprm, struct pt_regs *regs)
char *iname_addr = iname;
int retval;
int fd_binary = -1;
- struct files_struct *files = NULL;
retval = -ENOEXEC;
if (!enabled)
@@ -133,21 +132,13 @@ static int load_misc_binary(struct linux_binprm *bprm, struct pt_regs *regs)
if (fmt->flags & MISC_FMT_OPEN_BINARY) {
- files = current->files;
- retval = unshare_files();
- if (retval < 0)
- goto _ret;
- if (files == current->files) {
- put_files_struct(files);
- files = NULL;
- }
/* if the binary should be opened on behalf of the
* interpreter than keep it open and assign descriptor
* to it */
fd_binary = get_unused_fd();
if (fd_binary < 0) {
retval = fd_binary;
- goto _unshare;
+ goto _ret;
}
fd_install(fd_binary, bprm->file);
@@ -205,10 +196,6 @@ static int load_misc_binary(struct linux_binprm *bprm, struct pt_regs *regs)
if (retval < 0)
goto _error;
- if (files) {
- put_files_struct(files);
- files = NULL;
- }
_ret:
return retval;
_error:
@@ -216,9 +203,6 @@ _error:
sys_close(fd_binary);
bprm->interp_flags = 0;
bprm->interp_data = 0;
-_unshare:
- if (files)
- reset_files_struct(current, files);
goto _ret;
}
diff --git a/fs/binfmt_som.c b/fs/binfmt_som.c
index 14c63527c76..fdc36bfd6a7 100644
--- a/fs/binfmt_som.c
+++ b/fs/binfmt_som.c
@@ -194,7 +194,6 @@ load_som_binary(struct linux_binprm * bprm, struct pt_regs * regs)
unsigned long som_entry;
struct som_hdr *som_ex;
struct som_exec_auxhdr *hpuxhdr;
- struct files_struct *files;
/* Get the exec-header */
som_ex = (struct som_hdr *) bprm->buf;
@@ -221,15 +220,6 @@ load_som_binary(struct linux_binprm * bprm, struct pt_regs * regs)
goto out_free;
}
- files = current->files; /* Refcounted so ok */
- retval = unshare_files();
- if (retval < 0)
- goto out_free;
- if (files == current->files) {
- put_files_struct(files);
- files = NULL;
- }
-
retval = get_unused_fd();
if (retval < 0)
goto out_free;
diff --git a/fs/cifs/CHANGES b/fs/cifs/CHANGES
index dbd91461853..05c9da6181c 100644
--- a/fs/cifs/CHANGES
+++ b/fs/cifs/CHANGES
@@ -8,7 +8,8 @@ of second share to disconnected server session (autoreconnect on this).
Add ability to modify cifs acls for handling chmod (when mounted with
cifsacl flag). Fix prefixpath path separator so we can handle mounts
with prefixpaths longer than one directory (one path component) when
-mounted to Windows servers.
+mounted to Windows servers. Fix slow file open when cifsacl
+enabled.
Version 1.51
------------
diff --git a/fs/cifs/README b/fs/cifs/README
index 50306229b0f..621aa1a8597 100644
--- a/fs/cifs/README
+++ b/fs/cifs/README
@@ -3,7 +3,14 @@ features such as hierarchical dfs like namespace, hardlinks, locking and more.
It was designed to comply with the SNIA CIFS Technical Reference (which
supersedes the 1992 X/Open SMB Standard) as well as to perform best practice
practical interoperability with Windows 2000, Windows XP, Samba and equivalent
-servers.
+servers. This code was developed in participation with the Protocol Freedom
+Information Foundation.
+
+Please see
+ http://protocolfreedom.org/ and
+ http://samba.org/samba/PFIF/
+for more details.
+
For questions or bug reports please contact:
sfrench@samba.org (sfrench@us.ibm.com)
diff --git a/fs/cifs/cifs_dfs_ref.c b/fs/cifs/cifs_dfs_ref.c
index 56c924033b7..95024c066d8 100644
--- a/fs/cifs/cifs_dfs_ref.c
+++ b/fs/cifs/cifs_dfs_ref.c
@@ -23,16 +23,28 @@
#include "dns_resolve.h"
#include "cifs_debug.h"
-LIST_HEAD(cifs_dfs_automount_list);
+static LIST_HEAD(cifs_dfs_automount_list);
-/*
- * DFS functions
-*/
+static void cifs_dfs_expire_automounts(struct work_struct *work);
+static DECLARE_DELAYED_WORK(cifs_dfs_automount_task,
+ cifs_dfs_expire_automounts);
+static int cifs_dfs_mountpoint_expiry_timeout = 500 * HZ;
+
+static void cifs_dfs_expire_automounts(struct work_struct *work)
+{
+ struct list_head *list = &cifs_dfs_automount_list;
+
+ mark_mounts_for_expiry(list);
+ if (!list_empty(list))
+ schedule_delayed_work(&cifs_dfs_automount_task,
+ cifs_dfs_mountpoint_expiry_timeout);
+}
-void dfs_shrink_umount_helper(struct vfsmount *vfsmnt)
+void cifs_dfs_release_automount_timer(void)
{
- mark_mounts_for_expiry(&cifs_dfs_automount_list);
- mark_mounts_for_expiry(&cifs_dfs_automount_list);
+ BUG_ON(!list_empty(&cifs_dfs_automount_list));
+ cancel_delayed_work(&cifs_dfs_automount_task);
+ flush_scheduled_work();
}
/**
@@ -261,10 +273,11 @@ static int add_mount_helper(struct vfsmount *newmnt, struct nameidata *nd,
err = do_add_mount(newmnt, nd, nd->path.mnt->mnt_flags, mntlist);
switch (err) {
case 0:
- dput(nd->path.dentry);
- mntput(nd->path.mnt);
+ path_put(&nd->path);
nd->path.mnt = newmnt;
nd->path.dentry = dget(newmnt->mnt_root);
+ schedule_delayed_work(&cifs_dfs_automount_task,
+ cifs_dfs_mountpoint_expiry_timeout);
break;
case -EBUSY:
/* someone else made a mount here whilst we were busy */
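[Editor's note] The DFS referral code above uses a self-rearming delayed work item: the work function expires automount entries and reschedules itself while the list is non-empty, and the timer is torn down at module exit via cifs_dfs_release_automount_timer(). In isolation the pattern looks roughly like this (the example_ names and predicate are illustrative, not from the patch):

static void example_expire(struct work_struct *work);
static DECLARE_DELAYED_WORK(example_task, example_expire);

static void example_expire(struct work_struct *work)
{
	/* do the periodic cleanup, then re-arm only while work remains */
	if (example_entries_pending())			/* hypothetical predicate */
		schedule_delayed_work(&example_task, 500 * HZ);
}

static void example_shutdown(void)
{
	cancel_delayed_work(&example_task);
	flush_scheduled_work();				/* wait out a running instance */
}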
diff --git a/fs/cifs/cifsacl.c b/fs/cifs/cifsacl.c
index 1cb5b0a9f2a..e99d4faf5f0 100644
--- a/fs/cifs/cifsacl.c
+++ b/fs/cifs/cifsacl.c
@@ -516,7 +516,7 @@ static int parse_sec_desc(struct cifs_ntsd *pntsd, int acl_len,
/* Convert permission bits from mode to equivalent CIFS ACL */
static int build_sec_desc(struct cifs_ntsd *pntsd, struct cifs_ntsd *pnntsd,
- int acl_len, struct inode *inode, __u64 nmode)
+ struct inode *inode, __u64 nmode)
{
int rc = 0;
__u32 dacloffset;
@@ -692,14 +692,14 @@ void acl_to_uid_mode(struct inode *inode, const char *path, const __u16 *pfid)
int mode_to_acl(struct inode *inode, const char *path, __u64 nmode)
{
int rc = 0;
- __u32 acllen = 0;
+ __u32 secdesclen = 0;
struct cifs_ntsd *pntsd = NULL; /* acl obtained from server */
struct cifs_ntsd *pnntsd = NULL; /* modified acl to be sent to server */
cFYI(DBG2, ("set ACL from mode for %s", path));
/* Get the security descriptor */
- pntsd = get_cifs_acl(&acllen, inode, path, NULL);
+ pntsd = get_cifs_acl(&secdesclen, inode, path, NULL);
/* Add three ACEs for owner, group, everyone getting rid of
other ACEs as chmod disables ACEs and set the security descriptor */
@@ -709,20 +709,22 @@ int mode_to_acl(struct inode *inode, const char *path, __u64 nmode)
set security descriptor request security descriptor
parameters, and security descriptor itself */
- pnntsd = kmalloc(acllen, GFP_KERNEL);
+ secdesclen = secdesclen < DEFSECDESCLEN ?
+ DEFSECDESCLEN : secdesclen;
+ pnntsd = kmalloc(secdesclen, GFP_KERNEL);
if (!pnntsd) {
cERROR(1, ("Unable to allocate security descriptor"));
kfree(pntsd);
return (-ENOMEM);
}
- rc = build_sec_desc(pntsd, pnntsd, acllen, inode, nmode);
+ rc = build_sec_desc(pntsd, pnntsd, inode, nmode);
cFYI(DBG2, ("build_sec_desc rc: %d", rc));
if (!rc) {
/* Set the security descriptor */
- rc = set_cifs_acl(pnntsd, acllen, inode, path);
+ rc = set_cifs_acl(pnntsd, secdesclen, inode, path);
cFYI(DBG2, ("set_cifs_acl rc: %d", rc));
}
diff --git a/fs/cifs/cifsacl.h b/fs/cifs/cifsacl.h
index 93a7c3462ea..6c8096cf515 100644
--- a/fs/cifs/cifsacl.h
+++ b/fs/cifs/cifsacl.h
@@ -27,6 +27,7 @@
#define NUM_SUBAUTHS 5 /* number of sub authority fields */
#define NUM_WK_SIDS 7 /* number of well known sids */
#define SIDNAMELENGTH 20 /* long enough for the ones we care about */
+#define DEFSECDESCLEN 192 /* sec desc len containing a dacl with three aces */
#define READ_BIT 0x4
#define WRITE_BIT 0x2
diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
index a04b17e5a9d..39c2cbdface 100644
--- a/fs/cifs/cifsfs.c
+++ b/fs/cifs/cifsfs.c
@@ -466,16 +466,11 @@ static struct quotactl_ops cifs_quotactl_ops = {
};
#endif
-static void cifs_umount_begin(struct vfsmount *vfsmnt, int flags)
+static void cifs_umount_begin(struct super_block *sb)
{
- struct cifs_sb_info *cifs_sb;
+ struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
struct cifsTconInfo *tcon;
- dfs_shrink_umount_helper(vfsmnt);
-
- if (!(flags & MNT_FORCE))
- return;
- cifs_sb = CIFS_SB(vfsmnt->mnt_sb);
if (cifs_sb == NULL)
return;
@@ -1100,6 +1095,7 @@ exit_cifs(void)
cFYI(DBG2, ("exit_cifs"));
cifs_proc_clean();
#ifdef CONFIG_CIFS_DFS_UPCALL
+ cifs_dfs_release_automount_timer();
unregister_key_type(&key_type_dns_resolver);
#endif
#ifdef CONFIG_CIFS_UPCALL
diff --git a/fs/cifs/cifsfs.h b/fs/cifs/cifsfs.h
index 68978306c3c..e1dd9f32e1d 100644
--- a/fs/cifs/cifsfs.h
+++ b/fs/cifs/cifsfs.h
@@ -62,11 +62,9 @@ extern int cifs_setattr(struct dentry *, struct iattr *);
extern const struct inode_operations cifs_file_inode_ops;
extern const struct inode_operations cifs_symlink_inode_ops;
-extern struct list_head cifs_dfs_automount_list;
extern struct inode_operations cifs_dfs_referral_inode_operations;
-
/* Functions related to files and directories */
extern const struct file_operations cifs_file_ops;
extern const struct file_operations cifs_file_direct_ops; /* if directio mnt */
diff --git a/fs/cifs/cifspdu.h b/fs/cifs/cifspdu.h
index 47f79504f57..9f49c2f3582 100644
--- a/fs/cifs/cifspdu.h
+++ b/fs/cifs/cifspdu.h
@@ -1,7 +1,7 @@
/*
* fs/cifs/cifspdu.h
*
- * Copyright (c) International Business Machines Corp., 2002,2007
+ * Copyright (c) International Business Machines Corp., 2002,2008
* Author(s): Steve French (sfrench@us.ibm.com)
*
* This library is free software; you can redistribute it and/or modify
@@ -163,7 +163,10 @@
path names in response */
#define SMBFLG2_KNOWS_EAS cpu_to_le16(2)
#define SMBFLG2_SECURITY_SIGNATURE cpu_to_le16(4)
+#define SMBFLG2_COMPRESSED (8)
+#define SMBFLG2_SECURITY_SIGNATURE_REQUIRED (0x10)
#define SMBFLG2_IS_LONG_NAME cpu_to_le16(0x40)
+#define SMBFLG2_REPARSE_PATH (0x400)
#define SMBFLG2_EXT_SEC cpu_to_le16(0x800)
#define SMBFLG2_DFS cpu_to_le16(0x1000)
#define SMBFLG2_PAGING_IO cpu_to_le16(0x2000)
@@ -305,7 +308,7 @@
#define FILE_SHARE_DELETE 0x00000004
#define FILE_SHARE_ALL 0x00000007
-/* CreateDisposition flags */
+/* CreateDisposition flags, similar to CreateAction as well */
#define FILE_SUPERSEDE 0x00000000
#define FILE_OPEN 0x00000001
#define FILE_CREATE 0x00000002
@@ -317,15 +320,25 @@
#define CREATE_NOT_FILE 0x00000001 /* if set must not be file */
#define CREATE_WRITE_THROUGH 0x00000002
#define CREATE_SEQUENTIAL 0x00000004
-#define CREATE_SYNC_ALERT 0x00000010
-#define CREATE_ASYNC_ALERT 0x00000020
+#define CREATE_NO_BUFFER 0x00000008 /* should not buffer on srv */
+#define CREATE_SYNC_ALERT 0x00000010 /* MBZ */
+#define CREATE_ASYNC_ALERT 0x00000020 /* MBZ */
#define CREATE_NOT_DIR 0x00000040 /* if set must not be directory */
+#define CREATE_TREE_CONNECTION 0x00000080 /* should be zero */
+#define CREATE_COMPLETE_IF_OPLK 0x00000100 /* should be zero */
#define CREATE_NO_EA_KNOWLEDGE 0x00000200
-#define CREATE_EIGHT_DOT_THREE 0x00000400
+#define CREATE_EIGHT_DOT_THREE 0x00000400 /* doc says this is obsolete
+ open for recovery flag - should
+ be zero */
#define CREATE_RANDOM_ACCESS 0x00000800
#define CREATE_DELETE_ON_CLOSE 0x00001000
#define CREATE_OPEN_BY_ID 0x00002000
+#define CREATE_OPEN_BACKUP_INTN 0x00004000
+#define CREATE_NO_COMPRESSION 0x00008000
+#define CREATE_RESERVE_OPFILTER 0x00100000 /* should be zero */
#define OPEN_REPARSE_POINT 0x00200000
+#define OPEN_NO_RECALL 0x00400000
+#define OPEN_FREE_SPACE_QUERY 0x00800000 /* should be zero */
#define CREATE_OPTIONS_MASK 0x007FFFFF
#define CREATE_OPTION_SPECIAL 0x20000000 /* system. NB not sent over wire */
@@ -470,7 +483,7 @@ typedef struct lanman_neg_rsp {
typedef struct negotiate_rsp {
struct smb_hdr hdr; /* wct = 17 */
- __le16 DialectIndex;
+ __le16 DialectIndex; /* 0xFFFF = no dialect acceptable */
__u8 SecurityMode;
__le16 MaxMpxCount;
__le16 MaxNumberVcs;
@@ -516,10 +529,11 @@ typedef struct negotiate_rsp {
#define CAP_INFOLEVEL_PASSTHRU 0x00002000
#define CAP_LARGE_READ_X 0x00004000
#define CAP_LARGE_WRITE_X 0x00008000
+#define CAP_LWIO 0x00010000 /* support fctl_srv_req_resume_key */
#define CAP_UNIX 0x00800000
-#define CAP_RESERVED 0x02000000
-#define CAP_BULK_TRANSFER 0x20000000
-#define CAP_COMPRESSED_DATA 0x40000000
+#define CAP_COMPRESSED_DATA 0x02000000
+#define CAP_DYNAMIC_REAUTH 0x20000000
+#define CAP_PERSISTENT_HANDLES 0x40000000
#define CAP_EXTENDED_SECURITY 0x80000000
typedef union smb_com_session_setup_andx {
@@ -668,9 +682,7 @@ typedef struct smb_com_tconx_req {
} __attribute__((packed)) TCONX_REQ;
typedef struct smb_com_tconx_rsp {
- struct smb_hdr hdr; /* wct = 3 note that Win2000 has sent wct = 7
- in some cases on responses. Four unspecified
- words followed OptionalSupport */
+ struct smb_hdr hdr; /* wct = 3 , not extended response */
__u8 AndXCommand;
__u8 AndXReserved;
__le16 AndXOffset;
@@ -680,13 +692,48 @@ typedef struct smb_com_tconx_rsp {
/* STRING NativeFileSystem */
} __attribute__((packed)) TCONX_RSP;
+typedef struct smb_com_tconx_rsp_ext {
+ struct smb_hdr hdr; /* wct = 7, extended response */
+ __u8 AndXCommand;
+ __u8 AndXReserved;
+ __le16 AndXOffset;
+ __le16 OptionalSupport; /* see below */
+ __le32 MaximalShareAccessRights;
+ __le32 GuestMaximalShareAccessRights;
+ __u16 ByteCount;
+ unsigned char Service[1]; /* always ASCII, not Unicode */
+ /* STRING NativeFileSystem */
+} __attribute__((packed)) TCONX_RSP_EXT;
+
+
/* tree connect Flags */
#define DISCONNECT_TID 0x0001
+#define TCON_EXTENDED_SIGNATURES 0x0004
#define TCON_EXTENDED_SECINFO 0x0008
+
/* OptionalSupport bits */
#define SMB_SUPPORT_SEARCH_BITS 0x0001 /* "must have" directory search bits
(exclusive searches supported) */
#define SMB_SHARE_IS_IN_DFS 0x0002
+#define SMB_CSC_MASK 0x000C
+/* CSC flags defined as follows */
+#define SMB_CSC_CACHE_MANUAL_REINT 0x0000
+#define SMB_CSC_CACHE_AUTO_REINT 0x0004
+#define SMB_CSC_CACHE_VDO 0x0008
+#define SMB_CSC_NO_CACHING 0x000C
+
+#define SMB_UNIQUE_FILE_NAME 0x0010
+#define SMB_EXTENDED_SIGNATURES 0x0020
+
+/* services
+ *
+ * A: ie disk
+ * LPT1: ie printer
+ * IPC ie named pipe
+ * COMM
+ * ????? ie any type
+ *
+ */
typedef struct smb_com_logoff_andx_req {
struct smb_hdr hdr; /* wct = 2 */
@@ -750,6 +797,17 @@ typedef struct smb_com_findclose_req {
#define COMM_DEV_TYPE 0x0004
#define UNKNOWN_TYPE 0xFFFF
+/* Device Type or File Status Flags */
+#define NO_EAS 0x0001
+#define NO_SUBSTREAMS 0x0002
+#define NO_REPARSETAG 0x0004
+/* following flags can apply if pipe */
+#define ICOUNT_MASK 0x00FF
+#define PIPE_READ_MODE 0x0100
+#define NAMED_PIPE_TYPE 0x0400
+#define PIPE_END_POINT 0x0800
+#define BLOCKING_NAMED_PIPE 0x8000
+
typedef struct smb_com_open_req { /* also handles create */
struct smb_hdr hdr; /* wct = 24 */
__u8 AndXCommand;
@@ -758,7 +816,7 @@ typedef struct smb_com_open_req { /* also handles create */
__u8 Reserved; /* Must Be Zero */
__le16 NameLength;
__le32 OpenFlags;
- __le32 RootDirectoryFid;
+ __u32 RootDirectoryFid;
__le32 DesiredAccess;
__le64 AllocationSize;
__le32 FileAttributes;
@@ -801,6 +859,32 @@ typedef struct smb_com_open_rsp {
__u16 ByteCount; /* bct = 0 */
} __attribute__((packed)) OPEN_RSP;
+typedef struct smb_com_open_rsp_ext {
+ struct smb_hdr hdr; /* wct = 42 but meaningless due to MS bug? */
+ __u8 AndXCommand;
+ __u8 AndXReserved;
+ __le16 AndXOffset;
+ __u8 OplockLevel;
+ __u16 Fid;
+ __le32 CreateAction;
+ __le64 CreationTime;
+ __le64 LastAccessTime;
+ __le64 LastWriteTime;
+ __le64 ChangeTime;
+ __le32 FileAttributes;
+ __le64 AllocationSize;
+ __le64 EndOfFile;
+ __le16 FileType;
+ __le16 DeviceState;
+ __u8 DirectoryFlag;
+ __u8 VolumeGUID[16];
+ __u64 FileId; /* note no endian conversion - is opaque UniqueID */
+ __le32 MaximalAccessRights;
+ __le32 GuestMaximalAccessRights;
+ __u16 ByteCount; /* bct = 0 */
+} __attribute__((packed)) OPEN_RSP_EXT;
+
+
/* format of legacy open request */
typedef struct smb_com_openx_req {
struct smb_hdr hdr; /* wct = 15 */
@@ -1703,6 +1787,12 @@ typedef struct smb_com_transaction2_fnext_rsp_parms {
#define SMB_QUERY_CIFS_UNIX_INFO 0x200
#define SMB_QUERY_POSIX_FS_INFO 0x201
#define SMB_QUERY_POSIX_WHO_AM_I 0x202
+#define SMB_REQUEST_TRANSPORT_ENCRYPTION 0x203
+#define SMB_QUERY_FS_PROXY 0x204 /* WAFS enabled. Returns structure
+ FILE_SYSTEM__UNIX_INFO to tell
+ whether new NTIOCTL available
+ (0xACE) for WAN friendly SMB
+ operations to be carried */
#define SMB_QUERY_LABEL_INFO 0x3ea
#define SMB_QUERY_FS_QUOTA_INFO 0x3ee
#define SMB_QUERY_FS_FULL_SIZE_INFO 0x3ef
@@ -1959,7 +2049,10 @@ typedef struct {
#define CIFS_UNIX_LARGE_READ_CAP 0x00000040 /* support reads >128K (up
to 0xFFFF00) */
#define CIFS_UNIX_LARGE_WRITE_CAP 0x00000080
-
+#define CIFS_UNIX_TRANSPORT_ENCRYPTION_CAP 0x00000100 /* can do SPNEGO crypt */
+#define CIFS_UNIX_TRANPSORT_ENCRYPTION_MANDATORY_CAP 0x00000200 /* must do */
+#define CIFS_UNIX_PROXY_CAP 0x00000400 /* Proxy cap: 0xACE ioctl and
+ QFS PROXY call */
#ifdef CONFIG_CIFS_POSIX
/* Can not set pathnames cap yet until we send new posix create SMB since
otherwise server can treat such handles opened with older ntcreatex
diff --git a/fs/cifs/cifsproto.h b/fs/cifs/cifsproto.h
index 7e5e0e78cd7..50f9fdae19b 100644
--- a/fs/cifs/cifsproto.h
+++ b/fs/cifs/cifsproto.h
@@ -84,6 +84,7 @@ extern __u16 GetNextMid(struct TCP_Server_Info *server);
extern struct oplock_q_entry *AllocOplockQEntry(struct inode *, u16,
struct cifsTconInfo *);
extern void DeleteOplockQEntry(struct oplock_q_entry *);
+extern void DeleteTconOplockQEntries(struct cifsTconInfo *);
extern struct timespec cifs_NTtimeToUnix(u64 utc_nanoseconds_since_1601);
extern u64 cifs_UnixTimeToNT(struct timespec);
extern __le64 cnvrtDosCifsTm(__u16 date, __u16 time);
@@ -103,13 +104,7 @@ extern int mode_to_acl(struct inode *inode, const char *path, __u64);
extern int cifs_mount(struct super_block *, struct cifs_sb_info *, char *,
const char *);
extern int cifs_umount(struct super_block *, struct cifs_sb_info *);
-#ifdef CONFIG_CIFS_DFS_UPCALL
-extern void dfs_shrink_umount_helper(struct vfsmount *vfsmnt);
-#else
-static inline void dfs_shrink_umount_helper(struct vfsmount *vfsmnt)
-{
-}
-#endif /* DFS_UPCALL */
+extern void cifs_dfs_release_automount_timer(void);
void cifs_proc_init(void);
void cifs_proc_clean(void);
diff --git a/fs/cifs/cifssmb.c b/fs/cifs/cifssmb.c
index 30bbe448e26..4728fa982a4 100644
--- a/fs/cifs/cifssmb.c
+++ b/fs/cifs/cifssmb.c
@@ -165,17 +165,19 @@ small_smb_init(int smb_command, int wct, struct cifsTconInfo *tcon,
rc = CIFSTCon(0, tcon->ses, tcon->treeName,
tcon, nls_codepage);
up(&tcon->ses->sesSem);
- /* tell server which Unix caps we support */
- if (tcon->ses->capabilities & CAP_UNIX)
- reset_cifs_unix_caps(0 /* no xid */,
- tcon,
- NULL /* we do not know sb */,
- NULL /* no vol info */);
/* BB FIXME add code to check if wsize needs
update due to negotiated smb buffer size
shrinking */
- if (rc == 0)
+ if (rc == 0) {
atomic_inc(&tconInfoReconnectCount);
+ /* tell server Unix caps we support */
+ if (tcon->ses->capabilities & CAP_UNIX)
+ reset_cifs_unix_caps(
+ 0 /* no xid */,
+ tcon,
+ NULL /* we do not know sb */,
+ NULL /* no vol info */);
+ }
cFYI(1, ("reconnect tcon rc = %d", rc));
/* Removed call to reopen open files here.
@@ -310,17 +312,19 @@ smb_init(int smb_command, int wct, struct cifsTconInfo *tcon,
rc = CIFSTCon(0, tcon->ses, tcon->treeName,
tcon, nls_codepage);
up(&tcon->ses->sesSem);
- /* tell server which Unix caps we support */
- if (tcon->ses->capabilities & CAP_UNIX)
- reset_cifs_unix_caps(0 /* no xid */,
- tcon,
- NULL /* do not know sb */,
- NULL /* no vol info */);
/* BB FIXME add code to check if wsize needs
update due to negotiated smb buffer size
shrinking */
- if (rc == 0)
+ if (rc == 0) {
atomic_inc(&tconInfoReconnectCount);
+ /* tell server Unix caps we support */
+ if (tcon->ses->capabilities & CAP_UNIX)
+ reset_cifs_unix_caps(
+ 0 /* no xid */,
+ tcon,
+ NULL /* do not know sb */,
+ NULL /* no vol info */);
+ }
cFYI(1, ("reconnect tcon rc = %d", rc));
/* Removed call to reopen open files here.
diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
index 8dbfa97cd18..e1710673016 100644
--- a/fs/cifs/connect.c
+++ b/fs/cifs/connect.c
@@ -3527,6 +3527,7 @@ cifs_umount(struct super_block *sb, struct cifs_sb_info *cifs_sb)
FreeXid(xid);
return 0;
}
+ DeleteTconOplockQEntries(cifs_sb->tcon);
tconInfoFree(cifs_sb->tcon);
if ((ses) && (ses->server)) {
/* save off task so we do not refer to ses later */
diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c
index bc673c8c1e6..e1031b9e2c5 100644
--- a/fs/cifs/inode.c
+++ b/fs/cifs/inode.c
@@ -161,12 +161,14 @@ static void cifs_unix_info_to_inode(struct inode *inode,
spin_unlock(&inode->i_lock);
}
-static const unsigned char *cifs_get_search_path(struct cifsTconInfo *pTcon,
- const char *search_path)
+static const unsigned char *cifs_get_search_path(struct cifs_sb_info *cifs_sb,
+ const char *search_path)
{
int tree_len;
int path_len;
+ int i;
char *tmp_path;
+ struct cifsTconInfo *pTcon = cifs_sb->tcon;
if (!(pTcon->Flags & SMB_SHARE_IS_IN_DFS))
return search_path;
@@ -180,6 +182,11 @@ static const unsigned char *cifs_get_search_path(struct cifsTconInfo *pTcon,
return search_path;
strncpy(tmp_path, pTcon->treeName, tree_len);
+ if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_POSIX_PATHS)
+ for (i = 0; i < tree_len; i++) {
+ if (tmp_path[i] == '\\')
+ tmp_path[i] = '/';
+ }
strncpy(tmp_path+tree_len, search_path, path_len);
tmp_path[tree_len+path_len] = 0;
return tmp_path;
@@ -199,7 +206,7 @@ int cifs_get_inode_info_unix(struct inode **pinode,
pTcon = cifs_sb->tcon;
cFYI(1, ("Getting info on %s", search_path));
- full_path = cifs_get_search_path(pTcon, search_path);
+ full_path = cifs_get_search_path(cifs_sb, search_path);
try_again_CIFSSMBUnixQPathInfo:
/* could have done a find first instead but this returns more info */
@@ -402,7 +409,7 @@ int cifs_get_inode_info(struct inode **pinode,
return -ENOMEM;
pfindData = (FILE_ALL_INFO *)buf;
- full_path = cifs_get_search_path(pTcon, search_path);
+ full_path = cifs_get_search_path(cifs_sb, search_path);
try_again_CIFSSMBQPathInfo:
/* could do find first instead but this returns more info */
diff --git a/fs/cifs/transport.c b/fs/cifs/transport.c
index 3612d6c0a0b..000ac509c98 100644
--- a/fs/cifs/transport.c
+++ b/fs/cifs/transport.c
@@ -142,6 +142,24 @@ void DeleteOplockQEntry(struct oplock_q_entry *oplockEntry)
kmem_cache_free(cifs_oplock_cachep, oplockEntry);
}
+
+void DeleteTconOplockQEntries(struct cifsTconInfo *tcon)
+{
+ struct oplock_q_entry *temp;
+
+ if (tcon == NULL)
+ return;
+
+ spin_lock(&GlobalMid_Lock);
+ list_for_each_entry(temp, &GlobalOplock_Q, qhead) {
+ if ((temp->tcon) && (temp->tcon == tcon)) {
+ list_del(&temp->qhead);
+ kmem_cache_free(cifs_oplock_cachep, temp);
+ }
+ }
+ spin_unlock(&GlobalMid_Lock);
+}
+
int
smb_send(struct socket *ssocket, struct smb_hdr *smb_buffer,
unsigned int smb_buf_length, struct sockaddr *sin)
diff --git a/fs/dcache.c b/fs/dcache.c
index 43455776711..3ee588d5f58 100644
--- a/fs/dcache.c
+++ b/fs/dcache.c
@@ -1746,12 +1746,21 @@ shouldnt_be_hashed:
goto shouldnt_be_hashed;
}
+static int prepend(char **buffer, int *buflen, const char *str,
+ int namelen)
+{
+ *buflen -= namelen;
+ if (*buflen < 0)
+ return -ENAMETOOLONG;
+ *buffer -= namelen;
+ memcpy(*buffer, str, namelen);
+ return 0;
+}
+
/**
* d_path - return the path of a dentry
- * @dentry: dentry to report
- * @vfsmnt: vfsmnt to which the dentry belongs
- * @root: root dentry
- * @rootmnt: vfsmnt to which the root dentry belongs
+ * @path: the dentry/vfsmount to report
+ * @root: root vfsmnt/dentry (may be modified by this function)
* @buffer: buffer to return value in
* @buflen: buffer length
*
@@ -1761,23 +1770,22 @@ shouldnt_be_hashed:
* Returns the buffer or an error code if the path was too long.
*
* "buflen" should be positive. Caller holds the dcache_lock.
+ *
+ * If path is not reachable from the supplied root, then the value of
+ * root is changed (without modifying refcounts).
*/
-static char *__d_path(struct dentry *dentry, struct vfsmount *vfsmnt,
- struct path *root, char *buffer, int buflen)
+char *__d_path(const struct path *path, struct path *root,
+ char *buffer, int buflen)
{
+ struct dentry *dentry = path->dentry;
+ struct vfsmount *vfsmnt = path->mnt;
char * end = buffer+buflen;
char * retval;
- int namelen;
-
- *--end = '\0';
- buflen--;
- if (!IS_ROOT(dentry) && d_unhashed(dentry)) {
- buflen -= 10;
- end -= 10;
- if (buflen < 0)
+
+ prepend(&end, &buflen, "\0", 1);
+ if (!IS_ROOT(dentry) && d_unhashed(dentry) &&
+ (prepend(&end, &buflen, " (deleted)", 10) != 0))
goto Elong;
- memcpy(end, " (deleted)", 10);
- }
if (buflen < 1)
goto Elong;
@@ -1804,13 +1812,10 @@ static char *__d_path(struct dentry *dentry, struct vfsmount *vfsmnt,
}
parent = dentry->d_parent;
prefetch(parent);
- namelen = dentry->d_name.len;
- buflen -= namelen + 1;
- if (buflen < 0)
+ if ((prepend(&end, &buflen, dentry->d_name.name,
+ dentry->d_name.len) != 0) ||
+ (prepend(&end, &buflen, "/", 1) != 0))
goto Elong;
- end -= namelen;
- memcpy(end, dentry->d_name.name, namelen);
- *--end = '/';
retval = end;
dentry = parent;
}
@@ -1818,12 +1823,12 @@ static char *__d_path(struct dentry *dentry, struct vfsmount *vfsmnt,
return retval;
global_root:
- namelen = dentry->d_name.len;
- buflen -= namelen;
- if (buflen < 0)
+ retval += 1; /* hit the slash */
+ if (prepend(&retval, &buflen, dentry->d_name.name,
+ dentry->d_name.len) != 0)
goto Elong;
- retval -= namelen-1; /* hit the slash */
- memcpy(retval, dentry->d_name.name, namelen);
+ root->mnt = vfsmnt;
+ root->dentry = dentry;
return retval;
Elong:
return ERR_PTR(-ENAMETOOLONG);
@@ -1846,6 +1851,7 @@ char *d_path(struct path *path, char *buf, int buflen)
{
char *res;
struct path root;
+ struct path tmp;
/*
* We have various synthetic filesystems that never get mounted. On
@@ -1859,10 +1865,11 @@ char *d_path(struct path *path, char *buf, int buflen)
read_lock(&current->fs->lock);
root = current->fs->root;
- path_get(&current->fs->root);
+ path_get(&root);
read_unlock(&current->fs->lock);
spin_lock(&dcache_lock);
- res = __d_path(path->dentry, path->mnt, &root, buf, buflen);
+ tmp = root;
+ res = __d_path(path, &tmp, buf, buflen);
spin_unlock(&dcache_lock);
path_put(&root);
return res;
@@ -1890,6 +1897,48 @@ char *dynamic_dname(struct dentry *dentry, char *buffer, int buflen,
}
/*
+ * Write full pathname from the root of the filesystem into the buffer.
+ */
+char *dentry_path(struct dentry *dentry, char *buf, int buflen)
+{
+ char *end = buf + buflen;
+ char *retval;
+
+ spin_lock(&dcache_lock);
+ prepend(&end, &buflen, "\0", 1);
+ if (!IS_ROOT(dentry) && d_unhashed(dentry) &&
+ (prepend(&end, &buflen, "//deleted", 9) != 0))
+ goto Elong;
+ if (buflen < 1)
+ goto Elong;
+ /* Get '/' right */
+ retval = end-1;
+ *retval = '/';
+
+ for (;;) {
+ struct dentry *parent;
+ if (IS_ROOT(dentry))
+ break;
+
+ parent = dentry->d_parent;
+ prefetch(parent);
+
+ if ((prepend(&end, &buflen, dentry->d_name.name,
+ dentry->d_name.len) != 0) ||
+ (prepend(&end, &buflen, "/", 1) != 0))
+ goto Elong;
+
+ retval = end;
+ dentry = parent;
+ }
+ spin_unlock(&dcache_lock);
+ return retval;
+Elong:
+ spin_unlock(&dcache_lock);
+ return ERR_PTR(-ENAMETOOLONG);
+}
+
+/*
* NOTE! The user-level library version returns a
* character pointer. The kernel system call just
* returns the length of the buffer filled (which
@@ -1918,9 +1967,9 @@ asmlinkage long sys_getcwd(char __user *buf, unsigned long size)
read_lock(&current->fs->lock);
pwd = current->fs->pwd;
- path_get(&current->fs->pwd);
+ path_get(&pwd);
root = current->fs->root;
- path_get(&current->fs->root);
+ path_get(&root);
read_unlock(&current->fs->lock);
error = -ENOENT;
@@ -1928,9 +1977,10 @@ asmlinkage long sys_getcwd(char __user *buf, unsigned long size)
spin_lock(&dcache_lock);
if (pwd.dentry->d_parent == pwd.dentry || !d_unhashed(pwd.dentry)) {
unsigned long len;
+ struct path tmp = root;
char * cwd;
- cwd = __d_path(pwd.dentry, pwd.mnt, &root, page, PAGE_SIZE);
+ cwd = __d_path(&pwd, &tmp, page, PAGE_SIZE);
spin_unlock(&dcache_lock);
error = PTR_ERR(cwd);
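[Editor's note] The new prepend() helper introduced above fills its buffer from the end toward the front, which is why both __d_path() and the new dentry_path() walk from the dentry up toward the root. A standalone sketch of the calling convention (buffer contents here are purely illustrative):

char buf[64];
char *end = buf + sizeof(buf);
int buflen = sizeof(buf);

prepend(&end, &buflen, "\0", 1);	/* terminator goes in first */
prepend(&end, &buflen, "leaf", 4);
prepend(&end, &buflen, "/", 1);
prepend(&end, &buflen, "dir", 3);
prepend(&end, &buflen, "/", 1);
/* end now points at "/dir/leaf"; a negative return means -ENAMETOOLONG */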
diff --git a/fs/dlm/Makefile b/fs/dlm/Makefile
index d248e60951b..ca1c9124c8c 100644
--- a/fs/dlm/Makefile
+++ b/fs/dlm/Makefile
@@ -10,6 +10,7 @@ dlm-y := ast.o \
midcomms.o \
netlink.o \
lowcomms.o \
+ plock.o \
rcom.o \
recover.o \
recoverd.o \
diff --git a/fs/dlm/config.c b/fs/dlm/config.c
index c3ad1dff3b2..eac23bd288b 100644
--- a/fs/dlm/config.c
+++ b/fs/dlm/config.c
@@ -114,7 +114,7 @@ struct cluster_attribute {
};
static ssize_t cluster_set(struct cluster *cl, unsigned int *cl_field,
- unsigned int *info_field, int check_zero,
+ int *info_field, int check_zero,
const char *buf, size_t len)
{
unsigned int x;
@@ -284,6 +284,7 @@ struct node {
struct list_head list; /* space->members */
int nodeid;
int weight;
+ int new;
};
static struct configfs_group_operations clusters_ops = {
@@ -565,6 +566,7 @@ static struct config_item *make_node(struct config_group *g, const char *name)
config_item_init_type_name(&nd->item, name, &node_type);
nd->nodeid = -1;
nd->weight = 1; /* default weight of 1 if none is set */
+ nd->new = 1; /* set to 0 once it's been read by dlm_nodeid_list() */
mutex_lock(&sp->members_lock);
list_add(&nd->list, &sp->members);
@@ -805,12 +807,13 @@ static void put_comm(struct comm *cm)
}
/* caller must free mem */
-int dlm_nodeid_list(char *lsname, int **ids_out)
+int dlm_nodeid_list(char *lsname, int **ids_out, int *ids_count_out,
+ int **new_out, int *new_count_out)
{
struct space *sp;
struct node *nd;
- int i = 0, rv = 0;
- int *ids;
+ int i = 0, rv = 0, ids_count = 0, new_count = 0;
+ int *ids, *new;
sp = get_space(lsname);
if (!sp)
@@ -818,23 +821,50 @@ int dlm_nodeid_list(char *lsname, int **ids_out)
mutex_lock(&sp->members_lock);
if (!sp->members_count) {
- rv = 0;
+ rv = -EINVAL;
+ printk(KERN_ERR "dlm: zero members_count\n");
goto out;
}
- ids = kcalloc(sp->members_count, sizeof(int), GFP_KERNEL);
+ ids_count = sp->members_count;
+
+ ids = kcalloc(ids_count, sizeof(int), GFP_KERNEL);
if (!ids) {
rv = -ENOMEM;
goto out;
}
- rv = sp->members_count;
- list_for_each_entry(nd, &sp->members, list)
+ list_for_each_entry(nd, &sp->members, list) {
ids[i++] = nd->nodeid;
+ if (nd->new)
+ new_count++;
+ }
+
+ if (ids_count != i)
+ printk(KERN_ERR "dlm: bad nodeid count %d %d\n", ids_count, i);
+
+ if (!new_count)
+ goto out_ids;
+
+ new = kcalloc(new_count, sizeof(int), GFP_KERNEL);
+ if (!new) {
+ kfree(ids);
+ rv = -ENOMEM;
+ goto out;
+ }
- if (rv != i)
- printk("bad nodeid count %d %d\n", rv, i);
+ i = 0;
+ list_for_each_entry(nd, &sp->members, list) {
+ if (nd->new) {
+ new[i++] = nd->nodeid;
+ nd->new = 0;
+ }
+ }
+ *new_count_out = new_count;
+ *new_out = new;
+ out_ids:
+ *ids_count_out = ids_count;
*ids_out = ids;
out:
mutex_unlock(&sp->members_lock);
diff --git a/fs/dlm/config.h b/fs/dlm/config.h
index a3170fe2209..4f1d6fce58c 100644
--- a/fs/dlm/config.h
+++ b/fs/dlm/config.h
@@ -35,7 +35,8 @@ extern struct dlm_config_info dlm_config;
int dlm_config_init(void);
void dlm_config_exit(void);
int dlm_node_weight(char *lsname, int nodeid);
-int dlm_nodeid_list(char *lsname, int **ids_out);
+int dlm_nodeid_list(char *lsname, int **ids_out, int *ids_count_out,
+ int **new_out, int *new_count_out);
int dlm_nodeid_to_addr(int nodeid, struct sockaddr_storage *addr);
int dlm_addr_to_nodeid(struct sockaddr_storage *addr, int *nodeid);
int dlm_our_nodeid(void);
diff --git a/fs/dlm/dlm_internal.h b/fs/dlm/dlm_internal.h
index 7a8824f475f..5a7ac33b629 100644
--- a/fs/dlm/dlm_internal.h
+++ b/fs/dlm/dlm_internal.h
@@ -42,8 +42,6 @@
#include <linux/dlm.h>
#include "config.h"
-#define DLM_LOCKSPACE_LEN 64
-
/* Size of the temp buffer midcomms allocates on the stack.
We try to make this large enough so most messages fit.
FIXME: should sctp make this unnecessary? */
@@ -132,8 +130,10 @@ struct dlm_member {
struct dlm_recover {
struct list_head list;
- int *nodeids;
+ int *nodeids; /* nodeids of all members */
int node_count;
+ int *new; /* nodeids of new members */
+ int new_count;
uint64_t seq;
};
@@ -579,6 +579,8 @@ static inline int dlm_no_directory(struct dlm_ls *ls)
int dlm_netlink_init(void);
void dlm_netlink_exit(void);
void dlm_timeout_warn(struct dlm_lkb *lkb);
+int dlm_plock_init(void);
+void dlm_plock_exit(void);
#ifdef CONFIG_DLM_DEBUG
int dlm_register_debugfs(void);
diff --git a/fs/dlm/lock.c b/fs/dlm/lock.c
index 8f250ac8b92..2d3d1027ce2 100644
--- a/fs/dlm/lock.c
+++ b/fs/dlm/lock.c
@@ -165,7 +165,7 @@ void dlm_print_lkb(struct dlm_lkb *lkb)
lkb->lkb_grmode, lkb->lkb_wait_type, lkb->lkb_ast_type);
}
-void dlm_print_rsb(struct dlm_rsb *r)
+static void dlm_print_rsb(struct dlm_rsb *r)
{
printk(KERN_ERR "rsb: nodeid %d flags %lx first %x rlc %d name %s\n",
r->res_nodeid, r->res_flags, r->res_first_lkid,
@@ -1956,8 +1956,7 @@ static void confirm_master(struct dlm_rsb *r, int error)
list_del_init(&lkb->lkb_rsb_lookup);
r->res_first_lkid = lkb->lkb_id;
_request_lock(r, lkb);
- } else
- r->res_nodeid = -1;
+ }
break;
default:
diff --git a/fs/dlm/lock.h b/fs/dlm/lock.h
index 05d9c82e646..88e93c80cc2 100644
--- a/fs/dlm/lock.h
+++ b/fs/dlm/lock.h
@@ -13,7 +13,6 @@
#ifndef __LOCK_DOT_H__
#define __LOCK_DOT_H__
-void dlm_print_rsb(struct dlm_rsb *r);
void dlm_dump_rsb(struct dlm_rsb *r);
void dlm_print_lkb(struct dlm_lkb *lkb);
void dlm_receive_message_saved(struct dlm_ls *ls, struct dlm_message *ms);
diff --git a/fs/dlm/main.c b/fs/dlm/main.c
index 58487fb95a4..b80e0aa3cfa 100644
--- a/fs/dlm/main.c
+++ b/fs/dlm/main.c
@@ -46,10 +46,16 @@ static int __init init_dlm(void)
if (error)
goto out_user;
+ error = dlm_plock_init();
+ if (error)
+ goto out_netlink;
+
printk("DLM (built %s %s) installed\n", __DATE__, __TIME__);
return 0;
+ out_netlink:
+ dlm_netlink_exit();
out_user:
dlm_user_exit();
out_debug:
@@ -66,6 +72,7 @@ static int __init init_dlm(void)
static void __exit exit_dlm(void)
{
+ dlm_plock_exit();
dlm_netlink_exit();
dlm_user_exit();
dlm_config_exit();
diff --git a/fs/dlm/member.c b/fs/dlm/member.c
index fa17f5a2788..26133f05ae3 100644
--- a/fs/dlm/member.c
+++ b/fs/dlm/member.c
@@ -210,6 +210,23 @@ int dlm_recover_members(struct dlm_ls *ls, struct dlm_recover *rv, int *neg_out)
}
}
+ /* Add an entry to ls_nodes_gone for members that were removed and
+ then added again, so that previous state for these nodes will be
+ cleared during recovery. */
+
+ for (i = 0; i < rv->new_count; i++) {
+ if (!dlm_is_member(ls, rv->new[i]))
+ continue;
+ log_debug(ls, "new nodeid %d is a re-added member", rv->new[i]);
+
+ memb = kzalloc(sizeof(struct dlm_member), GFP_KERNEL);
+ if (!memb)
+ return -ENOMEM;
+ memb->nodeid = rv->new[i];
+ list_add_tail(&memb->list, &ls->ls_nodes_gone);
+ neg++;
+ }
+
/* add new members to ls_nodes */
for (i = 0; i < rv->node_count; i++) {
@@ -314,15 +331,16 @@ int dlm_ls_stop(struct dlm_ls *ls)
int dlm_ls_start(struct dlm_ls *ls)
{
struct dlm_recover *rv = NULL, *rv_old;
- int *ids = NULL;
- int error, count;
+ int *ids = NULL, *new = NULL;
+ int error, ids_count = 0, new_count = 0;
rv = kzalloc(sizeof(struct dlm_recover), GFP_KERNEL);
if (!rv)
return -ENOMEM;
- error = count = dlm_nodeid_list(ls->ls_name, &ids);
- if (error <= 0)
+ error = dlm_nodeid_list(ls->ls_name, &ids, &ids_count,
+ &new, &new_count);
+ if (error < 0)
goto fail;
spin_lock(&ls->ls_recover_lock);
@@ -337,14 +355,19 @@ int dlm_ls_start(struct dlm_ls *ls)
}
rv->nodeids = ids;
- rv->node_count = count;
+ rv->node_count = ids_count;
+ rv->new = new;
+ rv->new_count = new_count;
rv->seq = ++ls->ls_recover_seq;
rv_old = ls->ls_recover_args;
ls->ls_recover_args = rv;
spin_unlock(&ls->ls_recover_lock);
if (rv_old) {
+ log_error(ls, "unused recovery %llx %d",
+ (unsigned long long)rv_old->seq, rv_old->node_count);
kfree(rv_old->nodeids);
+ kfree(rv_old->new);
kfree(rv_old);
}
@@ -354,6 +377,7 @@ int dlm_ls_start(struct dlm_ls *ls)
fail:
kfree(rv);
kfree(ids);
+ kfree(new);
return error;
}
diff --git a/fs/gfs2/locking/dlm/plock.c b/fs/dlm/plock.c
index 2ebd374b314..d6d6e370f89 100644
--- a/fs/gfs2/locking/dlm/plock.c
+++ b/fs/dlm/plock.c
@@ -1,17 +1,19 @@
/*
- * Copyright (C) 2005 Red Hat, Inc. All rights reserved.
+ * Copyright (C) 2005-2008 Red Hat, Inc. All rights reserved.
*
* This copyrighted material is made available to anyone wishing to use,
* modify, copy, or redistribute it subject to the terms and conditions
* of the GNU General Public License version 2.
*/
+#include <linux/fs.h>
#include <linux/miscdevice.h>
-#include <linux/lock_dlm_plock.h>
#include <linux/poll.h>
+#include <linux/dlm.h>
+#include <linux/dlm_plock.h>
-#include "lock_dlm.h"
-
+#include "dlm_internal.h"
+#include "lockspace.h"
static spinlock_t ops_lock;
static struct list_head send_list;
@@ -22,7 +24,7 @@ static wait_queue_head_t recv_wq;
struct plock_op {
struct list_head list;
int done;
- struct gdlm_plock_info info;
+ struct dlm_plock_info info;
};
struct plock_xop {
@@ -34,22 +36,22 @@ struct plock_xop {
};
-static inline void set_version(struct gdlm_plock_info *info)
+static inline void set_version(struct dlm_plock_info *info)
{
- info->version[0] = GDLM_PLOCK_VERSION_MAJOR;
- info->version[1] = GDLM_PLOCK_VERSION_MINOR;
- info->version[2] = GDLM_PLOCK_VERSION_PATCH;
+ info->version[0] = DLM_PLOCK_VERSION_MAJOR;
+ info->version[1] = DLM_PLOCK_VERSION_MINOR;
+ info->version[2] = DLM_PLOCK_VERSION_PATCH;
}
-static int check_version(struct gdlm_plock_info *info)
+static int check_version(struct dlm_plock_info *info)
{
- if ((GDLM_PLOCK_VERSION_MAJOR != info->version[0]) ||
- (GDLM_PLOCK_VERSION_MINOR < info->version[1])) {
- log_error("plock device version mismatch: "
+ if ((DLM_PLOCK_VERSION_MAJOR != info->version[0]) ||
+ (DLM_PLOCK_VERSION_MINOR < info->version[1])) {
+ log_print("plock device version mismatch: "
"kernel (%u.%u.%u), user (%u.%u.%u)",
- GDLM_PLOCK_VERSION_MAJOR,
- GDLM_PLOCK_VERSION_MINOR,
- GDLM_PLOCK_VERSION_PATCH,
+ DLM_PLOCK_VERSION_MAJOR,
+ DLM_PLOCK_VERSION_MINOR,
+ DLM_PLOCK_VERSION_PATCH,
info->version[0],
info->version[1],
info->version[2]);
@@ -68,25 +70,31 @@ static void send_op(struct plock_op *op)
wake_up(&send_wq);
}
-int gdlm_plock(void *lockspace, struct lm_lockname *name,
- struct file *file, int cmd, struct file_lock *fl)
+int dlm_posix_lock(dlm_lockspace_t *lockspace, u64 number, struct file *file,
+ int cmd, struct file_lock *fl)
{
- struct gdlm_ls *ls = lockspace;
+ struct dlm_ls *ls;
struct plock_op *op;
struct plock_xop *xop;
int rv;
+ ls = dlm_find_lockspace_local(lockspace);
+ if (!ls)
+ return -EINVAL;
+
xop = kzalloc(sizeof(*xop), GFP_KERNEL);
- if (!xop)
- return -ENOMEM;
+ if (!xop) {
+ rv = -ENOMEM;
+ goto out;
+ }
op = &xop->xop;
- op->info.optype = GDLM_PLOCK_OP_LOCK;
+ op->info.optype = DLM_PLOCK_OP_LOCK;
op->info.pid = fl->fl_pid;
op->info.ex = (fl->fl_type == F_WRLCK);
op->info.wait = IS_SETLKW(cmd);
- op->info.fsid = ls->id;
- op->info.number = name->ln_number;
+ op->info.fsid = ls->ls_global_id;
+ op->info.number = number;
op->info.start = fl->fl_start;
op->info.end = fl->fl_end;
if (fl->fl_lmops && fl->fl_lmops->fl_grant) {
@@ -107,12 +115,15 @@ int gdlm_plock(void *lockspace, struct lm_lockname *name,
if (xop->callback == NULL)
wait_event(recv_wq, (op->done != 0));
- else
- return -EINPROGRESS;
+ else {
+ rv = -EINPROGRESS;
+ goto out;
+ }
spin_lock(&ops_lock);
if (!list_empty(&op->list)) {
- printk(KERN_INFO "plock op on list\n");
+ log_error(ls, "dlm_posix_lock: op on list %llx",
+ (unsigned long long)number);
list_del(&op->list);
}
spin_unlock(&ops_lock);
@@ -121,17 +132,19 @@ int gdlm_plock(void *lockspace, struct lm_lockname *name,
if (!rv) {
if (posix_lock_file_wait(file, fl) < 0)
- log_error("gdlm_plock: vfs lock error %x,%llx",
- name->ln_type,
- (unsigned long long)name->ln_number);
+ log_error(ls, "dlm_posix_lock: vfs lock error %llx",
+ (unsigned long long)number);
}
kfree(xop);
+out:
+ dlm_put_lockspace(ls);
return rv;
}
+EXPORT_SYMBOL_GPL(dlm_posix_lock);
/* Returns failure iff a successful lock operation should be canceled */
-static int gdlm_plock_callback(struct plock_op *op)
+static int dlm_plock_callback(struct plock_op *op)
{
struct file *file;
struct file_lock *fl;
@@ -142,7 +155,8 @@ static int gdlm_plock_callback(struct plock_op *op)
spin_lock(&ops_lock);
if (!list_empty(&op->list)) {
- printk(KERN_INFO "plock op on list\n");
+ log_print("dlm_plock_callback: op on list %llx",
+ (unsigned long long)op->info.number);
list_del(&op->list);
}
spin_unlock(&ops_lock);
@@ -165,19 +179,19 @@ static int gdlm_plock_callback(struct plock_op *op)
* This can only happen in the case of kmalloc() failure.
* The filesystem's own lock is the authoritative lock,
* so a failure to get the lock locally is not a disaster.
- * As long as GFS cannot reliably cancel locks (especially
+ * As long as the fs cannot reliably cancel locks (especially
* in a low-memory situation), we're better off ignoring
* this failure than trying to recover.
*/
- log_error("gdlm_plock: vfs lock error file %p fl %p",
- file, fl);
+ log_print("dlm_plock_callback: vfs lock error %llx file %p fl %p",
+ (unsigned long long)op->info.number, file, fl);
}
rv = notify(flc, NULL, 0);
if (rv) {
/* XXX: We need to cancel the fs lock here: */
- printk("gfs2 lock granted after lock request failed;"
- " dangling lock!\n");
+ log_print("dlm_plock_callback: lock granted after lock request "
+ "failed; dangling lock!\n");
goto out;
}
@@ -186,25 +200,31 @@ out:
return rv;
}
-int gdlm_punlock(void *lockspace, struct lm_lockname *name,
- struct file *file, struct file_lock *fl)
+int dlm_posix_unlock(dlm_lockspace_t *lockspace, u64 number, struct file *file,
+ struct file_lock *fl)
{
- struct gdlm_ls *ls = lockspace;
+ struct dlm_ls *ls;
struct plock_op *op;
int rv;
+ ls = dlm_find_lockspace_local(lockspace);
+ if (!ls)
+ return -EINVAL;
+
op = kzalloc(sizeof(*op), GFP_KERNEL);
- if (!op)
- return -ENOMEM;
+ if (!op) {
+ rv = -ENOMEM;
+ goto out;
+ }
if (posix_lock_file_wait(file, fl) < 0)
- log_error("gdlm_punlock: vfs unlock error %x,%llx",
- name->ln_type, (unsigned long long)name->ln_number);
+ log_error(ls, "dlm_posix_unlock: vfs unlock error %llx",
+ (unsigned long long)number);
- op->info.optype = GDLM_PLOCK_OP_UNLOCK;
+ op->info.optype = DLM_PLOCK_OP_UNLOCK;
op->info.pid = fl->fl_pid;
- op->info.fsid = ls->id;
- op->info.number = name->ln_number;
+ op->info.fsid = ls->ls_global_id;
+ op->info.number = number;
op->info.start = fl->fl_start;
op->info.end = fl->fl_end;
if (fl->fl_lmops && fl->fl_lmops->fl_grant)
@@ -217,7 +237,8 @@ int gdlm_punlock(void *lockspace, struct lm_lockname *name,
spin_lock(&ops_lock);
if (!list_empty(&op->list)) {
- printk(KERN_INFO "punlock op on list\n");
+ log_error(ls, "dlm_posix_unlock: op on list %llx",
+ (unsigned long long)number);
list_del(&op->list);
}
spin_unlock(&ops_lock);
@@ -228,25 +249,34 @@ int gdlm_punlock(void *lockspace, struct lm_lockname *name,
rv = 0;
kfree(op);
+out:
+ dlm_put_lockspace(ls);
return rv;
}
+EXPORT_SYMBOL_GPL(dlm_posix_unlock);
-int gdlm_plock_get(void *lockspace, struct lm_lockname *name,
- struct file *file, struct file_lock *fl)
+int dlm_posix_get(dlm_lockspace_t *lockspace, u64 number, struct file *file,
+ struct file_lock *fl)
{
- struct gdlm_ls *ls = lockspace;
+ struct dlm_ls *ls;
struct plock_op *op;
int rv;
+ ls = dlm_find_lockspace_local(lockspace);
+ if (!ls)
+ return -EINVAL;
+
op = kzalloc(sizeof(*op), GFP_KERNEL);
- if (!op)
- return -ENOMEM;
+ if (!op) {
+ rv = -ENOMEM;
+ goto out;
+ }
- op->info.optype = GDLM_PLOCK_OP_GET;
+ op->info.optype = DLM_PLOCK_OP_GET;
op->info.pid = fl->fl_pid;
op->info.ex = (fl->fl_type == F_WRLCK);
- op->info.fsid = ls->id;
- op->info.number = name->ln_number;
+ op->info.fsid = ls->ls_global_id;
+ op->info.number = number;
op->info.start = fl->fl_start;
op->info.end = fl->fl_end;
if (fl->fl_lmops && fl->fl_lmops->fl_grant)
@@ -259,7 +289,8 @@ int gdlm_plock_get(void *lockspace, struct lm_lockname *name,
spin_lock(&ops_lock);
if (!list_empty(&op->list)) {
- printk(KERN_INFO "plock_get op on list\n");
+ log_error(ls, "dlm_posix_get: op on list %llx",
+ (unsigned long long)number);
list_del(&op->list);
}
spin_unlock(&ops_lock);
@@ -281,14 +312,17 @@ int gdlm_plock_get(void *lockspace, struct lm_lockname *name,
}
kfree(op);
+out:
+ dlm_put_lockspace(ls);
return rv;
}
+EXPORT_SYMBOL_GPL(dlm_posix_get);
/* a read copies out one plock request from the send list */
static ssize_t dev_read(struct file *file, char __user *u, size_t count,
loff_t *ppos)
{
- struct gdlm_plock_info info;
+ struct dlm_plock_info info;
struct plock_op *op = NULL;
if (count < sizeof(info))
@@ -315,7 +349,7 @@ static ssize_t dev_read(struct file *file, char __user *u, size_t count,
static ssize_t dev_write(struct file *file, const char __user *u, size_t count,
loff_t *ppos)
{
- struct gdlm_plock_info info;
+ struct dlm_plock_info info;
struct plock_op *op;
int found = 0;
@@ -345,12 +379,12 @@ static ssize_t dev_write(struct file *file, const char __user *u, size_t count,
struct plock_xop *xop;
xop = (struct plock_xop *)op;
if (xop->callback)
- count = gdlm_plock_callback(op);
+ count = dlm_plock_callback(op);
else
wake_up(&recv_wq);
} else
- printk(KERN_INFO "gdlm dev_write no op %x %llx\n", info.fsid,
- (unsigned long long)info.number);
+ log_print("dev_write no op %x %llx", info.fsid,
+ (unsigned long long)info.number);
return count;
}
@@ -377,11 +411,11 @@ static const struct file_operations dev_fops = {
static struct miscdevice plock_dev_misc = {
.minor = MISC_DYNAMIC_MINOR,
- .name = GDLM_PLOCK_MISC_NAME,
+ .name = DLM_PLOCK_MISC_NAME,
.fops = &dev_fops
};
-int gdlm_plock_init(void)
+int dlm_plock_init(void)
{
int rv;
@@ -393,14 +427,13 @@ int gdlm_plock_init(void)
rv = misc_register(&plock_dev_misc);
if (rv)
- printk(KERN_INFO "gdlm_plock_init: misc_register failed %d",
- rv);
+ log_print("dlm_plock_init: misc_register failed %d", rv);
return rv;
}
-void gdlm_plock_exit(void)
+void dlm_plock_exit(void)
{
if (misc_deregister(&plock_dev_misc) < 0)
- printk(KERN_INFO "gdlm_plock_exit: misc_deregister failed");
+ log_print("dlm_plock_exit: misc_deregister failed");
}
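
As an illustrative sketch (not part of this patch): with the plock code now living in the DLM and exported as dlm_posix_lock()/dlm_posix_unlock()/dlm_posix_get(), any cluster filesystem that owns a dlm_lockspace_t can forward its ->lock() file operation straight to the DLM. The "examplefs" names below are invented, and keying the plock on the inode number and stashing the lockspace in s_fs_info are assumptions made only for illustration.

#include <linux/fs.h>
#include <linux/dlm.h>
#include <linux/dlm_plock.h>

struct examplefs_sb {
	dlm_lockspace_t *ls;	/* from dlm_new_lockspace() at mount time */
};

static int examplefs_lock(struct file *file, int cmd, struct file_lock *fl)
{
	struct examplefs_sb *sbi = file->f_path.dentry->d_sb->s_fs_info;
	u64 number = file->f_path.dentry->d_inode->i_ino;	/* plock key */

	if (IS_GETLK(cmd))
		return dlm_posix_get(sbi->ls, number, file, fl);
	if (fl->fl_type == F_UNLCK)
		return dlm_posix_unlock(sbi->ls, number, file, fl);
	return dlm_posix_lock(sbi->ls, number, file, cmd, fl);
}

The gdlm_plock()/gdlm_punlock()/gdlm_plock_get() wrappers added to fs/gfs2/locking/dlm/mount.c later in this series follow the same pattern, translating GFS2's lm_lockops calls into these three exports.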
diff --git a/fs/dlm/recoverd.c b/fs/dlm/recoverd.c
index 997f9531d59..fd677c8c3d3 100644
--- a/fs/dlm/recoverd.c
+++ b/fs/dlm/recoverd.c
@@ -257,6 +257,7 @@ static void do_ls_recovery(struct dlm_ls *ls)
if (rv) {
ls_recover(ls, rv);
kfree(rv->nodeids);
+ kfree(rv->new);
kfree(rv);
}
}
diff --git a/fs/exec.c b/fs/exec.c
index 54a0a557b67..b152029f18f 100644
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -953,7 +953,6 @@ int flush_old_exec(struct linux_binprm * bprm)
{
char * name;
int i, ch, retval;
- struct files_struct *files;
char tcomm[sizeof(current->comm)];
/*
@@ -965,26 +964,15 @@ int flush_old_exec(struct linux_binprm * bprm)
goto out;
/*
- * Make sure we have private file handles. Ask the
- * fork helper to do the work for us and the exit
- * helper to do the cleanup of the old one.
- */
- files = current->files; /* refcounted so safe to hold */
- retval = unshare_files();
- if (retval)
- goto out;
- /*
* Release all of the old mmap stuff
*/
retval = exec_mmap(bprm->mm);
if (retval)
- goto mmap_failed;
+ goto out;
bprm->mm = NULL; /* We're using it now */
/* This is the point of no return */
- put_files_struct(files);
-
current->sas_ss_sp = current->sas_ss_size = 0;
if (current->euid == current->uid && current->egid == current->gid)
@@ -1034,8 +1022,6 @@ int flush_old_exec(struct linux_binprm * bprm)
return 0;
-mmap_failed:
- reset_files_struct(current, files);
out:
return retval;
}
@@ -1283,12 +1269,17 @@ int do_execve(char * filename,
struct linux_binprm *bprm;
struct file *file;
unsigned long env_p;
+ struct files_struct *displaced;
int retval;
+ retval = unshare_files(&displaced);
+ if (retval)
+ goto out_ret;
+
retval = -ENOMEM;
bprm = kzalloc(sizeof(*bprm), GFP_KERNEL);
if (!bprm)
- goto out_ret;
+ goto out_files;
file = open_exec(filename);
retval = PTR_ERR(file);
@@ -1343,6 +1334,8 @@ int do_execve(char * filename,
security_bprm_free(bprm);
acct_update_integrals(current);
kfree(bprm);
+ if (displaced)
+ put_files_struct(displaced);
return retval;
}
@@ -1363,6 +1356,9 @@ out_file:
out_kfree:
kfree(bprm);
+out_files:
+ if (displaced)
+ reset_files_struct(displaced);
out_ret:
return retval;
}
diff --git a/fs/fcntl.c b/fs/fcntl.c
index e632da761fc..3f3ac630ccd 100644
--- a/fs/fcntl.c
+++ b/fs/fcntl.c
@@ -55,14 +55,16 @@ static int get_close_on_exec(unsigned int fd)
* file_lock held for write.
*/
-static int locate_fd(struct files_struct *files,
- struct file *file, unsigned int orig_start)
+static int locate_fd(unsigned int orig_start, int cloexec)
{
+ struct files_struct *files = current->files;
unsigned int newfd;
unsigned int start;
int error;
struct fdtable *fdt;
+ spin_lock(&files->file_lock);
+
error = -EINVAL;
if (orig_start >= current->signal->rlim[RLIMIT_NOFILE].rlim_cur)
goto out;
@@ -97,42 +99,28 @@ repeat:
if (error)
goto repeat;
- /*
- * We reacquired files_lock, so we are safe as long as
- * we reacquire the fdtable pointer and use it while holding
- * the lock, no one can free it during that time.
- */
if (start <= files->next_fd)
files->next_fd = newfd + 1;
+ FD_SET(newfd, fdt->open_fds);
+ if (cloexec)
+ FD_SET(newfd, fdt->close_on_exec);
+ else
+ FD_CLR(newfd, fdt->close_on_exec);
error = newfd;
-
+
out:
+ spin_unlock(&files->file_lock);
return error;
}
static int dupfd(struct file *file, unsigned int start, int cloexec)
{
- struct files_struct * files = current->files;
- struct fdtable *fdt;
- int fd;
-
- spin_lock(&files->file_lock);
- fd = locate_fd(files, file, start);
- if (fd >= 0) {
- /* locate_fd() may have expanded fdtable, load the ptr */
- fdt = files_fdtable(files);
- FD_SET(fd, fdt->open_fds);
- if (cloexec)
- FD_SET(fd, fdt->close_on_exec);
- else
- FD_CLR(fd, fdt->close_on_exec);
- spin_unlock(&files->file_lock);
+ int fd = locate_fd(start, cloexec);
+ if (fd >= 0)
fd_install(fd, file);
- } else {
- spin_unlock(&files->file_lock);
+ else
fput(file);
- }
return fd;
}
diff --git a/fs/fuse/inode.c b/fs/fuse/inode.c
index 033f7bdd47e..4df34da2284 100644
--- a/fs/fuse/inode.c
+++ b/fs/fuse/inode.c
@@ -242,10 +242,9 @@ struct inode *fuse_iget(struct super_block *sb, unsigned long nodeid,
return inode;
}
-static void fuse_umount_begin(struct vfsmount *vfsmnt, int flags)
+static void fuse_umount_begin(struct super_block *sb)
{
- if (flags & MNT_FORCE)
- fuse_abort_conn(get_fuse_conn_super(vfsmnt->mnt_sb));
+ fuse_abort_conn(get_fuse_conn_super(sb));
}
static void fuse_send_destroy(struct fuse_conn *fc)
diff --git a/fs/gfs2/locking/dlm/Makefile b/fs/gfs2/locking/dlm/Makefile
index 89b93b6b45c..2609bb6cd01 100644
--- a/fs/gfs2/locking/dlm/Makefile
+++ b/fs/gfs2/locking/dlm/Makefile
@@ -1,3 +1,3 @@
obj-$(CONFIG_GFS2_FS_LOCKING_DLM) += lock_dlm.o
-lock_dlm-y := lock.o main.o mount.o sysfs.o thread.o plock.o
+lock_dlm-y := lock.o main.o mount.o sysfs.o thread.o
diff --git a/fs/gfs2/locking/dlm/lock_dlm.h b/fs/gfs2/locking/dlm/lock_dlm.h
index 58fcf8c5bf3..a243cf69c54 100644
--- a/fs/gfs2/locking/dlm/lock_dlm.h
+++ b/fs/gfs2/locking/dlm/lock_dlm.h
@@ -25,6 +25,7 @@
#include <net/sock.h>
#include <linux/dlm.h>
+#include <linux/dlm_plock.h>
#include <linux/lm_interface.h>
/*
@@ -173,17 +174,6 @@ void gdlm_cancel(void *);
int gdlm_hold_lvb(void *, char **);
void gdlm_unhold_lvb(void *, char *);
-/* plock.c */
-
-int gdlm_plock_init(void);
-void gdlm_plock_exit(void);
-int gdlm_plock(void *, struct lm_lockname *, struct file *, int,
- struct file_lock *);
-int gdlm_plock_get(void *, struct lm_lockname *, struct file *,
- struct file_lock *);
-int gdlm_punlock(void *, struct lm_lockname *, struct file *,
- struct file_lock *);
-
/* mount.c */
extern const struct lm_lockops gdlm_ops;
diff --git a/fs/gfs2/locking/dlm/main.c b/fs/gfs2/locking/dlm/main.c
index 36a225850bd..b9a03a7ff80 100644
--- a/fs/gfs2/locking/dlm/main.c
+++ b/fs/gfs2/locking/dlm/main.c
@@ -28,13 +28,6 @@ static int __init init_lock_dlm(void)
return error;
}
- error = gdlm_plock_init();
- if (error) {
- gdlm_sysfs_exit();
- gfs2_unregister_lockproto(&gdlm_ops);
- return error;
- }
-
printk(KERN_INFO
"Lock_DLM (built %s %s) installed\n", __DATE__, __TIME__);
return 0;
@@ -42,7 +35,6 @@ static int __init init_lock_dlm(void)
static void __exit exit_lock_dlm(void)
{
- gdlm_plock_exit();
gdlm_sysfs_exit();
gfs2_unregister_lockproto(&gdlm_ops);
}
diff --git a/fs/gfs2/locking/dlm/mount.c b/fs/gfs2/locking/dlm/mount.c
index f2efff42422..470bdf650b5 100644
--- a/fs/gfs2/locking/dlm/mount.c
+++ b/fs/gfs2/locking/dlm/mount.c
@@ -236,6 +236,27 @@ static void gdlm_withdraw(void *lockspace)
gdlm_kobject_release(ls);
}
+static int gdlm_plock(void *lockspace, struct lm_lockname *name,
+ struct file *file, int cmd, struct file_lock *fl)
+{
+ struct gdlm_ls *ls = lockspace;
+ return dlm_posix_lock(ls->dlm_lockspace, name->ln_number, file, cmd, fl);
+}
+
+static int gdlm_punlock(void *lockspace, struct lm_lockname *name,
+ struct file *file, struct file_lock *fl)
+{
+ struct gdlm_ls *ls = lockspace;
+ return dlm_posix_unlock(ls->dlm_lockspace, name->ln_number, file, fl);
+}
+
+static int gdlm_plock_get(void *lockspace, struct lm_lockname *name,
+ struct file *file, struct file_lock *fl)
+{
+ struct gdlm_ls *ls = lockspace;
+ return dlm_posix_get(ls->dlm_lockspace, name->ln_number, file, fl);
+}
+
const struct lm_lockops gdlm_ops = {
.lm_proto_name = "lock_dlm",
.lm_mount = gdlm_mount,
diff --git a/fs/internal.h b/fs/internal.h
index 392e8ccd6fc..80aa9a02337 100644
--- a/fs/internal.h
+++ b/fs/internal.h
@@ -43,3 +43,14 @@ extern void __init chrdev_init(void);
* namespace.c
*/
extern int copy_mount_options(const void __user *, unsigned long *);
+
+extern void free_vfsmnt(struct vfsmount *);
+extern struct vfsmount *alloc_vfsmnt(const char *);
+extern struct vfsmount *__lookup_mnt(struct vfsmount *, struct dentry *, int);
+extern void mnt_set_mountpoint(struct vfsmount *, struct dentry *,
+ struct vfsmount *);
+extern void release_mounts(struct list_head *);
+extern void umount_tree(struct vfsmount *, int, struct list_head *);
+extern struct vfsmount *copy_tree(struct vfsmount *, struct dentry *, int);
+
+extern void __init mnt_init(void);
diff --git a/fs/jffs2/README.Locking b/fs/jffs2/README.Locking
index d14d5a4dc5a..3ea36554107 100644
--- a/fs/jffs2/README.Locking
+++ b/fs/jffs2/README.Locking
@@ -14,7 +14,7 @@ be fairly close.
alloc_sem
---------
-The alloc_sem is a per-filesystem semaphore, used primarily to ensure
+The alloc_sem is a per-filesystem mutex, used primarily to ensure
contiguous allocation of space on the medium. It is automatically
obtained during space allocations (jffs2_reserve_space()) and freed
upon write completion (jffs2_complete_reservation()). Note that
@@ -41,10 +41,10 @@ if the wbuf is currently holding any data is permitted, though.
Ordering constraints: See f->sem.
- File Semaphore f->sem
+ File Mutex f->sem
---------------------
-This is the JFFS2-internal equivalent of the inode semaphore i->i_sem.
+This is the JFFS2-internal equivalent of the inode mutex i->i_sem.
It protects the contents of the jffs2_inode_info private inode data,
including the linked list of node fragments (but see the notes below on
erase_completion_lock), etc.
@@ -60,14 +60,14 @@ lead to deadlock, unless we played games with unlocking the i_sem
before calling the space allocation functions.
Instead of playing such games, we just have an extra internal
-semaphore, which is obtained by the garbage collection code and also
+mutex, which is obtained by the garbage collection code and also
by the normal file system code _after_ allocation of space.
Ordering constraints:
1. Never attempt to allocate space or lock alloc_sem with
any f->sem held.
- 2. Never attempt to lock two file semaphores in one thread.
+ 2. Never attempt to lock two file mutexes in one thread.
No ordering rules have been made for doing so.
@@ -86,8 +86,8 @@ a simple spin_lock() rather than spin_lock_bh().
Note that the per-inode list of physical nodes (f->nodes) is a special
case. Any changes to _valid_ nodes (i.e. ->flash_offset & 1 == 0) in
-the list are protected by the file semaphore f->sem. But the erase
-code may remove _obsolete_ nodes from the list while holding only the
+the list are protected by the file mutex f->sem. But the erase code
+may remove _obsolete_ nodes from the list while holding only the
erase_completion_lock. So you can walk the list only while holding the
erase_completion_lock, and can drop the lock temporarily mid-walk as
long as the pointer you're holding is to a _valid_ node, not an
@@ -124,10 +124,10 @@ Ordering constraints:
erase_free_sem
--------------
-This semaphore is only used by the erase code which frees obsolete
-node references and the jffs2_garbage_collect_deletion_dirent()
-function. The latter function on NAND flash must read _obsolete_ nodes
-to determine whether the 'deletion dirent' under consideration can be
+This mutex is only used by the erase code which frees obsolete node
+references and the jffs2_garbage_collect_deletion_dirent() function.
+The latter function on NAND flash must read _obsolete_ nodes to
+determine whether the 'deletion dirent' under consideration can be
discarded or whether it is still required to show that an inode has
been unlinked. Because reading from the flash may sleep, the
erase_completion_lock cannot be held, so an alternative, more
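
As an illustrative sketch (not part of this patch), the ordering rules above look like this in practice: reserve space first, which acquires c->alloc_sem, and only then take the per-inode f->sem. The reservation size and summary argument below are placeholders, with jffs2_reserve_space() used as it is called elsewhere in JFFS2.

#include "nodelist.h"

static void example_write_ordering(struct jffs2_sb_info *c,
				   struct jffs2_inode_info *f)
{
	uint32_t alloclen;

	/* 1. Reserve space; this takes and holds c->alloc_sem. */
	if (jffs2_reserve_space(c, 128, &alloclen, ALLOC_NORMAL,
				JFFS2_SUMMARY_INODE_SIZE))
		return;

	/* 2. Only now take the per-inode mutex (rule 1 above). */
	mutex_lock(&f->sem);
	/* ... build and write the node ... */
	mutex_unlock(&f->sem);

	/* 3. Complete the reservation, which drops c->alloc_sem. */
	jffs2_complete_reservation(c);
}

Note that the reverse order (taking alloc_sem while holding f->sem) is exactly the deadlock the text above warns against.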
diff --git a/fs/jffs2/build.c b/fs/jffs2/build.c
index 722a6b68295..d58f845ccb8 100644
--- a/fs/jffs2/build.c
+++ b/fs/jffs2/build.c
@@ -345,6 +345,7 @@ int jffs2_do_mount_fs(struct jffs2_sb_info *c)
INIT_LIST_HEAD(&c->dirty_list);
INIT_LIST_HEAD(&c->erasable_list);
INIT_LIST_HEAD(&c->erasing_list);
+ INIT_LIST_HEAD(&c->erase_checking_list);
INIT_LIST_HEAD(&c->erase_pending_list);
INIT_LIST_HEAD(&c->erasable_pending_wbuf_list);
INIT_LIST_HEAD(&c->erase_complete_list);
diff --git a/fs/jffs2/debug.c b/fs/jffs2/debug.c
index 3a32c64ed49..5544d31c066 100644
--- a/fs/jffs2/debug.c
+++ b/fs/jffs2/debug.c
@@ -62,9 +62,9 @@ __jffs2_dbg_acct_sanity_check(struct jffs2_sb_info *c,
void
__jffs2_dbg_fragtree_paranoia_check(struct jffs2_inode_info *f)
{
- down(&f->sem);
+ mutex_lock(&f->sem);
__jffs2_dbg_fragtree_paranoia_check_nolock(f);
- up(&f->sem);
+ mutex_unlock(&f->sem);
}
void
@@ -153,6 +153,139 @@ __jffs2_dbg_prewrite_paranoia_check(struct jffs2_sb_info *c,
kfree(buf);
}
+void __jffs2_dbg_superblock_counts(struct jffs2_sb_info *c)
+{
+ struct jffs2_eraseblock *jeb;
+ uint32_t free = 0, dirty = 0, used = 0, wasted = 0,
+ erasing = 0, bad = 0, unchecked = 0;
+ int nr_counted = 0;
+ int dump = 0;
+
+ if (c->gcblock) {
+ nr_counted++;
+ free += c->gcblock->free_size;
+ dirty += c->gcblock->dirty_size;
+ used += c->gcblock->used_size;
+ wasted += c->gcblock->wasted_size;
+ unchecked += c->gcblock->unchecked_size;
+ }
+ if (c->nextblock) {
+ nr_counted++;
+ free += c->nextblock->free_size;
+ dirty += c->nextblock->dirty_size;
+ used += c->nextblock->used_size;
+ wasted += c->nextblock->wasted_size;
+ unchecked += c->nextblock->unchecked_size;
+ }
+ list_for_each_entry(jeb, &c->clean_list, list) {
+ nr_counted++;
+ free += jeb->free_size;
+ dirty += jeb->dirty_size;
+ used += jeb->used_size;
+ wasted += jeb->wasted_size;
+ unchecked += jeb->unchecked_size;
+ }
+ list_for_each_entry(jeb, &c->very_dirty_list, list) {
+ nr_counted++;
+ free += jeb->free_size;
+ dirty += jeb->dirty_size;
+ used += jeb->used_size;
+ wasted += jeb->wasted_size;
+ unchecked += jeb->unchecked_size;
+ }
+ list_for_each_entry(jeb, &c->dirty_list, list) {
+ nr_counted++;
+ free += jeb->free_size;
+ dirty += jeb->dirty_size;
+ used += jeb->used_size;
+ wasted += jeb->wasted_size;
+ unchecked += jeb->unchecked_size;
+ }
+ list_for_each_entry(jeb, &c->erasable_list, list) {
+ nr_counted++;
+ free += jeb->free_size;
+ dirty += jeb->dirty_size;
+ used += jeb->used_size;
+ wasted += jeb->wasted_size;
+ unchecked += jeb->unchecked_size;
+ }
+ list_for_each_entry(jeb, &c->erasable_pending_wbuf_list, list) {
+ nr_counted++;
+ free += jeb->free_size;
+ dirty += jeb->dirty_size;
+ used += jeb->used_size;
+ wasted += jeb->wasted_size;
+ unchecked += jeb->unchecked_size;
+ }
+ list_for_each_entry(jeb, &c->erase_pending_list, list) {
+ nr_counted++;
+ free += jeb->free_size;
+ dirty += jeb->dirty_size;
+ used += jeb->used_size;
+ wasted += jeb->wasted_size;
+ unchecked += jeb->unchecked_size;
+ }
+ list_for_each_entry(jeb, &c->free_list, list) {
+ nr_counted++;
+ free += jeb->free_size;
+ dirty += jeb->dirty_size;
+ used += jeb->used_size;
+ wasted += jeb->wasted_size;
+ unchecked += jeb->unchecked_size;
+ }
+ list_for_each_entry(jeb, &c->bad_used_list, list) {
+ nr_counted++;
+ free += jeb->free_size;
+ dirty += jeb->dirty_size;
+ used += jeb->used_size;
+ wasted += jeb->wasted_size;
+ unchecked += jeb->unchecked_size;
+ }
+
+ list_for_each_entry(jeb, &c->erasing_list, list) {
+ nr_counted++;
+ erasing += c->sector_size;
+ }
+ list_for_each_entry(jeb, &c->erase_checking_list, list) {
+ nr_counted++;
+ erasing += c->sector_size;
+ }
+ list_for_each_entry(jeb, &c->erase_complete_list, list) {
+ nr_counted++;
+ erasing += c->sector_size;
+ }
+ list_for_each_entry(jeb, &c->bad_list, list) {
+ nr_counted++;
+ bad += c->sector_size;
+ }
+
+#define check(sz) \
+ if (sz != c->sz##_size) { \
+ printk(KERN_WARNING #sz "_size mismatch counted 0x%x, c->" #sz "_size 0x%x\n", \
+ sz, c->sz##_size); \
+ dump = 1; \
+ }
+ check(free);
+ check(dirty);
+ check(used);
+ check(wasted);
+ check(unchecked);
+ check(bad);
+ check(erasing);
+#undef check
+
+ if (nr_counted != c->nr_blocks) {
+ printk(KERN_WARNING "%s counted only 0x%x blocks of 0x%x. Where are the others?\n",
+ __func__, nr_counted, c->nr_blocks);
+ dump = 1;
+ }
+
+ if (dump) {
+ __jffs2_dbg_dump_block_lists_nolock(c);
+ BUG();
+ }
+}
+
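
The check() macro in the new __jffs2_dbg_superblock_counts() above uses stringizing and token pasting to compare each locally counted total with the matching superblock field; for example, check(free) expands to the equivalent of:

	if (free != c->free_size) {
		printk(KERN_WARNING "free_size mismatch counted 0x%x, c->free_size 0x%x\n",
		       free, c->free_size);
		dump = 1;
	}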
/*
* Check the space accounting and node_ref list correctness for the JFFS2 erasable block 'jeb'.
*/
@@ -229,6 +362,9 @@ __jffs2_dbg_acct_paranoia_check_nolock(struct jffs2_sb_info *c,
}
#endif
+ if (!(c->flags & (JFFS2_SB_FLAG_BUILDING|JFFS2_SB_FLAG_SCANNING)))
+ __jffs2_dbg_superblock_counts(c);
+
return;
error:
@@ -268,7 +404,10 @@ __jffs2_dbg_dump_node_refs_nolock(struct jffs2_sb_info *c,
printk(JFFS2_DBG);
for (ref = jeb->first_node; ; ref = ref_next(ref)) {
- printk("%#08x(%#x)", ref_offset(ref), ref->__totlen);
+ printk("%#08x", ref_offset(ref));
+#ifdef TEST_TOTLEN
+ printk("(%x)", ref->__totlen);
+#endif
if (ref_next(ref))
printk("->");
else
@@ -447,6 +586,21 @@ __jffs2_dbg_dump_block_lists_nolock(struct jffs2_sb_info *c)
}
}
}
+ if (list_empty(&c->erase_checking_list)) {
+ printk(JFFS2_DBG "erase_checking_list: empty\n");
+ } else {
+ struct list_head *this;
+
+ list_for_each(this, &c->erase_checking_list) {
+ struct jffs2_eraseblock *jeb = list_entry(this, struct jffs2_eraseblock, list);
+
+ if (!(jeb->used_size == 0 && jeb->dirty_size == 0 && jeb->wasted_size == 0)) {
+ printk(JFFS2_DBG "erase_checking_list: %#08x (used %#08x, dirty %#08x, wasted %#08x, unchecked %#08x, free %#08x)\n",
+ jeb->offset, jeb->used_size, jeb->dirty_size, jeb->wasted_size,
+ jeb->unchecked_size, jeb->free_size);
+ }
+ }
+ }
if (list_empty(&c->erase_pending_list)) {
printk(JFFS2_DBG "erase_pending_list: empty\n");
@@ -532,9 +686,9 @@ __jffs2_dbg_dump_block_lists_nolock(struct jffs2_sb_info *c)
void
__jffs2_dbg_dump_fragtree(struct jffs2_inode_info *f)
{
- down(&f->sem);
+ mutex_lock(&f->sem);
jffs2_dbg_dump_fragtree_nolock(f);
- up(&f->sem);
+ mutex_unlock(&f->sem);
}
void
diff --git a/fs/jffs2/debug.h b/fs/jffs2/debug.h
index 4130adabd76..9645275023e 100644
--- a/fs/jffs2/debug.h
+++ b/fs/jffs2/debug.h
@@ -38,6 +38,7 @@
#if CONFIG_JFFS2_FS_DEBUG > 1
#define JFFS2_DBG_FRAGTREE2_MESSAGES
+#define JFFS2_DBG_READINODE2_MESSAGES
#define JFFS2_DBG_MEMALLOC_MESSAGES
#endif
@@ -115,6 +116,11 @@
#else
#define dbg_readinode(fmt, ...)
#endif
+#ifdef JFFS2_DBG_READINODE2_MESSAGES
+#define dbg_readinode2(fmt, ...) JFFS2_DEBUG(fmt, ##__VA_ARGS__)
+#else
+#define dbg_readinode2(fmt, ...)
+#endif
/* Fragtree build debugging messages */
#ifdef JFFS2_DBG_FRAGTREE_MESSAGES
diff --git a/fs/jffs2/dir.c b/fs/jffs2/dir.c
index f948f7e6ec8..c63e7a96af0 100644
--- a/fs/jffs2/dir.c
+++ b/fs/jffs2/dir.c
@@ -86,7 +86,7 @@ static struct dentry *jffs2_lookup(struct inode *dir_i, struct dentry *target,
dir_f = JFFS2_INODE_INFO(dir_i);
c = JFFS2_SB_INFO(dir_i->i_sb);
- down(&dir_f->sem);
+ mutex_lock(&dir_f->sem);
/* NB: The 2.2 backport will need to explicitly check for '.' and '..' here */
for (fd_list = dir_f->dents; fd_list && fd_list->nhash <= target->d_name.hash; fd_list = fd_list->next) {
@@ -99,7 +99,7 @@ static struct dentry *jffs2_lookup(struct inode *dir_i, struct dentry *target,
}
if (fd)
ino = fd->ino;
- up(&dir_f->sem);
+ mutex_unlock(&dir_f->sem);
if (ino) {
inode = jffs2_iget(dir_i->i_sb, ino);
if (IS_ERR(inode)) {
@@ -146,7 +146,7 @@ static int jffs2_readdir(struct file *filp, void *dirent, filldir_t filldir)
}
curofs=1;
- down(&f->sem);
+ mutex_lock(&f->sem);
for (fd = f->dents; fd; fd = fd->next) {
curofs++;
@@ -166,7 +166,7 @@ static int jffs2_readdir(struct file *filp, void *dirent, filldir_t filldir)
break;
offset++;
}
- up(&f->sem);
+ mutex_unlock(&f->sem);
out:
filp->f_pos = offset;
return 0;
@@ -275,9 +275,9 @@ static int jffs2_link (struct dentry *old_dentry, struct inode *dir_i, struct de
ret = jffs2_do_link(c, dir_f, f->inocache->ino, type, dentry->d_name.name, dentry->d_name.len, now);
if (!ret) {
- down(&f->sem);
+ mutex_lock(&f->sem);
old_dentry->d_inode->i_nlink = ++f->inocache->nlink;
- up(&f->sem);
+ mutex_unlock(&f->sem);
d_instantiate(dentry, old_dentry->d_inode);
dir_i->i_mtime = dir_i->i_ctime = ITIME(now);
atomic_inc(&old_dentry->d_inode->i_count);
@@ -351,7 +351,7 @@ static int jffs2_symlink (struct inode *dir_i, struct dentry *dentry, const char
if (IS_ERR(fn)) {
/* Eeek. Wave bye bye */
- up(&f->sem);
+ mutex_unlock(&f->sem);
jffs2_complete_reservation(c);
jffs2_clear_inode(inode);
return PTR_ERR(fn);
@@ -361,7 +361,7 @@ static int jffs2_symlink (struct inode *dir_i, struct dentry *dentry, const char
f->target = kmalloc(targetlen + 1, GFP_KERNEL);
if (!f->target) {
printk(KERN_WARNING "Can't allocate %d bytes of memory\n", targetlen + 1);
- up(&f->sem);
+ mutex_unlock(&f->sem);
jffs2_complete_reservation(c);
jffs2_clear_inode(inode);
return -ENOMEM;
@@ -374,7 +374,7 @@ static int jffs2_symlink (struct inode *dir_i, struct dentry *dentry, const char
obsoleted by the first data write
*/
f->metadata = fn;
- up(&f->sem);
+ mutex_unlock(&f->sem);
jffs2_complete_reservation(c);
@@ -406,7 +406,7 @@ static int jffs2_symlink (struct inode *dir_i, struct dentry *dentry, const char
}
dir_f = JFFS2_INODE_INFO(dir_i);
- down(&dir_f->sem);
+ mutex_lock(&dir_f->sem);
rd->magic = cpu_to_je16(JFFS2_MAGIC_BITMASK);
rd->nodetype = cpu_to_je16(JFFS2_NODETYPE_DIRENT);
@@ -429,7 +429,7 @@ static int jffs2_symlink (struct inode *dir_i, struct dentry *dentry, const char
as if it were the final unlink() */
jffs2_complete_reservation(c);
jffs2_free_raw_dirent(rd);
- up(&dir_f->sem);
+ mutex_unlock(&dir_f->sem);
jffs2_clear_inode(inode);
return PTR_ERR(fd);
}
@@ -442,7 +442,7 @@ static int jffs2_symlink (struct inode *dir_i, struct dentry *dentry, const char
one if necessary. */
jffs2_add_fd_to_list(c, fd, &dir_f->dents);
- up(&dir_f->sem);
+ mutex_unlock(&dir_f->sem);
jffs2_complete_reservation(c);
d_instantiate(dentry, inode);
@@ -507,7 +507,7 @@ static int jffs2_mkdir (struct inode *dir_i, struct dentry *dentry, int mode)
if (IS_ERR(fn)) {
/* Eeek. Wave bye bye */
- up(&f->sem);
+ mutex_unlock(&f->sem);
jffs2_complete_reservation(c);
jffs2_clear_inode(inode);
return PTR_ERR(fn);
@@ -516,7 +516,7 @@ static int jffs2_mkdir (struct inode *dir_i, struct dentry *dentry, int mode)
obsoleted by the first data write
*/
f->metadata = fn;
- up(&f->sem);
+ mutex_unlock(&f->sem);
jffs2_complete_reservation(c);
@@ -548,7 +548,7 @@ static int jffs2_mkdir (struct inode *dir_i, struct dentry *dentry, int mode)
}
dir_f = JFFS2_INODE_INFO(dir_i);
- down(&dir_f->sem);
+ mutex_lock(&dir_f->sem);
rd->magic = cpu_to_je16(JFFS2_MAGIC_BITMASK);
rd->nodetype = cpu_to_je16(JFFS2_NODETYPE_DIRENT);
@@ -571,7 +571,7 @@ static int jffs2_mkdir (struct inode *dir_i, struct dentry *dentry, int mode)
as if it were the final unlink() */
jffs2_complete_reservation(c);
jffs2_free_raw_dirent(rd);
- up(&dir_f->sem);
+ mutex_unlock(&dir_f->sem);
jffs2_clear_inode(inode);
return PTR_ERR(fd);
}
@@ -585,7 +585,7 @@ static int jffs2_mkdir (struct inode *dir_i, struct dentry *dentry, int mode)
one if necessary. */
jffs2_add_fd_to_list(c, fd, &dir_f->dents);
- up(&dir_f->sem);
+ mutex_unlock(&dir_f->sem);
jffs2_complete_reservation(c);
d_instantiate(dentry, inode);
@@ -673,7 +673,7 @@ static int jffs2_mknod (struct inode *dir_i, struct dentry *dentry, int mode, de
if (IS_ERR(fn)) {
/* Eeek. Wave bye bye */
- up(&f->sem);
+ mutex_unlock(&f->sem);
jffs2_complete_reservation(c);
jffs2_clear_inode(inode);
return PTR_ERR(fn);
@@ -682,7 +682,7 @@ static int jffs2_mknod (struct inode *dir_i, struct dentry *dentry, int mode, de
obsoleted by the first data write
*/
f->metadata = fn;
- up(&f->sem);
+ mutex_unlock(&f->sem);
jffs2_complete_reservation(c);
@@ -714,7 +714,7 @@ static int jffs2_mknod (struct inode *dir_i, struct dentry *dentry, int mode, de
}
dir_f = JFFS2_INODE_INFO(dir_i);
- down(&dir_f->sem);
+ mutex_lock(&dir_f->sem);
rd->magic = cpu_to_je16(JFFS2_MAGIC_BITMASK);
rd->nodetype = cpu_to_je16(JFFS2_NODETYPE_DIRENT);
@@ -740,7 +740,7 @@ static int jffs2_mknod (struct inode *dir_i, struct dentry *dentry, int mode, de
as if it were the final unlink() */
jffs2_complete_reservation(c);
jffs2_free_raw_dirent(rd);
- up(&dir_f->sem);
+ mutex_unlock(&dir_f->sem);
jffs2_clear_inode(inode);
return PTR_ERR(fd);
}
@@ -753,7 +753,7 @@ static int jffs2_mknod (struct inode *dir_i, struct dentry *dentry, int mode, de
one if necessary. */
jffs2_add_fd_to_list(c, fd, &dir_f->dents);
- up(&dir_f->sem);
+ mutex_unlock(&dir_f->sem);
jffs2_complete_reservation(c);
d_instantiate(dentry, inode);
@@ -780,14 +780,14 @@ static int jffs2_rename (struct inode *old_dir_i, struct dentry *old_dentry,
if (S_ISDIR(new_dentry->d_inode->i_mode)) {
struct jffs2_full_dirent *fd;
- down(&victim_f->sem);
+ mutex_lock(&victim_f->sem);
for (fd = victim_f->dents; fd; fd = fd->next) {
if (fd->ino) {
- up(&victim_f->sem);
+ mutex_unlock(&victim_f->sem);
return -ENOTEMPTY;
}
}
- up(&victim_f->sem);
+ mutex_unlock(&victim_f->sem);
}
}
@@ -816,9 +816,9 @@ static int jffs2_rename (struct inode *old_dir_i, struct dentry *old_dentry,
/* Don't oops if the victim was a dirent pointing to an
inode which didn't exist. */
if (victim_f->inocache) {
- down(&victim_f->sem);
+ mutex_lock(&victim_f->sem);
victim_f->inocache->nlink--;
- up(&victim_f->sem);
+ mutex_unlock(&victim_f->sem);
}
}
@@ -836,11 +836,11 @@ static int jffs2_rename (struct inode *old_dir_i, struct dentry *old_dentry,
if (ret) {
/* Oh shit. We really ought to make a single node which can do both atomically */
struct jffs2_inode_info *f = JFFS2_INODE_INFO(old_dentry->d_inode);
- down(&f->sem);
+ mutex_lock(&f->sem);
inc_nlink(old_dentry->d_inode);
if (f->inocache)
f->inocache->nlink++;
- up(&f->sem);
+ mutex_unlock(&f->sem);
printk(KERN_NOTICE "jffs2_rename(): Link succeeded, unlink failed (err %d). You now have a hard link\n", ret);
/* Might as well let the VFS know */
diff --git a/fs/jffs2/erase.c b/fs/jffs2/erase.c
index a1db9180633..25a640e566d 100644
--- a/fs/jffs2/erase.c
+++ b/fs/jffs2/erase.c
@@ -50,14 +50,14 @@ static void jffs2_erase_block(struct jffs2_sb_info *c,
instr = kmalloc(sizeof(struct erase_info) + sizeof(struct erase_priv_struct), GFP_KERNEL);
if (!instr) {
printk(KERN_WARNING "kmalloc for struct erase_info in jffs2_erase_block failed. Refiling block for later\n");
- down(&c->erase_free_sem);
+ mutex_lock(&c->erase_free_sem);
spin_lock(&c->erase_completion_lock);
list_move(&jeb->list, &c->erase_pending_list);
c->erasing_size -= c->sector_size;
c->dirty_size += c->sector_size;
jeb->dirty_size = c->sector_size;
spin_unlock(&c->erase_completion_lock);
- up(&c->erase_free_sem);
+ mutex_unlock(&c->erase_free_sem);
return;
}
@@ -84,14 +84,14 @@ static void jffs2_erase_block(struct jffs2_sb_info *c,
if (ret == -ENOMEM || ret == -EAGAIN) {
/* Erase failed immediately. Refile it on the list */
D1(printk(KERN_DEBUG "Erase at 0x%08x failed: %d. Refiling on erase_pending_list\n", jeb->offset, ret));
- down(&c->erase_free_sem);
+ mutex_lock(&c->erase_free_sem);
spin_lock(&c->erase_completion_lock);
list_move(&jeb->list, &c->erase_pending_list);
c->erasing_size -= c->sector_size;
c->dirty_size += c->sector_size;
jeb->dirty_size = c->sector_size;
spin_unlock(&c->erase_completion_lock);
- up(&c->erase_free_sem);
+ mutex_unlock(&c->erase_free_sem);
return;
}
@@ -107,7 +107,7 @@ void jffs2_erase_pending_blocks(struct jffs2_sb_info *c, int count)
{
struct jffs2_eraseblock *jeb;
- down(&c->erase_free_sem);
+ mutex_lock(&c->erase_free_sem);
spin_lock(&c->erase_completion_lock);
@@ -116,9 +116,9 @@ void jffs2_erase_pending_blocks(struct jffs2_sb_info *c, int count)
if (!list_empty(&c->erase_complete_list)) {
jeb = list_entry(c->erase_complete_list.next, struct jffs2_eraseblock, list);
- list_del(&jeb->list);
+ list_move(&jeb->list, &c->erase_checking_list);
spin_unlock(&c->erase_completion_lock);
- up(&c->erase_free_sem);
+ mutex_unlock(&c->erase_free_sem);
jffs2_mark_erased_block(c, jeb);
if (!--count) {
@@ -139,7 +139,7 @@ void jffs2_erase_pending_blocks(struct jffs2_sb_info *c, int count)
jffs2_free_jeb_node_refs(c, jeb);
list_add(&jeb->list, &c->erasing_list);
spin_unlock(&c->erase_completion_lock);
- up(&c->erase_free_sem);
+ mutex_unlock(&c->erase_free_sem);
jffs2_erase_block(c, jeb);
@@ -149,12 +149,12 @@ void jffs2_erase_pending_blocks(struct jffs2_sb_info *c, int count)
/* Be nice */
yield();
- down(&c->erase_free_sem);
+ mutex_lock(&c->erase_free_sem);
spin_lock(&c->erase_completion_lock);
}
spin_unlock(&c->erase_completion_lock);
- up(&c->erase_free_sem);
+ mutex_unlock(&c->erase_free_sem);
done:
D1(printk(KERN_DEBUG "jffs2_erase_pending_blocks completed\n"));
}
@@ -162,11 +162,11 @@ void jffs2_erase_pending_blocks(struct jffs2_sb_info *c, int count)
static void jffs2_erase_succeeded(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb)
{
D1(printk(KERN_DEBUG "Erase completed successfully at 0x%08x\n", jeb->offset));
- down(&c->erase_free_sem);
+ mutex_lock(&c->erase_free_sem);
spin_lock(&c->erase_completion_lock);
list_move_tail(&jeb->list, &c->erase_complete_list);
spin_unlock(&c->erase_completion_lock);
- up(&c->erase_free_sem);
+ mutex_unlock(&c->erase_free_sem);
/* Ensure that kupdated calls us again to mark them clean */
jffs2_erase_pending_trigger(c);
}
@@ -180,26 +180,26 @@ static void jffs2_erase_failed(struct jffs2_sb_info *c, struct jffs2_eraseblock
failed too many times. */
if (!jffs2_write_nand_badblock(c, jeb, bad_offset)) {
/* We'd like to give this block another try. */
- down(&c->erase_free_sem);
+ mutex_lock(&c->erase_free_sem);
spin_lock(&c->erase_completion_lock);
list_move(&jeb->list, &c->erase_pending_list);
c->erasing_size -= c->sector_size;
c->dirty_size += c->sector_size;
jeb->dirty_size = c->sector_size;
spin_unlock(&c->erase_completion_lock);
- up(&c->erase_free_sem);
+ mutex_unlock(&c->erase_free_sem);
return;
}
}
- down(&c->erase_free_sem);
+ mutex_lock(&c->erase_free_sem);
spin_lock(&c->erase_completion_lock);
c->erasing_size -= c->sector_size;
c->bad_size += c->sector_size;
list_move(&jeb->list, &c->bad_list);
c->nr_erasing_blocks--;
spin_unlock(&c->erase_completion_lock);
- up(&c->erase_free_sem);
+ mutex_unlock(&c->erase_free_sem);
wake_up(&c->erase_wait);
}
@@ -350,9 +350,11 @@ static int jffs2_block_check_erase(struct jffs2_sb_info *c, struct jffs2_erasebl
break;
} while(--retlen);
c->mtd->unpoint(c->mtd, ebuf, jeb->offset, c->sector_size);
- if (retlen)
+ if (retlen) {
printk(KERN_WARNING "Newly-erased block contained word 0x%lx at offset 0x%08tx\n",
*wordebuf, jeb->offset + c->sector_size-retlen*sizeof(*wordebuf));
+ return -EIO;
+ }
return 0;
}
do_flash_read:
@@ -373,10 +375,12 @@ static int jffs2_block_check_erase(struct jffs2_sb_info *c, struct jffs2_erasebl
ret = c->mtd->read(c->mtd, ofs, readlen, &retlen, ebuf);
if (ret) {
printk(KERN_WARNING "Read of newly-erased block at 0x%08x failed: %d. Putting on bad_list\n", ofs, ret);
+ ret = -EIO;
goto fail;
}
if (retlen != readlen) {
printk(KERN_WARNING "Short read from newly-erased block at 0x%08x. Wanted %d, got %zd\n", ofs, readlen, retlen);
+ ret = -EIO;
goto fail;
}
for (i=0; i<readlen; i += sizeof(unsigned long)) {
@@ -385,6 +389,7 @@ static int jffs2_block_check_erase(struct jffs2_sb_info *c, struct jffs2_erasebl
if (*datum + 1) {
*bad_offset += i;
printk(KERN_WARNING "Newly-erased block contained word 0x%lx at offset 0x%08x\n", *datum, *bad_offset);
+ ret = -EIO;
goto fail;
}
}
@@ -419,9 +424,6 @@ static void jffs2_mark_erased_block(struct jffs2_sb_info *c, struct jffs2_eraseb
if (jffs2_write_nand_cleanmarker(c, jeb))
goto filebad;
}
-
- /* Everything else got zeroed before the erase */
- jeb->free_size = c->sector_size;
} else {
struct kvec vecs[1];
@@ -449,48 +451,50 @@ static void jffs2_mark_erased_block(struct jffs2_sb_info *c, struct jffs2_eraseb
goto filebad;
}
-
- /* Everything else got zeroed before the erase */
- jeb->free_size = c->sector_size;
- /* FIXME Special case for cleanmarker in empty block */
- jffs2_link_node_ref(c, jeb, jeb->offset | REF_NORMAL, c->cleanmarker_size, NULL);
}
+ /* Everything else got zeroed before the erase */
+ jeb->free_size = c->sector_size;
- down(&c->erase_free_sem);
+ mutex_lock(&c->erase_free_sem);
spin_lock(&c->erase_completion_lock);
+
c->erasing_size -= c->sector_size;
- c->free_size += jeb->free_size;
- c->used_size += jeb->used_size;
+ c->free_size += c->sector_size;
- jffs2_dbg_acct_sanity_check_nolock(c,jeb);
- jffs2_dbg_acct_paranoia_check_nolock(c, jeb);
+ /* Account for cleanmarker now, if it's in-band */
+ if (c->cleanmarker_size && !jffs2_cleanmarker_oob(c))
+ jffs2_link_node_ref(c, jeb, jeb->offset | REF_NORMAL, c->cleanmarker_size, NULL);
- list_add_tail(&jeb->list, &c->free_list);
+ list_move_tail(&jeb->list, &c->free_list);
c->nr_erasing_blocks--;
c->nr_free_blocks++;
+
+ jffs2_dbg_acct_sanity_check_nolock(c, jeb);
+ jffs2_dbg_acct_paranoia_check_nolock(c, jeb);
+
spin_unlock(&c->erase_completion_lock);
- up(&c->erase_free_sem);
+ mutex_unlock(&c->erase_free_sem);
wake_up(&c->erase_wait);
return;
filebad:
- down(&c->erase_free_sem);
+ mutex_lock(&c->erase_free_sem);
spin_lock(&c->erase_completion_lock);
/* Stick it on a list (any list) so erase_failed can take it
right off again. Silly, but shouldn't happen often. */
- list_add(&jeb->list, &c->erasing_list);
+ list_move(&jeb->list, &c->erasing_list);
spin_unlock(&c->erase_completion_lock);
- up(&c->erase_free_sem);
+ mutex_unlock(&c->erase_free_sem);
jffs2_erase_failed(c, jeb, bad_offset);
return;
refile:
/* Stick it back on the list from whence it came and come back later */
jffs2_erase_pending_trigger(c);
- down(&c->erase_free_sem);
+ mutex_lock(&c->erase_free_sem);
spin_lock(&c->erase_completion_lock);
- list_add(&jeb->list, &c->erase_complete_list);
+ list_move(&jeb->list, &c->erase_complete_list);
spin_unlock(&c->erase_completion_lock);
- up(&c->erase_free_sem);
+ mutex_unlock(&c->erase_free_sem);
return;
}
diff --git a/fs/jffs2/file.c b/fs/jffs2/file.c
index dcc2734e0b5..5e920343b2c 100644
--- a/fs/jffs2/file.c
+++ b/fs/jffs2/file.c
@@ -115,9 +115,9 @@ static int jffs2_readpage (struct file *filp, struct page *pg)
struct jffs2_inode_info *f = JFFS2_INODE_INFO(pg->mapping->host);
int ret;
- down(&f->sem);
+ mutex_lock(&f->sem);
ret = jffs2_do_readpage_unlock(pg->mapping->host, pg);
- up(&f->sem);
+ mutex_unlock(&f->sem);
return ret;
}
@@ -154,7 +154,7 @@ static int jffs2_write_begin(struct file *filp, struct address_space *mapping,
if (ret)
goto out_page;
- down(&f->sem);
+ mutex_lock(&f->sem);
memset(&ri, 0, sizeof(ri));
ri.magic = cpu_to_je16(JFFS2_MAGIC_BITMASK);
@@ -181,7 +181,7 @@ static int jffs2_write_begin(struct file *filp, struct address_space *mapping,
if (IS_ERR(fn)) {
ret = PTR_ERR(fn);
jffs2_complete_reservation(c);
- up(&f->sem);
+ mutex_unlock(&f->sem);
goto out_page;
}
ret = jffs2_add_full_dnode_to_inode(c, f, fn);
@@ -195,12 +195,12 @@ static int jffs2_write_begin(struct file *filp, struct address_space *mapping,
jffs2_mark_node_obsolete(c, fn->raw);
jffs2_free_full_dnode(fn);
jffs2_complete_reservation(c);
- up(&f->sem);
+ mutex_unlock(&f->sem);
goto out_page;
}
jffs2_complete_reservation(c);
inode->i_size = pageofs;
- up(&f->sem);
+ mutex_unlock(&f->sem);
}
/*
@@ -209,9 +209,9 @@ static int jffs2_write_begin(struct file *filp, struct address_space *mapping,
* case of a short-copy.
*/
if (!PageUptodate(pg)) {
- down(&f->sem);
+ mutex_lock(&f->sem);
ret = jffs2_do_readpage_nolock(inode, pg);
- up(&f->sem);
+ mutex_unlock(&f->sem);
if (ret)
goto out_page;
}
diff --git a/fs/jffs2/fs.c b/fs/jffs2/fs.c
index e26ea78c789..3eb1c84b0a3 100644
--- a/fs/jffs2/fs.c
+++ b/fs/jffs2/fs.c
@@ -36,6 +36,7 @@ int jffs2_do_setattr (struct inode *inode, struct iattr *iattr)
unsigned int ivalid;
uint32_t alloclen;
int ret;
+ int alloc_type = ALLOC_NORMAL;
D1(printk(KERN_DEBUG "jffs2_setattr(): ino #%lu\n", inode->i_ino));
@@ -50,20 +51,20 @@ int jffs2_do_setattr (struct inode *inode, struct iattr *iattr)
mdata = (char *)&dev;
D1(printk(KERN_DEBUG "jffs2_setattr(): Writing %d bytes of kdev_t\n", mdatalen));
} else if (S_ISLNK(inode->i_mode)) {
- down(&f->sem);
+ mutex_lock(&f->sem);
mdatalen = f->metadata->size;
mdata = kmalloc(f->metadata->size, GFP_USER);
if (!mdata) {
- up(&f->sem);
+ mutex_unlock(&f->sem);
return -ENOMEM;
}
ret = jffs2_read_dnode(c, f, f->metadata, mdata, 0, mdatalen);
if (ret) {
- up(&f->sem);
+ mutex_unlock(&f->sem);
kfree(mdata);
return ret;
}
- up(&f->sem);
+ mutex_unlock(&f->sem);
D1(printk(KERN_DEBUG "jffs2_setattr(): Writing %d bytes of symlink target\n", mdatalen));
}
@@ -82,7 +83,7 @@ int jffs2_do_setattr (struct inode *inode, struct iattr *iattr)
kfree(mdata);
return ret;
}
- down(&f->sem);
+ mutex_lock(&f->sem);
ivalid = iattr->ia_valid;
ri->magic = cpu_to_je16(JFFS2_MAGIC_BITMASK);
@@ -115,6 +116,10 @@ int jffs2_do_setattr (struct inode *inode, struct iattr *iattr)
ri->compr = JFFS2_COMPR_ZERO;
ri->dsize = cpu_to_je32(iattr->ia_size - inode->i_size);
ri->offset = cpu_to_je32(inode->i_size);
+ } else if (ivalid & ATTR_SIZE && !iattr->ia_size) {
+ /* For truncate-to-zero, treat it as deletion because
+ it'll always be obsoleting all previous nodes */
+ alloc_type = ALLOC_DELETION;
}
ri->node_crc = cpu_to_je32(crc32(0, ri, sizeof(*ri)-8));
if (mdatalen)
@@ -122,14 +127,14 @@ int jffs2_do_setattr (struct inode *inode, struct iattr *iattr)
else
ri->data_crc = cpu_to_je32(0);
- new_metadata = jffs2_write_dnode(c, f, ri, mdata, mdatalen, ALLOC_NORMAL);
+ new_metadata = jffs2_write_dnode(c, f, ri, mdata, mdatalen, alloc_type);
if (S_ISLNK(inode->i_mode))
kfree(mdata);
if (IS_ERR(new_metadata)) {
jffs2_complete_reservation(c);
jffs2_free_raw_inode(ri);
- up(&f->sem);
+ mutex_unlock(&f->sem);
return PTR_ERR(new_metadata);
}
/* It worked. Update the inode */
@@ -149,6 +154,7 @@ int jffs2_do_setattr (struct inode *inode, struct iattr *iattr)
if (ivalid & ATTR_SIZE && inode->i_size < iattr->ia_size) {
jffs2_add_full_dnode_to_inode(c, f, new_metadata);
inode->i_size = iattr->ia_size;
+ inode->i_blocks = (inode->i_size + 511) >> 9;
f->metadata = NULL;
} else {
f->metadata = new_metadata;
@@ -159,7 +165,7 @@ int jffs2_do_setattr (struct inode *inode, struct iattr *iattr)
}
jffs2_free_raw_inode(ri);
- up(&f->sem);
+ mutex_unlock(&f->sem);
jffs2_complete_reservation(c);
/* We have to do the vmtruncate() without f->sem held, since
@@ -167,8 +173,10 @@ int jffs2_do_setattr (struct inode *inode, struct iattr *iattr)
We are protected from a simultaneous write() extending i_size
back past iattr->ia_size, because do_truncate() holds the
generic inode semaphore. */
- if (ivalid & ATTR_SIZE && inode->i_size > iattr->ia_size)
- vmtruncate(inode, iattr->ia_size);
+ if (ivalid & ATTR_SIZE && inode->i_size > iattr->ia_size) {
+ vmtruncate(inode, iattr->ia_size);
+ inode->i_blocks = (inode->i_size + 511) >> 9;
+ }
return 0;
}
@@ -248,12 +256,12 @@ struct inode *jffs2_iget(struct super_block *sb, unsigned long ino)
c = JFFS2_SB_INFO(inode->i_sb);
jffs2_init_inode_info(f);
- down(&f->sem);
+ mutex_lock(&f->sem);
ret = jffs2_do_read_inode(c, f, inode->i_ino, &latest_node);
if (ret) {
- up(&f->sem);
+ mutex_unlock(&f->sem);
iget_failed(inode);
return ERR_PTR(ret);
}
@@ -330,7 +338,7 @@ struct inode *jffs2_iget(struct super_block *sb, unsigned long ino)
printk(KERN_WARNING "jffs2_read_inode(): Bogus imode %o for ino %lu\n", inode->i_mode, (unsigned long)inode->i_ino);
}
- up(&f->sem);
+ mutex_unlock(&f->sem);
D1(printk(KERN_DEBUG "jffs2_read_inode() returning\n"));
unlock_new_inode(inode);
@@ -339,7 +347,7 @@ struct inode *jffs2_iget(struct super_block *sb, unsigned long ino)
error_io:
ret = -EIO;
error:
- up(&f->sem);
+ mutex_unlock(&f->sem);
jffs2_do_clear_inode(c, f);
iget_failed(inode);
return ERR_PTR(ret);
@@ -380,9 +388,9 @@ int jffs2_remount_fs (struct super_block *sb, int *flags, char *data)
 Flush the writebuffer, if necessary, else we lose it */
if (!(sb->s_flags & MS_RDONLY)) {
jffs2_stop_garbage_collect_thread(c);
- down(&c->alloc_sem);
+ mutex_lock(&c->alloc_sem);
jffs2_flush_wbuf_pad(c);
- up(&c->alloc_sem);
+ mutex_unlock(&c->alloc_sem);
}
if (!(*flags & MS_RDONLY))
@@ -429,7 +437,7 @@ struct inode *jffs2_new_inode (struct inode *dir_i, int mode, struct jffs2_raw_i
f = JFFS2_INODE_INFO(inode);
jffs2_init_inode_info(f);
- down(&f->sem);
+ mutex_lock(&f->sem);
memset(ri, 0, sizeof(*ri));
/* Set OS-specific defaults for new inodes */
diff --git a/fs/jffs2/gc.c b/fs/jffs2/gc.c
index 32ff0373aa0..bad005664e3 100644
--- a/fs/jffs2/gc.c
+++ b/fs/jffs2/gc.c
@@ -126,7 +126,7 @@ int jffs2_garbage_collect_pass(struct jffs2_sb_info *c)
int ret = 0, inum, nlink;
int xattr = 0;
- if (down_interruptible(&c->alloc_sem))
+ if (mutex_lock_interruptible(&c->alloc_sem))
return -EINTR;
for (;;) {
@@ -143,7 +143,7 @@ int jffs2_garbage_collect_pass(struct jffs2_sb_info *c)
c->unchecked_size);
jffs2_dbg_dump_block_lists_nolock(c);
spin_unlock(&c->erase_completion_lock);
- up(&c->alloc_sem);
+ mutex_unlock(&c->alloc_sem);
return -ENOSPC;
}
@@ -190,7 +190,7 @@ int jffs2_garbage_collect_pass(struct jffs2_sb_info *c)
made no progress in this case, but that should be OK */
c->checked_ino--;
- up(&c->alloc_sem);
+ mutex_unlock(&c->alloc_sem);
sleep_on_spinunlock(&c->inocache_wq, &c->inocache_lock);
return 0;
@@ -210,7 +210,7 @@ int jffs2_garbage_collect_pass(struct jffs2_sb_info *c)
printk(KERN_WARNING "Returned error for crccheck of ino #%u. Expect badness...\n", ic->ino);
jffs2_set_inocache_state(c, ic, INO_STATE_CHECKEDABSENT);
- up(&c->alloc_sem);
+ mutex_unlock(&c->alloc_sem);
return ret;
}
@@ -221,9 +221,15 @@ int jffs2_garbage_collect_pass(struct jffs2_sb_info *c)
jeb = jffs2_find_gc_block(c);
if (!jeb) {
- D1 (printk(KERN_NOTICE "jffs2: Couldn't find erase block to garbage collect!\n"));
+ /* Couldn't find a free block. But maybe we can just erase one and make 'progress'? */
+ if (!list_empty(&c->erase_pending_list)) {
+ spin_unlock(&c->erase_completion_lock);
+ mutex_unlock(&c->alloc_sem);
+ return -EAGAIN;
+ }
+ D1(printk(KERN_NOTICE "jffs2: Couldn't find erase block to garbage collect!\n"));
spin_unlock(&c->erase_completion_lock);
- up(&c->alloc_sem);
+ mutex_unlock(&c->alloc_sem);
return -EIO;
}
@@ -232,7 +238,7 @@ int jffs2_garbage_collect_pass(struct jffs2_sb_info *c)
printk(KERN_DEBUG "Nextblock at %08x, used_size %08x, dirty_size %08x, wasted_size %08x, free_size %08x\n", c->nextblock->offset, c->nextblock->used_size, c->nextblock->dirty_size, c->nextblock->wasted_size, c->nextblock->free_size));
if (!jeb->used_size) {
- up(&c->alloc_sem);
+ mutex_unlock(&c->alloc_sem);
goto eraseit;
}
@@ -248,7 +254,7 @@ int jffs2_garbage_collect_pass(struct jffs2_sb_info *c)
jeb->offset, jeb->free_size, jeb->dirty_size, jeb->used_size);
jeb->gc_node = raw;
spin_unlock(&c->erase_completion_lock);
- up(&c->alloc_sem);
+ mutex_unlock(&c->alloc_sem);
BUG();
}
}
@@ -266,7 +272,7 @@ int jffs2_garbage_collect_pass(struct jffs2_sb_info *c)
/* Just mark it obsolete */
jffs2_mark_node_obsolete(c, raw);
}
- up(&c->alloc_sem);
+ mutex_unlock(&c->alloc_sem);
goto eraseit_lock;
}
@@ -334,7 +340,7 @@ int jffs2_garbage_collect_pass(struct jffs2_sb_info *c)
*/
printk(KERN_CRIT "Inode #%u already in state %d in jffs2_garbage_collect_pass()!\n",
ic->ino, ic->state);
- up(&c->alloc_sem);
+ mutex_unlock(&c->alloc_sem);
spin_unlock(&c->inocache_lock);
BUG();
@@ -345,7 +351,7 @@ int jffs2_garbage_collect_pass(struct jffs2_sb_info *c)
the alloc_sem() (for marking nodes invalid) so we must
drop the alloc_sem before sleeping. */
- up(&c->alloc_sem);
+ mutex_unlock(&c->alloc_sem);
D1(printk(KERN_DEBUG "jffs2_garbage_collect_pass() waiting for ino #%u in state %d\n",
ic->ino, ic->state));
sleep_on_spinunlock(&c->inocache_wq, &c->inocache_lock);
@@ -416,7 +422,7 @@ int jffs2_garbage_collect_pass(struct jffs2_sb_info *c)
ret = -ENOSPC;
}
release_sem:
- up(&c->alloc_sem);
+ mutex_unlock(&c->alloc_sem);
eraseit_lock:
/* If we've finished this block, start it erasing */
@@ -445,7 +451,7 @@ static int jffs2_garbage_collect_live(struct jffs2_sb_info *c, struct jffs2_era
uint32_t start = 0, end = 0, nrfrags = 0;
int ret = 0;
- down(&f->sem);
+ mutex_lock(&f->sem);
/* Now we have the lock for this inode. Check that it's still the one at the head
of the list. */
@@ -525,7 +531,7 @@ static int jffs2_garbage_collect_live(struct jffs2_sb_info *c, struct jffs2_era
}
}
upnout:
- up(&f->sem);
+ mutex_unlock(&f->sem);
return ret;
}
@@ -846,7 +852,7 @@ static int jffs2_garbage_collect_deletion_dirent(struct jffs2_sb_info *c, struct
/* Prevent the erase code from nicking the obsolete node refs while
we're looking at them. I really don't like this extra lock but
can't see any alternative. Suggestions on a postcard to... */
- down(&c->erase_free_sem);
+ mutex_lock(&c->erase_free_sem);
for (raw = f->inocache->nodes; raw != (void *)f->inocache; raw = raw->next_in_ino) {
@@ -899,7 +905,7 @@ static int jffs2_garbage_collect_deletion_dirent(struct jffs2_sb_info *c, struct
/* OK. The name really does match. There really is still an older node on
the flash which our deletion dirent obsoletes. So we have to write out
a new deletion dirent to replace it */
- up(&c->erase_free_sem);
+ mutex_unlock(&c->erase_free_sem);
D1(printk(KERN_DEBUG "Deletion dirent at %08x still obsoletes real dirent \"%s\" at %08x for ino #%u\n",
ref_offset(fd->raw), fd->name, ref_offset(raw), je32_to_cpu(rd->ino)));
@@ -908,7 +914,7 @@ static int jffs2_garbage_collect_deletion_dirent(struct jffs2_sb_info *c, struct
return jffs2_garbage_collect_dirent(c, jeb, f, fd);
}
- up(&c->erase_free_sem);
+ mutex_unlock(&c->erase_free_sem);
kfree(rd);
}
@@ -1081,7 +1087,7 @@ static int jffs2_garbage_collect_hole(struct jffs2_sb_info *c, struct jffs2_eras
return 0;
}
-static int jffs2_garbage_collect_dnode(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb,
+static int jffs2_garbage_collect_dnode(struct jffs2_sb_info *c, struct jffs2_eraseblock *orig_jeb,
struct jffs2_inode_info *f, struct jffs2_full_dnode *fn,
uint32_t start, uint32_t end)
{
diff --git a/fs/jffs2/ioctl.c b/fs/jffs2/ioctl.c
index f4d525b0ea5..e2177210f62 100644
--- a/fs/jffs2/ioctl.c
+++ b/fs/jffs2/ioctl.c
@@ -10,6 +10,7 @@
*/
#include <linux/fs.h>
+#include "nodelist.h"
int jffs2_ioctl(struct inode *inode, struct file *filp, unsigned int cmd,
unsigned long arg)
diff --git a/fs/jffs2/jffs2_fs_i.h b/fs/jffs2/jffs2_fs_i.h
index a841f4973a7..31559f45fdd 100644
--- a/fs/jffs2/jffs2_fs_i.h
+++ b/fs/jffs2/jffs2_fs_i.h
@@ -15,7 +15,7 @@
#include <linux/version.h>
#include <linux/rbtree.h>
#include <linux/posix_acl.h>
-#include <linux/semaphore.h>
+#include <linux/mutex.h>
struct jffs2_inode_info {
/* We need an internal mutex similar to inode->i_mutex.
@@ -24,7 +24,7 @@ struct jffs2_inode_info {
before letting GC proceed. Or we'd have to put ugliness
into the GC code so it didn't attempt to obtain the i_mutex
for the inode(s) which are already locked */
- struct semaphore sem;
+ struct mutex sem;
/* The highest (datanode) version number used for this ino */
uint32_t highest_version;
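
With f->sem now declared as a struct mutex (and c->alloc_sem and c->erase_free_sem likewise in the jffs2_fs_sb.h hunk that follows), every initializer and lock/unlock site changes in lockstep, as the rest of this series shows. In outline, the conversion pattern is:

	/* before: counting semaphore used as a sleeping lock */
	init_MUTEX(&f->sem);		/* or init_MUTEX_LOCKED() */
	down(&f->sem);			/* down_interruptible() where -EINTR is handled */
	up(&f->sem);

	/* after: a real mutex */
	mutex_init(&f->sem);		/* init_MUTEX_LOCKED() becomes mutex_init() + mutex_lock() */
	mutex_lock(&f->sem);		/* mutex_lock_interruptible() in the interruptible case */
	mutex_unlock(&f->sem);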
diff --git a/fs/jffs2/jffs2_fs_sb.h b/fs/jffs2/jffs2_fs_sb.h
index 18fca2b9e53..85ef6dbb1be 100644
--- a/fs/jffs2/jffs2_fs_sb.h
+++ b/fs/jffs2/jffs2_fs_sb.h
@@ -16,7 +16,7 @@
#include <linux/spinlock.h>
#include <linux/workqueue.h>
#include <linux/completion.h>
-#include <linux/semaphore.h>
+#include <linux/mutex.h>
#include <linux/timer.h>
#include <linux/wait.h>
#include <linux/list.h>
@@ -44,7 +44,7 @@ struct jffs2_sb_info {
struct completion gc_thread_start; /* GC thread start completion */
struct completion gc_thread_exit; /* GC thread exit completion port */
- struct semaphore alloc_sem; /* Used to protect all the following
+ struct mutex alloc_sem; /* Used to protect all the following
fields, and also to protect against
out-of-order writing of nodes. And GC. */
uint32_t cleanmarker_size; /* Size of an _inline_ CLEANMARKER
@@ -87,6 +87,7 @@ struct jffs2_sb_info {
struct list_head erasable_list; /* Blocks which are completely dirty, and need erasing */
struct list_head erasable_pending_wbuf_list; /* Blocks which need erasing but only after the current wbuf is flushed */
struct list_head erasing_list; /* Blocks which are currently erasing */
+ struct list_head erase_checking_list; /* Blocks which are being checked and marked */
struct list_head erase_pending_list; /* Blocks which need erasing now */
struct list_head erase_complete_list; /* Blocks which are erased and need the clean marker written to them */
struct list_head free_list; /* Blocks which are free and ready to be used */
@@ -104,7 +105,7 @@ struct jffs2_sb_info {
/* Sem to allow jffs2_garbage_collect_deletion_dirent to
drop the erase_completion_lock while it's holding a pointer
to an obsoleted node. I don't like this. Alternatives welcomed. */
- struct semaphore erase_free_sem;
+ struct mutex erase_free_sem;
uint32_t wbuf_pagesize; /* 0 for NOR and other flashes with no wbuf */
diff --git a/fs/jffs2/nodelist.h b/fs/jffs2/nodelist.h
index ec1aae9e695..8219df6eb6d 100644
--- a/fs/jffs2/nodelist.h
+++ b/fs/jffs2/nodelist.h
@@ -87,7 +87,7 @@ struct jffs2_raw_node_ref
xattr_ref or xattr_datum instead. The common part of those structures
has NULL in the first word. See jffs2_raw_ref_to_ic() below */
uint32_t flash_offset;
-#define TEST_TOTLEN
+#undef TEST_TOTLEN
#ifdef TEST_TOTLEN
uint32_t __totlen; /* This may die; use ref_totlen(c, jeb, ) below */
#endif
diff --git a/fs/jffs2/nodemgmt.c b/fs/jffs2/nodemgmt.c
index a0313fa8748..9df8f3ef20d 100644
--- a/fs/jffs2/nodemgmt.c
+++ b/fs/jffs2/nodemgmt.c
@@ -48,7 +48,7 @@ int jffs2_reserve_space(struct jffs2_sb_info *c, uint32_t minsize,
minsize = PAD(minsize);
D1(printk(KERN_DEBUG "jffs2_reserve_space(): Requested 0x%x bytes\n", minsize));
- down(&c->alloc_sem);
+ mutex_lock(&c->alloc_sem);
D1(printk(KERN_DEBUG "jffs2_reserve_space(): alloc sem got\n"));
@@ -57,7 +57,6 @@ int jffs2_reserve_space(struct jffs2_sb_info *c, uint32_t minsize,
/* this needs a little more thought (true <tglx> :)) */
while(ret == -EAGAIN) {
while(c->nr_free_blocks + c->nr_erasing_blocks < blocksneeded) {
- int ret;
uint32_t dirty, avail;
/* calculate real dirty size
@@ -82,7 +81,7 @@ int jffs2_reserve_space(struct jffs2_sb_info *c, uint32_t minsize,
dirty, c->unchecked_size, c->sector_size));
spin_unlock(&c->erase_completion_lock);
- up(&c->alloc_sem);
+ mutex_unlock(&c->alloc_sem);
return -ENOSPC;
}
@@ -105,11 +104,11 @@ int jffs2_reserve_space(struct jffs2_sb_info *c, uint32_t minsize,
D1(printk(KERN_DEBUG "max. available size 0x%08x < blocksneeded * sector_size 0x%08x, returning -ENOSPC\n",
avail, blocksneeded * c->sector_size));
spin_unlock(&c->erase_completion_lock);
- up(&c->alloc_sem);
+ mutex_unlock(&c->alloc_sem);
return -ENOSPC;
}
- up(&c->alloc_sem);
+ mutex_unlock(&c->alloc_sem);
D1(printk(KERN_DEBUG "Triggering GC pass. nr_free_blocks %d, nr_erasing_blocks %d, free_size 0x%08x, dirty_size 0x%08x, wasted_size 0x%08x, used_size 0x%08x, erasing_size 0x%08x, bad_size 0x%08x (total 0x%08x of 0x%08x)\n",
c->nr_free_blocks, c->nr_erasing_blocks, c->free_size, c->dirty_size, c->wasted_size, c->used_size, c->erasing_size, c->bad_size,
@@ -117,7 +116,10 @@ int jffs2_reserve_space(struct jffs2_sb_info *c, uint32_t minsize,
spin_unlock(&c->erase_completion_lock);
ret = jffs2_garbage_collect_pass(c);
- if (ret)
+
+ if (ret == -EAGAIN)
+ jffs2_erase_pending_blocks(c, 1);
+ else if (ret)
return ret;
cond_resched();
@@ -125,7 +127,7 @@ int jffs2_reserve_space(struct jffs2_sb_info *c, uint32_t minsize,
if (signal_pending(current))
return -EINTR;
- down(&c->alloc_sem);
+ mutex_lock(&c->alloc_sem);
spin_lock(&c->erase_completion_lock);
}
@@ -138,7 +140,7 @@ int jffs2_reserve_space(struct jffs2_sb_info *c, uint32_t minsize,
if (!ret)
ret = jffs2_prealloc_raw_node_refs(c, c->nextblock, 1);
if (ret)
- up(&c->alloc_sem);
+ mutex_unlock(&c->alloc_sem);
return ret;
}
@@ -463,7 +465,7 @@ void jffs2_complete_reservation(struct jffs2_sb_info *c)
{
D1(printk(KERN_DEBUG "jffs2_complete_reservation()\n"));
jffs2_garbage_collect_trigger(c);
- up(&c->alloc_sem);
+ mutex_unlock(&c->alloc_sem);
}
static inline int on_list(struct list_head *obj, struct list_head *head)
@@ -512,7 +514,7 @@ void jffs2_mark_node_obsolete(struct jffs2_sb_info *c, struct jffs2_raw_node_ref
any jffs2_raw_node_refs. So we don't need to stop erases from
happening, or protect against people holding an obsolete
jffs2_raw_node_ref without the erase_completion_lock. */
- down(&c->erase_free_sem);
+ mutex_lock(&c->erase_free_sem);
}
spin_lock(&c->erase_completion_lock);
@@ -715,7 +717,7 @@ void jffs2_mark_node_obsolete(struct jffs2_sb_info *c, struct jffs2_raw_node_ref
}
out_erase_sem:
- up(&c->erase_free_sem);
+ mutex_unlock(&c->erase_free_sem);
}
int jffs2_thread_should_wake(struct jffs2_sb_info *c)
diff --git a/fs/jffs2/readinode.c b/fs/jffs2/readinode.c
index e512a93d624..4cb4d76de07 100644
--- a/fs/jffs2/readinode.c
+++ b/fs/jffs2/readinode.c
@@ -825,8 +825,9 @@ static inline int read_dnode(struct jffs2_sb_info *c, struct jffs2_raw_node_ref
else // normal case...
tn->fn->size = je32_to_cpu(rd->dsize);
- dbg_readinode("dnode @%08x: ver %u, offset %#04x, dsize %#04x, csize %#04x\n",
- ref_offset(ref), je32_to_cpu(rd->version), je32_to_cpu(rd->offset), je32_to_cpu(rd->dsize), csize);
+ dbg_readinode2("dnode @%08x: ver %u, offset %#04x, dsize %#04x, csize %#04x\n",
+ ref_offset(ref), je32_to_cpu(rd->version),
+ je32_to_cpu(rd->offset), je32_to_cpu(rd->dsize), csize);
ret = jffs2_add_tn_to_tree(c, rii, tn);
@@ -836,13 +837,13 @@ static inline int read_dnode(struct jffs2_sb_info *c, struct jffs2_raw_node_ref
jffs2_free_tmp_dnode_info(tn);
return ret;
}
-#ifdef JFFS2_DBG_READINODE_MESSAGES
- dbg_readinode("After adding ver %d:\n", je32_to_cpu(rd->version));
+#ifdef JFFS2_DBG_READINODE2_MESSAGES
+ dbg_readinode2("After adding ver %d:\n", je32_to_cpu(rd->version));
tn = tn_first(&rii->tn_root);
while (tn) {
- dbg_readinode("%p: v %d r 0x%x-0x%x ov %d\n",
- tn, tn->version, tn->fn->ofs,
- tn->fn->ofs+tn->fn->size, tn->overlapped);
+ dbg_readinode2("%p: v %d r 0x%x-0x%x ov %d\n",
+ tn, tn->version, tn->fn->ofs,
+ tn->fn->ofs+tn->fn->size, tn->overlapped);
tn = tn_next(tn);
}
#endif
@@ -1193,7 +1194,7 @@ static int jffs2_do_read_inode_internal(struct jffs2_sb_info *c,
JFFS2_ERROR("failed to read from flash: error %d, %zd of %zd bytes read\n",
ret, retlen, sizeof(*latest_node));
/* FIXME: If this fails, there seems to be a memory leak. Find it. */
- up(&f->sem);
+ mutex_unlock(&f->sem);
jffs2_do_clear_inode(c, f);
return ret?ret:-EIO;
}
@@ -1202,7 +1203,7 @@ static int jffs2_do_read_inode_internal(struct jffs2_sb_info *c,
if (crc != je32_to_cpu(latest_node->node_crc)) {
JFFS2_ERROR("CRC failed for read_inode of inode %u at physical location 0x%x\n",
f->inocache->ino, ref_offset(rii.latest_ref));
- up(&f->sem);
+ mutex_unlock(&f->sem);
jffs2_do_clear_inode(c, f);
return -EIO;
}
@@ -1242,7 +1243,7 @@ static int jffs2_do_read_inode_internal(struct jffs2_sb_info *c,
f->target = kmalloc(je32_to_cpu(latest_node->csize) + 1, GFP_KERNEL);
if (!f->target) {
JFFS2_ERROR("can't allocate %d bytes of memory for the symlink target path cache\n", je32_to_cpu(latest_node->csize));
- up(&f->sem);
+ mutex_unlock(&f->sem);
jffs2_do_clear_inode(c, f);
return -ENOMEM;
}
@@ -1255,7 +1256,7 @@ static int jffs2_do_read_inode_internal(struct jffs2_sb_info *c,
ret = -EIO;
kfree(f->target);
f->target = NULL;
- up(&f->sem);
+ mutex_unlock(&f->sem);
jffs2_do_clear_inode(c, f);
return -ret;
}
@@ -1273,14 +1274,14 @@ static int jffs2_do_read_inode_internal(struct jffs2_sb_info *c,
if (f->metadata) {
JFFS2_ERROR("Argh. Special inode #%u with mode 0%o had metadata node\n",
f->inocache->ino, jemode_to_cpu(latest_node->mode));
- up(&f->sem);
+ mutex_unlock(&f->sem);
jffs2_do_clear_inode(c, f);
return -EIO;
}
if (!frag_first(&f->fragtree)) {
JFFS2_ERROR("Argh. Special inode #%u with mode 0%o has no fragments\n",
f->inocache->ino, jemode_to_cpu(latest_node->mode));
- up(&f->sem);
+ mutex_unlock(&f->sem);
jffs2_do_clear_inode(c, f);
return -EIO;
}
@@ -1289,7 +1290,7 @@ static int jffs2_do_read_inode_internal(struct jffs2_sb_info *c,
JFFS2_ERROR("Argh. Special inode #%u with mode 0x%x had more than one node\n",
f->inocache->ino, jemode_to_cpu(latest_node->mode));
/* FIXME: Deal with it - check crc32, check for duplicate node, check times and discard the older one */
- up(&f->sem);
+ mutex_unlock(&f->sem);
jffs2_do_clear_inode(c, f);
return -EIO;
}
@@ -1379,12 +1380,13 @@ int jffs2_do_crccheck_inode(struct jffs2_sb_info *c, struct jffs2_inode_cache *i
if (!f)
return -ENOMEM;
- init_MUTEX_LOCKED(&f->sem);
+ mutex_init(&f->sem);
+ mutex_lock(&f->sem);
f->inocache = ic;
ret = jffs2_do_read_inode_internal(c, f, &n);
if (!ret) {
- up(&f->sem);
+ mutex_unlock(&f->sem);
jffs2_do_clear_inode(c, f);
}
kfree (f);
@@ -1398,7 +1400,7 @@ void jffs2_do_clear_inode(struct jffs2_sb_info *c, struct jffs2_inode_info *f)
jffs2_clear_acl(f);
jffs2_xattr_delete_inode(c, f->inocache);
- down(&f->sem);
+ mutex_lock(&f->sem);
deleted = f->inocache && !f->inocache->nlink;
if (f->inocache && f->inocache->state != INO_STATE_CHECKING)
@@ -1430,5 +1432,5 @@ void jffs2_do_clear_inode(struct jffs2_sb_info *c, struct jffs2_inode_info *f)
jffs2_del_ino_cache(c, f->inocache);
}
- up(&f->sem);
+ mutex_unlock(&f->sem);
}
diff --git a/fs/jffs2/super.c b/fs/jffs2/super.c
index 4677355996c..f3353df178e 100644
--- a/fs/jffs2/super.c
+++ b/fs/jffs2/super.c
@@ -47,7 +47,7 @@ static void jffs2_i_init_once(struct kmem_cache *cachep, void *foo)
{
struct jffs2_inode_info *ei = (struct jffs2_inode_info *) foo;
- init_MUTEX(&ei->sem);
+ mutex_init(&ei->sem);
inode_init_once(&ei->vfs_inode);
}
@@ -55,9 +55,9 @@ static int jffs2_sync_fs(struct super_block *sb, int wait)
{
struct jffs2_sb_info *c = JFFS2_SB_INFO(sb);
- down(&c->alloc_sem);
+ mutex_lock(&c->alloc_sem);
jffs2_flush_wbuf_pad(c);
- up(&c->alloc_sem);
+ mutex_unlock(&c->alloc_sem);
return 0;
}
@@ -95,8 +95,8 @@ static int jffs2_fill_super(struct super_block *sb, void *data, int silent)
/* Initialize JFFS2 superblock locks, the further initialization will
* be done later */
- init_MUTEX(&c->alloc_sem);
- init_MUTEX(&c->erase_free_sem);
+ mutex_init(&c->alloc_sem);
+ mutex_init(&c->erase_free_sem);
init_waitqueue_head(&c->erase_wait);
init_waitqueue_head(&c->inocache_wq);
spin_lock_init(&c->erase_completion_lock);
@@ -125,9 +125,9 @@ static void jffs2_put_super (struct super_block *sb)
D2(printk(KERN_DEBUG "jffs2: jffs2_put_super()\n"));
- down(&c->alloc_sem);
+ mutex_lock(&c->alloc_sem);
jffs2_flush_wbuf_pad(c);
- up(&c->alloc_sem);
+ mutex_unlock(&c->alloc_sem);
jffs2_sum_exit(c);
diff --git a/fs/jffs2/wbuf.c b/fs/jffs2/wbuf.c
index d1d4f27464b..8de52b60767 100644
--- a/fs/jffs2/wbuf.c
+++ b/fs/jffs2/wbuf.c
@@ -578,8 +578,8 @@ static int __jffs2_flush_wbuf(struct jffs2_sb_info *c, int pad)
if (!jffs2_is_writebuffered(c))
return 0;
- if (!down_trylock(&c->alloc_sem)) {
- up(&c->alloc_sem);
+ if (mutex_trylock(&c->alloc_sem)) {
+ mutex_unlock(&c->alloc_sem);
printk(KERN_CRIT "jffs2_flush_wbuf() called with alloc_sem not locked!\n");
BUG();
}
@@ -702,10 +702,10 @@ int jffs2_flush_wbuf_gc(struct jffs2_sb_info *c, uint32_t ino)
if (!c->wbuf)
return 0;
- down(&c->alloc_sem);
+ mutex_lock(&c->alloc_sem);
if (!jffs2_wbuf_pending_for_ino(c, ino)) {
D1(printk(KERN_DEBUG "Ino #%d not pending in wbuf. Returning\n", ino));
- up(&c->alloc_sem);
+ mutex_unlock(&c->alloc_sem);
return 0;
}
@@ -725,14 +725,14 @@ int jffs2_flush_wbuf_gc(struct jffs2_sb_info *c, uint32_t ino)
} else while (old_wbuf_len &&
old_wbuf_ofs == c->wbuf_ofs) {
- up(&c->alloc_sem);
+ mutex_unlock(&c->alloc_sem);
D1(printk(KERN_DEBUG "jffs2_flush_wbuf_gc() calls gc pass\n"));
ret = jffs2_garbage_collect_pass(c);
if (ret) {
/* GC failed. Flush it with padding instead */
- down(&c->alloc_sem);
+ mutex_lock(&c->alloc_sem);
down_write(&c->wbuf_sem);
ret = __jffs2_flush_wbuf(c, PAD_ACCOUNTING);
/* retry flushing wbuf in case jffs2_wbuf_recover
@@ -742,12 +742,12 @@ int jffs2_flush_wbuf_gc(struct jffs2_sb_info *c, uint32_t ino)
up_write(&c->wbuf_sem);
break;
}
- down(&c->alloc_sem);
+ mutex_lock(&c->alloc_sem);
}
D1(printk(KERN_DEBUG "jffs2_flush_wbuf_gc() ends...\n"));
- up(&c->alloc_sem);
+ mutex_unlock(&c->alloc_sem);
return ret;
}
@@ -1236,12 +1236,24 @@ int jffs2_dataflash_setup(struct jffs2_sb_info *c) {
if (!c->wbuf)
return -ENOMEM;
+#ifdef CONFIG_JFFS2_FS_WBUF_VERIFY
+ c->wbuf_verify = kmalloc(c->wbuf_pagesize, GFP_KERNEL);
+ if (!c->wbuf_verify) {
+ kfree(c->oobbuf);
+ kfree(c->wbuf);
+ return -ENOMEM;
+ }
+#endif
+
printk(KERN_INFO "JFFS2 write-buffering enabled buffer (%d) erasesize (%d)\n", c->wbuf_pagesize, c->sector_size);
return 0;
}
void jffs2_dataflash_cleanup(struct jffs2_sb_info *c) {
+#ifdef CONFIG_JFFS2_FS_WBUF_VERIFY
+ kfree(c->wbuf_verify);
+#endif
kfree(c->wbuf);
}
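
The hunk above allocates an optional verify buffer under CONFIG_JFFS2_FS_WBUF_VERIFY and, on failure, has to free the buffers allocated earlier in the same setup path; the patch unwinds with explicit kfree() calls inside the #ifdef. A common alternative for keeping multi-step setup readable is a goto-based error ladder, sketched below with invented names (demo_bufs, demo_alloc_bufs) purely for illustration, not taken from jffs2:

#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/types.h>

struct demo_bufs {
	void *a;
	void *b;
	void *c;	/* optional, only when verification is enabled */
};

static int demo_alloc_bufs(struct demo_bufs *d, size_t len, bool verify)
{
	d->a = kmalloc(len, GFP_KERNEL);
	if (!d->a)
		goto err;
	d->b = kmalloc(len, GFP_KERNEL);
	if (!d->b)
		goto err_free_a;
	d->c = NULL;
	if (verify) {
		d->c = kmalloc(len, GFP_KERNEL);
		if (!d->c)
			goto err_free_b;
	}
	return 0;

err_free_b:
	kfree(d->b);	/* unwind in reverse order of allocation */
err_free_a:
	kfree(d->a);
err:
	return -ENOMEM;
}
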
diff --git a/fs/jffs2/write.c b/fs/jffs2/write.c
index 776f13cbf2b..665fce9797d 100644
--- a/fs/jffs2/write.c
+++ b/fs/jffs2/write.c
@@ -137,12 +137,12 @@ struct jffs2_full_dnode *jffs2_write_dnode(struct jffs2_sb_info *c, struct jffs2
JFFS2_SUMMARY_INODE_SIZE);
} else {
/* Locking pain */
- up(&f->sem);
+ mutex_unlock(&f->sem);
jffs2_complete_reservation(c);
ret = jffs2_reserve_space(c, sizeof(*ri) + datalen, &dummy,
alloc_mode, JFFS2_SUMMARY_INODE_SIZE);
- down(&f->sem);
+ mutex_lock(&f->sem);
}
if (!ret) {
@@ -285,12 +285,12 @@ struct jffs2_full_dirent *jffs2_write_dirent(struct jffs2_sb_info *c, struct jff
JFFS2_SUMMARY_DIRENT_SIZE(namelen));
} else {
/* Locking pain */
- up(&f->sem);
+ mutex_unlock(&f->sem);
jffs2_complete_reservation(c);
ret = jffs2_reserve_space(c, sizeof(*rd) + namelen, &dummy,
alloc_mode, JFFS2_SUMMARY_DIRENT_SIZE(namelen));
- down(&f->sem);
+ mutex_lock(&f->sem);
}
if (!ret) {
@@ -353,7 +353,7 @@ int jffs2_write_inode_range(struct jffs2_sb_info *c, struct jffs2_inode_info *f,
D1(printk(KERN_DEBUG "jffs2_reserve_space returned %d\n", ret));
break;
}
- down(&f->sem);
+ mutex_lock(&f->sem);
datalen = min_t(uint32_t, writelen, PAGE_CACHE_SIZE - (offset & (PAGE_CACHE_SIZE-1)));
cdatalen = min_t(uint32_t, alloclen - sizeof(*ri), datalen);
@@ -381,7 +381,7 @@ int jffs2_write_inode_range(struct jffs2_sb_info *c, struct jffs2_inode_info *f,
if (IS_ERR(fn)) {
ret = PTR_ERR(fn);
- up(&f->sem);
+ mutex_unlock(&f->sem);
jffs2_complete_reservation(c);
if (!retried) {
/* Write error to be retried */
@@ -403,11 +403,11 @@ int jffs2_write_inode_range(struct jffs2_sb_info *c, struct jffs2_inode_info *f,
jffs2_mark_node_obsolete(c, fn->raw);
jffs2_free_full_dnode(fn);
- up(&f->sem);
+ mutex_unlock(&f->sem);
jffs2_complete_reservation(c);
break;
}
- up(&f->sem);
+ mutex_unlock(&f->sem);
jffs2_complete_reservation(c);
if (!datalen) {
printk(KERN_WARNING "Eep. We didn't actually write any data in jffs2_write_inode_range()\n");
@@ -439,7 +439,7 @@ int jffs2_do_create(struct jffs2_sb_info *c, struct jffs2_inode_info *dir_f, str
JFFS2_SUMMARY_INODE_SIZE);
D1(printk(KERN_DEBUG "jffs2_do_create(): reserved 0x%x bytes\n", alloclen));
if (ret) {
- up(&f->sem);
+ mutex_unlock(&f->sem);
return ret;
}
@@ -454,7 +454,7 @@ int jffs2_do_create(struct jffs2_sb_info *c, struct jffs2_inode_info *dir_f, str
if (IS_ERR(fn)) {
D1(printk(KERN_DEBUG "jffs2_write_dnode() failed\n"));
/* Eeek. Wave bye bye */
- up(&f->sem);
+ mutex_unlock(&f->sem);
jffs2_complete_reservation(c);
return PTR_ERR(fn);
}
@@ -463,7 +463,7 @@ int jffs2_do_create(struct jffs2_sb_info *c, struct jffs2_inode_info *dir_f, str
*/
f->metadata = fn;
- up(&f->sem);
+ mutex_unlock(&f->sem);
jffs2_complete_reservation(c);
ret = jffs2_init_security(&f->vfs_inode, &dir_f->vfs_inode);
@@ -489,7 +489,7 @@ int jffs2_do_create(struct jffs2_sb_info *c, struct jffs2_inode_info *dir_f, str
return -ENOMEM;
}
- down(&dir_f->sem);
+ mutex_lock(&dir_f->sem);
rd->magic = cpu_to_je16(JFFS2_MAGIC_BITMASK);
rd->nodetype = cpu_to_je16(JFFS2_NODETYPE_DIRENT);
@@ -513,7 +513,7 @@ int jffs2_do_create(struct jffs2_sb_info *c, struct jffs2_inode_info *dir_f, str
/* dirent failed to write. Delete the inode normally
as if it were the final unlink() */
jffs2_complete_reservation(c);
- up(&dir_f->sem);
+ mutex_unlock(&dir_f->sem);
return PTR_ERR(fd);
}
@@ -522,7 +522,7 @@ int jffs2_do_create(struct jffs2_sb_info *c, struct jffs2_inode_info *dir_f, str
jffs2_add_fd_to_list(c, fd, &dir_f->dents);
jffs2_complete_reservation(c);
- up(&dir_f->sem);
+ mutex_unlock(&dir_f->sem);
return 0;
}
@@ -551,7 +551,7 @@ int jffs2_do_unlink(struct jffs2_sb_info *c, struct jffs2_inode_info *dir_f,
return ret;
}
- down(&dir_f->sem);
+ mutex_lock(&dir_f->sem);
/* Build a deletion node */
rd->magic = cpu_to_je16(JFFS2_MAGIC_BITMASK);
@@ -574,21 +574,21 @@ int jffs2_do_unlink(struct jffs2_sb_info *c, struct jffs2_inode_info *dir_f,
if (IS_ERR(fd)) {
jffs2_complete_reservation(c);
- up(&dir_f->sem);
+ mutex_unlock(&dir_f->sem);
return PTR_ERR(fd);
}
/* File it. This will mark the old one obsolete. */
jffs2_add_fd_to_list(c, fd, &dir_f->dents);
- up(&dir_f->sem);
+ mutex_unlock(&dir_f->sem);
} else {
- struct jffs2_full_dirent *fd = dir_f->dents;
uint32_t nhash = full_name_hash(name, namelen);
+ fd = dir_f->dents;
/* We don't actually want to reserve any space, but we do
want to be holding the alloc_sem when we write to flash */
- down(&c->alloc_sem);
- down(&dir_f->sem);
+ mutex_lock(&c->alloc_sem);
+ mutex_lock(&dir_f->sem);
for (fd = dir_f->dents; fd; fd = fd->next) {
if (fd->nhash == nhash &&
@@ -607,7 +607,7 @@ int jffs2_do_unlink(struct jffs2_sb_info *c, struct jffs2_inode_info *dir_f,
break;
}
}
- up(&dir_f->sem);
+ mutex_unlock(&dir_f->sem);
}
/* dead_f is NULL if this was a rename not a real unlink */
@@ -615,7 +615,7 @@ int jffs2_do_unlink(struct jffs2_sb_info *c, struct jffs2_inode_info *dir_f,
pointing to an inode which didn't exist. */
if (dead_f && dead_f->inocache) {
- down(&dead_f->sem);
+ mutex_lock(&dead_f->sem);
if (S_ISDIR(OFNI_EDONI_2SFFJ(dead_f)->i_mode)) {
while (dead_f->dents) {
@@ -639,7 +639,7 @@ int jffs2_do_unlink(struct jffs2_sb_info *c, struct jffs2_inode_info *dir_f,
dead_f->inocache->nlink--;
/* NB: Caller must set inode nlink if appropriate */
- up(&dead_f->sem);
+ mutex_unlock(&dead_f->sem);
}
jffs2_complete_reservation(c);
@@ -666,7 +666,7 @@ int jffs2_do_link (struct jffs2_sb_info *c, struct jffs2_inode_info *dir_f, uint
return ret;
}
- down(&dir_f->sem);
+ mutex_lock(&dir_f->sem);
/* Build a deletion node */
rd->magic = cpu_to_je16(JFFS2_MAGIC_BITMASK);
@@ -691,7 +691,7 @@ int jffs2_do_link (struct jffs2_sb_info *c, struct jffs2_inode_info *dir_f, uint
if (IS_ERR(fd)) {
jffs2_complete_reservation(c);
- up(&dir_f->sem);
+ mutex_unlock(&dir_f->sem);
return PTR_ERR(fd);
}
@@ -699,7 +699,7 @@ int jffs2_do_link (struct jffs2_sb_info *c, struct jffs2_inode_info *dir_f, uint
jffs2_add_fd_to_list(c, fd, &dir_f->dents);
jffs2_complete_reservation(c);
- up(&dir_f->sem);
+ mutex_unlock(&dir_f->sem);
return 0;
}
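
The jffs2 hunks above are a mechanical conversion from the old counting-semaphore API (init_MUTEX()/down()/up()) to struct mutex. A minimal kernel-style sketch of the mapping, using made-up names (my_ctx, demo_lock) rather than anything from the patch:

#include <linux/mutex.h>

struct my_ctx {
	struct mutex demo_lock;		/* was: struct semaphore demo_lock; */
};

static void demo_init(struct my_ctx *c)
{
	mutex_init(&c->demo_lock);	/* was: init_MUTEX(&c->demo_lock); */
}

static void demo_touch_state(struct my_ctx *c)
{
	mutex_lock(&c->demo_lock);	/* was: down(&c->demo_lock); */
	/* ... state serialized by demo_lock ... */
	mutex_unlock(&c->demo_lock);	/* was: up(&c->demo_lock); */
}

The two non-mechanical spots are init_MUTEX_LOCKED(), which becomes mutex_init() followed by mutex_lock() (as in jffs2_do_crccheck_inode above), and the !down_trylock() assertion in __jffs2_flush_wbuf(), whose sense inverts because mutex_trylock() returns 1 on success.
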
diff --git a/fs/lockd/clntproc.c b/fs/lockd/clntproc.c
index b6b74a60e1e..40b16f23e49 100644
--- a/fs/lockd/clntproc.c
+++ b/fs/lockd/clntproc.c
@@ -155,8 +155,6 @@ static void nlmclnt_release_lockargs(struct nlm_rqst *req)
int nlmclnt_proc(struct nlm_host *host, int cmd, struct file_lock *fl)
{
struct nlm_rqst *call;
- sigset_t oldset;
- unsigned long flags;
int status;
nlm_get_host(host);
@@ -168,22 +166,6 @@ int nlmclnt_proc(struct nlm_host *host, int cmd, struct file_lock *fl)
/* Set up the argument struct */
nlmclnt_setlockargs(call, fl);
- /* Keep the old signal mask */
- spin_lock_irqsave(&current->sighand->siglock, flags);
- oldset = current->blocked;
-
- /* If we're cleaning up locks because the process is exiting,
- * perform the RPC call asynchronously. */
- if ((IS_SETLK(cmd) || IS_SETLKW(cmd))
- && fl->fl_type == F_UNLCK
- && (current->flags & PF_EXITING)) {
- sigfillset(&current->blocked); /* Mask all signals */
- recalc_sigpending();
-
- call->a_flags = RPC_TASK_ASYNC;
- }
- spin_unlock_irqrestore(&current->sighand->siglock, flags);
-
if (IS_SETLK(cmd) || IS_SETLKW(cmd)) {
if (fl->fl_type != F_UNLCK) {
call->a_args.block = IS_SETLKW(cmd) ? 1 : 0;
@@ -198,11 +180,6 @@ int nlmclnt_proc(struct nlm_host *host, int cmd, struct file_lock *fl)
fl->fl_ops->fl_release_private(fl);
fl->fl_ops = NULL;
- spin_lock_irqsave(&current->sighand->siglock, flags);
- current->blocked = oldset;
- recalc_sigpending();
- spin_unlock_irqrestore(&current->sighand->siglock, flags);
-
dprintk("lockd: clnt proc returns %d\n", status);
return status;
}
@@ -221,6 +198,7 @@ struct nlm_rqst *nlm_alloc_call(struct nlm_host *host)
for(;;) {
call = kzalloc(sizeof(*call), GFP_KERNEL);
if (call != NULL) {
+ atomic_set(&call->a_count, 1);
locks_init_lock(&call->a_args.lock.fl);
locks_init_lock(&call->a_res.lock.fl);
call->a_host = host;
@@ -237,6 +215,8 @@ struct nlm_rqst *nlm_alloc_call(struct nlm_host *host)
void nlm_release_call(struct nlm_rqst *call)
{
+ if (!atomic_dec_and_test(&call->a_count))
+ return;
nlm_release_host(call->a_host);
nlmclnt_release_lockargs(call);
kfree(call);
@@ -267,7 +247,7 @@ static int nlm_wait_on_grace(wait_queue_head_t *queue)
* Generic NLM call
*/
static int
-nlmclnt_call(struct nlm_rqst *req, u32 proc)
+nlmclnt_call(struct rpc_cred *cred, struct nlm_rqst *req, u32 proc)
{
struct nlm_host *host = req->a_host;
struct rpc_clnt *clnt;
@@ -276,6 +256,7 @@ nlmclnt_call(struct nlm_rqst *req, u32 proc)
struct rpc_message msg = {
.rpc_argp = argp,
.rpc_resp = resp,
+ .rpc_cred = cred,
};
int status;
@@ -343,10 +324,16 @@ in_grace_period:
/*
* Generic NLM call, async version.
*/
-static int __nlm_async_call(struct nlm_rqst *req, u32 proc, struct rpc_message *msg, const struct rpc_call_ops *tk_ops)
+static struct rpc_task *__nlm_async_call(struct nlm_rqst *req, u32 proc, struct rpc_message *msg, const struct rpc_call_ops *tk_ops)
{
struct nlm_host *host = req->a_host;
struct rpc_clnt *clnt;
+ struct rpc_task_setup task_setup_data = {
+ .rpc_message = msg,
+ .callback_ops = tk_ops,
+ .callback_data = req,
+ .flags = RPC_TASK_ASYNC,
+ };
dprintk("lockd: call procedure %d on %s (async)\n",
(int)proc, host->h_name);
@@ -356,21 +343,36 @@ static int __nlm_async_call(struct nlm_rqst *req, u32 proc, struct rpc_message *
if (clnt == NULL)
goto out_err;
msg->rpc_proc = &clnt->cl_procinfo[proc];
+ task_setup_data.rpc_client = clnt;
/* bootstrap and kick off the async RPC call */
- return rpc_call_async(clnt, msg, RPC_TASK_ASYNC, tk_ops, req);
+ return rpc_run_task(&task_setup_data);
out_err:
tk_ops->rpc_release(req);
- return -ENOLCK;
+ return ERR_PTR(-ENOLCK);
}
+static int nlm_do_async_call(struct nlm_rqst *req, u32 proc, struct rpc_message *msg, const struct rpc_call_ops *tk_ops)
+{
+ struct rpc_task *task;
+
+ task = __nlm_async_call(req, proc, msg, tk_ops);
+ if (IS_ERR(task))
+ return PTR_ERR(task);
+ rpc_put_task(task);
+ return 0;
+}
+
+/*
+ * NLM asynchronous call.
+ */
int nlm_async_call(struct nlm_rqst *req, u32 proc, const struct rpc_call_ops *tk_ops)
{
struct rpc_message msg = {
.rpc_argp = &req->a_args,
.rpc_resp = &req->a_res,
};
- return __nlm_async_call(req, proc, &msg, tk_ops);
+ return nlm_do_async_call(req, proc, &msg, tk_ops);
}
int nlm_async_reply(struct nlm_rqst *req, u32 proc, const struct rpc_call_ops *tk_ops)
@@ -378,7 +380,33 @@ int nlm_async_reply(struct nlm_rqst *req, u32 proc, const struct rpc_call_ops *t
struct rpc_message msg = {
.rpc_argp = &req->a_res,
};
- return __nlm_async_call(req, proc, &msg, tk_ops);
+ return nlm_do_async_call(req, proc, &msg, tk_ops);
+}
+
+/*
+ * NLM client asynchronous call.
+ *
+ * Note that although the calls are asynchronous, and are therefore
+ * guaranteed to complete, we still always attempt to wait for
+ * completion in order to be able to correctly track the lock
+ * state.
+ */
+static int nlmclnt_async_call(struct rpc_cred *cred, struct nlm_rqst *req, u32 proc, const struct rpc_call_ops *tk_ops)
+{
+ struct rpc_message msg = {
+ .rpc_argp = &req->a_args,
+ .rpc_resp = &req->a_res,
+ .rpc_cred = cred,
+ };
+ struct rpc_task *task;
+ int err;
+
+ task = __nlm_async_call(req, proc, &msg, tk_ops);
+ if (IS_ERR(task))
+ return PTR_ERR(task);
+ err = rpc_wait_for_completion_task(task);
+ rpc_put_task(task);
+ return err;
}
/*
@@ -389,7 +417,7 @@ nlmclnt_test(struct nlm_rqst *req, struct file_lock *fl)
{
int status;
- status = nlmclnt_call(req, NLMPROC_TEST);
+ status = nlmclnt_call(nfs_file_cred(fl->fl_file), req, NLMPROC_TEST);
if (status < 0)
goto out;
@@ -480,10 +508,12 @@ static int do_vfs_lock(struct file_lock *fl)
static int
nlmclnt_lock(struct nlm_rqst *req, struct file_lock *fl)
{
+ struct rpc_cred *cred = nfs_file_cred(fl->fl_file);
struct nlm_host *host = req->a_host;
struct nlm_res *resp = &req->a_res;
struct nlm_wait *block = NULL;
unsigned char fl_flags = fl->fl_flags;
+ unsigned char fl_type;
int status = -ENOLCK;
if (nsm_monitor(host) < 0) {
@@ -493,18 +523,22 @@ nlmclnt_lock(struct nlm_rqst *req, struct file_lock *fl)
}
fl->fl_flags |= FL_ACCESS;
status = do_vfs_lock(fl);
+ fl->fl_flags = fl_flags;
if (status < 0)
goto out;
block = nlmclnt_prepare_block(host, fl);
again:
+ /*
+ * Initialise resp->status to a valid non-zero value,
+ * since 0 == nlm_lck_granted
+ */
+ resp->status = nlm_lck_blocked;
for(;;) {
/* Reboot protection */
fl->fl_u.nfs_fl.state = host->h_state;
- status = nlmclnt_call(req, NLMPROC_LOCK);
+ status = nlmclnt_call(cred, req, NLMPROC_LOCK);
if (status < 0)
- goto out_unblock;
- if (!req->a_args.block)
break;
/* Did a reclaimer thread notify us of a server reboot? */
if (resp->status == nlm_lck_denied_grace_period)
@@ -513,15 +547,22 @@ again:
break;
/* Wait on an NLM blocking lock */
status = nlmclnt_block(block, req, NLMCLNT_POLL_TIMEOUT);
- /* if we were interrupted. Send a CANCEL request to the server
- * and exit
- */
if (status < 0)
- goto out_unblock;
+ break;
if (resp->status != nlm_lck_blocked)
break;
}
+ /* if we were interrupted while blocking, then cancel the lock request
+ * and exit
+ */
+ if (resp->status == nlm_lck_blocked) {
+ if (!req->a_args.block)
+ goto out_unlock;
+ if (nlmclnt_cancel(host, req->a_args.block, fl) == 0)
+ goto out_unblock;
+ }
+
if (resp->status == nlm_granted) {
down_read(&host->h_rwsem);
/* Check whether or not the server has rebooted */
@@ -530,20 +571,34 @@ again:
goto again;
}
/* Ensure the resulting lock will get added to granted list */
- fl->fl_flags = fl_flags | FL_SLEEP;
+ fl->fl_flags |= FL_SLEEP;
if (do_vfs_lock(fl) < 0)
printk(KERN_WARNING "%s: VFS is out of sync with lock manager!\n", __FUNCTION__);
up_read(&host->h_rwsem);
+ fl->fl_flags = fl_flags;
+ status = 0;
}
+ if (status < 0)
+ goto out_unlock;
status = nlm_stat_to_errno(resp->status);
out_unblock:
nlmclnt_finish_block(block);
- /* Cancel the blocked request if it is still pending */
- if (resp->status == nlm_lck_blocked)
- nlmclnt_cancel(host, req->a_args.block, fl);
out:
nlm_release_call(req);
+ return status;
+out_unlock:
+ /* Fatal error: ensure that we remove the lock altogether */
+ dprintk("lockd: lock attempt ended in fatal error.\n"
+ " Attempting to unlock.\n");
+ nlmclnt_finish_block(block);
+ fl_type = fl->fl_type;
+ fl->fl_type = F_UNLCK;
+ down_read(&host->h_rwsem);
+ do_vfs_lock(fl);
+ up_read(&host->h_rwsem);
+ fl->fl_type = fl_type;
fl->fl_flags = fl_flags;
+ nlmclnt_async_call(cred, req, NLMPROC_UNLOCK, &nlmclnt_unlock_ops);
return status;
}
@@ -567,8 +622,8 @@ nlmclnt_reclaim(struct nlm_host *host, struct file_lock *fl)
nlmclnt_setlockargs(req, fl);
req->a_args.reclaim = 1;
- if ((status = nlmclnt_call(req, NLMPROC_LOCK)) >= 0
- && req->a_res.status == nlm_granted)
+ status = nlmclnt_call(nfs_file_cred(fl->fl_file), req, NLMPROC_LOCK);
+ if (status >= 0 && req->a_res.status == nlm_granted)
return 0;
printk(KERN_WARNING "lockd: failed to reclaim lock for pid %d "
@@ -598,7 +653,8 @@ nlmclnt_unlock(struct nlm_rqst *req, struct file_lock *fl)
{
struct nlm_host *host = req->a_host;
struct nlm_res *resp = &req->a_res;
- int status = 0;
+ int status;
+ unsigned char fl_flags = fl->fl_flags;
/*
* Note: the server is supposed to either grant us the unlock
@@ -607,16 +663,17 @@ nlmclnt_unlock(struct nlm_rqst *req, struct file_lock *fl)
*/
fl->fl_flags |= FL_EXISTS;
down_read(&host->h_rwsem);
- if (do_vfs_lock(fl) == -ENOENT) {
- up_read(&host->h_rwsem);
+ status = do_vfs_lock(fl);
+ up_read(&host->h_rwsem);
+ fl->fl_flags = fl_flags;
+ if (status == -ENOENT) {
+ status = 0;
goto out;
}
- up_read(&host->h_rwsem);
-
- if (req->a_flags & RPC_TASK_ASYNC)
- return nlm_async_call(req, NLMPROC_UNLOCK, &nlmclnt_unlock_ops);
- status = nlmclnt_call(req, NLMPROC_UNLOCK);
+ atomic_inc(&req->a_count);
+ status = nlmclnt_async_call(nfs_file_cred(fl->fl_file), req,
+ NLMPROC_UNLOCK, &nlmclnt_unlock_ops);
if (status < 0)
goto out;
@@ -671,16 +728,10 @@ static const struct rpc_call_ops nlmclnt_unlock_ops = {
static int nlmclnt_cancel(struct nlm_host *host, int block, struct file_lock *fl)
{
struct nlm_rqst *req;
- unsigned long flags;
- sigset_t oldset;
- int status;
+ int status;
- /* Block all signals while setting up call */
- spin_lock_irqsave(&current->sighand->siglock, flags);
- oldset = current->blocked;
- sigfillset(&current->blocked);
- recalc_sigpending();
- spin_unlock_irqrestore(&current->sighand->siglock, flags);
+ dprintk("lockd: blocking lock attempt was interrupted by a signal.\n"
+ " Attempting to cancel lock.\n");
req = nlm_alloc_call(nlm_get_host(host));
if (!req)
@@ -690,13 +741,12 @@ static int nlmclnt_cancel(struct nlm_host *host, int block, struct file_lock *fl
nlmclnt_setlockargs(req, fl);
req->a_args.block = block;
- status = nlm_async_call(req, NLMPROC_CANCEL, &nlmclnt_cancel_ops);
-
- spin_lock_irqsave(&current->sighand->siglock, flags);
- current->blocked = oldset;
- recalc_sigpending();
- spin_unlock_irqrestore(&current->sighand->siglock, flags);
-
+ atomic_inc(&req->a_count);
+ status = nlmclnt_async_call(nfs_file_cred(fl->fl_file), req,
+ NLMPROC_CANCEL, &nlmclnt_cancel_ops);
+ if (status == 0 && req->a_res.status == nlm_lck_denied)
+ status = -ENOLCK;
+ nlm_release_call(req);
return status;
}
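
nlm_alloc_call()/nlm_release_call() above now treat a_count as a real reference count: the allocator starts it at 1, each extra user (for example the async RPC whose rpc_release callback does its own put) takes an additional reference with atomic_inc(), and the structure is only freed when the count drops to zero. The same discipline, expressed with the generic kref helpers purely as an illustration (lockd itself uses a bare atomic_t, and the demo_* names below are invented):

#include <linux/kernel.h>
#include <linux/kref.h>
#include <linux/slab.h>

struct demo_call {
	struct kref ref;
	/* ... request state ... */
};

static void demo_free(struct kref *ref)
{
	kfree(container_of(ref, struct demo_call, ref));
}

static struct demo_call *demo_alloc(void)
{
	struct demo_call *call = kzalloc(sizeof(*call), GFP_KERNEL);

	if (call)
		kref_init(&call->ref);		/* count starts at 1 */
	return call;
}

/* Take an extra reference before handing the call to another user
 * that will drop its own reference when it is done. */
static void demo_get(struct demo_call *call)
{
	kref_get(&call->ref);
}

static void demo_put(struct demo_call *call)
{
	kref_put(&call->ref, demo_free);	/* frees on the last put */
}
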
diff --git a/fs/lockd/host.c b/fs/lockd/host.c
index f1ef49fff11..a17664c7eac 100644
--- a/fs/lockd/host.c
+++ b/fs/lockd/host.c
@@ -19,12 +19,11 @@
#define NLMDBG_FACILITY NLMDBG_HOSTCACHE
-#define NLM_HOST_MAX 64
#define NLM_HOST_NRHASH 32
#define NLM_ADDRHASH(addr) (ntohl(addr) & (NLM_HOST_NRHASH-1))
#define NLM_HOST_REBIND (60 * HZ)
-#define NLM_HOST_EXPIRE ((nrhosts > NLM_HOST_MAX)? 300 * HZ : 120 * HZ)
-#define NLM_HOST_COLLECT ((nrhosts > NLM_HOST_MAX)? 120 * HZ : 60 * HZ)
+#define NLM_HOST_EXPIRE (300 * HZ)
+#define NLM_HOST_COLLECT (120 * HZ)
static struct hlist_head nlm_hosts[NLM_HOST_NRHASH];
static unsigned long next_gc;
@@ -42,11 +41,12 @@ static struct nsm_handle * nsm_find(const struct sockaddr_in *sin,
/*
* Common host lookup routine for server & client
*/
-static struct nlm_host *
-nlm_lookup_host(int server, const struct sockaddr_in *sin,
- int proto, int version, const char *hostname,
- unsigned int hostname_len,
- const struct sockaddr_in *ssin)
+static struct nlm_host *nlm_lookup_host(int server,
+ const struct sockaddr_in *sin,
+ int proto, u32 version,
+ const char *hostname,
+ unsigned int hostname_len,
+ const struct sockaddr_in *ssin)
{
struct hlist_head *chain;
struct hlist_node *pos;
@@ -55,7 +55,7 @@ nlm_lookup_host(int server, const struct sockaddr_in *sin,
int hash;
dprintk("lockd: nlm_lookup_host("NIPQUAD_FMT"->"NIPQUAD_FMT
- ", p=%d, v=%d, my role=%s, name=%.*s)\n",
+ ", p=%d, v=%u, my role=%s, name=%.*s)\n",
NIPQUAD(ssin->sin_addr.s_addr),
NIPQUAD(sin->sin_addr.s_addr), proto, version,
server? "server" : "client",
@@ -142,9 +142,7 @@ nlm_lookup_host(int server, const struct sockaddr_in *sin,
INIT_LIST_HEAD(&host->h_granted);
INIT_LIST_HEAD(&host->h_reclaim);
- if (++nrhosts > NLM_HOST_MAX)
- next_gc = 0;
-
+ nrhosts++;
out:
mutex_unlock(&nlm_host_mutex);
return host;
@@ -175,9 +173,10 @@ nlm_destroy_host(struct nlm_host *host)
/*
* Find an NLM server handle in the cache. If there is none, create it.
*/
-struct nlm_host *
-nlmclnt_lookup_host(const struct sockaddr_in *sin, int proto, int version,
- const char *hostname, unsigned int hostname_len)
+struct nlm_host *nlmclnt_lookup_host(const struct sockaddr_in *sin,
+ int proto, u32 version,
+ const char *hostname,
+ unsigned int hostname_len)
{
struct sockaddr_in ssin = {0};
@@ -460,7 +459,7 @@ nlm_gc_hosts(void)
* Manage NSM handles
*/
static LIST_HEAD(nsm_handles);
-static DEFINE_MUTEX(nsm_mutex);
+static DEFINE_SPINLOCK(nsm_lock);
static struct nsm_handle *
__nsm_find(const struct sockaddr_in *sin,
@@ -468,7 +467,7 @@ __nsm_find(const struct sockaddr_in *sin,
int create)
{
struct nsm_handle *nsm = NULL;
- struct list_head *pos;
+ struct nsm_handle *pos;
if (!sin)
return NULL;
@@ -482,38 +481,43 @@ __nsm_find(const struct sockaddr_in *sin,
return NULL;
}
- mutex_lock(&nsm_mutex);
- list_for_each(pos, &nsm_handles) {
- nsm = list_entry(pos, struct nsm_handle, sm_link);
+retry:
+ spin_lock(&nsm_lock);
+ list_for_each_entry(pos, &nsm_handles, sm_link) {
if (hostname && nsm_use_hostnames) {
- if (strlen(nsm->sm_name) != hostname_len
- || memcmp(nsm->sm_name, hostname, hostname_len))
+ if (strlen(pos->sm_name) != hostname_len
+ || memcmp(pos->sm_name, hostname, hostname_len))
continue;
- } else if (!nlm_cmp_addr(&nsm->sm_addr, sin))
+ } else if (!nlm_cmp_addr(&pos->sm_addr, sin))
continue;
- atomic_inc(&nsm->sm_count);
- goto out;
+ atomic_inc(&pos->sm_count);
+ kfree(nsm);
+ nsm = pos;
+ goto found;
}
-
- if (!create) {
- nsm = NULL;
- goto out;
+ if (nsm) {
+ list_add(&nsm->sm_link, &nsm_handles);
+ goto found;
}
+ spin_unlock(&nsm_lock);
+
+ if (!create)
+ return NULL;
nsm = kzalloc(sizeof(*nsm) + hostname_len + 1, GFP_KERNEL);
- if (nsm != NULL) {
- nsm->sm_addr = *sin;
- nsm->sm_name = (char *) (nsm + 1);
- memcpy(nsm->sm_name, hostname, hostname_len);
- nsm->sm_name[hostname_len] = '\0';
- atomic_set(&nsm->sm_count, 1);
+ if (nsm == NULL)
+ return NULL;
- list_add(&nsm->sm_link, &nsm_handles);
- }
+ nsm->sm_addr = *sin;
+ nsm->sm_name = (char *) (nsm + 1);
+ memcpy(nsm->sm_name, hostname, hostname_len);
+ nsm->sm_name[hostname_len] = '\0';
+ atomic_set(&nsm->sm_count, 1);
+ goto retry;
-out:
- mutex_unlock(&nsm_mutex);
+found:
+ spin_unlock(&nsm_lock);
return nsm;
}
@@ -532,12 +536,9 @@ nsm_release(struct nsm_handle *nsm)
{
if (!nsm)
return;
- if (atomic_dec_and_test(&nsm->sm_count)) {
- mutex_lock(&nsm_mutex);
- if (atomic_read(&nsm->sm_count) == 0) {
- list_del(&nsm->sm_link);
- kfree(nsm);
- }
- mutex_unlock(&nsm_mutex);
+ if (atomic_dec_and_lock(&nsm->sm_count, &nsm_lock)) {
+ list_del(&nsm->sm_link);
+ spin_unlock(&nsm_lock);
+ kfree(nsm);
}
}
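
__nsm_find() above switches from a mutex to the nsm_lock spinlock, so the GFP_KERNEL allocation has to move outside the lock: search, and on a miss drop the lock, allocate, then retry so that a handle inserted by a racing caller wins and the spare allocation is freed. A stripped-down sketch of that shape, with invented names and the reference counting omitted:

#include <linux/list.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct demo_entry {
	struct list_head link;
	int key;
};

static LIST_HEAD(demo_list);
static DEFINE_SPINLOCK(demo_lock);

static struct demo_entry *demo_find_or_create(int key)
{
	struct demo_entry *new = NULL, *pos;

retry:
	spin_lock(&demo_lock);
	list_for_each_entry(pos, &demo_list, link) {
		if (pos->key == key) {
			spin_unlock(&demo_lock);
			kfree(new);		/* lost the race; discard spare */
			return pos;
		}
	}
	if (new) {
		list_add(&new->link, &demo_list);
		spin_unlock(&demo_lock);
		return new;
	}
	spin_unlock(&demo_lock);

	new = kzalloc(sizeof(*new), GFP_KERNEL);	/* may sleep, so no lock held */
	if (!new)
		return NULL;
	new->key = key;
	goto retry;
}

The release side pairs with this via atomic_dec_and_lock() in nsm_release() above, which only takes nsm_lock when the count actually drops to zero.
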
diff --git a/fs/lockd/mon.c b/fs/lockd/mon.c
index 908b23fadd0..e4d563543b1 100644
--- a/fs/lockd/mon.c
+++ b/fs/lockd/mon.c
@@ -18,6 +18,8 @@
#define NLMDBG_FACILITY NLMDBG_MONITOR
+#define XDR_ADDRBUF_LEN (20)
+
static struct rpc_clnt * nsm_create(void);
static struct rpc_program nsm_program;
@@ -147,28 +149,55 @@ nsm_create(void)
/*
* XDR functions for NSM.
+ *
+ * See http://www.opengroup.org/ for details on the Network
+ * Status Monitor wire protocol.
*/
-static __be32 *
-xdr_encode_common(struct rpc_rqst *rqstp, __be32 *p, struct nsm_args *argp)
+static __be32 *xdr_encode_nsm_string(__be32 *p, char *string)
{
- char buffer[20], *name;
-
- /*
- * Use the dotted-quad IP address of the remote host as
- * identifier. Linux statd always looks up the canonical
- * hostname first for whatever remote hostname it receives,
- * so this works alright.
- */
- if (nsm_use_hostnames) {
- name = argp->mon_name;
- } else {
- sprintf(buffer, "%u.%u.%u.%u", NIPQUAD(argp->addr));
+ size_t len = strlen(string);
+
+ if (len > SM_MAXSTRLEN)
+ len = SM_MAXSTRLEN;
+ return xdr_encode_opaque(p, string, len);
+}
+
+/*
+ * "mon_name" specifies the host to be monitored.
+ *
+ * Linux uses a text version of the IP address of the remote
+ * host as the host identifier (the "mon_name" argument).
+ *
+ * Linux statd always looks up the canonical hostname first for
+ * whatever remote hostname it receives, so this works alright.
+ */
+static __be32 *xdr_encode_mon_name(__be32 *p, struct nsm_args *argp)
+{
+ char buffer[XDR_ADDRBUF_LEN + 1];
+ char *name = argp->mon_name;
+
+ if (!nsm_use_hostnames) {
+ snprintf(buffer, XDR_ADDRBUF_LEN,
+ NIPQUAD_FMT, NIPQUAD(argp->addr));
name = buffer;
}
- if (!(p = xdr_encode_string(p, name))
- || !(p = xdr_encode_string(p, utsname()->nodename)))
+
+ return xdr_encode_nsm_string(p, name);
+}
+
+/*
+ * The "my_id" argument specifies the hostname and RPC procedure
+ * to be called when the status manager receives notification
+ * (via the SM_NOTIFY call) that the state of host "mon_name"
+ * has changed.
+ */
+static __be32 *xdr_encode_my_id(__be32 *p, struct nsm_args *argp)
+{
+ p = xdr_encode_nsm_string(p, utsname()->nodename);
+ if (!p)
return ERR_PTR(-EIO);
+
*p++ = htonl(argp->prog);
*p++ = htonl(argp->vers);
*p++ = htonl(argp->proc);
@@ -176,18 +205,48 @@ xdr_encode_common(struct rpc_rqst *rqstp, __be32 *p, struct nsm_args *argp)
return p;
}
-static int
-xdr_encode_mon(struct rpc_rqst *rqstp, __be32 *p, struct nsm_args *argp)
+/*
+ * The "mon_id" argument specifies the non-private arguments
+ * of an SM_MON or SM_UNMON call.
+ */
+static __be32 *xdr_encode_mon_id(__be32 *p, struct nsm_args *argp)
{
- p = xdr_encode_common(rqstp, p, argp);
- if (IS_ERR(p))
- return PTR_ERR(p);
+ p = xdr_encode_mon_name(p, argp);
+ if (!p)
+ return ERR_PTR(-EIO);
- /* Surprise - there may even be room for an IPv6 address now */
+ return xdr_encode_my_id(p, argp);
+}
+
+/*
+ * The "priv" argument may contain private information required
+ * by the SM_MON call. This information will be supplied in the
+ * SM_NOTIFY call.
+ *
+ * Linux provides the raw IP address of the monitored host,
+ * left in network byte order.
+ */
+static __be32 *xdr_encode_priv(__be32 *p, struct nsm_args *argp)
+{
*p++ = argp->addr;
*p++ = 0;
*p++ = 0;
*p++ = 0;
+
+ return p;
+}
+
+static int
+xdr_encode_mon(struct rpc_rqst *rqstp, __be32 *p, struct nsm_args *argp)
+{
+ p = xdr_encode_mon_id(p, argp);
+ if (IS_ERR(p))
+ return PTR_ERR(p);
+
+ p = xdr_encode_priv(p, argp);
+ if (IS_ERR(p))
+ return PTR_ERR(p);
+
rqstp->rq_slen = xdr_adjust_iovec(rqstp->rq_svec, p);
return 0;
}
@@ -195,7 +254,7 @@ xdr_encode_mon(struct rpc_rqst *rqstp, __be32 *p, struct nsm_args *argp)
static int
xdr_encode_unmon(struct rpc_rqst *rqstp, __be32 *p, struct nsm_args *argp)
{
- p = xdr_encode_common(rqstp, p, argp);
+ p = xdr_encode_mon_id(p, argp);
if (IS_ERR(p))
return PTR_ERR(p);
rqstp->rq_slen = xdr_adjust_iovec(rqstp->rq_svec, p);
@@ -220,9 +279,11 @@ xdr_decode_stat(struct rpc_rqst *rqstp, __be32 *p, struct nsm_res *resp)
}
#define SM_my_name_sz (1+XDR_QUADLEN(SM_MAXSTRLEN))
-#define SM_my_id_sz (3+1+SM_my_name_sz)
-#define SM_mon_id_sz (1+XDR_QUADLEN(20)+SM_my_id_sz)
-#define SM_mon_sz (SM_mon_id_sz+4)
+#define SM_my_id_sz (SM_my_name_sz+3)
+#define SM_mon_name_sz (1+XDR_QUADLEN(SM_MAXSTRLEN))
+#define SM_mon_id_sz (SM_mon_name_sz+SM_my_id_sz)
+#define SM_priv_sz (XDR_QUADLEN(SM_PRIV_SIZE))
+#define SM_mon_sz (SM_mon_id_sz+SM_priv_sz)
#define SM_monres_sz 2
#define SM_unmonres_sz 1
diff --git a/fs/lockd/svc.c b/fs/lockd/svc.c
index 1ed8bd4de94..2169af4d545 100644
--- a/fs/lockd/svc.c
+++ b/fs/lockd/svc.c
@@ -25,6 +25,7 @@
#include <linux/smp.h>
#include <linux/smp_lock.h>
#include <linux/mutex.h>
+#include <linux/kthread.h>
#include <linux/freezer.h>
#include <linux/sunrpc/types.h>
@@ -48,14 +49,11 @@ EXPORT_SYMBOL(nlmsvc_ops);
static DEFINE_MUTEX(nlmsvc_mutex);
static unsigned int nlmsvc_users;
-static pid_t nlmsvc_pid;
+static struct task_struct *nlmsvc_task;
static struct svc_serv *nlmsvc_serv;
int nlmsvc_grace_period;
unsigned long nlmsvc_timeout;
-static DECLARE_COMPLETION(lockd_start_done);
-static DECLARE_WAIT_QUEUE_HEAD(lockd_exit);
-
/*
* These can be set at insmod time (useful for NFS as root filesystem),
* and also changed through the sysctl interface. -- Jamie Lokier, Aug 2003
@@ -74,7 +72,9 @@ static const unsigned long nlm_timeout_min = 3;
static const unsigned long nlm_timeout_max = 20;
static const int nlm_port_min = 0, nlm_port_max = 65535;
+#ifdef CONFIG_SYSCTL
static struct ctl_table_header * nlm_sysctl_table;
+#endif
static unsigned long get_lockd_grace_period(void)
{
@@ -111,35 +111,30 @@ static inline void clear_grace_period(void)
/*
* This is the lockd kernel thread
*/
-static void
-lockd(struct svc_rqst *rqstp)
+static int
+lockd(void *vrqstp)
{
- int err = 0;
+ int err = 0, preverr = 0;
+ struct svc_rqst *rqstp = vrqstp;
unsigned long grace_period_expire;
- /* Lock module and set up kernel thread */
- /* lockd_up is waiting for us to startup, so will
- * be holding a reference to this module, so it
- * is safe to just claim another reference
- */
- __module_get(THIS_MODULE);
- lock_kernel();
-
- /*
- * Let our maker know we're running.
- */
- nlmsvc_pid = current->pid;
- nlmsvc_serv = rqstp->rq_server;
- complete(&lockd_start_done);
-
- daemonize("lockd");
+ /* try_to_freeze() is called from svc_recv() */
set_freezable();
- /* Process request with signals blocked, but allow SIGKILL. */
+ /* Allow SIGKILL to tell lockd to drop all of its locks */
allow_signal(SIGKILL);
dprintk("NFS locking service started (ver " LOCKD_VERSION ").\n");
+ /*
+ * FIXME: it would be nice if lockd didn't spend its entire life
+ * running under the BKL. At the very least, it would be good to
+ * have someone clarify what it's intended to protect here. I've
+ * seen some handwavy posts about posix locking needing to be
+ * done under the BKL, but it's far from clear.
+ */
+ lock_kernel();
+
if (!nlm_timeout)
nlm_timeout = LOCKD_DFLT_TIMEO;
nlmsvc_timeout = nlm_timeout * HZ;
@@ -148,10 +143,9 @@ lockd(struct svc_rqst *rqstp)
/*
* The main request loop. We don't terminate until the last
- * NFS mount or NFS daemon has gone away, and we've been sent a
- * signal, or else another process has taken over our job.
+ * NFS mount or NFS daemon has gone away.
*/
- while ((nlmsvc_users || !signalled()) && nlmsvc_pid == current->pid) {
+ while (!kthread_should_stop()) {
long timeout = MAX_SCHEDULE_TIMEOUT;
RPC_IFDEBUG(char buf[RPC_MAX_ADDRBUFLEN]);
@@ -161,6 +155,7 @@ lockd(struct svc_rqst *rqstp)
nlmsvc_invalidate_all();
grace_period_expire = set_grace_period();
}
+ continue;
}
/*
@@ -179,14 +174,20 @@ lockd(struct svc_rqst *rqstp)
* recvfrom routine.
*/
err = svc_recv(rqstp, timeout);
- if (err == -EAGAIN || err == -EINTR)
+ if (err == -EAGAIN || err == -EINTR) {
+ preverr = err;
continue;
+ }
if (err < 0) {
- printk(KERN_WARNING
- "lockd: terminating on error %d\n",
- -err);
- break;
+ if (err != preverr) {
+ printk(KERN_WARNING "%s: unexpected error "
+ "from svc_recv (%d)\n", __func__, err);
+ preverr = err;
+ }
+ schedule_timeout_interruptible(HZ);
+ continue;
}
+ preverr = err;
dprintk("lockd: request from %s\n",
svc_print_addr(rqstp, buf, sizeof(buf)));
@@ -195,28 +196,19 @@ lockd(struct svc_rqst *rqstp)
}
flush_signals(current);
+ if (nlmsvc_ops)
+ nlmsvc_invalidate_all();
+ nlm_shutdown_hosts();
- /*
- * Check whether there's a new lockd process before
- * shutting down the hosts and clearing the slot.
- */
- if (!nlmsvc_pid || current->pid == nlmsvc_pid) {
- if (nlmsvc_ops)
- nlmsvc_invalidate_all();
- nlm_shutdown_hosts();
- nlmsvc_pid = 0;
- nlmsvc_serv = NULL;
- } else
- printk(KERN_DEBUG
- "lockd: new process, skipping host shutdown\n");
- wake_up(&lockd_exit);
+ unlock_kernel();
+
+ nlmsvc_task = NULL;
+ nlmsvc_serv = NULL;
/* Exit the RPC thread */
svc_exit_thread(rqstp);
- /* Release module */
- unlock_kernel();
- module_put_and_exit(0);
+ return 0;
}
/*
@@ -261,14 +253,15 @@ static int make_socks(struct svc_serv *serv, int proto)
int
lockd_up(int proto) /* Maybe add a 'family' option when IPv6 is supported ?? */
{
- struct svc_serv * serv;
- int error = 0;
+ struct svc_serv *serv;
+ struct svc_rqst *rqstp;
+ int error = 0;
mutex_lock(&nlmsvc_mutex);
/*
* Check whether we're already up and running.
*/
- if (nlmsvc_pid) {
+ if (nlmsvc_serv) {
if (proto)
error = make_socks(nlmsvc_serv, proto);
goto out;
@@ -295,13 +288,28 @@ lockd_up(int proto) /* Maybe add a 'family' option when IPv6 is supported ?? */
/*
* Create the kernel thread and wait for it to start.
*/
- error = svc_create_thread(lockd, serv);
- if (error) {
+ rqstp = svc_prepare_thread(serv, &serv->sv_pools[0]);
+ if (IS_ERR(rqstp)) {
+ error = PTR_ERR(rqstp);
+ printk(KERN_WARNING
+ "lockd_up: svc_rqst allocation failed, error=%d\n",
+ error);
+ goto destroy_and_out;
+ }
+
+ svc_sock_update_bufs(serv);
+ nlmsvc_serv = rqstp->rq_server;
+
+ nlmsvc_task = kthread_run(lockd, rqstp, serv->sv_name);
+ if (IS_ERR(nlmsvc_task)) {
+ error = PTR_ERR(nlmsvc_task);
+ nlmsvc_task = NULL;
+ nlmsvc_serv = NULL;
printk(KERN_WARNING
- "lockd_up: create thread failed, error=%d\n", error);
+ "lockd_up: kthread_run failed, error=%d\n", error);
+ svc_exit_thread(rqstp);
goto destroy_and_out;
}
- wait_for_completion(&lockd_start_done);
/*
* Note: svc_serv structures have an initial use count of 1,
@@ -323,42 +331,28 @@ EXPORT_SYMBOL(lockd_up);
void
lockd_down(void)
{
- static int warned;
-
mutex_lock(&nlmsvc_mutex);
if (nlmsvc_users) {
if (--nlmsvc_users)
goto out;
- } else
- printk(KERN_WARNING "lockd_down: no users! pid=%d\n", nlmsvc_pid);
-
- if (!nlmsvc_pid) {
- if (warned++ == 0)
- printk(KERN_WARNING "lockd_down: no lockd running.\n");
- goto out;
+ } else {
+ printk(KERN_ERR "lockd_down: no users! task=%p\n",
+ nlmsvc_task);
+ BUG();
}
- warned = 0;
- kill_proc(nlmsvc_pid, SIGKILL, 1);
- /*
- * Wait for the lockd process to exit, but since we're holding
- * the lockd semaphore, we can't wait around forever ...
- */
- clear_thread_flag(TIF_SIGPENDING);
- interruptible_sleep_on_timeout(&lockd_exit, HZ);
- if (nlmsvc_pid) {
- printk(KERN_WARNING
- "lockd_down: lockd failed to exit, clearing pid\n");
- nlmsvc_pid = 0;
+ if (!nlmsvc_task) {
+ printk(KERN_ERR "lockd_down: no lockd running.\n");
+ BUG();
}
- spin_lock_irq(&current->sighand->siglock);
- recalc_sigpending();
- spin_unlock_irq(&current->sighand->siglock);
+ kthread_stop(nlmsvc_task);
out:
mutex_unlock(&nlmsvc_mutex);
}
EXPORT_SYMBOL(lockd_down);
+#ifdef CONFIG_SYSCTL
+
/*
* Sysctl parameters (same as module parameters, different interface).
*/
@@ -443,6 +437,8 @@ static ctl_table nlm_sysctl_root[] = {
{ .ctl_name = 0 }
};
+#endif /* CONFIG_SYSCTL */
+
/*
* Module (and sysfs) parameters.
*/
@@ -516,15 +512,21 @@ module_param(nsm_use_hostnames, bool, 0644);
static int __init init_nlm(void)
{
+#ifdef CONFIG_SYSCTL
nlm_sysctl_table = register_sysctl_table(nlm_sysctl_root);
return nlm_sysctl_table ? 0 : -ENOMEM;
+#else
+ return 0;
+#endif
}
static void __exit exit_nlm(void)
{
/* FIXME: delete all NLM clients */
nlm_shutdown_hosts();
+#ifdef CONFIG_SYSCTL
unregister_sysctl_table(nlm_sysctl_table);
+#endif
}
module_init(init_nlm);
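
The lockd main loop above moves from the old svc_create_thread()/daemonize()/pid bookkeeping to the kthread API: the parent creates the thread with kthread_run(), the service loop polls kthread_should_stop(), and lockd_down() tears it down with kthread_stop(). A minimal skeleton of that lifecycle, with invented demo_* names:

#include <linux/err.h>
#include <linux/kthread.h>
#include <linux/sched.h>

static struct task_struct *demo_task;

static int demo_thread(void *data)
{
	while (!kthread_should_stop()) {
		/* ... service one request or time out ... */
		schedule_timeout_interruptible(HZ);
	}
	return 0;	/* this value is returned by kthread_stop() */
}

static int demo_start(void)
{
	demo_task = kthread_run(demo_thread, NULL, "demo_thread");
	if (IS_ERR(demo_task)) {
		int err = PTR_ERR(demo_task);

		demo_task = NULL;
		return err;
	}
	return 0;
}

static void demo_stop(void)
{
	if (demo_task)
		kthread_stop(demo_task);	/* waits for demo_thread() to return */
}

kthread_stop() sets the stop flag, wakes the thread and waits for it to exit, which is what lets the old lockd_exit waitqueue and the lockd_start_done completion go away.
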
diff --git a/fs/lockd/svclock.c b/fs/lockd/svclock.c
index fe9bdb4a220..4d81553d294 100644
--- a/fs/lockd/svclock.c
+++ b/fs/lockd/svclock.c
@@ -29,6 +29,7 @@
#include <linux/sunrpc/svc.h>
#include <linux/lockd/nlm.h>
#include <linux/lockd/lockd.h>
+#include <linux/kthread.h>
#define NLMDBG_FACILITY NLMDBG_SVCLOCK
@@ -226,8 +227,7 @@ failed:
}
/*
- * Delete a block. If the lock was cancelled or the grant callback
- * failed, unlock is set to 1.
+ * Delete a block.
* It is the caller's responsibility to check whether the file
* can be closed hereafter.
*/
@@ -632,7 +632,7 @@ nlmsvc_update_deferred_block(struct nlm_block *block, struct file_lock *conf,
block->b_flags |= B_TIMED_OUT;
if (conf) {
if (block->b_fl)
- locks_copy_lock(block->b_fl, conf);
+ __locks_copy_lock(block->b_fl, conf);
}
}
@@ -887,7 +887,7 @@ nlmsvc_retry_blocked(void)
unsigned long timeout = MAX_SCHEDULE_TIMEOUT;
struct nlm_block *block;
- while (!list_empty(&nlm_blocked)) {
+ while (!list_empty(&nlm_blocked) && !kthread_should_stop()) {
block = list_entry(nlm_blocked.next, struct nlm_block, b_list);
if (block->b_when == NLM_NEVER)
diff --git a/fs/lockd/svcshare.c b/fs/lockd/svcshare.c
index 068886de4dd..b0ae0700870 100644
--- a/fs/lockd/svcshare.c
+++ b/fs/lockd/svcshare.c
@@ -71,7 +71,8 @@ nlmsvc_unshare_file(struct nlm_host *host, struct nlm_file *file,
struct nlm_share *share, **shpp;
struct xdr_netobj *oh = &argp->lock.oh;
- for (shpp = &file->f_shares; (share = *shpp) != 0; shpp = &share->s_next) {
+ for (shpp = &file->f_shares; (share = *shpp) != NULL;
+ shpp = &share->s_next) {
if (share->s_host == host && nlm_cmp_owner(share, oh)) {
*shpp = share->s_next;
kfree(share);
diff --git a/fs/lockd/svcsubs.c b/fs/lockd/svcsubs.c
index dbbefbcd671..d1c48b539df 100644
--- a/fs/lockd/svcsubs.c
+++ b/fs/lockd/svcsubs.c
@@ -18,6 +18,8 @@
#include <linux/lockd/lockd.h>
#include <linux/lockd/share.h>
#include <linux/lockd/sm_inter.h>
+#include <linux/module.h>
+#include <linux/mount.h>
#define NLMDBG_FACILITY NLMDBG_SVCSUBS
@@ -194,6 +196,12 @@ again:
return 0;
}
+static int
+nlmsvc_always_match(void *dummy1, struct nlm_host *dummy2)
+{
+ return 1;
+}
+
/*
* Inspect a single file
*/
@@ -230,7 +238,8 @@ nlm_file_inuse(struct nlm_file *file)
* Loop over all files in the file table.
*/
static int
-nlm_traverse_files(struct nlm_host *host, nlm_host_match_fn_t match)
+nlm_traverse_files(void *data, nlm_host_match_fn_t match,
+ int (*is_failover_file)(void *data, struct nlm_file *file))
{
struct hlist_node *pos, *next;
struct nlm_file *file;
@@ -239,12 +248,14 @@ nlm_traverse_files(struct nlm_host *host, nlm_host_match_fn_t match)
mutex_lock(&nlm_file_mutex);
for (i = 0; i < FILE_NRHASH; i++) {
hlist_for_each_entry_safe(file, pos, next, &nlm_files[i], f_list) {
+ if (is_failover_file && !is_failover_file(data, file))
+ continue;
file->f_count++;
mutex_unlock(&nlm_file_mutex);
/* Traverse locks, blocks and shares of this file
* and update file->f_locks count */
- if (nlm_inspect_file(host, file, match))
+ if (nlm_inspect_file(data, file, match))
ret = 1;
mutex_lock(&nlm_file_mutex);
@@ -303,21 +314,27 @@ nlm_release_file(struct nlm_file *file)
* Used by nlmsvc_invalidate_all
*/
static int
-nlmsvc_mark_host(struct nlm_host *host, struct nlm_host *dummy)
+nlmsvc_mark_host(void *data, struct nlm_host *dummy)
{
+ struct nlm_host *host = data;
+
host->h_inuse = 1;
return 0;
}
static int
-nlmsvc_same_host(struct nlm_host *host, struct nlm_host *other)
+nlmsvc_same_host(void *data, struct nlm_host *other)
{
+ struct nlm_host *host = data;
+
return host == other;
}
static int
-nlmsvc_is_client(struct nlm_host *host, struct nlm_host *dummy)
+nlmsvc_is_client(void *data, struct nlm_host *dummy)
{
+ struct nlm_host *host = data;
+
if (host->h_server) {
/* we are destroying locks even though the client
* hasn't asked us too, so don't unmonitor the
@@ -337,7 +354,7 @@ void
nlmsvc_mark_resources(void)
{
dprintk("lockd: nlmsvc_mark_resources\n");
- nlm_traverse_files(NULL, nlmsvc_mark_host);
+ nlm_traverse_files(NULL, nlmsvc_mark_host, NULL);
}
/*
@@ -348,7 +365,7 @@ nlmsvc_free_host_resources(struct nlm_host *host)
{
dprintk("lockd: nlmsvc_free_host_resources\n");
- if (nlm_traverse_files(host, nlmsvc_same_host)) {
+ if (nlm_traverse_files(host, nlmsvc_same_host, NULL)) {
printk(KERN_WARNING
"lockd: couldn't remove all locks held by %s\n",
host->h_name);
@@ -368,5 +385,41 @@ nlmsvc_invalidate_all(void)
* turn, which is about as inefficient as it gets.
* Now we just do it once in nlm_traverse_files.
*/
- nlm_traverse_files(NULL, nlmsvc_is_client);
+ nlm_traverse_files(NULL, nlmsvc_is_client, NULL);
+}
+
+static int
+nlmsvc_match_sb(void *datap, struct nlm_file *file)
+{
+ struct super_block *sb = datap;
+
+ return sb == file->f_file->f_path.mnt->mnt_sb;
+}
+
+int
+nlmsvc_unlock_all_by_sb(struct super_block *sb)
+{
+ int ret;
+
+ ret = nlm_traverse_files(sb, nlmsvc_always_match, nlmsvc_match_sb);
+ return ret ? -EIO : 0;
+}
+EXPORT_SYMBOL_GPL(nlmsvc_unlock_all_by_sb);
+
+static int
+nlmsvc_match_ip(void *datap, struct nlm_host *host)
+{
+ __be32 *server_addr = datap;
+
+ return host->h_saddr.sin_addr.s_addr == *server_addr;
+}
+
+int
+nlmsvc_unlock_all_by_ip(__be32 server_addr)
+{
+ int ret;
+ ret = nlm_traverse_files(&server_addr, nlmsvc_match_ip, NULL);
+ return ret ? -EIO : 0;
+
}
+EXPORT_SYMBOL_GPL(nlmsvc_unlock_all_by_ip);
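
nlm_traverse_files() above now takes an opaque data cookie plus an optional per-file predicate, which is what lets nlmsvc_unlock_all_by_sb() and nlmsvc_unlock_all_by_ip() reuse the same walk with different filters. The general shape, reduced to a sketch with invented types (the real code passes separate host-match and file-filter callbacks with different argument types):

#include <linux/list.h>

struct demo_file {
	struct list_head list;
	/* ... per-file state ... */
};

typedef int (*demo_match_fn)(void *cookie, struct demo_file *file);

static int demo_traverse(struct list_head *files, void *cookie,
			 demo_match_fn inspect, demo_match_fn filter)
{
	struct demo_file *file;
	int hit = 0;

	list_for_each_entry(file, files, list) {
		if (filter && !filter(cookie, file))
			continue;	/* skip files the caller is not interested in */
		if (inspect(cookie, file))
			hit = 1;
	}
	return hit;
}
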
diff --git a/fs/locks.c b/fs/locks.c
index 592faadbcec..44d9a6a7ec5 100644
--- a/fs/locks.c
+++ b/fs/locks.c
@@ -224,7 +224,7 @@ static void locks_copy_private(struct file_lock *new, struct file_lock *fl)
/*
* Initialize a new lock from an existing file_lock structure.
*/
-static void __locks_copy_lock(struct file_lock *new, const struct file_lock *fl)
+void __locks_copy_lock(struct file_lock *new, const struct file_lock *fl)
{
new->fl_owner = fl->fl_owner;
new->fl_pid = fl->fl_pid;
@@ -236,6 +236,7 @@ static void __locks_copy_lock(struct file_lock *new, const struct file_lock *fl)
new->fl_ops = NULL;
new->fl_lmops = NULL;
}
+EXPORT_SYMBOL(__locks_copy_lock);
void locks_copy_lock(struct file_lock *new, struct file_lock *fl)
{
@@ -833,7 +834,7 @@ static int __posix_lock_file(struct inode *inode, struct file_lock *request, str
if (!posix_locks_conflict(request, fl))
continue;
if (conflock)
- locks_copy_lock(conflock, fl);
+ __locks_copy_lock(conflock, fl);
error = -EAGAIN;
if (!(request->fl_flags & FL_SLEEP))
goto out;
@@ -1367,18 +1368,20 @@ int generic_setlease(struct file *filp, long arg, struct file_lock **flp)
lease = *flp;
- error = -EAGAIN;
- if ((arg == F_RDLCK) && (atomic_read(&inode->i_writecount) > 0))
- goto out;
- if ((arg == F_WRLCK)
- && ((atomic_read(&dentry->d_count) > 1)
- || (atomic_read(&inode->i_count) > 1)))
- goto out;
+ if (arg != F_UNLCK) {
+ error = -ENOMEM;
+ new_fl = locks_alloc_lock();
+ if (new_fl == NULL)
+ goto out;
- error = -ENOMEM;
- new_fl = locks_alloc_lock();
- if (new_fl == NULL)
- goto out;
+ error = -EAGAIN;
+ if ((arg == F_RDLCK) && (atomic_read(&inode->i_writecount) > 0))
+ goto out;
+ if ((arg == F_WRLCK)
+ && ((atomic_read(&dentry->d_count) > 1)
+ || (atomic_read(&inode->i_count) > 1)))
+ goto out;
+ }
/*
* At this point, we know that if there is an exclusive
@@ -1404,6 +1407,7 @@ int generic_setlease(struct file *filp, long arg, struct file_lock **flp)
rdlease_count++;
}
+ error = -EAGAIN;
if ((arg == F_RDLCK && (wrlease_count > 0)) ||
(arg == F_WRLCK && ((rdlease_count + wrlease_count) > 0)))
goto out;
@@ -1490,8 +1494,7 @@ EXPORT_SYMBOL_GPL(vfs_setlease);
int fcntl_setlease(unsigned int fd, struct file *filp, long arg)
{
struct file_lock fl, *flp = &fl;
- struct dentry *dentry = filp->f_path.dentry;
- struct inode *inode = dentry->d_inode;
+ struct inode *inode = filp->f_path.dentry->d_inode;
int error;
locks_init_lock(&fl);
diff --git a/fs/namespace.c b/fs/namespace.c
index 678f7ce060f..f48f98110c3 100644
--- a/fs/namespace.c
+++ b/fs/namespace.c
@@ -27,6 +27,7 @@
#include <linux/mount.h>
#include <linux/ramfs.h>
#include <linux/log2.h>
+#include <linux/idr.h>
#include <asm/uaccess.h>
#include <asm/unistd.h>
#include "pnode.h"
@@ -39,6 +40,8 @@
__cacheline_aligned_in_smp DEFINE_SPINLOCK(vfsmount_lock);
static int event;
+static DEFINE_IDA(mnt_id_ida);
+static DEFINE_IDA(mnt_group_ida);
static struct list_head *mount_hashtable __read_mostly;
static struct kmem_cache *mnt_cache __read_mostly;
@@ -58,10 +61,63 @@ static inline unsigned long hash(struct vfsmount *mnt, struct dentry *dentry)
#define MNT_WRITER_UNDERFLOW_LIMIT -(1<<16)
+/* allocation is serialized by namespace_sem */
+static int mnt_alloc_id(struct vfsmount *mnt)
+{
+ int res;
+
+retry:
+ ida_pre_get(&mnt_id_ida, GFP_KERNEL);
+ spin_lock(&vfsmount_lock);
+ res = ida_get_new(&mnt_id_ida, &mnt->mnt_id);
+ spin_unlock(&vfsmount_lock);
+ if (res == -EAGAIN)
+ goto retry;
+
+ return res;
+}
+
+static void mnt_free_id(struct vfsmount *mnt)
+{
+ spin_lock(&vfsmount_lock);
+ ida_remove(&mnt_id_ida, mnt->mnt_id);
+ spin_unlock(&vfsmount_lock);
+}
+
+/*
+ * Allocate a new peer group ID
+ *
+ * mnt_group_ida is protected by namespace_sem
+ */
+static int mnt_alloc_group_id(struct vfsmount *mnt)
+{
+ if (!ida_pre_get(&mnt_group_ida, GFP_KERNEL))
+ return -ENOMEM;
+
+ return ida_get_new_above(&mnt_group_ida, 1, &mnt->mnt_group_id);
+}
+
+/*
+ * Release a peer group ID
+ */
+void mnt_release_group_id(struct vfsmount *mnt)
+{
+ ida_remove(&mnt_group_ida, mnt->mnt_group_id);
+ mnt->mnt_group_id = 0;
+}
+
struct vfsmount *alloc_vfsmnt(const char *name)
{
struct vfsmount *mnt = kmem_cache_zalloc(mnt_cache, GFP_KERNEL);
if (mnt) {
+ int err;
+
+ err = mnt_alloc_id(mnt);
+ if (err) {
+ kmem_cache_free(mnt_cache, mnt);
+ return NULL;
+ }
+
atomic_set(&mnt->mnt_count, 1);
INIT_LIST_HEAD(&mnt->mnt_hash);
INIT_LIST_HEAD(&mnt->mnt_child);
@@ -353,6 +409,7 @@ EXPORT_SYMBOL(simple_set_mnt);
void free_vfsmnt(struct vfsmount *mnt)
{
kfree(mnt->mnt_devname);
+ mnt_free_id(mnt);
kmem_cache_free(mnt_cache, mnt);
}
@@ -499,6 +556,17 @@ static struct vfsmount *clone_mnt(struct vfsmount *old, struct dentry *root,
struct vfsmount *mnt = alloc_vfsmnt(old->mnt_devname);
if (mnt) {
+ if (flag & (CL_SLAVE | CL_PRIVATE))
+ mnt->mnt_group_id = 0; /* not a peer of original */
+ else
+ mnt->mnt_group_id = old->mnt_group_id;
+
+ if ((flag & CL_MAKE_SHARED) && !mnt->mnt_group_id) {
+ int err = mnt_alloc_group_id(mnt);
+ if (err)
+ goto out_free;
+ }
+
mnt->mnt_flags = old->mnt_flags;
atomic_inc(&sb->s_active);
mnt->mnt_sb = sb;
@@ -528,6 +596,10 @@ static struct vfsmount *clone_mnt(struct vfsmount *old, struct dentry *root,
}
}
return mnt;
+
+ out_free:
+ free_vfsmnt(mnt);
+ return NULL;
}
static inline void __mntput(struct vfsmount *mnt)
@@ -652,20 +724,21 @@ void save_mount_options(struct super_block *sb, char *options)
}
EXPORT_SYMBOL(save_mount_options);
+#ifdef CONFIG_PROC_FS
/* iterator */
static void *m_start(struct seq_file *m, loff_t *pos)
{
- struct mnt_namespace *n = m->private;
+ struct proc_mounts *p = m->private;
down_read(&namespace_sem);
- return seq_list_start(&n->list, *pos);
+ return seq_list_start(&p->ns->list, *pos);
}
static void *m_next(struct seq_file *m, void *v, loff_t *pos)
{
- struct mnt_namespace *n = m->private;
+ struct proc_mounts *p = m->private;
- return seq_list_next(v, &n->list, pos);
+ return seq_list_next(v, &p->ns->list, pos);
}
static void m_stop(struct seq_file *m, void *v)
@@ -673,20 +746,30 @@ static void m_stop(struct seq_file *m, void *v)
up_read(&namespace_sem);
}
-static int show_vfsmnt(struct seq_file *m, void *v)
+struct proc_fs_info {
+ int flag;
+ const char *str;
+};
+
+static void show_sb_opts(struct seq_file *m, struct super_block *sb)
{
- struct vfsmount *mnt = list_entry(v, struct vfsmount, mnt_list);
- int err = 0;
- static struct proc_fs_info {
- int flag;
- char *str;
- } fs_info[] = {
+ static const struct proc_fs_info fs_info[] = {
{ MS_SYNCHRONOUS, ",sync" },
{ MS_DIRSYNC, ",dirsync" },
{ MS_MANDLOCK, ",mand" },
{ 0, NULL }
};
- static struct proc_fs_info mnt_info[] = {
+ const struct proc_fs_info *fs_infop;
+
+ for (fs_infop = fs_info; fs_infop->flag; fs_infop++) {
+ if (sb->s_flags & fs_infop->flag)
+ seq_puts(m, fs_infop->str);
+ }
+}
+
+static void show_mnt_opts(struct seq_file *m, struct vfsmount *mnt)
+{
+ static const struct proc_fs_info mnt_info[] = {
{ MNT_NOSUID, ",nosuid" },
{ MNT_NODEV, ",nodev" },
{ MNT_NOEXEC, ",noexec" },
@@ -695,40 +778,108 @@ static int show_vfsmnt(struct seq_file *m, void *v)
{ MNT_RELATIME, ",relatime" },
{ 0, NULL }
};
- struct proc_fs_info *fs_infop;
+ const struct proc_fs_info *fs_infop;
+
+ for (fs_infop = mnt_info; fs_infop->flag; fs_infop++) {
+ if (mnt->mnt_flags & fs_infop->flag)
+ seq_puts(m, fs_infop->str);
+ }
+}
+
+static void show_type(struct seq_file *m, struct super_block *sb)
+{
+ mangle(m, sb->s_type->name);
+ if (sb->s_subtype && sb->s_subtype[0]) {
+ seq_putc(m, '.');
+ mangle(m, sb->s_subtype);
+ }
+}
+
+static int show_vfsmnt(struct seq_file *m, void *v)
+{
+ struct vfsmount *mnt = list_entry(v, struct vfsmount, mnt_list);
+ int err = 0;
struct path mnt_path = { .dentry = mnt->mnt_root, .mnt = mnt };
mangle(m, mnt->mnt_devname ? mnt->mnt_devname : "none");
seq_putc(m, ' ');
seq_path(m, &mnt_path, " \t\n\\");
seq_putc(m, ' ');
- mangle(m, mnt->mnt_sb->s_type->name);
- if (mnt->mnt_sb->s_subtype && mnt->mnt_sb->s_subtype[0]) {
- seq_putc(m, '.');
- mangle(m, mnt->mnt_sb->s_subtype);
- }
+ show_type(m, mnt->mnt_sb);
seq_puts(m, __mnt_is_readonly(mnt) ? " ro" : " rw");
- for (fs_infop = fs_info; fs_infop->flag; fs_infop++) {
- if (mnt->mnt_sb->s_flags & fs_infop->flag)
- seq_puts(m, fs_infop->str);
- }
- for (fs_infop = mnt_info; fs_infop->flag; fs_infop++) {
- if (mnt->mnt_flags & fs_infop->flag)
- seq_puts(m, fs_infop->str);
- }
+ show_sb_opts(m, mnt->mnt_sb);
+ show_mnt_opts(m, mnt);
if (mnt->mnt_sb->s_op->show_options)
err = mnt->mnt_sb->s_op->show_options(m, mnt);
seq_puts(m, " 0 0\n");
return err;
}
-struct seq_operations mounts_op = {
+const struct seq_operations mounts_op = {
.start = m_start,
.next = m_next,
.stop = m_stop,
.show = show_vfsmnt
};
+static int show_mountinfo(struct seq_file *m, void *v)
+{
+ struct proc_mounts *p = m->private;
+ struct vfsmount *mnt = list_entry(v, struct vfsmount, mnt_list);
+ struct super_block *sb = mnt->mnt_sb;
+ struct path mnt_path = { .dentry = mnt->mnt_root, .mnt = mnt };
+ struct path root = p->root;
+ int err = 0;
+
+ seq_printf(m, "%i %i %u:%u ", mnt->mnt_id, mnt->mnt_parent->mnt_id,
+ MAJOR(sb->s_dev), MINOR(sb->s_dev));
+ seq_dentry(m, mnt->mnt_root, " \t\n\\");
+ seq_putc(m, ' ');
+ seq_path_root(m, &mnt_path, &root, " \t\n\\");
+ if (root.mnt != p->root.mnt || root.dentry != p->root.dentry) {
+ /*
+ * Mountpoint is outside root, discard that one. Ugly,
+ * but less so than trying to do that in iterator in a
+ * race-free way (due to renames).
+ */
+ return SEQ_SKIP;
+ }
+ seq_puts(m, mnt->mnt_flags & MNT_READONLY ? " ro" : " rw");
+ show_mnt_opts(m, mnt);
+
+ /* Tagged fields ("foo:X" or "bar") */
+ if (IS_MNT_SHARED(mnt))
+ seq_printf(m, " shared:%i", mnt->mnt_group_id);
+ if (IS_MNT_SLAVE(mnt)) {
+ int master = mnt->mnt_master->mnt_group_id;
+ int dom = get_dominating_id(mnt, &p->root);
+ seq_printf(m, " master:%i", master);
+ if (dom && dom != master)
+ seq_printf(m, " propagate_from:%i", dom);
+ }
+ if (IS_MNT_UNBINDABLE(mnt))
+ seq_puts(m, " unbindable");
+
+ /* Filesystem specific data */
+ seq_puts(m, " - ");
+ show_type(m, sb);
+ seq_putc(m, ' ');
+ mangle(m, mnt->mnt_devname ? mnt->mnt_devname : "none");
+ seq_puts(m, sb->s_flags & MS_RDONLY ? " ro" : " rw");
+ show_sb_opts(m, sb);
+ if (sb->s_op->show_options)
+ err = sb->s_op->show_options(m, mnt);
+ seq_putc(m, '\n');
+ return err;
+}
+
+const struct seq_operations mountinfo_op = {
+ .start = m_start,
+ .next = m_next,
+ .stop = m_stop,
+ .show = show_mountinfo,
+};
+
static int show_vfsstat(struct seq_file *m, void *v)
{
struct vfsmount *mnt = list_entry(v, struct vfsmount, mnt_list);
@@ -749,7 +900,7 @@ static int show_vfsstat(struct seq_file *m, void *v)
/* file system type */
seq_puts(m, "with fstype ");
- mangle(m, mnt->mnt_sb->s_type->name);
+ show_type(m, mnt->mnt_sb);
/* optional statistics */
if (mnt->mnt_sb->s_op->show_stats) {
@@ -761,12 +912,13 @@ static int show_vfsstat(struct seq_file *m, void *v)
return err;
}
-struct seq_operations mountstats_op = {
+const struct seq_operations mountstats_op = {
.start = m_start,
.next = m_next,
.stop = m_stop,
.show = show_vfsstat,
};
+#endif /* CONFIG_PROC_FS */
/**
* may_umount_tree - check if a mount tree is busy
@@ -909,10 +1061,11 @@ static int do_umount(struct vfsmount *mnt, int flags)
* about for the moment.
*/
- lock_kernel();
- if (sb->s_op->umount_begin)
- sb->s_op->umount_begin(mnt, flags);
- unlock_kernel();
+ if (flags & MNT_FORCE && sb->s_op->umount_begin) {
+ lock_kernel();
+ sb->s_op->umount_begin(sb);
+ unlock_kernel();
+ }
/*
* No sense to grab the lock for this test, but test itself looks
@@ -1091,23 +1244,50 @@ Enomem:
struct vfsmount *collect_mounts(struct vfsmount *mnt, struct dentry *dentry)
{
struct vfsmount *tree;
- down_read(&namespace_sem);
+ down_write(&namespace_sem);
tree = copy_tree(mnt, dentry, CL_COPY_ALL | CL_PRIVATE);
- up_read(&namespace_sem);
+ up_write(&namespace_sem);
return tree;
}
void drop_collected_mounts(struct vfsmount *mnt)
{
LIST_HEAD(umount_list);
- down_read(&namespace_sem);
+ down_write(&namespace_sem);
spin_lock(&vfsmount_lock);
umount_tree(mnt, 0, &umount_list);
spin_unlock(&vfsmount_lock);
- up_read(&namespace_sem);
+ up_write(&namespace_sem);
release_mounts(&umount_list);
}
+static void cleanup_group_ids(struct vfsmount *mnt, struct vfsmount *end)
+{
+ struct vfsmount *p;
+
+ for (p = mnt; p != end; p = next_mnt(p, mnt)) {
+ if (p->mnt_group_id && !IS_MNT_SHARED(p))
+ mnt_release_group_id(p);
+ }
+}
+
+static int invent_group_ids(struct vfsmount *mnt, bool recurse)
+{
+ struct vfsmount *p;
+
+ for (p = mnt; p; p = recurse ? next_mnt(p, mnt) : NULL) {
+ if (!p->mnt_group_id && !IS_MNT_SHARED(p)) {
+ int err = mnt_alloc_group_id(p);
+ if (err) {
+ cleanup_group_ids(mnt, p);
+ return err;
+ }
+ }
+ }
+
+ return 0;
+}
+
/*
* @source_mnt : mount tree to be attached
* @nd : place the mount tree @source_mnt is attached
@@ -1178,9 +1358,16 @@ static int attach_recursive_mnt(struct vfsmount *source_mnt,
struct vfsmount *dest_mnt = path->mnt;
struct dentry *dest_dentry = path->dentry;
struct vfsmount *child, *p;
+ int err;
- if (propagate_mnt(dest_mnt, dest_dentry, source_mnt, &tree_list))
- return -EINVAL;
+ if (IS_MNT_SHARED(dest_mnt)) {
+ err = invent_group_ids(source_mnt, true);
+ if (err)
+ goto out;
+ }
+ err = propagate_mnt(dest_mnt, dest_dentry, source_mnt, &tree_list);
+ if (err)
+ goto out_cleanup_ids;
if (IS_MNT_SHARED(dest_mnt)) {
for (p = source_mnt; p; p = next_mnt(p, source_mnt))
@@ -1203,34 +1390,40 @@ static int attach_recursive_mnt(struct vfsmount *source_mnt,
}
spin_unlock(&vfsmount_lock);
return 0;
+
+ out_cleanup_ids:
+ if (IS_MNT_SHARED(dest_mnt))
+ cleanup_group_ids(source_mnt, NULL);
+ out:
+ return err;
}
-static int graft_tree(struct vfsmount *mnt, struct nameidata *nd)
+static int graft_tree(struct vfsmount *mnt, struct path *path)
{
int err;
if (mnt->mnt_sb->s_flags & MS_NOUSER)
return -EINVAL;
- if (S_ISDIR(nd->path.dentry->d_inode->i_mode) !=
+ if (S_ISDIR(path->dentry->d_inode->i_mode) !=
S_ISDIR(mnt->mnt_root->d_inode->i_mode))
return -ENOTDIR;
err = -ENOENT;
- mutex_lock(&nd->path.dentry->d_inode->i_mutex);
- if (IS_DEADDIR(nd->path.dentry->d_inode))
+ mutex_lock(&path->dentry->d_inode->i_mutex);
+ if (IS_DEADDIR(path->dentry->d_inode))
goto out_unlock;
- err = security_sb_check_sb(mnt, nd);
+ err = security_sb_check_sb(mnt, path);
if (err)
goto out_unlock;
err = -ENOENT;
- if (IS_ROOT(nd->path.dentry) || !d_unhashed(nd->path.dentry))
- err = attach_recursive_mnt(mnt, &nd->path, NULL);
+ if (IS_ROOT(path->dentry) || !d_unhashed(path->dentry))
+ err = attach_recursive_mnt(mnt, path, NULL);
out_unlock:
- mutex_unlock(&nd->path.dentry->d_inode->i_mutex);
+ mutex_unlock(&path->dentry->d_inode->i_mutex);
if (!err)
- security_sb_post_addmount(mnt, nd);
+ security_sb_post_addmount(mnt, path);
return err;
}
@@ -1243,6 +1436,7 @@ static noinline int do_change_type(struct nameidata *nd, int flag)
struct vfsmount *m, *mnt = nd->path.mnt;
int recurse = flag & MS_REC;
int type = flag & ~MS_REC;
+ int err = 0;
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
@@ -1251,12 +1445,20 @@ static noinline int do_change_type(struct nameidata *nd, int flag)
return -EINVAL;
down_write(&namespace_sem);
+ if (type == MS_SHARED) {
+ err = invent_group_ids(mnt, recurse);
+ if (err)
+ goto out_unlock;
+ }
+
spin_lock(&vfsmount_lock);
for (m = mnt; m; m = (recurse ? next_mnt(m, mnt) : NULL))
change_mnt_propagation(m, type);
spin_unlock(&vfsmount_lock);
+
+ out_unlock:
up_write(&namespace_sem);
- return 0;
+ return err;
}
/*
@@ -1294,7 +1496,7 @@ static noinline int do_loopback(struct nameidata *nd, char *old_name,
if (!mnt)
goto out;
- err = graft_tree(mnt, nd);
+ err = graft_tree(mnt, &nd->path);
if (err) {
LIST_HEAD(umount_list);
spin_lock(&vfsmount_lock);
@@ -1501,7 +1703,7 @@ int do_add_mount(struct vfsmount *newmnt, struct nameidata *nd,
goto unlock;
newmnt->mnt_flags = mnt_flags;
- if ((err = graft_tree(newmnt, nd)))
+ if ((err = graft_tree(newmnt, &nd->path)))
goto unlock;
if (fslist) /* add to the specified expiration list */
@@ -1746,7 +1948,8 @@ long do_mount(char *dev_name, char *dir_name, char *type_page,
if (retval)
return retval;
- retval = security_sb_mount(dev_name, &nd, type_page, flags, data_page);
+ retval = security_sb_mount(dev_name, &nd.path,
+ type_page, flags, data_page);
if (retval)
goto dput_out;
@@ -1986,15 +2189,13 @@ asmlinkage long sys_pivot_root(const char __user * new_root,
const char __user * put_old)
{
struct vfsmount *tmp;
- struct nameidata new_nd, old_nd, user_nd;
- struct path parent_path, root_parent;
+ struct nameidata new_nd, old_nd;
+ struct path parent_path, root_parent, root;
int error;
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
- lock_kernel();
-
error = __user_walk(new_root, LOOKUP_FOLLOW | LOOKUP_DIRECTORY,
&new_nd);
if (error)
@@ -2007,14 +2208,14 @@ asmlinkage long sys_pivot_root(const char __user * new_root,
if (error)
goto out1;
- error = security_sb_pivotroot(&old_nd, &new_nd);
+ error = security_sb_pivotroot(&old_nd.path, &new_nd.path);
if (error) {
path_put(&old_nd.path);
goto out1;
}
read_lock(&current->fs->lock);
- user_nd.path = current->fs->root;
+ root = current->fs->root;
path_get(&current->fs->root);
read_unlock(&current->fs->lock);
down_write(&namespace_sem);
@@ -2022,9 +2223,9 @@ asmlinkage long sys_pivot_root(const char __user * new_root,
error = -EINVAL;
if (IS_MNT_SHARED(old_nd.path.mnt) ||
IS_MNT_SHARED(new_nd.path.mnt->mnt_parent) ||
- IS_MNT_SHARED(user_nd.path.mnt->mnt_parent))
+ IS_MNT_SHARED(root.mnt->mnt_parent))
goto out2;
- if (!check_mnt(user_nd.path.mnt))
+ if (!check_mnt(root.mnt))
goto out2;
error = -ENOENT;
if (IS_DEADDIR(new_nd.path.dentry->d_inode))
@@ -2034,13 +2235,13 @@ asmlinkage long sys_pivot_root(const char __user * new_root,
if (d_unhashed(old_nd.path.dentry) && !IS_ROOT(old_nd.path.dentry))
goto out2;
error = -EBUSY;
- if (new_nd.path.mnt == user_nd.path.mnt ||
- old_nd.path.mnt == user_nd.path.mnt)
+ if (new_nd.path.mnt == root.mnt ||
+ old_nd.path.mnt == root.mnt)
goto out2; /* loop, on the same file system */
error = -EINVAL;
- if (user_nd.path.mnt->mnt_root != user_nd.path.dentry)
+ if (root.mnt->mnt_root != root.dentry)
goto out2; /* not a mountpoint */
- if (user_nd.path.mnt->mnt_parent == user_nd.path.mnt)
+ if (root.mnt->mnt_parent == root.mnt)
goto out2; /* not attached */
if (new_nd.path.mnt->mnt_root != new_nd.path.dentry)
goto out2; /* not a mountpoint */
@@ -2062,27 +2263,26 @@ asmlinkage long sys_pivot_root(const char __user * new_root,
} else if (!is_subdir(old_nd.path.dentry, new_nd.path.dentry))
goto out3;
detach_mnt(new_nd.path.mnt, &parent_path);
- detach_mnt(user_nd.path.mnt, &root_parent);
+ detach_mnt(root.mnt, &root_parent);
/* mount old root on put_old */
- attach_mnt(user_nd.path.mnt, &old_nd.path);
+ attach_mnt(root.mnt, &old_nd.path);
/* mount new_root on / */
attach_mnt(new_nd.path.mnt, &root_parent);
touch_mnt_namespace(current->nsproxy->mnt_ns);
spin_unlock(&vfsmount_lock);
- chroot_fs_refs(&user_nd.path, &new_nd.path);
- security_sb_post_pivotroot(&user_nd, &new_nd);
+ chroot_fs_refs(&root, &new_nd.path);
+ security_sb_post_pivotroot(&root, &new_nd.path);
error = 0;
path_put(&root_parent);
path_put(&parent_path);
out2:
mutex_unlock(&old_nd.path.dentry->d_inode->i_mutex);
up_write(&namespace_sem);
- path_put(&user_nd.path);
+ path_put(&root);
path_put(&old_nd.path);
out1:
path_put(&new_nd.path);
out0:
- unlock_kernel();
return error;
out3:
spin_unlock(&vfsmount_lock);
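
The show_mountinfo() helper added in this file emits one record per mount into /proc/<pid>/mountinfo: mount id, parent id, major:minor of the superblock device, the root of the mount within its filesystem, the mount point relative to the reader's root, the per-mount flags, optional tagged fields (shared:N, master:N, propagate_from:N, unbindable), a " - " separator, then the filesystem type, the mount source and the per-superblock options. As a rough companion, here is a minimal userspace sketch that reads such records; the function and buffer names are local to the sketch, not part of the patch.

#include <stdio.h>
#include <string.h>

/* Illustrative parse of one record in the format produced by show_mountinfo(). */
static int parse_mountinfo_line(const char *line)
{
	int id, parent, maj, min;
	char root[256], mnt_point[256], mnt_opts[256];
	char fstype[64], source[256], sb_opts[256];
	const char *dash = strstr(line, " - ");	/* optional tagged fields end here */

	if (dash == NULL)
		return -1;
	if (sscanf(line, "%d %d %d:%d %255s %255s %255s",
		   &id, &parent, &maj, &min, root, mnt_point, mnt_opts) != 7)
		return -1;
	if (sscanf(dash + 3, "%63s %255s %255s", fstype, source, sb_opts) != 3)
		return -1;
	printf("%d under %d: %s on %s type %s (%s,%s)\n",
	       id, parent, source, mnt_point, fstype, mnt_opts, sb_opts);
	return 0;
}

int main(void)
{
	char line[1024];
	FILE *f = fopen("/proc/self/mountinfo", "r");

	if (f == NULL)
		return 1;
	while (fgets(line, sizeof(line), f))
		parse_mountinfo_line(line);
	fclose(f);
	return 0;
}

Note that a mount whose mountpoint falls outside the reader's root is dropped entirely (SEQ_SKIP in show_mountinfo()), so a consumer never sees a partial record for it.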
diff --git a/fs/nfs/Makefile b/fs/nfs/Makefile
index df0f41e0988..ac6170c594a 100644
--- a/fs/nfs/Makefile
+++ b/fs/nfs/Makefile
@@ -5,7 +5,7 @@
obj-$(CONFIG_NFS_FS) += nfs.o
nfs-y := client.o dir.o file.o getroot.o inode.o super.o nfs2xdr.o \
- pagelist.o proc.o read.o symlink.o unlink.o \
+ direct.o pagelist.o proc.o read.o symlink.o unlink.o \
write.o namespace.o mount_clnt.o
nfs-$(CONFIG_ROOT_NFS) += nfsroot.o
nfs-$(CONFIG_NFS_V3) += nfs3proc.o nfs3xdr.o
@@ -14,5 +14,4 @@ nfs-$(CONFIG_NFS_V4) += nfs4proc.o nfs4xdr.o nfs4state.o nfs4renewd.o \
delegation.o idmap.o \
callback.o callback_xdr.o callback_proc.o \
nfs4namespace.o
-nfs-$(CONFIG_NFS_DIRECTIO) += direct.o
nfs-$(CONFIG_SYSCTL) += sysctl.o
diff --git a/fs/nfs/callback.c b/fs/nfs/callback.c
index 66648dd92d9..5606ae3d72d 100644
--- a/fs/nfs/callback.c
+++ b/fs/nfs/callback.c
@@ -15,6 +15,7 @@
#include <linux/nfs_fs.h>
#include <linux/mutex.h>
#include <linux/freezer.h>
+#include <linux/kthread.h>
#include <net/inet_sock.h>
@@ -27,9 +28,7 @@
struct nfs_callback_data {
unsigned int users;
struct svc_serv *serv;
- pid_t pid;
- struct completion started;
- struct completion stopped;
+ struct task_struct *task;
};
static struct nfs_callback_data nfs_callback_info;
@@ -57,48 +56,44 @@ module_param_call(callback_tcpport, param_set_port, param_get_int,
/*
* This is the callback kernel thread.
*/
-static void nfs_callback_svc(struct svc_rqst *rqstp)
+static int
+nfs_callback_svc(void *vrqstp)
{
- int err;
+ int err, preverr = 0;
+ struct svc_rqst *rqstp = vrqstp;
- __module_get(THIS_MODULE);
- lock_kernel();
-
- nfs_callback_info.pid = current->pid;
- daemonize("nfsv4-svc");
- /* Process request with signals blocked, but allow SIGKILL. */
- allow_signal(SIGKILL);
set_freezable();
- complete(&nfs_callback_info.started);
-
- for(;;) {
- if (signalled()) {
- if (nfs_callback_info.users == 0)
- break;
- flush_signals(current);
- }
+ /*
+ * FIXME: do we really need to run this under the BKL? If so, please
+ * add a comment about what it's intended to protect.
+ */
+ lock_kernel();
+ while (!kthread_should_stop()) {
/*
* Listen for a request on the socket
*/
err = svc_recv(rqstp, MAX_SCHEDULE_TIMEOUT);
- if (err == -EAGAIN || err == -EINTR)
+ if (err == -EAGAIN || err == -EINTR) {
+ preverr = err;
continue;
+ }
if (err < 0) {
- printk(KERN_WARNING
- "%s: terminating on error %d\n",
- __FUNCTION__, -err);
- break;
+ if (err != preverr) {
+ printk(KERN_WARNING "%s: unexpected error "
+ "from svc_recv (%d)\n", __func__, err);
+ preverr = err;
+ }
+ schedule_timeout_uninterruptible(HZ);
+ continue;
}
+ preverr = err;
svc_process(rqstp);
}
-
- flush_signals(current);
- svc_exit_thread(rqstp);
- nfs_callback_info.pid = 0;
- complete(&nfs_callback_info.stopped);
unlock_kernel();
- module_put_and_exit(0);
+ nfs_callback_info.task = NULL;
+ svc_exit_thread(rqstp);
+ return 0;
}
/*
@@ -107,14 +102,13 @@ static void nfs_callback_svc(struct svc_rqst *rqstp)
int nfs_callback_up(void)
{
struct svc_serv *serv = NULL;
+ struct svc_rqst *rqstp;
int ret = 0;
lock_kernel();
mutex_lock(&nfs_callback_mutex);
- if (nfs_callback_info.users++ || nfs_callback_info.pid != 0)
+ if (nfs_callback_info.users++ || nfs_callback_info.task != NULL)
goto out;
- init_completion(&nfs_callback_info.started);
- init_completion(&nfs_callback_info.stopped);
serv = svc_create(&nfs4_callback_program, NFS4_CALLBACK_BUFSIZE, NULL);
ret = -ENOMEM;
if (!serv)
@@ -127,15 +121,28 @@ int nfs_callback_up(void)
nfs_callback_tcpport = ret;
dprintk("Callback port = 0x%x\n", nfs_callback_tcpport);
- ret = svc_create_thread(nfs_callback_svc, serv);
- if (ret < 0)
+ rqstp = svc_prepare_thread(serv, &serv->sv_pools[0]);
+ if (IS_ERR(rqstp)) {
+ ret = PTR_ERR(rqstp);
goto out_err;
+ }
+
+ svc_sock_update_bufs(serv);
nfs_callback_info.serv = serv;
- wait_for_completion(&nfs_callback_info.started);
+
+ nfs_callback_info.task = kthread_run(nfs_callback_svc, rqstp,
+ "nfsv4-svc");
+ if (IS_ERR(nfs_callback_info.task)) {
+ ret = PTR_ERR(nfs_callback_info.task);
+ nfs_callback_info.serv = NULL;
+ nfs_callback_info.task = NULL;
+ svc_exit_thread(rqstp);
+ goto out_err;
+ }
out:
/*
* svc_create creates the svc_serv with sv_nrthreads == 1, and then
- * svc_create_thread increments that. So we need to call svc_destroy
+ * svc_prepare_thread increments that. So we need to call svc_destroy
* on both success and failure so that the refcount is 1 when the
* thread exits.
*/
@@ -152,19 +159,15 @@ out_err:
}
/*
- * Kill the server process if it is not already up.
+ * Kill the server process if it is not already down.
*/
void nfs_callback_down(void)
{
lock_kernel();
mutex_lock(&nfs_callback_mutex);
nfs_callback_info.users--;
- do {
- if (nfs_callback_info.users != 0 || nfs_callback_info.pid == 0)
- break;
- if (kill_proc(nfs_callback_info.pid, SIGKILL, 1) < 0)
- break;
- } while (wait_for_completion_timeout(&nfs_callback_info.stopped, 5*HZ) == 0);
+ if (nfs_callback_info.users == 0 && nfs_callback_info.task != NULL)
+ kthread_stop(nfs_callback_info.task);
mutex_unlock(&nfs_callback_mutex);
unlock_kernel();
}
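
The callback.c hunks replace the old daemonize()/signal-driven service thread with the kthread API: the thread is started with kthread_run(), loops until kthread_should_stop(), rate-limits unexpected svc_recv() errors via the preverr bookkeeping, and is torn down with kthread_stop(). A stripped-down sketch of that lifecycle follows, written as a hypothetical module; the per-iteration work is a stand-in, not the NFSv4 callback logic.

#include <linux/module.h>
#include <linux/kthread.h>
#include <linux/sched.h>
#include <linux/delay.h>
#include <linux/err.h>

static struct task_struct *cbsvc_task;

static int cbsvc_loop(void *data)
{
	int err, preverr = 0;

	while (!kthread_should_stop()) {
		err = 0;			/* stand-in for svc_recv() */
		if (err == -EAGAIN || err == -EINTR) {
			preverr = err;		/* transient: retry quietly */
			continue;
		}
		if (err < 0) {
			if (err != preverr) {	/* log each new error only once */
				printk(KERN_WARNING "cbsvc: unexpected error %d\n", err);
				preverr = err;
			}
			schedule_timeout_uninterruptible(HZ);
			continue;
		}
		preverr = err;
		msleep(1000);			/* stand-in for svc_process() */
	}
	return 0;
}

static int __init cbsvc_init(void)
{
	cbsvc_task = kthread_run(cbsvc_loop, NULL, "cbsvc");
	if (IS_ERR(cbsvc_task)) {
		int ret = PTR_ERR(cbsvc_task);
		cbsvc_task = NULL;
		return ret;
	}
	return 0;
}

static void __exit cbsvc_exit(void)
{
	if (cbsvc_task)
		kthread_stop(cbsvc_task);	/* sets kthread_should_stop() and waits */
}

module_init(cbsvc_init);
module_exit(cbsvc_exit);
MODULE_LICENSE("GPL");

With this shape, shutdown no longer needs SIGKILL plus a completion with a timeout: kthread_stop() both requests the exit and waits for it, which is what lets nfs_callback_down() shrink to a single call.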
diff --git a/fs/nfs/client.c b/fs/nfs/client.c
index c5c0175898f..f2f3b284e6d 100644
--- a/fs/nfs/client.c
+++ b/fs/nfs/client.c
@@ -112,6 +112,7 @@ struct nfs_client_initdata {
static struct nfs_client *nfs_alloc_client(const struct nfs_client_initdata *cl_init)
{
struct nfs_client *clp;
+ struct rpc_cred *cred;
if ((clp = kzalloc(sizeof(*clp), GFP_KERNEL)) == NULL)
goto error_0;
@@ -150,6 +151,9 @@ static struct nfs_client *nfs_alloc_client(const struct nfs_client_initdata *cl_
clp->cl_boot_time = CURRENT_TIME;
clp->cl_state = 1 << NFS4CLNT_LEASE_EXPIRED;
#endif
+ cred = rpc_lookup_machine_cred();
+ if (!IS_ERR(cred))
+ clp->cl_machine_cred = cred;
return clp;
@@ -170,6 +174,8 @@ static void nfs4_shutdown_client(struct nfs_client *clp)
BUG_ON(!RB_EMPTY_ROOT(&clp->cl_state_owners));
if (__test_and_clear_bit(NFS_CS_IDMAP, &clp->cl_res_state))
nfs_idmap_delete(clp);
+
+ rpc_destroy_wait_queue(&clp->cl_rpcwaitq);
#endif
}
@@ -189,6 +195,9 @@ static void nfs_free_client(struct nfs_client *clp)
if (__test_and_clear_bit(NFS_CS_CALLBACK, &clp->cl_res_state))
nfs_callback_down();
+ if (clp->cl_machine_cred != NULL)
+ put_rpccred(clp->cl_machine_cred);
+
kfree(clp->cl_hostname);
kfree(clp);
@@ -680,10 +689,22 @@ static int nfs_init_server(struct nfs_server *server,
if (error < 0)
goto error;
+ server->port = data->nfs_server.port;
+
error = nfs_init_server_rpcclient(server, &timeparms, data->auth_flavors[0]);
if (error < 0)
goto error;
+ /* Preserve the values of mount_server-related mount options */
+ if (data->mount_server.addrlen) {
+ memcpy(&server->mountd_address, &data->mount_server.address,
+ data->mount_server.addrlen);
+ server->mountd_addrlen = data->mount_server.addrlen;
+ }
+ server->mountd_version = data->mount_server.version;
+ server->mountd_port = data->mount_server.port;
+ server->mountd_protocol = data->mount_server.protocol;
+
server->namelen = data->namlen;
/* Create a client RPC handle for the NFSv3 ACL management interface */
nfs_init_server_aclclient(server);
@@ -1062,6 +1083,8 @@ static int nfs4_init_server(struct nfs_server *server,
server->acdirmin = data->acdirmin * HZ;
server->acdirmax = data->acdirmax * HZ;
+ server->port = data->nfs_server.port;
+
error = nfs_init_server_rpcclient(server, &timeparms, data->auth_flavors[0]);
error:
diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c
index d9e30ac2798..f288b3ecab4 100644
--- a/fs/nfs/dir.c
+++ b/fs/nfs/dir.c
@@ -1967,7 +1967,7 @@ force_lookup:
if (!NFS_PROTO(inode)->access)
goto out_notsup;
- cred = rpcauth_lookupcred(NFS_CLIENT(inode)->cl_auth, 0);
+ cred = rpc_lookup_cred();
if (!IS_ERR(cred)) {
res = nfs_do_access(inode, cred, mask);
put_rpccred(cred);
diff --git a/fs/nfs/direct.c b/fs/nfs/direct.c
index 16844f98f50..4757a2b326a 100644
--- a/fs/nfs/direct.c
+++ b/fs/nfs/direct.c
@@ -229,14 +229,20 @@ static void nfs_direct_complete(struct nfs_direct_req *dreq)
static void nfs_direct_read_result(struct rpc_task *task, void *calldata)
{
struct nfs_read_data *data = calldata;
- struct nfs_direct_req *dreq = (struct nfs_direct_req *) data->req;
- if (nfs_readpage_result(task, data) != 0)
- return;
+ nfs_readpage_result(task, data);
+}
+
+static void nfs_direct_read_release(void *calldata)
+{
+
+ struct nfs_read_data *data = calldata;
+ struct nfs_direct_req *dreq = (struct nfs_direct_req *) data->req;
+ int status = data->task.tk_status;
spin_lock(&dreq->lock);
- if (unlikely(task->tk_status < 0)) {
- dreq->error = task->tk_status;
+ if (unlikely(status < 0)) {
+ dreq->error = status;
spin_unlock(&dreq->lock);
} else {
dreq->count += data->res.count;
@@ -249,11 +255,12 @@ static void nfs_direct_read_result(struct rpc_task *task, void *calldata)
if (put_dreq(dreq))
nfs_direct_complete(dreq);
+ nfs_readdata_release(calldata);
}
static const struct rpc_call_ops nfs_read_direct_ops = {
.rpc_call_done = nfs_direct_read_result,
- .rpc_release = nfs_readdata_release,
+ .rpc_release = nfs_direct_read_release,
};
/*
@@ -280,6 +287,7 @@ static ssize_t nfs_direct_read_schedule_segment(struct nfs_direct_req *dreq,
.rpc_client = NFS_CLIENT(inode),
.rpc_message = &msg,
.callback_ops = &nfs_read_direct_ops,
+ .workqueue = nfsiod_workqueue,
.flags = RPC_TASK_ASYNC,
};
unsigned int pgbase;
@@ -323,7 +331,7 @@ static ssize_t nfs_direct_read_schedule_segment(struct nfs_direct_req *dreq,
data->inode = inode;
data->cred = msg.rpc_cred;
data->args.fh = NFS_FH(inode);
- data->args.context = ctx;
+ data->args.context = get_nfs_open_context(ctx);
data->args.offset = pos;
data->args.pgbase = pgbase;
data->args.pages = data->pagevec;
@@ -339,8 +347,9 @@ static ssize_t nfs_direct_read_schedule_segment(struct nfs_direct_req *dreq,
NFS_PROTO(inode)->read_setup(data, &msg);
task = rpc_run_task(&task_setup_data);
- if (!IS_ERR(task))
- rpc_put_task(task);
+ if (IS_ERR(task))
+ break;
+ rpc_put_task(task);
dprintk("NFS: %5u initiated direct read call "
"(req %s/%Ld, %zu bytes @ offset %Lu)\n",
@@ -446,6 +455,7 @@ static void nfs_direct_write_reschedule(struct nfs_direct_req *dreq)
struct rpc_task_setup task_setup_data = {
.rpc_client = NFS_CLIENT(inode),
.callback_ops = &nfs_write_direct_ops,
+ .workqueue = nfsiod_workqueue,
.flags = RPC_TASK_ASYNC,
};
@@ -499,27 +509,34 @@ static void nfs_direct_write_reschedule(struct nfs_direct_req *dreq)
static void nfs_direct_commit_result(struct rpc_task *task, void *calldata)
{
struct nfs_write_data *data = calldata;
- struct nfs_direct_req *dreq = (struct nfs_direct_req *) data->req;
/* Call the NFS version-specific code */
- if (NFS_PROTO(data->inode)->commit_done(task, data) != 0)
- return;
- if (unlikely(task->tk_status < 0)) {
+ NFS_PROTO(data->inode)->commit_done(task, data);
+}
+
+static void nfs_direct_commit_release(void *calldata)
+{
+ struct nfs_write_data *data = calldata;
+ struct nfs_direct_req *dreq = (struct nfs_direct_req *) data->req;
+ int status = data->task.tk_status;
+
+ if (status < 0) {
dprintk("NFS: %5u commit failed with error %d.\n",
- task->tk_pid, task->tk_status);
+ data->task.tk_pid, status);
dreq->flags = NFS_ODIRECT_RESCHED_WRITES;
} else if (memcmp(&dreq->verf, &data->verf, sizeof(data->verf))) {
- dprintk("NFS: %5u commit verify failed\n", task->tk_pid);
+ dprintk("NFS: %5u commit verify failed\n", data->task.tk_pid);
dreq->flags = NFS_ODIRECT_RESCHED_WRITES;
}
- dprintk("NFS: %5u commit returned %d\n", task->tk_pid, task->tk_status);
+ dprintk("NFS: %5u commit returned %d\n", data->task.tk_pid, status);
nfs_direct_write_complete(dreq, data->inode);
+ nfs_commitdata_release(calldata);
}
static const struct rpc_call_ops nfs_commit_direct_ops = {
.rpc_call_done = nfs_direct_commit_result,
- .rpc_release = nfs_commit_release,
+ .rpc_release = nfs_direct_commit_release,
};
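
The direct-I/O hunks in this file split each completion into two callbacks: ->rpc_call_done now only records the RPC result, while the accounting, the put_dreq() reference drop and the release of the read/write/commit data move into ->rpc_release, which the RPC layer runs last (on the new nfsiod workqueue). A small userspace analogue of that ordering, with made-up names, just to show why the final bookkeeping belongs in the release hook:

#include <stdio.h>

/* Illustrative analogue of the done/release split; all names are local to the sketch. */
struct request {
	int status;		/* set by the "done" step */
	int *refcount;		/* shared request counter, like dreq->io_count */
	long *total;		/* shared byte count, like dreq->count */
	long bytes;
};

struct call_ops {
	void (*call_done)(struct request *);	/* may return early, refs still held */
	void (*release)(struct request *);	/* always runs exactly once, last */
};

static void demo_done(struct request *req)
{
	/* Only record the outcome; no shared-state teardown here. */
	req->status = 0;
}

static void demo_release(struct request *req)
{
	if (req->status >= 0)
		*req->total += req->bytes;	/* accounting happens here */
	if (--(*req->refcount) == 0)		/* put_dreq() analogue */
		printf("all sub-requests finished, total=%ld\n", *req->total);
	/* per-call data would be freed here, like nfs_readdata_release() */
}

int main(void)
{
	int refs = 2;
	long total = 0;
	struct call_ops ops = { demo_done, demo_release };
	struct request a = { -1, &refs, &total, 4096 };
	struct request b = { -1, &refs, &total, 8192 };

	ops.call_done(&a); ops.release(&a);
	ops.call_done(&b); ops.release(&b);
	return 0;
}

Keeping the accounting in the release path guarantees it still runs when the done callback bails out early (for example right after nfs_readpage_result() or commit_done()), which is exactly the situation the hunks above are rearranging.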
static void nfs_direct_commit_schedule(struct nfs_direct_req *dreq)
@@ -537,6 +554,7 @@ static void nfs_direct_commit_schedule(struct nfs_direct_req *dreq)
.rpc_message = &msg,
.callback_ops = &nfs_commit_direct_ops,
.callback_data = data,
+ .workqueue = nfsiod_workqueue,
.flags = RPC_TASK_ASYNC,
};
@@ -546,6 +564,7 @@ static void nfs_direct_commit_schedule(struct nfs_direct_req *dreq)
data->args.fh = NFS_FH(data->inode);
data->args.offset = 0;
data->args.count = 0;
+ data->args.context = get_nfs_open_context(dreq->ctx);
data->res.count = 0;
data->res.fattr = &data->fattr;
data->res.verf = &data->verf;
@@ -585,7 +604,7 @@ static void nfs_direct_write_complete(struct nfs_direct_req *dreq, struct inode
static void nfs_alloc_commit_data(struct nfs_direct_req *dreq)
{
- dreq->commit_data = nfs_commit_alloc();
+ dreq->commit_data = nfs_commitdata_alloc();
if (dreq->commit_data != NULL)
dreq->commit_data->req = (struct nfs_page *) dreq;
}
@@ -606,11 +625,20 @@ static void nfs_direct_write_complete(struct nfs_direct_req *dreq, struct inode
static void nfs_direct_write_result(struct rpc_task *task, void *calldata)
{
struct nfs_write_data *data = calldata;
- struct nfs_direct_req *dreq = (struct nfs_direct_req *) data->req;
- int status = task->tk_status;
if (nfs_writeback_done(task, data) != 0)
return;
+}
+
+/*
+ * NB: Return the value of the first error return code. Subsequent
+ * errors after the first one are ignored.
+ */
+static void nfs_direct_write_release(void *calldata)
+{
+ struct nfs_write_data *data = calldata;
+ struct nfs_direct_req *dreq = (struct nfs_direct_req *) data->req;
+ int status = data->task.tk_status;
spin_lock(&dreq->lock);
@@ -632,23 +660,13 @@ static void nfs_direct_write_result(struct rpc_task *task, void *calldata)
break;
case NFS_ODIRECT_DO_COMMIT:
if (memcmp(&dreq->verf, &data->verf, sizeof(dreq->verf))) {
- dprintk("NFS: %5u write verify failed\n", task->tk_pid);
+ dprintk("NFS: %5u write verify failed\n", data->task.tk_pid);
dreq->flags = NFS_ODIRECT_RESCHED_WRITES;
}
}
}
out_unlock:
spin_unlock(&dreq->lock);
-}
-
-/*
- * NB: Return the value of the first error return code. Subsequent
- * errors after the first one are ignored.
- */
-static void nfs_direct_write_release(void *calldata)
-{
- struct nfs_write_data *data = calldata;
- struct nfs_direct_req *dreq = (struct nfs_direct_req *) data->req;
if (put_dreq(dreq))
nfs_direct_write_complete(dreq, data->inode);
@@ -682,6 +700,7 @@ static ssize_t nfs_direct_write_schedule_segment(struct nfs_direct_req *dreq,
.rpc_client = NFS_CLIENT(inode),
.rpc_message = &msg,
.callback_ops = &nfs_write_direct_ops,
+ .workqueue = nfsiod_workqueue,
.flags = RPC_TASK_ASYNC,
};
size_t wsize = NFS_SERVER(inode)->wsize;
@@ -728,7 +747,7 @@ static ssize_t nfs_direct_write_schedule_segment(struct nfs_direct_req *dreq,
data->inode = inode;
data->cred = msg.rpc_cred;
data->args.fh = NFS_FH(inode);
- data->args.context = ctx;
+ data->args.context = get_nfs_open_context(ctx);
data->args.offset = pos;
data->args.pgbase = pgbase;
data->args.pages = data->pagevec;
@@ -745,8 +764,9 @@ static ssize_t nfs_direct_write_schedule_segment(struct nfs_direct_req *dreq,
NFS_PROTO(inode)->write_setup(data, &msg);
task = rpc_run_task(&task_setup_data);
- if (!IS_ERR(task))
- rpc_put_task(task);
+ if (IS_ERR(task))
+ break;
+ rpc_put_task(task);
dprintk("NFS: %5u initiated direct write call "
"(req %s/%Ld, %zu bytes @ offset %Lu)\n",
diff --git a/fs/nfs/file.c b/fs/nfs/file.c
index 5d2e9d9a4e2..3536b01164f 100644
--- a/fs/nfs/file.c
+++ b/fs/nfs/file.c
@@ -238,10 +238,8 @@ nfs_file_read(struct kiocb *iocb, const struct iovec *iov,
ssize_t result;
size_t count = iov_length(iov, nr_segs);
-#ifdef CONFIG_NFS_DIRECTIO
if (iocb->ki_filp->f_flags & O_DIRECT)
return nfs_file_direct_read(iocb, iov, nr_segs, pos);
-#endif
dfprintk(VFS, "nfs: read(%s/%s, %lu@%lu)\n",
dentry->d_parent->d_name.name, dentry->d_name.name,
@@ -387,9 +385,7 @@ const struct address_space_operations nfs_file_aops = {
.write_end = nfs_write_end,
.invalidatepage = nfs_invalidate_page,
.releasepage = nfs_release_page,
-#ifdef CONFIG_NFS_DIRECTIO
.direct_IO = nfs_direct_IO,
-#endif
.launder_page = nfs_launder_page,
};
@@ -447,10 +443,8 @@ static ssize_t nfs_file_write(struct kiocb *iocb, const struct iovec *iov,
ssize_t result;
size_t count = iov_length(iov, nr_segs);
-#ifdef CONFIG_NFS_DIRECTIO
if (iocb->ki_filp->f_flags & O_DIRECT)
return nfs_file_direct_write(iocb, iov, nr_segs, pos);
-#endif
dfprintk(VFS, "nfs: write(%s/%s(%ld), %lu@%Ld)\n",
dentry->d_parent->d_name.name, dentry->d_name.name,
@@ -576,17 +570,9 @@ static int do_setlk(struct file *filp, int cmd, struct file_lock *fl)
lock_kernel();
/* Use local locking if mounted with "-onolock" */
- if (!(NFS_SERVER(inode)->flags & NFS_MOUNT_NONLM)) {
+ if (!(NFS_SERVER(inode)->flags & NFS_MOUNT_NONLM))
status = NFS_PROTO(inode)->lock(filp, cmd, fl);
- /* If we were signalled we still need to ensure that
- * we clean up any state on the server. We therefore
- * record the lock call as having succeeded in order to
- * ensure that locks_remove_posix() cleans it out when
- * the process exits.
- */
- if (status == -EINTR || status == -ERESTARTSYS)
- do_vfs_lock(filp, fl);
- } else
+ else
status = do_vfs_lock(filp, fl);
unlock_kernel();
if (status < 0)
diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
index 6f88d7c77ac..5cb3345eb69 100644
--- a/fs/nfs/inode.c
+++ b/fs/nfs/inode.c
@@ -523,8 +523,12 @@ struct nfs_open_context *get_nfs_open_context(struct nfs_open_context *ctx)
static void __put_nfs_open_context(struct nfs_open_context *ctx, int wait)
{
- struct inode *inode = ctx->path.dentry->d_inode;
+ struct inode *inode;
+ if (ctx == NULL)
+ return;
+
+ inode = ctx->path.dentry->d_inode;
if (!atomic_dec_and_lock(&ctx->count, &inode->i_lock))
return;
list_del(&ctx->list);
@@ -610,7 +614,7 @@ int nfs_open(struct inode *inode, struct file *filp)
struct nfs_open_context *ctx;
struct rpc_cred *cred;
- cred = rpcauth_lookupcred(NFS_CLIENT(inode)->cl_auth, 0);
+ cred = rpc_lookup_cred();
if (IS_ERR(cred))
return PTR_ERR(cred);
ctx = alloc_nfs_open_context(filp->f_path.mnt, filp->f_path.dentry, cred);
@@ -1218,6 +1222,36 @@ static void nfs_destroy_inodecache(void)
kmem_cache_destroy(nfs_inode_cachep);
}
+struct workqueue_struct *nfsiod_workqueue;
+
+/*
+ * start up the nfsiod workqueue
+ */
+static int nfsiod_start(void)
+{
+ struct workqueue_struct *wq;
+ dprintk("RPC: creating workqueue nfsiod\n");
+ wq = create_singlethread_workqueue("nfsiod");
+ if (wq == NULL)
+ return -ENOMEM;
+ nfsiod_workqueue = wq;
+ return 0;
+}
+
+/*
+ * Destroy the nfsiod workqueue
+ */
+static void nfsiod_stop(void)
+{
+ struct workqueue_struct *wq;
+
+ wq = nfsiod_workqueue;
+ if (wq == NULL)
+ return;
+ nfsiod_workqueue = NULL;
+ destroy_workqueue(wq);
+}
+
/*
* Initialize NFS
*/
@@ -1225,6 +1259,10 @@ static int __init init_nfs_fs(void)
{
int err;
+ err = nfsiod_start();
+ if (err)
+ goto out6;
+
err = nfs_fs_proc_init();
if (err)
goto out5;
@@ -1271,6 +1309,8 @@ out3:
out4:
nfs_fs_proc_exit();
out5:
+ nfsiod_stop();
+out6:
return err;
}
@@ -1286,6 +1326,7 @@ static void __exit exit_nfs_fs(void)
#endif
unregister_nfs_fs();
nfs_fs_proc_exit();
+ nfsiod_stop();
}
/* Not quite true; I just maintain it */
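
nfsiod_start()/nfsiod_stop() above bracket module init/exit with a single-threaded workqueue, and the .workqueue = nfsiod_workqueue lines elsewhere in the series queue the async RPC completions onto it. A minimal module sketch of the same start/stop pattern, under the assumption that "demo_wq" and the work function are placeholders:

#include <linux/module.h>
#include <linux/workqueue.h>

static struct workqueue_struct *demo_wq;

static void demo_work_fn(struct work_struct *work)
{
	printk(KERN_INFO "demo: ran on the private workqueue\n");
}
static DECLARE_WORK(demo_work, demo_work_fn);

static int __init demo_wq_init(void)
{
	demo_wq = create_singlethread_workqueue("demo_wq");
	if (demo_wq == NULL)
		return -ENOMEM;			/* same failure path as nfsiod_start() */
	queue_work(demo_wq, &demo_work);
	return 0;
}

static void __exit demo_wq_exit(void)
{
	/* Clear the global first, then flush and free, as nfsiod_stop() does. */
	struct workqueue_struct *wq = demo_wq;

	demo_wq = NULL;
	if (wq)
		destroy_workqueue(wq);
}

module_init(demo_wq_init);
module_exit(demo_wq_exit);
MODULE_LICENSE("GPL");

destroy_workqueue() flushes pending work before freeing the queue, which is why clearing the global pointer first is enough to stop new work from being queued during teardown.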
diff --git a/fs/nfs/internal.h b/fs/nfs/internal.h
index 931992763e6..04ae867dddb 100644
--- a/fs/nfs/internal.h
+++ b/fs/nfs/internal.h
@@ -46,9 +46,9 @@ struct nfs_parsed_mount_data {
struct sockaddr_storage address;
size_t addrlen;
char *hostname;
- unsigned int version;
+ u32 version;
unsigned short port;
- int protocol;
+ unsigned short protocol;
} mount_server;
struct {
@@ -56,7 +56,8 @@ struct nfs_parsed_mount_data {
size_t addrlen;
char *hostname;
char *export_path;
- int protocol;
+ unsigned short port;
+ unsigned short protocol;
} nfs_server;
struct security_mnt_opts lsm_opts;
@@ -115,13 +116,8 @@ extern void nfs_destroy_readpagecache(void);
extern int __init nfs_init_writepagecache(void);
extern void nfs_destroy_writepagecache(void);
-#ifdef CONFIG_NFS_DIRECTIO
extern int __init nfs_init_directcache(void);
extern void nfs_destroy_directcache(void);
-#else
-#define nfs_init_directcache() (0)
-#define nfs_destroy_directcache() do {} while(0)
-#endif
/* nfs2xdr.c */
extern int nfs_stat_to_errno(int);
@@ -146,6 +142,7 @@ extern struct rpc_procinfo nfs4_procedures[];
extern int nfs_access_cache_shrinker(int nr_to_scan, gfp_t gfp_mask);
/* inode.c */
+extern struct workqueue_struct *nfsiod_workqueue;
extern struct inode *nfs_alloc_inode(struct super_block *sb);
extern void nfs_destroy_inode(struct inode *);
extern int nfs_write_inode(struct inode *,int);
diff --git a/fs/nfs/namespace.c b/fs/nfs/namespace.c
index 607f6eb9cdb..af4d0f1e402 100644
--- a/fs/nfs/namespace.c
+++ b/fs/nfs/namespace.c
@@ -20,7 +20,7 @@
static void nfs_expire_automounts(struct work_struct *work);
-LIST_HEAD(nfs_automount_list);
+static LIST_HEAD(nfs_automount_list);
static DECLARE_DELAYED_WORK(nfs_automount_task, nfs_expire_automounts);
int nfs_mountpoint_expiry_timeout = 500 * HZ;
diff --git a/fs/nfs/nfs2xdr.c b/fs/nfs/nfs2xdr.c
index 1f7ea675e0c..28bab67d151 100644
--- a/fs/nfs/nfs2xdr.c
+++ b/fs/nfs/nfs2xdr.c
@@ -267,7 +267,7 @@ nfs_xdr_readres(struct rpc_rqst *req, __be32 *p, struct nfs_readres *res)
int status;
if ((status = ntohl(*p++)))
- return -nfs_stat_to_errno(status);
+ return nfs_stat_to_errno(status);
p = xdr_decode_fattr(p, res->fattr);
count = ntohl(*p++);
@@ -428,11 +428,11 @@ nfs_xdr_readdirres(struct rpc_rqst *req, __be32 *p, void *dummy)
size_t hdrlen;
unsigned int pglen, recvd;
u32 len;
- int status, nr;
+ int status, nr = 0;
__be32 *end, *entry, *kaddr;
if ((status = ntohl(*p++)))
- return -nfs_stat_to_errno(status);
+ return nfs_stat_to_errno(status);
hdrlen = (u8 *) p - (u8 *) iov->iov_base;
if (iov->iov_len < hdrlen) {
@@ -452,7 +452,12 @@ nfs_xdr_readdirres(struct rpc_rqst *req, __be32 *p, void *dummy)
kaddr = p = kmap_atomic(*page, KM_USER0);
end = (__be32 *)((char *)p + pglen);
entry = p;
- for (nr = 0; *p++; nr++) {
+
+ /* Make sure the packet actually has a value_follows and EOF entry */
+ if ((entry + 1) > end)
+ goto short_pkt;
+
+ for (; *p++; nr++) {
if (p + 2 > end)
goto short_pkt;
p++; /* fileid */
@@ -467,18 +472,32 @@ nfs_xdr_readdirres(struct rpc_rqst *req, __be32 *p, void *dummy)
goto short_pkt;
entry = p;
}
- if (!nr && (entry[0] != 0 || entry[1] == 0))
- goto short_pkt;
+
+ /*
+ * Apparently some server sends responses that are a valid size, but
+ * contain no entries, and have value_follows==0 and EOF==0. For
+ * those, just set the EOF marker.
+ */
+ if (!nr && entry[1] == 0) {
+ dprintk("NFS: readdir reply truncated!\n");
+ entry[1] = 1;
+ }
out:
kunmap_atomic(kaddr, KM_USER0);
return nr;
short_pkt:
+ /*
+ * When we get a short packet there are 2 possibilities. We can
+ * return an error, or fix up the response to look like a valid
+ * response and return what we have so far. If there are no
+ * entries and the packet was short, then return -EIO. If there
+ * are valid entries in the response, return them and pretend that
+ * the call was successful, but incomplete. The caller can retry the
+ * readdir starting at the last cookie.
+ */
entry[0] = entry[1] = 0;
- /* truncate listing ? */
- if (!nr) {
- dprintk("NFS: readdir reply truncated!\n");
- entry[1] = 1;
- }
+ if (!nr)
+ nr = -errno_NFSERR_IO;
goto out;
err_unmap:
nr = -errno_NFSERR_IO;
@@ -518,7 +537,7 @@ nfs_xdr_stat(struct rpc_rqst *req, __be32 *p, void *dummy)
int status;
if ((status = ntohl(*p++)) != 0)
- status = -nfs_stat_to_errno(status);
+ status = nfs_stat_to_errno(status);
return status;
}
@@ -532,7 +551,7 @@ nfs_xdr_attrstat(struct rpc_rqst *req, __be32 *p, struct nfs_fattr *fattr)
int status;
if ((status = ntohl(*p++)))
- return -nfs_stat_to_errno(status);
+ return nfs_stat_to_errno(status);
xdr_decode_fattr(p, fattr);
return 0;
}
@@ -547,7 +566,7 @@ nfs_xdr_diropres(struct rpc_rqst *req, __be32 *p, struct nfs_diropok *res)
int status;
if ((status = ntohl(*p++)))
- return -nfs_stat_to_errno(status);
+ return nfs_stat_to_errno(status);
p = xdr_decode_fhandle(p, res->fh);
xdr_decode_fattr(p, res->fattr);
return 0;
@@ -585,7 +604,7 @@ nfs_xdr_readlinkres(struct rpc_rqst *req, __be32 *p, void *dummy)
int status;
if ((status = ntohl(*p++)))
- return -nfs_stat_to_errno(status);
+ return nfs_stat_to_errno(status);
/* Convert length of symlink */
len = ntohl(*p++);
if (len >= rcvbuf->page_len) {
@@ -634,7 +653,7 @@ nfs_xdr_statfsres(struct rpc_rqst *req, __be32 *p, struct nfs2_fsstat *res)
int status;
if ((status = ntohl(*p++)))
- return -nfs_stat_to_errno(status);
+ return nfs_stat_to_errno(status);
res->tsize = ntohl(*p++);
res->bsize = ntohl(*p++);
@@ -653,39 +672,39 @@ static struct {
int errno;
} nfs_errtbl[] = {
{ NFS_OK, 0 },
- { NFSERR_PERM, EPERM },
- { NFSERR_NOENT, ENOENT },
- { NFSERR_IO, errno_NFSERR_IO },
- { NFSERR_NXIO, ENXIO },
-/* { NFSERR_EAGAIN, EAGAIN }, */
- { NFSERR_ACCES, EACCES },
- { NFSERR_EXIST, EEXIST },
- { NFSERR_XDEV, EXDEV },
- { NFSERR_NODEV, ENODEV },
- { NFSERR_NOTDIR, ENOTDIR },
- { NFSERR_ISDIR, EISDIR },
- { NFSERR_INVAL, EINVAL },
- { NFSERR_FBIG, EFBIG },
- { NFSERR_NOSPC, ENOSPC },
- { NFSERR_ROFS, EROFS },
- { NFSERR_MLINK, EMLINK },
- { NFSERR_NAMETOOLONG, ENAMETOOLONG },
- { NFSERR_NOTEMPTY, ENOTEMPTY },
- { NFSERR_DQUOT, EDQUOT },
- { NFSERR_STALE, ESTALE },
- { NFSERR_REMOTE, EREMOTE },
+ { NFSERR_PERM, -EPERM },
+ { NFSERR_NOENT, -ENOENT },
+ { NFSERR_IO, -errno_NFSERR_IO},
+ { NFSERR_NXIO, -ENXIO },
+/* { NFSERR_EAGAIN, -EAGAIN }, */
+ { NFSERR_ACCES, -EACCES },
+ { NFSERR_EXIST, -EEXIST },
+ { NFSERR_XDEV, -EXDEV },
+ { NFSERR_NODEV, -ENODEV },
+ { NFSERR_NOTDIR, -ENOTDIR },
+ { NFSERR_ISDIR, -EISDIR },
+ { NFSERR_INVAL, -EINVAL },
+ { NFSERR_FBIG, -EFBIG },
+ { NFSERR_NOSPC, -ENOSPC },
+ { NFSERR_ROFS, -EROFS },
+ { NFSERR_MLINK, -EMLINK },
+ { NFSERR_NAMETOOLONG, -ENAMETOOLONG },
+ { NFSERR_NOTEMPTY, -ENOTEMPTY },
+ { NFSERR_DQUOT, -EDQUOT },
+ { NFSERR_STALE, -ESTALE },
+ { NFSERR_REMOTE, -EREMOTE },
#ifdef EWFLUSH
- { NFSERR_WFLUSH, EWFLUSH },
+ { NFSERR_WFLUSH, -EWFLUSH },
#endif
- { NFSERR_BADHANDLE, EBADHANDLE },
- { NFSERR_NOT_SYNC, ENOTSYNC },
- { NFSERR_BAD_COOKIE, EBADCOOKIE },
- { NFSERR_NOTSUPP, ENOTSUPP },
- { NFSERR_TOOSMALL, ETOOSMALL },
- { NFSERR_SERVERFAULT, ESERVERFAULT },
- { NFSERR_BADTYPE, EBADTYPE },
- { NFSERR_JUKEBOX, EJUKEBOX },
- { -1, EIO }
+ { NFSERR_BADHANDLE, -EBADHANDLE },
+ { NFSERR_NOT_SYNC, -ENOTSYNC },
+ { NFSERR_BAD_COOKIE, -EBADCOOKIE },
+ { NFSERR_NOTSUPP, -ENOTSUPP },
+ { NFSERR_TOOSMALL, -ETOOSMALL },
+ { NFSERR_SERVERFAULT, -ESERVERFAULT },
+ { NFSERR_BADTYPE, -EBADTYPE },
+ { NFSERR_JUKEBOX, -EJUKEBOX },
+ { -1, -EIO }
};
/*
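
The nfs_errtbl change above stores the errno values already negated, so every caller that used to write -nfs_stat_to_errno(status) now returns nfs_stat_to_errno(status) directly and the double negation disappears. A self-contained sketch of that table-lookup shape; the table here is heavily truncated and the function name is local to the sketch.

#include <errno.h>
#include <stdio.h>

#define NFS_OK		0
#define NFSERR_PERM	1
#define NFSERR_NOENT	2
#define NFSERR_IO	5

/* Entries already carry the negative errno, as in the patched nfs_errtbl. */
static const struct {
	int stat;
	int errno_val;
} errtbl[] = {
	{ NFS_OK,	0	},
	{ NFSERR_PERM,	-EPERM	},
	{ NFSERR_NOENT,	-ENOENT	},
	{ NFSERR_IO,	-EIO	},
	{ -1,		-EIO	},	/* catch-all terminator */
};

static int stat_to_errno(int stat)
{
	int i;

	for (i = 0; errtbl[i].stat != -1; i++)
		if (errtbl[i].stat == stat)
			return errtbl[i].errno_val;
	return errtbl[i].errno_val;	/* unknown status maps to -EIO */
}

int main(void)
{
	/* Callers return the lookup result as-is, with no extra '-'. */
	printf("%d %d\n", stat_to_errno(NFSERR_NOENT), stat_to_errno(42));
	return 0;
}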
diff --git a/fs/nfs/nfs3xdr.c b/fs/nfs/nfs3xdr.c
index 3917e2fa4e4..11cdddec143 100644
--- a/fs/nfs/nfs3xdr.c
+++ b/fs/nfs/nfs3xdr.c
@@ -508,14 +508,14 @@ nfs3_xdr_readdirres(struct rpc_rqst *req, __be32 *p, struct nfs3_readdirres *res
struct page **page;
size_t hdrlen;
u32 len, recvd, pglen;
- int status, nr;
+ int status, nr = 0;
__be32 *entry, *end, *kaddr;
status = ntohl(*p++);
/* Decode post_op_attrs */
p = xdr_decode_post_op_attr(p, res->dir_attr);
if (status)
- return -nfs_stat_to_errno(status);
+ return nfs_stat_to_errno(status);
/* Decode verifier cookie */
if (res->verf) {
res->verf[0] = *p++;
@@ -542,7 +542,12 @@ nfs3_xdr_readdirres(struct rpc_rqst *req, __be32 *p, struct nfs3_readdirres *res
kaddr = p = kmap_atomic(*page, KM_USER0);
end = (__be32 *)((char *)p + pglen);
entry = p;
- for (nr = 0; *p++; nr++) {
+
+ /* Make sure the packet actually has a value_follows and EOF entry */
+ if ((entry + 1) > end)
+ goto short_pkt;
+
+ for (; *p++; nr++) {
if (p + 3 > end)
goto short_pkt;
p += 2; /* inode # */
@@ -581,18 +586,32 @@ nfs3_xdr_readdirres(struct rpc_rqst *req, __be32 *p, struct nfs3_readdirres *res
goto short_pkt;
entry = p;
}
- if (!nr && (entry[0] != 0 || entry[1] == 0))
- goto short_pkt;
+
+ /*
+ * Apparently some server sends responses that are a valid size, but
+ * contain no entries, and have value_follows==0 and EOF==0. For
+ * those, just set the EOF marker.
+ */
+ if (!nr && entry[1] == 0) {
+ dprintk("NFS: readdir reply truncated!\n");
+ entry[1] = 1;
+ }
out:
kunmap_atomic(kaddr, KM_USER0);
return nr;
short_pkt:
+ /*
+ * When we get a short packet there are 2 possibilities. We can
+ * return an error, or fix up the response to look like a valid
+ * response and return what we have so far. If there are no
+ * entries and the packet was short, then return -EIO. If there
+ * are valid entries in the response, return them and pretend that
+ * the call was successful, but incomplete. The caller can retry the
+ * readdir starting at the last cookie.
+ */
entry[0] = entry[1] = 0;
- /* truncate listing ? */
- if (!nr) {
- dprintk("NFS: readdir reply truncated!\n");
- entry[1] = 1;
- }
+ if (!nr)
+ nr = -errno_NFSERR_IO;
goto out;
err_unmap:
nr = -errno_NFSERR_IO;
@@ -732,7 +751,7 @@ nfs3_xdr_attrstat(struct rpc_rqst *req, __be32 *p, struct nfs_fattr *fattr)
int status;
if ((status = ntohl(*p++)))
- return -nfs_stat_to_errno(status);
+ return nfs_stat_to_errno(status);
xdr_decode_fattr(p, fattr);
return 0;
}
@@ -747,7 +766,7 @@ nfs3_xdr_wccstat(struct rpc_rqst *req, __be32 *p, struct nfs_fattr *fattr)
int status;
if ((status = ntohl(*p++)))
- status = -nfs_stat_to_errno(status);
+ status = nfs_stat_to_errno(status);
xdr_decode_wcc_data(p, fattr);
return status;
}
@@ -767,7 +786,7 @@ nfs3_xdr_lookupres(struct rpc_rqst *req, __be32 *p, struct nfs3_diropres *res)
int status;
if ((status = ntohl(*p++))) {
- status = -nfs_stat_to_errno(status);
+ status = nfs_stat_to_errno(status);
} else {
if (!(p = xdr_decode_fhandle(p, res->fh)))
return -errno_NFSERR_IO;
@@ -787,7 +806,7 @@ nfs3_xdr_accessres(struct rpc_rqst *req, __be32 *p, struct nfs3_accessres *res)
p = xdr_decode_post_op_attr(p, res->fattr);
if (status)
- return -nfs_stat_to_errno(status);
+ return nfs_stat_to_errno(status);
res->access = ntohl(*p++);
return 0;
}
@@ -824,7 +843,7 @@ nfs3_xdr_readlinkres(struct rpc_rqst *req, __be32 *p, struct nfs_fattr *fattr)
p = xdr_decode_post_op_attr(p, fattr);
if (status != 0)
- return -nfs_stat_to_errno(status);
+ return nfs_stat_to_errno(status);
/* Convert length of symlink */
len = ntohl(*p++);
@@ -872,7 +891,7 @@ nfs3_xdr_readres(struct rpc_rqst *req, __be32 *p, struct nfs_readres *res)
p = xdr_decode_post_op_attr(p, res->fattr);
if (status != 0)
- return -nfs_stat_to_errno(status);
+ return nfs_stat_to_errno(status);
/* Decode reply count and EOF flag. NFSv3 is somewhat redundant
* in that it puts the count both in the res struct and in the
@@ -922,7 +941,7 @@ nfs3_xdr_writeres(struct rpc_rqst *req, __be32 *p, struct nfs_writeres *res)
p = xdr_decode_wcc_data(p, res->fattr);
if (status != 0)
- return -nfs_stat_to_errno(status);
+ return nfs_stat_to_errno(status);
res->count = ntohl(*p++);
res->verf->committed = (enum nfs3_stable_how)ntohl(*p++);
@@ -953,7 +972,7 @@ nfs3_xdr_createres(struct rpc_rqst *req, __be32 *p, struct nfs3_diropres *res)
res->fattr->valid = 0;
}
} else {
- status = -nfs_stat_to_errno(status);
+ status = nfs_stat_to_errno(status);
}
p = xdr_decode_wcc_data(p, res->dir_attr);
return status;
@@ -968,7 +987,7 @@ nfs3_xdr_renameres(struct rpc_rqst *req, __be32 *p, struct nfs3_renameres *res)
int status;
if ((status = ntohl(*p++)) != 0)
- status = -nfs_stat_to_errno(status);
+ status = nfs_stat_to_errno(status);
p = xdr_decode_wcc_data(p, res->fromattr);
p = xdr_decode_wcc_data(p, res->toattr);
return status;
@@ -983,7 +1002,7 @@ nfs3_xdr_linkres(struct rpc_rqst *req, __be32 *p, struct nfs3_linkres *res)
int status;
if ((status = ntohl(*p++)) != 0)
- status = -nfs_stat_to_errno(status);
+ status = nfs_stat_to_errno(status);
p = xdr_decode_post_op_attr(p, res->fattr);
p = xdr_decode_wcc_data(p, res->dir_attr);
return status;
@@ -1001,7 +1020,7 @@ nfs3_xdr_fsstatres(struct rpc_rqst *req, __be32 *p, struct nfs_fsstat *res)
p = xdr_decode_post_op_attr(p, res->fattr);
if (status != 0)
- return -nfs_stat_to_errno(status);
+ return nfs_stat_to_errno(status);
p = xdr_decode_hyper(p, &res->tbytes);
p = xdr_decode_hyper(p, &res->fbytes);
@@ -1026,7 +1045,7 @@ nfs3_xdr_fsinfores(struct rpc_rqst *req, __be32 *p, struct nfs_fsinfo *res)
p = xdr_decode_post_op_attr(p, res->fattr);
if (status != 0)
- return -nfs_stat_to_errno(status);
+ return nfs_stat_to_errno(status);
res->rtmax = ntohl(*p++);
res->rtpref = ntohl(*p++);
@@ -1054,7 +1073,7 @@ nfs3_xdr_pathconfres(struct rpc_rqst *req, __be32 *p, struct nfs_pathconf *res)
p = xdr_decode_post_op_attr(p, res->fattr);
if (status != 0)
- return -nfs_stat_to_errno(status);
+ return nfs_stat_to_errno(status);
res->max_link = ntohl(*p++);
res->max_namelen = ntohl(*p++);
@@ -1073,7 +1092,7 @@ nfs3_xdr_commitres(struct rpc_rqst *req, __be32 *p, struct nfs_writeres *res)
status = ntohl(*p++);
p = xdr_decode_wcc_data(p, res->fattr);
if (status != 0)
- return -nfs_stat_to_errno(status);
+ return nfs_stat_to_errno(status);
res->verf->verifier[0] = *p++;
res->verf->verifier[1] = *p++;
@@ -1095,7 +1114,7 @@ nfs3_xdr_getaclres(struct rpc_rqst *req, __be32 *p,
int err, base;
if (status != 0)
- return -nfs_stat_to_errno(status);
+ return nfs_stat_to_errno(status);
p = xdr_decode_post_op_attr(p, res->fattr);
res->mask = ntohl(*p++);
if (res->mask & ~(NFS_ACL|NFS_ACLCNT|NFS_DFACL|NFS_DFACLCNT))
@@ -1122,7 +1141,7 @@ nfs3_xdr_setaclres(struct rpc_rqst *req, __be32 *p, struct nfs_fattr *fattr)
int status = ntohl(*p++);
if (status)
- return -nfs_stat_to_errno(status);
+ return nfs_stat_to_errno(status);
xdr_decode_post_op_attr(p, fattr);
return 0;
}
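
The readdir decoders in nfs2xdr.c, nfs3xdr.c and nfs4xdr.c now bound-check before touching the first value_follows/EOF pair and, on a short packet, zero the current entry and return whatever was decoded so far; -errno_NFSERR_IO is returned only when nothing could be decoded, so the caller can otherwise retry from the last cookie. A userspace sketch of that walk over a flat word array, with the layout simplified to one word per entry plus a trailing EOF word rather than real XDR:

#include <stdio.h>

/*
 * Simplified analogue of the patched decode loop: 'p' walks words, each
 * nonzero word means "one more entry follows", and the walk must never run
 * past 'end'.  Returns the number of entries decoded, or -5 (EIO-like) if
 * the buffer was too short to contain anything at all.
 */
static int decode_entries(unsigned int *p, unsigned int *end)
{
	unsigned int *entry = p;
	int nr = 0;

	/* Make sure the buffer has at least value_follows + EOF. */
	if (entry + 1 > end)
		goto short_pkt;

	for (; *p++; nr++) {
		if (p + 1 > end)	/* next value_follows would overrun */
			goto short_pkt;
		entry = p;
	}
	return nr;

short_pkt:
	if (entry < end)
		entry[0] = 0;		/* truncate the listing in place */
	if (!nr)
		return -5;		/* nothing decoded: report an error */
	return nr;			/* partial success: caller re-reads from last cookie */
}

int main(void)
{
	unsigned int ok[]    = { 1, 1, 1, 0 };	/* three entries, then EOF word */
	unsigned int cut[]   = { 1, 1, 1 };	/* terminator missing */

	printf("%d\n", decode_entries(ok, ok + 4));	/* 3 */
	printf("%d\n", decode_entries(cut, cut + 3));	/* 2, listing truncated */
	return 0;
}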
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
index 7ce07862c2f..dbc09271af0 100644
--- a/fs/nfs/nfs4proc.c
+++ b/fs/nfs/nfs4proc.c
@@ -51,6 +51,7 @@
#include "nfs4_fs.h"
#include "delegation.h"
+#include "internal.h"
#include "iostat.h"
#define NFSDBG_FACILITY NFSDBG_PROC
@@ -239,6 +240,8 @@ static void nfs4_init_opendata_res(struct nfs4_opendata *p)
{
p->o_res.f_attr = &p->f_attr;
p->o_res.dir_attr = &p->dir_attr;
+ p->o_res.seqid = p->o_arg.seqid;
+ p->c_res.seqid = p->c_arg.seqid;
p->o_res.server = p->o_arg.server;
nfs_fattr_init(&p->f_attr);
nfs_fattr_init(&p->dir_attr);
@@ -729,7 +732,6 @@ static void nfs4_open_confirm_done(struct rpc_task *task, void *calldata)
renew_lease(data->o_res.server, data->timestamp);
data->rpc_done = 1;
}
- nfs_increment_open_seqid(data->rpc_status, data->c_arg.seqid);
}
static void nfs4_open_confirm_release(void *calldata)
@@ -773,6 +775,7 @@ static int _nfs4_proc_open_confirm(struct nfs4_opendata *data)
.rpc_message = &msg,
.callback_ops = &nfs4_open_confirm_ops,
.callback_data = data,
+ .workqueue = nfsiod_workqueue,
.flags = RPC_TASK_ASYNC,
};
int status;
@@ -858,7 +861,6 @@ static void nfs4_open_done(struct rpc_task *task, void *calldata)
if (!(data->o_res.rflags & NFS4_OPEN_RESULT_CONFIRM))
nfs_confirm_seqid(&data->owner->so_seqid, 0);
}
- nfs_increment_open_seqid(data->rpc_status, data->o_arg.seqid);
data->rpc_done = 1;
}
@@ -910,6 +912,7 @@ static int _nfs4_proc_open(struct nfs4_opendata *data)
.rpc_message = &msg,
.callback_ops = &nfs4_open_ops,
.callback_data = data,
+ .workqueue = nfsiod_workqueue,
.flags = RPC_TASK_ASYNC,
};
int status;
@@ -979,11 +982,8 @@ static int _nfs4_open_expired(struct nfs_open_context *ctx, struct nfs4_state *s
if (IS_ERR(opendata))
return PTR_ERR(opendata);
ret = nfs4_open_recover(opendata, state);
- if (ret == -ESTALE) {
- /* Invalidate the state owner so we don't ever use it again */
- nfs4_drop_state_owner(state->owner);
+ if (ret == -ESTALE)
d_drop(ctx->path.dentry);
- }
nfs4_opendata_put(opendata);
return ret;
}
@@ -1226,7 +1226,6 @@ static void nfs4_close_done(struct rpc_task *task, void *data)
/* hmm. we are done with the inode, and in the process of freeing
* the state_owner. we keep this around to process errors
*/
- nfs_increment_open_seqid(task->tk_status, calldata->arg.seqid);
switch (task->tk_status) {
case 0:
nfs_set_open_stateid(state, &calldata->res.stateid, 0);
@@ -1315,6 +1314,7 @@ int nfs4_do_close(struct path *path, struct nfs4_state *state, int wait)
.rpc_client = server->client,
.rpc_message = &msg,
.callback_ops = &nfs4_close_ops,
+ .workqueue = nfsiod_workqueue,
.flags = RPC_TASK_ASYNC,
};
int status = -ENOMEM;
@@ -1332,6 +1332,7 @@ int nfs4_do_close(struct path *path, struct nfs4_state *state, int wait)
goto out_free_calldata;
calldata->arg.bitmask = server->attr_bitmask;
calldata->res.fattr = &calldata->fattr;
+ calldata->res.seqid = calldata->arg.seqid;
calldata->res.server = server;
calldata->path.mnt = mntget(path->mnt);
calldata->path.dentry = dget(path->dentry);
@@ -1404,7 +1405,7 @@ nfs4_atomic_open(struct inode *dir, struct dentry *dentry, struct nameidata *nd)
BUG_ON(nd->intent.open.flags & O_CREAT);
}
- cred = rpcauth_lookupcred(NFS_CLIENT(dir)->cl_auth, 0);
+ cred = rpc_lookup_cred();
if (IS_ERR(cred))
return (struct dentry *)cred;
parent = dentry->d_parent;
@@ -1439,7 +1440,7 @@ nfs4_open_revalidate(struct inode *dir, struct dentry *dentry, int openflags, st
struct rpc_cred *cred;
struct nfs4_state *state;
- cred = rpcauth_lookupcred(NFS_CLIENT(dir)->cl_auth, 0);
+ cred = rpc_lookup_cred();
if (IS_ERR(cred))
return PTR_ERR(cred);
state = nfs4_do_open(dir, &path, openflags, NULL, cred);
@@ -1656,7 +1657,7 @@ nfs4_proc_setattr(struct dentry *dentry, struct nfs_fattr *fattr,
nfs_fattr_init(fattr);
- cred = rpcauth_lookupcred(NFS_CLIENT(inode)->cl_auth, 0);
+ cred = rpc_lookup_cred();
if (IS_ERR(cred))
return PTR_ERR(cred);
@@ -1892,7 +1893,7 @@ nfs4_proc_create(struct inode *dir, struct dentry *dentry, struct iattr *sattr,
struct rpc_cred *cred;
int status = 0;
- cred = rpcauth_lookupcred(NFS_CLIENT(dir)->cl_auth, 0);
+ cred = rpc_lookup_cred();
if (IS_ERR(cred)) {
status = PTR_ERR(cred);
goto out;
@@ -2761,10 +2762,10 @@ nfs4_async_handle_error(struct rpc_task *task, const struct nfs_server *server)
case -NFS4ERR_STALE_CLIENTID:
case -NFS4ERR_STALE_STATEID:
case -NFS4ERR_EXPIRED:
- rpc_sleep_on(&clp->cl_rpcwaitq, task, NULL, NULL);
+ rpc_sleep_on(&clp->cl_rpcwaitq, task, NULL);
nfs4_schedule_state_recovery(clp);
if (test_bit(NFS4CLNT_STATE_RECOVER, &clp->cl_state) == 0)
- rpc_wake_up_task(task);
+ rpc_wake_up_queued_task(&clp->cl_rpcwaitq, task);
task->tk_status = 0;
return -EAGAIN;
case -NFS4ERR_DELAY:
@@ -2884,7 +2885,7 @@ int nfs4_proc_setclientid(struct nfs_client *clp, u32 program, unsigned short po
RPC_DISPLAY_ADDR),
rpc_peeraddr2str(clp->cl_rpcclient,
RPC_DISPLAY_PROTO),
- cred->cr_ops->cr_name,
+ clp->cl_rpcclient->cl_auth->au_ops->au_name,
clp->cl_id_uniquifier);
setclientid.sc_netid_len = scnprintf(setclientid.sc_netid,
sizeof(setclientid.sc_netid),
@@ -3158,6 +3159,7 @@ static struct nfs4_unlockdata *nfs4_alloc_unlockdata(struct file_lock *fl,
p->arg.fh = NFS_FH(inode);
p->arg.fl = &p->fl;
p->arg.seqid = seqid;
+ p->res.seqid = seqid;
p->arg.stateid = &lsp->ls_stateid;
p->lsp = lsp;
atomic_inc(&lsp->ls_count);
@@ -3183,7 +3185,6 @@ static void nfs4_locku_done(struct rpc_task *task, void *data)
if (RPC_ASSASSINATED(task))
return;
- nfs_increment_lock_seqid(task->tk_status, calldata->arg.seqid);
switch (task->tk_status) {
case 0:
memcpy(calldata->lsp->ls_stateid.data,
@@ -3235,6 +3236,7 @@ static struct rpc_task *nfs4_do_unlck(struct file_lock *fl,
.rpc_client = NFS_CLIENT(lsp->ls_state->inode),
.rpc_message = &msg,
.callback_ops = &nfs4_locku_ops,
+ .workqueue = nfsiod_workqueue,
.flags = RPC_TASK_ASYNC,
};
@@ -3261,6 +3263,7 @@ static int nfs4_proc_unlck(struct nfs4_state *state, int cmd, struct file_lock *
struct nfs4_lock_state *lsp;
struct rpc_task *task;
int status = 0;
+ unsigned char fl_flags = request->fl_flags;
status = nfs4_set_lock_state(state, request);
/* Unlock _before_ we do the RPC call */
@@ -3284,6 +3287,7 @@ static int nfs4_proc_unlck(struct nfs4_state *state, int cmd, struct file_lock *
status = nfs4_wait_for_completion_rpc_task(task);
rpc_put_task(task);
out:
+ request->fl_flags = fl_flags;
return status;
}
@@ -3320,6 +3324,7 @@ static struct nfs4_lockdata *nfs4_alloc_lockdata(struct file_lock *fl,
p->arg.lock_stateid = &lsp->ls_stateid;
p->arg.lock_owner.clientid = server->nfs_client->cl_clientid;
p->arg.lock_owner.id = lsp->ls_id.id;
+ p->res.lock_seqid = p->arg.lock_seqid;
p->lsp = lsp;
atomic_inc(&lsp->ls_count);
p->ctx = get_nfs_open_context(ctx);
@@ -3346,6 +3351,7 @@ static void nfs4_lock_prepare(struct rpc_task *task, void *calldata)
return;
data->arg.open_stateid = &state->stateid;
data->arg.new_lock_owner = 1;
+ data->res.open_seqid = data->arg.open_seqid;
} else
data->arg.new_lock_owner = 0;
data->timestamp = jiffies;
@@ -3363,7 +3369,6 @@ static void nfs4_lock_done(struct rpc_task *task, void *calldata)
if (RPC_ASSASSINATED(task))
goto out;
if (data->arg.new_lock_owner != 0) {
- nfs_increment_open_seqid(data->rpc_status, data->arg.open_seqid);
if (data->rpc_status == 0)
nfs_confirm_seqid(&data->lsp->ls_seqid, 0);
else
@@ -3375,7 +3380,6 @@ static void nfs4_lock_done(struct rpc_task *task, void *calldata)
data->lsp->ls_flags |= NFS_LOCK_INITIALIZED;
renew_lease(NFS_SERVER(data->ctx->path.dentry->d_inode), data->timestamp);
}
- nfs_increment_lock_seqid(data->rpc_status, data->arg.lock_seqid);
out:
dprintk("%s: done, ret = %d!\n", __FUNCTION__, data->rpc_status);
}
@@ -3419,6 +3423,7 @@ static int _nfs4_do_setlk(struct nfs4_state *state, int cmd, struct file_lock *f
.rpc_client = NFS_CLIENT(state->inode),
.rpc_message = &msg,
.callback_ops = &nfs4_lock_ops,
+ .workqueue = nfsiod_workqueue,
.flags = RPC_TASK_ASYNC,
};
int ret;
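
The nfs4proc.c hunks drop the nfs_increment_open_seqid()/nfs_increment_lock_seqid() calls from the RPC callbacks because the nfs4xdr.c decode routines further down now invoke them for any decode status other than -EIO, i.e. whenever a usable reply was actually parsed. The sketch below only illustrates that placement rule with stand-in types; the real increment helpers apply additional policy based on the specific NFS status.

#include <errno.h>
#include <stdio.h>

/* Stand-in for the nfs_seqid counter owned by an open/lock owner. */
struct seqid {
	unsigned int value;
};

/*
 * Analogue of the pattern now used in decode_open()/decode_close() etc.:
 * only a decode failure (-EIO) means "no reply to account for", so only
 * that case leaves the sequence id untouched.
 */
static int decode_op(int server_status, struct seqid *seqid)
{
	int status = server_status;		/* stand-in for decode_op_hdr() */

	if (status != -EIO)
		seqid->value++;			/* nfs_increment_*_seqid() analogue */
	return status;
}

int main(void)
{
	struct seqid open_seqid = { 1 };

	decode_op(0, &open_seqid);		/* success: bump */
	decode_op(-EACCES, &open_seqid);	/* server error: still a reply, bump */
	decode_op(-EIO, &open_seqid);		/* garbled reply: do not bump */
	printf("seqid=%u\n", open_seqid.value);	/* prints 3 */
	return 0;
}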
diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c
index b962397004c..46eb624e4f1 100644
--- a/fs/nfs/nfs4state.c
+++ b/fs/nfs/nfs4state.c
@@ -71,6 +71,29 @@ static int nfs4_init_client(struct nfs_client *clp, struct rpc_cred *cred)
return status;
}
+static struct rpc_cred *nfs4_get_machine_cred(struct nfs_client *clp)
+{
+ struct rpc_cred *cred = NULL;
+
+ spin_lock(&clp->cl_lock);
+ if (clp->cl_machine_cred != NULL)
+ cred = get_rpccred(clp->cl_machine_cred);
+ spin_unlock(&clp->cl_lock);
+ return cred;
+}
+
+static void nfs4_clear_machine_cred(struct nfs_client *clp)
+{
+ struct rpc_cred *cred;
+
+ spin_lock(&clp->cl_lock);
+ cred = clp->cl_machine_cred;
+ clp->cl_machine_cred = NULL;
+ spin_unlock(&clp->cl_lock);
+ if (cred != NULL)
+ put_rpccred(cred);
+}
+
struct rpc_cred *nfs4_get_renew_cred(struct nfs_client *clp)
{
struct nfs4_state_owner *sp;
@@ -91,13 +114,18 @@ static struct rpc_cred *nfs4_get_setclientid_cred(struct nfs_client *clp)
{
struct nfs4_state_owner *sp;
struct rb_node *pos;
+ struct rpc_cred *cred;
+ cred = nfs4_get_machine_cred(clp);
+ if (cred != NULL)
+ goto out;
pos = rb_first(&clp->cl_state_owners);
if (pos != NULL) {
sp = rb_entry(pos, struct nfs4_state_owner, so_client_node);
- return get_rpccred(sp->so_cred);
+ cred = get_rpccred(sp->so_cred);
}
- return NULL;
+out:
+ return cred;
}
static void nfs_alloc_unique_id(struct rb_root *root, struct nfs_unique_id *new,
@@ -292,8 +320,10 @@ struct nfs4_state_owner *nfs4_get_state_owner(struct nfs_server *server, struct
spin_unlock(&clp->cl_lock);
if (sp == new)
get_rpccred(cred);
- else
+ else {
+ rpc_destroy_wait_queue(&new->so_sequence.wait);
kfree(new);
+ }
return sp;
}
@@ -310,6 +340,7 @@ void nfs4_put_state_owner(struct nfs4_state_owner *sp)
return;
nfs4_remove_state_owner(clp, sp);
spin_unlock(&clp->cl_lock);
+ rpc_destroy_wait_queue(&sp->so_sequence.wait);
put_rpccred(cred);
kfree(sp);
}
@@ -529,6 +560,7 @@ static void nfs4_free_lock_state(struct nfs4_lock_state *lsp)
spin_lock(&clp->cl_lock);
nfs_free_unique_id(&clp->cl_lockowner_id, &lsp->ls_id);
spin_unlock(&clp->cl_lock);
+ rpc_destroy_wait_queue(&lsp->ls_sequence.wait);
kfree(lsp);
}
@@ -731,7 +763,7 @@ int nfs_wait_on_sequence(struct nfs_seqid *seqid, struct rpc_task *task)
list_add_tail(&seqid->list, &sequence->list);
if (list_first_entry(&sequence->list, struct nfs_seqid, list) == seqid)
goto unlock;
- rpc_sleep_on(&sequence->wait, task, NULL, NULL);
+ rpc_sleep_on(&sequence->wait, task, NULL);
status = -EAGAIN;
unlock:
spin_unlock(&sequence->lock);
@@ -920,10 +952,10 @@ restart_loop:
if (cred != NULL) {
/* Yes there are: try to renew the old lease */
status = nfs4_proc_renew(clp, cred);
+ put_rpccred(cred);
switch (status) {
case 0:
case -NFS4ERR_CB_PATH_DOWN:
- put_rpccred(cred);
goto out;
case -NFS4ERR_STALE_CLIENTID:
case -NFS4ERR_LEASE_MOVED:
@@ -932,14 +964,19 @@ restart_loop:
} else {
/* "reboot" to ensure we clear all state on the server */
clp->cl_boot_time = CURRENT_TIME;
- cred = nfs4_get_setclientid_cred(clp);
}
/* We're going to have to re-establish a clientid */
nfs4_state_mark_reclaim(clp);
status = -ENOENT;
+ cred = nfs4_get_setclientid_cred(clp);
if (cred != NULL) {
status = nfs4_init_client(clp, cred);
put_rpccred(cred);
+ /* Handle case where the user hasn't set up machine creds */
+ if (status == -EACCES && cred == clp->cl_machine_cred) {
+ nfs4_clear_machine_cred(clp);
+ goto restart_loop;
+ }
}
if (status)
goto out_error;
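
The state-recovery hunk above re-fetches the SETCLIENTID credential on every pass of the loop and, when the call fails with -EACCES while using the machine credential, clears that credential and restarts so the next pass falls back to a state owner's credential. A small sketch of that fallback order, with hypothetical names standing in for the rpc_cred handling:

#include <errno.h>
#include <stdio.h>

/* Hypothetical credential handles for the sketch. */
static const char *machine_cred = "machine";	/* may be dropped below */
static const char *owner_cred = "owner";

static const char *get_setclientid_cred(void)
{
	/* Prefer the machine credential, as nfs4_get_setclientid_cred() now does. */
	return machine_cred ? machine_cred : owner_cred;
}

static int setclientid(const char *cred)
{
	/* Pretend the server rejects the machine credential. */
	return (cred == machine_cred) ? -EACCES : 0;
}

int main(void)
{
	const char *cred;
	int status;

restart:
	cred = get_setclientid_cred();
	status = setclientid(cred);
	if (status == -EACCES && cred == machine_cred) {
		machine_cred = NULL;	/* nfs4_clear_machine_cred() analogue */
		goto restart;		/* retry with an owner credential */
	}
	printf("recovered with %s cred, status=%d\n", cred, status);
	return status;
}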
diff --git a/fs/nfs/nfs4xdr.c b/fs/nfs/nfs4xdr.c
index db1ed9c46ed..5a2d64927b3 100644
--- a/fs/nfs/nfs4xdr.c
+++ b/fs/nfs/nfs4xdr.c
@@ -110,7 +110,7 @@ static int nfs4_stat_to_errno(int);
#define decode_savefh_maxsz (op_decode_hdr_maxsz)
#define encode_restorefh_maxsz (op_encode_hdr_maxsz)
#define decode_restorefh_maxsz (op_decode_hdr_maxsz)
-#define encode_fsinfo_maxsz (op_encode_hdr_maxsz + 2)
+#define encode_fsinfo_maxsz (encode_getattr_maxsz)
#define decode_fsinfo_maxsz (op_decode_hdr_maxsz + 11)
#define encode_renew_maxsz (op_encode_hdr_maxsz + 3)
#define decode_renew_maxsz (op_decode_hdr_maxsz)
@@ -1191,8 +1191,8 @@ static int encode_readdir(struct xdr_stream *xdr, const struct nfs4_readdir_arg
attrs[1] &= ~FATTR4_WORD1_MOUNTED_ON_FILEID;
WRITE32(attrs[0] & readdir->bitmask[0]);
WRITE32(attrs[1] & readdir->bitmask[1]);
- dprintk("%s: cookie = %Lu, verifier = 0x%x%x, bitmap = 0x%x%x\n",
- __FUNCTION__,
+ dprintk("%s: cookie = %Lu, verifier = %08x:%08x, bitmap = %08x:%08x\n",
+ __func__,
(unsigned long long)readdir->cookie,
((u32 *)readdir->verifier.data)[0],
((u32 *)readdir->verifier.data)[1],
@@ -2241,7 +2241,7 @@ static int decode_op_hdr(struct xdr_stream *xdr, enum nfs_opnum4 expected)
}
READ32(nfserr);
if (nfserr != NFS_OK)
- return -nfs4_stat_to_errno(nfserr);
+ return nfs4_stat_to_errno(nfserr);
return 0;
}
@@ -2291,7 +2291,7 @@ static int decode_attr_supported(struct xdr_stream *xdr, uint32_t *bitmap, uint3
bitmap[0] &= ~FATTR4_WORD0_SUPPORTED_ATTRS;
} else
bitmask[0] = bitmask[1] = 0;
- dprintk("%s: bitmask=0x%x%x\n", __FUNCTION__, bitmask[0], bitmask[1]);
+ dprintk("%s: bitmask=%08x:%08x\n", __func__, bitmask[0], bitmask[1]);
return 0;
}
@@ -3005,6 +3005,8 @@ static int decode_close(struct xdr_stream *xdr, struct nfs_closeres *res)
int status;
status = decode_op_hdr(xdr, OP_CLOSE);
+ if (status != -EIO)
+ nfs_increment_open_seqid(status, res->seqid);
if (status)
return status;
READ_BUF(NFS4_STATEID_SIZE);
@@ -3296,11 +3298,17 @@ static int decode_lock(struct xdr_stream *xdr, struct nfs_lock_res *res)
int status;
status = decode_op_hdr(xdr, OP_LOCK);
+ if (status == -EIO)
+ goto out;
if (status == 0) {
READ_BUF(NFS4_STATEID_SIZE);
COPYMEM(res->stateid.data, NFS4_STATEID_SIZE);
} else if (status == -NFS4ERR_DENIED)
- return decode_lock_denied(xdr, NULL);
+ status = decode_lock_denied(xdr, NULL);
+ if (res->open_seqid != NULL)
+ nfs_increment_open_seqid(status, res->open_seqid);
+ nfs_increment_lock_seqid(status, res->lock_seqid);
+out:
return status;
}
@@ -3319,6 +3327,8 @@ static int decode_locku(struct xdr_stream *xdr, struct nfs_locku_res *res)
int status;
status = decode_op_hdr(xdr, OP_LOCKU);
+ if (status != -EIO)
+ nfs_increment_lock_seqid(status, res->seqid);
if (status == 0) {
READ_BUF(NFS4_STATEID_SIZE);
COPYMEM(res->stateid.data, NFS4_STATEID_SIZE);
@@ -3384,6 +3394,8 @@ static int decode_open(struct xdr_stream *xdr, struct nfs_openres *res)
int status;
status = decode_op_hdr(xdr, OP_OPEN);
+ if (status != -EIO)
+ nfs_increment_open_seqid(status, res->seqid);
if (status)
return status;
READ_BUF(NFS4_STATEID_SIZE);
@@ -3416,6 +3428,8 @@ static int decode_open_confirm(struct xdr_stream *xdr, struct nfs_open_confirmre
int status;
status = decode_op_hdr(xdr, OP_OPEN_CONFIRM);
+ if (status != -EIO)
+ nfs_increment_open_seqid(status, res->seqid);
if (status)
return status;
READ_BUF(NFS4_STATEID_SIZE);
@@ -3429,6 +3443,8 @@ static int decode_open_downgrade(struct xdr_stream *xdr, struct nfs_closeres *re
int status;
status = decode_op_hdr(xdr, OP_OPEN_DOWNGRADE);
+ if (status != -EIO)
+ nfs_increment_open_seqid(status, res->seqid);
if (status)
return status;
READ_BUF(NFS4_STATEID_SIZE);
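Each stateful decoder above now follows the same rule: advance the open or lock sequence id for any reply that could actually be parsed, and skip the bump only when decode_op_hdr() itself failed with -EIO. A schematic sketch of that rule, where struct seqid, decode_op_header() and increment_seqid() are illustrative stand-ins rather than the kernel helpers:

#include <errno.h>

struct seqid;                                    /* stand-in for struct nfs_seqid */
int decode_op_header(int op);                    /* stand-in for decode_op_hdr() */
void increment_seqid(int status, struct seqid *seqid);

static int decode_stateful_op(int op, struct seqid *seqid)
{
        int status = decode_op_header(op);

        /* -EIO means the reply could not be parsed at all, so nothing is
         * known about the server's slot.  Any other outcome, success or an
         * NFS error, consumed a sequence id and must advance the counter. */
        if (status != -EIO)
                increment_seqid(status, seqid);
        return status;
}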
@@ -3481,7 +3497,7 @@ static int decode_readdir(struct xdr_stream *xdr, struct rpc_rqst *req, struct n
size_t hdrlen;
u32 recvd, pglen = rcvbuf->page_len;
__be32 *end, *entry, *p, *kaddr;
- unsigned int nr;
+ unsigned int nr = 0;
int status;
status = decode_op_hdr(xdr, OP_READDIR);
@@ -3489,8 +3505,8 @@ static int decode_readdir(struct xdr_stream *xdr, struct rpc_rqst *req, struct n
return status;
READ_BUF(8);
COPYMEM(readdir->verifier.data, 8);
- dprintk("%s: verifier = 0x%x%x\n",
- __FUNCTION__,
+ dprintk("%s: verifier = %08x:%08x\n",
+ __func__,
((u32 *)readdir->verifier.data)[0],
((u32 *)readdir->verifier.data)[1]);
@@ -3505,7 +3521,12 @@ static int decode_readdir(struct xdr_stream *xdr, struct rpc_rqst *req, struct n
kaddr = p = kmap_atomic(page, KM_USER0);
end = p + ((pglen + readdir->pgbase) >> 2);
entry = p;
- for (nr = 0; *p++; nr++) {
+
+ /* Make sure the packet actually has a value_follows and EOF entry */
+ if ((entry + 1) > end)
+ goto short_pkt;
+
+ for (; *p++; nr++) {
u32 len, attrlen, xlen;
if (end - p < 3)
goto short_pkt;
@@ -3532,20 +3553,32 @@ static int decode_readdir(struct xdr_stream *xdr, struct rpc_rqst *req, struct n
p += attrlen; /* attributes */
entry = p;
}
- if (!nr && (entry[0] != 0 || entry[1] == 0))
- goto short_pkt;
+ /*
+ * Apparently some server sends responses that are a valid size, but
+ * contain no entries, and have value_follows==0 and EOF==0. For
+ * those, just set the EOF marker.
+ */
+ if (!nr && entry[1] == 0) {
+ dprintk("NFS: readdir reply truncated!\n");
+ entry[1] = 1;
+ }
out:
kunmap_atomic(kaddr, KM_USER0);
return 0;
short_pkt:
+ /*
+ * When we get a short packet there are 2 possibilities. We can
+ * return an error, or fix up the response to look like a valid
+ * response and return what we have so far. If there are no
+ * entries and the packet was short, then return -EIO. If there
+ * are valid entries in the response, return them and pretend that
+ * the call was successful, but incomplete. The caller can retry the
+ * readdir starting at the last cookie.
+ */
dprintk("%s: short packet at entry %d\n", __FUNCTION__, nr);
entry[0] = entry[1] = 0;
- /* truncate listing ? */
- if (!nr) {
- dprintk("NFS: readdir reply truncated!\n");
- entry[1] = 1;
- }
- goto out;
+ if (nr)
+ goto out;
err_unmap:
kunmap_atomic(kaddr, KM_USER0);
return -errno_NFSERR_IO;
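The short_pkt comment above reduces to one rule: a truncated READDIR reply with no decoded entries is a hard error, while a reply that already yielded entries is terminated in place and returned as a successful but incomplete listing that the caller can resume from the last cookie. A compact illustration, where entries_parsed and terminator stand in for the kernel's nr counter and the two words at 'entry':

#include <errno.h>

static int finish_short_readdir(unsigned int entries_parsed,
                                unsigned int *terminator)
{
        if (entries_parsed == 0)
                return -EIO;            /* nothing usable was decoded */

        terminator[0] = 0;              /* value_follows = false */
        terminator[1] = 0;              /* eof = false, so the caller resumes
                                         * from the last cookie it was given */
        return 0;                       /* partial but valid listing */
}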
@@ -3727,7 +3760,7 @@ static int decode_setclientid(struct xdr_stream *xdr, struct nfs_client *clp)
READ_BUF(len);
return -NFSERR_CLID_INUSE;
} else
- return -nfs4_stat_to_errno(nfserr);
+ return nfs4_stat_to_errno(nfserr);
return 0;
}
@@ -4389,7 +4422,7 @@ static int nfs4_xdr_dec_fsinfo(struct rpc_rqst *req, __be32 *p, struct nfs_fsinf
if (!status)
status = decode_fsinfo(&xdr, fsinfo);
if (!status)
- status = -nfs4_stat_to_errno(hdr.status);
+ status = nfs4_stat_to_errno(hdr.status);
return status;
}
@@ -4479,7 +4512,7 @@ static int nfs4_xdr_dec_setclientid(struct rpc_rqst *req, __be32 *p,
if (!status)
status = decode_setclientid(&xdr, clp);
if (!status)
- status = -nfs4_stat_to_errno(hdr.status);
+ status = nfs4_stat_to_errno(hdr.status);
return status;
}
@@ -4501,7 +4534,7 @@ static int nfs4_xdr_dec_setclientid_confirm(struct rpc_rqst *req, __be32 *p, str
if (!status)
status = decode_fsinfo(&xdr, fsinfo);
if (!status)
- status = -nfs4_stat_to_errno(hdr.status);
+ status = nfs4_stat_to_errno(hdr.status);
return status;
}
@@ -4611,42 +4644,42 @@ static struct {
int errno;
} nfs_errtbl[] = {
{ NFS4_OK, 0 },
- { NFS4ERR_PERM, EPERM },
- { NFS4ERR_NOENT, ENOENT },
- { NFS4ERR_IO, errno_NFSERR_IO },
- { NFS4ERR_NXIO, ENXIO },
- { NFS4ERR_ACCESS, EACCES },
- { NFS4ERR_EXIST, EEXIST },
- { NFS4ERR_XDEV, EXDEV },
- { NFS4ERR_NOTDIR, ENOTDIR },
- { NFS4ERR_ISDIR, EISDIR },
- { NFS4ERR_INVAL, EINVAL },
- { NFS4ERR_FBIG, EFBIG },
- { NFS4ERR_NOSPC, ENOSPC },
- { NFS4ERR_ROFS, EROFS },
- { NFS4ERR_MLINK, EMLINK },
- { NFS4ERR_NAMETOOLONG, ENAMETOOLONG },
- { NFS4ERR_NOTEMPTY, ENOTEMPTY },
- { NFS4ERR_DQUOT, EDQUOT },
- { NFS4ERR_STALE, ESTALE },
- { NFS4ERR_BADHANDLE, EBADHANDLE },
- { NFS4ERR_BADOWNER, EINVAL },
- { NFS4ERR_BADNAME, EINVAL },
- { NFS4ERR_BAD_COOKIE, EBADCOOKIE },
- { NFS4ERR_NOTSUPP, ENOTSUPP },
- { NFS4ERR_TOOSMALL, ETOOSMALL },
- { NFS4ERR_SERVERFAULT, ESERVERFAULT },
- { NFS4ERR_BADTYPE, EBADTYPE },
- { NFS4ERR_LOCKED, EAGAIN },
- { NFS4ERR_RESOURCE, EREMOTEIO },
- { NFS4ERR_SYMLINK, ELOOP },
- { NFS4ERR_OP_ILLEGAL, EOPNOTSUPP },
- { NFS4ERR_DEADLOCK, EDEADLK },
- { NFS4ERR_WRONGSEC, EPERM }, /* FIXME: this needs
+ { NFS4ERR_PERM, -EPERM },
+ { NFS4ERR_NOENT, -ENOENT },
+ { NFS4ERR_IO, -errno_NFSERR_IO},
+ { NFS4ERR_NXIO, -ENXIO },
+ { NFS4ERR_ACCESS, -EACCES },
+ { NFS4ERR_EXIST, -EEXIST },
+ { NFS4ERR_XDEV, -EXDEV },
+ { NFS4ERR_NOTDIR, -ENOTDIR },
+ { NFS4ERR_ISDIR, -EISDIR },
+ { NFS4ERR_INVAL, -EINVAL },
+ { NFS4ERR_FBIG, -EFBIG },
+ { NFS4ERR_NOSPC, -ENOSPC },
+ { NFS4ERR_ROFS, -EROFS },
+ { NFS4ERR_MLINK, -EMLINK },
+ { NFS4ERR_NAMETOOLONG, -ENAMETOOLONG },
+ { NFS4ERR_NOTEMPTY, -ENOTEMPTY },
+ { NFS4ERR_DQUOT, -EDQUOT },
+ { NFS4ERR_STALE, -ESTALE },
+ { NFS4ERR_BADHANDLE, -EBADHANDLE },
+ { NFS4ERR_BADOWNER, -EINVAL },
+ { NFS4ERR_BADNAME, -EINVAL },
+ { NFS4ERR_BAD_COOKIE, -EBADCOOKIE },
+ { NFS4ERR_NOTSUPP, -ENOTSUPP },
+ { NFS4ERR_TOOSMALL, -ETOOSMALL },
+ { NFS4ERR_SERVERFAULT, -ESERVERFAULT },
+ { NFS4ERR_BADTYPE, -EBADTYPE },
+ { NFS4ERR_LOCKED, -EAGAIN },
+ { NFS4ERR_RESOURCE, -EREMOTEIO },
+ { NFS4ERR_SYMLINK, -ELOOP },
+ { NFS4ERR_OP_ILLEGAL, -EOPNOTSUPP },
+ { NFS4ERR_DEADLOCK, -EDEADLK },
+ { NFS4ERR_WRONGSEC, -EPERM }, /* FIXME: this needs
* to be handled by a
* middle-layer.
*/
- { -1, EIO }
+ { -1, -EIO }
};
/*
@@ -4663,14 +4696,14 @@ nfs4_stat_to_errno(int stat)
}
if (stat <= 10000 || stat > 10100) {
/* The server is looney tunes. */
- return ESERVERFAULT;
+ return -ESERVERFAULT;
}
/* If we cannot translate the error, the recovery routines should
* handle it.
* Note: remaining NFSv4 error codes have values > 10000, so should
* not conflict with native Linux error codes.
*/
- return stat;
+ return -stat;
}
#define PROC(proc, argtype, restype) \
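With the table rewritten to hold negative errno values, nfs4_stat_to_errno() now returns a ready-to-use negative code, which is why every caller above drops its own unary minus; mixing the old and new conventions would silently flip the sign of an error. A small standalone model of the new convention, with the table abbreviated and the numeric NFS4ERR_* values written out purely for illustration:

#include <errno.h>
#include <stddef.h>

static const struct {
        int stat;               /* positive NFS4ERR_* value from the wire */
        int errno_val;          /* negative Linux errno */
} errtbl[] = {
        {  0, 0       },        /* NFS4_OK        */
        {  1, -EPERM  },        /* NFS4ERR_PERM   */
        {  2, -ENOENT },        /* NFS4ERR_NOENT  */
        { 13, -EACCES },        /* NFS4ERR_ACCESS */
};

static int stat_to_errno(int stat)
{
        size_t i;

        for (i = 0; i < sizeof(errtbl) / sizeof(errtbl[0]); i++)
                if (errtbl[i].stat == stat)
                        return errtbl[i].errno_val;
        /* Unrecognised codes are NFSv4-specific (> 10000); hand them back
         * negated so the recovery code can sort them out. */
        return -stat;
}

As the decode_op_hdr() hunk above shows, callers now return nfs4_stat_to_errno(nfserr) directly instead of negating the result.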
diff --git a/fs/nfs/read.c b/fs/nfs/read.c
index 5a70be589bb..16f57e0af99 100644
--- a/fs/nfs/read.c
+++ b/fs/nfs/read.c
@@ -58,22 +58,19 @@ struct nfs_read_data *nfs_readdata_alloc(unsigned int pagecount)
return p;
}
-static void nfs_readdata_rcu_free(struct rcu_head *head)
+static void nfs_readdata_free(struct nfs_read_data *p)
{
- struct nfs_read_data *p = container_of(head, struct nfs_read_data, task.u.tk_rcu);
if (p && (p->pagevec != &p->page_array[0]))
kfree(p->pagevec);
mempool_free(p, nfs_rdata_mempool);
}
-static void nfs_readdata_free(struct nfs_read_data *rdata)
-{
- call_rcu_bh(&rdata->task.u.tk_rcu, nfs_readdata_rcu_free);
-}
-
void nfs_readdata_release(void *data)
{
- nfs_readdata_free(data);
+ struct nfs_read_data *rdata = data;
+
+ put_nfs_open_context(rdata->args.context);
+ nfs_readdata_free(rdata);
}
static
@@ -156,7 +153,7 @@ static void nfs_readpage_release(struct nfs_page *req)
/*
* Set up the NFS read request struct
*/
-static void nfs_read_rpcsetup(struct nfs_page *req, struct nfs_read_data *data,
+static int nfs_read_rpcsetup(struct nfs_page *req, struct nfs_read_data *data,
const struct rpc_call_ops *call_ops,
unsigned int count, unsigned int offset)
{
@@ -174,6 +171,7 @@ static void nfs_read_rpcsetup(struct nfs_page *req, struct nfs_read_data *data,
.rpc_message = &msg,
.callback_ops = call_ops,
.callback_data = data,
+ .workqueue = nfsiod_workqueue,
.flags = RPC_TASK_ASYNC | swap_flags,
};
@@ -186,7 +184,7 @@ static void nfs_read_rpcsetup(struct nfs_page *req, struct nfs_read_data *data,
data->args.pgbase = req->wb_pgbase + offset;
data->args.pages = data->pagevec;
data->args.count = count;
- data->args.context = req->wb_context;
+ data->args.context = get_nfs_open_context(req->wb_context);
data->res.fattr = &data->fattr;
data->res.count = count;
@@ -204,8 +202,10 @@ static void nfs_read_rpcsetup(struct nfs_page *req, struct nfs_read_data *data,
(unsigned long long)data->args.offset);
task = rpc_run_task(&task_setup_data);
- if (!IS_ERR(task))
- rpc_put_task(task);
+ if (IS_ERR(task))
+ return PTR_ERR(task);
+ rpc_put_task(task);
+ return 0;
}
static void
@@ -242,6 +242,7 @@ static int nfs_pagein_multi(struct inode *inode, struct list_head *head, unsigne
size_t rsize = NFS_SERVER(inode)->rsize, nbytes;
unsigned int offset;
int requests = 0;
+ int ret = 0;
LIST_HEAD(list);
nfs_list_remove_request(req);
@@ -253,7 +254,6 @@ static int nfs_pagein_multi(struct inode *inode, struct list_head *head, unsigne
data = nfs_readdata_alloc(1);
if (!data)
goto out_bad;
- INIT_LIST_HEAD(&data->pages);
list_add(&data->pages, &list);
requests++;
nbytes -= len;
@@ -264,6 +264,8 @@ static int nfs_pagein_multi(struct inode *inode, struct list_head *head, unsigne
offset = 0;
nbytes = count;
do {
+ int ret2;
+
data = list_entry(list.next, struct nfs_read_data, pages);
list_del_init(&data->pages);
@@ -271,13 +273,15 @@ static int nfs_pagein_multi(struct inode *inode, struct list_head *head, unsigne
if (nbytes < rsize)
rsize = nbytes;
- nfs_read_rpcsetup(req, data, &nfs_read_partial_ops,
+ ret2 = nfs_read_rpcsetup(req, data, &nfs_read_partial_ops,
rsize, offset);
+ if (ret == 0)
+ ret = ret2;
offset += rsize;
nbytes -= rsize;
} while (nbytes != 0);
- return 0;
+ return ret;
out_bad:
while (!list_empty(&list)) {
@@ -295,12 +299,12 @@ static int nfs_pagein_one(struct inode *inode, struct list_head *head, unsigned
struct nfs_page *req;
struct page **pages;
struct nfs_read_data *data;
+ int ret = -ENOMEM;
data = nfs_readdata_alloc(npages);
if (!data)
goto out_bad;
- INIT_LIST_HEAD(&data->pages);
pages = data->pagevec;
while (!list_empty(head)) {
req = nfs_list_entry(head->next);
@@ -311,11 +315,10 @@ static int nfs_pagein_one(struct inode *inode, struct list_head *head, unsigned
}
req = nfs_list_entry(data->pages.next);
- nfs_read_rpcsetup(req, data, &nfs_read_full_ops, count, 0);
- return 0;
+ return nfs_read_rpcsetup(req, data, &nfs_read_full_ops, count, 0);
out_bad:
nfs_async_read_error(head);
- return -ENOMEM;
+ return ret;
}
/*
@@ -342,26 +345,25 @@ int nfs_readpage_result(struct rpc_task *task, struct nfs_read_data *data)
return 0;
}
-static int nfs_readpage_retry(struct rpc_task *task, struct nfs_read_data *data)
+static void nfs_readpage_retry(struct rpc_task *task, struct nfs_read_data *data)
{
struct nfs_readargs *argp = &data->args;
struct nfs_readres *resp = &data->res;
if (resp->eof || resp->count == argp->count)
- return 0;
+ return;
/* This is a short read! */
nfs_inc_stats(data->inode, NFSIOS_SHORTREAD);
/* Has the server at least made some progress? */
if (resp->count == 0)
- return 0;
+ return;
/* Yes, so retry the read at the end of the data */
argp->offset += resp->count;
argp->pgbase += resp->count;
argp->count -= resp->count;
rpc_restart_call(task);
- return -EAGAIN;
}
/*
@@ -370,29 +372,37 @@ static int nfs_readpage_retry(struct rpc_task *task, struct nfs_read_data *data)
static void nfs_readpage_result_partial(struct rpc_task *task, void *calldata)
{
struct nfs_read_data *data = calldata;
- struct nfs_page *req = data->req;
- struct page *page = req->wb_page;
if (nfs_readpage_result(task, data) != 0)
return;
+ if (task->tk_status < 0)
+ return;
- if (likely(task->tk_status >= 0)) {
- nfs_readpage_truncate_uninitialised_page(data);
- if (nfs_readpage_retry(task, data) != 0)
- return;
- }
- if (unlikely(task->tk_status < 0))
+ nfs_readpage_truncate_uninitialised_page(data);
+ nfs_readpage_retry(task, data);
+}
+
+static void nfs_readpage_release_partial(void *calldata)
+{
+ struct nfs_read_data *data = calldata;
+ struct nfs_page *req = data->req;
+ struct page *page = req->wb_page;
+ int status = data->task.tk_status;
+
+ if (status < 0)
SetPageError(page);
+
if (atomic_dec_and_test(&req->wb_complete)) {
if (!PageError(page))
SetPageUptodate(page);
nfs_readpage_release(req);
}
+ nfs_readdata_release(calldata);
}
static const struct rpc_call_ops nfs_read_partial_ops = {
.rpc_call_done = nfs_readpage_result_partial,
- .rpc_release = nfs_readdata_release,
+ .rpc_release = nfs_readpage_release_partial,
};
static void nfs_readpage_set_pages_uptodate(struct nfs_read_data *data)
@@ -427,29 +437,35 @@ static void nfs_readpage_result_full(struct rpc_task *task, void *calldata)
if (nfs_readpage_result(task, data) != 0)
return;
+ if (task->tk_status < 0)
+ return;
/*
* Note: nfs_readpage_retry may change the values of
* data->args. In the multi-page case, we therefore need
* to ensure that we call nfs_readpage_set_pages_uptodate()
* first.
*/
- if (likely(task->tk_status >= 0)) {
- nfs_readpage_truncate_uninitialised_page(data);
- nfs_readpage_set_pages_uptodate(data);
- if (nfs_readpage_retry(task, data) != 0)
- return;
- }
+ nfs_readpage_truncate_uninitialised_page(data);
+ nfs_readpage_set_pages_uptodate(data);
+ nfs_readpage_retry(task, data);
+}
+
+static void nfs_readpage_release_full(void *calldata)
+{
+ struct nfs_read_data *data = calldata;
+
while (!list_empty(&data->pages)) {
struct nfs_page *req = nfs_list_entry(data->pages.next);
nfs_list_remove_request(req);
nfs_readpage_release(req);
}
+ nfs_readdata_release(calldata);
}
static const struct rpc_call_ops nfs_read_full_ops = {
.rpc_call_done = nfs_readpage_result_full,
- .rpc_release = nfs_readdata_release,
+ .rpc_release = nfs_readpage_release_full,
};
/*
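nfs_read_rpcsetup() now reports whether the RPC task could actually be started, and nfs_pagein_multi() keeps dispatching the remaining chunks while remembering only the first failure. A short sketch of that aggregation pattern, where struct chunk and start_chunk() are hypothetical stand-ins for the per-request data and for nfs_read_rpcsetup():

struct chunk;                           /* stand-in for one sub-request */
int start_chunk(struct chunk *chunk);   /* stand-in for nfs_read_rpcsetup() */

static int issue_chunks(struct chunk **chunks, int nchunks)
{
        int ret = 0;
        int i;

        for (i = 0; i < nchunks; i++) {
                int ret2 = start_chunk(chunks[i]);

                /* remember the first error, but still try the rest */
                if (ret == 0)
                        ret = ret2;
        }
        return ret;
}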
diff --git a/fs/nfs/super.c b/fs/nfs/super.c
index f9219024f31..fa220dc7460 100644
--- a/fs/nfs/super.c
+++ b/fs/nfs/super.c
@@ -198,7 +198,7 @@ static match_table_t nfs_secflavor_tokens = {
};
-static void nfs_umount_begin(struct vfsmount *, int);
+static void nfs_umount_begin(struct super_block *);
static int nfs_statfs(struct dentry *, struct kstatfs *);
static int nfs_show_options(struct seq_file *, struct vfsmount *);
static int nfs_show_stats(struct seq_file *, struct vfsmount *);
@@ -441,10 +441,52 @@ static const char *nfs_pseudoflavour_to_name(rpc_authflavor_t flavour)
return sec_flavours[i].str;
}
+static void nfs_show_mountd_options(struct seq_file *m, struct nfs_server *nfss,
+ int showdefaults)
+{
+ struct sockaddr *sap = (struct sockaddr *)&nfss->mountd_address;
+
+ switch (sap->sa_family) {
+ case AF_INET: {
+ struct sockaddr_in *sin = (struct sockaddr_in *)sap;
+ seq_printf(m, ",mountaddr=" NIPQUAD_FMT,
+ NIPQUAD(sin->sin_addr.s_addr));
+ break;
+ }
+ case AF_INET6: {
+ struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)sap;
+ seq_printf(m, ",mountaddr=" NIP6_FMT,
+ NIP6(sin6->sin6_addr));
+ break;
+ }
+ default:
+ if (showdefaults)
+ seq_printf(m, ",mountaddr=unspecified");
+ }
+
+ if (nfss->mountd_version || showdefaults)
+ seq_printf(m, ",mountvers=%u", nfss->mountd_version);
+ if (nfss->mountd_port || showdefaults)
+ seq_printf(m, ",mountport=%u", nfss->mountd_port);
+
+ switch (nfss->mountd_protocol) {
+ case IPPROTO_UDP:
+ seq_printf(m, ",mountproto=udp");
+ break;
+ case IPPROTO_TCP:
+ seq_printf(m, ",mountproto=tcp");
+ break;
+ default:
+ if (showdefaults)
+ seq_printf(m, ",mountproto=auto");
+ }
+}
+
/*
* Describe the mount options in force on this server representation
*/
-static void nfs_show_mount_options(struct seq_file *m, struct nfs_server *nfss, int showdefaults)
+static void nfs_show_mount_options(struct seq_file *m, struct nfs_server *nfss,
+ int showdefaults)
{
static const struct proc_nfs_info {
int flag;
@@ -452,6 +494,8 @@ static void nfs_show_mount_options(struct seq_file *m, struct nfs_server *nfss,
const char *nostr;
} nfs_info[] = {
{ NFS_MOUNT_SOFT, ",soft", ",hard" },
+ { NFS_MOUNT_INTR, ",intr", ",nointr" },
+ { NFS_MOUNT_POSIX, ",posix", "" },
{ NFS_MOUNT_NOCTO, ",nocto", "" },
{ NFS_MOUNT_NOAC, ",noac", "" },
{ NFS_MOUNT_NONLM, ",nolock", "" },
@@ -462,18 +506,22 @@ static void nfs_show_mount_options(struct seq_file *m, struct nfs_server *nfss,
};
const struct proc_nfs_info *nfs_infop;
struct nfs_client *clp = nfss->nfs_client;
-
- seq_printf(m, ",vers=%d", clp->rpc_ops->version);
- seq_printf(m, ",rsize=%d", nfss->rsize);
- seq_printf(m, ",wsize=%d", nfss->wsize);
+ u32 version = clp->rpc_ops->version;
+
+ seq_printf(m, ",vers=%u", version);
+ seq_printf(m, ",rsize=%u", nfss->rsize);
+ seq_printf(m, ",wsize=%u", nfss->wsize);
+ if (nfss->bsize != 0)
+ seq_printf(m, ",bsize=%u", nfss->bsize);
+ seq_printf(m, ",namlen=%u", nfss->namelen);
if (nfss->acregmin != 3*HZ || showdefaults)
- seq_printf(m, ",acregmin=%d", nfss->acregmin/HZ);
+ seq_printf(m, ",acregmin=%u", nfss->acregmin/HZ);
if (nfss->acregmax != 60*HZ || showdefaults)
- seq_printf(m, ",acregmax=%d", nfss->acregmax/HZ);
+ seq_printf(m, ",acregmax=%u", nfss->acregmax/HZ);
if (nfss->acdirmin != 30*HZ || showdefaults)
- seq_printf(m, ",acdirmin=%d", nfss->acdirmin/HZ);
+ seq_printf(m, ",acdirmin=%u", nfss->acdirmin/HZ);
if (nfss->acdirmax != 60*HZ || showdefaults)
- seq_printf(m, ",acdirmax=%d", nfss->acdirmax/HZ);
+ seq_printf(m, ",acdirmax=%u", nfss->acdirmax/HZ);
for (nfs_infop = nfs_info; nfs_infop->flag; nfs_infop++) {
if (nfss->flags & nfs_infop->flag)
seq_puts(m, nfs_infop->str);
@@ -482,9 +530,24 @@ static void nfs_show_mount_options(struct seq_file *m, struct nfs_server *nfss,
}
seq_printf(m, ",proto=%s",
rpc_peeraddr2str(nfss->client, RPC_DISPLAY_PROTO));
+ if (version == 4) {
+ if (nfss->port != NFS_PORT)
+ seq_printf(m, ",port=%u", nfss->port);
+ } else
+ if (nfss->port)
+ seq_printf(m, ",port=%u", nfss->port);
+
seq_printf(m, ",timeo=%lu", 10U * nfss->client->cl_timeout->to_initval / HZ);
seq_printf(m, ",retrans=%u", nfss->client->cl_timeout->to_retries);
seq_printf(m, ",sec=%s", nfs_pseudoflavour_to_name(nfss->client->cl_auth->au_flavor));
+
+ if (version != 4)
+ nfs_show_mountd_options(m, nfss, showdefaults);
+
+#ifdef CONFIG_NFS_V4
+ if (clp->rpc_ops->version == 4)
+ seq_printf(m, ",clientaddr=%s", clp->cl_ipaddr);
+#endif
}
/*
@@ -529,10 +592,10 @@ static int nfs_show_stats(struct seq_file *m, struct vfsmount *mnt)
seq_printf(m, "\n\tcaps:\t");
seq_printf(m, "caps=0x%x", nfss->caps);
- seq_printf(m, ",wtmult=%d", nfss->wtmult);
- seq_printf(m, ",dtsize=%d", nfss->dtsize);
- seq_printf(m, ",bsize=%d", nfss->bsize);
- seq_printf(m, ",namelen=%d", nfss->namelen);
+ seq_printf(m, ",wtmult=%u", nfss->wtmult);
+ seq_printf(m, ",dtsize=%u", nfss->dtsize);
+ seq_printf(m, ",bsize=%u", nfss->bsize);
+ seq_printf(m, ",namlen=%u", nfss->namelen);
#ifdef CONFIG_NFS_V4
if (nfss->nfs_client->rpc_ops->version == 4) {
@@ -546,9 +609,9 @@ static int nfs_show_stats(struct seq_file *m, struct vfsmount *mnt)
/*
* Display security flavor in effect for this mount
*/
- seq_printf(m, "\n\tsec:\tflavor=%d", auth->au_ops->au_flavor);
+ seq_printf(m, "\n\tsec:\tflavor=%u", auth->au_ops->au_flavor);
if (auth->au_flavor)
- seq_printf(m, ",pseudoflavor=%d", auth->au_flavor);
+ seq_printf(m, ",pseudoflavor=%u", auth->au_flavor);
/*
* Display superblock I/O counters
@@ -584,13 +647,11 @@ static int nfs_show_stats(struct seq_file *m, struct vfsmount *mnt)
* Begin unmount by attempting to remove all automounted mountpoints we added
* in response to xdev traversals and referrals
*/
-static void nfs_umount_begin(struct vfsmount *vfsmnt, int flags)
+static void nfs_umount_begin(struct super_block *sb)
{
- struct nfs_server *server = NFS_SB(vfsmnt->mnt_sb);
+ struct nfs_server *server = NFS_SB(sb);
struct rpc_clnt *rpc;
- if (!(flags & MNT_FORCE))
- return;
/* -EIO all pending I/O */
rpc = server->client_acl;
if (!IS_ERR(rpc))
@@ -683,7 +744,6 @@ static int nfs_parse_mount_options(char *raw,
struct nfs_parsed_mount_data *mnt)
{
char *p, *string, *secdata;
- unsigned short port = 0;
int rc;
if (!raw) {
@@ -798,7 +858,7 @@ static int nfs_parse_mount_options(char *raw,
return 0;
if (option < 0 || option > 65535)
return 0;
- port = option;
+ mnt->nfs_server.port = option;
break;
case Opt_rsize:
if (match_int(args, &mnt->rsize))
@@ -1048,7 +1108,8 @@ static int nfs_parse_mount_options(char *raw,
}
}
- nfs_set_port((struct sockaddr *)&mnt->nfs_server.address, port);
+ nfs_set_port((struct sockaddr *)&mnt->nfs_server.address,
+ mnt->nfs_server.port);
return 1;
@@ -1169,7 +1230,9 @@ static int nfs_validate_mount_data(void *options,
args->acregmax = 60;
args->acdirmin = 30;
args->acdirmax = 60;
+ args->mount_server.port = 0; /* autobind unless user sets port */
args->mount_server.protocol = XPRT_TRANSPORT_UDP;
+ args->nfs_server.port = 0; /* autobind unless user sets port */
args->nfs_server.protocol = XPRT_TRANSPORT_TCP;
switch (data->version) {
@@ -1208,7 +1271,6 @@ static int nfs_validate_mount_data(void *options,
args->flags = data->flags;
args->rsize = data->rsize;
args->wsize = data->wsize;
- args->flags = data->flags;
args->timeo = data->timeo;
args->retrans = data->retrans;
args->acregmin = data->acregmin;
@@ -1230,6 +1292,8 @@ static int nfs_validate_mount_data(void *options,
args->namlen = data->namlen;
args->bsize = data->bsize;
args->auth_flavors[0] = data->pseudoflavor;
+ if (!args->nfs_server.hostname)
+ goto out_nomem;
/*
* The legacy version 6 binary mount data from userspace has a
@@ -1276,6 +1340,8 @@ static int nfs_validate_mount_data(void *options,
len = c - dev_name;
/* N.B. caller will free nfs_server.hostname in all cases */
args->nfs_server.hostname = kstrndup(dev_name, len, GFP_KERNEL);
+ if (!args->nfs_server.hostname)
+ goto out_nomem;
c++;
if (strlen(c) > NFS_MAXPATHLEN)
@@ -1319,6 +1385,10 @@ out_v3_not_compiled:
return -EPROTONOSUPPORT;
#endif /* !CONFIG_NFS_V3 */
+out_nomem:
+ dfprintk(MOUNT, "NFS: not enough memory to handle mount options\n");
+ return -ENOMEM;
+
out_no_address:
dfprintk(MOUNT, "NFS: mount program didn't pass remote address\n");
return -EINVAL;
@@ -1706,28 +1776,6 @@ static void nfs4_fill_super(struct super_block *sb)
}
/*
- * If the user didn't specify a port, set the port number to
- * the NFS version 4 default port.
- */
-static void nfs4_default_port(struct sockaddr *sap)
-{
- switch (sap->sa_family) {
- case AF_INET: {
- struct sockaddr_in *ap = (struct sockaddr_in *)sap;
- if (ap->sin_port == 0)
- ap->sin_port = htons(NFS_PORT);
- break;
- }
- case AF_INET6: {
- struct sockaddr_in6 *ap = (struct sockaddr_in6 *)sap;
- if (ap->sin6_port == 0)
- ap->sin6_port = htons(NFS_PORT);
- break;
- }
- }
-}
-
-/*
* Validate NFSv4 mount options
*/
static int nfs4_validate_mount_data(void *options,
@@ -1751,6 +1799,7 @@ static int nfs4_validate_mount_data(void *options,
args->acregmax = 60;
args->acdirmin = 30;
args->acdirmax = 60;
+ args->nfs_server.port = NFS_PORT; /* 2049 unless user set port= */
args->nfs_server.protocol = XPRT_TRANSPORT_TCP;
switch (data->version) {
@@ -1767,9 +1816,6 @@ static int nfs4_validate_mount_data(void *options,
&args->nfs_server.address))
goto out_no_address;
- nfs4_default_port((struct sockaddr *)
- &args->nfs_server.address);
-
switch (data->auth_flavourlen) {
case 0:
args->auth_flavors[0] = RPC_AUTH_UNIX;
@@ -1827,9 +1873,6 @@ static int nfs4_validate_mount_data(void *options,
&args->nfs_server.address))
return -EINVAL;
- nfs4_default_port((struct sockaddr *)
- &args->nfs_server.address);
-
switch (args->auth_flavor_len) {
case 0:
args->auth_flavors[0] = RPC_AUTH_UNIX;
@@ -1852,12 +1895,16 @@ static int nfs4_validate_mount_data(void *options,
return -ENAMETOOLONG;
/* N.B. caller will free nfs_server.hostname in all cases */
args->nfs_server.hostname = kstrndup(dev_name, len, GFP_KERNEL);
+ if (!args->nfs_server.hostname)
+ goto out_nomem;
c++; /* step over the ':' */
len = strlen(c);
if (len > NFS4_MAXPATHLEN)
return -ENAMETOOLONG;
args->nfs_server.export_path = kstrndup(c, len, GFP_KERNEL);
+ if (!args->nfs_server.export_path)
+ goto out_nomem;
dprintk("NFS: MNTPATH: '%s'\n", args->nfs_server.export_path);
@@ -1879,6 +1926,10 @@ out_inval_auth:
data->auth_flavourlen);
return -EINVAL;
+out_nomem:
+ dfprintk(MOUNT, "NFS4: not enough memory to handle mount options\n");
+ return -ENOMEM;
+
out_no_address:
dfprintk(MOUNT, "NFS4: mount program didn't pass remote address\n");
return -EINVAL;
diff --git a/fs/nfs/symlink.c b/fs/nfs/symlink.c
index 83e865a16ad..412738dbfbc 100644
--- a/fs/nfs/symlink.c
+++ b/fs/nfs/symlink.c
@@ -10,7 +10,6 @@
* nfs symlink handling code
*/
-#define NFS_NEED_XDR_TYPES
#include <linux/time.h>
#include <linux/errno.h>
#include <linux/sunrpc/clnt.h>
diff --git a/fs/nfs/unlink.c b/fs/nfs/unlink.c
index 75741536342..3adf8b26646 100644
--- a/fs/nfs/unlink.c
+++ b/fs/nfs/unlink.c
@@ -234,7 +234,7 @@ nfs_async_unlink(struct inode *dir, struct dentry *dentry)
if (data == NULL)
goto out;
- data->cred = rpcauth_lookupcred(NFS_CLIENT(dir)->cl_auth, 0);
+ data->cred = rpc_lookup_cred();
if (IS_ERR(data->cred)) {
status = PTR_ERR(data->cred);
goto out_free;
diff --git a/fs/nfs/write.c b/fs/nfs/write.c
index bed63416a55..1ade11d1ba0 100644
--- a/fs/nfs/write.c
+++ b/fs/nfs/write.c
@@ -48,7 +48,7 @@ static struct kmem_cache *nfs_wdata_cachep;
static mempool_t *nfs_wdata_mempool;
static mempool_t *nfs_commit_mempool;
-struct nfs_write_data *nfs_commit_alloc(void)
+struct nfs_write_data *nfs_commitdata_alloc(void)
{
struct nfs_write_data *p = mempool_alloc(nfs_commit_mempool, GFP_NOFS);
@@ -59,19 +59,13 @@ struct nfs_write_data *nfs_commit_alloc(void)
return p;
}
-static void nfs_commit_rcu_free(struct rcu_head *head)
+void nfs_commit_free(struct nfs_write_data *p)
{
- struct nfs_write_data *p = container_of(head, struct nfs_write_data, task.u.tk_rcu);
if (p && (p->pagevec != &p->page_array[0]))
kfree(p->pagevec);
mempool_free(p, nfs_commit_mempool);
}
-void nfs_commit_free(struct nfs_write_data *wdata)
-{
- call_rcu_bh(&wdata->task.u.tk_rcu, nfs_commit_rcu_free);
-}
-
struct nfs_write_data *nfs_writedata_alloc(unsigned int pagecount)
{
struct nfs_write_data *p = mempool_alloc(nfs_wdata_mempool, GFP_NOFS);
@@ -93,21 +87,18 @@ struct nfs_write_data *nfs_writedata_alloc(unsigned int pagecount)
return p;
}
-static void nfs_writedata_rcu_free(struct rcu_head *head)
+static void nfs_writedata_free(struct nfs_write_data *p)
{
- struct nfs_write_data *p = container_of(head, struct nfs_write_data, task.u.tk_rcu);
if (p && (p->pagevec != &p->page_array[0]))
kfree(p->pagevec);
mempool_free(p, nfs_wdata_mempool);
}
-static void nfs_writedata_free(struct nfs_write_data *wdata)
+void nfs_writedata_release(void *data)
{
- call_rcu_bh(&wdata->task.u.tk_rcu, nfs_writedata_rcu_free);
-}
+ struct nfs_write_data *wdata = data;
-void nfs_writedata_release(void *wdata)
-{
+ put_nfs_open_context(wdata->args.context);
nfs_writedata_free(wdata);
}
@@ -291,8 +282,6 @@ static int nfs_page_async_flush(struct nfs_pageio_descriptor *pgio,
spin_unlock(&inode->i_lock);
if (!nfs_pageio_add_request(pgio, req)) {
nfs_redirty_request(req);
- nfs_end_page_writeback(page);
- nfs_clear_page_tag_locked(req);
return pgio->pg_error;
}
return 0;
@@ -366,15 +355,13 @@ int nfs_writepages(struct address_space *mapping, struct writeback_control *wbc)
/*
* Insert a write request into an inode
*/
-static int nfs_inode_add_request(struct inode *inode, struct nfs_page *req)
+static void nfs_inode_add_request(struct inode *inode, struct nfs_page *req)
{
struct nfs_inode *nfsi = NFS_I(inode);
int error;
error = radix_tree_insert(&nfsi->nfs_page_tree, req->wb_index, req);
- BUG_ON(error == -EEXIST);
- if (error)
- return error;
+ BUG_ON(error);
if (!nfsi->npages) {
igrab(inode);
if (nfs_have_delegation(inode, FMODE_WRITE))
@@ -384,8 +371,8 @@ static int nfs_inode_add_request(struct inode *inode, struct nfs_page *req)
set_page_private(req->wb_page, (unsigned long)req);
nfsi->npages++;
kref_get(&req->wb_kref);
- radix_tree_tag_set(&nfsi->nfs_page_tree, req->wb_index, NFS_PAGE_TAG_LOCKED);
- return 0;
+ radix_tree_tag_set(&nfsi->nfs_page_tree, req->wb_index,
+ NFS_PAGE_TAG_LOCKED);
}
/*
@@ -413,7 +400,7 @@ static void nfs_inode_remove_request(struct nfs_page *req)
}
static void
-nfs_redirty_request(struct nfs_page *req)
+nfs_mark_request_dirty(struct nfs_page *req)
{
__set_page_dirty_nobuffers(req->wb_page);
}
@@ -467,7 +454,7 @@ int nfs_reschedule_unstable_write(struct nfs_page *req)
return 1;
}
if (test_and_clear_bit(PG_NEED_RESCHED, &req->wb_flags)) {
- nfs_redirty_request(req);
+ nfs_mark_request_dirty(req);
return 1;
}
return 0;
@@ -597,6 +584,13 @@ static struct nfs_page * nfs_update_request(struct nfs_open_context* ctx,
/* Loop over all inode entries and see if we find
* A request for the page we wish to update
*/
+ if (new) {
+ if (radix_tree_preload(GFP_NOFS)) {
+ nfs_release_request(new);
+ return ERR_PTR(-ENOMEM);
+ }
+ }
+
spin_lock(&inode->i_lock);
req = nfs_page_find_request_locked(page);
if (req) {
@@ -607,28 +601,27 @@ static struct nfs_page * nfs_update_request(struct nfs_open_context* ctx,
error = nfs_wait_on_request(req);
nfs_release_request(req);
if (error < 0) {
- if (new)
+ if (new) {
+ radix_tree_preload_end();
nfs_release_request(new);
+ }
return ERR_PTR(error);
}
continue;
}
spin_unlock(&inode->i_lock);
- if (new)
+ if (new) {
+ radix_tree_preload_end();
nfs_release_request(new);
+ }
break;
}
if (new) {
- int error;
nfs_lock_request_dontget(new);
- error = nfs_inode_add_request(inode, new);
- if (error) {
- spin_unlock(&inode->i_lock);
- nfs_unlock_request(new);
- return ERR_PTR(error);
- }
+ nfs_inode_add_request(inode, new);
spin_unlock(&inode->i_lock);
+ radix_tree_preload_end();
req = new;
goto zero_page;
}
@@ -785,7 +778,7 @@ static int flush_task_priority(int how)
/*
* Set up the argument/result storage required for the RPC call.
*/
-static void nfs_write_rpcsetup(struct nfs_page *req,
+static int nfs_write_rpcsetup(struct nfs_page *req,
struct nfs_write_data *data,
const struct rpc_call_ops *call_ops,
unsigned int count, unsigned int offset,
@@ -806,6 +799,7 @@ static void nfs_write_rpcsetup(struct nfs_page *req,
.rpc_message = &msg,
.callback_ops = call_ops,
.callback_data = data,
+ .workqueue = nfsiod_workqueue,
.flags = flags,
.priority = priority,
};
@@ -822,7 +816,7 @@ static void nfs_write_rpcsetup(struct nfs_page *req,
data->args.pgbase = req->wb_pgbase + offset;
data->args.pages = data->pagevec;
data->args.count = count;
- data->args.context = req->wb_context;
+ data->args.context = get_nfs_open_context(req->wb_context);
data->args.stable = NFS_UNSTABLE;
if (how & FLUSH_STABLE) {
data->args.stable = NFS_DATA_SYNC;
@@ -847,8 +841,21 @@ static void nfs_write_rpcsetup(struct nfs_page *req,
(unsigned long long)data->args.offset);
task = rpc_run_task(&task_setup_data);
- if (!IS_ERR(task))
- rpc_put_task(task);
+ if (IS_ERR(task))
+ return PTR_ERR(task);
+ rpc_put_task(task);
+ return 0;
+}
+
+/* If an nfs_flush_* function fails, it should remove reqs from @head and
+ * call this on each, which will prepare them to be retried on the next
+ * writeback using standard NFS.
+ */
+static void nfs_redirty_request(struct nfs_page *req)
+{
+ nfs_mark_request_dirty(req);
+ nfs_end_page_writeback(req->wb_page);
+ nfs_clear_page_tag_locked(req);
}
/*
@@ -863,6 +870,7 @@ static int nfs_flush_multi(struct inode *inode, struct list_head *head, unsigned
size_t wsize = NFS_SERVER(inode)->wsize, nbytes;
unsigned int offset;
int requests = 0;
+ int ret = 0;
LIST_HEAD(list);
nfs_list_remove_request(req);
@@ -884,6 +892,8 @@ static int nfs_flush_multi(struct inode *inode, struct list_head *head, unsigned
offset = 0;
nbytes = count;
do {
+ int ret2;
+
data = list_entry(list.next, struct nfs_write_data, pages);
list_del_init(&data->pages);
@@ -891,13 +901,15 @@ static int nfs_flush_multi(struct inode *inode, struct list_head *head, unsigned
if (nbytes < wsize)
wsize = nbytes;
- nfs_write_rpcsetup(req, data, &nfs_write_partial_ops,
+ ret2 = nfs_write_rpcsetup(req, data, &nfs_write_partial_ops,
wsize, offset, how);
+ if (ret == 0)
+ ret = ret2;
offset += wsize;
nbytes -= wsize;
} while (nbytes != 0);
- return 0;
+ return ret;
out_bad:
while (!list_empty(&list)) {
@@ -906,8 +918,6 @@ out_bad:
nfs_writedata_release(data);
}
nfs_redirty_request(req);
- nfs_end_page_writeback(req->wb_page);
- nfs_clear_page_tag_locked(req);
return -ENOMEM;
}
@@ -940,16 +950,12 @@ static int nfs_flush_one(struct inode *inode, struct list_head *head, unsigned i
req = nfs_list_entry(data->pages.next);
/* Set up the argument struct */
- nfs_write_rpcsetup(req, data, &nfs_write_full_ops, count, 0, how);
-
- return 0;
+ return nfs_write_rpcsetup(req, data, &nfs_write_full_ops, count, 0, how);
out_bad:
while (!list_empty(head)) {
req = nfs_list_entry(head->next);
nfs_list_remove_request(req);
nfs_redirty_request(req);
- nfs_end_page_writeback(req->wb_page);
- nfs_clear_page_tag_locked(req);
}
return -ENOMEM;
}
@@ -972,7 +978,6 @@ static void nfs_writeback_done_partial(struct rpc_task *task, void *calldata)
{
struct nfs_write_data *data = calldata;
struct nfs_page *req = data->req;
- struct page *page = req->wb_page;
dprintk("NFS: write (%s/%Ld %d@%Ld)",
req->wb_context->path.dentry->d_inode->i_sb->s_id,
@@ -980,13 +985,20 @@ static void nfs_writeback_done_partial(struct rpc_task *task, void *calldata)
req->wb_bytes,
(long long)req_offset(req));
- if (nfs_writeback_done(task, data) != 0)
- return;
+ nfs_writeback_done(task, data);
+}
- if (task->tk_status < 0) {
+static void nfs_writeback_release_partial(void *calldata)
+{
+ struct nfs_write_data *data = calldata;
+ struct nfs_page *req = data->req;
+ struct page *page = req->wb_page;
+ int status = data->task.tk_status;
+
+ if (status < 0) {
nfs_set_pageerror(page);
- nfs_context_set_write_error(req->wb_context, task->tk_status);
- dprintk(", error = %d\n", task->tk_status);
+ nfs_context_set_write_error(req->wb_context, status);
+ dprintk(", error = %d\n", status);
goto out;
}
@@ -1011,11 +1023,12 @@ static void nfs_writeback_done_partial(struct rpc_task *task, void *calldata)
out:
if (atomic_dec_and_test(&req->wb_complete))
nfs_writepage_release(req);
+ nfs_writedata_release(calldata);
}
static const struct rpc_call_ops nfs_write_partial_ops = {
.rpc_call_done = nfs_writeback_done_partial,
- .rpc_release = nfs_writedata_release,
+ .rpc_release = nfs_writeback_release_partial,
};
/*
@@ -1028,17 +1041,21 @@ static const struct rpc_call_ops nfs_write_partial_ops = {
static void nfs_writeback_done_full(struct rpc_task *task, void *calldata)
{
struct nfs_write_data *data = calldata;
- struct nfs_page *req;
- struct page *page;
- if (nfs_writeback_done(task, data) != 0)
- return;
+ nfs_writeback_done(task, data);
+}
+
+static void nfs_writeback_release_full(void *calldata)
+{
+ struct nfs_write_data *data = calldata;
+ int status = data->task.tk_status;
/* Update attributes as result of writeback. */
while (!list_empty(&data->pages)) {
- req = nfs_list_entry(data->pages.next);
+ struct nfs_page *req = nfs_list_entry(data->pages.next);
+ struct page *page = req->wb_page;
+
nfs_list_remove_request(req);
- page = req->wb_page;
dprintk("NFS: write (%s/%Ld %d@%Ld)",
req->wb_context->path.dentry->d_inode->i_sb->s_id,
@@ -1046,10 +1063,10 @@ static void nfs_writeback_done_full(struct rpc_task *task, void *calldata)
req->wb_bytes,
(long long)req_offset(req));
- if (task->tk_status < 0) {
+ if (status < 0) {
nfs_set_pageerror(page);
- nfs_context_set_write_error(req->wb_context, task->tk_status);
- dprintk(", error = %d\n", task->tk_status);
+ nfs_context_set_write_error(req->wb_context, status);
+ dprintk(", error = %d\n", status);
goto remove_request;
}
@@ -1069,11 +1086,12 @@ remove_request:
next:
nfs_clear_page_tag_locked(req);
}
+ nfs_writedata_release(calldata);
}
static const struct rpc_call_ops nfs_write_full_ops = {
.rpc_call_done = nfs_writeback_done_full,
- .rpc_release = nfs_writedata_release,
+ .rpc_release = nfs_writeback_release_full,
};
@@ -1159,15 +1177,18 @@ int nfs_writeback_done(struct rpc_task *task, struct nfs_write_data *data)
#if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4)
-void nfs_commit_release(void *wdata)
+void nfs_commitdata_release(void *data)
{
+ struct nfs_write_data *wdata = data;
+
+ put_nfs_open_context(wdata->args.context);
nfs_commit_free(wdata);
}
/*
* Set up the argument/result storage required for the RPC call.
*/
-static void nfs_commit_rpcsetup(struct list_head *head,
+static int nfs_commit_rpcsetup(struct list_head *head,
struct nfs_write_data *data,
int how)
{
@@ -1187,6 +1208,7 @@ static void nfs_commit_rpcsetup(struct list_head *head,
.rpc_message = &msg,
.callback_ops = &nfs_commit_ops,
.callback_data = data,
+ .workqueue = nfsiod_workqueue,
.flags = flags,
.priority = priority,
};
@@ -1203,6 +1225,7 @@ static void nfs_commit_rpcsetup(struct list_head *head,
/* Note: we always request a commit of the entire inode */
data->args.offset = 0;
data->args.count = 0;
+ data->args.context = get_nfs_open_context(first->wb_context);
data->res.count = 0;
data->res.fattr = &data->fattr;
data->res.verf = &data->verf;
@@ -1214,8 +1237,10 @@ static void nfs_commit_rpcsetup(struct list_head *head,
dprintk("NFS: %5u initiated commit call\n", data->task.tk_pid);
task = rpc_run_task(&task_setup_data);
- if (!IS_ERR(task))
- rpc_put_task(task);
+ if (IS_ERR(task))
+ return PTR_ERR(task);
+ rpc_put_task(task);
+ return 0;
}
/*
@@ -1227,15 +1252,13 @@ nfs_commit_list(struct inode *inode, struct list_head *head, int how)
struct nfs_write_data *data;
struct nfs_page *req;
- data = nfs_commit_alloc();
+ data = nfs_commitdata_alloc();
if (!data)
goto out_bad;
/* Set up the argument struct */
- nfs_commit_rpcsetup(head, data, how);
-
- return 0;
+ return nfs_commit_rpcsetup(head, data, how);
out_bad:
while (!list_empty(head)) {
req = nfs_list_entry(head->next);
@@ -1255,7 +1278,6 @@ nfs_commit_list(struct inode *inode, struct list_head *head, int how)
static void nfs_commit_done(struct rpc_task *task, void *calldata)
{
struct nfs_write_data *data = calldata;
- struct nfs_page *req;
dprintk("NFS: %5u nfs_commit_done (status %d)\n",
task->tk_pid, task->tk_status);
@@ -1263,6 +1285,13 @@ static void nfs_commit_done(struct rpc_task *task, void *calldata)
/* Call the NFS version-specific code */
if (NFS_PROTO(data->inode)->commit_done(task, data) != 0)
return;
+}
+
+static void nfs_commit_release(void *calldata)
+{
+ struct nfs_write_data *data = calldata;
+ struct nfs_page *req;
+ int status = data->task.tk_status;
while (!list_empty(&data->pages)) {
req = nfs_list_entry(data->pages.next);
@@ -1277,10 +1306,10 @@ static void nfs_commit_done(struct rpc_task *task, void *calldata)
(long long)NFS_FILEID(req->wb_context->path.dentry->d_inode),
req->wb_bytes,
(long long)req_offset(req));
- if (task->tk_status < 0) {
- nfs_context_set_write_error(req->wb_context, task->tk_status);
+ if (status < 0) {
+ nfs_context_set_write_error(req->wb_context, status);
nfs_inode_remove_request(req);
- dprintk(", error = %d\n", task->tk_status);
+ dprintk(", error = %d\n", status);
goto next;
}
@@ -1297,10 +1326,11 @@ static void nfs_commit_done(struct rpc_task *task, void *calldata)
}
/* We have a mismatch. Write the page again */
dprintk(" mismatch\n");
- nfs_redirty_request(req);
+ nfs_mark_request_dirty(req);
next:
nfs_clear_page_tag_locked(req);
}
+ nfs_commitdata_release(calldata);
}
static const struct rpc_call_ops nfs_commit_ops = {
@@ -1487,18 +1517,19 @@ static int nfs_wb_page_priority(struct inode *inode, struct page *page,
};
int ret;
- BUG_ON(!PageLocked(page));
- if (clear_page_dirty_for_io(page)) {
- ret = nfs_writepage_locked(page, &wbc);
+ do {
+ if (clear_page_dirty_for_io(page)) {
+ ret = nfs_writepage_locked(page, &wbc);
+ if (ret < 0)
+ goto out_error;
+ } else if (!PagePrivate(page))
+ break;
+ ret = nfs_sync_mapping_wait(page->mapping, &wbc, how);
if (ret < 0)
- goto out;
- }
- if (!PagePrivate(page))
- return 0;
- ret = nfs_sync_mapping_wait(page->mapping, &wbc, how);
- if (ret >= 0)
- return 0;
-out:
+ goto out_error;
+ } while (PagePrivate(page));
+ return 0;
+out_error:
__mark_inode_dirty(inode, I_DIRTY_PAGES);
return ret;
}
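The radix_tree_preload() call added to nfs_update_request() above is what allows nfs_inode_add_request() to become void: the preload sets aside the nodes that the later radix_tree_insert() under i_lock might need, so the insert can no longer fail with -ENOMEM and the remaining BUG_ON() only guards against a duplicate index. A schematic kernel-style sketch of the pairing (not the actual NFS code); radix_tree_preload() disables preemption on success, so every path out of the locked region has to reach radix_tree_preload_end():

#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/radix-tree.h>
#include <linux/spinlock.h>

static int insert_item(struct radix_tree_root *tree, spinlock_t *lock,
                       unsigned long index, void *item)
{
        int err;

        if (radix_tree_preload(GFP_NOFS))
                return -ENOMEM;

        spin_lock(lock);
        /* With the preload in place this can only fail with -EEXIST. */
        err = radix_tree_insert(tree, index, item);
        spin_unlock(lock);

        radix_tree_preload_end();
        return err;
}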
diff --git a/fs/nfsd/auth.c b/fs/nfsd/auth.c
index d13403e3362..294992e9bf6 100644
--- a/fs/nfsd/auth.c
+++ b/fs/nfsd/auth.c
@@ -10,6 +10,7 @@
#include <linux/sunrpc/svcauth.h>
#include <linux/nfsd/nfsd.h>
#include <linux/nfsd/export.h>
+#include "auth.h"
int nfsexp_flags(struct svc_rqst *rqstp, struct svc_export *exp)
{
diff --git a/fs/nfsd/export.c b/fs/nfsd/export.c
index 8a6f7c924c7..33bfcf09db4 100644
--- a/fs/nfsd/export.c
+++ b/fs/nfsd/export.c
@@ -35,6 +35,7 @@
#include <linux/lockd/bind.h>
#include <linux/sunrpc/msg_prot.h>
#include <linux/sunrpc/gss_api.h>
+#include <net/ipv6.h>
#define NFSDDBG_FACILITY NFSDDBG_EXPORT
@@ -1548,6 +1549,7 @@ exp_addclient(struct nfsctl_client *ncp)
{
struct auth_domain *dom;
int i, err;
+ struct in6_addr addr6;
/* First, consistency check. */
err = -EINVAL;
@@ -1566,9 +1568,10 @@ exp_addclient(struct nfsctl_client *ncp)
goto out_unlock;
/* Insert client into hashtable. */
- for (i = 0; i < ncp->cl_naddr; i++)
- auth_unix_add_addr(ncp->cl_addrlist[i], dom);
-
+ for (i = 0; i < ncp->cl_naddr; i++) {
+ ipv6_addr_set_v4mapped(ncp->cl_addrlist[i].s_addr, &addr6);
+ auth_unix_add_addr(&addr6, dom);
+ }
auth_unix_forget_old(dom);
auth_domain_put(dom);
diff --git a/fs/nfsd/nfs4callback.c b/fs/nfsd/nfs4callback.c
index aae2b29ae2c..562abf3380d 100644
--- a/fs/nfsd/nfs4callback.c
+++ b/fs/nfsd/nfs4callback.c
@@ -344,6 +344,21 @@ static struct rpc_version * nfs_cb_version[] = {
&nfs_cb_version4,
};
+static struct rpc_program cb_program;
+
+static struct rpc_stat cb_stats = {
+ .program = &cb_program
+};
+
+#define NFS4_CALLBACK 0x40000000
+static struct rpc_program cb_program = {
+ .name = "nfs4_cb",
+ .number = NFS4_CALLBACK,
+ .nrvers = ARRAY_SIZE(nfs_cb_version),
+ .version = nfs_cb_version,
+ .stats = &cb_stats,
+};
+
/* Reference counting, callback cleanup, etc., all look racy as heck.
* And why is cb_set an atomic? */
@@ -358,13 +373,12 @@ static int do_probe_callback(void *data)
.to_maxval = (NFSD_LEASE_TIME/2) * HZ,
.to_exponential = 1,
};
- struct rpc_program * program = &cb->cb_program;
struct rpc_create_args args = {
.protocol = IPPROTO_TCP,
.address = (struct sockaddr *)&addr,
.addrsize = sizeof(addr),
.timeout = &timeparms,
- .program = program,
+ .program = &cb_program,
.version = nfs_cb_version[1]->number,
.authflavor = RPC_AUTH_UNIX, /* XXX: need AUTH_GSS... */
.flags = (RPC_CLNT_CREATE_NOPING),
@@ -382,16 +396,8 @@ static int do_probe_callback(void *data)
addr.sin_port = htons(cb->cb_port);
addr.sin_addr.s_addr = htonl(cb->cb_addr);
- /* Initialize rpc_program */
- program->name = "nfs4_cb";
- program->number = cb->cb_prog;
- program->nrvers = ARRAY_SIZE(nfs_cb_version);
- program->version = nfs_cb_version;
- program->stats = &cb->cb_stat;
-
/* Initialize rpc_stat */
- memset(program->stats, 0, sizeof(cb->cb_stat));
- program->stats->program = program;
+ memset(args.program->stats, 0, sizeof(struct rpc_stat));
/* Create RPC client */
client = rpc_create(&args);
diff --git a/fs/nfsd/nfs4idmap.c b/fs/nfsd/nfs4idmap.c
index 996bd88b75b..5b398421b05 100644
--- a/fs/nfsd/nfs4idmap.c
+++ b/fs/nfsd/nfs4idmap.c
@@ -202,7 +202,7 @@ static struct cache_detail idtoname_cache = {
.alloc = ent_alloc,
};
-int
+static int
idtoname_parse(struct cache_detail *cd, char *buf, int buflen)
{
struct ent ent, *res;
diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
index 81a75f3081f..8799b870818 100644
--- a/fs/nfsd/nfs4state.c
+++ b/fs/nfsd/nfs4state.c
@@ -1639,6 +1639,7 @@ nfs4_open_delegation(struct svc_fh *fh, struct nfsd4_open *open, struct nfs4_sta
locks_init_lock(&fl);
fl.fl_lmops = &nfsd_lease_mng_ops;
fl.fl_flags = FL_LEASE;
+ fl.fl_type = flag == NFS4_OPEN_DELEGATE_READ? F_RDLCK: F_WRLCK;
fl.fl_end = OFFSET_MAX;
fl.fl_owner = (fl_owner_t)dp;
fl.fl_file = stp->st_vfs_file;
@@ -1647,8 +1648,7 @@ nfs4_open_delegation(struct svc_fh *fh, struct nfsd4_open *open, struct nfs4_sta
/* vfs_setlease checks to see if delegation should be handed out.
* the lock_manager callbacks fl_mylease and fl_change are used
*/
- if ((status = vfs_setlease(stp->st_vfs_file,
- flag == NFS4_OPEN_DELEGATE_READ? F_RDLCK: F_WRLCK, &flp))) {
+ if ((status = vfs_setlease(stp->st_vfs_file, fl.fl_type, &flp))) {
dprintk("NFSD: setlease failed [%d], no delegation\n", status);
unhash_delegation(dp);
flag = NFS4_OPEN_DELEGATE_NONE;
@@ -1763,10 +1763,6 @@ out:
return status;
}
-static struct workqueue_struct *laundry_wq;
-static void laundromat_main(struct work_struct *);
-static DECLARE_DELAYED_WORK(laundromat_work, laundromat_main);
-
__be32
nfsd4_renew(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
clientid_t *clid)
@@ -1874,7 +1870,11 @@ nfs4_laundromat(void)
return clientid_val;
}
-void
+static struct workqueue_struct *laundry_wq;
+static void laundromat_main(struct work_struct *);
+static DECLARE_DELAYED_WORK(laundromat_work, laundromat_main);
+
+static void
laundromat_main(struct work_struct *not_used)
{
time_t t;
@@ -1975,6 +1975,26 @@ io_during_grace_disallowed(struct inode *inode, int flags)
&& mandatory_lock(inode);
}
+static int check_stateid_generation(stateid_t *in, stateid_t *ref)
+{
+ /* If the client sends us a stateid from the future, it's buggy: */
+ if (in->si_generation > ref->si_generation)
+ return nfserr_bad_stateid;
+ /*
+ * The following, however, can happen. For example, if the
+ * client sends an open and some IO at the same time, the open
+ * may bump si_generation while the IO is still in flight.
+ * Thanks to hard links and renames, the client never knows what
+ * file an open will affect. So it could avoid that situation
+ * only by serializing all opens and IO from the same open
+ * owner. To recover from the old_stateid error, the client
+ * will just have to retry the IO:
+ */
+ if (in->si_generation < ref->si_generation)
+ return nfserr_old_stateid;
+ return nfs_ok;
+}
+
/*
* Checks for stateid operations
*/
@@ -2023,12 +2043,8 @@ nfs4_preprocess_stateid_op(struct svc_fh *current_fh, stateid_t *stateid, int fl
goto out;
stidp = &stp->st_stateid;
}
- if (stateid->si_generation > stidp->si_generation)
- goto out;
-
- /* OLD STATEID */
- status = nfserr_old_stateid;
- if (stateid->si_generation < stidp->si_generation)
+ status = check_stateid_generation(stateid, stidp);
+ if (status)
goto out;
if (stp) {
if ((status = nfs4_check_openmode(stp,flags)))
@@ -2036,7 +2052,7 @@ nfs4_preprocess_stateid_op(struct svc_fh *current_fh, stateid_t *stateid, int fl
renew_client(stp->st_stateowner->so_client);
if (filpp)
*filpp = stp->st_vfs_file;
- } else if (dp) {
+ } else {
if ((status = nfs4_check_delegmode(dp, flags)))
goto out;
renew_client(dp->dl_client);
@@ -2065,6 +2081,7 @@ nfs4_preprocess_seqid_op(struct svc_fh *current_fh, u32 seqid, stateid_t *statei
{
struct nfs4_stateid *stp;
struct nfs4_stateowner *sop;
+ __be32 status;
dprintk("NFSD: preprocess_seqid_op: seqid=%d "
"stateid = (%08x/%08x/%08x/%08x)\n", seqid,
@@ -2127,7 +2144,7 @@ nfs4_preprocess_seqid_op(struct svc_fh *current_fh, u32 seqid, stateid_t *statei
}
}
- if ((flags & CHECK_FH) && nfs4_check_fh(current_fh, stp)) {
+ if (nfs4_check_fh(current_fh, stp)) {
dprintk("NFSD: preprocess_seqid_op: fh-stateid mismatch!\n");
return nfserr_bad_stateid;
}
@@ -2150,15 +2167,9 @@ nfs4_preprocess_seqid_op(struct svc_fh *current_fh, u32 seqid, stateid_t *statei
" confirmed yet!\n");
return nfserr_bad_stateid;
}
- if (stateid->si_generation > stp->st_stateid.si_generation) {
- dprintk("NFSD: preprocess_seqid_op: future stateid?!\n");
- return nfserr_bad_stateid;
- }
-
- if (stateid->si_generation < stp->st_stateid.si_generation) {
- dprintk("NFSD: preprocess_seqid_op: old stateid!\n");
- return nfserr_old_stateid;
- }
+ status = check_stateid_generation(stateid, &stp->st_stateid);
+ if (status)
+ return status;
renew_client(sop->so_client);
return nfs_ok;
@@ -2194,7 +2205,7 @@ nfsd4_open_confirm(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
if ((status = nfs4_preprocess_seqid_op(&cstate->current_fh,
oc->oc_seqid, &oc->oc_req_stateid,
- CHECK_FH | CONFIRM | OPEN_STATE,
+ CONFIRM | OPEN_STATE,
&oc->oc_stateowner, &stp, NULL)))
goto out;
@@ -2265,7 +2276,7 @@ nfsd4_open_downgrade(struct svc_rqst *rqstp,
if ((status = nfs4_preprocess_seqid_op(&cstate->current_fh,
od->od_seqid,
&od->od_stateid,
- CHECK_FH | OPEN_STATE,
+ OPEN_STATE,
&od->od_stateowner, &stp, NULL)))
goto out;
@@ -2318,7 +2329,7 @@ nfsd4_close(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
if ((status = nfs4_preprocess_seqid_op(&cstate->current_fh,
close->cl_seqid,
&close->cl_stateid,
- CHECK_FH | OPEN_STATE | CLOSE_STATE,
+ OPEN_STATE | CLOSE_STATE,
&close->cl_stateowner, &stp, NULL)))
goto out;
status = nfs_ok;
@@ -2623,7 +2634,7 @@ nfsd4_lock(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
status = nfs4_preprocess_seqid_op(&cstate->current_fh,
lock->lk_new_open_seqid,
&lock->lk_new_open_stateid,
- CHECK_FH | OPEN_STATE,
+ OPEN_STATE,
&lock->lk_replay_owner, &open_stp,
lock);
if (status)
@@ -2650,7 +2661,7 @@ nfsd4_lock(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
status = nfs4_preprocess_seqid_op(&cstate->current_fh,
lock->lk_old_lock_seqid,
&lock->lk_old_lock_stateid,
- CHECK_FH | LOCK_STATE,
+ LOCK_STATE,
&lock->lk_replay_owner, &lock_stp, lock);
if (status)
goto out;
@@ -2701,9 +2712,6 @@ nfsd4_lock(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
* Note: locks.c uses the BKL to protect the inode's lock list.
*/
- /* XXX?: Just to divert the locks_release_private at the start of
- * locks_copy_lock: */
- locks_init_lock(&conflock);
err = vfs_lock_file(filp, cmd, &file_lock, &conflock);
switch (-err) {
case 0: /* success! */
@@ -2847,7 +2855,7 @@ nfsd4_locku(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
if ((status = nfs4_preprocess_seqid_op(&cstate->current_fh,
locku->lu_seqid,
&locku->lu_stateid,
- CHECK_FH | LOCK_STATE,
+ LOCK_STATE,
&locku->lu_stateowner, &stp, NULL)))
goto out;
diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c
index 0e6a179ecca..c513bbdf2d3 100644
--- a/fs/nfsd/nfs4xdr.c
+++ b/fs/nfsd/nfs4xdr.c
@@ -376,20 +376,6 @@ nfsd4_decode_fattr(struct nfsd4_compoundargs *argp, u32 *bmval, struct iattr *ia
goto xdr_error;
}
}
- if (bmval[1] & FATTR4_WORD1_TIME_METADATA) {
- /* We require the high 32 bits of 'seconds' to be 0, and we ignore
- all 32 bits of 'nseconds'. */
- READ_BUF(12);
- len += 12;
- READ32(dummy32);
- if (dummy32)
- return nfserr_inval;
- READ32(iattr->ia_ctime.tv_sec);
- READ32(iattr->ia_ctime.tv_nsec);
- if (iattr->ia_ctime.tv_nsec >= (u32)1000000000)
- return nfserr_inval;
- iattr->ia_valid |= ATTR_CTIME;
- }
if (bmval[1] & FATTR4_WORD1_TIME_MODIFY_SET) {
READ_BUF(4);
len += 4;
@@ -1867,6 +1853,15 @@ out_serverfault:
goto out;
}
+static inline int attributes_need_mount(u32 *bmval)
+{
+ if (bmval[0] & ~(FATTR4_WORD0_RDATTR_ERROR | FATTR4_WORD0_LEASE_TIME))
+ return 1;
+ if (bmval[1] & ~FATTR4_WORD1_MOUNTED_ON_FILEID)
+ return 1;
+ return 0;
+}
+
static __be32
nfsd4_encode_dirent_fattr(struct nfsd4_readdir *cd,
const char *name, int namlen, __be32 *p, int *buflen)
@@ -1888,9 +1883,7 @@ nfsd4_encode_dirent_fattr(struct nfsd4_readdir *cd,
 * we will not follow the cross mount and will fill the attributes
* directly from the mountpoint dentry.
*/
- if (d_mountpoint(dentry) &&
- (cd->rd_bmval[0] & ~FATTR4_WORD0_RDATTR_ERROR) == 0 &&
- (cd->rd_bmval[1] & ~FATTR4_WORD1_MOUNTED_ON_FILEID) == 0)
+ if (d_mountpoint(dentry) && !attributes_need_mount(cd->rd_bmval))
ignore_crossmnt = 1;
else if (d_mountpoint(dentry)) {
int err;
diff --git a/fs/nfsd/nfsctl.c b/fs/nfsd/nfsctl.c
index 8516137cdbb..42f3820ee8f 100644
--- a/fs/nfsd/nfsctl.c
+++ b/fs/nfsd/nfsctl.c
@@ -22,6 +22,7 @@
#include <linux/seq_file.h>
#include <linux/pagemap.h>
#include <linux/init.h>
+#include <linux/inet.h>
#include <linux/string.h>
#include <linux/smp_lock.h>
#include <linux/ctype.h>
@@ -35,8 +36,10 @@
#include <linux/nfsd/cache.h>
#include <linux/nfsd/xdr.h>
#include <linux/nfsd/syscall.h>
+#include <linux/lockd/lockd.h>
#include <asm/uaccess.h>
+#include <net/ipv6.h>
/*
* We have a single directory with 9 nodes in it.
@@ -52,6 +55,8 @@ enum {
NFSD_Getfs,
NFSD_List,
NFSD_Fh,
+ NFSD_FO_UnlockIP,
+ NFSD_FO_UnlockFS,
NFSD_Threads,
NFSD_Pool_Threads,
NFSD_Versions,
@@ -88,6 +93,9 @@ static ssize_t write_leasetime(struct file *file, char *buf, size_t size);
static ssize_t write_recoverydir(struct file *file, char *buf, size_t size);
#endif
+static ssize_t failover_unlock_ip(struct file *file, char *buf, size_t size);
+static ssize_t failover_unlock_fs(struct file *file, char *buf, size_t size);
+
static ssize_t (*write_op[])(struct file *, char *, size_t) = {
[NFSD_Svc] = write_svc,
[NFSD_Add] = write_add,
@@ -97,6 +105,8 @@ static ssize_t (*write_op[])(struct file *, char *, size_t) = {
[NFSD_Getfd] = write_getfd,
[NFSD_Getfs] = write_getfs,
[NFSD_Fh] = write_filehandle,
+ [NFSD_FO_UnlockIP] = failover_unlock_ip,
+ [NFSD_FO_UnlockFS] = failover_unlock_fs,
[NFSD_Threads] = write_threads,
[NFSD_Pool_Threads] = write_pool_threads,
[NFSD_Versions] = write_versions,
@@ -149,7 +159,6 @@ static const struct file_operations transaction_ops = {
.release = simple_transaction_release,
};
-extern struct seq_operations nfs_exports_op;
static int exports_open(struct inode *inode, struct file *file)
{
return seq_open(file, &nfs_exports_op);
@@ -222,6 +231,7 @@ static ssize_t write_getfs(struct file *file, char *buf, size_t size)
struct auth_domain *clp;
int err = 0;
struct knfsd_fh *res;
+ struct in6_addr in6;
if (size < sizeof(*data))
return -EINVAL;
@@ -236,7 +246,11 @@ static ssize_t write_getfs(struct file *file, char *buf, size_t size)
res = (struct knfsd_fh*)buf;
exp_readlock();
- if (!(clp = auth_unix_lookup(sin->sin_addr)))
+
+ ipv6_addr_set_v4mapped(sin->sin_addr.s_addr, &in6);
+
+ clp = auth_unix_lookup(&in6);
+ if (!clp)
err = -EPERM;
else {
err = exp_rootfh(clp, data->gd_path, res, data->gd_maxlen);
@@ -257,6 +271,7 @@ static ssize_t write_getfd(struct file *file, char *buf, size_t size)
int err = 0;
struct knfsd_fh fh;
char *res;
+ struct in6_addr in6;
if (size < sizeof(*data))
return -EINVAL;
@@ -271,7 +286,11 @@ static ssize_t write_getfd(struct file *file, char *buf, size_t size)
res = buf;
sin = (struct sockaddr_in *)&data->gd_addr;
exp_readlock();
- if (!(clp = auth_unix_lookup(sin->sin_addr)))
+
+ ipv6_addr_set_v4mapped(sin->sin_addr.s_addr, &in6);
+
+ clp = auth_unix_lookup(&in6);
+ if (!clp)
err = -EPERM;
else {
err = exp_rootfh(clp, data->gd_path, &fh, NFS_FHSIZE);
@@ -288,6 +307,58 @@ static ssize_t write_getfd(struct file *file, char *buf, size_t size)
return err;
}
+static ssize_t failover_unlock_ip(struct file *file, char *buf, size_t size)
+{
+ __be32 server_ip;
+ char *fo_path, c;
+ int b1, b2, b3, b4;
+
+ /* sanity check */
+ if (size == 0)
+ return -EINVAL;
+
+ if (buf[size-1] != '\n')
+ return -EINVAL;
+
+ fo_path = buf;
+ if (qword_get(&buf, fo_path, size) < 0)
+ return -EINVAL;
+
+ /* get ipv4 address */
+ if (sscanf(fo_path, "%u.%u.%u.%u%c", &b1, &b2, &b3, &b4, &c) != 4)
+ return -EINVAL;
+ server_ip = htonl((((((b1<<8)|b2)<<8)|b3)<<8)|b4);
+
+ return nlmsvc_unlock_all_by_ip(server_ip);
+}
+
+static ssize_t failover_unlock_fs(struct file *file, char *buf, size_t size)
+{
+ struct nameidata nd;
+ char *fo_path;
+ int error;
+
+ /* sanity check */
+ if (size == 0)
+ return -EINVAL;
+
+ if (buf[size-1] != '\n')
+ return -EINVAL;
+
+ fo_path = buf;
+ if (qword_get(&buf, fo_path, size) < 0)
+ return -EINVAL;
+
+ error = path_lookup(fo_path, 0, &nd);
+ if (error)
+ return error;
+
+ error = nlmsvc_unlock_all_by_sb(nd.path.mnt->mnt_sb);
+
+ path_put(&nd.path);
+ return error;
+}
+
static ssize_t write_filehandle(struct file *file, char *buf, size_t size)
{
/* request is:
@@ -347,8 +418,6 @@ static ssize_t write_filehandle(struct file *file, char *buf, size_t size)
return mesg - buf;
}
-extern int nfsd_nrthreads(void);
-
static ssize_t write_threads(struct file *file, char *buf, size_t size)
{
/* if size > 0, look for a number of threads and call nfsd_svc
@@ -371,10 +440,6 @@ static ssize_t write_threads(struct file *file, char *buf, size_t size)
return strlen(buf);
}
-extern int nfsd_nrpools(void);
-extern int nfsd_get_nrthreads(int n, int *);
-extern int nfsd_set_nrthreads(int n, int *);
-
static ssize_t write_pool_threads(struct file *file, char *buf, size_t size)
{
/* if size > 0, look for an array of number of threads per node
@@ -696,6 +761,10 @@ static int nfsd_fill_super(struct super_block * sb, void * data, int silent)
[NFSD_Getfd] = {".getfd", &transaction_ops, S_IWUSR|S_IRUSR},
[NFSD_Getfs] = {".getfs", &transaction_ops, S_IWUSR|S_IRUSR},
[NFSD_List] = {"exports", &exports_operations, S_IRUGO},
+ [NFSD_FO_UnlockIP] = {"unlock_ip",
+ &transaction_ops, S_IWUSR|S_IRUSR},
+ [NFSD_FO_UnlockFS] = {"unlock_filesystem",
+ &transaction_ops, S_IWUSR|S_IRUSR},
[NFSD_Fh] = {"filehandle", &transaction_ops, S_IWUSR|S_IRUSR},
[NFSD_Threads] = {"threads", &transaction_ops, S_IWUSR|S_IRUSR},
[NFSD_Pool_Threads] = {"pool_threads", &transaction_ops, S_IWUSR|S_IRUSR},
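The unlock_ip handler added above turns a dotted-quad string into a big-endian 32-bit address with sscanf() plus htonl() before handing it to nlmsvc_unlock_all_by_ip(). Below is a minimal userspace sketch of that parse-and-pack step; parse_ipv4() is a hypothetical stand-in, and the per-octet range check is an extra added here, not something the hunk above does.

#include <stdio.h>
#include <stdint.h>
#include <arpa/inet.h>

/* Hypothetical stand-in for the parsing done in failover_unlock_ip():
 * exactly four octets, nothing trailing, packed in network byte order.
 * The range check on each octet is an extra safety net in this sketch. */
static int parse_ipv4(const char *s, uint32_t *out_be)
{
	unsigned int b1, b2, b3, b4;
	char trailing;

	if (sscanf(s, "%u.%u.%u.%u%c", &b1, &b2, &b3, &b4, &trailing) != 4)
		return -1;
	if (b1 > 255 || b2 > 255 || b3 > 255 || b4 > 255)
		return -1;

	*out_be = htonl((b1 << 24) | (b2 << 16) | (b3 << 8) | b4);
	return 0;
}

int main(void)
{
	uint32_t addr_be;

	/* in the kernel, qword_get() has already stripped the newline */
	if (parse_ipv4("192.168.1.10", &addr_be) == 0)
		printf("packed: 0x%08x\n", (unsigned int)ntohl(addr_be));
	return 0;
}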
diff --git a/fs/nfsd/nfsfh.c b/fs/nfsd/nfsfh.c
index 3e6b3f41ee1..100ae564116 100644
--- a/fs/nfsd/nfsfh.c
+++ b/fs/nfsd/nfsfh.c
@@ -113,6 +113,124 @@ static __be32 nfsd_setuser_and_check_port(struct svc_rqst *rqstp,
}
/*
+ * Use the given filehandle to look up the corresponding export and
+ * dentry. On success, the results are used to set fh_export and
+ * fh_dentry.
+ */
+static __be32 nfsd_set_fh_dentry(struct svc_rqst *rqstp, struct svc_fh *fhp)
+{
+ struct knfsd_fh *fh = &fhp->fh_handle;
+ struct fid *fid = NULL, sfid;
+ struct svc_export *exp;
+ struct dentry *dentry;
+ int fileid_type;
+ int data_left = fh->fh_size/4;
+ __be32 error;
+
+ error = nfserr_stale;
+ if (rqstp->rq_vers > 2)
+ error = nfserr_badhandle;
+ if (rqstp->rq_vers == 4 && fh->fh_size == 0)
+ return nfserr_nofilehandle;
+
+ if (fh->fh_version == 1) {
+ int len;
+
+ if (--data_left < 0)
+ return error;
+ if (fh->fh_auth_type != 0)
+ return error;
+ len = key_len(fh->fh_fsid_type) / 4;
+ if (len == 0)
+ return error;
+ if (fh->fh_fsid_type == FSID_MAJOR_MINOR) {
+ /* deprecated, convert to type 3 */
+ len = key_len(FSID_ENCODE_DEV)/4;
+ fh->fh_fsid_type = FSID_ENCODE_DEV;
+ fh->fh_fsid[0] = new_encode_dev(MKDEV(ntohl(fh->fh_fsid[0]), ntohl(fh->fh_fsid[1])));
+ fh->fh_fsid[1] = fh->fh_fsid[2];
+ }
+ data_left -= len;
+ if (data_left < 0)
+ return error;
+ exp = rqst_exp_find(rqstp, fh->fh_fsid_type, fh->fh_auth);
+ fid = (struct fid *)(fh->fh_auth + len);
+ } else {
+ __u32 tfh[2];
+ dev_t xdev;
+ ino_t xino;
+
+ if (fh->fh_size != NFS_FHSIZE)
+ return error;
+ /* assume old filehandle format */
+ xdev = old_decode_dev(fh->ofh_xdev);
+ xino = u32_to_ino_t(fh->ofh_xino);
+ mk_fsid(FSID_DEV, tfh, xdev, xino, 0, NULL);
+ exp = rqst_exp_find(rqstp, FSID_DEV, tfh);
+ }
+
+ error = nfserr_stale;
+ if (PTR_ERR(exp) == -ENOENT)
+ return error;
+
+ if (IS_ERR(exp))
+ return nfserrno(PTR_ERR(exp));
+
+ error = nfsd_setuser_and_check_port(rqstp, exp);
+ if (error)
+ goto out;
+
+ /*
+ * Look up the dentry using the NFS file handle.
+ */
+ error = nfserr_stale;
+ if (rqstp->rq_vers > 2)
+ error = nfserr_badhandle;
+
+ if (fh->fh_version != 1) {
+ sfid.i32.ino = fh->ofh_ino;
+ sfid.i32.gen = fh->ofh_generation;
+ sfid.i32.parent_ino = fh->ofh_dirino;
+ fid = &sfid;
+ data_left = 3;
+ if (fh->ofh_dirino == 0)
+ fileid_type = FILEID_INO32_GEN;
+ else
+ fileid_type = FILEID_INO32_GEN_PARENT;
+ } else
+ fileid_type = fh->fh_fileid_type;
+
+ if (fileid_type == FILEID_ROOT)
+ dentry = dget(exp->ex_path.dentry);
+ else {
+ dentry = exportfs_decode_fh(exp->ex_path.mnt, fid,
+ data_left, fileid_type,
+ nfsd_acceptable, exp);
+ }
+ if (dentry == NULL)
+ goto out;
+ if (IS_ERR(dentry)) {
+ if (PTR_ERR(dentry) != -EINVAL)
+ error = nfserrno(PTR_ERR(dentry));
+ goto out;
+ }
+
+ if (S_ISDIR(dentry->d_inode->i_mode) &&
+ (dentry->d_flags & DCACHE_DISCONNECTED)) {
+ printk("nfsd: find_fh_dentry returned a DISCONNECTED directory: %s/%s\n",
+ dentry->d_parent->d_name.name, dentry->d_name.name);
+ }
+
+ fhp->fh_dentry = dentry;
+ fhp->fh_export = exp;
+ nfsd_nr_verified++;
+ return 0;
+out:
+ exp_put(exp);
+ return error;
+}
+
+/*
* Perform sanity checks on the dentry in a client's file handle.
*
* Note that the file handle dentry may need to be freed even after
@@ -124,115 +242,18 @@ static __be32 nfsd_setuser_and_check_port(struct svc_rqst *rqstp,
__be32
fh_verify(struct svc_rqst *rqstp, struct svc_fh *fhp, int type, int access)
{
- struct knfsd_fh *fh = &fhp->fh_handle;
- struct svc_export *exp = NULL;
+ struct svc_export *exp;
struct dentry *dentry;
- __be32 error = 0;
+ __be32 error;
dprintk("nfsd: fh_verify(%s)\n", SVCFH_fmt(fhp));
if (!fhp->fh_dentry) {
- struct fid *fid = NULL, sfid;
- int fileid_type;
- int data_left = fh->fh_size/4;
-
- error = nfserr_stale;
- if (rqstp->rq_vers > 2)
- error = nfserr_badhandle;
- if (rqstp->rq_vers == 4 && fh->fh_size == 0)
- return nfserr_nofilehandle;
-
- if (fh->fh_version == 1) {
- int len;
- if (--data_left<0) goto out;
- switch (fh->fh_auth_type) {
- case 0: break;
- default: goto out;
- }
- len = key_len(fh->fh_fsid_type) / 4;
- if (len == 0) goto out;
- if (fh->fh_fsid_type == FSID_MAJOR_MINOR) {
- /* deprecated, convert to type 3 */
- len = key_len(FSID_ENCODE_DEV)/4;
- fh->fh_fsid_type = FSID_ENCODE_DEV;
- fh->fh_fsid[0] = new_encode_dev(MKDEV(ntohl(fh->fh_fsid[0]), ntohl(fh->fh_fsid[1])));
- fh->fh_fsid[1] = fh->fh_fsid[2];
- }
- if ((data_left -= len)<0) goto out;
- exp = rqst_exp_find(rqstp, fh->fh_fsid_type,
- fh->fh_auth);
- fid = (struct fid *)(fh->fh_auth + len);
- } else {
- __u32 tfh[2];
- dev_t xdev;
- ino_t xino;
- if (fh->fh_size != NFS_FHSIZE)
- goto out;
- /* assume old filehandle format */
- xdev = old_decode_dev(fh->ofh_xdev);
- xino = u32_to_ino_t(fh->ofh_xino);
- mk_fsid(FSID_DEV, tfh, xdev, xino, 0, NULL);
- exp = rqst_exp_find(rqstp, FSID_DEV, tfh);
- }
-
- error = nfserr_stale;
- if (PTR_ERR(exp) == -ENOENT)
- goto out;
-
- if (IS_ERR(exp)) {
- error = nfserrno(PTR_ERR(exp));
- goto out;
- }
-
- error = nfsd_setuser_and_check_port(rqstp, exp);
+ error = nfsd_set_fh_dentry(rqstp, fhp);
if (error)
goto out;
-
- /*
- * Look up the dentry using the NFS file handle.
- */
- error = nfserr_stale;
- if (rqstp->rq_vers > 2)
- error = nfserr_badhandle;
-
- if (fh->fh_version != 1) {
- sfid.i32.ino = fh->ofh_ino;
- sfid.i32.gen = fh->ofh_generation;
- sfid.i32.parent_ino = fh->ofh_dirino;
- fid = &sfid;
- data_left = 3;
- if (fh->ofh_dirino == 0)
- fileid_type = FILEID_INO32_GEN;
- else
- fileid_type = FILEID_INO32_GEN_PARENT;
- } else
- fileid_type = fh->fh_fileid_type;
-
- if (fileid_type == FILEID_ROOT)
- dentry = dget(exp->ex_path.dentry);
- else {
- dentry = exportfs_decode_fh(exp->ex_path.mnt, fid,
- data_left, fileid_type,
- nfsd_acceptable, exp);
- }
- if (dentry == NULL)
- goto out;
- if (IS_ERR(dentry)) {
- if (PTR_ERR(dentry) != -EINVAL)
- error = nfserrno(PTR_ERR(dentry));
- goto out;
- }
-
- if (S_ISDIR(dentry->d_inode->i_mode) &&
- (dentry->d_flags & DCACHE_DISCONNECTED)) {
- printk("nfsd: find_fh_dentry returned a DISCONNECTED directory: %s/%s\n",
- dentry->d_parent->d_name.name, dentry->d_name.name);
- }
-
- fhp->fh_dentry = dentry;
- fhp->fh_export = exp;
- nfsd_nr_verified++;
- cache_get(&exp->h);
+ dentry = fhp->fh_dentry;
+ exp = fhp->fh_export;
} else {
/*
* just rechecking permissions
@@ -242,7 +263,6 @@ fh_verify(struct svc_rqst *rqstp, struct svc_fh *fhp, int type, int access)
dprintk("nfsd: fh_verify - just checking\n");
dentry = fhp->fh_dentry;
exp = fhp->fh_export;
- cache_get(&exp->h);
/*
* Set user creds for this exportpoint; necessary even
* in the "just checking" case because this may be a
@@ -281,8 +301,6 @@ fh_verify(struct svc_rqst *rqstp, struct svc_fh *fhp, int type, int access)
access, ntohl(error));
}
out:
- if (exp && !IS_ERR(exp))
- exp_put(exp);
if (error == nfserr_stale)
nfsdstats.fh_stale++;
return error;
diff --git a/fs/nfsd/nfssvc.c b/fs/nfsd/nfssvc.c
index 9647b0f7bc0..941041f4b13 100644
--- a/fs/nfsd/nfssvc.c
+++ b/fs/nfsd/nfssvc.c
@@ -244,7 +244,6 @@ static int nfsd_init_socks(int port)
if (error < 0)
return error;
-#ifdef CONFIG_NFSD_TCP
error = lockd_up(IPPROTO_TCP);
if (error >= 0) {
error = svc_create_xprt(nfsd_serv, "tcp", port,
@@ -254,7 +253,6 @@ static int nfsd_init_socks(int port)
}
if (error < 0)
return error;
-#endif
return 0;
}
diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c
index 304bf5f643c..a3a291f771f 100644
--- a/fs/nfsd/vfs.c
+++ b/fs/nfsd/vfs.c
@@ -264,7 +264,6 @@ nfsd_setattr(struct svc_rqst *rqstp, struct svc_fh *fhp, struct iattr *iap,
struct inode *inode;
int accmode = MAY_SATTR;
int ftype = 0;
- int imode;
__be32 err;
int host_err;
int size_change = 0;
@@ -360,25 +359,25 @@ nfsd_setattr(struct svc_rqst *rqstp, struct svc_fh *fhp, struct iattr *iap,
DQUOT_INIT(inode);
}
- imode = inode->i_mode;
+ /* sanitize the mode change */
if (iap->ia_valid & ATTR_MODE) {
iap->ia_mode &= S_IALLUGO;
- imode = iap->ia_mode |= (imode & ~S_IALLUGO);
- /* if changing uid/gid revoke setuid/setgid in mode */
- if ((iap->ia_valid & ATTR_UID) && iap->ia_uid != inode->i_uid) {
- iap->ia_valid |= ATTR_KILL_PRIV;
+ iap->ia_mode |= (inode->i_mode & ~S_IALLUGO);
+ }
+
+ /* Revoke setuid/setgid on chown */
+ if (((iap->ia_valid & ATTR_UID) && iap->ia_uid != inode->i_uid) ||
+ ((iap->ia_valid & ATTR_GID) && iap->ia_gid != inode->i_gid)) {
+ iap->ia_valid |= ATTR_KILL_PRIV;
+ if (iap->ia_valid & ATTR_MODE) {
+ /* we're setting mode too, just clear the s*id bits */
iap->ia_mode &= ~S_ISUID;
+ if (iap->ia_mode & S_IXGRP)
+ iap->ia_mode &= ~S_ISGID;
+ } else {
+ /* set ATTR_KILL_* bits and let VFS handle it */
+ iap->ia_valid |= (ATTR_KILL_SUID | ATTR_KILL_SGID);
}
- if ((iap->ia_valid & ATTR_GID) && iap->ia_gid != inode->i_gid)
- iap->ia_mode &= ~S_ISGID;
- } else {
- /*
- * Revoke setuid/setgid bit on chown/chgrp
- */
- if ((iap->ia_valid & ATTR_UID) && iap->ia_uid != inode->i_uid)
- iap->ia_valid |= ATTR_KILL_SUID | ATTR_KILL_PRIV;
- if ((iap->ia_valid & ATTR_GID) && iap->ia_gid != inode->i_gid)
- iap->ia_valid |= ATTR_KILL_SGID;
}
/* Change the attributes. */
@@ -988,7 +987,7 @@ nfsd_vfs_write(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file *file,
* flushing the data to disk is handled separately below.
*/
- if (file->f_op->fsync == 0) {/* COMMIT3 cannot work */
+ if (!file->f_op->fsync) {/* COMMIT3 cannot work */
stable = 2;
*stablep = 2; /* FILE_SYNC */
}
@@ -1152,7 +1151,7 @@ nfsd_commit(struct svc_rqst *rqstp, struct svc_fh *fhp,
}
#endif /* CONFIG_NFSD_V3 */
-__be32
+static __be32
nfsd_create_setattr(struct svc_rqst *rqstp, struct svc_fh *resfhp,
struct iattr *iap)
{
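The nfsd_setattr() hunk above restructures the mode sanitizing: the owner/group comparison is now done once, and the setuid/setgid bits are either cleared directly (when a mode is being set in the same call) or delegated to the VFS via ATTR_KILL_SUID/ATTR_KILL_SGID. A standalone sketch of that decision follows; the flag names are local stand-ins rather than the kernel's iattr machinery, and the ATTR_KILL_PRIV handling is left out.

#include <stdio.h>

/* Stand-in flag definitions; the ATTR_* values here are arbitrary. */
#define ATTR_UID	0x01
#define ATTR_GID	0x02
#define ATTR_MODE	0x04
#define ATTR_KILL_SUID	0x08
#define ATTR_KILL_SGID	0x10

#define S_ISUID	04000
#define S_ISGID	02000
#define S_IXGRP	00010

struct attr { unsigned valid; unsigned mode; };

/* Mirrors the reworked logic above; the caller has already decided that
 * the owner or group really changes (chown_changes_id). */
static void kill_sxid_on_chown(struct attr *a, int chown_changes_id)
{
	if (!chown_changes_id)
		return;
	if (a->valid & ATTR_MODE) {
		/* mode is being set too: clear the bits in the new mode */
		a->mode &= ~S_ISUID;
		if (a->mode & S_IXGRP)
			a->mode &= ~S_ISGID;
	} else {
		/* otherwise set the ATTR_KILL_* bits and let the VFS do it */
		a->valid |= ATTR_KILL_SUID | ATTR_KILL_SGID;
	}
}

int main(void)
{
	struct attr a = { .valid = ATTR_UID | ATTR_MODE,
			  .mode = S_ISUID | S_ISGID | 0755 };

	kill_sxid_on_chown(&a, 1);
	printf("mode now %o, valid %#x\n", a.mode, a.valid);
	return 0;
}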
diff --git a/fs/pipe.c b/fs/pipe.c
index 8be381bbcb5..f73492b6817 100644
--- a/fs/pipe.c
+++ b/fs/pipe.c
@@ -988,7 +988,10 @@ struct file *create_write_pipe(void)
return f;
err_dentry:
+ free_pipe_info(inode);
dput(dentry);
+ return ERR_PTR(err);
+
err_inode:
free_pipe_info(inode);
iput(inode);
diff --git a/fs/pnode.c b/fs/pnode.c
index 1d8f5447f3f..8d5f392ec3d 100644
--- a/fs/pnode.c
+++ b/fs/pnode.c
@@ -9,6 +9,7 @@
#include <linux/mnt_namespace.h>
#include <linux/mount.h>
#include <linux/fs.h>
+#include "internal.h"
#include "pnode.h"
/* return the next shared peer mount of @p */
@@ -27,6 +28,57 @@ static inline struct vfsmount *next_slave(struct vfsmount *p)
return list_entry(p->mnt_slave.next, struct vfsmount, mnt_slave);
}
+/*
+ * Return true if path is reachable from root
+ *
+ * namespace_sem is held, and mnt is attached
+ */
+static bool is_path_reachable(struct vfsmount *mnt, struct dentry *dentry,
+ const struct path *root)
+{
+ while (mnt != root->mnt && mnt->mnt_parent != mnt) {
+ dentry = mnt->mnt_mountpoint;
+ mnt = mnt->mnt_parent;
+ }
+ return mnt == root->mnt && is_subdir(dentry, root->dentry);
+}
+
+static struct vfsmount *get_peer_under_root(struct vfsmount *mnt,
+ struct mnt_namespace *ns,
+ const struct path *root)
+{
+ struct vfsmount *m = mnt;
+
+ do {
+ /* Check the namespace first for optimization */
+ if (m->mnt_ns == ns && is_path_reachable(m, m->mnt_root, root))
+ return m;
+
+ m = next_peer(m);
+ } while (m != mnt);
+
+ return NULL;
+}
+
+/*
+ * Get ID of closest dominating peer group having a representative
+ * under the given root.
+ *
+ * Caller must hold namespace_sem
+ */
+int get_dominating_id(struct vfsmount *mnt, const struct path *root)
+{
+ struct vfsmount *m;
+
+ for (m = mnt->mnt_master; m != NULL; m = m->mnt_master) {
+ struct vfsmount *d = get_peer_under_root(m, mnt->mnt_ns, root);
+ if (d)
+ return d->mnt_group_id;
+ }
+
+ return 0;
+}
+
static int do_make_slave(struct vfsmount *mnt)
{
struct vfsmount *peer_mnt = mnt, *master = mnt->mnt_master;
@@ -45,7 +97,11 @@ static int do_make_slave(struct vfsmount *mnt)
if (peer_mnt == mnt)
peer_mnt = NULL;
}
+ if (IS_MNT_SHARED(mnt) && list_empty(&mnt->mnt_share))
+ mnt_release_group_id(mnt);
+
list_del_init(&mnt->mnt_share);
+ mnt->mnt_group_id = 0;
if (peer_mnt)
master = peer_mnt;
@@ -67,7 +123,6 @@ static int do_make_slave(struct vfsmount *mnt)
}
mnt->mnt_master = master;
CLEAR_MNT_SHARED(mnt);
- INIT_LIST_HEAD(&mnt->mnt_slave_list);
return 0;
}
@@ -211,8 +266,7 @@ int propagate_mnt(struct vfsmount *dest_mnt, struct dentry *dest_dentry,
out:
spin_lock(&vfsmount_lock);
while (!list_empty(&tmp_list)) {
- child = list_entry(tmp_list.next, struct vfsmount, mnt_hash);
- list_del_init(&child->mnt_hash);
+ child = list_first_entry(&tmp_list, struct vfsmount, mnt_hash);
umount_tree(child, 0, &umount_list);
}
spin_unlock(&vfsmount_lock);
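is_path_reachable() above walks from the given mount up through mnt_parent, remembering the mountpoint dentry it crosses, until it reaches the root mount or a self-parented (unattached) mount; reachability then reduces to an is_subdir() test against the root dentry, and get_dominating_id() applies that test to each peer group up the mnt_master chain. A simplified model of the walk, with toy structures standing in for vfsmount and dentry:

#include <stdio.h>
#include <stddef.h>

struct dent { struct dent *parent; const char *name; };
struct mnt  { struct mnt *parent; struct dent *mountpoint; };

/* toy is_subdir(): is d equal to root or somewhere below it? */
static int is_subdir_sketch(struct dent *d, struct dent *root)
{
	for (; d; d = d->parent)
		if (d == root)
			return 1;
	return 0;
}

static int reachable_sketch(struct mnt *m, struct dent *d,
			    struct mnt *root_mnt, struct dent *root_dent)
{
	while (m != root_mnt && m->parent != m) {
		d = m->mountpoint;	/* dentry this mount sits on */
		m = m->parent;
	}
	return m == root_mnt && is_subdir_sketch(d, root_dent);
}

int main(void)
{
	struct dent root = { NULL, "/" };
	struct dent home = { &root, "home" };
	struct mnt rootm = { NULL, NULL };
	struct mnt sub   = { &rootm, &home };	/* mounted on /home */

	rootm.parent = &rootm;			/* root mount is self-parented */
	/* starting dentry is irrelevant here because sub != rootm */
	printf("reachable: %d\n", reachable_sketch(&sub, NULL, &rootm, &root));
	return 0;
}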
diff --git a/fs/pnode.h b/fs/pnode.h
index f249be2fee7..958665d662a 100644
--- a/fs/pnode.h
+++ b/fs/pnode.h
@@ -35,4 +35,6 @@ int propagate_mnt(struct vfsmount *, struct dentry *, struct vfsmount *,
struct list_head *);
int propagate_umount(struct list_head *);
int propagate_mount_busy(struct vfsmount *, int);
+void mnt_release_group_id(struct vfsmount *);
+int get_dominating_id(struct vfsmount *mnt, const struct path *root);
#endif /* _LINUX_PNODE_H */
diff --git a/fs/proc/base.c b/fs/proc/base.c
index 81d7d145292..c5e412a00b1 100644
--- a/fs/proc/base.c
+++ b/fs/proc/base.c
@@ -502,17 +502,14 @@ static const struct inode_operations proc_def_inode_operations = {
.setattr = proc_setattr,
};
-extern const struct seq_operations mounts_op;
-struct proc_mounts {
- struct seq_file m;
- int event;
-};
-
-static int mounts_open(struct inode *inode, struct file *file)
+static int mounts_open_common(struct inode *inode, struct file *file,
+ const struct seq_operations *op)
{
struct task_struct *task = get_proc_task(inode);
struct nsproxy *nsp;
struct mnt_namespace *ns = NULL;
+ struct fs_struct *fs = NULL;
+ struct path root;
struct proc_mounts *p;
int ret = -EINVAL;
@@ -525,40 +522,61 @@ static int mounts_open(struct inode *inode, struct file *file)
get_mnt_ns(ns);
}
rcu_read_unlock();
-
+ if (ns)
+ fs = get_fs_struct(task);
put_task_struct(task);
}
- if (ns) {
- ret = -ENOMEM;
- p = kmalloc(sizeof(struct proc_mounts), GFP_KERNEL);
- if (p) {
- file->private_data = &p->m;
- ret = seq_open(file, &mounts_op);
- if (!ret) {
- p->m.private = ns;
- p->event = ns->event;
- return 0;
- }
- kfree(p);
- }
- put_mnt_ns(ns);
- }
+ if (!ns)
+ goto err;
+ if (!fs)
+ goto err_put_ns;
+
+ read_lock(&fs->lock);
+ root = fs->root;
+ path_get(&root);
+ read_unlock(&fs->lock);
+ put_fs_struct(fs);
+
+ ret = -ENOMEM;
+ p = kmalloc(sizeof(struct proc_mounts), GFP_KERNEL);
+ if (!p)
+ goto err_put_path;
+
+ file->private_data = &p->m;
+ ret = seq_open(file, op);
+ if (ret)
+ goto err_free;
+
+ p->m.private = p;
+ p->ns = ns;
+ p->root = root;
+ p->event = ns->event;
+
+ return 0;
+
+ err_free:
+ kfree(p);
+ err_put_path:
+ path_put(&root);
+ err_put_ns:
+ put_mnt_ns(ns);
+ err:
return ret;
}
static int mounts_release(struct inode *inode, struct file *file)
{
- struct seq_file *m = file->private_data;
- struct mnt_namespace *ns = m->private;
- put_mnt_ns(ns);
+ struct proc_mounts *p = file->private_data;
+ path_put(&p->root);
+ put_mnt_ns(p->ns);
return seq_release(inode, file);
}
static unsigned mounts_poll(struct file *file, poll_table *wait)
{
struct proc_mounts *p = file->private_data;
- struct mnt_namespace *ns = p->m.private;
+ struct mnt_namespace *ns = p->ns;
unsigned res = 0;
poll_wait(file, &ns->poll, wait);
@@ -573,6 +591,11 @@ static unsigned mounts_poll(struct file *file, poll_table *wait)
return res;
}
+static int mounts_open(struct inode *inode, struct file *file)
+{
+ return mounts_open_common(inode, file, &mounts_op);
+}
+
static const struct file_operations proc_mounts_operations = {
.open = mounts_open,
.read = seq_read,
@@ -581,38 +604,22 @@ static const struct file_operations proc_mounts_operations = {
.poll = mounts_poll,
};
-extern const struct seq_operations mountstats_op;
-static int mountstats_open(struct inode *inode, struct file *file)
+static int mountinfo_open(struct inode *inode, struct file *file)
{
- int ret = seq_open(file, &mountstats_op);
-
- if (!ret) {
- struct seq_file *m = file->private_data;
- struct nsproxy *nsp;
- struct mnt_namespace *mnt_ns = NULL;
- struct task_struct *task = get_proc_task(inode);
-
- if (task) {
- rcu_read_lock();
- nsp = task_nsproxy(task);
- if (nsp) {
- mnt_ns = nsp->mnt_ns;
- if (mnt_ns)
- get_mnt_ns(mnt_ns);
- }
- rcu_read_unlock();
+ return mounts_open_common(inode, file, &mountinfo_op);
+}
- put_task_struct(task);
- }
+static const struct file_operations proc_mountinfo_operations = {
+ .open = mountinfo_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = mounts_release,
+ .poll = mounts_poll,
+};
- if (mnt_ns)
- m->private = mnt_ns;
- else {
- seq_release(inode, file);
- ret = -EINVAL;
- }
- }
- return ret;
+static int mountstats_open(struct inode *inode, struct file *file)
+{
+ return mounts_open_common(inode, file, &mountstats_op);
}
static const struct file_operations proc_mountstats_operations = {
@@ -1626,7 +1633,6 @@ static int proc_readfd_common(struct file * filp, void * dirent,
unsigned int fd, ino;
int retval;
struct files_struct * files;
- struct fdtable *fdt;
retval = -ENOENT;
if (!p)
@@ -1649,9 +1655,8 @@ static int proc_readfd_common(struct file * filp, void * dirent,
if (!files)
goto out;
rcu_read_lock();
- fdt = files_fdtable(files);
for (fd = filp->f_pos-2;
- fd < fdt->max_fds;
+ fd < files_fdtable(files)->max_fds;
fd++, filp->f_pos++) {
char name[PROC_NUMBUF];
int len;
@@ -2311,6 +2316,7 @@ static const struct pid_entry tgid_base_stuff[] = {
LNK("root", root),
LNK("exe", exe),
REG("mounts", S_IRUGO, mounts),
+ REG("mountinfo", S_IRUGO, mountinfo),
REG("mountstats", S_IRUSR, mountstats),
#ifdef CONFIG_PROC_PAGE_MONITOR
REG("clear_refs", S_IWUSR, clear_refs),
@@ -2643,6 +2649,7 @@ static const struct pid_entry tid_base_stuff[] = {
LNK("root", root),
LNK("exe", exe),
REG("mounts", S_IRUGO, mounts),
+ REG("mountinfo", S_IRUGO, mountinfo),
#ifdef CONFIG_PROC_PAGE_MONITOR
REG("clear_refs", S_IWUSR, clear_refs),
REG("smaps", S_IRUGO, smaps),
diff --git a/fs/read_write.c b/fs/read_write.c
index 49a98718ecd..f0d1240a5c6 100644
--- a/fs/read_write.c
+++ b/fs/read_write.c
@@ -33,7 +33,7 @@ EXPORT_SYMBOL(generic_ro_fops);
loff_t generic_file_llseek(struct file *file, loff_t offset, int origin)
{
- long long retval;
+ loff_t retval;
struct inode *inode = file->f_mapping->host;
mutex_lock(&inode->i_mutex);
@@ -60,7 +60,7 @@ EXPORT_SYMBOL(generic_file_llseek);
loff_t remote_llseek(struct file *file, loff_t offset, int origin)
{
- long long retval;
+ loff_t retval;
lock_kernel();
switch (origin) {
@@ -91,7 +91,7 @@ EXPORT_SYMBOL(no_llseek);
loff_t default_llseek(struct file *file, loff_t offset, int origin)
{
- long long retval;
+ loff_t retval;
lock_kernel();
switch (origin) {
diff --git a/fs/seq_file.c b/fs/seq_file.c
index 853770274f2..3f54dbd6c49 100644
--- a/fs/seq_file.c
+++ b/fs/seq_file.c
@@ -25,6 +25,7 @@
* into the buffer. In case of error ->start() and ->next() return
* ERR_PTR(error). In the end of sequence they return %NULL. ->show()
* returns 0 in case of success and negative number in case of error.
+ * Returning SEQ_SKIP means "discard this element and move on".
*/
int seq_open(struct file *file, const struct seq_operations *op)
{
@@ -114,8 +115,10 @@ ssize_t seq_read(struct file *file, char __user *buf, size_t size, loff_t *ppos)
if (!p || IS_ERR(p))
break;
err = m->op->show(m, p);
- if (err)
+ if (err < 0)
break;
+ if (unlikely(err))
+ m->count = 0;
if (m->count < m->size)
goto Fill;
m->op->stop(m, p);
@@ -140,9 +143,10 @@ Fill:
break;
}
err = m->op->show(m, p);
- if (err || m->count == m->size) {
+ if (m->count == m->size || err) {
m->count = offs;
- break;
+ if (likely(err <= 0))
+ break;
}
pos = next;
}
@@ -199,8 +203,12 @@ static int traverse(struct seq_file *m, loff_t offset)
if (IS_ERR(p))
break;
error = m->op->show(m, p);
- if (error)
+ if (error < 0)
break;
+ if (unlikely(error)) {
+ error = 0;
+ m->count = 0;
+ }
if (m->count == m->size)
goto Eoverflow;
if (pos + m->count > offset) {
@@ -239,7 +247,7 @@ Eoverflow:
loff_t seq_lseek(struct file *file, loff_t offset, int origin)
{
struct seq_file *m = (struct seq_file *)file->private_data;
- long long retval = -EINVAL;
+ loff_t retval = -EINVAL;
mutex_lock(&m->lock);
m->version = file->f_version;
@@ -342,28 +350,40 @@ int seq_printf(struct seq_file *m, const char *f, ...)
}
EXPORT_SYMBOL(seq_printf);
+static char *mangle_path(char *s, char *p, char *esc)
+{
+ while (s <= p) {
+ char c = *p++;
+ if (!c) {
+ return s;
+ } else if (!strchr(esc, c)) {
+ *s++ = c;
+ } else if (s + 4 > p) {
+ break;
+ } else {
+ *s++ = '\\';
+ *s++ = '0' + ((c & 0300) >> 6);
+ *s++ = '0' + ((c & 070) >> 3);
+ *s++ = '0' + (c & 07);
+ }
+ }
+ return NULL;
+}
+
+/*
+ * return the absolute path of 'dentry' residing in mount 'mnt'.
+ */
int seq_path(struct seq_file *m, struct path *path, char *esc)
{
if (m->count < m->size) {
char *s = m->buf + m->count;
char *p = d_path(path, s, m->size - m->count);
if (!IS_ERR(p)) {
- while (s <= p) {
- char c = *p++;
- if (!c) {
- p = m->buf + m->count;
- m->count = s - m->buf;
- return s - p;
- } else if (!strchr(esc, c)) {
- *s++ = c;
- } else if (s + 4 > p) {
- break;
- } else {
- *s++ = '\\';
- *s++ = '0' + ((c & 0300) >> 6);
- *s++ = '0' + ((c & 070) >> 3);
- *s++ = '0' + (c & 07);
- }
+ s = mangle_path(s, p, esc);
+ if (s) {
+ p = m->buf + m->count;
+ m->count = s - m->buf;
+ return s - p;
}
}
}
@@ -372,6 +392,57 @@ int seq_path(struct seq_file *m, struct path *path, char *esc)
}
EXPORT_SYMBOL(seq_path);
+/*
+ * Same as seq_path, but relative to supplied root.
+ *
+ * root may be changed, see __d_path().
+ */
+int seq_path_root(struct seq_file *m, struct path *path, struct path *root,
+ char *esc)
+{
+ int err = -ENAMETOOLONG;
+ if (m->count < m->size) {
+ char *s = m->buf + m->count;
+ char *p;
+
+ spin_lock(&dcache_lock);
+ p = __d_path(path, root, s, m->size - m->count);
+ spin_unlock(&dcache_lock);
+ err = PTR_ERR(p);
+ if (!IS_ERR(p)) {
+ s = mangle_path(s, p, esc);
+ if (s) {
+ p = m->buf + m->count;
+ m->count = s - m->buf;
+ return 0;
+ }
+ }
+ }
+ m->count = m->size;
+ return err;
+}
+
+/*
+ * returns the path of the 'dentry' from the root of its filesystem.
+ */
+int seq_dentry(struct seq_file *m, struct dentry *dentry, char *esc)
+{
+ if (m->count < m->size) {
+ char *s = m->buf + m->count;
+ char *p = dentry_path(dentry, s, m->size - m->count);
+ if (!IS_ERR(p)) {
+ s = mangle_path(s, p, esc);
+ if (s) {
+ p = m->buf + m->count;
+ m->count = s - m->buf;
+ return s - p;
+ }
+ }
+ }
+ m->count = m->size;
+ return -1;
+}
+
static void *single_start(struct seq_file *p, loff_t *pos)
{
return NULL + (*pos == 0);
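mangle_path(), factored out above and reused by seq_path(), seq_path_root() and seq_dentry(), copies the string that d_path()/__d_path() produced at the tail of the seq_file buffer down to the cursor, octal-escaping any character listed in esc. A userspace sketch of the same escaping rule, writing into a separate destination buffer for clarity (the kernel version works in place within one buffer):

#include <stdio.h>
#include <string.h>

/* Copy src into dst, replacing every character found in esc with a
 * three-digit octal escape, as mangle_path() does. */
static int mangle_into(char *dst, size_t dstlen, const char *src,
		       const char *esc)
{
	size_t n = 0;

	for (; *src; src++) {
		if (!strchr(esc, *src)) {
			if (n + 1 >= dstlen)
				return -1;
			dst[n++] = *src;
		} else {
			if (n + 4 >= dstlen)
				return -1;
			dst[n++] = '\\';
			dst[n++] = '0' + ((*src & 0300) >> 6);
			dst[n++] = '0' + ((*src & 070) >> 3);
			dst[n++] = '0' + (*src & 07);
		}
	}
	dst[n] = '\0';
	return (int)n;
}

int main(void)
{
	char out[64];

	if (mangle_into(out, sizeof(out), "/mnt/with space\n", " \t\n\\") > 0)
		printf("%s\n", out);	/* prints /mnt/with\040space\012 */
	return 0;
}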
diff --git a/fs/super.c b/fs/super.c
index 1f8f05ede43..4798350b2bc 100644
--- a/fs/super.c
+++ b/fs/super.c
@@ -39,6 +39,7 @@
#include <linux/mutex.h>
#include <linux/file.h>
#include <asm/uaccess.h>
+#include "internal.h"
LIST_HEAD(super_blocks);
diff --git a/fs/udf/Makefile b/fs/udf/Makefile
index be845e7540e..0d4503f7446 100644
--- a/fs/udf/Makefile
+++ b/fs/udf/Makefile
@@ -6,4 +6,4 @@ obj-$(CONFIG_UDF_FS) += udf.o
udf-objs := balloc.o dir.o file.o ialloc.o inode.o lowlevel.o namei.o \
partition.o super.o truncate.o symlink.o fsync.o \
- crc.o directory.o misc.o udftime.o unicode.o
+ directory.o misc.o udftime.o unicode.o
diff --git a/fs/udf/balloc.c b/fs/udf/balloc.c
index f855dcbbdfb..1b809bd494b 100644
--- a/fs/udf/balloc.c
+++ b/fs/udf/balloc.c
@@ -149,8 +149,7 @@ static bool udf_add_free_space(struct udf_sb_info *sbi,
return false;
lvid = (struct logicalVolIntegrityDesc *)sbi->s_lvid_bh->b_data;
- lvid->freeSpaceTable[partition] = cpu_to_le32(le32_to_cpu(
- lvid->freeSpaceTable[partition]) + cnt);
+ le32_add_cpu(&lvid->freeSpaceTable[partition], cnt);
return true;
}
@@ -589,10 +588,8 @@ static void udf_table_free_blocks(struct super_block *sb,
sptr = oepos.bh->b_data + epos.offset;
aed = (struct allocExtDesc *)
oepos.bh->b_data;
- aed->lengthAllocDescs =
- cpu_to_le32(le32_to_cpu(
- aed->lengthAllocDescs) +
- adsize);
+ le32_add_cpu(&aed->lengthAllocDescs,
+ adsize);
} else {
sptr = iinfo->i_ext.i_data +
epos.offset;
@@ -645,9 +642,7 @@ static void udf_table_free_blocks(struct super_block *sb,
mark_inode_dirty(table);
} else {
aed = (struct allocExtDesc *)epos.bh->b_data;
- aed->lengthAllocDescs =
- cpu_to_le32(le32_to_cpu(
- aed->lengthAllocDescs) + adsize);
+ le32_add_cpu(&aed->lengthAllocDescs, adsize);
udf_update_tag(epos.bh->b_data, epos.offset);
mark_buffer_dirty(epos.bh);
}
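The balloc.c hunks above (and several later UDF hunks) replace open-coded cpu_to_le32(le32_to_cpu(x) + n) sequences with le32_add_cpu(), which performs exactly that read-modify-write on a little-endian on-disk field. A userspace sketch with stand-in byte-order helpers in place of le32_to_cpu()/cpu_to_le32():

#include <stdio.h>
#include <stdint.h>

static uint32_t get_le32(const uint8_t *p)
{
	return (uint32_t)p[0] | ((uint32_t)p[1] << 8) |
	       ((uint32_t)p[2] << 16) | ((uint32_t)p[3] << 24);
}

static void put_le32(uint8_t *p, uint32_t v)
{
	p[0] = (uint8_t)v;
	p[1] = (uint8_t)(v >> 8);
	p[2] = (uint8_t)(v >> 16);
	p[3] = (uint8_t)(v >> 24);
}

/* Read a little-endian field, add a (possibly negative) delta in CPU
 * order, store it back little-endian: the essence of le32_add_cpu(). */
static void le32_add_cpu_sketch(uint8_t *field, int32_t delta)
{
	put_le32(field, get_le32(field) + (uint32_t)delta);
}

int main(void)
{
	uint8_t free_space[4] = { 0x10, 0x00, 0x00, 0x00 };	/* 16, LE */

	le32_add_cpu_sketch(free_space, 8);			/* now 24 */
	printf("%u\n", (unsigned int)get_le32(free_space));
	return 0;
}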
diff --git a/fs/udf/crc.c b/fs/udf/crc.c
deleted file mode 100644
index b1661296e78..00000000000
--- a/fs/udf/crc.c
+++ /dev/null
@@ -1,172 +0,0 @@
-/*
- * crc.c
- *
- * PURPOSE
- * Routines to generate, calculate, and test a 16-bit CRC.
- *
- * DESCRIPTION
- * The CRC code was devised by Don P. Mitchell of AT&T Bell Laboratories
- * and Ned W. Rhodes of Software Systems Group. It has been published in
- * "Design and Validation of Computer Protocols", Prentice Hall,
- * Englewood Cliffs, NJ, 1991, Chapter 3, ISBN 0-13-539925-4.
- *
- * Copyright is held by AT&T.
- *
- * AT&T gives permission for the free use of the CRC source code.
- *
- * COPYRIGHT
- * This file is distributed under the terms of the GNU General Public
- * License (GPL). Copies of the GPL can be obtained from:
- * ftp://prep.ai.mit.edu/pub/gnu/GPL
- * Each contributing author retains all rights to their own work.
- */
-
-#include "udfdecl.h"
-
-static uint16_t crc_table[256] = {
- 0x0000U, 0x1021U, 0x2042U, 0x3063U, 0x4084U, 0x50a5U, 0x60c6U, 0x70e7U,
- 0x8108U, 0x9129U, 0xa14aU, 0xb16bU, 0xc18cU, 0xd1adU, 0xe1ceU, 0xf1efU,
- 0x1231U, 0x0210U, 0x3273U, 0x2252U, 0x52b5U, 0x4294U, 0x72f7U, 0x62d6U,
- 0x9339U, 0x8318U, 0xb37bU, 0xa35aU, 0xd3bdU, 0xc39cU, 0xf3ffU, 0xe3deU,
- 0x2462U, 0x3443U, 0x0420U, 0x1401U, 0x64e6U, 0x74c7U, 0x44a4U, 0x5485U,
- 0xa56aU, 0xb54bU, 0x8528U, 0x9509U, 0xe5eeU, 0xf5cfU, 0xc5acU, 0xd58dU,
- 0x3653U, 0x2672U, 0x1611U, 0x0630U, 0x76d7U, 0x66f6U, 0x5695U, 0x46b4U,
- 0xb75bU, 0xa77aU, 0x9719U, 0x8738U, 0xf7dfU, 0xe7feU, 0xd79dU, 0xc7bcU,
- 0x48c4U, 0x58e5U, 0x6886U, 0x78a7U, 0x0840U, 0x1861U, 0x2802U, 0x3823U,
- 0xc9ccU, 0xd9edU, 0xe98eU, 0xf9afU, 0x8948U, 0x9969U, 0xa90aU, 0xb92bU,
- 0x5af5U, 0x4ad4U, 0x7ab7U, 0x6a96U, 0x1a71U, 0x0a50U, 0x3a33U, 0x2a12U,
- 0xdbfdU, 0xcbdcU, 0xfbbfU, 0xeb9eU, 0x9b79U, 0x8b58U, 0xbb3bU, 0xab1aU,
- 0x6ca6U, 0x7c87U, 0x4ce4U, 0x5cc5U, 0x2c22U, 0x3c03U, 0x0c60U, 0x1c41U,
- 0xedaeU, 0xfd8fU, 0xcdecU, 0xddcdU, 0xad2aU, 0xbd0bU, 0x8d68U, 0x9d49U,
- 0x7e97U, 0x6eb6U, 0x5ed5U, 0x4ef4U, 0x3e13U, 0x2e32U, 0x1e51U, 0x0e70U,
- 0xff9fU, 0xefbeU, 0xdfddU, 0xcffcU, 0xbf1bU, 0xaf3aU, 0x9f59U, 0x8f78U,
- 0x9188U, 0x81a9U, 0xb1caU, 0xa1ebU, 0xd10cU, 0xc12dU, 0xf14eU, 0xe16fU,
- 0x1080U, 0x00a1U, 0x30c2U, 0x20e3U, 0x5004U, 0x4025U, 0x7046U, 0x6067U,
- 0x83b9U, 0x9398U, 0xa3fbU, 0xb3daU, 0xc33dU, 0xd31cU, 0xe37fU, 0xf35eU,
- 0x02b1U, 0x1290U, 0x22f3U, 0x32d2U, 0x4235U, 0x5214U, 0x6277U, 0x7256U,
- 0xb5eaU, 0xa5cbU, 0x95a8U, 0x8589U, 0xf56eU, 0xe54fU, 0xd52cU, 0xc50dU,
- 0x34e2U, 0x24c3U, 0x14a0U, 0x0481U, 0x7466U, 0x6447U, 0x5424U, 0x4405U,
- 0xa7dbU, 0xb7faU, 0x8799U, 0x97b8U, 0xe75fU, 0xf77eU, 0xc71dU, 0xd73cU,
- 0x26d3U, 0x36f2U, 0x0691U, 0x16b0U, 0x6657U, 0x7676U, 0x4615U, 0x5634U,
- 0xd94cU, 0xc96dU, 0xf90eU, 0xe92fU, 0x99c8U, 0x89e9U, 0xb98aU, 0xa9abU,
- 0x5844U, 0x4865U, 0x7806U, 0x6827U, 0x18c0U, 0x08e1U, 0x3882U, 0x28a3U,
- 0xcb7dU, 0xdb5cU, 0xeb3fU, 0xfb1eU, 0x8bf9U, 0x9bd8U, 0xabbbU, 0xbb9aU,
- 0x4a75U, 0x5a54U, 0x6a37U, 0x7a16U, 0x0af1U, 0x1ad0U, 0x2ab3U, 0x3a92U,
- 0xfd2eU, 0xed0fU, 0xdd6cU, 0xcd4dU, 0xbdaaU, 0xad8bU, 0x9de8U, 0x8dc9U,
- 0x7c26U, 0x6c07U, 0x5c64U, 0x4c45U, 0x3ca2U, 0x2c83U, 0x1ce0U, 0x0cc1U,
- 0xef1fU, 0xff3eU, 0xcf5dU, 0xdf7cU, 0xaf9bU, 0xbfbaU, 0x8fd9U, 0x9ff8U,
- 0x6e17U, 0x7e36U, 0x4e55U, 0x5e74U, 0x2e93U, 0x3eb2U, 0x0ed1U, 0x1ef0U
-};
-
-/*
- * udf_crc
- *
- * PURPOSE
- * Calculate a 16-bit CRC checksum using ITU-T V.41 polynomial.
- *
- * DESCRIPTION
- * The OSTA-UDF(tm) 1.50 standard states that using CRCs is mandatory.
- * The polynomial used is: x^16 + x^12 + x^15 + 1
- *
- * PRE-CONDITIONS
- * data Pointer to the data block.
- * size Size of the data block.
- *
- * POST-CONDITIONS
- * <return> CRC of the data block.
- *
- * HISTORY
- * July 21, 1997 - Andrew E. Mileski
- * Adapted from OSTA-UDF(tm) 1.50 standard.
- */
-uint16_t udf_crc(uint8_t *data, uint32_t size, uint16_t crc)
-{
- while (size--)
- crc = crc_table[(crc >> 8 ^ *(data++)) & 0xffU] ^ (crc << 8);
-
- return crc;
-}
-
-/****************************************************************************/
-#if defined(TEST)
-
-/*
- * PURPOSE
- * Test udf_crc()
- *
- * HISTORY
- * July 21, 1997 - Andrew E. Mileski
- * Adapted from OSTA-UDF(tm) 1.50 standard.
- */
-
-unsigned char bytes[] = { 0x70U, 0x6AU, 0x77U };
-
-int main(void)
-{
- unsigned short x;
-
- x = udf_crc(bytes, sizeof bytes);
- printf("udf_crc: calculated = %4.4x, correct = %4.4x\n", x, 0x3299U);
-
- return 0;
-}
-
-#endif /* defined(TEST) */
-
-/****************************************************************************/
-#if defined(GENERATE)
-
-/*
- * PURPOSE
- * Generate a table for fast 16-bit CRC calculations (any polynomial).
- *
- * DESCRIPTION
- * The ITU-T V.41 polynomial is 010041.
- *
- * HISTORY
- * July 21, 1997 - Andrew E. Mileski
- * Adapted from OSTA-UDF(tm) 1.50 standard.
- */
-
-#include <stdio.h>
-
-int main(int argc, char **argv)
-{
- unsigned long crc, poly;
- int n, i;
-
- /* Get the polynomial */
- sscanf(argv[1], "%lo", &poly);
- if (poly & 0xffff0000U) {
- fprintf(stderr, "polynomial is too large\en");
- exit(1);
- }
-
- printf("/* CRC 0%o */\n", poly);
-
- /* Create a table */
- printf("static unsigned short crc_table[256] = {\n");
- for (n = 0; n < 256; n++) {
- if (n % 8 == 0)
- printf("\t");
- crc = n << 8;
- for (i = 0; i < 8; i++) {
- if (crc & 0x8000U)
- crc = (crc << 1) ^ poly;
- else
- crc <<= 1;
- crc &= 0xFFFFU;
- }
- if (n == 255)
- printf("0x%04xU ", crc);
- else
- printf("0x%04xU, ", crc);
- if (n % 8 == 7)
- printf("\n");
- }
- printf("};\n");
-
- return 0;
-}
-
-#endif /* defined(GENERATE) */
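The table and generator removed above are superseded by the kernel's shared crc_itu_t() helper (lib/crc-itu-t.c), which computes the same CCITT CRC-16 (polynomial 0x1021) that OSTA UDF mandates, with the caller supplying the initial value (UDF passes 0). A bit-at-a-time sketch that reproduces the removed self-test vector without the 256-entry table:

#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

/* MSB-first CRC over polynomial x^16 + x^12 + x^5 + 1 (0x1021),
 * equivalent to the table-driven udf_crc() being deleted above. */
static uint16_t crc_itu_t_bitwise(uint16_t crc, const uint8_t *buf, size_t len)
{
	while (len--) {
		crc ^= (uint16_t)(*buf++ << 8);
		for (int i = 0; i < 8; i++)
			crc = (crc & 0x8000) ? (uint16_t)((crc << 1) ^ 0x1021)
					     : (uint16_t)(crc << 1);
	}
	return crc;
}

int main(void)
{
	/* test vector from the removed crc.c self-test */
	const uint8_t bytes[] = { 0x70, 0x6a, 0x77 };

	printf("crc = 0x%04x (expected 0x3299)\n",
	       crc_itu_t_bitwise(0, bytes, sizeof(bytes)));
	return 0;
}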
diff --git a/fs/udf/dir.c b/fs/udf/dir.c
index 8d8643ada19..62dc270c69d 100644
--- a/fs/udf/dir.c
+++ b/fs/udf/dir.c
@@ -39,13 +39,13 @@
static int do_udf_readdir(struct inode *dir, struct file *filp,
filldir_t filldir, void *dirent)
{
- struct udf_fileident_bh fibh;
+ struct udf_fileident_bh fibh = { .sbh = NULL, .ebh = NULL};
struct fileIdentDesc *fi = NULL;
struct fileIdentDesc cfi;
int block, iblock;
loff_t nf_pos = (filp->f_pos - 1) << 2;
int flen;
- char fname[UDF_NAME_LEN];
+ char *fname = NULL;
char *nameptr;
uint16_t liu;
uint8_t lfi;
@@ -54,23 +54,32 @@ static int do_udf_readdir(struct inode *dir, struct file *filp,
kernel_lb_addr eloc;
uint32_t elen;
sector_t offset;
- int i, num;
+ int i, num, ret = 0;
unsigned int dt_type;
struct extent_position epos = { NULL, 0, {0, 0} };
struct udf_inode_info *iinfo;
if (nf_pos >= size)
- return 0;
+ goto out;
+
+ fname = kmalloc(UDF_NAME_LEN, GFP_NOFS);
+ if (!fname) {
+ ret = -ENOMEM;
+ goto out;
+ }
if (nf_pos == 0)
nf_pos = udf_ext0_offset(dir);
fibh.soffset = fibh.eoffset = nf_pos & (dir->i_sb->s_blocksize - 1);
iinfo = UDF_I(dir);
- if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB) {
- fibh.sbh = fibh.ebh = NULL;
- } else if (inode_bmap(dir, nf_pos >> dir->i_sb->s_blocksize_bits,
- &epos, &eloc, &elen, &offset) == (EXT_RECORDED_ALLOCATED >> 30)) {
+ if (iinfo->i_alloc_type != ICBTAG_FLAG_AD_IN_ICB) {
+ if (inode_bmap(dir, nf_pos >> dir->i_sb->s_blocksize_bits,
+ &epos, &eloc, &elen, &offset)
+ != (EXT_RECORDED_ALLOCATED >> 30)) {
+ ret = -ENOENT;
+ goto out;
+ }
block = udf_get_lb_pblock(dir->i_sb, eloc, offset);
if ((++offset << dir->i_sb->s_blocksize_bits) < elen) {
if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_SHORT)
@@ -83,8 +92,8 @@ static int do_udf_readdir(struct inode *dir, struct file *filp,
}
if (!(fibh.sbh = fibh.ebh = udf_tread(dir->i_sb, block))) {
- brelse(epos.bh);
- return -EIO;
+ ret = -EIO;
+ goto out;
}
if (!(offset & ((16 >> (dir->i_sb->s_blocksize_bits - 9)) - 1))) {
@@ -105,9 +114,6 @@ static int do_udf_readdir(struct inode *dir, struct file *filp,
brelse(bha[i]);
}
}
- } else {
- brelse(epos.bh);
- return -ENOENT;
}
while (nf_pos < size) {
@@ -115,13 +121,8 @@ static int do_udf_readdir(struct inode *dir, struct file *filp,
fi = udf_fileident_read(dir, &nf_pos, &fibh, &cfi, &epos, &eloc,
&elen, &offset);
- if (!fi) {
- if (fibh.sbh != fibh.ebh)
- brelse(fibh.ebh);
- brelse(fibh.sbh);
- brelse(epos.bh);
- return 0;
- }
+ if (!fi)
+ goto out;
liu = le16_to_cpu(cfi.lengthOfImpUse);
lfi = cfi.lengthFileIdent;
@@ -167,53 +168,23 @@ static int do_udf_readdir(struct inode *dir, struct file *filp,
dt_type = DT_UNKNOWN;
}
- if (flen) {
- if (filldir(dirent, fname, flen, filp->f_pos, iblock, dt_type) < 0) {
- if (fibh.sbh != fibh.ebh)
- brelse(fibh.ebh);
- brelse(fibh.sbh);
- brelse(epos.bh);
- return 0;
- }
- }
+ if (flen && filldir(dirent, fname, flen, filp->f_pos,
+ iblock, dt_type) < 0)
+ goto out;
} /* end while */
filp->f_pos = (nf_pos >> 2) + 1;
+out:
if (fibh.sbh != fibh.ebh)
brelse(fibh.ebh);
brelse(fibh.sbh);
brelse(epos.bh);
+ kfree(fname);
- return 0;
+ return ret;
}
-/*
- * udf_readdir
- *
- * PURPOSE
- * Read a directory entry.
- *
- * DESCRIPTION
- * Optional - sys_getdents() will return -ENOTDIR if this routine is not
- * available.
- *
- * Refer to sys_getdents() in fs/readdir.c
- * sys_getdents() -> .
- *
- * PRE-CONDITIONS
- * filp Pointer to directory file.
- * buf Pointer to directory entry buffer.
- * filldir Pointer to filldir function.
- *
- * POST-CONDITIONS
- * <return> >=0 on success.
- *
- * HISTORY
- * July 1, 1997 - Andrew E. Mileski
- * Written, tested, and released.
- */
-
static int udf_readdir(struct file *filp, void *dirent, filldir_t filldir)
{
struct inode *dir = filp->f_path.dentry->d_inode;
diff --git a/fs/udf/ecma_167.h b/fs/udf/ecma_167.h
index 56387711589..a0974df82b3 100644
--- a/fs/udf/ecma_167.h
+++ b/fs/udf/ecma_167.h
@@ -70,19 +70,6 @@ typedef struct {
uint8_t microseconds;
} __attribute__ ((packed)) timestamp;
-typedef struct {
- uint16_t typeAndTimezone;
- int16_t year;
- uint8_t month;
- uint8_t day;
- uint8_t hour;
- uint8_t minute;
- uint8_t second;
- uint8_t centiseconds;
- uint8_t hundredsOfMicroseconds;
- uint8_t microseconds;
-} __attribute__ ((packed)) kernel_timestamp;
-
/* Type and Time Zone (ECMA 167r3 1/7.3.1) */
#define TIMESTAMP_TYPE_MASK 0xF000
#define TIMESTAMP_TYPE_CUT 0x0000
diff --git a/fs/udf/file.c b/fs/udf/file.c
index 97c71ae7c68..0ed6e146a0d 100644
--- a/fs/udf/file.c
+++ b/fs/udf/file.c
@@ -27,7 +27,6 @@
#include "udfdecl.h"
#include <linux/fs.h>
-#include <linux/udf_fs.h>
#include <asm/uaccess.h>
#include <linux/kernel.h>
#include <linux/string.h> /* memset */
@@ -144,40 +143,6 @@ static ssize_t udf_file_aio_write(struct kiocb *iocb, const struct iovec *iov,
return retval;
}
-/*
- * udf_ioctl
- *
- * PURPOSE
- * Issue an ioctl.
- *
- * DESCRIPTION
- * Optional - sys_ioctl() will return -ENOTTY if this routine is not
- * available, and the ioctl cannot be handled without filesystem help.
- *
- * sys_ioctl() handles these ioctls that apply only to regular files:
- * FIBMAP [requires udf_block_map()], FIGETBSZ, FIONREAD
- * These ioctls are also handled by sys_ioctl():
- * FIOCLEX, FIONCLEX, FIONBIO, FIOASYNC
- * All other ioctls are passed to the filesystem.
- *
- * Refer to sys_ioctl() in fs/ioctl.c
- * sys_ioctl() -> .
- *
- * PRE-CONDITIONS
- * inode Pointer to inode that ioctl was issued on.
- * filp Pointer to file that ioctl was issued on.
- * cmd The ioctl command.
- * arg The ioctl argument [can be interpreted as a
- * user-space pointer if desired].
- *
- * POST-CONDITIONS
- * <return> Success (>=0) or an error code (<=0) that
- * sys_ioctl() will return.
- *
- * HISTORY
- * July 1, 1997 - Andrew E. Mileski
- * Written, tested, and released.
- */
int udf_ioctl(struct inode *inode, struct file *filp, unsigned int cmd,
unsigned long arg)
{
@@ -225,18 +190,6 @@ int udf_ioctl(struct inode *inode, struct file *filp, unsigned int cmd,
return result;
}
-/*
- * udf_release_file
- *
- * PURPOSE
- * Called when all references to the file are closed
- *
- * DESCRIPTION
- * Discard prealloced blocks
- *
- * HISTORY
- *
- */
static int udf_release_file(struct inode *inode, struct file *filp)
{
if (filp->f_mode & FMODE_WRITE) {
diff --git a/fs/udf/ialloc.c b/fs/udf/ialloc.c
index 84360315aca..eb9cfa23dc3 100644
--- a/fs/udf/ialloc.c
+++ b/fs/udf/ialloc.c
@@ -21,7 +21,6 @@
#include "udfdecl.h"
#include <linux/fs.h>
#include <linux/quotaops.h>
-#include <linux/udf_fs.h>
#include <linux/sched.h>
#include <linux/slab.h>
@@ -47,11 +46,9 @@ void udf_free_inode(struct inode *inode)
struct logicalVolIntegrityDescImpUse *lvidiu =
udf_sb_lvidiu(sbi);
if (S_ISDIR(inode->i_mode))
- lvidiu->numDirs =
- cpu_to_le32(le32_to_cpu(lvidiu->numDirs) - 1);
+ le32_add_cpu(&lvidiu->numDirs, -1);
else
- lvidiu->numFiles =
- cpu_to_le32(le32_to_cpu(lvidiu->numFiles) - 1);
+ le32_add_cpu(&lvidiu->numFiles, -1);
mark_buffer_dirty(sbi->s_lvid_bh);
}
@@ -105,11 +102,9 @@ struct inode *udf_new_inode(struct inode *dir, int mode, int *err)
lvhd = (struct logicalVolHeaderDesc *)
(lvid->logicalVolContentsUse);
if (S_ISDIR(mode))
- lvidiu->numDirs =
- cpu_to_le32(le32_to_cpu(lvidiu->numDirs) + 1);
+ le32_add_cpu(&lvidiu->numDirs, 1);
else
- lvidiu->numFiles =
- cpu_to_le32(le32_to_cpu(lvidiu->numFiles) + 1);
+ le32_add_cpu(&lvidiu->numFiles, 1);
iinfo->i_unique = uniqueID = le64_to_cpu(lvhd->uniqueID);
if (!(++uniqueID & 0x00000000FFFFFFFFUL))
uniqueID += 16;
diff --git a/fs/udf/inode.c b/fs/udf/inode.c
index 24cfa55d0fd..6e74b117aaf 100644
--- a/fs/udf/inode.c
+++ b/fs/udf/inode.c
@@ -37,6 +37,7 @@
#include <linux/buffer_head.h>
#include <linux/writeback.h>
#include <linux/slab.h>
+#include <linux/crc-itu-t.h>
#include "udf_i.h"
#include "udf_sb.h"
@@ -66,22 +67,7 @@ static void udf_update_extents(struct inode *,
struct extent_position *);
static int udf_get_block(struct inode *, sector_t, struct buffer_head *, int);
-/*
- * udf_delete_inode
- *
- * PURPOSE
- * Clean-up before the specified inode is destroyed.
- *
- * DESCRIPTION
- * This routine is called when the kernel destroys an inode structure
- * ie. when iput() finds i_count == 0.
- *
- * HISTORY
- * July 1, 1997 - Andrew E. Mileski
- * Written, tested, and released.
- *
- * Called at the last iput() if i_nlink is zero.
- */
+
void udf_delete_inode(struct inode *inode)
{
truncate_inode_pages(&inode->i_data, 0);
@@ -323,9 +309,6 @@ static int udf_get_block(struct inode *inode, sector_t block,
lock_kernel();
- if (block < 0)
- goto abort_negative;
-
iinfo = UDF_I(inode);
if (block == iinfo->i_next_alloc_block + 1) {
iinfo->i_next_alloc_block++;
@@ -347,10 +330,6 @@ static int udf_get_block(struct inode *inode, sector_t block,
abort:
unlock_kernel();
return err;
-
-abort_negative:
- udf_warning(inode->i_sb, "udf_get_block", "block < 0");
- goto abort;
}
static struct buffer_head *udf_getblk(struct inode *inode, long block,
@@ -1116,42 +1095,36 @@ static void __udf_read_inode(struct inode *inode)
fe = (struct fileEntry *)bh->b_data;
if (fe->icbTag.strategyType == cpu_to_le16(4096)) {
- struct buffer_head *ibh = NULL, *nbh = NULL;
- struct indirectEntry *ie;
+ struct buffer_head *ibh;
ibh = udf_read_ptagged(inode->i_sb, iinfo->i_location, 1,
&ident);
- if (ident == TAG_IDENT_IE) {
- if (ibh) {
- kernel_lb_addr loc;
- ie = (struct indirectEntry *)ibh->b_data;
-
- loc = lelb_to_cpu(ie->indirectICB.extLocation);
-
- if (ie->indirectICB.extLength &&
- (nbh = udf_read_ptagged(inode->i_sb, loc, 0,
- &ident))) {
- if (ident == TAG_IDENT_FE ||
- ident == TAG_IDENT_EFE) {
- memcpy(&iinfo->i_location,
- &loc,
- sizeof(kernel_lb_addr));
- brelse(bh);
- brelse(ibh);
- brelse(nbh);
- __udf_read_inode(inode);
- return;
- } else {
- brelse(nbh);
- brelse(ibh);
- }
- } else {
+ if (ident == TAG_IDENT_IE && ibh) {
+ struct buffer_head *nbh = NULL;
+ kernel_lb_addr loc;
+ struct indirectEntry *ie;
+
+ ie = (struct indirectEntry *)ibh->b_data;
+ loc = lelb_to_cpu(ie->indirectICB.extLocation);
+
+ if (ie->indirectICB.extLength &&
+ (nbh = udf_read_ptagged(inode->i_sb, loc, 0,
+ &ident))) {
+ if (ident == TAG_IDENT_FE ||
+ ident == TAG_IDENT_EFE) {
+ memcpy(&iinfo->i_location,
+ &loc,
+ sizeof(kernel_lb_addr));
+ brelse(bh);
brelse(ibh);
+ brelse(nbh);
+ __udf_read_inode(inode);
+ return;
}
+ brelse(nbh);
}
- } else {
- brelse(ibh);
}
+ brelse(ibh);
} else if (fe->icbTag.strategyType != cpu_to_le16(4)) {
printk(KERN_ERR "udf: unsupported strategy type: %d\n",
le16_to_cpu(fe->icbTag.strategyType));
@@ -1168,8 +1141,6 @@ static void udf_fill_inode(struct inode *inode, struct buffer_head *bh)
{
struct fileEntry *fe;
struct extendedFileEntry *efe;
- time_t convtime;
- long convtime_usec;
int offset;
struct udf_sb_info *sbi = UDF_SB(inode->i_sb);
struct udf_inode_info *iinfo = UDF_I(inode);
@@ -1257,29 +1228,15 @@ static void udf_fill_inode(struct inode *inode, struct buffer_head *bh)
inode->i_blocks = le64_to_cpu(fe->logicalBlocksRecorded) <<
(inode->i_sb->s_blocksize_bits - 9);
- if (udf_stamp_to_time(&convtime, &convtime_usec,
- lets_to_cpu(fe->accessTime))) {
- inode->i_atime.tv_sec = convtime;
- inode->i_atime.tv_nsec = convtime_usec * 1000;
- } else {
+ if (!udf_disk_stamp_to_time(&inode->i_atime, fe->accessTime))
inode->i_atime = sbi->s_record_time;
- }
- if (udf_stamp_to_time(&convtime, &convtime_usec,
- lets_to_cpu(fe->modificationTime))) {
- inode->i_mtime.tv_sec = convtime;
- inode->i_mtime.tv_nsec = convtime_usec * 1000;
- } else {
+ if (!udf_disk_stamp_to_time(&inode->i_mtime,
+ fe->modificationTime))
inode->i_mtime = sbi->s_record_time;
- }
- if (udf_stamp_to_time(&convtime, &convtime_usec,
- lets_to_cpu(fe->attrTime))) {
- inode->i_ctime.tv_sec = convtime;
- inode->i_ctime.tv_nsec = convtime_usec * 1000;
- } else {
+ if (!udf_disk_stamp_to_time(&inode->i_ctime, fe->attrTime))
inode->i_ctime = sbi->s_record_time;
- }
iinfo->i_unique = le64_to_cpu(fe->uniqueID);
iinfo->i_lenEAttr = le32_to_cpu(fe->lengthExtendedAttr);
@@ -1289,37 +1246,18 @@ static void udf_fill_inode(struct inode *inode, struct buffer_head *bh)
inode->i_blocks = le64_to_cpu(efe->logicalBlocksRecorded) <<
(inode->i_sb->s_blocksize_bits - 9);
- if (udf_stamp_to_time(&convtime, &convtime_usec,
- lets_to_cpu(efe->accessTime))) {
- inode->i_atime.tv_sec = convtime;
- inode->i_atime.tv_nsec = convtime_usec * 1000;
- } else {
+ if (!udf_disk_stamp_to_time(&inode->i_atime, efe->accessTime))
inode->i_atime = sbi->s_record_time;
- }
- if (udf_stamp_to_time(&convtime, &convtime_usec,
- lets_to_cpu(efe->modificationTime))) {
- inode->i_mtime.tv_sec = convtime;
- inode->i_mtime.tv_nsec = convtime_usec * 1000;
- } else {
+ if (!udf_disk_stamp_to_time(&inode->i_mtime,
+ efe->modificationTime))
inode->i_mtime = sbi->s_record_time;
- }
- if (udf_stamp_to_time(&convtime, &convtime_usec,
- lets_to_cpu(efe->createTime))) {
- iinfo->i_crtime.tv_sec = convtime;
- iinfo->i_crtime.tv_nsec = convtime_usec * 1000;
- } else {
+ if (!udf_disk_stamp_to_time(&iinfo->i_crtime, efe->createTime))
iinfo->i_crtime = sbi->s_record_time;
- }
- if (udf_stamp_to_time(&convtime, &convtime_usec,
- lets_to_cpu(efe->attrTime))) {
- inode->i_ctime.tv_sec = convtime;
- inode->i_ctime.tv_nsec = convtime_usec * 1000;
- } else {
+ if (!udf_disk_stamp_to_time(&inode->i_ctime, efe->attrTime))
inode->i_ctime = sbi->s_record_time;
- }
iinfo->i_unique = le64_to_cpu(efe->uniqueID);
iinfo->i_lenEAttr = le32_to_cpu(efe->lengthExtendedAttr);
@@ -1338,6 +1276,7 @@ static void udf_fill_inode(struct inode *inode, struct buffer_head *bh)
case ICBTAG_FILE_TYPE_REALTIME:
case ICBTAG_FILE_TYPE_REGULAR:
case ICBTAG_FILE_TYPE_UNDEF:
+ case ICBTAG_FILE_TYPE_VAT20:
if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB)
inode->i_data.a_ops = &udf_adinicb_aops;
else
@@ -1363,6 +1302,15 @@ static void udf_fill_inode(struct inode *inode, struct buffer_head *bh)
inode->i_op = &page_symlink_inode_operations;
inode->i_mode = S_IFLNK | S_IRWXUGO;
break;
+ case ICBTAG_FILE_TYPE_MAIN:
+ udf_debug("METADATA FILE-----\n");
+ break;
+ case ICBTAG_FILE_TYPE_MIRROR:
+ udf_debug("METADATA MIRROR FILE-----\n");
+ break;
+ case ICBTAG_FILE_TYPE_BITMAP:
+ udf_debug("METADATA BITMAP FILE-----\n");
+ break;
default:
printk(KERN_ERR "udf: udf_fill_inode(ino %ld) failed unknown "
"file type=%d\n", inode->i_ino,
@@ -1416,21 +1364,6 @@ static mode_t udf_convert_permissions(struct fileEntry *fe)
return mode;
}
-/*
- * udf_write_inode
- *
- * PURPOSE
- * Write out the specified inode.
- *
- * DESCRIPTION
- * This routine is called whenever an inode is synced.
- * Currently this routine is just a placeholder.
- *
- * HISTORY
- * July 1, 1997 - Andrew E. Mileski
- * Written, tested, and released.
- */
-
int udf_write_inode(struct inode *inode, int sync)
{
int ret;
@@ -1455,7 +1388,6 @@ static int udf_update_inode(struct inode *inode, int do_sync)
uint32_t udfperms;
uint16_t icbflags;
uint16_t crclen;
- kernel_timestamp cpu_time;
int err = 0;
struct udf_sb_info *sbi = UDF_SB(inode->i_sb);
unsigned char blocksize_bits = inode->i_sb->s_blocksize_bits;
@@ -1488,9 +1420,9 @@ static int udf_update_inode(struct inode *inode, int do_sync)
iinfo->i_location.
logicalBlockNum);
use->descTag.descCRCLength = cpu_to_le16(crclen);
- use->descTag.descCRC = cpu_to_le16(udf_crc((char *)use +
- sizeof(tag), crclen,
- 0));
+ use->descTag.descCRC = cpu_to_le16(crc_itu_t(0, (char *)use +
+ sizeof(tag),
+ crclen));
use->descTag.tagChecksum = udf_tag_checksum(&use->descTag);
mark_buffer_dirty(bh);
@@ -1558,12 +1490,9 @@ static int udf_update_inode(struct inode *inode, int do_sync)
(inode->i_blocks + (1 << (blocksize_bits - 9)) - 1) >>
(blocksize_bits - 9));
- if (udf_time_to_stamp(&cpu_time, inode->i_atime))
- fe->accessTime = cpu_to_lets(cpu_time);
- if (udf_time_to_stamp(&cpu_time, inode->i_mtime))
- fe->modificationTime = cpu_to_lets(cpu_time);
- if (udf_time_to_stamp(&cpu_time, inode->i_ctime))
- fe->attrTime = cpu_to_lets(cpu_time);
+ udf_time_to_disk_stamp(&fe->accessTime, inode->i_atime);
+ udf_time_to_disk_stamp(&fe->modificationTime, inode->i_mtime);
+ udf_time_to_disk_stamp(&fe->attrTime, inode->i_ctime);
memset(&(fe->impIdent), 0, sizeof(regid));
strcpy(fe->impIdent.ident, UDF_ID_DEVELOPER);
fe->impIdent.identSuffix[0] = UDF_OS_CLASS_UNIX;
@@ -1598,14 +1527,10 @@ static int udf_update_inode(struct inode *inode, int do_sync)
iinfo->i_crtime.tv_nsec > inode->i_ctime.tv_nsec))
iinfo->i_crtime = inode->i_ctime;
- if (udf_time_to_stamp(&cpu_time, inode->i_atime))
- efe->accessTime = cpu_to_lets(cpu_time);
- if (udf_time_to_stamp(&cpu_time, inode->i_mtime))
- efe->modificationTime = cpu_to_lets(cpu_time);
- if (udf_time_to_stamp(&cpu_time, iinfo->i_crtime))
- efe->createTime = cpu_to_lets(cpu_time);
- if (udf_time_to_stamp(&cpu_time, inode->i_ctime))
- efe->attrTime = cpu_to_lets(cpu_time);
+ udf_time_to_disk_stamp(&efe->accessTime, inode->i_atime);
+ udf_time_to_disk_stamp(&efe->modificationTime, inode->i_mtime);
+ udf_time_to_disk_stamp(&efe->createTime, iinfo->i_crtime);
+ udf_time_to_disk_stamp(&efe->attrTime, inode->i_ctime);
memset(&(efe->impIdent), 0, sizeof(regid));
strcpy(efe->impIdent.ident, UDF_ID_DEVELOPER);
@@ -1660,8 +1585,8 @@ static int udf_update_inode(struct inode *inode, int do_sync)
crclen += iinfo->i_lenEAttr + iinfo->i_lenAlloc -
sizeof(tag);
fe->descTag.descCRCLength = cpu_to_le16(crclen);
- fe->descTag.descCRC = cpu_to_le16(udf_crc((char *)fe + sizeof(tag),
- crclen, 0));
+ fe->descTag.descCRC = cpu_to_le16(crc_itu_t(0, (char *)fe + sizeof(tag),
+ crclen));
fe->descTag.tagChecksum = udf_tag_checksum(&fe->descTag);
/* write the data blocks */
@@ -1778,9 +1703,7 @@ int8_t udf_add_aext(struct inode *inode, struct extent_position *epos,
if (epos->bh) {
aed = (struct allocExtDesc *)epos->bh->b_data;
- aed->lengthAllocDescs =
- cpu_to_le32(le32_to_cpu(
- aed->lengthAllocDescs) + adsize);
+ le32_add_cpu(&aed->lengthAllocDescs, adsize);
} else {
iinfo->i_lenAlloc += adsize;
mark_inode_dirty(inode);
@@ -1830,9 +1753,7 @@ int8_t udf_add_aext(struct inode *inode, struct extent_position *epos,
mark_inode_dirty(inode);
} else {
aed = (struct allocExtDesc *)epos->bh->b_data;
- aed->lengthAllocDescs =
- cpu_to_le32(le32_to_cpu(aed->lengthAllocDescs) +
- adsize);
+ le32_add_cpu(&aed->lengthAllocDescs, adsize);
if (!UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_STRICT) ||
UDF_SB(inode->i_sb)->s_udfrev >= 0x0201)
udf_update_tag(epos->bh->b_data,
@@ -2046,9 +1967,7 @@ int8_t udf_delete_aext(struct inode *inode, struct extent_position epos,
mark_inode_dirty(inode);
} else {
aed = (struct allocExtDesc *)oepos.bh->b_data;
- aed->lengthAllocDescs =
- cpu_to_le32(le32_to_cpu(aed->lengthAllocDescs) -
- (2 * adsize));
+ le32_add_cpu(&aed->lengthAllocDescs, -(2 * adsize));
if (!UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_STRICT) ||
UDF_SB(inode->i_sb)->s_udfrev >= 0x0201)
udf_update_tag(oepos.bh->b_data,
@@ -2065,9 +1984,7 @@ int8_t udf_delete_aext(struct inode *inode, struct extent_position epos,
mark_inode_dirty(inode);
} else {
aed = (struct allocExtDesc *)oepos.bh->b_data;
- aed->lengthAllocDescs =
- cpu_to_le32(le32_to_cpu(aed->lengthAllocDescs) -
- adsize);
+ le32_add_cpu(&aed->lengthAllocDescs, -adsize);
if (!UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_STRICT) ||
UDF_SB(inode->i_sb)->s_udfrev >= 0x0201)
udf_update_tag(oepos.bh->b_data,
@@ -2095,11 +2012,6 @@ int8_t inode_bmap(struct inode *inode, sector_t block,
int8_t etype;
struct udf_inode_info *iinfo;
- if (block < 0) {
- printk(KERN_ERR "udf: inode_bmap: block < 0\n");
- return -1;
- }
-
iinfo = UDF_I(inode);
pos->offset = 0;
pos->block = iinfo->i_location;
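The inode.c hunks above switch from udf_stamp_to_time(), which returned seconds and microseconds separately, to udf_disk_stamp_to_time(), which fills the inode timestamps directly. Ignoring the time-zone handling, the sub-second part of an on-disc UDF timestamp combines three fields; a small sketch of that arithmetic follows (the field names follow the ECMA-167 timestamp layout, the helper name is hypothetical):

#include <stdio.h>

/* Sub-second fields of an ECMA-167/UDF timestamp. */
struct udf_subsecond {
	unsigned char centiseconds;		/* units of 10^-2 s */
	unsigned char hundredsOfMicroseconds;	/* units of 10^-4 s */
	unsigned char microseconds;		/* units of 10^-6 s */
};

/* Hypothetical helper: combine the three fields into nanoseconds,
 * i.e. the value a struct timespec tv_nsec would carry. */
static long udf_subsecond_to_nsec(const struct udf_subsecond *t)
{
	return t->centiseconds * 10000000L +
	       t->hundredsOfMicroseconds * 100000L +
	       t->microseconds * 1000L;
}

int main(void)
{
	struct udf_subsecond t = { 12, 34, 56 };	/* 0.123456 s */

	printf("%ld ns\n", udf_subsecond_to_nsec(&t));	/* 123456000 */
	return 0;
}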
diff --git a/fs/udf/lowlevel.c b/fs/udf/lowlevel.c
index 579bae71e67..703843f30ff 100644
--- a/fs/udf/lowlevel.c
+++ b/fs/udf/lowlevel.c
@@ -23,7 +23,6 @@
#include <linux/cdrom.h>
#include <asm/uaccess.h>
-#include <linux/udf_fs.h>
#include "udf_sb.h"
unsigned int udf_get_last_session(struct super_block *sb)
diff --git a/fs/udf/misc.c b/fs/udf/misc.c
index a1d6da0caf7..84bf0fd4a4f 100644
--- a/fs/udf/misc.c
+++ b/fs/udf/misc.c
@@ -23,8 +23,8 @@
#include <linux/fs.h>
#include <linux/string.h>
-#include <linux/udf_fs.h>
#include <linux/buffer_head.h>
+#include <linux/crc-itu-t.h>
#include "udf_i.h"
#include "udf_sb.h"
@@ -136,8 +136,8 @@ struct genericFormat *udf_add_extendedattr(struct inode *inode, uint32_t size,
/* rewrite CRC + checksum of eahd */
crclen = sizeof(struct extendedAttrHeaderDesc) - sizeof(tag);
eahd->descTag.descCRCLength = cpu_to_le16(crclen);
- eahd->descTag.descCRC = cpu_to_le16(udf_crc((char *)eahd +
- sizeof(tag), crclen, 0));
+ eahd->descTag.descCRC = cpu_to_le16(crc_itu_t(0, (char *)eahd +
+ sizeof(tag), crclen));
eahd->descTag.tagChecksum = udf_tag_checksum(&eahd->descTag);
iinfo->i_lenEAttr += size;
return (struct genericFormat *)&ea[offset];
@@ -204,16 +204,15 @@ struct buffer_head *udf_read_tagged(struct super_block *sb, uint32_t block,
{
tag *tag_p;
struct buffer_head *bh = NULL;
- struct udf_sb_info *sbi = UDF_SB(sb);
/* Read the block */
if (block == 0xFFFFFFFF)
return NULL;
- bh = udf_tread(sb, block + sbi->s_session);
+ bh = udf_tread(sb, block);
if (!bh) {
udf_debug("block=%d, location=%d: read failed\n",
- block + sbi->s_session, location);
+ block, location);
return NULL;
}
@@ -223,8 +222,7 @@ struct buffer_head *udf_read_tagged(struct super_block *sb, uint32_t block,
if (location != le32_to_cpu(tag_p->tagLocation)) {
udf_debug("location mismatch block %u, tag %u != %u\n",
- block + sbi->s_session,
- le32_to_cpu(tag_p->tagLocation), location);
+ block, le32_to_cpu(tag_p->tagLocation), location);
goto error_out;
}
@@ -244,13 +242,13 @@ struct buffer_head *udf_read_tagged(struct super_block *sb, uint32_t block,
/* Verify the descriptor CRC */
if (le16_to_cpu(tag_p->descCRCLength) + sizeof(tag) > sb->s_blocksize ||
- le16_to_cpu(tag_p->descCRC) == udf_crc(bh->b_data + sizeof(tag),
- le16_to_cpu(tag_p->descCRCLength), 0))
+ le16_to_cpu(tag_p->descCRC) == crc_itu_t(0,
+ bh->b_data + sizeof(tag),
+ le16_to_cpu(tag_p->descCRCLength)))
return bh;
- udf_debug("Crc failure block %d: crc = %d, crclen = %d\n",
- block + sbi->s_session, le16_to_cpu(tag_p->descCRC),
- le16_to_cpu(tag_p->descCRCLength));
+ udf_debug("Crc failure block %d: crc = %d, crclen = %d\n", block,
+ le16_to_cpu(tag_p->descCRC), le16_to_cpu(tag_p->descCRCLength));
error_out:
brelse(bh);
@@ -270,7 +268,7 @@ void udf_update_tag(char *data, int length)
length -= sizeof(tag);
tptr->descCRCLength = cpu_to_le16(length);
- tptr->descCRC = cpu_to_le16(udf_crc(data + sizeof(tag), length, 0));
+ tptr->descCRC = cpu_to_le16(crc_itu_t(0, data + sizeof(tag), length));
tptr->tagChecksum = udf_tag_checksum(tptr);
}
diff --git a/fs/udf/namei.c b/fs/udf/namei.c
index 112a5fb0b27..ba5537d4bc1 100644
--- a/fs/udf/namei.c
+++ b/fs/udf/namei.c
@@ -31,6 +31,7 @@
#include <linux/smp_lock.h>
#include <linux/buffer_head.h>
#include <linux/sched.h>
+#include <linux/crc-itu-t.h>
static inline int udf_match(int len1, const char *name1, int len2,
const char *name2)
@@ -97,25 +98,23 @@ int udf_write_fi(struct inode *inode, struct fileIdentDesc *cfi,
memset(fibh->ebh->b_data, 0x00, padlen + offset);
}
- crc = udf_crc((uint8_t *)cfi + sizeof(tag),
- sizeof(struct fileIdentDesc) - sizeof(tag), 0);
+ crc = crc_itu_t(0, (uint8_t *)cfi + sizeof(tag),
+ sizeof(struct fileIdentDesc) - sizeof(tag));
if (fibh->sbh == fibh->ebh) {
- crc = udf_crc((uint8_t *)sfi->impUse,
+ crc = crc_itu_t(crc, (uint8_t *)sfi->impUse,
crclen + sizeof(tag) -
- sizeof(struct fileIdentDesc), crc);
+ sizeof(struct fileIdentDesc));
} else if (sizeof(struct fileIdentDesc) >= -fibh->soffset) {
- crc = udf_crc(fibh->ebh->b_data +
+ crc = crc_itu_t(crc, fibh->ebh->b_data +
sizeof(struct fileIdentDesc) +
fibh->soffset,
crclen + sizeof(tag) -
- sizeof(struct fileIdentDesc),
- crc);
+ sizeof(struct fileIdentDesc));
} else {
- crc = udf_crc((uint8_t *)sfi->impUse,
- -fibh->soffset - sizeof(struct fileIdentDesc),
- crc);
- crc = udf_crc(fibh->ebh->b_data, fibh->eoffset, crc);
+ crc = crc_itu_t(crc, (uint8_t *)sfi->impUse,
+ -fibh->soffset - sizeof(struct fileIdentDesc));
+ crc = crc_itu_t(crc, fibh->ebh->b_data, fibh->eoffset);
}
cfi->descTag.descCRC = cpu_to_le16(crc);
@@ -149,7 +148,7 @@ static struct fileIdentDesc *udf_find_entry(struct inode *dir,
struct fileIdentDesc *fi = NULL;
loff_t f_pos;
int block, flen;
- char fname[UDF_NAME_LEN];
+ char *fname = NULL;
char *nameptr;
uint8_t lfi;
uint16_t liu;
@@ -163,12 +162,12 @@ static struct fileIdentDesc *udf_find_entry(struct inode *dir,
size = udf_ext0_offset(dir) + dir->i_size;
f_pos = udf_ext0_offset(dir);
+ fibh->sbh = fibh->ebh = NULL;
fibh->soffset = fibh->eoffset = f_pos & (dir->i_sb->s_blocksize - 1);
- if (dinfo->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB)
- fibh->sbh = fibh->ebh = NULL;
- else if (inode_bmap(dir, f_pos >> dir->i_sb->s_blocksize_bits,
- &epos, &eloc, &elen, &offset) ==
- (EXT_RECORDED_ALLOCATED >> 30)) {
+ if (dinfo->i_alloc_type != ICBTAG_FLAG_AD_IN_ICB) {
+ if (inode_bmap(dir, f_pos >> dir->i_sb->s_blocksize_bits, &epos,
+ &eloc, &elen, &offset) != (EXT_RECORDED_ALLOCATED >> 30))
+ goto out_err;
block = udf_get_lb_pblock(dir->i_sb, eloc, offset);
if ((++offset << dir->i_sb->s_blocksize_bits) < elen) {
if (dinfo->i_alloc_type == ICBTAG_FLAG_AD_SHORT)
@@ -179,25 +178,19 @@ static struct fileIdentDesc *udf_find_entry(struct inode *dir,
offset = 0;
fibh->sbh = fibh->ebh = udf_tread(dir->i_sb, block);
- if (!fibh->sbh) {
- brelse(epos.bh);
- return NULL;
- }
- } else {
- brelse(epos.bh);
- return NULL;
+ if (!fibh->sbh)
+ goto out_err;
}
+ fname = kmalloc(UDF_NAME_LEN, GFP_NOFS);
+ if (!fname)
+ goto out_err;
+
while (f_pos < size) {
fi = udf_fileident_read(dir, &f_pos, fibh, cfi, &epos, &eloc,
&elen, &offset);
- if (!fi) {
- if (fibh->sbh != fibh->ebh)
- brelse(fibh->ebh);
- brelse(fibh->sbh);
- brelse(epos.bh);
- return NULL;
- }
+ if (!fi)
+ goto out_err;
liu = le16_to_cpu(cfi->lengthOfImpUse);
lfi = cfi->lengthFileIdent;
@@ -237,53 +230,22 @@ static struct fileIdentDesc *udf_find_entry(struct inode *dir,
flen = udf_get_filename(dir->i_sb, nameptr, fname, lfi);
if (flen && udf_match(flen, fname, dentry->d_name.len,
- dentry->d_name.name)) {
- brelse(epos.bh);
- return fi;
- }
+ dentry->d_name.name))
+ goto out_ok;
}
+out_err:
+ fi = NULL;
if (fibh->sbh != fibh->ebh)
brelse(fibh->ebh);
brelse(fibh->sbh);
+out_ok:
brelse(epos.bh);
+ kfree(fname);
- return NULL;
+ return fi;
}
-/*
- * udf_lookup
- *
- * PURPOSE
- * Look-up the inode for a given name.
- *
- * DESCRIPTION
- * Required - lookup_dentry() will return -ENOTDIR if this routine is not
- * available for a directory. The filesystem is useless if this routine is
- * not available for at least the filesystem's root directory.
- *
- * This routine is passed an incomplete dentry - it must be completed by
- * calling d_add(dentry, inode). If the name does not exist, then the
- * specified inode must be set to null. An error should only be returned
- * when the lookup fails for a reason other than the name not existing.
- * Note that the directory inode semaphore is held during the call.
- *
- * Refer to lookup_dentry() in fs/namei.c
- * lookup_dentry() -> lookup() -> real_lookup() -> .
- *
- * PRE-CONDITIONS
- * dir Pointer to inode of parent directory.
- * dentry Pointer to dentry to complete.
- * nd Pointer to lookup nameidata
- *
- * POST-CONDITIONS
- * <return> Zero on success.
- *
- * HISTORY
- * July 1, 1997 - Andrew E. Mileski
- * Written, tested, and released.
- */
-
static struct dentry *udf_lookup(struct inode *dir, struct dentry *dentry,
struct nameidata *nd)
{
@@ -336,11 +298,9 @@ static struct fileIdentDesc *udf_add_entry(struct inode *dir,
{
struct super_block *sb = dir->i_sb;
struct fileIdentDesc *fi = NULL;
- char name[UDF_NAME_LEN], fname[UDF_NAME_LEN];
+ char *name = NULL;
int namelen;
loff_t f_pos;
- int flen;
- char *nameptr;
loff_t size = udf_ext0_offset(dir) + dir->i_size;
int nfidlen;
uint8_t lfi;
@@ -352,16 +312,23 @@ static struct fileIdentDesc *udf_add_entry(struct inode *dir,
struct extent_position epos = {};
struct udf_inode_info *dinfo;
+ fibh->sbh = fibh->ebh = NULL;
+ name = kmalloc(UDF_NAME_LEN, GFP_NOFS);
+ if (!name) {
+ *err = -ENOMEM;
+ goto out_err;
+ }
+
if (dentry) {
if (!dentry->d_name.len) {
*err = -EINVAL;
- return NULL;
+ goto out_err;
}
namelen = udf_put_filename(sb, dentry->d_name.name, name,
dentry->d_name.len);
if (!namelen) {
*err = -ENAMETOOLONG;
- return NULL;
+ goto out_err;
}
} else {
namelen = 0;
@@ -373,11 +340,14 @@ static struct fileIdentDesc *udf_add_entry(struct inode *dir,
fibh->soffset = fibh->eoffset = f_pos & (dir->i_sb->s_blocksize - 1);
dinfo = UDF_I(dir);
- if (dinfo->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB)
- fibh->sbh = fibh->ebh = NULL;
- else if (inode_bmap(dir, f_pos >> dir->i_sb->s_blocksize_bits,
- &epos, &eloc, &elen, &offset) ==
- (EXT_RECORDED_ALLOCATED >> 30)) {
+ if (dinfo->i_alloc_type != ICBTAG_FLAG_AD_IN_ICB) {
+ if (inode_bmap(dir, f_pos >> dir->i_sb->s_blocksize_bits, &epos,
+ &eloc, &elen, &offset) != (EXT_RECORDED_ALLOCATED >> 30)) {
+ block = udf_get_lb_pblock(dir->i_sb,
+ dinfo->i_location, 0);
+ fibh->soffset = fibh->eoffset = sb->s_blocksize;
+ goto add;
+ }
block = udf_get_lb_pblock(dir->i_sb, eloc, offset);
if ((++offset << dir->i_sb->s_blocksize_bits) < elen) {
if (dinfo->i_alloc_type == ICBTAG_FLAG_AD_SHORT)
@@ -389,17 +359,11 @@ static struct fileIdentDesc *udf_add_entry(struct inode *dir,
fibh->sbh = fibh->ebh = udf_tread(dir->i_sb, block);
if (!fibh->sbh) {
- brelse(epos.bh);
*err = -EIO;
- return NULL;
+ goto out_err;
}
block = dinfo->i_location.logicalBlockNum;
- } else {
- block = udf_get_lb_pblock(dir->i_sb, dinfo->i_location, 0);
- fibh->sbh = fibh->ebh = NULL;
- fibh->soffset = fibh->eoffset = sb->s_blocksize;
- goto add;
}
while (f_pos < size) {
@@ -407,41 +371,16 @@ static struct fileIdentDesc *udf_add_entry(struct inode *dir,
&elen, &offset);
if (!fi) {
- if (fibh->sbh != fibh->ebh)
- brelse(fibh->ebh);
- brelse(fibh->sbh);
- brelse(epos.bh);
*err = -EIO;
- return NULL;
+ goto out_err;
}
liu = le16_to_cpu(cfi->lengthOfImpUse);
lfi = cfi->lengthFileIdent;
- if (fibh->sbh == fibh->ebh)
- nameptr = fi->fileIdent + liu;
- else {
- int poffset; /* Unpaded ending offset */
-
- poffset = fibh->soffset + sizeof(struct fileIdentDesc) +
- liu + lfi;
-
- if (poffset >= lfi)
- nameptr = (char *)(fibh->ebh->b_data +
- poffset - lfi);
- else {
- nameptr = fname;
- memcpy(nameptr, fi->fileIdent + liu,
- lfi - poffset);
- memcpy(nameptr + lfi - poffset,
- fibh->ebh->b_data, poffset);
- }
- }
-
if ((cfi->fileCharacteristics & FID_FILE_CHAR_DELETED) != 0) {
if (((sizeof(struct fileIdentDesc) +
liu + lfi + 3) & ~3) == nfidlen) {
- brelse(epos.bh);
cfi->descTag.tagSerialNum = cpu_to_le16(1);
cfi->fileVersionNum = cpu_to_le16(1);
cfi->fileCharacteristics = 0;
@@ -449,27 +388,13 @@ static struct fileIdentDesc *udf_add_entry(struct inode *dir,
cfi->lengthOfImpUse = cpu_to_le16(0);
if (!udf_write_fi(dir, cfi, fi, fibh, NULL,
name))
- return fi;
+ goto out_ok;
else {
*err = -EIO;
- return NULL;
+ goto out_err;
}
}
}
-
- if (!lfi || !dentry)
- continue;
-
- flen = udf_get_filename(dir->i_sb, nameptr, fname, lfi);
- if (flen && udf_match(flen, fname, dentry->d_name.len,
- dentry->d_name.name)) {
- if (fibh->sbh != fibh->ebh)
- brelse(fibh->ebh);
- brelse(fibh->sbh);
- brelse(epos.bh);
- *err = -EEXIST;
- return NULL;
- }
}
add:
@@ -496,7 +421,7 @@ add:
fibh->sbh = fibh->ebh =
udf_expand_dir_adinicb(dir, &block, err);
if (!fibh->sbh)
- return NULL;
+ goto out_err;
epos.block = dinfo->i_location;
epos.offset = udf_file_entry_alloc_offset(dir);
/* Load extent udf_expand_dir_adinicb() has created */
@@ -537,11 +462,8 @@ add:
dir->i_sb->s_blocksize_bits);
fibh->ebh = udf_bread(dir,
f_pos >> dir->i_sb->s_blocksize_bits, 1, err);
- if (!fibh->ebh) {
- brelse(epos.bh);
- brelse(fibh->sbh);
- return NULL;
- }
+ if (!fibh->ebh)
+ goto out_err;
if (!fibh->soffset) {
if (udf_next_aext(dir, &epos, &eloc, &elen, 1) ==
@@ -572,20 +494,25 @@ add:
cfi->lengthFileIdent = namelen;
cfi->lengthOfImpUse = cpu_to_le16(0);
if (!udf_write_fi(dir, cfi, fi, fibh, NULL, name)) {
- brelse(epos.bh);
dir->i_size += nfidlen;
if (dinfo->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB)
dinfo->i_lenAlloc += nfidlen;
mark_inode_dirty(dir);
- return fi;
+ goto out_ok;
} else {
- brelse(epos.bh);
- if (fibh->sbh != fibh->ebh)
- brelse(fibh->ebh);
- brelse(fibh->sbh);
*err = -EIO;
- return NULL;
+ goto out_err;
}
+
+out_err:
+ fi = NULL;
+ if (fibh->sbh != fibh->ebh)
+ brelse(fibh->ebh);
+ brelse(fibh->sbh);
+out_ok:
+ brelse(epos.bh);
+ kfree(name);
+ return fi;
}
static int udf_delete_entry(struct inode *inode, struct fileIdentDesc *fi,
@@ -940,7 +867,7 @@ static int udf_symlink(struct inode *dir, struct dentry *dentry,
char *ea;
int err;
int block;
- char name[UDF_NAME_LEN];
+ char *name = NULL;
int namelen;
struct buffer_head *bh;
struct udf_inode_info *iinfo;
@@ -950,6 +877,12 @@ static int udf_symlink(struct inode *dir, struct dentry *dentry,
if (!inode)
goto out;
+ name = kmalloc(UDF_NAME_LEN, GFP_NOFS);
+ if (!name) {
+ err = -ENOMEM;
+ goto out_no_entry;
+ }
+
iinfo = UDF_I(inode);
inode->i_mode = S_IFLNK | S_IRWXUGO;
inode->i_data.a_ops = &udf_symlink_aops;
@@ -1089,6 +1022,7 @@ static int udf_symlink(struct inode *dir, struct dentry *dentry,
err = 0;
out:
+ kfree(name);
unlock_kernel();
return err;
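
The udf_write_fi() hunk above switches from the private udf_crc() to the kernel-wide crc_itu_t() helper: the checksum is seeded with 0 and the running value is fed back in whenever the file identifier straddles the two buffer heads (fibh->sbh and fibh->ebh). A minimal user-space sketch of why that chaining gives the same result as a single pass; crc_itu_t_sketch() below is a bitwise stand-in written only for illustration, not the kernel's table-driven implementation.

#include <assert.h>
#include <stddef.h>
#include <stdint.h>

/* Bitwise CRC ITU-T V.41 (polynomial 0x1021, caller-supplied seed). */
static uint16_t crc_itu_t_sketch(uint16_t crc, const uint8_t *buf, size_t len)
{
	int i;

	while (len--) {
		crc ^= (uint16_t)(*buf++) << 8;
		for (i = 0; i < 8; i++)
			crc = (crc & 0x8000) ? (crc << 1) ^ 0x1021 : crc << 1;
	}
	return crc;
}

int main(void)
{
	uint8_t desc[64];
	uint16_t whole, split;
	size_t i;

	for (i = 0; i < sizeof(desc); i++)
		desc[i] = (uint8_t)(i * 7 + 3);

	/* One pass over the whole descriptor... */
	whole = crc_itu_t_sketch(0, desc, sizeof(desc));

	/* ...equals two chained calls, which is what udf_write_fi() relies on
	 * when the descriptor is split across two buffers. */
	split = crc_itu_t_sketch(0, desc, 20);
	split = crc_itu_t_sketch(split, desc + 20, sizeof(desc) - 20);

	assert(whole == split);
	return 0;
}
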
diff --git a/fs/udf/partition.c b/fs/udf/partition.c
index fc533345ab8..63610f026ae 100644
--- a/fs/udf/partition.c
+++ b/fs/udf/partition.c
@@ -24,7 +24,6 @@
#include <linux/fs.h>
#include <linux/string.h>
-#include <linux/udf_fs.h>
#include <linux/slab.h>
#include <linux/buffer_head.h>
@@ -55,11 +54,10 @@ uint32_t udf_get_pblock_virt15(struct super_block *sb, uint32_t block,
struct udf_sb_info *sbi = UDF_SB(sb);
struct udf_part_map *map;
struct udf_virtual_data *vdata;
- struct udf_inode_info *iinfo;
+ struct udf_inode_info *iinfo = UDF_I(sbi->s_vat_inode);
map = &sbi->s_partmaps[partition];
vdata = &map->s_type_specific.s_virtual;
- index = (sb->s_blocksize - vdata->s_start_offset) / sizeof(uint32_t);
if (block > vdata->s_num_entries) {
udf_debug("Trying to access block beyond end of VAT "
@@ -67,6 +65,12 @@ uint32_t udf_get_pblock_virt15(struct super_block *sb, uint32_t block,
return 0xFFFFFFFF;
}
+ if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB) {
+ loc = le32_to_cpu(((__le32 *)(iinfo->i_ext.i_data +
+ vdata->s_start_offset))[block]);
+ goto translate;
+ }
+ index = (sb->s_blocksize - vdata->s_start_offset) / sizeof(uint32_t);
if (block >= index) {
block -= index;
newblock = 1 + (block / (sb->s_blocksize / sizeof(uint32_t)));
@@ -89,7 +93,7 @@ uint32_t udf_get_pblock_virt15(struct super_block *sb, uint32_t block,
brelse(bh);
- iinfo = UDF_I(sbi->s_vat_inode);
+translate:
if (iinfo->i_location.partitionReferenceNum == partition) {
udf_debug("recursive call to udf_get_pblock!\n");
return 0xFFFFFFFF;
@@ -263,3 +267,58 @@ int udf_relocate_blocks(struct super_block *sb, long old_block, long *new_block)
return 0;
}
+
+static uint32_t udf_try_read_meta(struct inode *inode, uint32_t block,
+ uint16_t partition, uint32_t offset)
+{
+ struct super_block *sb = inode->i_sb;
+ struct udf_part_map *map;
+ kernel_lb_addr eloc;
+ uint32_t elen;
+ sector_t ext_offset;
+ struct extent_position epos = {};
+ uint32_t phyblock;
+
+ if (inode_bmap(inode, block, &epos, &eloc, &elen, &ext_offset) !=
+ (EXT_RECORDED_ALLOCATED >> 30))
+ phyblock = 0xFFFFFFFF;
+ else {
+ map = &UDF_SB(sb)->s_partmaps[partition];
+ /* map to sparable/physical partition desc */
+ phyblock = udf_get_pblock(sb, eloc.logicalBlockNum,
+ map->s_partition_num, ext_offset + offset);
+ }
+
+ brelse(epos.bh);
+ return phyblock;
+}
+
+uint32_t udf_get_pblock_meta25(struct super_block *sb, uint32_t block,
+ uint16_t partition, uint32_t offset)
+{
+ struct udf_sb_info *sbi = UDF_SB(sb);
+ struct udf_part_map *map;
+ struct udf_meta_data *mdata;
+ uint32_t retblk;
+ struct inode *inode;
+
+ udf_debug("READING from METADATA\n");
+
+ map = &sbi->s_partmaps[partition];
+ mdata = &map->s_type_specific.s_metadata;
+ inode = mdata->s_metadata_fe ? : mdata->s_mirror_fe;
+
+ /* We shouldn't mount such media... */
+ BUG_ON(!inode);
+ retblk = udf_try_read_meta(inode, block, partition, offset);
+ if (retblk == 0xFFFFFFFF) {
+ udf_warning(sb, __func__, "error reading from METADATA, "
+ "trying to read from MIRROR");
+ inode = mdata->s_mirror_fe;
+ if (!inode)
+ return 0xFFFFFFFF;
+ retblk = udf_try_read_meta(inode, block, partition, offset);
+ }
+
+ return retblk;
+}
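
The new udf_get_pblock_meta25() above prefers the metadata file entry and only falls back to the mirror file entry when the primary lookup comes back as 0xFFFFFFFF. A rough stand-alone sketch of that fallback shape, with read_fn callbacks standing in for udf_try_read_meta() on the two inodes; the names here are illustrative, not kernel APIs.

#include <stdint.h>
#include <stdio.h>

#define UDF_INVALID_BLOCK 0xFFFFFFFFu

/* Stand-in for udf_try_read_meta() bound to one file entry. */
typedef uint32_t (*read_fn)(uint32_t block);

/* Prefer the primary metadata file; retry on the mirror only when the
 * primary is absent or its lookup fails. */
static uint32_t map_meta_block(read_fn primary, read_fn mirror, uint32_t block)
{
	read_fn first = primary ? primary : mirror;
	uint32_t ret;

	if (!first)		/* neither file entry was loaded */
		return UDF_INVALID_BLOCK;

	ret = first(block);
	if (ret == UDF_INVALID_BLOCK && mirror && first != mirror) {
		fprintf(stderr, "metadata lookup failed, trying mirror\n");
		ret = mirror(block);
	}
	return ret;
}
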
diff --git a/fs/udf/super.c b/fs/udf/super.c
index f3ac4abfc94..b564fc140fe 100644
--- a/fs/udf/super.c
+++ b/fs/udf/super.c
@@ -55,9 +55,10 @@
#include <linux/errno.h>
#include <linux/mount.h>
#include <linux/seq_file.h>
+#include <linux/bitmap.h>
+#include <linux/crc-itu-t.h>
#include <asm/byteorder.h>
-#include <linux/udf_fs.h>
#include "udf_sb.h"
#include "udf_i.h"
@@ -84,22 +85,19 @@ static void udf_write_super(struct super_block *);
static int udf_remount_fs(struct super_block *, int *, char *);
static int udf_check_valid(struct super_block *, int, int);
static int udf_vrs(struct super_block *sb, int silent);
-static int udf_load_partition(struct super_block *, kernel_lb_addr *);
-static int udf_load_logicalvol(struct super_block *, struct buffer_head *,
- kernel_lb_addr *);
static void udf_load_logicalvolint(struct super_block *, kernel_extent_ad);
static void udf_find_anchor(struct super_block *);
static int udf_find_fileset(struct super_block *, kernel_lb_addr *,
kernel_lb_addr *);
-static void udf_load_pvoldesc(struct super_block *, struct buffer_head *);
static void udf_load_fileset(struct super_block *, struct buffer_head *,
kernel_lb_addr *);
-static int udf_load_partdesc(struct super_block *, struct buffer_head *);
static void udf_open_lvid(struct super_block *);
static void udf_close_lvid(struct super_block *);
static unsigned int udf_count_free(struct super_block *);
static int udf_statfs(struct dentry *, struct kstatfs *);
static int udf_show_options(struct seq_file *, struct vfsmount *);
+static void udf_error(struct super_block *sb, const char *function,
+ const char *fmt, ...);
struct logicalVolIntegrityDescImpUse *udf_sb_lvidiu(struct udf_sb_info *sbi)
{
@@ -587,48 +585,10 @@ static int udf_remount_fs(struct super_block *sb, int *flags, char *options)
return 0;
}
-/*
- * udf_set_blocksize
- *
- * PURPOSE
- * Set the block size to be used in all transfers.
- *
- * DESCRIPTION
- * To allow room for a DMA transfer, it is best to guess big when unsure.
- * This routine picks 2048 bytes as the blocksize when guessing. This
- * should be adequate until devices with larger block sizes become common.
- *
- * Note that the Linux kernel can currently only deal with blocksizes of
- * 512, 1024, 2048, 4096, and 8192 bytes.
- *
- * PRE-CONDITIONS
- * sb Pointer to _locked_ superblock.
- *
- * POST-CONDITIONS
- * sb->s_blocksize Blocksize.
- * sb->s_blocksize_bits log2 of blocksize.
- * <return> 0 Blocksize is valid.
- * <return> 1 Blocksize is invalid.
- *
- * HISTORY
- * July 1, 1997 - Andrew E. Mileski
- * Written, tested, and released.
- */
-static int udf_set_blocksize(struct super_block *sb, int bsize)
-{
- if (!sb_min_blocksize(sb, bsize)) {
- udf_debug("Bad block size (%d)\n", bsize);
- printk(KERN_ERR "udf: bad block size (%d)\n", bsize);
- return 0;
- }
-
- return sb->s_blocksize;
-}
-
static int udf_vrs(struct super_block *sb, int silent)
{
struct volStructDesc *vsd = NULL;
- int sector = 32768;
+ loff_t sector = 32768;
int sectorsize;
struct buffer_head *bh = NULL;
int iso9660 = 0;
@@ -649,7 +609,8 @@ static int udf_vrs(struct super_block *sb, int silent)
sector += (sbi->s_session << sb->s_blocksize_bits);
udf_debug("Starting at sector %u (%ld byte sectors)\n",
- (sector >> sb->s_blocksize_bits), sb->s_blocksize);
+ (unsigned int)(sector >> sb->s_blocksize_bits),
+ sb->s_blocksize);
/* Process the sequence (if applicable) */
for (; !nsr02 && !nsr03; sector += sectorsize) {
/* Read a block */
@@ -719,162 +680,140 @@ static int udf_vrs(struct super_block *sb, int silent)
}
/*
- * udf_find_anchor
- *
- * PURPOSE
- * Find an anchor volume descriptor.
- *
- * PRE-CONDITIONS
- * sb Pointer to _locked_ superblock.
- * lastblock Last block on media.
- *
- * POST-CONDITIONS
- * <return> 1 if not found, 0 if ok
- *
- * HISTORY
- * July 1, 1997 - Andrew E. Mileski
- * Written, tested, and released.
+ * Check whether the given block contains an anchor volume descriptor pointer
*/
-static void udf_find_anchor(struct super_block *sb)
+static int udf_check_anchor_block(struct super_block *sb, sector_t block,
+ bool varconv)
{
- int lastblock;
struct buffer_head *bh = NULL;
+ tag *t;
uint16_t ident;
uint32_t location;
- int i;
- struct udf_sb_info *sbi;
- sbi = UDF_SB(sb);
- lastblock = sbi->s_last_block;
+ if (varconv) {
+ if (udf_fixed_to_variable(block) >=
+ sb->s_bdev->bd_inode->i_size >> sb->s_blocksize_bits)
+ return 0;
+ bh = sb_bread(sb, udf_fixed_to_variable(block));
+ } else {
+ bh = sb_bread(sb, block);
+ }
- if (lastblock) {
- int varlastblock = udf_variable_to_fixed(lastblock);
- int last[] = { lastblock, lastblock - 2,
- lastblock - 150, lastblock - 152,
- varlastblock, varlastblock - 2,
- varlastblock - 150, varlastblock - 152 };
-
- lastblock = 0;
-
- /* Search for an anchor volume descriptor pointer */
-
- /* according to spec, anchor is in either:
- * block 256
- * lastblock-256
- * lastblock
- * however, if the disc isn't closed, it could be 512 */
-
- for (i = 0; !lastblock && i < ARRAY_SIZE(last); i++) {
- ident = location = 0;
- if (last[i] >= 0) {
- bh = sb_bread(sb, last[i]);
- if (bh) {
- tag *t = (tag *)bh->b_data;
- ident = le16_to_cpu(t->tagIdent);
- location = le32_to_cpu(t->tagLocation);
- brelse(bh);
- }
- }
+ if (!bh)
+ return 0;
- if (ident == TAG_IDENT_AVDP) {
- if (location == last[i] - sbi->s_session) {
- lastblock = last[i] - sbi->s_session;
- sbi->s_anchor[0] = lastblock;
- sbi->s_anchor[1] = lastblock - 256;
- } else if (location ==
- udf_variable_to_fixed(last[i]) -
- sbi->s_session) {
- UDF_SET_FLAG(sb, UDF_FLAG_VARCONV);
- lastblock =
- udf_variable_to_fixed(last[i]) -
- sbi->s_session;
- sbi->s_anchor[0] = lastblock;
- sbi->s_anchor[1] = lastblock - 256 -
- sbi->s_session;
- } else {
- udf_debug("Anchor found at block %d, "
- "location mismatch %d.\n",
- last[i], location);
- }
- } else if (ident == TAG_IDENT_FE ||
- ident == TAG_IDENT_EFE) {
- lastblock = last[i];
- sbi->s_anchor[3] = 512;
- } else {
- ident = location = 0;
- if (last[i] >= 256) {
- bh = sb_bread(sb, last[i] - 256);
- if (bh) {
- tag *t = (tag *)bh->b_data;
- ident = le16_to_cpu(
- t->tagIdent);
- location = le32_to_cpu(
- t->tagLocation);
- brelse(bh);
- }
- }
+ t = (tag *)bh->b_data;
+ ident = le16_to_cpu(t->tagIdent);
+ location = le32_to_cpu(t->tagLocation);
+ brelse(bh);
+ if (ident != TAG_IDENT_AVDP)
+ return 0;
+ return location == block;
+}
- if (ident == TAG_IDENT_AVDP &&
- location == last[i] - 256 -
- sbi->s_session) {
- lastblock = last[i];
- sbi->s_anchor[1] = last[i] - 256;
- } else {
- ident = location = 0;
- if (last[i] >= 312 + sbi->s_session) {
- bh = sb_bread(sb,
- last[i] - 312 -
- sbi->s_session);
- if (bh) {
- tag *t = (tag *)
- bh->b_data;
- ident = le16_to_cpu(
- t->tagIdent);
- location = le32_to_cpu(
- t->tagLocation);
- brelse(bh);
- }
- }
+/* Search for an anchor volume descriptor pointer */
+static sector_t udf_scan_anchors(struct super_block *sb, bool varconv,
+ sector_t lastblock)
+{
+ sector_t last[6];
+ int i;
+ struct udf_sb_info *sbi = UDF_SB(sb);
- if (ident == TAG_IDENT_AVDP &&
- location == udf_variable_to_fixed(last[i]) - 256) {
- UDF_SET_FLAG(sb,
- UDF_FLAG_VARCONV);
- lastblock = udf_variable_to_fixed(last[i]);
- sbi->s_anchor[1] = lastblock - 256;
- }
- }
- }
+ last[0] = lastblock;
+ last[1] = last[0] - 1;
+ last[2] = last[0] + 1;
+ last[3] = last[0] - 2;
+ last[4] = last[0] - 150;
+ last[5] = last[0] - 152;
+
+ /* according to spec, anchor is in either:
+ * block 256
+ * lastblock-256
+ * lastblock
+ * however, if the disc isn't closed, it could be 512 */
+
+ for (i = 0; i < ARRAY_SIZE(last); i++) {
+ if (last[i] < 0)
+ continue;
+ if (last[i] >= sb->s_bdev->bd_inode->i_size >>
+ sb->s_blocksize_bits)
+ continue;
+
+ if (udf_check_anchor_block(sb, last[i], varconv)) {
+ sbi->s_anchor[0] = last[i];
+ sbi->s_anchor[1] = last[i] - 256;
+ return last[i];
}
- }
- if (!lastblock) {
- /* We haven't found the lastblock. check 312 */
- bh = sb_bread(sb, 312 + sbi->s_session);
- if (bh) {
- tag *t = (tag *)bh->b_data;
- ident = le16_to_cpu(t->tagIdent);
- location = le32_to_cpu(t->tagLocation);
- brelse(bh);
+ if (last[i] < 256)
+ continue;
- if (ident == TAG_IDENT_AVDP && location == 256)
- UDF_SET_FLAG(sb, UDF_FLAG_VARCONV);
+ if (udf_check_anchor_block(sb, last[i] - 256, varconv)) {
+ sbi->s_anchor[1] = last[i] - 256;
+ return last[i];
}
}
+ if (udf_check_anchor_block(sb, sbi->s_session + 256, varconv)) {
+ sbi->s_anchor[0] = sbi->s_session + 256;
+ return last[0];
+ }
+ if (udf_check_anchor_block(sb, sbi->s_session + 512, varconv)) {
+ sbi->s_anchor[0] = sbi->s_session + 512;
+ return last[0];
+ }
+ return 0;
+}
+
+/*
+ * Find an anchor volume descriptor. The function expects sbi->s_last_block to
+ * be the last block on the media.
+ *
+ * Located anchors are remembered in sbi->s_anchor[].
+ *
+ */
+static void udf_find_anchor(struct super_block *sb)
+{
+ sector_t lastblock;
+ struct buffer_head *bh = NULL;
+ uint16_t ident;
+ int i;
+ struct udf_sb_info *sbi = UDF_SB(sb);
+
+ lastblock = udf_scan_anchors(sb, 0, sbi->s_last_block);
+ if (lastblock)
+ goto check_anchor;
+
+ /* No anchor found? Try VARCONV conversion of block numbers */
+ /* Firstly, we try to not convert number of the last block */
+ lastblock = udf_scan_anchors(sb, 1,
+ udf_variable_to_fixed(sbi->s_last_block));
+ if (lastblock) {
+ UDF_SET_FLAG(sb, UDF_FLAG_VARCONV);
+ goto check_anchor;
+ }
+
+ /* Secondly, we try with converted number of the last block */
+ lastblock = udf_scan_anchors(sb, 1, sbi->s_last_block);
+ if (lastblock)
+ UDF_SET_FLAG(sb, UDF_FLAG_VARCONV);
+
+check_anchor:
+ /*
+ * Check located anchors and the anchor block supplied via
+ * mount options
+ */
for (i = 0; i < ARRAY_SIZE(sbi->s_anchor); i++) {
- if (sbi->s_anchor[i]) {
- bh = udf_read_tagged(sb, sbi->s_anchor[i],
- sbi->s_anchor[i], &ident);
- if (!bh)
+ if (!sbi->s_anchor[i])
+ continue;
+ bh = udf_read_tagged(sb, sbi->s_anchor[i],
+ sbi->s_anchor[i], &ident);
+ if (!bh)
+ sbi->s_anchor[i] = 0;
+ else {
+ brelse(bh);
+ if (ident != TAG_IDENT_AVDP)
sbi->s_anchor[i] = 0;
- else {
- brelse(bh);
- if ((ident != TAG_IDENT_AVDP) &&
- (i || (ident != TAG_IDENT_FE &&
- ident != TAG_IDENT_EFE)))
- sbi->s_anchor[i] = 0;
- }
}
}
@@ -971,27 +910,30 @@ static int udf_find_fileset(struct super_block *sb,
return 1;
}
-static void udf_load_pvoldesc(struct super_block *sb, struct buffer_head *bh)
+static int udf_load_pvoldesc(struct super_block *sb, sector_t block)
{
struct primaryVolDesc *pvoldesc;
- time_t recording;
- long recording_usec;
struct ustr instr;
struct ustr outstr;
+ struct buffer_head *bh;
+ uint16_t ident;
+
+ bh = udf_read_tagged(sb, block, block, &ident);
+ if (!bh)
+ return 1;
+ BUG_ON(ident != TAG_IDENT_PVD);
pvoldesc = (struct primaryVolDesc *)bh->b_data;
- if (udf_stamp_to_time(&recording, &recording_usec,
- lets_to_cpu(pvoldesc->recordingDateAndTime))) {
- kernel_timestamp ts;
- ts = lets_to_cpu(pvoldesc->recordingDateAndTime);
- udf_debug("recording time %ld/%ld, %04u/%02u/%02u"
+ if (udf_disk_stamp_to_time(&UDF_SB(sb)->s_record_time,
+ pvoldesc->recordingDateAndTime)) {
+#ifdef UDFFS_DEBUG
+ timestamp *ts = &pvoldesc->recordingDateAndTime;
+ udf_debug("recording time %04u/%02u/%02u"
" %02u:%02u (%x)\n",
- recording, recording_usec,
- ts.year, ts.month, ts.day, ts.hour,
- ts.minute, ts.typeAndTimezone);
- UDF_SB(sb)->s_record_time.tv_sec = recording;
- UDF_SB(sb)->s_record_time.tv_nsec = recording_usec * 1000;
+ le16_to_cpu(ts->year), ts->month, ts->day, ts->hour,
+ ts->minute, le16_to_cpu(ts->typeAndTimezone));
+#endif
}
if (!udf_build_ustr(&instr, pvoldesc->volIdent, 32))
@@ -1005,6 +947,104 @@ static void udf_load_pvoldesc(struct super_block *sb, struct buffer_head *bh)
if (!udf_build_ustr(&instr, pvoldesc->volSetIdent, 128))
if (udf_CS0toUTF8(&outstr, &instr))
udf_debug("volSetIdent[] = '%s'\n", outstr.u_name);
+
+ brelse(bh);
+ return 0;
+}
+
+static int udf_load_metadata_files(struct super_block *sb, int partition)
+{
+ struct udf_sb_info *sbi = UDF_SB(sb);
+ struct udf_part_map *map;
+ struct udf_meta_data *mdata;
+ kernel_lb_addr addr;
+ int fe_error = 0;
+
+ map = &sbi->s_partmaps[partition];
+ mdata = &map->s_type_specific.s_metadata;
+
+ /* metadata address */
+ addr.logicalBlockNum = mdata->s_meta_file_loc;
+ addr.partitionReferenceNum = map->s_partition_num;
+
+ udf_debug("Metadata file location: block = %d part = %d\n",
+ addr.logicalBlockNum, addr.partitionReferenceNum);
+
+ mdata->s_metadata_fe = udf_iget(sb, addr);
+
+ if (mdata->s_metadata_fe == NULL) {
+ udf_warning(sb, __func__, "metadata inode efe not found, "
+ "will try mirror inode.");
+ fe_error = 1;
+ } else if (UDF_I(mdata->s_metadata_fe)->i_alloc_type !=
+ ICBTAG_FLAG_AD_SHORT) {
+ udf_warning(sb, __func__, "metadata inode efe does not have "
+ "short allocation descriptors!");
+ fe_error = 1;
+ iput(mdata->s_metadata_fe);
+ mdata->s_metadata_fe = NULL;
+ }
+
+ /* mirror file entry */
+ addr.logicalBlockNum = mdata->s_mirror_file_loc;
+ addr.partitionReferenceNum = map->s_partition_num;
+
+ udf_debug("Mirror metadata file location: block = %d part = %d\n",
+ addr.logicalBlockNum, addr.partitionReferenceNum);
+
+ mdata->s_mirror_fe = udf_iget(sb, addr);
+
+ if (mdata->s_mirror_fe == NULL) {
+ if (fe_error) {
+ udf_error(sb, __func__, "mirror inode efe not found "
+ "and metadata inode is missing too, exiting...");
+ goto error_exit;
+ } else
+ udf_warning(sb, __func__, "mirror inode efe not found,"
+ " but metadata inode is OK");
+ } else if (UDF_I(mdata->s_mirror_fe)->i_alloc_type !=
+ ICBTAG_FLAG_AD_SHORT) {
+ udf_warning(sb, __func__, "mirror inode efe does not have "
+ "short allocation descriptors!");
+ iput(mdata->s_mirror_fe);
+ mdata->s_mirror_fe = NULL;
+ if (fe_error)
+ goto error_exit;
+ }
+
+ /*
+ * bitmap file entry
+ * Note:
+ * Load only if bitmap file location differs from 0xFFFFFFFF (DCN-5102)
+ */
+ if (mdata->s_bitmap_file_loc != 0xFFFFFFFF) {
+ addr.logicalBlockNum = mdata->s_bitmap_file_loc;
+ addr.partitionReferenceNum = map->s_partition_num;
+
+ udf_debug("Bitmap file location: block = %d part = %d\n",
+ addr.logicalBlockNum, addr.partitionReferenceNum);
+
+ mdata->s_bitmap_fe = udf_iget(sb, addr);
+
+ if (mdata->s_bitmap_fe == NULL) {
+ if (sb->s_flags & MS_RDONLY)
+ udf_warning(sb, __func__, "bitmap inode efe "
+ "not found but it's ok since the disc"
+ " is mounted read-only");
+ else {
+ udf_error(sb, __func__, "bitmap inode efe not "
+ "found and attempted read-write mount");
+ goto error_exit;
+ }
+ }
+ }
+
+ udf_debug("udf_load_metadata_files Ok\n");
+
+ return 0;
+
+error_exit:
+ return 1;
}
static void udf_load_fileset(struct super_block *sb, struct buffer_head *bh,
@@ -1025,10 +1065,9 @@ static void udf_load_fileset(struct super_block *sb, struct buffer_head *bh,
int udf_compute_nr_groups(struct super_block *sb, u32 partition)
{
struct udf_part_map *map = &UDF_SB(sb)->s_partmaps[partition];
- return (map->s_partition_len +
- (sizeof(struct spaceBitmapDesc) << 3) +
- (sb->s_blocksize * 8) - 1) /
- (sb->s_blocksize * 8);
+ return DIV_ROUND_UP(map->s_partition_len +
+ (sizeof(struct spaceBitmapDesc) << 3),
+ sb->s_blocksize * 8);
}
static struct udf_bitmap *udf_sb_alloc_bitmap(struct super_block *sb, u32 index)
@@ -1059,134 +1098,241 @@ static struct udf_bitmap *udf_sb_alloc_bitmap(struct super_block *sb, u32 index)
return bitmap;
}
-static int udf_load_partdesc(struct super_block *sb, struct buffer_head *bh)
+static int udf_fill_partdesc_info(struct super_block *sb,
+ struct partitionDesc *p, int p_index)
+{
+ struct udf_part_map *map;
+ struct udf_sb_info *sbi = UDF_SB(sb);
+ struct partitionHeaderDesc *phd;
+
+ map = &sbi->s_partmaps[p_index];
+
+ map->s_partition_len = le32_to_cpu(p->partitionLength); /* blocks */
+ map->s_partition_root = le32_to_cpu(p->partitionStartingLocation);
+
+ if (p->accessType == cpu_to_le32(PD_ACCESS_TYPE_READ_ONLY))
+ map->s_partition_flags |= UDF_PART_FLAG_READ_ONLY;
+ if (p->accessType == cpu_to_le32(PD_ACCESS_TYPE_WRITE_ONCE))
+ map->s_partition_flags |= UDF_PART_FLAG_WRITE_ONCE;
+ if (p->accessType == cpu_to_le32(PD_ACCESS_TYPE_REWRITABLE))
+ map->s_partition_flags |= UDF_PART_FLAG_REWRITABLE;
+ if (p->accessType == cpu_to_le32(PD_ACCESS_TYPE_OVERWRITABLE))
+ map->s_partition_flags |= UDF_PART_FLAG_OVERWRITABLE;
+
+ udf_debug("Partition (%d type %x) starts at physical %d, "
+ "block length %d\n", p_index,
+ map->s_partition_type, map->s_partition_root,
+ map->s_partition_len);
+
+ if (strcmp(p->partitionContents.ident, PD_PARTITION_CONTENTS_NSR02) &&
+ strcmp(p->partitionContents.ident, PD_PARTITION_CONTENTS_NSR03))
+ return 0;
+
+ phd = (struct partitionHeaderDesc *)p->partitionContentsUse;
+ if (phd->unallocSpaceTable.extLength) {
+ kernel_lb_addr loc = {
+ .logicalBlockNum = le32_to_cpu(
+ phd->unallocSpaceTable.extPosition),
+ .partitionReferenceNum = p_index,
+ };
+
+ map->s_uspace.s_table = udf_iget(sb, loc);
+ if (!map->s_uspace.s_table) {
+ udf_debug("cannot load unallocSpaceTable (part %d)\n",
+ p_index);
+ return 1;
+ }
+ map->s_partition_flags |= UDF_PART_FLAG_UNALLOC_TABLE;
+ udf_debug("unallocSpaceTable (part %d) @ %ld\n",
+ p_index, map->s_uspace.s_table->i_ino);
+ }
+
+ if (phd->unallocSpaceBitmap.extLength) {
+ struct udf_bitmap *bitmap = udf_sb_alloc_bitmap(sb, p_index);
+ if (!bitmap)
+ return 1;
+ map->s_uspace.s_bitmap = bitmap;
+ bitmap->s_extLength = le32_to_cpu(
+ phd->unallocSpaceBitmap.extLength);
+ bitmap->s_extPosition = le32_to_cpu(
+ phd->unallocSpaceBitmap.extPosition);
+ map->s_partition_flags |= UDF_PART_FLAG_UNALLOC_BITMAP;
+ udf_debug("unallocSpaceBitmap (part %d) @ %d\n", p_index,
+ bitmap->s_extPosition);
+ }
+
+ if (phd->partitionIntegrityTable.extLength)
+ udf_debug("partitionIntegrityTable (part %d)\n", p_index);
+
+ if (phd->freedSpaceTable.extLength) {
+ kernel_lb_addr loc = {
+ .logicalBlockNum = le32_to_cpu(
+ phd->freedSpaceTable.extPosition),
+ .partitionReferenceNum = p_index,
+ };
+
+ map->s_fspace.s_table = udf_iget(sb, loc);
+ if (!map->s_fspace.s_table) {
+ udf_debug("cannot load freedSpaceTable (part %d)\n",
+ p_index);
+ return 1;
+ }
+
+ map->s_partition_flags |= UDF_PART_FLAG_FREED_TABLE;
+ udf_debug("freedSpaceTable (part %d) @ %ld\n",
+ p_index, map->s_fspace.s_table->i_ino);
+ }
+
+ if (phd->freedSpaceBitmap.extLength) {
+ struct udf_bitmap *bitmap = udf_sb_alloc_bitmap(sb, p_index);
+ if (!bitmap)
+ return 1;
+ map->s_fspace.s_bitmap = bitmap;
+ bitmap->s_extLength = le32_to_cpu(
+ phd->freedSpaceBitmap.extLength);
+ bitmap->s_extPosition = le32_to_cpu(
+ phd->freedSpaceBitmap.extPosition);
+ map->s_partition_flags |= UDF_PART_FLAG_FREED_BITMAP;
+ udf_debug("freedSpaceBitmap (part %d) @ %d\n", p_index,
+ bitmap->s_extPosition);
+ }
+ return 0;
+}
+
+static int udf_load_vat(struct super_block *sb, int p_index, int type1_index)
+{
+ struct udf_sb_info *sbi = UDF_SB(sb);
+ struct udf_part_map *map = &sbi->s_partmaps[p_index];
+ kernel_lb_addr ino;
+ struct buffer_head *bh = NULL;
+ struct udf_inode_info *vati;
+ uint32_t pos;
+ struct virtualAllocationTable20 *vat20;
+
+ /* VAT file entry is in the last recorded block */
+ ino.partitionReferenceNum = type1_index;
+ ino.logicalBlockNum = sbi->s_last_block - map->s_partition_root;
+ sbi->s_vat_inode = udf_iget(sb, ino);
+ if (!sbi->s_vat_inode)
+ return 1;
+
+ if (map->s_partition_type == UDF_VIRTUAL_MAP15) {
+ map->s_type_specific.s_virtual.s_start_offset = 0;
+ map->s_type_specific.s_virtual.s_num_entries =
+ (sbi->s_vat_inode->i_size - 36) >> 2;
+ } else if (map->s_partition_type == UDF_VIRTUAL_MAP20) {
+ vati = UDF_I(sbi->s_vat_inode);
+ if (vati->i_alloc_type != ICBTAG_FLAG_AD_IN_ICB) {
+ pos = udf_block_map(sbi->s_vat_inode, 0);
+ bh = sb_bread(sb, pos);
+ if (!bh)
+ return 1;
+ vat20 = (struct virtualAllocationTable20 *)bh->b_data;
+ } else {
+ vat20 = (struct virtualAllocationTable20 *)
+ vati->i_ext.i_data;
+ }
+
+ map->s_type_specific.s_virtual.s_start_offset =
+ le16_to_cpu(vat20->lengthHeader);
+ map->s_type_specific.s_virtual.s_num_entries =
+ (sbi->s_vat_inode->i_size -
+ map->s_type_specific.s_virtual.
+ s_start_offset) >> 2;
+ brelse(bh);
+ }
+ return 0;
+}
+
+static int udf_load_partdesc(struct super_block *sb, sector_t block)
{
+ struct buffer_head *bh;
struct partitionDesc *p;
- int i;
struct udf_part_map *map;
- struct udf_sb_info *sbi;
+ struct udf_sb_info *sbi = UDF_SB(sb);
+ int i, type1_idx;
+ uint16_t partitionNumber;
+ uint16_t ident;
+ int ret = 0;
+
+ bh = udf_read_tagged(sb, block, block, &ident);
+ if (!bh)
+ return 1;
+ if (ident != TAG_IDENT_PD)
+ goto out_bh;
p = (struct partitionDesc *)bh->b_data;
- sbi = UDF_SB(sb);
+ partitionNumber = le16_to_cpu(p->partitionNumber);
+ /* First scan for TYPE1 and SPARABLE partitions */
for (i = 0; i < sbi->s_partitions; i++) {
map = &sbi->s_partmaps[i];
udf_debug("Searching map: (%d == %d)\n",
- map->s_partition_num,
- le16_to_cpu(p->partitionNumber));
- if (map->s_partition_num ==
- le16_to_cpu(p->partitionNumber)) {
- map->s_partition_len =
- le32_to_cpu(p->partitionLength); /* blocks */
- map->s_partition_root =
- le32_to_cpu(p->partitionStartingLocation);
- if (p->accessType ==
- cpu_to_le32(PD_ACCESS_TYPE_READ_ONLY))
- map->s_partition_flags |=
- UDF_PART_FLAG_READ_ONLY;
- if (p->accessType ==
- cpu_to_le32(PD_ACCESS_TYPE_WRITE_ONCE))
- map->s_partition_flags |=
- UDF_PART_FLAG_WRITE_ONCE;
- if (p->accessType ==
- cpu_to_le32(PD_ACCESS_TYPE_REWRITABLE))
- map->s_partition_flags |=
- UDF_PART_FLAG_REWRITABLE;
- if (p->accessType ==
- cpu_to_le32(PD_ACCESS_TYPE_OVERWRITABLE))
- map->s_partition_flags |=
- UDF_PART_FLAG_OVERWRITABLE;
-
- if (!strcmp(p->partitionContents.ident,
- PD_PARTITION_CONTENTS_NSR02) ||
- !strcmp(p->partitionContents.ident,
- PD_PARTITION_CONTENTS_NSR03)) {
- struct partitionHeaderDesc *phd;
-
- phd = (struct partitionHeaderDesc *)
- (p->partitionContentsUse);
- if (phd->unallocSpaceTable.extLength) {
- kernel_lb_addr loc = {
- .logicalBlockNum = le32_to_cpu(phd->unallocSpaceTable.extPosition),
- .partitionReferenceNum = i,
- };
-
- map->s_uspace.s_table =
- udf_iget(sb, loc);
- if (!map->s_uspace.s_table) {
- udf_debug("cannot load unallocSpaceTable (part %d)\n", i);
- return 1;
- }
- map->s_partition_flags |=
- UDF_PART_FLAG_UNALLOC_TABLE;
- udf_debug("unallocSpaceTable (part %d) @ %ld\n",
- i, map->s_uspace.s_table->i_ino);
- }
- if (phd->unallocSpaceBitmap.extLength) {
- struct udf_bitmap *bitmap =
- udf_sb_alloc_bitmap(sb, i);
- map->s_uspace.s_bitmap = bitmap;
- if (bitmap != NULL) {
- bitmap->s_extLength =
- le32_to_cpu(phd->unallocSpaceBitmap.extLength);
- bitmap->s_extPosition =
- le32_to_cpu(phd->unallocSpaceBitmap.extPosition);
- map->s_partition_flags |= UDF_PART_FLAG_UNALLOC_BITMAP;
- udf_debug("unallocSpaceBitmap (part %d) @ %d\n",
- i, bitmap->s_extPosition);
- }
- }
- if (phd->partitionIntegrityTable.extLength)
- udf_debug("partitionIntegrityTable (part %d)\n", i);
- if (phd->freedSpaceTable.extLength) {
- kernel_lb_addr loc = {
- .logicalBlockNum = le32_to_cpu(phd->freedSpaceTable.extPosition),
- .partitionReferenceNum = i,
- };
-
- map->s_fspace.s_table =
- udf_iget(sb, loc);
- if (!map->s_fspace.s_table) {
- udf_debug("cannot load freedSpaceTable (part %d)\n", i);
- return 1;
- }
- map->s_partition_flags |=
- UDF_PART_FLAG_FREED_TABLE;
- udf_debug("freedSpaceTable (part %d) @ %ld\n",
- i, map->s_fspace.s_table->i_ino);
- }
- if (phd->freedSpaceBitmap.extLength) {
- struct udf_bitmap *bitmap =
- udf_sb_alloc_bitmap(sb, i);
- map->s_fspace.s_bitmap = bitmap;
- if (bitmap != NULL) {
- bitmap->s_extLength =
- le32_to_cpu(phd->freedSpaceBitmap.extLength);
- bitmap->s_extPosition =
- le32_to_cpu(phd->freedSpaceBitmap.extPosition);
- map->s_partition_flags |= UDF_PART_FLAG_FREED_BITMAP;
- udf_debug("freedSpaceBitmap (part %d) @ %d\n",
- i, bitmap->s_extPosition);
- }
- }
- }
+ map->s_partition_num, partitionNumber);
+ if (map->s_partition_num == partitionNumber &&
+ (map->s_partition_type == UDF_TYPE1_MAP15 ||
+ map->s_partition_type == UDF_SPARABLE_MAP15))
break;
- }
}
- if (i == sbi->s_partitions)
+
+ if (i >= sbi->s_partitions) {
udf_debug("Partition (%d) not found in partition map\n",
- le16_to_cpu(p->partitionNumber));
- else
- udf_debug("Partition (%d:%d type %x) starts at physical %d, "
- "block length %d\n",
- le16_to_cpu(p->partitionNumber), i,
- map->s_partition_type,
- map->s_partition_root,
- map->s_partition_len);
- return 0;
+ partitionNumber);
+ goto out_bh;
+ }
+
+ ret = udf_fill_partdesc_info(sb, p, i);
+
+ /*
+ * Now rescan for VIRTUAL or METADATA partitions when SPARABLE and
+ * PHYSICAL partitions are already set up
+ */
+ type1_idx = i;
+ for (i = 0; i < sbi->s_partitions; i++) {
+ map = &sbi->s_partmaps[i];
+
+ if (map->s_partition_num == partitionNumber &&
+ (map->s_partition_type == UDF_VIRTUAL_MAP15 ||
+ map->s_partition_type == UDF_VIRTUAL_MAP20 ||
+ map->s_partition_type == UDF_METADATA_MAP25))
+ break;
+ }
+
+ if (i >= sbi->s_partitions)
+ goto out_bh;
+
+ ret = udf_fill_partdesc_info(sb, p, i);
+ if (ret)
+ goto out_bh;
+
+ if (map->s_partition_type == UDF_METADATA_MAP25) {
+ ret = udf_load_metadata_files(sb, i);
+ if (ret) {
+ printk(KERN_ERR "UDF-fs: error loading MetaData "
+ "partition map %d\n", i);
+ goto out_bh;
+ }
+ } else {
+ ret = udf_load_vat(sb, i, type1_idx);
+ if (ret)
+ goto out_bh;
+ /*
+ * Mark filesystem read-only if we have a partition with
+ * virtual map since we don't handle writing to it (we
+ * overwrite blocks instead of relocating them).
+ */
+ sb->s_flags |= MS_RDONLY;
+ printk(KERN_NOTICE "UDF-fs: Filesystem marked read-only "
+ "because writing to pseudooverwrite partition is "
+ "not implemented.\n");
+ }
+out_bh:
+ /* In case loading failed, we handle cleanup in udf_fill_super */
+ brelse(bh);
+ return ret;
}
-static int udf_load_logicalvol(struct super_block *sb, struct buffer_head *bh,
+static int udf_load_logicalvol(struct super_block *sb, sector_t block,
kernel_lb_addr *fileset)
{
struct logicalVolDesc *lvd;
@@ -1194,12 +1340,21 @@ static int udf_load_logicalvol(struct super_block *sb, struct buffer_head *bh,
uint8_t type;
struct udf_sb_info *sbi = UDF_SB(sb);
struct genericPartitionMap *gpm;
+ uint16_t ident;
+ struct buffer_head *bh;
+ int ret = 0;
+ bh = udf_read_tagged(sb, block, block, &ident);
+ if (!bh)
+ return 1;
+ BUG_ON(ident != TAG_IDENT_LVD);
lvd = (struct logicalVolDesc *)bh->b_data;
i = udf_sb_alloc_partition_maps(sb, le32_to_cpu(lvd->numPartitionMaps));
- if (i != 0)
- return i;
+ if (i != 0) {
+ ret = i;
+ goto out_bh;
+ }
for (i = 0, offset = 0;
i < sbi->s_partitions && offset < le32_to_cpu(lvd->mapTableLength);
@@ -1223,12 +1378,12 @@ static int udf_load_logicalvol(struct super_block *sb, struct buffer_head *bh,
u16 suf =
le16_to_cpu(((__le16 *)upm2->partIdent.
identSuffix)[0]);
- if (suf == 0x0150) {
+ if (suf < 0x0200) {
map->s_partition_type =
UDF_VIRTUAL_MAP15;
map->s_partition_func =
udf_get_pblock_virt15;
- } else if (suf == 0x0200) {
+ } else {
map->s_partition_type =
UDF_VIRTUAL_MAP20;
map->s_partition_func =
@@ -1238,7 +1393,6 @@ static int udf_load_logicalvol(struct super_block *sb, struct buffer_head *bh,
UDF_ID_SPARABLE,
strlen(UDF_ID_SPARABLE))) {
uint32_t loc;
- uint16_t ident;
struct sparingTable *st;
struct sparablePartitionMap *spm =
(struct sparablePartitionMap *)gpm;
@@ -1256,22 +1410,64 @@ static int udf_load_logicalvol(struct super_block *sb, struct buffer_head *bh,
map->s_type_specific.s_sparing.
s_spar_map[j] = bh2;
- if (bh2 != NULL) {
- st = (struct sparingTable *)
- bh2->b_data;
- if (ident != 0 || strncmp(
- st->sparingIdent.ident,
- UDF_ID_SPARING,
- strlen(UDF_ID_SPARING))) {
- brelse(bh2);
- map->s_type_specific.
- s_sparing.
- s_spar_map[j] =
- NULL;
- }
+ if (bh2 == NULL)
+ continue;
+
+ st = (struct sparingTable *)bh2->b_data;
+ if (ident != 0 || strncmp(
+ st->sparingIdent.ident,
+ UDF_ID_SPARING,
+ strlen(UDF_ID_SPARING))) {
+ brelse(bh2);
+ map->s_type_specific.s_sparing.
+ s_spar_map[j] = NULL;
}
}
map->s_partition_func = udf_get_pblock_spar15;
+ } else if (!strncmp(upm2->partIdent.ident,
+ UDF_ID_METADATA,
+ strlen(UDF_ID_METADATA))) {
+ struct udf_meta_data *mdata =
+ &map->s_type_specific.s_metadata;
+ struct metadataPartitionMap *mdm =
+ (struct metadataPartitionMap *)
+ &(lvd->partitionMaps[offset]);
+ udf_debug("Parsing Logical vol part %d "
+ "type %d id=%s\n", i, type,
+ UDF_ID_METADATA);
+
+ map->s_partition_type = UDF_METADATA_MAP25;
+ map->s_partition_func = udf_get_pblock_meta25;
+
+ mdata->s_meta_file_loc =
+ le32_to_cpu(mdm->metadataFileLoc);
+ mdata->s_mirror_file_loc =
+ le32_to_cpu(mdm->metadataMirrorFileLoc);
+ mdata->s_bitmap_file_loc =
+ le32_to_cpu(mdm->metadataBitmapFileLoc);
+ mdata->s_alloc_unit_size =
+ le32_to_cpu(mdm->allocUnitSize);
+ mdata->s_align_unit_size =
+ le16_to_cpu(mdm->alignUnitSize);
+ mdata->s_dup_md_flag =
+ mdm->flags & 0x01;
+
+ udf_debug("Metadata Ident suffix=0x%x\n",
+ (le16_to_cpu(
+ ((__le16 *)
+ mdm->partIdent.identSuffix)[0])));
+ udf_debug("Metadata part num=%d\n",
+ le16_to_cpu(mdm->partitionNum));
+ udf_debug("Metadata part alloc unit size=%d\n",
+ le32_to_cpu(mdm->allocUnitSize));
+ udf_debug("Metadata file loc=%d\n",
+ le32_to_cpu(mdm->metadataFileLoc));
+ udf_debug("Mirror file loc=%d\n",
+ le32_to_cpu(mdm->metadataMirrorFileLoc));
+ udf_debug("Bitmap file loc=%d\n",
+ le32_to_cpu(mdm->metadataBitmapFileLoc));
+ udf_debug("Duplicate Flag: %d %d\n",
+ mdata->s_dup_md_flag, mdm->flags);
} else {
udf_debug("Unknown ident: %s\n",
upm2->partIdent.ident);
@@ -1296,7 +1492,9 @@ static int udf_load_logicalvol(struct super_block *sb, struct buffer_head *bh,
if (lvd->integritySeqExt.extLength)
udf_load_logicalvolint(sb, leea_to_cpu(lvd->integritySeqExt));
- return 0;
+out_bh:
+ brelse(bh);
+ return ret;
}
/*
@@ -1345,7 +1543,7 @@ static void udf_load_logicalvolint(struct super_block *sb, kernel_extent_ad loc)
* July 1, 1997 - Andrew E. Mileski
* Written, tested, and released.
*/
-static int udf_process_sequence(struct super_block *sb, long block,
+static noinline int udf_process_sequence(struct super_block *sb, long block,
long lastblock, kernel_lb_addr *fileset)
{
struct buffer_head *bh = NULL;
@@ -1354,19 +1552,25 @@ static int udf_process_sequence(struct super_block *sb, long block,
struct generic_desc *gd;
struct volDescPtr *vdp;
int done = 0;
- int i, j;
uint32_t vdsn;
uint16_t ident;
long next_s = 0, next_e = 0;
memset(vds, 0, sizeof(struct udf_vds_record) * VDS_POS_LENGTH);
- /* Read the main descriptor sequence */
+ /*
+ * Read the main descriptor sequence and find which descriptors
+ * are in it.
+ */
for (; (!done && block <= lastblock); block++) {
bh = udf_read_tagged(sb, block, block, &ident);
- if (!bh)
- break;
+ if (!bh) {
+ printk(KERN_ERR "udf: Block %Lu of volume descriptor "
+ "sequence is corrupted or we could not read "
+ "it.\n", (unsigned long long)block);
+ return 1;
+ }
/* Process each descriptor (ISO 13346 3/8.3-8.4) */
gd = (struct generic_desc *)bh->b_data;
@@ -1432,41 +1636,31 @@ static int udf_process_sequence(struct super_block *sb, long block,
}
brelse(bh);
}
- for (i = 0; i < VDS_POS_LENGTH; i++) {
- if (vds[i].block) {
- bh = udf_read_tagged(sb, vds[i].block, vds[i].block,
- &ident);
-
- if (i == VDS_POS_PRIMARY_VOL_DESC) {
- udf_load_pvoldesc(sb, bh);
- } else if (i == VDS_POS_LOGICAL_VOL_DESC) {
- if (udf_load_logicalvol(sb, bh, fileset)) {
- brelse(bh);
- return 1;
- }
- } else if (i == VDS_POS_PARTITION_DESC) {
- struct buffer_head *bh2 = NULL;
- if (udf_load_partdesc(sb, bh)) {
- brelse(bh);
- return 1;
- }
- for (j = vds[i].block + 1;
- j < vds[VDS_POS_TERMINATING_DESC].block;
- j++) {
- bh2 = udf_read_tagged(sb, j, j, &ident);
- gd = (struct generic_desc *)bh2->b_data;
- if (ident == TAG_IDENT_PD)
- if (udf_load_partdesc(sb,
- bh2)) {
- brelse(bh);
- brelse(bh2);
- return 1;
- }
- brelse(bh2);
- }
- }
- brelse(bh);
- }
+ /*
+ * Now read interesting descriptors again and process them
+ * in a suitable order
+ */
+ if (!vds[VDS_POS_PRIMARY_VOL_DESC].block) {
+ printk(KERN_ERR "udf: Primary Volume Descriptor not found!\n");
+ return 1;
+ }
+ if (udf_load_pvoldesc(sb, vds[VDS_POS_PRIMARY_VOL_DESC].block))
+ return 1;
+
+ if (vds[VDS_POS_LOGICAL_VOL_DESC].block && udf_load_logicalvol(sb,
+ vds[VDS_POS_LOGICAL_VOL_DESC].block, fileset))
+ return 1;
+
+ if (vds[VDS_POS_PARTITION_DESC].block) {
+ /*
+ * We rescan the whole descriptor sequence to find
+ * partition descriptor blocks and process them.
+ */
+ for (block = vds[VDS_POS_PARTITION_DESC].block;
+ block < vds[VDS_POS_TERMINATING_DESC].block;
+ block++)
+ if (udf_load_partdesc(sb, block))
+ return 1;
}
return 0;
@@ -1478,6 +1672,7 @@ static int udf_process_sequence(struct super_block *sb, long block,
static int udf_check_valid(struct super_block *sb, int novrs, int silent)
{
long block;
+ struct udf_sb_info *sbi = UDF_SB(sb);
if (novrs) {
udf_debug("Validity check skipped because of novrs option\n");
@@ -1485,27 +1680,22 @@ static int udf_check_valid(struct super_block *sb, int novrs, int silent)
}
/* Check that it is NSR02 compliant */
/* Process any "CD-ROM Volume Descriptor Set" (ECMA 167 2/8.3.1) */
- else {
- block = udf_vrs(sb, silent);
- if (block == -1) {
- struct udf_sb_info *sbi = UDF_SB(sb);
- udf_debug("Failed to read byte 32768. Assuming open "
- "disc. Skipping validity check\n");
- if (!sbi->s_last_block)
- sbi->s_last_block = udf_get_last_block(sb);
- return 0;
- } else
- return !block;
- }
+ block = udf_vrs(sb, silent);
+ if (block == -1)
+ udf_debug("Failed to read byte 32768. Assuming open "
+ "disc. Skipping validity check\n");
+ if (block && !sbi->s_last_block)
+ sbi->s_last_block = udf_get_last_block(sb);
+ return !block;
}
-static int udf_load_partition(struct super_block *sb, kernel_lb_addr *fileset)
+static int udf_load_sequence(struct super_block *sb, kernel_lb_addr *fileset)
{
struct anchorVolDescPtr *anchor;
uint16_t ident;
struct buffer_head *bh;
long main_s, main_e, reserve_s, reserve_e;
- int i, j;
+ int i;
struct udf_sb_info *sbi;
if (!sb)
@@ -1515,6 +1705,7 @@ static int udf_load_partition(struct super_block *sb, kernel_lb_addr *fileset)
for (i = 0; i < ARRAY_SIZE(sbi->s_anchor); i++) {
if (!sbi->s_anchor[i])
continue;
+
bh = udf_read_tagged(sb, sbi->s_anchor[i], sbi->s_anchor[i],
&ident);
if (!bh)
@@ -1553,76 +1744,6 @@ static int udf_load_partition(struct super_block *sb, kernel_lb_addr *fileset)
}
udf_debug("Using anchor in block %d\n", sbi->s_anchor[i]);
- for (i = 0; i < sbi->s_partitions; i++) {
- kernel_lb_addr uninitialized_var(ino);
- struct udf_part_map *map = &sbi->s_partmaps[i];
- switch (map->s_partition_type) {
- case UDF_VIRTUAL_MAP15:
- case UDF_VIRTUAL_MAP20:
- if (!sbi->s_last_block) {
- sbi->s_last_block = udf_get_last_block(sb);
- udf_find_anchor(sb);
- }
-
- if (!sbi->s_last_block) {
- udf_debug("Unable to determine Lastblock (For "
- "Virtual Partition)\n");
- return 1;
- }
-
- for (j = 0; j < sbi->s_partitions; j++) {
- struct udf_part_map *map2 = &sbi->s_partmaps[j];
- if (j != i &&
- map->s_volumeseqnum ==
- map2->s_volumeseqnum &&
- map->s_partition_num ==
- map2->s_partition_num) {
- ino.partitionReferenceNum = j;
- ino.logicalBlockNum =
- sbi->s_last_block -
- map2->s_partition_root;
- break;
- }
- }
-
- if (j == sbi->s_partitions)
- return 1;
-
- sbi->s_vat_inode = udf_iget(sb, ino);
- if (!sbi->s_vat_inode)
- return 1;
-
- if (map->s_partition_type == UDF_VIRTUAL_MAP15) {
- map->s_type_specific.s_virtual.s_start_offset =
- udf_ext0_offset(sbi->s_vat_inode);
- map->s_type_specific.s_virtual.s_num_entries =
- (sbi->s_vat_inode->i_size - 36) >> 2;
- } else if (map->s_partition_type == UDF_VIRTUAL_MAP20) {
- uint32_t pos;
- struct virtualAllocationTable20 *vat20;
-
- pos = udf_block_map(sbi->s_vat_inode, 0);
- bh = sb_bread(sb, pos);
- if (!bh)
- return 1;
- vat20 = (struct virtualAllocationTable20 *)
- bh->b_data +
- udf_ext0_offset(sbi->s_vat_inode);
- map->s_type_specific.s_virtual.s_start_offset =
- le16_to_cpu(vat20->lengthHeader) +
- udf_ext0_offset(sbi->s_vat_inode);
- map->s_type_specific.s_virtual.s_num_entries =
- (sbi->s_vat_inode->i_size -
- map->s_type_specific.s_virtual.
- s_start_offset) >> 2;
- brelse(bh);
- }
- map->s_partition_root = udf_get_pblock(sb, 0, i, 0);
- map->s_partition_len =
- sbi->s_partmaps[ino.partitionReferenceNum].
- s_partition_len;
- }
- }
return 0;
}
@@ -1630,65 +1751,61 @@ static void udf_open_lvid(struct super_block *sb)
{
struct udf_sb_info *sbi = UDF_SB(sb);
struct buffer_head *bh = sbi->s_lvid_bh;
- if (bh) {
- kernel_timestamp cpu_time;
- struct logicalVolIntegrityDesc *lvid =
- (struct logicalVolIntegrityDesc *)bh->b_data;
- struct logicalVolIntegrityDescImpUse *lvidiu =
- udf_sb_lvidiu(sbi);
+ struct logicalVolIntegrityDesc *lvid;
+ struct logicalVolIntegrityDescImpUse *lvidiu;
+ if (!bh)
+ return;
- lvidiu->impIdent.identSuffix[0] = UDF_OS_CLASS_UNIX;
- lvidiu->impIdent.identSuffix[1] = UDF_OS_ID_LINUX;
- if (udf_time_to_stamp(&cpu_time, CURRENT_TIME))
- lvid->recordingDateAndTime = cpu_to_lets(cpu_time);
- lvid->integrityType = LVID_INTEGRITY_TYPE_OPEN;
+ lvid = (struct logicalVolIntegrityDesc *)bh->b_data;
+ lvidiu = udf_sb_lvidiu(sbi);
- lvid->descTag.descCRC = cpu_to_le16(
- udf_crc((char *)lvid + sizeof(tag),
- le16_to_cpu(lvid->descTag.descCRCLength),
- 0));
+ lvidiu->impIdent.identSuffix[0] = UDF_OS_CLASS_UNIX;
+ lvidiu->impIdent.identSuffix[1] = UDF_OS_ID_LINUX;
+ udf_time_to_disk_stamp(&lvid->recordingDateAndTime,
+ CURRENT_TIME);
+ lvid->integrityType = LVID_INTEGRITY_TYPE_OPEN;
- lvid->descTag.tagChecksum = udf_tag_checksum(&lvid->descTag);
- mark_buffer_dirty(bh);
- }
+ lvid->descTag.descCRC = cpu_to_le16(
+ crc_itu_t(0, (char *)lvid + sizeof(tag),
+ le16_to_cpu(lvid->descTag.descCRCLength)));
+
+ lvid->descTag.tagChecksum = udf_tag_checksum(&lvid->descTag);
+ mark_buffer_dirty(bh);
}
static void udf_close_lvid(struct super_block *sb)
{
- kernel_timestamp cpu_time;
struct udf_sb_info *sbi = UDF_SB(sb);
struct buffer_head *bh = sbi->s_lvid_bh;
struct logicalVolIntegrityDesc *lvid;
+ struct logicalVolIntegrityDescImpUse *lvidiu;
if (!bh)
return;
lvid = (struct logicalVolIntegrityDesc *)bh->b_data;
- if (lvid->integrityType == LVID_INTEGRITY_TYPE_OPEN) {
- struct logicalVolIntegrityDescImpUse *lvidiu =
- udf_sb_lvidiu(sbi);
- lvidiu->impIdent.identSuffix[0] = UDF_OS_CLASS_UNIX;
- lvidiu->impIdent.identSuffix[1] = UDF_OS_ID_LINUX;
- if (udf_time_to_stamp(&cpu_time, CURRENT_TIME))
- lvid->recordingDateAndTime = cpu_to_lets(cpu_time);
- if (UDF_MAX_WRITE_VERSION > le16_to_cpu(lvidiu->maxUDFWriteRev))
- lvidiu->maxUDFWriteRev =
- cpu_to_le16(UDF_MAX_WRITE_VERSION);
- if (sbi->s_udfrev > le16_to_cpu(lvidiu->minUDFReadRev))
- lvidiu->minUDFReadRev = cpu_to_le16(sbi->s_udfrev);
- if (sbi->s_udfrev > le16_to_cpu(lvidiu->minUDFWriteRev))
- lvidiu->minUDFWriteRev = cpu_to_le16(sbi->s_udfrev);
- lvid->integrityType = cpu_to_le32(LVID_INTEGRITY_TYPE_CLOSE);
-
- lvid->descTag.descCRC = cpu_to_le16(
- udf_crc((char *)lvid + sizeof(tag),
- le16_to_cpu(lvid->descTag.descCRCLength),
- 0));
-
- lvid->descTag.tagChecksum = udf_tag_checksum(&lvid->descTag);
- mark_buffer_dirty(bh);
- }
+ if (lvid->integrityType != LVID_INTEGRITY_TYPE_OPEN)
+ return;
+
+ lvidiu = udf_sb_lvidiu(sbi);
+ lvidiu->impIdent.identSuffix[0] = UDF_OS_CLASS_UNIX;
+ lvidiu->impIdent.identSuffix[1] = UDF_OS_ID_LINUX;
+ udf_time_to_disk_stamp(&lvid->recordingDateAndTime, CURRENT_TIME);
+ if (UDF_MAX_WRITE_VERSION > le16_to_cpu(lvidiu->maxUDFWriteRev))
+ lvidiu->maxUDFWriteRev = cpu_to_le16(UDF_MAX_WRITE_VERSION);
+ if (sbi->s_udfrev > le16_to_cpu(lvidiu->minUDFReadRev))
+ lvidiu->minUDFReadRev = cpu_to_le16(sbi->s_udfrev);
+ if (sbi->s_udfrev > le16_to_cpu(lvidiu->minUDFWriteRev))
+ lvidiu->minUDFWriteRev = cpu_to_le16(sbi->s_udfrev);
+ lvid->integrityType = cpu_to_le32(LVID_INTEGRITY_TYPE_CLOSE);
+
+ lvid->descTag.descCRC = cpu_to_le16(
+ crc_itu_t(0, (char *)lvid + sizeof(tag),
+ le16_to_cpu(lvid->descTag.descCRCLength)));
+
+ lvid->descTag.tagChecksum = udf_tag_checksum(&lvid->descTag);
+ mark_buffer_dirty(bh);
}
static void udf_sb_free_bitmap(struct udf_bitmap *bitmap)
@@ -1708,22 +1825,35 @@ static void udf_sb_free_bitmap(struct udf_bitmap *bitmap)
vfree(bitmap);
}
-/*
- * udf_read_super
- *
- * PURPOSE
- * Complete the specified super block.
- *
- * PRE-CONDITIONS
- * sb Pointer to superblock to complete - never NULL.
- * sb->s_dev Device to read suberblock from.
- * options Pointer to mount options.
- * silent Silent flag.
- *
- * HISTORY
- * July 1, 1997 - Andrew E. Mileski
- * Written, tested, and released.
- */
+static void udf_free_partition(struct udf_part_map *map)
+{
+ int i;
+ struct udf_meta_data *mdata;
+
+ if (map->s_partition_flags & UDF_PART_FLAG_UNALLOC_TABLE)
+ iput(map->s_uspace.s_table);
+ if (map->s_partition_flags & UDF_PART_FLAG_FREED_TABLE)
+ iput(map->s_fspace.s_table);
+ if (map->s_partition_flags & UDF_PART_FLAG_UNALLOC_BITMAP)
+ udf_sb_free_bitmap(map->s_uspace.s_bitmap);
+ if (map->s_partition_flags & UDF_PART_FLAG_FREED_BITMAP)
+ udf_sb_free_bitmap(map->s_fspace.s_bitmap);
+ if (map->s_partition_type == UDF_SPARABLE_MAP15)
+ for (i = 0; i < 4; i++)
+ brelse(map->s_type_specific.s_sparing.s_spar_map[i]);
+ else if (map->s_partition_type == UDF_METADATA_MAP25) {
+ mdata = &map->s_type_specific.s_metadata;
+ iput(mdata->s_metadata_fe);
+ mdata->s_metadata_fe = NULL;
+
+ iput(mdata->s_mirror_fe);
+ mdata->s_mirror_fe = NULL;
+
+ iput(mdata->s_bitmap_fe);
+ mdata->s_bitmap_fe = NULL;
+ }
+}
+
static int udf_fill_super(struct super_block *sb, void *options, int silent)
{
int i;
@@ -1776,8 +1906,11 @@ static int udf_fill_super(struct super_block *sb, void *options, int silent)
sbi->s_nls_map = uopt.nls_map;
/* Set the block size for all transfers */
- if (!udf_set_blocksize(sb, uopt.blocksize))
+ if (!sb_min_blocksize(sb, uopt.blocksize)) {
+ udf_debug("Bad block size (%d)\n", uopt.blocksize);
+ printk(KERN_ERR "udf: bad block size (%d)\n", uopt.blocksize);
goto error_out;
+ }
if (uopt.session == 0xFFFFFFFF)
sbi->s_session = udf_get_last_session(sb);
@@ -1789,7 +1922,6 @@ static int udf_fill_super(struct super_block *sb, void *options, int silent)
sbi->s_last_block = uopt.lastblock;
sbi->s_anchor[0] = sbi->s_anchor[1] = 0;
sbi->s_anchor[2] = uopt.anchor;
- sbi->s_anchor[3] = 256;
if (udf_check_valid(sb, uopt.novrs, silent)) {
/* read volume recognition sequences */
@@ -1806,7 +1938,7 @@ static int udf_fill_super(struct super_block *sb, void *options, int silent)
sb->s_magic = UDF_SUPER_MAGIC;
sb->s_time_gran = 1000;
- if (udf_load_partition(sb, &fileset)) {
+ if (udf_load_sequence(sb, &fileset)) {
printk(KERN_WARNING "UDF-fs: No partition found (1)\n");
goto error_out;
}
@@ -1856,12 +1988,12 @@ static int udf_fill_super(struct super_block *sb, void *options, int silent)
}
if (!silent) {
- kernel_timestamp ts;
- udf_time_to_stamp(&ts, sbi->s_record_time);
+ timestamp ts;
+ udf_time_to_disk_stamp(&ts, sbi->s_record_time);
udf_info("UDF: Mounting volume '%s', "
"timestamp %04u/%02u/%02u %02u:%02u (%x)\n",
- sbi->s_volume_ident, ts.year, ts.month, ts.day,
- ts.hour, ts.minute, ts.typeAndTimezone);
+ sbi->s_volume_ident, le16_to_cpu(ts.year), ts.month, ts.day,
+ ts.hour, ts.minute, le16_to_cpu(ts.typeAndTimezone));
}
if (!(sb->s_flags & MS_RDONLY))
udf_open_lvid(sb);
@@ -1890,21 +2022,9 @@ static int udf_fill_super(struct super_block *sb, void *options, int silent)
error_out:
if (sbi->s_vat_inode)
iput(sbi->s_vat_inode);
- if (sbi->s_partitions) {
- struct udf_part_map *map = &sbi->s_partmaps[sbi->s_partition];
- if (map->s_partition_flags & UDF_PART_FLAG_UNALLOC_TABLE)
- iput(map->s_uspace.s_table);
- if (map->s_partition_flags & UDF_PART_FLAG_FREED_TABLE)
- iput(map->s_fspace.s_table);
- if (map->s_partition_flags & UDF_PART_FLAG_UNALLOC_BITMAP)
- udf_sb_free_bitmap(map->s_uspace.s_bitmap);
- if (map->s_partition_flags & UDF_PART_FLAG_FREED_BITMAP)
- udf_sb_free_bitmap(map->s_fspace.s_bitmap);
- if (map->s_partition_type == UDF_SPARABLE_MAP15)
- for (i = 0; i < 4; i++)
- brelse(map->s_type_specific.s_sparing.
- s_spar_map[i]);
- }
+ if (sbi->s_partitions)
+ for (i = 0; i < sbi->s_partitions; i++)
+ udf_free_partition(&sbi->s_partmaps[i]);
#ifdef CONFIG_UDF_NLS
if (UDF_QUERY_FLAG(sb, UDF_FLAG_NLS_MAP))
unload_nls(sbi->s_nls_map);
@@ -1920,8 +2040,8 @@ error_out:
return -EINVAL;
}
-void udf_error(struct super_block *sb, const char *function,
- const char *fmt, ...)
+static void udf_error(struct super_block *sb, const char *function,
+ const char *fmt, ...)
{
va_list args;
@@ -1948,19 +2068,6 @@ void udf_warning(struct super_block *sb, const char *function,
sb->s_id, function, error_buf);
}
-/*
- * udf_put_super
- *
- * PURPOSE
- * Prepare for destruction of the superblock.
- *
- * DESCRIPTION
- * Called before the filesystem is unmounted.
- *
- * HISTORY
- * July 1, 1997 - Andrew E. Mileski
- * Written, tested, and released.
- */
static void udf_put_super(struct super_block *sb)
{
int i;
@@ -1969,21 +2076,9 @@ static void udf_put_super(struct super_block *sb)
sbi = UDF_SB(sb);
if (sbi->s_vat_inode)
iput(sbi->s_vat_inode);
- if (sbi->s_partitions) {
- struct udf_part_map *map = &sbi->s_partmaps[sbi->s_partition];
- if (map->s_partition_flags & UDF_PART_FLAG_UNALLOC_TABLE)
- iput(map->s_uspace.s_table);
- if (map->s_partition_flags & UDF_PART_FLAG_FREED_TABLE)
- iput(map->s_fspace.s_table);
- if (map->s_partition_flags & UDF_PART_FLAG_UNALLOC_BITMAP)
- udf_sb_free_bitmap(map->s_uspace.s_bitmap);
- if (map->s_partition_flags & UDF_PART_FLAG_FREED_BITMAP)
- udf_sb_free_bitmap(map->s_fspace.s_bitmap);
- if (map->s_partition_type == UDF_SPARABLE_MAP15)
- for (i = 0; i < 4; i++)
- brelse(map->s_type_specific.s_sparing.
- s_spar_map[i]);
- }
+ if (sbi->s_partitions)
+ for (i = 0; i < sbi->s_partitions; i++)
+ udf_free_partition(&sbi->s_partmaps[i]);
#ifdef CONFIG_UDF_NLS
if (UDF_QUERY_FLAG(sb, UDF_FLAG_NLS_MAP))
unload_nls(sbi->s_nls_map);
@@ -1996,19 +2091,6 @@ static void udf_put_super(struct super_block *sb)
sb->s_fs_info = NULL;
}
-/*
- * udf_stat_fs
- *
- * PURPOSE
- * Return info about the filesystem.
- *
- * DESCRIPTION
- * Called by sys_statfs()
- *
- * HISTORY
- * July 1, 1997 - Andrew E. Mileski
- * Written, tested, and released.
- */
static int udf_statfs(struct dentry *dentry, struct kstatfs *buf)
{
struct super_block *sb = dentry->d_sb;
@@ -2035,10 +2117,6 @@ static int udf_statfs(struct dentry *dentry, struct kstatfs *buf)
return 0;
}
-static unsigned char udf_bitmap_lookup[16] = {
- 0, 1, 1, 2, 1, 2, 2, 3, 1, 2, 2, 3, 2, 3, 3, 4
-};
-
static unsigned int udf_count_free_bitmap(struct super_block *sb,
struct udf_bitmap *bitmap)
{
@@ -2048,7 +2126,6 @@ static unsigned int udf_count_free_bitmap(struct super_block *sb,
int block = 0, newblock;
kernel_lb_addr loc;
uint32_t bytes;
- uint8_t value;
uint8_t *ptr;
uint16_t ident;
struct spaceBitmapDesc *bm;
@@ -2074,13 +2151,10 @@ static unsigned int udf_count_free_bitmap(struct super_block *sb,
ptr = (uint8_t *)bh->b_data;
while (bytes > 0) {
- while ((bytes > 0) && (index < sb->s_blocksize)) {
- value = ptr[index];
- accum += udf_bitmap_lookup[value & 0x0f];
- accum += udf_bitmap_lookup[value >> 4];
- index++;
- bytes--;
- }
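+		/* count the set (i.e. free) bits of this buffer in one call */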
+ u32 cur_bytes = min_t(u32, bytes, sb->s_blocksize - index);
+ accum += bitmap_weight((const unsigned long *)(ptr + index),
+ cur_bytes * 8);
+ bytes -= cur_bytes;
if (bytes) {
brelse(bh);
newblock = udf_get_lb_pblock(sb, loc, ++block);
diff --git a/fs/udf/symlink.c b/fs/udf/symlink.c
index 6ec99221e50..c3265e1385d 100644
--- a/fs/udf/symlink.c
+++ b/fs/udf/symlink.c
@@ -23,7 +23,6 @@
#include <asm/uaccess.h>
#include <linux/errno.h>
#include <linux/fs.h>
-#include <linux/udf_fs.h>
#include <linux/time.h>
#include <linux/mm.h>
#include <linux/stat.h>
diff --git a/fs/udf/truncate.c b/fs/udf/truncate.c
index fe61be17cda..65e19b4f942 100644
--- a/fs/udf/truncate.c
+++ b/fs/udf/truncate.c
@@ -22,7 +22,6 @@
#include "udfdecl.h"
#include <linux/fs.h>
#include <linux/mm.h>
-#include <linux/udf_fs.h>
#include <linux/buffer_head.h>
#include "udf_i.h"
@@ -180,6 +179,24 @@ void udf_discard_prealloc(struct inode *inode)
brelse(epos.bh);
}
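+/*
+ * Update the tag of an allocation extent descriptor block after truncation:
+ * record the new length of the allocation descriptors and include them in
+ * the checksummed length, except under strict conformance on pre-UDF 2.01
+ * media.
+ */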
+static void udf_update_alloc_ext_desc(struct inode *inode,
+ struct extent_position *epos,
+ u32 lenalloc)
+{
+ struct super_block *sb = inode->i_sb;
+ struct udf_sb_info *sbi = UDF_SB(sb);
+
+ struct allocExtDesc *aed = (struct allocExtDesc *) (epos->bh->b_data);
+ int len = sizeof(struct allocExtDesc);
+
+ aed->lengthAllocDescs = cpu_to_le32(lenalloc);
+ if (!UDF_QUERY_FLAG(sb, UDF_FLAG_STRICT) || sbi->s_udfrev >= 0x0201)
+ len += lenalloc;
+
+ udf_update_tag(epos->bh->b_data, len);
+ mark_buffer_dirty_inode(epos->bh, inode);
+}
+
void udf_truncate_extents(struct inode *inode)
{
struct extent_position epos;
@@ -187,7 +204,6 @@ void udf_truncate_extents(struct inode *inode)
uint32_t elen, nelen = 0, indirect_ext_len = 0, lenalloc;
int8_t etype;
struct super_block *sb = inode->i_sb;
- struct udf_sb_info *sbi = UDF_SB(sb);
sector_t first_block = inode->i_size >> sb->s_blocksize_bits, offset;
loff_t byte_offset;
int adsize;
@@ -224,35 +240,15 @@ void udf_truncate_extents(struct inode *inode)
if (indirect_ext_len) {
/* We managed to free all extents in the
* indirect extent - free it too */
- if (!epos.bh)
- BUG();
+ BUG_ON(!epos.bh);
udf_free_blocks(sb, inode, epos.block,
0, indirect_ext_len);
- } else {
- if (!epos.bh) {
- iinfo->i_lenAlloc =
- lenalloc;
- mark_inode_dirty(inode);
- } else {
- struct allocExtDesc *aed =
- (struct allocExtDesc *)
- (epos.bh->b_data);
- int len =
- sizeof(struct allocExtDesc);
-
- aed->lengthAllocDescs =
- cpu_to_le32(lenalloc);
- if (!UDF_QUERY_FLAG(sb,
- UDF_FLAG_STRICT) ||
- sbi->s_udfrev >= 0x0201)
- len += lenalloc;
-
- udf_update_tag(epos.bh->b_data,
- len);
- mark_buffer_dirty_inode(
- epos.bh, inode);
- }
- }
+ } else if (!epos.bh) {
+ iinfo->i_lenAlloc = lenalloc;
+ mark_inode_dirty(inode);
+ } else
+ udf_update_alloc_ext_desc(inode,
+ &epos, lenalloc);
brelse(epos.bh);
epos.offset = sizeof(struct allocExtDesc);
epos.block = eloc;
@@ -272,29 +268,14 @@ void udf_truncate_extents(struct inode *inode)
}
if (indirect_ext_len) {
- if (!epos.bh)
- BUG();
+ BUG_ON(!epos.bh);
udf_free_blocks(sb, inode, epos.block, 0,
indirect_ext_len);
- } else {
- if (!epos.bh) {
- iinfo->i_lenAlloc = lenalloc;
- mark_inode_dirty(inode);
- } else {
- struct allocExtDesc *aed =
- (struct allocExtDesc *)(epos.bh->b_data);
- aed->lengthAllocDescs = cpu_to_le32(lenalloc);
- if (!UDF_QUERY_FLAG(sb, UDF_FLAG_STRICT) ||
- sbi->s_udfrev >= 0x0201)
- udf_update_tag(epos.bh->b_data,
- lenalloc +
- sizeof(struct allocExtDesc));
- else
- udf_update_tag(epos.bh->b_data,
- sizeof(struct allocExtDesc));
- mark_buffer_dirty_inode(epos.bh, inode);
- }
- }
+ } else if (!epos.bh) {
+ iinfo->i_lenAlloc = lenalloc;
+ mark_inode_dirty(inode);
+ } else
+ udf_update_alloc_ext_desc(inode, &epos, lenalloc);
} else if (inode->i_size) {
if (byte_offset) {
kernel_long_ad extent;
diff --git a/fs/udf/udf_i.h b/fs/udf/udf_i.h
index ccc52f16bf7..4f86b1d98a5 100644
--- a/fs/udf/udf_i.h
+++ b/fs/udf/udf_i.h
@@ -1,10 +1,32 @@
-#ifndef __LINUX_UDF_I_H
-#define __LINUX_UDF_I_H
+#ifndef _UDF_I_H
+#define _UDF_I_H
+
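+/* in-core UDF inode information, embedding the generic VFS inode */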
+struct udf_inode_info {
+ struct timespec i_crtime;
+ /* Physical address of inode */
+ kernel_lb_addr i_location;
+ __u64 i_unique;
+ __u32 i_lenEAttr;
+ __u32 i_lenAlloc;
+ __u64 i_lenExtents;
+ __u32 i_next_alloc_block;
+ __u32 i_next_alloc_goal;
+ unsigned i_alloc_type : 3;
+ unsigned i_efe : 1; /* extendedFileEntry */
+ unsigned i_use : 1; /* unallocSpaceEntry */
+ unsigned i_strat4096 : 1;
+ unsigned reserved : 26;
+ union {
+ short_ad *i_sad;
+ long_ad *i_lad;
+ __u8 *i_data;
+ } i_ext;
+ struct inode vfs_inode;
+};
-#include <linux/udf_fs_i.h>
static inline struct udf_inode_info *UDF_I(struct inode *inode)
{
return list_entry(inode, struct udf_inode_info, vfs_inode);
}
-#endif /* !defined(_LINUX_UDF_I_H) */
+#endif /* _UDF_I_H */
diff --git a/fs/udf/udf_sb.h b/fs/udf/udf_sb.h
index 737d1c604ee..1c1c514a972 100644
--- a/fs/udf/udf_sb.h
+++ b/fs/udf/udf_sb.h
@@ -1,10 +1,12 @@
#ifndef __LINUX_UDF_SB_H
#define __LINUX_UDF_SB_H
+#include <linux/mutex.h>
+
/* Since UDF 2.01 is ISO 13346 based... */
#define UDF_SUPER_MAGIC 0x15013346
-#define UDF_MAX_READ_VERSION 0x0201
+#define UDF_MAX_READ_VERSION 0x0250
#define UDF_MAX_WRITE_VERSION 0x0201
#define UDF_FLAG_USE_EXTENDED_FE 0
@@ -38,6 +40,111 @@
#define UDF_PART_FLAG_REWRITABLE 0x0040
#define UDF_PART_FLAG_OVERWRITABLE 0x0080
+#define UDF_MAX_BLOCK_LOADED 8
+
+#define UDF_TYPE1_MAP15 0x1511U
+#define UDF_VIRTUAL_MAP15 0x1512U
+#define UDF_VIRTUAL_MAP20 0x2012U
+#define UDF_SPARABLE_MAP15 0x1522U
+#define UDF_METADATA_MAP25 0x2511U
+
+#pragma pack(1) /* XXX(hch): Why? This file just defines in-core structures */
+
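+/* metadata partition map data (UDF 2.50 metadata partition) */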
+struct udf_meta_data {
+ __u32 s_meta_file_loc;
+ __u32 s_mirror_file_loc;
+ __u32 s_bitmap_file_loc;
+ __u32 s_alloc_unit_size;
+ __u16 s_align_unit_size;
+ __u8 s_dup_md_flag;
+ struct inode *s_metadata_fe;
+ struct inode *s_mirror_fe;
+ struct inode *s_bitmap_fe;
+};
+
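+/* sparable partition map data (UDF 1.50 defect management) */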
+struct udf_sparing_data {
+ __u16 s_packet_len;
+ struct buffer_head *s_spar_map[4];
+};
+
+struct udf_virtual_data {
+ __u32 s_num_entries;
+ __u16 s_start_offset;
+};
+
+struct udf_bitmap {
+ __u32 s_extLength;
+ __u32 s_extPosition;
+ __u16 s_nr_groups;
+ struct buffer_head **s_block_bitmap;
+};
+
+struct udf_part_map {
+ union {
+ struct udf_bitmap *s_bitmap;
+ struct inode *s_table;
+ } s_uspace;
+ union {
+ struct udf_bitmap *s_bitmap;
+ struct inode *s_table;
+ } s_fspace;
+ __u32 s_partition_root;
+ __u32 s_partition_len;
+ __u16 s_partition_type;
+ __u16 s_partition_num;
+ union {
+ struct udf_sparing_data s_sparing;
+ struct udf_virtual_data s_virtual;
+ struct udf_meta_data s_metadata;
+ } s_type_specific;
+ __u32 (*s_partition_func)(struct super_block *, __u32, __u16, __u32);
+ __u16 s_volumeseqnum;
+ __u16 s_partition_flags;
+};
+
+#pragma pack()
+
+struct udf_sb_info {
+ struct udf_part_map *s_partmaps;
+ __u8 s_volume_ident[32];
+
+ /* Overall info */
+ __u16 s_partitions;
+ __u16 s_partition;
+
+ /* Sector headers */
+ __s32 s_session;
+ __u32 s_anchor[3];
+ __u32 s_last_block;
+
+ struct buffer_head *s_lvid_bh;
+
+ /* Default permissions */
+ mode_t s_umask;
+ gid_t s_gid;
+ uid_t s_uid;
+
+ /* Root Info */
+ struct timespec s_record_time;
+
+ /* Fileset Info */
+ __u16 s_serial_number;
+
+ /* highest UDF revision we have recorded to this media */
+ __u16 s_udfrev;
+
+ /* Miscellaneous flags */
+ __u32 s_flags;
+
+ /* Encoding info */
+ struct nls_table *s_nls_map;
+
+ /* VAT inode */
+ struct inode *s_vat_inode;
+
+ struct mutex s_alloc_mutex;
+};
+
static inline struct udf_sb_info *UDF_SB(struct super_block *sb)
{
return sb->s_fs_info;
diff --git a/fs/udf/udfdecl.h b/fs/udf/udfdecl.h
index 681dc2b66cd..f3f45d02927 100644
--- a/fs/udf/udfdecl.h
+++ b/fs/udf/udfdecl.h
@@ -1,17 +1,37 @@
#ifndef __UDF_DECL_H
#define __UDF_DECL_H
-#include <linux/udf_fs.h>
#include "ecma_167.h"
#include "osta_udf.h"
#include <linux/fs.h>
#include <linux/types.h>
-#include <linux/udf_fs_i.h>
-#include <linux/udf_fs_sb.h>
#include <linux/buffer_head.h>
+#include <linux/udf_fs_i.h>
+#include "udf_sb.h"
#include "udfend.h"
+#include "udf_i.h"
+
+#define UDF_PREALLOCATE
+#define UDF_DEFAULT_PREALLOC_BLOCKS 8
+
+#define UDFFS_DEBUG
+
+#ifdef UDFFS_DEBUG
+#define udf_debug(f, a...) \
+do { \
+ printk(KERN_DEBUG "UDF-fs DEBUG %s:%d:%s: ", \
+ __FILE__, __LINE__, __func__); \
+ printk(f, ##a); \
+} while (0)
+#else
+#define udf_debug(f, a...) /**/
+#endif
+
+#define udf_info(f, a...) \
+	printk(KERN_INFO "UDF-fs INFO " f, ##a)
+
#define udf_fixed_to_variable(x) ( ( ( (x) >> 5 ) * 39 ) + ( (x) & 0x0000001F ) )
#define udf_variable_to_fixed(x) ( ( ( (x) / 39 ) << 5 ) + ( (x) % 39 ) )
@@ -23,16 +43,24 @@
#define UDF_NAME_LEN 256
#define UDF_PATH_LEN 1023
-#define udf_file_entry_alloc_offset(inode)\
- (UDF_I(inode)->i_use ?\
- sizeof(struct unallocSpaceEntry) :\
- ((UDF_I(inode)->i_efe ?\
- sizeof(struct extendedFileEntry) :\
- sizeof(struct fileEntry)) + UDF_I(inode)->i_lenEAttr))
-
-#define udf_ext0_offset(inode)\
- (UDF_I(inode)->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB ?\
- udf_file_entry_alloc_offset(inode) : 0)
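+/*
+ * Byte offset within the (extended) file entry at which the allocation
+ * descriptors, or the in-ICB file data, begin.
+ */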
+static inline size_t udf_file_entry_alloc_offset(struct inode *inode)
+{
+ struct udf_inode_info *iinfo = UDF_I(inode);
+ if (iinfo->i_use)
+ return sizeof(struct unallocSpaceEntry);
+ else if (iinfo->i_efe)
+ return sizeof(struct extendedFileEntry) + iinfo->i_lenEAttr;
+ else
+ return sizeof(struct fileEntry) + iinfo->i_lenEAttr;
+}
+
+static inline size_t udf_ext0_offset(struct inode *inode)
+{
+ if (UDF_I(inode)->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB)
+ return udf_file_entry_alloc_offset(inode);
+ else
+ return 0;
+}
#define udf_get_lb_pblock(sb,loc,offset) udf_get_pblock((sb), (loc).logicalBlockNum, (loc).partitionReferenceNum, (offset))
@@ -83,7 +111,6 @@ struct extent_position {
};
/* super.c */
-extern void udf_error(struct super_block *, const char *, const char *, ...);
extern void udf_warning(struct super_block *, const char *, const char *, ...);
/* namei.c */
@@ -150,6 +177,8 @@ extern uint32_t udf_get_pblock_virt20(struct super_block *, uint32_t, uint16_t,
uint32_t);
extern uint32_t udf_get_pblock_spar15(struct super_block *, uint32_t, uint16_t,
uint32_t);
+extern uint32_t udf_get_pblock_meta25(struct super_block *, uint32_t, uint16_t,
+ uint32_t);
extern int udf_relocate_blocks(struct super_block *, long, long *);
/* unicode.c */
@@ -157,7 +186,7 @@ extern int udf_get_filename(struct super_block *, uint8_t *, uint8_t *, int);
extern int udf_put_filename(struct super_block *, const uint8_t *, uint8_t *,
int);
extern int udf_build_ustr(struct ustr *, dstring *, int);
-extern int udf_CS0toUTF8(struct ustr *, struct ustr *);
+extern int udf_CS0toUTF8(struct ustr *, const struct ustr *);
/* ialloc.c */
extern void udf_free_inode(struct inode *);
@@ -191,11 +220,9 @@ extern struct fileIdentDesc *udf_get_fileident(void *buffer, int bufsize,
extern long_ad *udf_get_filelongad(uint8_t *, int, uint32_t *, int);
extern short_ad *udf_get_fileshortad(uint8_t *, int, uint32_t *, int);
-/* crc.c */
-extern uint16_t udf_crc(uint8_t *, uint32_t, uint16_t);
-
/* udftime.c */
-extern time_t *udf_stamp_to_time(time_t *, long *, kernel_timestamp);
-extern kernel_timestamp *udf_time_to_stamp(kernel_timestamp *, struct timespec);
+extern struct timespec *udf_disk_stamp_to_time(struct timespec *dest,
+ timestamp src);
+extern timestamp *udf_time_to_disk_stamp(timestamp *dest, struct timespec src);
#endif /* __UDF_DECL_H */
diff --git a/fs/udf/udfend.h b/fs/udf/udfend.h
index c4bd1203f85..489f52fb428 100644
--- a/fs/udf/udfend.h
+++ b/fs/udf/udfend.h
@@ -24,17 +24,6 @@ static inline lb_addr cpu_to_lelb(kernel_lb_addr in)
return out;
}
-static inline kernel_timestamp lets_to_cpu(timestamp in)
-{
- kernel_timestamp out;
-
- memcpy(&out, &in, sizeof(timestamp));
- out.typeAndTimezone = le16_to_cpu(in.typeAndTimezone);
- out.year = le16_to_cpu(in.year);
-
- return out;
-}
-
static inline short_ad lesa_to_cpu(short_ad in)
{
short_ad out;
@@ -85,15 +74,4 @@ static inline kernel_extent_ad leea_to_cpu(extent_ad in)
return out;
}
-static inline timestamp cpu_to_lets(kernel_timestamp in)
-{
- timestamp out;
-
- memcpy(&out, &in, sizeof(timestamp));
- out.typeAndTimezone = cpu_to_le16(in.typeAndTimezone);
- out.year = cpu_to_le16(in.year);
-
- return out;
-}
-
#endif /* __UDF_ENDIAN_H */
diff --git a/fs/udf/udftime.c b/fs/udf/udftime.c
index ce595732ba6..5f811655c9b 100644
--- a/fs/udf/udftime.c
+++ b/fs/udf/udftime.c
@@ -85,39 +85,38 @@ extern struct timezone sys_tz;
#define SECS_PER_HOUR (60 * 60)
#define SECS_PER_DAY (SECS_PER_HOUR * 24)
-time_t *udf_stamp_to_time(time_t *dest, long *dest_usec, kernel_timestamp src)
+struct timespec *udf_disk_stamp_to_time(struct timespec *dest, timestamp src)
{
int yday;
- uint8_t type = src.typeAndTimezone >> 12;
+ u16 typeAndTimezone = le16_to_cpu(src.typeAndTimezone);
+ u16 year = le16_to_cpu(src.year);
+ uint8_t type = typeAndTimezone >> 12;
int16_t offset;
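+	/* type 1: local time, low 12 bits = signed offset from UTC in minutes */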
if (type == 1) {
- offset = src.typeAndTimezone << 4;
+ offset = typeAndTimezone << 4;
/* sign extent offset */
offset = (offset >> 4);
if (offset == -2047) /* unspecified offset */
offset = 0;
- } else {
+ } else
offset = 0;
- }
- if ((src.year < EPOCH_YEAR) ||
- (src.year >= EPOCH_YEAR + MAX_YEAR_SECONDS)) {
- *dest = -1;
- *dest_usec = -1;
+ if ((year < EPOCH_YEAR) ||
+ (year >= EPOCH_YEAR + MAX_YEAR_SECONDS)) {
return NULL;
}
- *dest = year_seconds[src.year - EPOCH_YEAR];
- *dest -= offset * 60;
+ dest->tv_sec = year_seconds[year - EPOCH_YEAR];
+ dest->tv_sec -= offset * 60;
- yday = ((__mon_yday[__isleap(src.year)][src.month - 1]) + src.day - 1);
- *dest += (((yday * 24) + src.hour) * 60 + src.minute) * 60 + src.second;
- *dest_usec = src.centiseconds * 10000 +
- src.hundredsOfMicroseconds * 100 + src.microseconds;
+ yday = ((__mon_yday[__isleap(year)][src.month - 1]) + src.day - 1);
+ dest->tv_sec += (((yday * 24) + src.hour) * 60 + src.minute) * 60 + src.second;
+ dest->tv_nsec = 1000 * (src.centiseconds * 10000 +
+ src.hundredsOfMicroseconds * 100 + src.microseconds);
return dest;
}
-kernel_timestamp *udf_time_to_stamp(kernel_timestamp *dest, struct timespec ts)
+timestamp *udf_time_to_disk_stamp(timestamp *dest, struct timespec ts)
{
long int days, rem, y;
const unsigned short int *ip;
@@ -128,7 +127,7 @@ kernel_timestamp *udf_time_to_stamp(kernel_timestamp *dest, struct timespec ts)
if (!dest)
return NULL;
- dest->typeAndTimezone = 0x1000 | (offset & 0x0FFF);
+ dest->typeAndTimezone = cpu_to_le16(0x1000 | (offset & 0x0FFF));
ts.tv_sec += offset * 60;
days = ts.tv_sec / SECS_PER_DAY;
@@ -151,7 +150,7 @@ kernel_timestamp *udf_time_to_stamp(kernel_timestamp *dest, struct timespec ts)
- LEAPS_THRU_END_OF(y - 1));
y = yg;
}
- dest->year = y;
+ dest->year = cpu_to_le16(y);
ip = __mon_yday[__isleap(y)];
for (y = 11; days < (long int)ip[y]; --y)
continue;
diff --git a/fs/udf/unicode.c b/fs/udf/unicode.c
index e533b11703b..9fdf8c93c58 100644
--- a/fs/udf/unicode.c
+++ b/fs/udf/unicode.c
@@ -23,7 +23,7 @@
#include <linux/kernel.h>
#include <linux/string.h> /* for memset */
#include <linux/nls.h>
-#include <linux/udf_fs.h>
+#include <linux/crc-itu-t.h>
#include "udf_sb.h"
@@ -49,14 +49,16 @@ int udf_build_ustr(struct ustr *dest, dstring *ptr, int size)
{
int usesize;
- if ((!dest) || (!ptr) || (!size))
+ if (!dest || !ptr || !size)
return -1;
+ BUG_ON(size < 2);
- memset(dest, 0, sizeof(struct ustr));
- usesize = (size > UDF_NAME_LEN) ? UDF_NAME_LEN : size;
+ usesize = min_t(size_t, ptr[size - 1], sizeof(dest->u_name));
+ usesize = min(usesize, size - 2);
dest->u_cmpID = ptr[0];
- dest->u_len = ptr[size - 1];
- memcpy(dest->u_name, ptr + 1, usesize - 1);
+ dest->u_len = usesize;
+ memcpy(dest->u_name, ptr + 1, usesize);
+ memset(dest->u_name + usesize, 0, sizeof(dest->u_name) - usesize);
return 0;
}
@@ -83,9 +85,6 @@ static int udf_build_ustr_exact(struct ustr *dest, dstring *ptr, int exactsize)
* PURPOSE
* Convert OSTA Compressed Unicode to the UTF-8 equivalent.
*
- * DESCRIPTION
- * This routine is only called by udf_filldir().
- *
* PRE-CONDITIONS
* utf Pointer to UTF-8 output buffer.
* ocu Pointer to OSTA Compressed Unicode input buffer
@@ -99,43 +98,39 @@ static int udf_build_ustr_exact(struct ustr *dest, dstring *ptr, int exactsize)
* November 12, 1997 - Andrew E. Mileski
* Written, tested, and released.
*/
-int udf_CS0toUTF8(struct ustr *utf_o, struct ustr *ocu_i)
+int udf_CS0toUTF8(struct ustr *utf_o, const struct ustr *ocu_i)
{
- uint8_t *ocu;
- uint32_t c;
+ const uint8_t *ocu;
uint8_t cmp_id, ocu_len;
int i;
- ocu = ocu_i->u_name;
-
ocu_len = ocu_i->u_len;
- cmp_id = ocu_i->u_cmpID;
- utf_o->u_len = 0;
-
if (ocu_len == 0) {
memset(utf_o, 0, sizeof(struct ustr));
- utf_o->u_cmpID = 0;
- utf_o->u_len = 0;
return 0;
}
- if ((cmp_id != 8) && (cmp_id != 16)) {
+ cmp_id = ocu_i->u_cmpID;
+ if (cmp_id != 8 && cmp_id != 16) {
+ memset(utf_o, 0, sizeof(struct ustr));
printk(KERN_ERR "udf: unknown compression code (%d) stri=%s\n",
cmp_id, ocu_i->u_name);
return 0;
}
+ ocu = ocu_i->u_name;
+ utf_o->u_len = 0;
for (i = 0; (i < ocu_len) && (utf_o->u_len <= (UDF_NAME_LEN - 3));) {
/* Expand OSTA compressed Unicode to Unicode */
- c = ocu[i++];
+ uint32_t c = ocu[i++];
if (cmp_id == 16)
c = (c << 8) | ocu[i++];
/* Compress Unicode to UTF-8 */
- if (c < 0x80U) {
+ if (c < 0x80U)
utf_o->u_name[utf_o->u_len++] = (uint8_t)c;
- } else if (c < 0x800U) {
+ else if (c < 0x800U) {
utf_o->u_name[utf_o->u_len++] =
(uint8_t)(0xc0 | (c >> 6));
utf_o->u_name[utf_o->u_len++] =
@@ -255,35 +250,32 @@ error_out:
}
static int udf_CS0toNLS(struct nls_table *nls, struct ustr *utf_o,
- struct ustr *ocu_i)
+ const struct ustr *ocu_i)
{
- uint8_t *ocu;
- uint32_t c;
+ const uint8_t *ocu;
uint8_t cmp_id, ocu_len;
int i;
- ocu = ocu_i->u_name;
ocu_len = ocu_i->u_len;
- cmp_id = ocu_i->u_cmpID;
- utf_o->u_len = 0;
-
if (ocu_len == 0) {
memset(utf_o, 0, sizeof(struct ustr));
- utf_o->u_cmpID = 0;
- utf_o->u_len = 0;
return 0;
}
- if ((cmp_id != 8) && (cmp_id != 16)) {
+ cmp_id = ocu_i->u_cmpID;
+ if (cmp_id != 8 && cmp_id != 16) {
+ memset(utf_o, 0, sizeof(struct ustr));
printk(KERN_ERR "udf: unknown compression code (%d) stri=%s\n",
cmp_id, ocu_i->u_name);
return 0;
}
+ ocu = ocu_i->u_name;
+ utf_o->u_len = 0;
for (i = 0; (i < ocu_len) && (utf_o->u_len <= (UDF_NAME_LEN - 3));) {
/* Expand OSTA compressed Unicode to Unicode */
- c = ocu[i++];
+ uint32_t c = ocu[i++];
if (cmp_id == 16)
c = (c << 8) | ocu[i++];
@@ -463,7 +455,7 @@ static int udf_translate_to_linux(uint8_t *newName, uint8_t *udfName,
} else if (newIndex > 250)
newIndex = 250;
newName[newIndex++] = CRC_MARK;
- valueCRC = udf_crc(fidName, fidNameLen, 0);
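+	/* CRC-ITU-T over the original name, appended as four hex digits */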
+ valueCRC = crc_itu_t(0, fidName, fidNameLen);
newName[newIndex++] = hexChar[(valueCRC & 0xf000) >> 12];
newName[newIndex++] = hexChar[(valueCRC & 0x0f00) >> 8];
newName[newIndex++] = hexChar[(valueCRC & 0x00f0) >> 4];
diff --git a/fs/xattr.c b/fs/xattr.c
index f7062da505d..89a942f07e1 100644
--- a/fs/xattr.c
+++ b/fs/xattr.c
@@ -307,7 +307,6 @@ sys_fsetxattr(int fd, char __user *name, void __user *value,
error = setxattr(dentry, name, value, size, flags);
mnt_drop_write(f->f_path.mnt);
}
-out_fput:
fput(f);
return error;
}