Diffstat (limited to 'fs')
-rw-r--r--  fs/Makefile           |    4
-rw-r--r--  fs/afs/afs.h          |   23
-rw-r--r--  fs/afs/afs_fs.h       |    3
-rw-r--r--  fs/afs/dir.c          |   18
-rw-r--r--  fs/afs/file.c         |    2
-rw-r--r--  fs/afs/fsclient.c     |  298
-rw-r--r--  fs/afs/inode.c        |   10
-rw-r--r--  fs/afs/internal.h     |    6
-rw-r--r--  fs/afs/super.c        |   44
-rw-r--r--  fs/afs/vnode.c        |   85
-rw-r--r--  fs/afs/write.c        |    5
-rw-r--r--  fs/aio.c              |   28
-rw-r--r--  fs/anon_inodes.c      |  200
-rw-r--r--  fs/autofs/autofs_i.h  |    4
-rw-r--r--  fs/autofs/inode.c     |   47
-rw-r--r--  fs/autofs/root.c      |   83
-rw-r--r--  fs/autofs4/inode.c    |   16
-rw-r--r--  fs/autofs4/root.c     |   18
-rw-r--r--  fs/compat.c           |   49
-rw-r--r--  fs/eventfd.c          |  228
-rw-r--r--  fs/eventpoll.c        | 1178
-rw-r--r--  fs/exec.c             |   13
-rw-r--r--  fs/mpage.c            |  174
-rw-r--r--  fs/partitions/Kconfig |    2
-rw-r--r--  fs/partitions/efi.c   |   12
-rw-r--r--  fs/signalfd.c         |  349
-rw-r--r--  fs/timerfd.c          |  227
27 files changed, 2164 insertions, 962 deletions
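The bulk of this change is the new anon_inodes infrastructure and the signalfd/timerfd/eventfd system calls built on it, plus an eventpoll rewrite to use the same single-inode scheme. As a quick orientation before the diff itself, here is a minimal userspace sketch (not part of the patch) of the eventfd counter semantics implemented in fs/eventfd.c below: each write(2) adds its 64-bit value to the counter, and a read(2) returns the accumulated total and resets it to zero. It assumes __NR_eventfd is exposed via <sys/syscall.h>; the syscall is invoked directly since no libc wrapper existed when this code was merged.

    #include <stdio.h>
    #include <stdint.h>
    #include <unistd.h>
    #include <sys/syscall.h>

    int main(void)
    {
        uint64_t n = 3, total;
        int fd = syscall(__NR_eventfd, 0);   /* counter starts at 0 */

        if (fd < 0)
            return 1;
        write(fd, &n, sizeof(n));            /* count = 3 */
        n = 4;
        write(fd, &n, sizeof(n));            /* count = 7 */
        read(fd, &total, sizeof(total));     /* returns 7, resets count to 0 */
        printf("accumulated: %llu\n", (unsigned long long)total);
        close(fd);
        return 0;
    }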
diff --git a/fs/Makefile b/fs/Makefile index 9edf4112bee..720c29d57a6 100644 --- a/fs/Makefile +++ b/fs/Makefile @@ -22,6 +22,10 @@ endif obj-$(CONFIG_INOTIFY) += inotify.o obj-$(CONFIG_INOTIFY_USER) += inotify_user.o obj-$(CONFIG_EPOLL) += eventpoll.o +obj-$(CONFIG_ANON_INODES) += anon_inodes.o +obj-$(CONFIG_SIGNALFD) += signalfd.o +obj-$(CONFIG_TIMERFD) += timerfd.o +obj-$(CONFIG_EVENTFD) += eventfd.o obj-$(CONFIG_COMPAT) += compat.o compat_ioctl.o nfsd-$(CONFIG_NFSD) := nfsctl.o diff --git a/fs/afs/afs.h b/fs/afs/afs.h index 52d0752265b..24525794814 100644 --- a/fs/afs/afs.h +++ b/fs/afs/afs.h @@ -16,6 +16,9 @@ #define AFS_MAXCELLNAME 64 /* maximum length of a cell name */ #define AFS_MAXVOLNAME 64 /* maximum length of a volume name */ +#define AFSNAMEMAX 256 /* maximum length of a filename plus NUL */ +#define AFSPATHMAX 1024 /* maximum length of a pathname plus NUL */ +#define AFSOPAQUEMAX 1024 /* maximum length of an opaque field */ typedef unsigned afs_volid_t; typedef unsigned afs_vnodeid_t; @@ -143,4 +146,24 @@ struct afs_volsync { time_t creation; /* volume creation time */ }; +/* + * AFS volume status record + */ +struct afs_volume_status { + u32 vid; /* volume ID */ + u32 parent_id; /* parent volume ID */ + u8 online; /* true if volume currently online and available */ + u8 in_service; /* true if volume currently in service */ + u8 blessed; /* same as in_service */ + u8 needs_salvage; /* true if consistency checking required */ + u32 type; /* volume type (afs_voltype_t) */ + u32 min_quota; /* minimum space set aside (blocks) */ + u32 max_quota; /* maximum space this volume may occupy (blocks) */ + u32 blocks_in_use; /* space this volume currently occupies (blocks) */ + u32 part_blocks_avail; /* space available in volume's partition */ + u32 part_max_blocks; /* size of volume's partition */ +}; + +#define AFS_BLOCK_SIZE 1024 + #endif /* AFS_H */ diff --git a/fs/afs/afs_fs.h b/fs/afs/afs_fs.h index d963ef4daee..a18c374ebe0 100644 --- a/fs/afs/afs_fs.h +++ b/fs/afs/afs_fs.h @@ -28,7 +28,8 @@ enum AFS_FS_Operations { FSMAKEDIR = 141, /* AFS Create a directory */ FSREMOVEDIR = 142, /* AFS Remove a directory */ FSGIVEUPCALLBACKS = 147, /* AFS Discard callback promises */ - FSGETVOLUMEINFO = 148, /* AFS Get root volume information */ + FSGETVOLUMEINFO = 148, /* AFS Get information about a volume */ + FSGETVOLUMESTATUS = 149, /* AFS Get volume status information */ FSGETROOTVOLUME = 151, /* AFS Get root volume name */ FSLOOKUP = 161, /* AFS lookup file in directory */ FSFETCHDATA64 = 65537, /* AFS Fetch file data */ diff --git a/fs/afs/dir.c b/fs/afs/dir.c index 2fb31276196..719af4fb15d 100644 --- a/fs/afs/dir.c +++ b/fs/afs/dir.c @@ -497,7 +497,7 @@ static struct dentry *afs_lookup(struct inode *dir, struct dentry *dentry, ASSERTCMP(dentry->d_inode, ==, NULL); - if (dentry->d_name.len > 255) { + if (dentry->d_name.len >= AFSNAMEMAX) { _leave(" = -ENAMETOOLONG"); return ERR_PTR(-ENAMETOOLONG); } @@ -736,7 +736,7 @@ static int afs_mkdir(struct inode *dir, struct dentry *dentry, int mode) dvnode->fid.vid, dvnode->fid.vnode, dentry->d_name.name, mode); ret = -ENAMETOOLONG; - if (dentry->d_name.len > 255) + if (dentry->d_name.len >= AFSNAMEMAX) goto error; key = afs_request_key(dvnode->volume->cell); @@ -801,7 +801,7 @@ static int afs_rmdir(struct inode *dir, struct dentry *dentry) dvnode->fid.vid, dvnode->fid.vnode, dentry->d_name.name); ret = -ENAMETOOLONG; - if (dentry->d_name.len > 255) + if (dentry->d_name.len >= AFSNAMEMAX) goto error; key = afs_request_key(dvnode->volume->cell); @@ 
-847,7 +847,7 @@ static int afs_unlink(struct inode *dir, struct dentry *dentry) dvnode->fid.vid, dvnode->fid.vnode, dentry->d_name.name); ret = -ENAMETOOLONG; - if (dentry->d_name.len > 255) + if (dentry->d_name.len >= AFSNAMEMAX) goto error; key = afs_request_key(dvnode->volume->cell); @@ -921,7 +921,7 @@ static int afs_create(struct inode *dir, struct dentry *dentry, int mode, dvnode->fid.vid, dvnode->fid.vnode, dentry->d_name.name, mode); ret = -ENAMETOOLONG; - if (dentry->d_name.len > 255) + if (dentry->d_name.len >= AFSNAMEMAX) goto error; key = afs_request_key(dvnode->volume->cell); @@ -990,7 +990,7 @@ static int afs_link(struct dentry *from, struct inode *dir, dentry->d_name.name); ret = -ENAMETOOLONG; - if (dentry->d_name.len > 255) + if (dentry->d_name.len >= AFSNAMEMAX) goto error; key = afs_request_key(dvnode->volume->cell); @@ -1038,11 +1038,11 @@ static int afs_symlink(struct inode *dir, struct dentry *dentry, content); ret = -ENAMETOOLONG; - if (dentry->d_name.len > 255) + if (dentry->d_name.len >= AFSNAMEMAX) goto error; ret = -EINVAL; - if (strlen(content) > 1023) + if (strlen(content) >= AFSPATHMAX) goto error; key = afs_request_key(dvnode->volume->cell); @@ -1112,7 +1112,7 @@ static int afs_rename(struct inode *old_dir, struct dentry *old_dentry, new_dentry->d_name.name); ret = -ENAMETOOLONG; - if (new_dentry->d_name.len > 255) + if (new_dentry->d_name.len >= AFSNAMEMAX) goto error; key = afs_request_key(orig_dvnode->volume->cell); diff --git a/fs/afs/file.c b/fs/afs/file.c index 3e25795e5a4..9c0e721d9fc 100644 --- a/fs/afs/file.c +++ b/fs/afs/file.c @@ -236,7 +236,7 @@ static void afs_invalidatepage(struct page *page, unsigned long offset) { int ret = 1; - kenter("{%lu},%lu", page->index, offset); + _enter("{%lu},%lu", page->index, offset); BUG_ON(!PageLocked(page)); diff --git a/fs/afs/fsclient.c b/fs/afs/fsclient.c index 56cc0efa2a0..5dff1308b6f 100644 --- a/fs/afs/fsclient.c +++ b/fs/afs/fsclient.c @@ -202,6 +202,29 @@ static void xdr_encode_AFS_StoreStatus(__be32 **_bp, struct iattr *attr) } /* + * decode an AFSFetchVolumeStatus block + */ +static void xdr_decode_AFSFetchVolumeStatus(const __be32 **_bp, + struct afs_volume_status *vs) +{ + const __be32 *bp = *_bp; + + vs->vid = ntohl(*bp++); + vs->parent_id = ntohl(*bp++); + vs->online = ntohl(*bp++); + vs->in_service = ntohl(*bp++); + vs->blessed = ntohl(*bp++); + vs->needs_salvage = ntohl(*bp++); + vs->type = ntohl(*bp++); + vs->min_quota = ntohl(*bp++); + vs->max_quota = ntohl(*bp++); + vs->blocks_in_use = ntohl(*bp++); + vs->part_blocks_avail = ntohl(*bp++); + vs->part_max_blocks = ntohl(*bp++); + *_bp = bp; +} + +/* * deliver reply data to an FS.FetchStatus */ static int afs_deliver_fs_fetch_status(struct afs_call *call, @@ -1450,3 +1473,278 @@ int afs_fs_setattr(struct afs_server *server, struct key *key, return afs_make_call(&server->addr, call, GFP_NOFS, wait_mode); } + +/* + * deliver reply data to an FS.GetVolumeStatus + */ +static int afs_deliver_fs_get_volume_status(struct afs_call *call, + struct sk_buff *skb, bool last) +{ + const __be32 *bp; + char *p; + int ret; + + _enter("{%u},{%u},%d", call->unmarshall, skb->len, last); + + switch (call->unmarshall) { + case 0: + call->offset = 0; + call->unmarshall++; + + /* extract the returned status record */ + case 1: + _debug("extract status"); + ret = afs_extract_data(call, skb, last, call->buffer, + 12 * 4); + switch (ret) { + case 0: break; + case -EAGAIN: return 0; + default: return ret; + } + + bp = call->buffer; + xdr_decode_AFSFetchVolumeStatus(&bp, 
call->reply2); + call->offset = 0; + call->unmarshall++; + + /* extract the volume name length */ + case 2: + ret = afs_extract_data(call, skb, last, &call->tmp, 4); + switch (ret) { + case 0: break; + case -EAGAIN: return 0; + default: return ret; + } + + call->count = ntohl(call->tmp); + _debug("volname length: %u", call->count); + if (call->count >= AFSNAMEMAX) + return -EBADMSG; + call->offset = 0; + call->unmarshall++; + + /* extract the volume name */ + case 3: + _debug("extract volname"); + if (call->count > 0) { + ret = afs_extract_data(call, skb, last, call->reply3, + call->count); + switch (ret) { + case 0: break; + case -EAGAIN: return 0; + default: return ret; + } + } + + p = call->reply3; + p[call->count] = 0; + _debug("volname '%s'", p); + + call->offset = 0; + call->unmarshall++; + + /* extract the volume name padding */ + if ((call->count & 3) == 0) { + call->unmarshall++; + goto no_volname_padding; + } + call->count = 4 - (call->count & 3); + + case 4: + ret = afs_extract_data(call, skb, last, call->buffer, + call->count); + switch (ret) { + case 0: break; + case -EAGAIN: return 0; + default: return ret; + } + + call->offset = 0; + call->unmarshall++; + no_volname_padding: + + /* extract the offline message length */ + case 5: + ret = afs_extract_data(call, skb, last, &call->tmp, 4); + switch (ret) { + case 0: break; + case -EAGAIN: return 0; + default: return ret; + } + + call->count = ntohl(call->tmp); + _debug("offline msg length: %u", call->count); + if (call->count >= AFSNAMEMAX) + return -EBADMSG; + call->offset = 0; + call->unmarshall++; + + /* extract the offline message */ + case 6: + _debug("extract offline"); + if (call->count > 0) { + ret = afs_extract_data(call, skb, last, call->reply3, + call->count); + switch (ret) { + case 0: break; + case -EAGAIN: return 0; + default: return ret; + } + } + + p = call->reply3; + p[call->count] = 0; + _debug("offline '%s'", p); + + call->offset = 0; + call->unmarshall++; + + /* extract the offline message padding */ + if ((call->count & 3) == 0) { + call->unmarshall++; + goto no_offline_padding; + } + call->count = 4 - (call->count & 3); + + case 7: + ret = afs_extract_data(call, skb, last, call->buffer, + call->count); + switch (ret) { + case 0: break; + case -EAGAIN: return 0; + default: return ret; + } + + call->offset = 0; + call->unmarshall++; + no_offline_padding: + + /* extract the message of the day length */ + case 8: + ret = afs_extract_data(call, skb, last, &call->tmp, 4); + switch (ret) { + case 0: break; + case -EAGAIN: return 0; + default: return ret; + } + + call->count = ntohl(call->tmp); + _debug("motd length: %u", call->count); + if (call->count >= AFSNAMEMAX) + return -EBADMSG; + call->offset = 0; + call->unmarshall++; + + /* extract the message of the day */ + case 9: + _debug("extract motd"); + if (call->count > 0) { + ret = afs_extract_data(call, skb, last, call->reply3, + call->count); + switch (ret) { + case 0: break; + case -EAGAIN: return 0; + default: return ret; + } + } + + p = call->reply3; + p[call->count] = 0; + _debug("motd '%s'", p); + + call->offset = 0; + call->unmarshall++; + + /* extract the message of the day padding */ + if ((call->count & 3) == 0) { + call->unmarshall++; + goto no_motd_padding; + } + call->count = 4 - (call->count & 3); + + case 10: + ret = afs_extract_data(call, skb, last, call->buffer, + call->count); + switch (ret) { + case 0: break; + case -EAGAIN: return 0; + default: return ret; + } + + call->offset = 0; + call->unmarshall++; + no_motd_padding: + + case 11: + 
_debug("trailer %d", skb->len); + if (skb->len != 0) + return -EBADMSG; + break; + } + + if (!last) + return 0; + + _leave(" = 0 [done]"); + return 0; +} + +/* + * destroy an FS.GetVolumeStatus call + */ +static void afs_get_volume_status_call_destructor(struct afs_call *call) +{ + kfree(call->reply3); + call->reply3 = NULL; + afs_flat_call_destructor(call); +} + +/* + * FS.GetVolumeStatus operation type + */ +static const struct afs_call_type afs_RXFSGetVolumeStatus = { + .name = "FS.GetVolumeStatus", + .deliver = afs_deliver_fs_get_volume_status, + .abort_to_error = afs_abort_to_error, + .destructor = afs_get_volume_status_call_destructor, +}; + +/* + * fetch the status of a volume + */ +int afs_fs_get_volume_status(struct afs_server *server, + struct key *key, + struct afs_vnode *vnode, + struct afs_volume_status *vs, + const struct afs_wait_mode *wait_mode) +{ + struct afs_call *call; + __be32 *bp; + void *tmpbuf; + + _enter(""); + + tmpbuf = kmalloc(AFSOPAQUEMAX, GFP_KERNEL); + if (!tmpbuf) + return -ENOMEM; + + call = afs_alloc_flat_call(&afs_RXFSGetVolumeStatus, 2 * 4, 12 * 4); + if (!call) { + kfree(tmpbuf); + return -ENOMEM; + } + + call->key = key; + call->reply = vnode; + call->reply2 = vs; + call->reply3 = tmpbuf; + call->service_id = FS_SERVICE; + call->port = htons(AFS_FS_PORT); + + /* marshall the parameters */ + bp = call->request; + bp[0] = htonl(FSGETVOLUMESTATUS); + bp[1] = htonl(vnode->fid.vid); + + return afs_make_call(&server->addr, call, GFP_NOFS, wait_mode); +} diff --git a/fs/afs/inode.c b/fs/afs/inode.c index 515a5d12d8f..47f5fed7195 100644 --- a/fs/afs/inode.c +++ b/fs/afs/inode.c @@ -209,11 +209,15 @@ bad_inode: */ void afs_zap_data(struct afs_vnode *vnode) { - _enter("zap data {%x:%u}", vnode->fid.vid, vnode->fid.vnode); + _enter("{%x:%u}", vnode->fid.vid, vnode->fid.vnode); /* nuke all the non-dirty pages that aren't locked, mapped or being - * written back */ - invalidate_remote_inode(&vnode->vfs_inode); + * written back in a regular file and completely discard the pages in a + * directory or symlink */ + if (S_ISREG(vnode->vfs_inode.i_mode)) + invalidate_remote_inode(&vnode->vfs_inode); + else + invalidate_inode_pages2(vnode->vfs_inode.i_mapping); } /* diff --git a/fs/afs/internal.h b/fs/afs/internal.h index a30d4fa768e..4953ba5a6f4 100644 --- a/fs/afs/internal.h +++ b/fs/afs/internal.h @@ -506,6 +506,10 @@ extern int afs_fs_store_data(struct afs_server *, struct afs_writeback *, extern int afs_fs_setattr(struct afs_server *, struct key *, struct afs_vnode *, struct iattr *, const struct afs_wait_mode *); +extern int afs_fs_get_volume_status(struct afs_server *, struct key *, + struct afs_vnode *, + struct afs_volume_status *, + const struct afs_wait_mode *); /* * inode.c @@ -672,6 +676,8 @@ extern int afs_vnode_rename(struct afs_vnode *, struct afs_vnode *, extern int afs_vnode_store_data(struct afs_writeback *, pgoff_t, pgoff_t, unsigned, unsigned); extern int afs_vnode_setattr(struct afs_vnode *, struct key *, struct iattr *); +extern int afs_vnode_get_volume_status(struct afs_vnode *, struct key *, + struct afs_volume_status *); /* * volume.c diff --git a/fs/afs/super.c b/fs/afs/super.c index d24be334b60..579af632c8e 100644 --- a/fs/afs/super.c +++ b/fs/afs/super.c @@ -21,22 +21,20 @@ #include <linux/fs.h> #include <linux/pagemap.h> #include <linux/parser.h> +#include <linux/statfs.h> #include "internal.h" #define AFS_FS_MAGIC 0x6B414653 /* 'kAFS' */ static void afs_i_init_once(void *foo, struct kmem_cache *cachep, unsigned long flags); - static int 
afs_get_sb(struct file_system_type *fs_type, int flags, const char *dev_name, void *data, struct vfsmount *mnt); - static struct inode *afs_alloc_inode(struct super_block *sb); - static void afs_put_super(struct super_block *sb); - static void afs_destroy_inode(struct inode *inode); +static int afs_statfs(struct dentry *dentry, struct kstatfs *buf); struct file_system_type afs_fs_type = { .owner = THIS_MODULE, @@ -47,7 +45,7 @@ struct file_system_type afs_fs_type = { }; static const struct super_operations afs_super_ops = { - .statfs = simple_statfs, + .statfs = afs_statfs, .alloc_inode = afs_alloc_inode, .drop_inode = generic_delete_inode, .write_inode = afs_write_inode, @@ -488,6 +486,7 @@ static struct inode *afs_alloc_inode(struct super_block *sb) vnode->flags = 1 << AFS_VNODE_UNSET; vnode->cb_promised = false; + _leave(" = %p", &vnode->vfs_inode); return &vnode->vfs_inode; } @@ -498,7 +497,7 @@ static void afs_destroy_inode(struct inode *inode) { struct afs_vnode *vnode = AFS_FS_I(inode); - _enter("{%lu}", inode->i_ino); + _enter("%p{%x:%u}", inode, vnode->fid.vid, vnode->fid.vnode); _debug("DESTROY INODE %p", inode); @@ -507,3 +506,36 @@ static void afs_destroy_inode(struct inode *inode) kmem_cache_free(afs_inode_cachep, vnode); atomic_dec(&afs_count_active_inodes); } + +/* + * return information about an AFS volume + */ +static int afs_statfs(struct dentry *dentry, struct kstatfs *buf) +{ + struct afs_volume_status vs; + struct afs_vnode *vnode = AFS_FS_I(dentry->d_inode); + struct key *key; + int ret; + + key = afs_request_key(vnode->volume->cell); + if (IS_ERR(key)) + return PTR_ERR(key); + + ret = afs_vnode_get_volume_status(vnode, key, &vs); + key_put(key); + if (ret < 0) { + _leave(" = %d", ret); + return ret; + } + + buf->f_type = dentry->d_sb->s_magic; + buf->f_bsize = AFS_BLOCK_SIZE; + buf->f_namelen = AFSNAMEMAX - 1; + + if (vs.max_quota == 0) + buf->f_blocks = vs.part_max_blocks; + else + buf->f_blocks = vs.max_quota; + buf->f_bavail = buf->f_bfree = buf->f_blocks - vs.blocks_in_use; + return 0; +} diff --git a/fs/afs/vnode.c b/fs/afs/vnode.c index ec814660209..c36c98ce2c3 100644 --- a/fs/afs/vnode.c +++ b/fs/afs/vnode.c @@ -175,24 +175,33 @@ static void afs_vnode_deleted_remotely(struct afs_vnode *vnode) { struct afs_server *server; + _enter("{%p}", vnode->server); + set_bit(AFS_VNODE_DELETED, &vnode->flags); server = vnode->server; - if (vnode->cb_promised) { - spin_lock(&server->cb_lock); + if (server) { if (vnode->cb_promised) { - rb_erase(&vnode->cb_promise, &server->cb_promises); - vnode->cb_promised = false; + spin_lock(&server->cb_lock); + if (vnode->cb_promised) { + rb_erase(&vnode->cb_promise, + &server->cb_promises); + vnode->cb_promised = false; + } + spin_unlock(&server->cb_lock); } - spin_unlock(&server->cb_lock); - } - spin_lock(&vnode->server->fs_lock); - rb_erase(&vnode->server_rb, &vnode->server->fs_vnodes); - spin_unlock(&vnode->server->fs_lock); + spin_lock(&server->fs_lock); + rb_erase(&vnode->server_rb, &server->fs_vnodes); + spin_unlock(&server->fs_lock); - vnode->server = NULL; - afs_put_server(server); + vnode->server = NULL; + afs_put_server(server); + } else { + ASSERT(!vnode->cb_promised); + } + + _leave(""); } /* @@ -225,7 +234,7 @@ void afs_vnode_finalise_status_update(struct afs_vnode *vnode, */ static void afs_vnode_status_update_failed(struct afs_vnode *vnode, int ret) { - _enter("%p,%d", vnode, ret); + _enter("{%x:%u},%d", vnode->fid.vid, vnode->fid.vnode, ret); spin_lock(&vnode->lock); @@ -860,3 +869,55 @@ no_server: 
spin_unlock(&vnode->lock); return PTR_ERR(server); } + +/* + * get the status of a volume + */ +int afs_vnode_get_volume_status(struct afs_vnode *vnode, struct key *key, + struct afs_volume_status *vs) +{ + struct afs_server *server; + int ret; + + _enter("%s{%x:%u.%u},%x,", + vnode->volume->vlocation->vldb.name, + vnode->fid.vid, + vnode->fid.vnode, + vnode->fid.unique, + key_serial(key)); + + /* this op will fetch the status */ + spin_lock(&vnode->lock); + vnode->update_cnt++; + spin_unlock(&vnode->lock); + + do { + /* pick a server to query */ + server = afs_volume_pick_fileserver(vnode); + if (IS_ERR(server)) + goto no_server; + + _debug("USING SERVER: %08x\n", ntohl(server->addr.s_addr)); + + ret = afs_fs_get_volume_status(server, key, vnode, vs, &afs_sync_call); + + } while (!afs_volume_release_fileserver(vnode, server, ret)); + + /* adjust the flags */ + if (ret == 0) { + afs_vnode_finalise_status_update(vnode, server); + afs_put_server(server); + } else { + afs_vnode_status_update_failed(vnode, ret); + } + + _leave(" = %d", ret); + return ret; + +no_server: + spin_lock(&vnode->lock); + vnode->update_cnt--; + ASSERTCMP(vnode->update_cnt, >=, 0); + spin_unlock(&vnode->lock); + return PTR_ERR(server); +} diff --git a/fs/afs/write.c b/fs/afs/write.c index 67ae4dbf66b..28f37516c12 100644 --- a/fs/afs/write.c +++ b/fs/afs/write.c @@ -395,8 +395,9 @@ static int afs_write_back_from_locked_page(struct afs_writeback *wb, if (n == 0) goto no_more; if (pages[0]->index != start) { - for (n--; n >= 0; n--) - put_page(pages[n]); + do { + put_page(pages[--n]); + } while (n > 0); goto no_more; } @@ -30,6 +30,7 @@ #include <linux/highmem.h> #include <linux/workqueue.h> #include <linux/security.h> +#include <linux/eventfd.h> #include <asm/kmap_types.h> #include <asm/uaccess.h> @@ -417,6 +418,7 @@ static struct kiocb fastcall *__aio_get_req(struct kioctx *ctx) req->private = NULL; req->ki_iovec = NULL; INIT_LIST_HEAD(&req->ki_run_list); + req->ki_eventfd = ERR_PTR(-EINVAL); /* Check if the completion queue has enough free space to * accept an event from this io. @@ -458,6 +460,8 @@ static inline void really_put_req(struct kioctx *ctx, struct kiocb *req) { assert_spin_locked(&ctx->ctx_lock); + if (!IS_ERR(req->ki_eventfd)) + fput(req->ki_eventfd); if (req->ki_dtor) req->ki_dtor(req); if (req->ki_iovec != &req->ki_inline_vec) @@ -942,6 +946,14 @@ int fastcall aio_complete(struct kiocb *iocb, long res, long res2) return 1; } + /* + * Check if the user asked us to deliver the result through an + * eventfd. The eventfd_signal() function is safe to be called + * from IRQ context. + */ + if (!IS_ERR(iocb->ki_eventfd)) + eventfd_signal(iocb->ki_eventfd, 1); + info = &ctx->ring_info; /* add a completion event to the ring buffer. @@ -1526,8 +1538,7 @@ int fastcall io_submit_one(struct kioctx *ctx, struct iocb __user *user_iocb, ssize_t ret; /* enforce forwards compatibility on users */ - if (unlikely(iocb->aio_reserved1 || iocb->aio_reserved2 || - iocb->aio_reserved3)) { + if (unlikely(iocb->aio_reserved1 || iocb->aio_reserved2)) { pr_debug("EINVAL: io_submit: reserve field set\n"); return -EINVAL; } @@ -1551,6 +1562,19 @@ int fastcall io_submit_one(struct kioctx *ctx, struct iocb __user *user_iocb, fput(file); return -EAGAIN; } + if (iocb->aio_flags & IOCB_FLAG_RESFD) { + /* + * If the IOCB_FLAG_RESFD flag of aio_flags is set, get an + * instance of the file* now. The file descriptor must be + * an eventfd() fd, and will be signaled for each completed + * event using the eventfd_signal() function. 
+ */ + req->ki_eventfd = eventfd_fget((int) iocb->aio_resfd); + if (unlikely(IS_ERR(req->ki_eventfd))) { + ret = PTR_ERR(req->ki_eventfd); + goto out_put_req; + } + } req->ki_filp = file; ret = put_user(req->ki_key, &user_iocb->aio_key); diff --git a/fs/anon_inodes.c b/fs/anon_inodes.c new file mode 100644 index 00000000000..40fe3a3222e --- /dev/null +++ b/fs/anon_inodes.c @@ -0,0 +1,200 @@ +/* + * fs/anon_inodes.c + * + * Copyright (C) 2007 Davide Libenzi <davidel@xmailserver.org> + * + * Thanks to Arnd Bergmann for code review and suggestions. + * More changes following Thomas Gleixner's suggestions. + * + */ + +#include <linux/file.h> +#include <linux/poll.h> +#include <linux/slab.h> +#include <linux/init.h> +#include <linux/fs.h> +#include <linux/mount.h> +#include <linux/module.h> +#include <linux/kernel.h> +#include <linux/magic.h> +#include <linux/anon_inodes.h> + +#include <asm/uaccess.h> + +static struct vfsmount *anon_inode_mnt __read_mostly; +static struct inode *anon_inode_inode; +static const struct file_operations anon_inode_fops; + +static int anon_inodefs_get_sb(struct file_system_type *fs_type, int flags, + const char *dev_name, void *data, + struct vfsmount *mnt) +{ + return get_sb_pseudo(fs_type, "anon_inode:", NULL, ANON_INODE_FS_MAGIC, + mnt); +} + +static int anon_inodefs_delete_dentry(struct dentry *dentry) +{ + /* + * We faked vfs to believe the dentry was hashed when we created it. + * Now we restore the flag so that dput() will work correctly. + */ + dentry->d_flags |= DCACHE_UNHASHED; + return 1; +} + +static struct file_system_type anon_inode_fs_type = { + .name = "anon_inodefs", + .get_sb = anon_inodefs_get_sb, + .kill_sb = kill_anon_super, +}; +static struct dentry_operations anon_inodefs_dentry_operations = { + .d_delete = anon_inodefs_delete_dentry, +}; + +/** + * anon_inode_getfd - creates a new file instance by hooking it up to an + * anonymous inode, and a dentry that describes the "class" + * of the file + * + * @pfd: [out] pointer to the file descriptor + * @pinode: [out] pointer to the inode + * @pfile: [out] pointer to the file struct + * @name: [in] name of the "class" of the new file + * @fops: [in] file operations for the new file + * @priv: [in] private data for the new file (will be file's private_data) + * + * Creates a new file by hooking it on a single inode. This is useful for files + * that do not need to have a full-fledged inode in order to operate correctly. + * All the files created with anon_inode_getfd() will share a single inode, + * hence saving memory and avoiding code duplication for the file/inode/dentry + * setup. + */ +int anon_inode_getfd(int *pfd, struct inode **pinode, struct file **pfile, + const char *name, const struct file_operations *fops, + void *priv) +{ + struct qstr this; + struct dentry *dentry; + struct inode *inode; + struct file *file; + int error, fd; + + if (IS_ERR(anon_inode_inode)) + return -ENODEV; + file = get_empty_filp(); + if (!file) + return -ENFILE; + + inode = igrab(anon_inode_inode); + if (IS_ERR(inode)) { + error = PTR_ERR(inode); + goto err_put_filp; + } + + error = get_unused_fd(); + if (error < 0) + goto err_iput; + fd = error; + + /* + * Link the inode to a directory entry by creating a unique name + * using the inode sequence number.
+ */ + error = -ENOMEM; + this.name = name; + this.len = strlen(name); + this.hash = 0; + dentry = d_alloc(anon_inode_mnt->mnt_sb->s_root, &this); + if (!dentry) + goto err_put_unused_fd; + dentry->d_op = &anon_inodefs_dentry_operations; + /* Do not publish this dentry inside the global dentry hash table */ + dentry->d_flags &= ~DCACHE_UNHASHED; + d_instantiate(dentry, inode); + + file->f_path.mnt = mntget(anon_inode_mnt); + file->f_path.dentry = dentry; + file->f_mapping = inode->i_mapping; + + file->f_pos = 0; + file->f_flags = O_RDWR; + file->f_op = fops; + file->f_mode = FMODE_READ | FMODE_WRITE; + file->f_version = 0; + file->private_data = priv; + + fd_install(fd, file); + + *pfd = fd; + *pinode = inode; + *pfile = file; + return 0; + +err_put_unused_fd: + put_unused_fd(fd); +err_iput: + iput(inode); +err_put_filp: + put_filp(file); + return error; +} + +/* + * A single inode exists for all anon_inode files. Unlike pipes, + * anon_inode inodes have no per-instance data associated, so we can avoid + * the allocation of multiple of them. + */ +static struct inode *anon_inode_mkinode(void) +{ + struct inode *inode = new_inode(anon_inode_mnt->mnt_sb); + + if (!inode) + return ERR_PTR(-ENOMEM); + + inode->i_fop = &anon_inode_fops; + + /* + * Mark the inode dirty from the very beginning, + * that way it will never be moved to the dirty + * list because mark_inode_dirty() will think + * that it already _is_ on the dirty list. + */ + inode->i_state = I_DIRTY; + inode->i_mode = S_IRUSR | S_IWUSR; + inode->i_uid = current->fsuid; + inode->i_gid = current->fsgid; + inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME; + return inode; +} + +static int __init anon_inode_init(void) +{ + int error; + + error = register_filesystem(&anon_inode_fs_type); + if (error) + goto err_exit; + anon_inode_mnt = kern_mount(&anon_inode_fs_type); + if (IS_ERR(anon_inode_mnt)) { + error = PTR_ERR(anon_inode_mnt); + goto err_unregister_filesystem; + } + anon_inode_inode = anon_inode_mkinode(); + if (IS_ERR(anon_inode_inode)) { + error = PTR_ERR(anon_inode_inode); + goto err_mntput; + } + + return 0; + +err_mntput: + mntput(anon_inode_mnt); +err_unregister_filesystem: + unregister_filesystem(&anon_inode_fs_type); +err_exit: + panic(KERN_ERR "anon_inode_init() failed (%d)\n", error); +} + +fs_initcall(anon_inode_init); + diff --git a/fs/autofs/autofs_i.h b/fs/autofs/autofs_i.h index 4ef544434b5..8b4cca3c470 100644 --- a/fs/autofs/autofs_i.h +++ b/fs/autofs/autofs_i.h @@ -101,7 +101,7 @@ struct autofs_symlink { struct autofs_sb_info { u32 magic; struct file *pipe; - pid_t oz_pgrp; + struct pid *oz_pgrp; int catatonic; struct super_block *sb; unsigned long exp_timeout; @@ -122,7 +122,7 @@ static inline struct autofs_sb_info *autofs_sbi(struct super_block *sb) filesystem without "magic".)
*/ static inline int autofs_oz_mode(struct autofs_sb_info *sbi) { - return sbi->catatonic || process_group(current) == sbi->oz_pgrp; + return sbi->catatonic || task_pgrp(current) == sbi->oz_pgrp; } /* Hash operations */ diff --git a/fs/autofs/inode.c b/fs/autofs/inode.c index aa0b61ff827..e7204d71acc 100644 --- a/fs/autofs/inode.c +++ b/fs/autofs/inode.c @@ -34,12 +34,14 @@ void autofs_kill_sb(struct super_block *sb) if (!sbi) goto out_kill_sb; - if ( !sbi->catatonic ) + if (!sbi->catatonic) autofs_catatonic_mode(sbi); /* Free wait queues, close pipe */ + put_pid(sbi->oz_pgrp); + autofs_hash_nuke(sbi); - for ( n = 0 ; n < AUTOFS_MAX_SYMLINKS ; n++ ) { - if ( test_bit(n, sbi->symlink_bitmap) ) + for (n = 0; n < AUTOFS_MAX_SYMLINKS; n++) { + if (test_bit(n, sbi->symlink_bitmap)) kfree(sbi->symlink[n].data); } @@ -69,7 +71,8 @@ static match_table_t autofs_tokens = { {Opt_err, NULL} }; -static int parse_options(char *options, int *pipefd, uid_t *uid, gid_t *gid, pid_t *pgrp, int *minproto, int *maxproto) +static int parse_options(char *options, int *pipefd, uid_t *uid, gid_t *gid, + pid_t *pgrp, int *minproto, int *maxproto) { char *p; substring_t args[MAX_OPT_ARGS]; @@ -138,9 +141,10 @@ int autofs_fill_super(struct super_block *s, void *data, int silent) int pipefd; struct autofs_sb_info *sbi; int minproto, maxproto; + pid_t pgid; sbi = kzalloc(sizeof(*sbi), GFP_KERNEL); - if ( !sbi ) + if (!sbi) goto fail_unlock; DPRINTK(("autofs: starting up, sbi = %p\n",sbi)); @@ -149,7 +153,6 @@ int autofs_fill_super(struct super_block *s, void *data, int silent) sbi->pipe = NULL; sbi->catatonic = 1; sbi->exp_timeout = 0; - sbi->oz_pgrp = process_group(current); autofs_initialize_hash(&sbi->dirhash); sbi->queues = NULL; memset(sbi->symlink_bitmap, 0, sizeof(long)*AUTOFS_SYMLINK_BITMAP_LEN); @@ -169,26 +172,36 @@ int autofs_fill_super(struct super_block *s, void *data, int silent) goto fail_iput; /* Can this call block? - WTF cares? s is locked. */ - if ( parse_options(data,&pipefd,&root_inode->i_uid,&root_inode->i_gid,&sbi->oz_pgrp,&minproto,&maxproto) ) { + if (parse_options(data, &pipefd, &root_inode->i_uid, + &root_inode->i_gid, &pgid, &minproto, + &maxproto)) { printk("autofs: called with bogus options\n"); goto fail_dput; } /* Couldn't this be tested earlier? 
*/ - if ( minproto > AUTOFS_PROTO_VERSION || - maxproto < AUTOFS_PROTO_VERSION ) { + if (minproto > AUTOFS_PROTO_VERSION || + maxproto < AUTOFS_PROTO_VERSION) { printk("autofs: kernel does not match daemon version\n"); goto fail_dput; } - DPRINTK(("autofs: pipe fd = %d, pgrp = %u\n", pipefd, sbi->oz_pgrp)); + DPRINTK(("autofs: pipe fd = %d, pgrp = %u\n", pipefd, pgid)); + sbi->oz_pgrp = find_get_pid(pgid); + + if (!sbi->oz_pgrp) { + printk("autofs: could not find process group %d\n", pgid); + goto fail_dput; + } + pipe = fget(pipefd); - if ( !pipe ) { + if (!pipe) { printk("autofs: could not open pipe file descriptor\n"); - goto fail_dput; + goto fail_put_pid; } - if ( !pipe->f_op || !pipe->f_op->write ) + + if (!pipe->f_op || !pipe->f_op->write) goto fail_fput; sbi->pipe = pipe; sbi->catatonic = 0; @@ -202,6 +215,8 @@ int autofs_fill_super(struct super_block *s, void *data, int silent) fail_fput: printk("autofs: pipe file descriptor does not contain proper ops\n"); fput(pipe); +fail_put_pid: + put_pid(sbi->oz_pgrp); fail_dput: dput(root); goto fail_free; @@ -230,7 +245,7 @@ static void autofs_read_inode(struct inode *inode) inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME; inode->i_blocks = 0; - if ( ino == AUTOFS_ROOT_INO ) { + if (ino == AUTOFS_ROOT_INO) { inode->i_mode = S_IFDIR | S_IRUGO | S_IXUGO | S_IWUSR; inode->i_op = &autofs_root_inode_operations; inode->i_fop = &autofs_root_operations; @@ -241,12 +256,12 @@ static void autofs_read_inode(struct inode *inode) inode->i_uid = inode->i_sb->s_root->d_inode->i_uid; inode->i_gid = inode->i_sb->s_root->d_inode->i_gid; - if ( ino >= AUTOFS_FIRST_SYMLINK && ino < AUTOFS_FIRST_DIR_INO ) { + if (ino >= AUTOFS_FIRST_SYMLINK && ino < AUTOFS_FIRST_DIR_INO) { /* Symlink inode - should be in symlink list */ struct autofs_symlink *sl; n = ino - AUTOFS_FIRST_SYMLINK; - if ( n >= AUTOFS_MAX_SYMLINKS || !test_bit(n,sbi->symlink_bitmap)) { + if (n >= AUTOFS_MAX_SYMLINKS || !test_bit(n,sbi->symlink_bitmap)) { printk("autofs: Looking for bad symlink inode %u\n", (unsigned int) ino); return; } diff --git a/fs/autofs/root.c b/fs/autofs/root.c index f2597205939..c1489533277 100644 --- a/fs/autofs/root.c +++ b/fs/autofs/root.c @@ -67,8 +67,8 @@ static int autofs_root_readdir(struct file *filp, void *dirent, filldir_t filldi filp->f_pos = ++nr; /* fall through */ default: - while ( onr = nr, ent = autofs_hash_enum(dirhash,&nr,ent) ) { - if ( !ent->dentry || d_mountpoint(ent->dentry) ) { + while (onr = nr, ent = autofs_hash_enum(dirhash,&nr,ent)) { + if (!ent->dentry || d_mountpoint(ent->dentry)) { if (filldir(dirent,ent->name,ent->len,onr,ent->ino,DT_UNKNOWN) < 0) goto out; filp->f_pos = nr; @@ -88,10 +88,10 @@ static int try_to_fill_dentry(struct dentry *dentry, struct super_block *sb, str struct autofs_dir_ent *ent; int status = 0; - if ( !(ent = autofs_hash_lookup(&sbi->dirhash, &dentry->d_name)) ) { + if (!(ent = autofs_hash_lookup(&sbi->dirhash, &dentry->d_name))) { do { - if ( status && dentry->d_inode ) { - if ( status != -ENOENT ) + if (status && dentry->d_inode) { + if (status != -ENOENT) printk("autofs warning: lookup failure on positive dentry, status = %d, name = %s\n", status, dentry->d_name.name); return 0; /* Try to get the kernel to invalidate this dentry */ } @@ -106,7 +106,7 @@ static int try_to_fill_dentry(struct dentry *dentry, struct super_block *sb, str return 1; } status = autofs_wait(sbi, &dentry->d_name); - } while (!(ent = autofs_hash_lookup(&sbi->dirhash, &dentry->d_name)) ); + } while (!(ent = 
autofs_hash_lookup(&sbi->dirhash, &dentry->d_name))); } /* Abuse this field as a pointer to the directory entry, used to @@ -124,13 +124,13 @@ static int try_to_fill_dentry(struct dentry *dentry, struct super_block *sb, str /* If this is a directory that isn't a mount point, bitch at the daemon and fix it in user space */ - if ( S_ISDIR(dentry->d_inode->i_mode) && !d_mountpoint(dentry) ) { + if (S_ISDIR(dentry->d_inode->i_mode) && !d_mountpoint(dentry)) { return !autofs_wait(sbi, &dentry->d_name); } /* We don't update the usages for the autofs daemon itself, this is necessary for recursive autofs mounts */ - if ( !autofs_oz_mode(sbi) ) { + if (!autofs_oz_mode(sbi)) { autofs_update_usage(&sbi->dirhash,ent); } @@ -157,7 +157,7 @@ static int autofs_revalidate(struct dentry * dentry, struct nameidata *nd) sbi = autofs_sbi(dir->i_sb); /* Pending dentry */ - if ( dentry->d_flags & DCACHE_AUTOFS_PENDING ) { + if (dentry->d_flags & DCACHE_AUTOFS_PENDING) { if (autofs_oz_mode(sbi)) res = 1; else @@ -173,7 +173,7 @@ static int autofs_revalidate(struct dentry * dentry, struct nameidata *nd) } /* Check for a non-mountpoint directory */ - if ( S_ISDIR(dentry->d_inode->i_mode) && !d_mountpoint(dentry) ) { + if (S_ISDIR(dentry->d_inode->i_mode) && !d_mountpoint(dentry)) { if (autofs_oz_mode(sbi)) res = 1; else @@ -183,9 +183,9 @@ static int autofs_revalidate(struct dentry * dentry, struct nameidata *nd) } /* Update the usage list */ - if ( !autofs_oz_mode(sbi) ) { + if (!autofs_oz_mode(sbi)) { ent = (struct autofs_dir_ent *) dentry->d_time; - if ( ent ) + if (ent) autofs_update_usage(&sbi->dirhash,ent); } unlock_kernel(); @@ -213,8 +213,10 @@ static struct dentry *autofs_root_lookup(struct inode *dir, struct dentry *dentr sbi = autofs_sbi(dir->i_sb); oz_mode = autofs_oz_mode(sbi); - DPRINTK(("autofs_lookup: pid = %u, pgrp = %u, catatonic = %d, oz_mode = %d\n", - current->pid, process_group(current), sbi->catatonic, oz_mode)); + DPRINTK(("autofs_lookup: pid = %u, pgrp = %u, catatonic = %d, " + "oz_mode = %d\n", pid_nr(task_pid(current)), + process_group(current), sbi->catatonic, + oz_mode)); /* * Mark the dentry incomplete, but add it. This is needed so @@ -258,7 +260,7 @@ static struct dentry *autofs_root_lookup(struct inode *dir, struct dentry *dentr * doesn't do the right thing for all system calls, but it should * be OK for the operations we permit from an autofs. 
*/ - if ( dentry->d_inode && d_unhashed(dentry) ) + if (dentry->d_inode && d_unhashed(dentry)) return ERR_PTR(-ENOENT); return NULL; @@ -277,18 +279,18 @@ static int autofs_root_symlink(struct inode *dir, struct dentry *dentry, const c autofs_say(dentry->d_name.name,dentry->d_name.len); lock_kernel(); - if ( !autofs_oz_mode(sbi) ) { + if (!autofs_oz_mode(sbi)) { unlock_kernel(); return -EACCES; } - if ( autofs_hash_lookup(dh, &dentry->d_name) ) { + if (autofs_hash_lookup(dh, &dentry->d_name)) { unlock_kernel(); return -EEXIST; } n = find_first_zero_bit(sbi->symlink_bitmap,AUTOFS_MAX_SYMLINKS); - if ( n >= AUTOFS_MAX_SYMLINKS ) { + if (n >= AUTOFS_MAX_SYMLINKS) { unlock_kernel(); return -ENOSPC; } @@ -297,14 +299,14 @@ static int autofs_root_symlink(struct inode *dir, struct dentry *dentry, const c sl = &sbi->symlink[n]; sl->len = strlen(symname); sl->data = kmalloc(slsize = sl->len+1, GFP_KERNEL); - if ( !sl->data ) { + if (!sl->data) { clear_bit(n,sbi->symlink_bitmap); unlock_kernel(); return -ENOSPC; } ent = kmalloc(sizeof(struct autofs_dir_ent), GFP_KERNEL); - if ( !ent ) { + if (!ent) { kfree(sl->data); clear_bit(n,sbi->symlink_bitmap); unlock_kernel(); @@ -312,7 +314,7 @@ static int autofs_root_symlink(struct inode *dir, struct dentry *dentry, const c } ent->name = kmalloc(dentry->d_name.len+1, GFP_KERNEL); - if ( !ent->name ) { + if (!ent->name) { kfree(sl->data); kfree(ent); clear_bit(n,sbi->symlink_bitmap); @@ -354,23 +356,23 @@ static int autofs_root_unlink(struct inode *dir, struct dentry *dentry) /* This allows root to remove symlinks */ lock_kernel(); - if ( !autofs_oz_mode(sbi) && !capable(CAP_SYS_ADMIN) ) { + if (!autofs_oz_mode(sbi) && !capable(CAP_SYS_ADMIN)) { unlock_kernel(); return -EACCES; } ent = autofs_hash_lookup(dh, &dentry->d_name); - if ( !ent ) { + if (!ent) { unlock_kernel(); return -ENOENT; } n = ent->ino - AUTOFS_FIRST_SYMLINK; - if ( n >= AUTOFS_MAX_SYMLINKS ) { + if (n >= AUTOFS_MAX_SYMLINKS) { unlock_kernel(); return -EISDIR; /* It's a directory, dummy */ } - if ( !test_bit(n,sbi->symlink_bitmap) ) { + if (!test_bit(n,sbi->symlink_bitmap)) { unlock_kernel(); return -EINVAL; /* Nonexistent symlink? 
Shouldn't happen */ } @@ -392,23 +394,23 @@ static int autofs_root_rmdir(struct inode *dir, struct dentry *dentry) struct autofs_dir_ent *ent; lock_kernel(); - if ( !autofs_oz_mode(sbi) ) { + if (!autofs_oz_mode(sbi)) { unlock_kernel(); return -EACCES; } ent = autofs_hash_lookup(dh, &dentry->d_name); - if ( !ent ) { + if (!ent) { unlock_kernel(); return -ENOENT; } - if ( (unsigned int)ent->ino < AUTOFS_FIRST_DIR_INO ) { + if ((unsigned int)ent->ino < AUTOFS_FIRST_DIR_INO) { unlock_kernel(); return -ENOTDIR; /* Not a directory */ } - if ( ent->dentry != dentry ) { + if (ent->dentry != dentry) { printk("autofs_rmdir: odentry != dentry for entry %s\n", dentry->d_name.name); } @@ -429,18 +431,18 @@ static int autofs_root_mkdir(struct inode *dir, struct dentry *dentry, int mode) ino_t ino; lock_kernel(); - if ( !autofs_oz_mode(sbi) ) { + if (!autofs_oz_mode(sbi)) { unlock_kernel(); return -EACCES; } ent = autofs_hash_lookup(dh, &dentry->d_name); - if ( ent ) { + if (ent) { unlock_kernel(); return -EEXIST; } - if ( sbi->next_dir_ino < AUTOFS_FIRST_DIR_INO ) { + if (sbi->next_dir_ino < AUTOFS_FIRST_DIR_INO) { printk("autofs: Out of inode numbers -- what the heck did you do??\n"); unlock_kernel(); return -ENOSPC; @@ -448,13 +450,13 @@ static int autofs_root_mkdir(struct inode *dir, struct dentry *dentry, int mode) ino = sbi->next_dir_ino++; ent = kmalloc(sizeof(struct autofs_dir_ent), GFP_KERNEL); - if ( !ent ) { + if (!ent) { unlock_kernel(); return -ENOSPC; } ent->name = kmalloc(dentry->d_name.len+1, GFP_KERNEL); - if ( !ent->name ) { + if (!ent->name) { kfree(ent); unlock_kernel(); return -ENOSPC; @@ -483,7 +485,7 @@ static inline int autofs_get_set_timeout(struct autofs_sb_info *sbi, put_user(sbi->exp_timeout / HZ, p)) return -EFAULT; - if ( ntimeout > ULONG_MAX/HZ ) + if (ntimeout > ULONG_MAX/HZ) sbi->exp_timeout = 0; else sbi->exp_timeout = ntimeout * HZ; @@ -511,15 +513,14 @@ static inline int autofs_expire_run(struct super_block *sb, pkt.hdr.proto_version = AUTOFS_PROTO_VERSION; pkt.hdr.type = autofs_ptype_expire; - if ( !sbi->exp_timeout || - !(ent = autofs_expire(sb,sbi,mnt)) ) + if (!sbi->exp_timeout || !(ent = autofs_expire(sb,sbi,mnt))) return -EAGAIN; pkt.len = ent->len; memcpy(pkt.name, ent->name, pkt.len); pkt.name[pkt.len] = '\0'; - if ( copy_to_user(pkt_p, &pkt, sizeof(struct autofs_packet_expire)) ) + if (copy_to_user(pkt_p, &pkt, sizeof(struct autofs_packet_expire))) return -EFAULT; return 0; @@ -537,11 +538,11 @@ static int autofs_root_ioctl(struct inode *inode, struct file *filp, DPRINTK(("autofs_ioctl: cmd = 0x%08x, arg = 0x%08lx, sbi = %p, pgrp = %u\n",cmd,arg,sbi,process_group(current))); - if ( _IOC_TYPE(cmd) != _IOC_TYPE(AUTOFS_IOC_FIRST) || - _IOC_NR(cmd) - _IOC_NR(AUTOFS_IOC_FIRST) >= AUTOFS_IOC_COUNT ) + if (_IOC_TYPE(cmd) != _IOC_TYPE(AUTOFS_IOC_FIRST) || + _IOC_NR(cmd) - _IOC_NR(AUTOFS_IOC_FIRST) >= AUTOFS_IOC_COUNT) return -ENOTTY; - if ( !autofs_oz_mode(sbi) && !capable(CAP_SYS_ADMIN) ) + if (!autofs_oz_mode(sbi) && !capable(CAP_SYS_ADMIN)) return -EPERM; switch(cmd) { diff --git a/fs/autofs4/inode.c b/fs/autofs4/inode.c index 5769a2f9ad6..692364e8ffc 100644 --- a/fs/autofs4/inode.c +++ b/fs/autofs4/inode.c @@ -218,8 +218,7 @@ static match_table_t tokens = { }; static int parse_options(char *options, int *pipefd, uid_t *uid, gid_t *gid, - pid_t *pgrp, unsigned int *type, - int *minproto, int *maxproto) + pid_t *pgrp, unsigned int *type, int *minproto, int *maxproto) { char *p; substring_t args[MAX_OPT_ARGS]; @@ -314,7 +313,7 @@ int autofs4_fill_super(struct 
super_block *s, void *data, int silent) struct autofs_info *ino; sbi = kmalloc(sizeof(*sbi), GFP_KERNEL); - if ( !sbi ) + if (!sbi) goto fail_unlock; DPRINTK("starting up, sbi = %p",sbi); @@ -363,10 +362,9 @@ int autofs4_fill_super(struct super_block *s, void *data, int silent) root->d_fsdata = ino; /* Can this call block? */ - if (parse_options(data, &pipefd, - &root_inode->i_uid, &root_inode->i_gid, - &sbi->oz_pgrp, &sbi->type, - &sbi->min_proto, &sbi->max_proto)) { + if (parse_options(data, &pipefd, &root_inode->i_uid, &root_inode->i_gid, + &sbi->oz_pgrp, &sbi->type, &sbi->min_proto, + &sbi->max_proto)) { printk("autofs: called with bogus options\n"); goto fail_dput; } @@ -396,11 +394,11 @@ int autofs4_fill_super(struct super_block *s, void *data, int silent) DPRINTK("pipe fd = %d, pgrp = %u", pipefd, sbi->oz_pgrp); pipe = fget(pipefd); - if ( !pipe ) { + if (!pipe) { printk("autofs: could not open pipe file descriptor\n"); goto fail_dput; } - if ( !pipe->f_op || !pipe->f_op->write ) + if (!pipe->f_op || !pipe->f_op->write) goto fail_fput; sbi->pipe = pipe; sbi->pipefd = pipefd; diff --git a/fs/autofs4/root.c b/fs/autofs4/root.c index 15170f4e13a..2d4c8a3e604 100644 --- a/fs/autofs4/root.c +++ b/fs/autofs4/root.c @@ -759,7 +759,7 @@ static int autofs4_dir_unlink(struct inode *dir, struct dentry *dentry) struct autofs_info *p_ino; /* This allows root to remove symlinks */ - if ( !autofs4_oz_mode(sbi) && !capable(CAP_SYS_ADMIN) ) + if (!autofs4_oz_mode(sbi) && !capable(CAP_SYS_ADMIN)) return -EACCES; if (atomic_dec_and_test(&ino->count)) { @@ -833,7 +833,7 @@ static int autofs4_dir_mkdir(struct inode *dir, struct dentry *dentry, int mode) struct autofs_info *p_ino; struct inode *inode; - if ( !autofs4_oz_mode(sbi) ) + if (!autofs4_oz_mode(sbi)) return -EACCES; DPRINTK("dentry %p, creating %.*s", @@ -871,11 +871,11 @@ static inline int autofs4_get_set_timeout(struct autofs_sb_info *sbi, int rv; unsigned long ntimeout; - if ( (rv = get_user(ntimeout, p)) || - (rv = put_user(sbi->exp_timeout/HZ, p)) ) + if ((rv = get_user(ntimeout, p)) || + (rv = put_user(sbi->exp_timeout/HZ, p))) return rv; - if ( ntimeout > ULONG_MAX/HZ ) + if (ntimeout > ULONG_MAX/HZ) sbi->exp_timeout = 0; else sbi->exp_timeout = ntimeout * HZ; @@ -906,7 +906,7 @@ static inline int autofs4_ask_reghost(struct autofs_sb_info *sbi, int __user *p) DPRINTK("returning %d", sbi->needs_reghost); status = put_user(sbi->needs_reghost, p); - if ( status ) + if (status) return status; sbi->needs_reghost = 0; @@ -975,11 +975,11 @@ static int autofs4_root_ioctl(struct inode *inode, struct file *filp, DPRINTK("cmd = 0x%08x, arg = 0x%08lx, sbi = %p, pgrp = %u", cmd,arg,sbi,process_group(current)); - if ( _IOC_TYPE(cmd) != _IOC_TYPE(AUTOFS_IOC_FIRST) || - _IOC_NR(cmd) - _IOC_NR(AUTOFS_IOC_FIRST) >= AUTOFS_IOC_COUNT ) + if (_IOC_TYPE(cmd) != _IOC_TYPE(AUTOFS_IOC_FIRST) || + _IOC_NR(cmd) - _IOC_NR(AUTOFS_IOC_FIRST) >= AUTOFS_IOC_COUNT) return -ENOTTY; - if ( !autofs4_oz_mode(sbi) && !capable(CAP_SYS_ADMIN) ) + if (!autofs4_oz_mode(sbi) && !capable(CAP_SYS_ADMIN)) return -EPERM; switch(cmd) { diff --git a/fs/compat.c b/fs/compat.c index 9cf75df9b2b..7b21b0a8259 100644 --- a/fs/compat.c +++ b/fs/compat.c @@ -46,6 +46,7 @@ #include <linux/tsacct_kern.h> #include <linux/security.h> #include <linux/highmem.h> +#include <linux/signal.h> #include <linux/poll.h> #include <linux/mm.h> #include <linux/eventpoll.h> @@ -2199,3 +2200,51 @@ asmlinkage long compat_sys_epoll_pwait(int epfd, #endif /* TIF_RESTORE_SIGMASK */ #endif /* CONFIG_EPOLL */ + 
+#ifdef CONFIG_SIGNALFD + +asmlinkage long compat_sys_signalfd(int ufd, + const compat_sigset_t __user *sigmask, + compat_size_t sigsetsize) +{ + compat_sigset_t ss32; + sigset_t tmp; + sigset_t __user *ksigmask; + + if (sigsetsize != sizeof(compat_sigset_t)) + return -EINVAL; + if (copy_from_user(&ss32, sigmask, sizeof(ss32))) + return -EFAULT; + sigset_from_compat(&tmp, &ss32); + ksigmask = compat_alloc_user_space(sizeof(sigset_t)); + if (copy_to_user(ksigmask, &tmp, sizeof(sigset_t))) + return -EFAULT; + + return sys_signalfd(ufd, ksigmask, sizeof(sigset_t)); +} + +#endif /* CONFIG_SIGNALFD */ + +#ifdef CONFIG_TIMERFD + +asmlinkage long compat_sys_timerfd(int ufd, int clockid, int flags, + const struct compat_itimerspec __user *utmr) +{ + long res; + struct itimerspec t; + struct itimerspec __user *ut; + + res = -EFAULT; + if (get_compat_itimerspec(&t, utmr)) + goto err_exit; + ut = compat_alloc_user_space(sizeof(*ut)); + if (copy_to_user(ut, &t, sizeof(t))) + goto err_exit; + + res = sys_timerfd(ufd, clockid, flags, ut); +err_exit: + return res; +} + +#endif /* CONFIG_TIMERFD */ + diff --git a/fs/eventfd.c b/fs/eventfd.c new file mode 100644 index 00000000000..480e2b3c416 --- /dev/null +++ b/fs/eventfd.c @@ -0,0 +1,228 @@ +/* + * fs/eventfd.c + * + * Copyright (C) 2007 Davide Libenzi <davidel@xmailserver.org> + * + */ + +#include <linux/file.h> +#include <linux/poll.h> +#include <linux/init.h> +#include <linux/fs.h> +#include <linux/sched.h> +#include <linux/kernel.h> +#include <linux/list.h> +#include <linux/spinlock.h> +#include <linux/anon_inodes.h> +#include <linux/eventfd.h> + +struct eventfd_ctx { + spinlock_t lock; + wait_queue_head_t wqh; + /* + * Every time that a write(2) is performed on an eventfd, the + * value of the __u64 being written is added to "count" and a + * wakeup is performed on "wqh". A read(2) will return the "count" + * value to userspace, and will reset "count" to zero. The kernel + * side eventfd_signal() also adds to the "count" counter and + * issues a wakeup. + */ + __u64 count; +}; + +/* + * Adds "n" to the eventfd counter "count". Returns "n" in case of + * success, or a value lower than "n" in case of counter overflow. + * This function is supposed to be called by the kernel in paths + * that do not allow sleeping. In this function we allow the counter + * to reach the ULLONG_MAX value, and we signal this as an overflow + * condition by returning a POLLERR to poll(2).
+ */ +int eventfd_signal(struct file *file, int n) +{ + struct eventfd_ctx *ctx = file->private_data; + unsigned long flags; + + if (n < 0) + return -EINVAL; + spin_lock_irqsave(&ctx->lock, flags); + if (ULLONG_MAX - ctx->count < n) + n = (int) (ULLONG_MAX - ctx->count); + ctx->count += n; + if (waitqueue_active(&ctx->wqh)) + wake_up_locked(&ctx->wqh); + spin_unlock_irqrestore(&ctx->lock, flags); + + return n; +} + +static int eventfd_release(struct inode *inode, struct file *file) +{ + kfree(file->private_data); + return 0; +} + +static unsigned int eventfd_poll(struct file *file, poll_table *wait) +{ + struct eventfd_ctx *ctx = file->private_data; + unsigned int events = 0; + unsigned long flags; + + poll_wait(file, &ctx->wqh, wait); + + spin_lock_irqsave(&ctx->lock, flags); + if (ctx->count > 0) + events |= POLLIN; + if (ctx->count == ULLONG_MAX) + events |= POLLERR; + if (ULLONG_MAX - 1 > ctx->count) + events |= POLLOUT; + spin_unlock_irqrestore(&ctx->lock, flags); + + return events; +} + +static ssize_t eventfd_read(struct file *file, char __user *buf, size_t count, + loff_t *ppos) +{ + struct eventfd_ctx *ctx = file->private_data; + ssize_t res; + __u64 ucnt; + DECLARE_WAITQUEUE(wait, current); + + if (count < sizeof(ucnt)) + return -EINVAL; + spin_lock_irq(&ctx->lock); + res = -EAGAIN; + ucnt = ctx->count; + if (ucnt > 0) + res = sizeof(ucnt); + else if (!(file->f_flags & O_NONBLOCK)) { + __add_wait_queue(&ctx->wqh, &wait); + for (res = 0;;) { + set_current_state(TASK_INTERRUPTIBLE); + if (ctx->count > 0) { + ucnt = ctx->count; + res = sizeof(ucnt); + break; + } + if (signal_pending(current)) { + res = -ERESTARTSYS; + break; + } + spin_unlock_irq(&ctx->lock); + schedule(); + spin_lock_irq(&ctx->lock); + } + __remove_wait_queue(&ctx->wqh, &wait); + __set_current_state(TASK_RUNNING); + } + if (res > 0) { + ctx->count = 0; + if (waitqueue_active(&ctx->wqh)) + wake_up_locked(&ctx->wqh); + } + spin_unlock_irq(&ctx->lock); + if (res > 0 && put_user(ucnt, (__u64 __user *) buf)) + return -EFAULT; + + return res; +} + +static ssize_t eventfd_write(struct file *file, const char __user *buf, size_t count, + loff_t *ppos) +{ + struct eventfd_ctx *ctx = file->private_data; + ssize_t res; + __u64 ucnt; + DECLARE_WAITQUEUE(wait, current); + + if (count < sizeof(ucnt)) + return -EINVAL; + if (copy_from_user(&ucnt, buf, sizeof(ucnt))) + return -EFAULT; + if (ucnt == ULLONG_MAX) + return -EINVAL; + spin_lock_irq(&ctx->lock); + res = -EAGAIN; + if (ULLONG_MAX - ctx->count > ucnt) + res = sizeof(ucnt); + else if (!(file->f_flags & O_NONBLOCK)) { + __add_wait_queue(&ctx->wqh, &wait); + for (res = 0;;) { + set_current_state(TASK_INTERRUPTIBLE); + if (ULLONG_MAX - ctx->count > ucnt) { + res = sizeof(ucnt); + break; + } + if (signal_pending(current)) { + res = -ERESTARTSYS; + break; + } + spin_unlock_irq(&ctx->lock); + schedule(); + spin_lock_irq(&ctx->lock); + } + __remove_wait_queue(&ctx->wqh, &wait); + __set_current_state(TASK_RUNNING); + } + if (res > 0) { + ctx->count += ucnt; + if (waitqueue_active(&ctx->wqh)) + wake_up_locked(&ctx->wqh); + } + spin_unlock_irq(&ctx->lock); + + return res; +} + +static const struct file_operations eventfd_fops = { + .release = eventfd_release, + .poll = eventfd_poll, + .read = eventfd_read, + .write = eventfd_write, +}; + +struct file *eventfd_fget(int fd) +{ + struct file *file; + + file = fget(fd); + if (!file) + return ERR_PTR(-EBADF); + if (file->f_op != &eventfd_fops) { + fput(file); + return ERR_PTR(-EINVAL); + } + + return file; +} + +asmlinkage long 
sys_eventfd(unsigned int count) +{ + int error, fd; + struct eventfd_ctx *ctx; + struct file *file; + struct inode *inode; + + ctx = kmalloc(sizeof(*ctx), GFP_KERNEL); + if (!ctx) + return -ENOMEM; + + init_waitqueue_head(&ctx->wqh); + spin_lock_init(&ctx->lock); + ctx->count = count; + + /* + * When we call this, the initialization must be complete, since + * anon_inode_getfd() will install the fd. + */ + error = anon_inode_getfd(&fd, &inode, &file, "[eventfd]", + &eventfd_fops, ctx); + if (!error) + return fd; + + kfree(ctx); + return error; +} + diff --git a/fs/eventpoll.c b/fs/eventpoll.c index b5c7ca58493..1aad34ea61a 100644 --- a/fs/eventpoll.c +++ b/fs/eventpoll.c @@ -11,7 +11,6 @@ * */ -#include <linux/module.h> #include <linux/init.h> #include <linux/kernel.h> #include <linux/sched.h> @@ -34,6 +33,7 @@ #include <linux/mount.h> #include <linux/bitops.h> #include <linux/mutex.h> +#include <linux/anon_inodes.h> #include <asm/uaccess.h> #include <asm/system.h> #include <asm/io.h> @@ -41,7 +41,6 @@ #include <asm/atomic.h> #include <asm/semaphore.h> - /* * LOCKING: * There are three level of locking required by epoll : @@ -74,9 +73,6 @@ * a greater scalability. */ - -#define EVENTPOLLFS_MAGIC 0x03111965 /* My birthday should work for this :) */ - #define DEBUG_EPOLL 0 #if DEBUG_EPOLL > 0 @@ -106,7 +102,6 @@ #define EP_MAX_EVENTS (INT_MAX / sizeof(struct epoll_event)) - struct epoll_filefd { struct file *file; int fd; @@ -224,43 +219,6 @@ struct ep_pqueue { struct epitem *epi; }; - - -static void ep_poll_safewake_init(struct poll_safewake *psw); -static void ep_poll_safewake(struct poll_safewake *psw, wait_queue_head_t *wq); -static int ep_getfd(int *efd, struct inode **einode, struct file **efile, - struct eventpoll *ep); -static int ep_alloc(struct eventpoll **pep); -static void ep_free(struct eventpoll *ep); -static struct epitem *ep_find(struct eventpoll *ep, struct file *file, int fd); -static void ep_use_epitem(struct epitem *epi); -static void ep_release_epitem(struct epitem *epi); -static void ep_ptable_queue_proc(struct file *file, wait_queue_head_t *whead, - poll_table *pt); -static void ep_rbtree_insert(struct eventpoll *ep, struct epitem *epi); -static int ep_insert(struct eventpoll *ep, struct epoll_event *event, - struct file *tfile, int fd); -static int ep_modify(struct eventpoll *ep, struct epitem *epi, - struct epoll_event *event); -static void ep_unregister_pollwait(struct eventpoll *ep, struct epitem *epi); -static int ep_unlink(struct eventpoll *ep, struct epitem *epi); -static int ep_remove(struct eventpoll *ep, struct epitem *epi); -static int ep_poll_callback(wait_queue_t *wait, unsigned mode, int sync, void *key); -static int ep_eventpoll_close(struct inode *inode, struct file *file); -static unsigned int ep_eventpoll_poll(struct file *file, poll_table *wait); -static int ep_send_events(struct eventpoll *ep, struct list_head *txlist, - struct epoll_event __user *events, int maxevents); -static int ep_events_transfer(struct eventpoll *ep, - struct epoll_event __user *events, - int maxevents); -static int ep_poll(struct eventpoll *ep, struct epoll_event __user *events, - int maxevents, long timeout); -static int eventpollfs_delete_dentry(struct dentry *dentry); -static struct inode *ep_eventpoll_inode(void); -static int eventpollfs_get_sb(struct file_system_type *fs_type, - int flags, const char *dev_name, - void *data, struct vfsmount *mnt); - /* * This semaphore is used to serialize ep_free() and eventpoll_release_file(). 
*/ @@ -275,37 +233,6 @@ static struct kmem_cache *epi_cache __read_mostly; /* Slab cache used to allocate "struct eppoll_entry" */ static struct kmem_cache *pwq_cache __read_mostly; -/* Virtual fs used to allocate inodes for eventpoll files */ -static struct vfsmount *eventpoll_mnt __read_mostly; - -/* File callbacks that implement the eventpoll file behaviour */ -static const struct file_operations eventpoll_fops = { - .release = ep_eventpoll_close, - .poll = ep_eventpoll_poll -}; - -/* - * This is used to register the virtual file system from where - * eventpoll inodes are allocated. - */ -static struct file_system_type eventpoll_fs_type = { - .name = "eventpollfs", - .get_sb = eventpollfs_get_sb, - .kill_sb = kill_anon_super, -}; - -/* Very basic directory entry operations for the eventpoll virtual file system */ -static struct dentry_operations eventpollfs_dentry_operations = { - .d_delete = eventpollfs_delete_dentry, -}; - - - -/* Fast test to see if the file is an evenpoll file */ -static inline int is_file_epoll(struct file *f) -{ - return f->f_op == &eventpoll_fops; -} /* Setup the structure that is used as key for the rb-tree */ static inline void ep_set_ffd(struct epoll_filefd *ffd, @@ -374,7 +301,6 @@ static void ep_poll_safewake_init(struct poll_safewake *psw) spin_lock_init(&psw->lock); } - /* * Perform a safe wake up of the poll wait list. The problem is that * with the new callback'd wake up system, it is possible that the @@ -429,399 +355,144 @@ static void ep_poll_safewake(struct poll_safewake *psw, wait_queue_head_t *wq) spin_unlock_irqrestore(&psw->lock, flags); } - /* - * This is called from eventpoll_release() to unlink files from the eventpoll - * interface. We need to have this facility to cleanup correctly files that are - * closed without being removed from the eventpoll interface. + * This function unregisters poll callbacks from the associated file descriptor. + * Since this must be called without holding "ep->lock", the atomic exchange trick + * will protect us from multiple unregisters. */ -void eventpoll_release_file(struct file *file) +static void ep_unregister_pollwait(struct eventpoll *ep, struct epitem *epi) { - struct list_head *lsthead = &file->f_ep_links; - struct eventpoll *ep; - struct epitem *epi; + int nwait; + struct list_head *lsthead = &epi->pwqlist; + struct eppoll_entry *pwq; - /* - * We don't want to get "file->f_ep_lock" because it is not - * necessary. It is not necessary because we're in the "struct file" - * cleanup path, and this means that noone is using this file anymore. - * The only hit might come from ep_free() but by holding the semaphore - * will correctly serialize the operation. We do need to acquire - * "ep->sem" after "epmutex" because ep_remove() requires it when called - * from anywhere but ep_free(). - */ - mutex_lock(&epmutex); + /* This is called without locks, so we need the atomic exchange */ + nwait = xchg(&epi->nwait, 0); - while (!list_empty(lsthead)) { - epi = list_first_entry(lsthead, struct epitem, fllink); + if (nwait) { + while (!list_empty(lsthead)) { + pwq = list_first_entry(lsthead, struct eppoll_entry, llink); - ep = epi->ep; - list_del_init(&epi->fllink); - down_write(&ep->sem); - ep_remove(ep, epi); - up_write(&ep->sem); + list_del_init(&pwq->llink); + remove_wait_queue(pwq->whead, &pwq->wait); + kmem_cache_free(pwq_cache, pwq); + } } - - mutex_unlock(&epmutex); } - /* - * It opens an eventpoll file descriptor by suggesting a storage of "size" - * file descriptors.
The size parameter is just an hint about how to size
- * data structures. It won't prevent the user to store more than "size"
- * file descriptors inside the epoll interface. It is the kernel part of
- * the userspace epoll_create(2).
+ * Unlink the "struct epitem" from all places it might have been hooked up.
+ * This function must be called with write IRQ lock on "ep->lock".
 */
-asmlinkage long sys_epoll_create(int size)
+static int ep_unlink(struct eventpoll *ep, struct epitem *epi)
 {
- int error, fd = -1;
- struct eventpoll *ep;
- struct inode *inode;
- struct file *file;
-
- DNPRINTK(3, (KERN_INFO "[%p] eventpoll: sys_epoll_create(%d)\n",
-      current, size));
-
- /*
- * Sanity check on the size parameter, and create the internal data
- * structure ( "struct eventpoll" ).
- */
- error = -EINVAL;
- if (size <= 0 || (error = ep_alloc(&ep)) != 0)
- goto eexit_1;
+ int error;

 /*
- * Creates all the items needed to setup an eventpoll file. That is,
- * a file structure, and inode and a free file descriptor.
+ * It can happen that this one is called for an item already unlinked.
+ * The check protects us from doing a double unlink ( crash ).
 */
- error = ep_getfd(&fd, &inode, &file, ep);
- if (error)
- goto eexit_2;
-
- DNPRINTK(3, (KERN_INFO "[%p] eventpoll: sys_epoll_create(%d) = %d\n",
-      current, size, fd));
-
- return fd;
-
-eexit_2:
- ep_free(ep);
- kfree(ep);
-eexit_1:
- DNPRINTK(3, (KERN_INFO "[%p] eventpoll: sys_epoll_create(%d) = %d\n",
-      current, size, error));
- return error;
-}
-
-
-/*
- * The following function implements the controller interface for
- * the eventpoll file that enables the insertion/removal/change of
- * file descriptors inside the interest set. It represents
- * the kernel part of the user space epoll_ctl(2).
- */
-asmlinkage long
-sys_epoll_ctl(int epfd, int op, int fd, struct epoll_event __user *event)
-{
- int error;
- struct file *file, *tfile;
- struct eventpoll *ep;
- struct epitem *epi;
- struct epoll_event epds;
-
- DNPRINTK(3, (KERN_INFO "[%p] eventpoll: sys_epoll_ctl(%d, %d, %d, %p)\n",
-      current, epfd, op, fd, event));
-
- error = -EFAULT;
- if (ep_op_has_event(op) &&
-     copy_from_user(&epds, event, sizeof(struct epoll_event)))
- goto eexit_1;
-
- /* Get the "struct file *" for the eventpoll file */
- error = -EBADF;
- file = fget(epfd);
- if (!file)
- goto eexit_1;
-
- /* Get the "struct file *" for the target file */
- tfile = fget(fd);
- if (!tfile)
- goto eexit_2;
-
- /* The target file descriptor must support poll */
- error = -EPERM;
- if (!tfile->f_op || !tfile->f_op->poll)
- goto eexit_3;
+ error = -ENOENT;
+ if (!ep_rb_linked(&epi->rbn))
+ goto error_return;

 /*
- * We have to check that the file structure underneath the file descriptor
- * the user passed to us _is_ an eventpoll file. And also we do not permit
- * adding an epoll file descriptor inside itself.
+ * Clear the event mask for the unlinked item. This will avoid item
+ * notifications to be sent after the unlink operation from inside
+ * the kernel->userspace event transfer loop.
 */
- error = -EINVAL;
- if (file == tfile || !is_file_epoll(file))
- goto eexit_3;
+ epi->event.events = 0;

 /*
- * At this point it is safe to assume that the "private_data" contains
- * our own data structure.
+ * At this point it is safe to do the job, unlink the item from our rb-tree.
+ * This operation together with the above check closes the door to
+ * double unlinks.
*/ - ep = file->private_data; - - down_write(&ep->sem); - - /* Try to lookup the file inside our RB tree */ - epi = ep_find(ep, tfile, fd); - - error = -EINVAL; - switch (op) { - case EPOLL_CTL_ADD: - if (!epi) { - epds.events |= POLLERR | POLLHUP; - - error = ep_insert(ep, &epds, tfile, fd); - } else - error = -EEXIST; - break; - case EPOLL_CTL_DEL: - if (epi) - error = ep_remove(ep, epi); - else - error = -ENOENT; - break; - case EPOLL_CTL_MOD: - if (epi) { - epds.events |= POLLERR | POLLHUP; - error = ep_modify(ep, epi, &epds); - } else - error = -ENOENT; - break; - } + ep_rb_erase(&epi->rbn, &ep->rbr); /* - * The function ep_find() increments the usage count of the structure - * so, if this is not NULL, we need to release it. + * If the item we are going to remove is inside the ready file descriptors + * we want to remove it from this list to avoid stale events. */ - if (epi) - ep_release_epitem(epi); + if (ep_is_linked(&epi->rdllink)) + list_del_init(&epi->rdllink); - up_write(&ep->sem); + error = 0; +error_return: -eexit_3: - fput(tfile); -eexit_2: - fput(file); -eexit_1: - DNPRINTK(3, (KERN_INFO "[%p] eventpoll: sys_epoll_ctl(%d, %d, %d, %p) = %d\n", - current, epfd, op, fd, event, error)); + DNPRINTK(3, (KERN_INFO "[%p] eventpoll: ep_unlink(%p, %p) = %d\n", + current, ep, epi->ffd.file, error)); return error; } - /* - * Implement the event wait interface for the eventpoll file. It is the kernel - * part of the user space epoll_wait(2). + * Increment the usage count of the "struct epitem" making it sure + * that the user will have a valid pointer to reference. */ -asmlinkage long sys_epoll_wait(int epfd, struct epoll_event __user *events, - int maxevents, int timeout) +static void ep_use_epitem(struct epitem *epi) { - int error; - struct file *file; - struct eventpoll *ep; - - DNPRINTK(3, (KERN_INFO "[%p] eventpoll: sys_epoll_wait(%d, %p, %d, %d)\n", - current, epfd, events, maxevents, timeout)); - - /* The maximum number of event must be greater than zero */ - if (maxevents <= 0 || maxevents > EP_MAX_EVENTS) - return -EINVAL; - - /* Verify that the area passed by the user is writeable */ - if (!access_ok(VERIFY_WRITE, events, maxevents * sizeof(struct epoll_event))) { - error = -EFAULT; - goto eexit_1; - } - - /* Get the "struct file *" for the eventpoll file */ - error = -EBADF; - file = fget(epfd); - if (!file) - goto eexit_1; - - /* - * We have to check that the file structure underneath the fd - * the user passed to us _is_ an eventpoll file. - */ - error = -EINVAL; - if (!is_file_epoll(file)) - goto eexit_2; - - /* - * At this point it is safe to assume that the "private_data" contains - * our own data structure. - */ - ep = file->private_data; - - /* Time to fish for events ... */ - error = ep_poll(ep, events, maxevents, timeout); - -eexit_2: - fput(file); -eexit_1: - DNPRINTK(3, (KERN_INFO "[%p] eventpoll: sys_epoll_wait(%d, %p, %d, %d) = %d\n", - current, epfd, events, maxevents, timeout, error)); - - return error; + atomic_inc(&epi->usecnt); } - -#ifdef TIF_RESTORE_SIGMASK - /* - * Implement the event wait interface for the eventpoll file. It is the kernel - * part of the user space epoll_pwait(2). + * Decrement ( release ) the usage count by signaling that the user + * has finished using the structure. It might lead to freeing the + * structure itself if the count goes to zero. 
*/
-asmlinkage long sys_epoll_pwait(int epfd, struct epoll_event __user *events,
- int maxevents, int timeout, const sigset_t __user *sigmask,
- size_t sigsetsize)
+static void ep_release_epitem(struct epitem *epi)
 {
- int error;
- sigset_t ksigmask, sigsaved;
-
- /*
- * If the caller wants a certain signal mask to be set during the wait,
- * we apply it here.
- */
- if (sigmask) {
- if (sigsetsize != sizeof(sigset_t))
- return -EINVAL;
- if (copy_from_user(&ksigmask, sigmask, sizeof(ksigmask)))
- return -EFAULT;
- sigdelsetmask(&ksigmask, sigmask(SIGKILL) | sigmask(SIGSTOP));
- sigprocmask(SIG_SETMASK, &ksigmask, &sigsaved);
- }
-
- error = sys_epoll_wait(epfd, events, maxevents, timeout);
-
- /*
- * If we changed the signal mask, we need to restore the original one.
- * In case we've got a signal while waiting, we do not restore the
- * signal mask yet, and we allow do_signal() to deliver the signal on
- * the way back to userspace, before the signal mask is restored.
- */
- if (sigmask) {
- if (error == -EINTR) {
- memcpy(&current->saved_sigmask, &sigsaved,
- sizeof(sigsaved));
- set_thread_flag(TIF_RESTORE_SIGMASK);
- } else
- sigprocmask(SIG_SETMASK, &sigsaved, NULL);
- }
-
- return error;
+ if (atomic_dec_and_test(&epi->usecnt))
+ kmem_cache_free(epi_cache, epi);
 }
-
-#endif /* #ifdef TIF_RESTORE_SIGMASK */
-
-
 /*
- * Creates the file descriptor to be used by the epoll interface.
+ * Removes a "struct epitem" from the eventpoll RB tree and deallocates
+ * all the associated resources.
 */
-static int ep_getfd(int *efd, struct inode **einode, struct file **efile,
-      struct eventpoll *ep)
+static int ep_remove(struct eventpoll *ep, struct epitem *epi)
 {
- struct qstr this;
- char name[32];
- struct dentry *dentry;
- struct inode *inode;
- struct file *file;
- int error, fd;
-
- /* Get an ready to use file */
- error = -ENFILE;
- file = get_empty_filp();
- if (!file)
- goto eexit_1;
-
- /* Allocates an inode from the eventpoll file system */
- inode = ep_eventpoll_inode();
- if (IS_ERR(inode)) {
- error = PTR_ERR(inode);
- goto eexit_2;
- }
-
- /* Allocates a free descriptor to plug the file onto */
- error = get_unused_fd();
- if (error < 0)
- goto eexit_3;
- fd = error;
+ int error;
+ unsigned long flags;
+ struct file *file = epi->ffd.file;

 /*
- * Link the inode to a directory entry by creating a unique name
- * using the inode number.
+ * Removes poll wait queue hooks. We _have_ to do this without holding
+ * the "ep->lock" otherwise a deadlock might occur. This is because of the
+ * sequence of the lock acquisition. Here we do "ep->lock" then the wait
+ * queue head lock when unregistering the wait queue. The wakeup callback
+ * will run by holding the wait queue head lock and will call our callback
+ * that will try to get "ep->lock".
 */
- error = -ENOMEM;
- sprintf(name, "[%lu]", inode->i_ino);
- this.name = name;
- this.len = strlen(name);
- this.hash = inode->i_ino;
- dentry = d_alloc(eventpoll_mnt->mnt_sb->s_root, &this);
- if (!dentry)
- goto eexit_4;
- dentry->d_op = &eventpollfs_dentry_operations;
- d_add(dentry, inode);
- file->f_path.mnt = mntget(eventpoll_mnt);
- file->f_path.dentry = dentry;
- file->f_mapping = inode->i_mapping;
-
- file->f_pos = 0;
- file->f_flags = O_RDONLY;
- file->f_op = &eventpoll_fops;
- file->f_mode = FMODE_READ;
- file->f_version = 0;
- file->private_data = ep;
-
- /* Install the new setup file into the allocated fd.
*/
- fd_install(fd, file);
-
- *efd = fd;
- *einode = inode;
- *efile = file;
- return 0;
+ ep_unregister_pollwait(ep, epi);

-eexit_4:
- put_unused_fd(fd);
-eexit_3:
- iput(inode);
-eexit_2:
- put_filp(file);
-eexit_1:
- return error;
-}
+ /* Remove the current item from the list of epoll hooks */
+ spin_lock(&file->f_ep_lock);
+ if (ep_is_linked(&epi->fllink))
+ list_del_init(&epi->fllink);
+ spin_unlock(&file->f_ep_lock);

+ /* We need to acquire the write IRQ lock before calling ep_unlink() */
+ write_lock_irqsave(&ep->lock, flags);

-static int ep_alloc(struct eventpoll **pep)
-{
- struct eventpoll *ep = kzalloc(sizeof(*ep), GFP_KERNEL);
+ /* Really unlink the item from the RB tree */
+ error = ep_unlink(ep, epi);

- if (!ep)
- return -ENOMEM;
+ write_unlock_irqrestore(&ep->lock, flags);

- rwlock_init(&ep->lock);
- init_rwsem(&ep->sem);
- init_waitqueue_head(&ep->wq);
- init_waitqueue_head(&ep->poll_wait);
- INIT_LIST_HEAD(&ep->rdllist);
- ep->rbr = RB_ROOT;
+ if (error)
+ goto error_return;

- *pep = ep;
+ /* At this point it is safe to free the eventpoll item */
+ ep_release_epitem(epi);

- DNPRINTK(3, (KERN_INFO "[%p] eventpoll: ep_alloc() ep=%p\n",
-      current, ep));
- return 0;
-}
+ error = 0;
+error_return:
+ DNPRINTK(3, (KERN_INFO "[%p] eventpoll: ep_remove(%p, %p) = %d\n",
+      current, ep, file, error));

+ return error;
+}

 static void ep_free(struct eventpoll *ep)
 {
@@ -865,6 +536,104 @@ static void ep_free(struct eventpoll *ep)
 	mutex_unlock(&epmutex);
 }

+static int ep_eventpoll_release(struct inode *inode, struct file *file)
+{
+ struct eventpoll *ep = file->private_data;
+
+ if (ep) {
+ ep_free(ep);
+ kfree(ep);
+ }
+
+ DNPRINTK(3, (KERN_INFO "[%p] eventpoll: close() ep=%p\n", current, ep));
+ return 0;
+}
+
+static unsigned int ep_eventpoll_poll(struct file *file, poll_table *wait)
+{
+ unsigned int pollflags = 0;
+ unsigned long flags;
+ struct eventpoll *ep = file->private_data;
+
+ /* Insert inside our poll wait queue */
+ poll_wait(file, &ep->poll_wait, wait);
+
+ /* Check our condition */
+ read_lock_irqsave(&ep->lock, flags);
+ if (!list_empty(&ep->rdllist))
+ pollflags = POLLIN | POLLRDNORM;
+ read_unlock_irqrestore(&ep->lock, flags);
+
+ return pollflags;
+}
+
+/* File callbacks that implement the eventpoll file behaviour */
+static const struct file_operations eventpoll_fops = {
+ .release = ep_eventpoll_release,
+ .poll = ep_eventpoll_poll
+};
+
+/* Fast test to see if the file is an eventpoll file */
+static inline int is_file_epoll(struct file *f)
+{
+ return f->f_op == &eventpoll_fops;
+}
+
+/*
+ * This is called from eventpoll_release() to unlink files from the eventpoll
+ * interface. We need to have this facility to correctly clean up files that are
+ * closed without being removed from the eventpoll interface.
+ */
+void eventpoll_release_file(struct file *file)
+{
+ struct list_head *lsthead = &file->f_ep_links;
+ struct eventpoll *ep;
+ struct epitem *epi;
+
+ /*
+ * We don't want to get "file->f_ep_lock" because it is not
+ * necessary. It is not necessary because we're in the "struct file"
+ * cleanup path, and this means that no one is using this file anymore.
+ * The only hit might come from ep_free() but holding the semaphore
+ * will correctly serialize the operation. We do need to acquire
+ * "ep->sem" after "epmutex" because ep_remove() requires it when called
+ * from anywhere but ep_free().
+ */
+ mutex_lock(&epmutex);
+
+ while (!list_empty(lsthead)) {
+ epi = list_first_entry(lsthead, struct epitem, fllink);
+
+ ep = epi->ep;
+ list_del_init(&epi->fllink);
+ down_write(&ep->sem);
+ ep_remove(ep, epi);
+ up_write(&ep->sem);
+ }
+
+ mutex_unlock(&epmutex);
+}
+
+static int ep_alloc(struct eventpoll **pep)
+{
+ struct eventpoll *ep = kzalloc(sizeof(*ep), GFP_KERNEL);
+
+ if (!ep)
+ return -ENOMEM;
+
+ rwlock_init(&ep->lock);
+ init_rwsem(&ep->sem);
+ init_waitqueue_head(&ep->wq);
+ init_waitqueue_head(&ep->poll_wait);
+ INIT_LIST_HEAD(&ep->rdllist);
+ ep->rbr = RB_ROOT;
+
+ *pep = ep;
+
+ DNPRINTK(3, (KERN_INFO "[%p] eventpoll: ep_alloc() ep=%p\n",
+      current, ep));
+ return 0;
+}

 /*
 * Search the file inside the eventpoll tree. It add usage count to
@@ -902,30 +671,58 @@ static struct epitem *ep_find(struct eventpoll *ep, struct file *file, int fd)
 	return epir;
 }

-/*
- * Increment the usage count of the "struct epitem" making it sure
- * that the user will have a valid pointer to reference.
+/*
+ * This is the callback that is passed to the wait queue wakeup
+ * mechanism. It is called by the stored file descriptors when they
+ * have events to report.
 */
-static void ep_use_epitem(struct epitem *epi)
+static int ep_poll_callback(wait_queue_t *wait, unsigned mode, int sync, void *key)
 {
+ int pwake = 0;
+ unsigned long flags;
+ struct epitem *epi = ep_item_from_wait(wait);
+ struct eventpoll *ep = epi->ep;

- atomic_inc(&epi->usecnt);
-}
+ DNPRINTK(3, (KERN_INFO "[%p] eventpoll: poll_callback(%p) epi=%p ep=%p\n",
+      current, epi->ffd.file, epi, ep));

+ write_lock_irqsave(&ep->lock, flags);

-/*
- * Decrement ( release ) the usage count by signaling that the user
- * has finished using the structure. It might lead to freeing the
- * structure itself if the count goes to zero.
- */
-static void ep_release_epitem(struct epitem *epi)
-{
+ /*
+ * If the event mask does not contain any poll(2) event, we consider the
+ * descriptor to be disabled. This condition is likely the effect of the
+ * EPOLLONESHOT bit that disables the descriptor when an event is received,
+ * until the next EPOLL_CTL_MOD will be issued.
+ */
+ if (!(epi->event.events & ~EP_PRIVATE_BITS))
+ goto is_disabled;

- if (atomic_dec_and_test(&epi->usecnt))
- kmem_cache_free(epi_cache, epi);
-}

+ /* If this file is already in the ready list we exit soon */
+ if (ep_is_linked(&epi->rdllink))
+ goto is_linked;

+ list_add_tail(&epi->rdllink, &ep->rdllist);
+
+is_linked:
+ /*
+ * Wake up ( if active ) both the eventpoll wait list and the ->poll()
+ * wait list.
+ */ + if (waitqueue_active(&ep->wq)) + __wake_up_locked(&ep->wq, TASK_UNINTERRUPTIBLE | + TASK_INTERRUPTIBLE); + if (waitqueue_active(&ep->poll_wait)) + pwake++; + +is_disabled: + write_unlock_irqrestore(&ep->lock, flags); + + /* We have to call this outside the lock */ + if (pwake) + ep_poll_safewake(&psw, &ep->poll_wait); + + return 1; +} /* * This is the callback that is used to add our wait queue to the @@ -950,7 +747,6 @@ static void ep_ptable_queue_proc(struct file *file, wait_queue_head_t *whead, } } - static void ep_rbtree_insert(struct eventpoll *ep, struct epitem *epi) { int kcmp; @@ -970,7 +766,6 @@ static void ep_rbtree_insert(struct eventpoll *ep, struct epitem *epi) rb_insert_color(&epi->rbn, &ep->rbr); } - static int ep_insert(struct eventpoll *ep, struct epoll_event *event, struct file *tfile, int fd) { @@ -981,7 +776,7 @@ static int ep_insert(struct eventpoll *ep, struct epoll_event *event, error = -ENOMEM; if (!(epi = kmem_cache_alloc(epi_cache, GFP_KERNEL))) - goto eexit_1; + goto error_return; /* Item initialization follow here ... */ ep_rb_initnode(&epi->rbn); @@ -1011,7 +806,7 @@ static int ep_insert(struct eventpoll *ep, struct epoll_event *event, * high memory pressure. */ if (epi->nwait < 0) - goto eexit_2; + goto error_unregister; /* Add the current item to the list of active epoll hook for this file */ spin_lock(&tfile->f_ep_lock); @@ -1046,7 +841,7 @@ static int ep_insert(struct eventpoll *ep, struct epoll_event *event, return 0; -eexit_2: +error_unregister: ep_unregister_pollwait(ep, epi); /* @@ -1059,11 +854,10 @@ eexit_2: write_unlock_irqrestore(&ep->lock, flags); kmem_cache_free(epi_cache, epi); -eexit_1: +error_return: return error; } - /* * Modify the interest event mask by dropping an event if the new mask * has a match in the current file status. @@ -1126,216 +920,6 @@ static int ep_modify(struct eventpoll *ep, struct epitem *epi, struct epoll_even return 0; } - -/* - * This function unregister poll callbacks from the associated file descriptor. - * Since this must be called without holding "ep->lock" the atomic exchange trick - * will protect us from multiple unregister. - */ -static void ep_unregister_pollwait(struct eventpoll *ep, struct epitem *epi) -{ - int nwait; - struct list_head *lsthead = &epi->pwqlist; - struct eppoll_entry *pwq; - - /* This is called without locks, so we need the atomic exchange */ - nwait = xchg(&epi->nwait, 0); - - if (nwait) { - while (!list_empty(lsthead)) { - pwq = list_first_entry(lsthead, struct eppoll_entry, llink); - - list_del_init(&pwq->llink); - remove_wait_queue(pwq->whead, &pwq->wait); - kmem_cache_free(pwq_cache, pwq); - } - } -} - - -/* - * Unlink the "struct epitem" from all places it might have been hooked up. - * This function must be called with write IRQ lock on "ep->lock". - */ -static int ep_unlink(struct eventpoll *ep, struct epitem *epi) -{ - int error; - - /* - * It can happen that this one is called for an item already unlinked. - * The check protect us from doing a double unlink ( crash ). - */ - error = -ENOENT; - if (!ep_rb_linked(&epi->rbn)) - goto eexit_1; - - /* - * Clear the event mask for the unlinked item. This will avoid item - * notifications to be sent after the unlink operation from inside - * the kernel->userspace event transfer loop. - */ - epi->event.events = 0; - - /* - * At this point is safe to do the job, unlink the item from our rb-tree. - * This operation togheter with the above check closes the door to - * double unlinks. 
- */ - ep_rb_erase(&epi->rbn, &ep->rbr); - - /* - * If the item we are going to remove is inside the ready file descriptors - * we want to remove it from this list to avoid stale events. - */ - if (ep_is_linked(&epi->rdllink)) - list_del_init(&epi->rdllink); - - error = 0; -eexit_1: - - DNPRINTK(3, (KERN_INFO "[%p] eventpoll: ep_unlink(%p, %p) = %d\n", - current, ep, epi->ffd.file, error)); - - return error; -} - - -/* - * Removes a "struct epitem" from the eventpoll RB tree and deallocates - * all the associated resources. - */ -static int ep_remove(struct eventpoll *ep, struct epitem *epi) -{ - int error; - unsigned long flags; - struct file *file = epi->ffd.file; - - /* - * Removes poll wait queue hooks. We _have_ to do this without holding - * the "ep->lock" otherwise a deadlock might occur. This because of the - * sequence of the lock acquisition. Here we do "ep->lock" then the wait - * queue head lock when unregistering the wait queue. The wakeup callback - * will run by holding the wait queue head lock and will call our callback - * that will try to get "ep->lock". - */ - ep_unregister_pollwait(ep, epi); - - /* Remove the current item from the list of epoll hooks */ - spin_lock(&file->f_ep_lock); - if (ep_is_linked(&epi->fllink)) - list_del_init(&epi->fllink); - spin_unlock(&file->f_ep_lock); - - /* We need to acquire the write IRQ lock before calling ep_unlink() */ - write_lock_irqsave(&ep->lock, flags); - - /* Really unlink the item from the RB tree */ - error = ep_unlink(ep, epi); - - write_unlock_irqrestore(&ep->lock, flags); - - if (error) - goto eexit_1; - - /* At this point it is safe to free the eventpoll item */ - ep_release_epitem(epi); - - error = 0; -eexit_1: - DNPRINTK(3, (KERN_INFO "[%p] eventpoll: ep_remove(%p, %p) = %d\n", - current, ep, file, error)); - - return error; -} - - -/* - * This is the callback that is passed to the wait queue wakeup - * machanism. It is called by the stored file descriptors when they - * have events to report. - */ -static int ep_poll_callback(wait_queue_t *wait, unsigned mode, int sync, void *key) -{ - int pwake = 0; - unsigned long flags; - struct epitem *epi = ep_item_from_wait(wait); - struct eventpoll *ep = epi->ep; - - DNPRINTK(3, (KERN_INFO "[%p] eventpoll: poll_callback(%p) epi=%p ep=%p\n", - current, epi->ffd.file, epi, ep)); - - write_lock_irqsave(&ep->lock, flags); - - /* - * If the event mask does not contain any poll(2) event, we consider the - * descriptor to be disabled. This condition is likely the effect of the - * EPOLLONESHOT bit that disables the descriptor when an event is received, - * until the next EPOLL_CTL_MOD will be issued. - */ - if (!(epi->event.events & ~EP_PRIVATE_BITS)) - goto is_disabled; - - /* If this file is already in the ready list we exit soon */ - if (ep_is_linked(&epi->rdllink)) - goto is_linked; - - list_add_tail(&epi->rdllink, &ep->rdllist); - -is_linked: - /* - * Wake up ( if active ) both the eventpoll wait list and the ->poll() - * wait list. 
- */ - if (waitqueue_active(&ep->wq)) - __wake_up_locked(&ep->wq, TASK_UNINTERRUPTIBLE | - TASK_INTERRUPTIBLE); - if (waitqueue_active(&ep->poll_wait)) - pwake++; - -is_disabled: - write_unlock_irqrestore(&ep->lock, flags); - - /* We have to call this outside the lock */ - if (pwake) - ep_poll_safewake(&psw, &ep->poll_wait); - - return 1; -} - - -static int ep_eventpoll_close(struct inode *inode, struct file *file) -{ - struct eventpoll *ep = file->private_data; - - if (ep) { - ep_free(ep); - kfree(ep); - } - - DNPRINTK(3, (KERN_INFO "[%p] eventpoll: close() ep=%p\n", current, ep)); - return 0; -} - - -static unsigned int ep_eventpoll_poll(struct file *file, poll_table *wait) -{ - unsigned int pollflags = 0; - unsigned long flags; - struct eventpoll *ep = file->private_data; - - /* Insert inside our poll wait queue */ - poll_wait(file, &ep->poll_wait, wait); - - /* Check our condition */ - read_lock_irqsave(&ep->lock, flags); - if (!list_empty(&ep->rdllist)) - pollflags = POLLIN | POLLRDNORM; - read_unlock_irqrestore(&ep->lock, flags); - - return pollflags; -} - - /* * This function is called without holding the "ep->lock" since the call to * __copy_to_user() might sleep, and also f_op->poll() might reenable the IRQ @@ -1447,7 +1031,6 @@ static int ep_send_events(struct eventpoll *ep, struct list_head *txlist, return eventcnt == 0 ? error: eventcnt; } - /* * Perform the transfer of events to user space. */ @@ -1483,7 +1066,6 @@ static int ep_events_transfer(struct eventpoll *ep, return eventcnt; } - static int ep_poll(struct eventpoll *ep, struct epoll_event __user *events, int maxevents, long timeout) { @@ -1553,52 +1135,262 @@ retry: return res; } -static int eventpollfs_delete_dentry(struct dentry *dentry) +/* + * It opens an eventpoll file descriptor by suggesting a storage of "size" + * file descriptors. The size parameter is just an hint about how to size + * data structures. It won't prevent the user to store more than "size" + * file descriptors inside the epoll interface. It is the kernel part of + * the userspace epoll_create(2). + */ +asmlinkage long sys_epoll_create(int size) { + int error, fd = -1; + struct eventpoll *ep; + struct inode *inode; + struct file *file; - return 1; + DNPRINTK(3, (KERN_INFO "[%p] eventpoll: sys_epoll_create(%d)\n", + current, size)); + + /* + * Sanity check on the size parameter, and create the internal data + * structure ( "struct eventpoll" ). + */ + error = -EINVAL; + if (size <= 0 || (error = ep_alloc(&ep)) != 0) + goto error_return; + + /* + * Creates all the items needed to setup an eventpoll file. That is, + * a file structure, and inode and a free file descriptor. + */ + error = anon_inode_getfd(&fd, &inode, &file, "[eventpoll]", + &eventpoll_fops, ep); + if (error) + goto error_free; + + DNPRINTK(3, (KERN_INFO "[%p] eventpoll: sys_epoll_create(%d) = %d\n", + current, size, fd)); + + return fd; + +error_free: + ep_free(ep); + kfree(ep); +error_return: + DNPRINTK(3, (KERN_INFO "[%p] eventpoll: sys_epoll_create(%d) = %d\n", + current, size, error)); + return error; } -static struct inode *ep_eventpoll_inode(void) +/* + * The following function implements the controller interface for + * the eventpoll file that enables the insertion/removal/change of + * file descriptors inside the interest set. It represents + * the kernel part of the user space epoll_ctl(2). 
+ */ +asmlinkage long sys_epoll_ctl(int epfd, int op, int fd, + struct epoll_event __user *event) { - int error = -ENOMEM; - struct inode *inode = new_inode(eventpoll_mnt->mnt_sb); + int error; + struct file *file, *tfile; + struct eventpoll *ep; + struct epitem *epi; + struct epoll_event epds; + + DNPRINTK(3, (KERN_INFO "[%p] eventpoll: sys_epoll_ctl(%d, %d, %d, %p)\n", + current, epfd, op, fd, event)); + + error = -EFAULT; + if (ep_op_has_event(op) && + copy_from_user(&epds, event, sizeof(struct epoll_event))) + goto error_return; + + /* Get the "struct file *" for the eventpoll file */ + error = -EBADF; + file = fget(epfd); + if (!file) + goto error_return; + + /* Get the "struct file *" for the target file */ + tfile = fget(fd); + if (!tfile) + goto error_fput; + + /* The target file descriptor must support poll */ + error = -EPERM; + if (!tfile->f_op || !tfile->f_op->poll) + goto error_tgt_fput; + + /* + * We have to check that the file structure underneath the file descriptor + * the user passed to us _is_ an eventpoll file. And also we do not permit + * adding an epoll file descriptor inside itself. + */ + error = -EINVAL; + if (file == tfile || !is_file_epoll(file)) + goto error_tgt_fput; - if (!inode) - goto eexit_1; + /* + * At this point it is safe to assume that the "private_data" contains + * our own data structure. + */ + ep = file->private_data; + + down_write(&ep->sem); - inode->i_fop = &eventpoll_fops; + /* Try to lookup the file inside our RB tree */ + epi = ep_find(ep, tfile, fd); + + error = -EINVAL; + switch (op) { + case EPOLL_CTL_ADD: + if (!epi) { + epds.events |= POLLERR | POLLHUP; + error = ep_insert(ep, &epds, tfile, fd); + } else + error = -EEXIST; + break; + case EPOLL_CTL_DEL: + if (epi) + error = ep_remove(ep, epi); + else + error = -ENOENT; + break; + case EPOLL_CTL_MOD: + if (epi) { + epds.events |= POLLERR | POLLHUP; + error = ep_modify(ep, epi, &epds); + } else + error = -ENOENT; + break; + } /* - * Mark the inode dirty from the very beginning, - * that way it will never be moved to the dirty - * list because mark_inode_dirty() will think - * that it already _is_ on the dirty list. + * The function ep_find() increments the usage count of the structure + * so, if this is not NULL, we need to release it. */ - inode->i_state = I_DIRTY; - inode->i_mode = S_IRUSR | S_IWUSR; - inode->i_uid = current->fsuid; - inode->i_gid = current->fsgid; - inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME; - return inode; - -eexit_1: - return ERR_PTR(error); + if (epi) + ep_release_epitem(epi); + up_write(&ep->sem); + +error_tgt_fput: + fput(tfile); +error_fput: + fput(file); +error_return: + DNPRINTK(3, (KERN_INFO "[%p] eventpoll: sys_epoll_ctl(%d, %d, %d, %p) = %d\n", + current, epfd, op, fd, event, error)); + + return error; } -static int -eventpollfs_get_sb(struct file_system_type *fs_type, int flags, - const char *dev_name, void *data, struct vfsmount *mnt) +/* + * Implement the event wait interface for the eventpoll file. It is the kernel + * part of the user space epoll_wait(2). 
+ */ +asmlinkage long sys_epoll_wait(int epfd, struct epoll_event __user *events, + int maxevents, int timeout) { - return get_sb_pseudo(fs_type, "eventpoll:", NULL, EVENTPOLLFS_MAGIC, - mnt); + int error; + struct file *file; + struct eventpoll *ep; + + DNPRINTK(3, (KERN_INFO "[%p] eventpoll: sys_epoll_wait(%d, %p, %d, %d)\n", + current, epfd, events, maxevents, timeout)); + + /* The maximum number of event must be greater than zero */ + if (maxevents <= 0 || maxevents > EP_MAX_EVENTS) + return -EINVAL; + + /* Verify that the area passed by the user is writeable */ + if (!access_ok(VERIFY_WRITE, events, maxevents * sizeof(struct epoll_event))) { + error = -EFAULT; + goto error_return; + } + + /* Get the "struct file *" for the eventpoll file */ + error = -EBADF; + file = fget(epfd); + if (!file) + goto error_return; + + /* + * We have to check that the file structure underneath the fd + * the user passed to us _is_ an eventpoll file. + */ + error = -EINVAL; + if (!is_file_epoll(file)) + goto error_fput; + + /* + * At this point it is safe to assume that the "private_data" contains + * our own data structure. + */ + ep = file->private_data; + + /* Time to fish for events ... */ + error = ep_poll(ep, events, maxevents, timeout); + +error_fput: + fput(file); +error_return: + DNPRINTK(3, (KERN_INFO "[%p] eventpoll: sys_epoll_wait(%d, %p, %d, %d) = %d\n", + current, epfd, events, maxevents, timeout, error)); + + return error; } +#ifdef TIF_RESTORE_SIGMASK -static int __init eventpoll_init(void) +/* + * Implement the event wait interface for the eventpoll file. It is the kernel + * part of the user space epoll_pwait(2). + */ +asmlinkage long sys_epoll_pwait(int epfd, struct epoll_event __user *events, + int maxevents, int timeout, const sigset_t __user *sigmask, + size_t sigsetsize) { int error; + sigset_t ksigmask, sigsaved; + + /* + * If the caller wants a certain signal mask to be set during the wait, + * we apply it here. + */ + if (sigmask) { + if (sigsetsize != sizeof(sigset_t)) + return -EINVAL; + if (copy_from_user(&ksigmask, sigmask, sizeof(ksigmask))) + return -EFAULT; + sigdelsetmask(&ksigmask, sigmask(SIGKILL) | sigmask(SIGSTOP)); + sigprocmask(SIG_SETMASK, &ksigmask, &sigsaved); + } + + error = sys_epoll_wait(epfd, events, maxevents, timeout); + + /* + * If we changed the signal mask, we need to restore the original one. + * In case we've got a signal while waiting, we do not restore the + * signal mask yet, and we allow do_signal() to deliver the signal on + * the way back to userspace, before the signal mask is restored. 
+ */
+ if (sigmask) {
+ if (error == -EINTR) {
+ memcpy(&current->saved_sigmask, &sigsaved,
+        sizeof(sigsaved));
+ set_thread_flag(TIF_RESTORE_SIGMASK);
+ } else
+ sigprocmask(SIG_SETMASK, &sigsaved, NULL);
+ }
+
+ return error;
+}
+#endif /* #ifdef TIF_RESTORE_SIGMASK */
+
+static int __init eventpoll_init(void)
+{
	mutex_init(&epmutex);

 	/* Initialize the structure used to perform safe poll wait head wake ups */
@@ -1614,39 +1406,7 @@ static int __init eventpoll_init(void)
 		sizeof(struct eppoll_entry), 0,
 		EPI_SLAB_DEBUG|SLAB_PANIC, NULL, NULL);

- /*
- * Register the virtual file system that will be the source of inodes
- * for the eventpoll files
- */
- error = register_filesystem(&eventpoll_fs_type);
- if (error)
- goto epanic;
-
- /* Mount the above commented virtual file system */
- eventpoll_mnt = kern_mount(&eventpoll_fs_type);
- error = PTR_ERR(eventpoll_mnt);
- if (IS_ERR(eventpoll_mnt))
- goto epanic;
-
- DNPRINTK(3, (KERN_INFO "[%p] eventpoll: successfully initialized.\n",
-      current));
 	return 0;
-
-epanic:
- panic("eventpoll_init() failed\n");
}
+fs_initcall(eventpoll_init);

-
-static void __exit eventpoll_exit(void)
-{
- /* Undo all operations done inside eventpoll_init() */
- unregister_filesystem(&eventpoll_fs_type);
- mntput(eventpoll_mnt);
- kmem_cache_destroy(pwq_cache);
- kmem_cache_destroy(epi_cache);
-}
-
-module_init(eventpoll_init);
-module_exit(eventpoll_exit);
-
-MODULE_LICENSE("GPL");
diff --git a/fs/exec.c b/fs/exec.c
index 7cf078ec758..70fa36554c1 100644
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -50,6 +50,7 @@
 #include <linux/tsacct_kern.h>
 #include <linux/cn_proc.h>
 #include <linux/audit.h>
+#include <linux/signalfd.h>

 #include <asm/uaccess.h>
 #include <asm/mmu_context.h>
@@ -582,6 +583,13 @@ static int de_thread(struct task_struct *tsk)
 	int count;

 	/*
+	 * Tell all the sighand listeners that this sighand has
+	 * been detached. The signalfd_detach() function grabs the
+	 * sighand lock, if signal listeners are present on the sighand.
+	 */
+	signalfd_detach(tsk);
+
+	/*
 	 * If we don't share sighandlers, then we aren't sharing anything
 	 * and we can just re-use it all.
 	 */
@@ -702,7 +710,7 @@ static int de_thread(struct task_struct *tsk)
 	 */
 	detach_pid(tsk, PIDTYPE_PID);
 	tsk->pid = leader->pid;
-	attach_pid(tsk, PIDTYPE_PID, tsk->pid);
+	attach_pid(tsk, PIDTYPE_PID, find_pid(tsk->pid));
 	transfer_pid(leader, tsk, PIDTYPE_PGID);
 	transfer_pid(leader, tsk, PIDTYPE_SID);
 	list_replace_rcu(&leader->tasks, &tsk->tasks);
@@ -757,8 +765,7 @@ no_thread_group:
 	spin_unlock(&oldsighand->siglock);
 	write_unlock_irq(&tasklist_lock);

-	if (atomic_dec_and_test(&oldsighand->count))
-		kmem_cache_free(sighand_cachep, oldsighand);
+	__cleanup_sighand(oldsighand);
 }

 BUG_ON(!thread_group_leader(tsk));
diff --git a/fs/mpage.c b/fs/mpage.c
index 0fb914fc2ee..c1698f2291a 100644
--- a/fs/mpage.c
+++ b/fs/mpage.c
@@ -454,11 +454,18 @@ EXPORT_SYMBOL(mpage_readpage);
 * written, so it can intelligently allocate a suitably-sized BIO. For now,
 * just allocate full-size (16-page) BIOs.
*/ -static struct bio * -__mpage_writepage(struct bio *bio, struct page *page, get_block_t get_block, - sector_t *last_block_in_bio, int *ret, struct writeback_control *wbc, - writepage_t writepage_fn) +struct mpage_data { + struct bio *bio; + sector_t last_block_in_bio; + get_block_t *get_block; + unsigned use_writepage; +}; + +static int __mpage_writepage(struct page *page, struct writeback_control *wbc, + void *data) { + struct mpage_data *mpd = data; + struct bio *bio = mpd->bio; struct address_space *mapping = page->mapping; struct inode *inode = page->mapping->host; const unsigned blkbits = inode->i_blkbits; @@ -476,6 +483,7 @@ __mpage_writepage(struct bio *bio, struct page *page, get_block_t get_block, int length; struct buffer_head map_bh; loff_t i_size = i_size_read(inode); + int ret = 0; if (page_has_buffers(page)) { struct buffer_head *head = page_buffers(page); @@ -538,7 +546,7 @@ __mpage_writepage(struct bio *bio, struct page *page, get_block_t get_block, map_bh.b_state = 0; map_bh.b_size = 1 << blkbits; - if (get_block(inode, block_in_file, &map_bh, 1)) + if (mpd->get_block(inode, block_in_file, &map_bh, 1)) goto confused; if (buffer_new(&map_bh)) unmap_underlying_metadata(map_bh.b_bdev, @@ -584,7 +592,7 @@ page_is_mapped: /* * This page will go to BIO. Do we need to send this BIO off first? */ - if (bio && *last_block_in_bio != blocks[0] - 1) + if (bio && mpd->last_block_in_bio != blocks[0] - 1) bio = mpage_bio_submit(WRITE, bio); alloc_new: @@ -641,7 +649,7 @@ alloc_new: boundary_block, 1 << blkbits); } } else { - *last_block_in_bio = blocks[blocks_per_page - 1]; + mpd->last_block_in_bio = blocks[blocks_per_page - 1]; } goto out; @@ -649,18 +657,19 @@ confused: if (bio) bio = mpage_bio_submit(WRITE, bio); - if (writepage_fn) { - *ret = (*writepage_fn)(page, wbc); + if (mpd->use_writepage) { + ret = mapping->a_ops->writepage(page, wbc); } else { - *ret = -EAGAIN; + ret = -EAGAIN; goto out; } /* * The caller has a ref on the inode, so *mapping is stable */ - mapping_set_error(mapping, *ret); + mapping_set_error(mapping, ret); out: - return bio; + mpd->bio = bio; + return ret; } /** @@ -683,120 +692,27 @@ out: * the call was made get new I/O started against them. If wbc->sync_mode is * WB_SYNC_ALL then we were called for data integrity and we must wait for * existing IO to complete. - * - * If you fix this you should check generic_writepages() also! 
*/ int mpage_writepages(struct address_space *mapping, struct writeback_control *wbc, get_block_t get_block) { - struct backing_dev_info *bdi = mapping->backing_dev_info; - struct bio *bio = NULL; - sector_t last_block_in_bio = 0; - int ret = 0; - int done = 0; - int (*writepage)(struct page *page, struct writeback_control *wbc); - struct pagevec pvec; - int nr_pages; - pgoff_t index; - pgoff_t end; /* Inclusive */ - int scanned = 0; - int range_whole = 0; - - if (wbc->nonblocking && bdi_write_congested(bdi)) { - wbc->encountered_congestion = 1; - return 0; - } - - writepage = NULL; - if (get_block == NULL) - writepage = mapping->a_ops->writepage; - - pagevec_init(&pvec, 0); - if (wbc->range_cyclic) { - index = mapping->writeback_index; /* Start from prev offset */ - end = -1; - } else { - index = wbc->range_start >> PAGE_CACHE_SHIFT; - end = wbc->range_end >> PAGE_CACHE_SHIFT; - if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX) - range_whole = 1; - scanned = 1; + int ret; + + if (!get_block) + ret = generic_writepages(mapping, wbc); + else { + struct mpage_data mpd = { + .bio = NULL, + .last_block_in_bio = 0, + .get_block = get_block, + .use_writepage = 1, + }; + + ret = write_cache_pages(mapping, wbc, __mpage_writepage, &mpd); + if (mpd.bio) + mpage_bio_submit(WRITE, mpd.bio); } -retry: - while (!done && (index <= end) && - (nr_pages = pagevec_lookup_tag(&pvec, mapping, &index, - PAGECACHE_TAG_DIRTY, - min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1))) { - unsigned i; - - scanned = 1; - for (i = 0; i < nr_pages; i++) { - struct page *page = pvec.pages[i]; - - /* - * At this point we hold neither mapping->tree_lock nor - * lock on the page itself: the page may be truncated or - * invalidated (changing page->mapping to NULL), or even - * swizzled back from swapper_space to tmpfs file - * mapping - */ - - lock_page(page); - - if (unlikely(page->mapping != mapping)) { - unlock_page(page); - continue; - } - - if (!wbc->range_cyclic && page->index > end) { - done = 1; - unlock_page(page); - continue; - } - - if (wbc->sync_mode != WB_SYNC_NONE) - wait_on_page_writeback(page); - - if (PageWriteback(page) || - !clear_page_dirty_for_io(page)) { - unlock_page(page); - continue; - } - - if (writepage) { - ret = (*writepage)(page, wbc); - mapping_set_error(mapping, ret); - } else { - bio = __mpage_writepage(bio, page, get_block, - &last_block_in_bio, &ret, wbc, - page->mapping->a_ops->writepage); - } - if (unlikely(ret == AOP_WRITEPAGE_ACTIVATE)) - unlock_page(page); - if (ret || (--(wbc->nr_to_write) <= 0)) - done = 1; - if (wbc->nonblocking && bdi_write_congested(bdi)) { - wbc->encountered_congestion = 1; - done = 1; - } - } - pagevec_release(&pvec); - cond_resched(); - } - if (!scanned && !done) { - /* - * We hit the last page and there is more work to be done: wrap - * back to the start of the file - */ - scanned = 1; - index = 0; - goto retry; - } - if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0)) - mapping->writeback_index = index; - if (bio) - mpage_bio_submit(WRITE, bio); return ret; } EXPORT_SYMBOL(mpage_writepages); @@ -804,15 +720,15 @@ EXPORT_SYMBOL(mpage_writepages); int mpage_writepage(struct page *page, get_block_t get_block, struct writeback_control *wbc) { - int ret = 0; - struct bio *bio; - sector_t last_block_in_bio = 0; - - bio = __mpage_writepage(NULL, page, get_block, - &last_block_in_bio, &ret, wbc, NULL); - if (bio) - mpage_bio_submit(WRITE, bio); - + struct mpage_data mpd = { + .bio = NULL, + .last_block_in_bio = 0, + .get_block = get_block, + 
.use_writepage = 0, + }; + int ret = __mpage_writepage(page, wbc, &mpd); + if (mpd.bio) + mpage_bio_submit(WRITE, mpd.bio); return ret; } EXPORT_SYMBOL(mpage_writepage); diff --git a/fs/partitions/Kconfig b/fs/partitions/Kconfig index 01207042048..7638a1c42a7 100644 --- a/fs/partitions/Kconfig +++ b/fs/partitions/Kconfig @@ -239,7 +239,7 @@ config EFI_PARTITION config SYSV68_PARTITION bool "SYSV68 partition table support" if PARTITION_ADVANCED - default y if M68K + default y if VME help Say Y here if you would like to be able to read the hard disk partition table format used by Motorola Delta machines (using diff --git a/fs/partitions/efi.c b/fs/partitions/efi.c index 1bea610078b..e7b07006bc4 100644 --- a/fs/partitions/efi.c +++ b/fs/partitions/efi.c @@ -152,7 +152,7 @@ last_lba(struct block_device *bdev) } static inline int -pmbr_part_valid(struct partition *part, u64 lastlba) +pmbr_part_valid(struct partition *part) { if (part->sys_ind == EFI_PMBR_OSTYPE_EFI_GPT && le32_to_cpu(part->start_sect) == 1UL) @@ -163,7 +163,6 @@ pmbr_part_valid(struct partition *part, u64 lastlba) /** * is_pmbr_valid(): test Protective MBR for validity * @mbr: pointer to a legacy mbr structure - * @lastlba: last_lba for the whole device * * Description: Returns 1 if PMBR is valid, 0 otherwise. * Validity depends on two things: @@ -171,13 +170,13 @@ pmbr_part_valid(struct partition *part, u64 lastlba) * 2) One partition of type 0xEE is found */ static int -is_pmbr_valid(legacy_mbr *mbr, u64 lastlba) +is_pmbr_valid(legacy_mbr *mbr) { int i; if (!mbr || le16_to_cpu(mbr->signature) != MSDOS_MBR_SIGNATURE) return 0; for (i = 0; i < 4; i++) - if (pmbr_part_valid(&mbr->partition_record[i], lastlba)) + if (pmbr_part_valid(&mbr->partition_record[i])) return 1; return 0; } @@ -516,7 +515,7 @@ find_valid_gpt(struct block_device *bdev, gpt_header **gpt, gpt_entry **ptes) int good_pgpt = 0, good_agpt = 0, good_pmbr = 0; gpt_header *pgpt = NULL, *agpt = NULL; gpt_entry *pptes = NULL, *aptes = NULL; - legacy_mbr *legacymbr = NULL; + legacy_mbr *legacymbr; u64 lastlba; if (!bdev || !gpt || !ptes) return 0; @@ -528,9 +527,8 @@ find_valid_gpt(struct block_device *bdev, gpt_header **gpt, gpt_entry **ptes) if (legacymbr) { read_lba(bdev, 0, (u8 *) legacymbr, sizeof (*legacymbr)); - good_pmbr = is_pmbr_valid(legacymbr, lastlba); + good_pmbr = is_pmbr_valid(legacymbr); kfree(legacymbr); - legacymbr=NULL; } if (!good_pmbr) goto fail; diff --git a/fs/signalfd.c b/fs/signalfd.c new file mode 100644 index 00000000000..7cfeab412b4 --- /dev/null +++ b/fs/signalfd.c @@ -0,0 +1,349 @@ +/* + * fs/signalfd.c + * + * Copyright (C) 2003 Linus Torvalds + * + * Mon Mar 5, 2007: Davide Libenzi <davidel@xmailserver.org> + * Changed ->read() to return a siginfo strcture instead of signal number. + * Fixed locking in ->poll(). + * Added sighand-detach notification. + * Added fd re-use in sys_signalfd() syscall. + * Now using anonymous inode source. + * Thanks to Oleg Nesterov for useful code review and suggestions. + * More comments and suggestions from Arnd Bergmann. 
+ */ + +#include <linux/file.h> +#include <linux/poll.h> +#include <linux/init.h> +#include <linux/fs.h> +#include <linux/sched.h> +#include <linux/kernel.h> +#include <linux/signal.h> +#include <linux/list.h> +#include <linux/anon_inodes.h> +#include <linux/signalfd.h> + +struct signalfd_ctx { + struct list_head lnk; + wait_queue_head_t wqh; + sigset_t sigmask; + struct task_struct *tsk; +}; + +struct signalfd_lockctx { + struct task_struct *tsk; + unsigned long flags; +}; + +/* + * Tries to acquire the sighand lock. We do not increment the sighand + * use count, and we do not even pin the task struct, so we need to + * do it inside an RCU read lock, and we must be prepared for the + * ctx->tsk going to NULL (in signalfd_deliver()), and for the sighand + * being detached. We return 0 if the sighand has been detached, or + * 1 if we were able to pin the sighand lock. + */ +static int signalfd_lock(struct signalfd_ctx *ctx, struct signalfd_lockctx *lk) +{ + struct sighand_struct *sighand = NULL; + + rcu_read_lock(); + lk->tsk = rcu_dereference(ctx->tsk); + if (likely(lk->tsk != NULL)) + sighand = lock_task_sighand(lk->tsk, &lk->flags); + rcu_read_unlock(); + + if (sighand && !ctx->tsk) { + unlock_task_sighand(lk->tsk, &lk->flags); + sighand = NULL; + } + + return sighand != NULL; +} + +static void signalfd_unlock(struct signalfd_lockctx *lk) +{ + unlock_task_sighand(lk->tsk, &lk->flags); +} + +/* + * This must be called with the sighand lock held. + */ +void signalfd_deliver(struct task_struct *tsk, int sig) +{ + struct sighand_struct *sighand = tsk->sighand; + struct signalfd_ctx *ctx, *tmp; + + BUG_ON(!sig); + list_for_each_entry_safe(ctx, tmp, &sighand->signalfd_list, lnk) { + /* + * We use a negative signal value as a way to broadcast that the + * sighand has been orphaned, so that we can notify all the + * listeners about this. Remember the ctx->sigmask is inverted, + * so if the user is interested in a signal, that corresponding + * bit will be zero. + */ + if (sig < 0) { + if (ctx->tsk == tsk) { + ctx->tsk = NULL; + list_del_init(&ctx->lnk); + wake_up(&ctx->wqh); + } + } else { + if (!sigismember(&ctx->sigmask, sig)) + wake_up(&ctx->wqh); + } + } +} + +static void signalfd_cleanup(struct signalfd_ctx *ctx) +{ + struct signalfd_lockctx lk; + + /* + * This is tricky. If the sighand is gone, we do not need to remove + * context from the list, the list itself won't be there anymore. + */ + if (signalfd_lock(ctx, &lk)) { + list_del(&ctx->lnk); + signalfd_unlock(&lk); + } + kfree(ctx); +} + +static int signalfd_release(struct inode *inode, struct file *file) +{ + signalfd_cleanup(file->private_data); + return 0; +} + +static unsigned int signalfd_poll(struct file *file, poll_table *wait) +{ + struct signalfd_ctx *ctx = file->private_data; + unsigned int events = 0; + struct signalfd_lockctx lk; + + poll_wait(file, &ctx->wqh, wait); + + /* + * Let the caller get a POLLIN in this case, ala socket recv() when + * the peer disconnects. + */ + if (signalfd_lock(ctx, &lk)) { + if (next_signal(&lk.tsk->pending, &ctx->sigmask) > 0 || + next_signal(&lk.tsk->signal->shared_pending, + &ctx->sigmask) > 0) + events |= POLLIN; + signalfd_unlock(&lk); + } else + events |= POLLIN; + + return events; +} + +/* + * Copied from copy_siginfo_to_user() in kernel/signal.c + */ +static int signalfd_copyinfo(struct signalfd_siginfo __user *uinfo, + siginfo_t const *kinfo) +{ + long err; + + BUILD_BUG_ON(sizeof(struct signalfd_siginfo) != 128); + + /* + * Unused memebers should be zero ... 
+ */ + err = __clear_user(uinfo, sizeof(*uinfo)); + + /* + * If you change siginfo_t structure, please be sure + * this code is fixed accordingly. + */ + err |= __put_user(kinfo->si_signo, &uinfo->signo); + err |= __put_user(kinfo->si_errno, &uinfo->err); + err |= __put_user((short)kinfo->si_code, &uinfo->code); + switch (kinfo->si_code & __SI_MASK) { + case __SI_KILL: + err |= __put_user(kinfo->si_pid, &uinfo->pid); + err |= __put_user(kinfo->si_uid, &uinfo->uid); + break; + case __SI_TIMER: + err |= __put_user(kinfo->si_tid, &uinfo->tid); + err |= __put_user(kinfo->si_overrun, &uinfo->overrun); + err |= __put_user((long)kinfo->si_ptr, &uinfo->svptr); + break; + case __SI_POLL: + err |= __put_user(kinfo->si_band, &uinfo->band); + err |= __put_user(kinfo->si_fd, &uinfo->fd); + break; + case __SI_FAULT: + err |= __put_user((long)kinfo->si_addr, &uinfo->addr); +#ifdef __ARCH_SI_TRAPNO + err |= __put_user(kinfo->si_trapno, &uinfo->trapno); +#endif + break; + case __SI_CHLD: + err |= __put_user(kinfo->si_pid, &uinfo->pid); + err |= __put_user(kinfo->si_uid, &uinfo->uid); + err |= __put_user(kinfo->si_status, &uinfo->status); + err |= __put_user(kinfo->si_utime, &uinfo->utime); + err |= __put_user(kinfo->si_stime, &uinfo->stime); + break; + case __SI_RT: /* This is not generated by the kernel as of now. */ + case __SI_MESGQ: /* But this is */ + err |= __put_user(kinfo->si_pid, &uinfo->pid); + err |= __put_user(kinfo->si_uid, &uinfo->uid); + err |= __put_user((long)kinfo->si_ptr, &uinfo->svptr); + break; + default: /* this is just in case for now ... */ + err |= __put_user(kinfo->si_pid, &uinfo->pid); + err |= __put_user(kinfo->si_uid, &uinfo->uid); + break; + } + + return err ? -EFAULT: sizeof(*uinfo); +} + +/* + * Returns either the size of a "struct signalfd_siginfo", or zero if the + * sighand we are attached to, has been orphaned. The "count" parameter + * must be at least the size of a "struct signalfd_siginfo". + */ +static ssize_t signalfd_read(struct file *file, char __user *buf, size_t count, + loff_t *ppos) +{ + struct signalfd_ctx *ctx = file->private_data; + ssize_t res = 0; + int locked, signo; + siginfo_t info; + struct signalfd_lockctx lk; + DECLARE_WAITQUEUE(wait, current); + + if (count < sizeof(struct signalfd_siginfo)) + return -EINVAL; + locked = signalfd_lock(ctx, &lk); + if (!locked) + return 0; + res = -EAGAIN; + signo = dequeue_signal(lk.tsk, &ctx->sigmask, &info); + if (signo == 0 && !(file->f_flags & O_NONBLOCK)) { + add_wait_queue(&ctx->wqh, &wait); + for (;;) { + set_current_state(TASK_INTERRUPTIBLE); + signo = dequeue_signal(lk.tsk, &ctx->sigmask, &info); + if (signo != 0) + break; + if (signal_pending(current)) { + res = -ERESTARTSYS; + break; + } + signalfd_unlock(&lk); + schedule(); + locked = signalfd_lock(ctx, &lk); + if (unlikely(!locked)) { + /* + * Let the caller read zero byte, ala socket + * recv() when the peer disconnect. This test + * must be done before doing a dequeue_signal(), + * because if the sighand has been orphaned, + * the dequeue_signal() call is going to crash. 
+ */
+ res = 0;
+ break;
+ }
+ }
+ remove_wait_queue(&ctx->wqh, &wait);
+ __set_current_state(TASK_RUNNING);
+ }
+ if (likely(locked))
+ signalfd_unlock(&lk);
+ if (likely(signo))
+ res = signalfd_copyinfo((struct signalfd_siginfo __user *) buf,
+     &info);
+
+ return res;
+}
+
+static const struct file_operations signalfd_fops = {
+ .release = signalfd_release,
+ .poll = signalfd_poll,
+ .read = signalfd_read,
+};
+
+/*
+ * Create a file descriptor that is associated with our signal
+ * state. We can pass it around to others if we want to, but
+ * it will always be _our_ signal state.
+ */
+asmlinkage long sys_signalfd(int ufd, sigset_t __user *user_mask, size_t sizemask)
+{
+ int error;
+ sigset_t sigmask;
+ struct signalfd_ctx *ctx;
+ struct sighand_struct *sighand;
+ struct file *file;
+ struct inode *inode;
+ struct signalfd_lockctx lk;
+
+ if (sizemask != sizeof(sigset_t) ||
+     copy_from_user(&sigmask, user_mask, sizeof(sigmask)))
+ return error = -EINVAL;
+ sigdelsetmask(&sigmask, sigmask(SIGKILL) | sigmask(SIGSTOP));
+ signotset(&sigmask);
+
+ if (ufd == -1) {
+ ctx = kmalloc(sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+
+ init_waitqueue_head(&ctx->wqh);
+ ctx->sigmask = sigmask;
+ ctx->tsk = current;
+
+ sighand = current->sighand;
+ /*
+ * Add this fd to the list of signal listeners.
+ */
+ spin_lock_irq(&sighand->siglock);
+ list_add_tail(&ctx->lnk, &sighand->signalfd_list);
+ spin_unlock_irq(&sighand->siglock);
+
+ /*
+ * When we call this, the initialization must be complete, since
+ * anon_inode_getfd() will install the fd.
+ */
+ error = anon_inode_getfd(&ufd, &inode, &file, "[signalfd]",
+     &signalfd_fops, ctx);
+ if (error)
+ goto err_fdalloc;
+ } else {
+ file = fget(ufd);
+ if (!file)
+ return -EBADF;
+ ctx = file->private_data;
+ if (file->f_op != &signalfd_fops) {
+ fput(file);
+ return -EINVAL;
+ }
+ /*
+ * We need to be prepared for the fact that the sighand this fd
+ * is attached to, has been detached. In that case signalfd_lock()
+ * will return 0, and we'll just skip setting the new mask.
+ */
+ if (signalfd_lock(ctx, &lk)) {
+ ctx->sigmask = sigmask;
+ signalfd_unlock(&lk);
+ }
+ wake_up(&ctx->wqh);
+ fput(file);
+ }
+
+ return ufd;
+
+err_fdalloc:
+ signalfd_cleanup(ctx);
+ return error;
+}
+
diff --git a/fs/timerfd.c b/fs/timerfd.c
new file mode 100644
index 00000000000..e329e37f15a
--- /dev/null
+++ b/fs/timerfd.c
@@ -0,0 +1,227 @@
+/*
+ * fs/timerfd.c
+ *
+ * Copyright (C) 2007 Davide Libenzi <davidel@xmailserver.org>
+ *
+ *
+ * Thanks to Thomas Gleixner for code reviews and useful comments.
+ *
+ */
+
+#include <linux/file.h>
+#include <linux/poll.h>
+#include <linux/init.h>
+#include <linux/fs.h>
+#include <linux/sched.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/spinlock.h>
+#include <linux/time.h>
+#include <linux/hrtimer.h>
+#include <linux/anon_inodes.h>
+#include <linux/timerfd.h>
+
+struct timerfd_ctx {
+ struct hrtimer tmr;
+ ktime_t tintv;
+ spinlock_t lock;
+ wait_queue_head_t wqh;
+ int expired;
+};
+
+/*
+ * This gets called when the timer event triggers. We set the "expired"
+ * flag, but we do not re-arm the timer (when necessary, i.e.
+ * tintv.tv64 != 0) until the timer is read.
+ */ +static enum hrtimer_restart timerfd_tmrproc(struct hrtimer *htmr) +{ + struct timerfd_ctx *ctx = container_of(htmr, struct timerfd_ctx, tmr); + unsigned long flags; + + spin_lock_irqsave(&ctx->lock, flags); + ctx->expired = 1; + wake_up_locked(&ctx->wqh); + spin_unlock_irqrestore(&ctx->lock, flags); + + return HRTIMER_NORESTART; +} + +static void timerfd_setup(struct timerfd_ctx *ctx, int clockid, int flags, + const struct itimerspec *ktmr) +{ + enum hrtimer_mode htmode; + ktime_t texp; + + htmode = (flags & TFD_TIMER_ABSTIME) ? + HRTIMER_MODE_ABS: HRTIMER_MODE_REL; + + texp = timespec_to_ktime(ktmr->it_value); + ctx->expired = 0; + ctx->tintv = timespec_to_ktime(ktmr->it_interval); + hrtimer_init(&ctx->tmr, clockid, htmode); + ctx->tmr.expires = texp; + ctx->tmr.function = timerfd_tmrproc; + if (texp.tv64 != 0) + hrtimer_start(&ctx->tmr, texp, htmode); +} + +static int timerfd_release(struct inode *inode, struct file *file) +{ + struct timerfd_ctx *ctx = file->private_data; + + hrtimer_cancel(&ctx->tmr); + kfree(ctx); + return 0; +} + +static unsigned int timerfd_poll(struct file *file, poll_table *wait) +{ + struct timerfd_ctx *ctx = file->private_data; + unsigned int events = 0; + unsigned long flags; + + poll_wait(file, &ctx->wqh, wait); + + spin_lock_irqsave(&ctx->lock, flags); + if (ctx->expired) + events |= POLLIN; + spin_unlock_irqrestore(&ctx->lock, flags); + + return events; +} + +static ssize_t timerfd_read(struct file *file, char __user *buf, size_t count, + loff_t *ppos) +{ + struct timerfd_ctx *ctx = file->private_data; + ssize_t res; + u32 ticks = 0; + DECLARE_WAITQUEUE(wait, current); + + if (count < sizeof(ticks)) + return -EINVAL; + spin_lock_irq(&ctx->lock); + res = -EAGAIN; + if (!ctx->expired && !(file->f_flags & O_NONBLOCK)) { + __add_wait_queue(&ctx->wqh, &wait); + for (res = 0;;) { + set_current_state(TASK_INTERRUPTIBLE); + if (ctx->expired) { + res = 0; + break; + } + if (signal_pending(current)) { + res = -ERESTARTSYS; + break; + } + spin_unlock_irq(&ctx->lock); + schedule(); + spin_lock_irq(&ctx->lock); + } + __remove_wait_queue(&ctx->wqh, &wait); + __set_current_state(TASK_RUNNING); + } + if (ctx->expired) { + ctx->expired = 0; + if (ctx->tintv.tv64 != 0) { + /* + * If tintv.tv64 != 0, this is a periodic timer that + * needs to be re-armed. We avoid doing it in the timer + * callback to avoid DoS attacks specifying a very + * short timer period. + */ + ticks = (u32) + hrtimer_forward(&ctx->tmr, + hrtimer_cb_get_time(&ctx->tmr), + ctx->tintv); + hrtimer_restart(&ctx->tmr); + } else + ticks = 1; + } + spin_unlock_irq(&ctx->lock); + if (ticks) + res = put_user(ticks, buf) ? 
-EFAULT: sizeof(ticks);
+ return res;
+}
+
+static const struct file_operations timerfd_fops = {
+ .release = timerfd_release,
+ .poll = timerfd_poll,
+ .read = timerfd_read,
+};
+
+asmlinkage long sys_timerfd(int ufd, int clockid, int flags,
+       const struct itimerspec __user *utmr)
+{
+ int error;
+ struct timerfd_ctx *ctx;
+ struct file *file;
+ struct inode *inode;
+ struct itimerspec ktmr;
+
+ if (copy_from_user(&ktmr, utmr, sizeof(ktmr)))
+ return -EFAULT;
+
+ if (clockid != CLOCK_MONOTONIC &&
+     clockid != CLOCK_REALTIME)
+ return -EINVAL;
+ if (!timespec_valid(&ktmr.it_value) ||
+     !timespec_valid(&ktmr.it_interval))
+ return -EINVAL;
+
+ if (ufd == -1) {
+ ctx = kmalloc(sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+
+ init_waitqueue_head(&ctx->wqh);
+ spin_lock_init(&ctx->lock);
+
+ timerfd_setup(ctx, clockid, flags, &ktmr);
+
+ /*
+ * When we call this, the initialization must be complete, since
+ * anon_inode_getfd() will install the fd.
+ */
+ error = anon_inode_getfd(&ufd, &inode, &file, "[timerfd]",
+     &timerfd_fops, ctx);
+ if (error)
+ goto err_tmrcancel;
+ } else {
+ file = fget(ufd);
+ if (!file)
+ return -EBADF;
+ ctx = file->private_data;
+ if (file->f_op != &timerfd_fops) {
+ fput(file);
+ return -EINVAL;
+ }
+ /*
+ * We need to stop the existing timer before reprogramming
+ * it to the new values.
+ */
+ for (;;) {
+ spin_lock_irq(&ctx->lock);
+ if (hrtimer_try_to_cancel(&ctx->tmr) >= 0)
+ break;
+ spin_unlock_irq(&ctx->lock);
+ cpu_relax();
+ }
+ /*
+ * Re-program the timer to the new value ...
+ */
+ timerfd_setup(ctx, clockid, flags, &ktmr);
+
+ spin_unlock_irq(&ctx->lock);
+ fput(file);
+ }
+
+ return ufd;
+
+err_tmrcancel:
+ hrtimer_cancel(&ctx->tmr);
+ kfree(ctx);
+ return error;
+}
+
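The eventpoll rework above changes only where the epoll file comes from (an anon_inodes file instead of a private eventpollfs mount); the userspace contract of epoll_create(2), epoll_ctl(2) and epoll_wait(2) is unchanged, so the long-standing <sys/epoll.h> wrappers keep working. A minimal round trip, watching stdin for readability:

#include <sys/epoll.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	struct epoll_event ev, events[8];
	int epfd, n, i;

	epfd = epoll_create(8);	/* size is only a hint */
	if (epfd < 0) {
		perror("epoll_create");
		return 1;
	}
	ev.events = EPOLLIN;
	ev.data.fd = STDIN_FILENO;
	if (epoll_ctl(epfd, EPOLL_CTL_ADD, STDIN_FILENO, &ev) < 0) {
		perror("epoll_ctl");
		return 1;
	}
	n = epoll_wait(epfd, events, 8, -1);	/* block until something is ready */
	for (i = 0; i < n; i++)
		if (events[i].data.fd == STDIN_FILENO)
			printf("stdin is readable\n");
	close(epfd);
	return 0;
}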
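sys_signalfd() has no C library wrapper at this stage, so it has to be reached through syscall(2). The sketch below is illustrative only: it assumes __NR_signalfd is wired up for the build architecture, that <linux/signalfd.h> exposes struct signalfd_siginfo to userspace, and that the kernel-side sigset_t is 8 bytes (true on x86-64; glibc's own sigset_t is larger, which is why the size is passed explicitly). The signal must be blocked first, or it is delivered the usual way before the descriptor can report it:

#define _GNU_SOURCE
#include <sys/syscall.h>
#include <linux/signalfd.h>
#include <signal.h>
#include <unistd.h>
#include <stdio.h>

int main(void)
{
	sigset_t mask;
	struct signalfd_siginfo info;
	int sfd;

	sigemptyset(&mask);
	sigaddset(&mask, SIGINT);
	sigprocmask(SIG_BLOCK, &mask, NULL);	/* keep normal delivery out of the way */

	/* ufd == -1 asks sys_signalfd() for a fresh descriptor */
	sfd = syscall(__NR_signalfd, -1, &mask, 8 /* assumed kernel sigset size */);
	if (sfd < 0) {
		perror("signalfd");
		return 1;
	}
	/* reads must be at least sizeof(struct signalfd_siginfo) */
	if (read(sfd, &info, sizeof(info)) == sizeof(info))
		printf("got signal %d\n", (int)info.signo);
	close(sfd);
	return 0;
}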
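The original sys_timerfd() shown above takes the clock, the flags and the itimerspec in a single call (the later timerfd_create()/timerfd_settime() split does not exist in this interface), and timerfd_read() hands back a 32-bit expiry count. A sketch under the same assumptions as before (no wrapper, __NR_timerfd defined for the target architecture):

#define _GNU_SOURCE
#include <sys/syscall.h>
#include <unistd.h>
#include <stdio.h>
#include <stdint.h>
#include <time.h>

int main(void)
{
	struct itimerspec its = {
		.it_value    = { .tv_sec = 1, .tv_nsec = 0 },	/* first expiry */
		.it_interval = { .tv_sec = 1, .tv_nsec = 0 },	/* then periodic */
	};
	uint32_t ticks;
	int tfd;

	/* ufd == -1 creates a new timer descriptor; 0 means relative times */
	tfd = syscall(__NR_timerfd, -1, CLOCK_MONOTONIC, 0, &its);
	if (tfd < 0) {
		perror("timerfd");
		return 1;
	}
	/* blocks until the first expiry, then reports how many fired */
	if (read(tfd, &ticks, sizeof(ticks)) == sizeof(ticks))
		printf("timer expired %u times\n", (unsigned)ticks);
	close(tfd);
	return 0;
}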
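sys_eventfd() rounds out the set: the descriptor wraps a counter seeded with "count"; writes add to it and a read fetches and resets it, waking poll(2) waiters in between. The 8-byte transfer size below matches the 64-bit counter kept by fs/eventfd.c earlier in this patch; __NR_eventfd is again assumed to be present:

#define _GNU_SOURCE
#include <sys/syscall.h>
#include <unistd.h>
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t val = 3;
	int efd;

	efd = syscall(__NR_eventfd, 0 /* initial counter value */);
	if (efd < 0) {
		perror("eventfd");
		return 1;
	}
	write(efd, &val, sizeof(val));	/* counter += 3 */
	write(efd, &val, sizeof(val));	/* counter += 3 */
	if (read(efd, &val, sizeof(val)) == sizeof(val))
		printf("counter was %llu\n", (unsigned long long)val);	/* 6 */
	close(efd);
	return 0;
}

Because all of these descriptors implement ->poll(), each of them can in turn be registered in an epoll set, which is the point of routing them through the shared anon_inodes source.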