author     Richard Knutsson <ricknu-0@student.ltu.se>	2006-09-30 23:27:12 -0700
committer  Linus Torvalds <torvalds@g5.osdl.org>	2006-10-01 00:39:19 -0700
commit     c49c31115067bc7c9a51ffdc735a515151dfa3eb (patch)
tree       1ffeddacb5cf06fbdb8a1fc385eb9ee5f1c3174c /fs/ntfs/file.c
parent     6e21828743247270d09a86756a0c11702500dbfb (diff)
[PATCH] fs/ntfs: Conversion to generic boolean
Conversion of booleans to: generic-boolean.patch (2006-08-23)

Signed-off-by: Richard Knutsson <ricknu-0@student.ltu.se>
Signed-off-by: Anton Altaparmakov <aia21@cantab.net>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
Diffstat (limited to 'fs/ntfs/file.c')
-rw-r--r--	fs/ntfs/file.c	34
1 file changed, 17 insertions(+), 17 deletions(-)
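For reference, a minimal standalone sketch (not part of this patch; the helper names are hypothetical) of the pattern the commit applies throughout fs/ntfs/file.c: the NTFS-private BOOL/TRUE/FALSE definitions give way to the generic boolean type and its true/false constants.

/*
 * Illustrative sketch only; compiles standalone with <stdbool.h>.
 * In-kernel code gets bool/true/false from <linux/types.h> instead.
 * The BOOL typedef mirrors the kind of filesystem-private definition
 * this series removes (assumed shape, not copied from the tree).
 */
#include <stdbool.h>

typedef enum { FALSE = 0, TRUE = 1 } BOOL;	/* old, private type */

/* Before: callers carry the private type around. */
static BOOL old_is_partial(int uptodate)
{
	if (!uptodate)
		return TRUE;
	return FALSE;
}

/*
 * After: the generic boolean, as used for rl_write_locked, was_hole,
 * is_retry and partial in the hunks below.
 */
static bool new_is_partial(int uptodate)
{
	return !uptodate;
}

The diff itself is mechanical: BOOL becomes bool, TRUE becomes true, FALSE becomes false, with no change in control flow.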
diff --git a/fs/ntfs/file.c b/fs/ntfs/file.c
index 2e42c2dcae1..585a79d39c9 100644
--- a/fs/ntfs/file.c
+++ b/fs/ntfs/file.c
@@ -509,7 +509,7 @@ static int ntfs_prepare_pages_for_non_resident_write(struct page **pages,
u32 attr_rec_len = 0;
unsigned blocksize, u;
int err, mp_size;
- BOOL rl_write_locked, was_hole, is_retry;
+ bool rl_write_locked, was_hole, is_retry;
unsigned char blocksize_bits;
struct {
u8 runlist_merged:1;
@@ -543,13 +543,13 @@ static int ntfs_prepare_pages_for_non_resident_write(struct page **pages,
return -ENOMEM;
}
} while (++u < nr_pages);
- rl_write_locked = FALSE;
+ rl_write_locked = false;
rl = NULL;
err = 0;
vcn = lcn = -1;
vcn_len = 0;
lcn_block = -1;
- was_hole = FALSE;
+ was_hole = false;
cpos = pos >> vol->cluster_size_bits;
end = pos + bytes;
cend = (end + vol->cluster_size - 1) >> vol->cluster_size_bits;
@@ -760,7 +760,7 @@ map_buffer_cached:
}
continue;
}
- is_retry = FALSE;
+ is_retry = false;
if (!rl) {
down_read(&ni->runlist.lock);
retry_remap:
@@ -776,7 +776,7 @@ retry_remap:
* Successful remap, setup the map cache and
* use that to deal with the buffer.
*/
- was_hole = FALSE;
+ was_hole = false;
vcn = bh_cpos;
vcn_len = rl[1].vcn - vcn;
lcn_block = lcn << (vol->cluster_size_bits -
@@ -792,7 +792,7 @@ retry_remap:
if (likely(vcn + vcn_len >= cend)) {
if (rl_write_locked) {
up_write(&ni->runlist.lock);
- rl_write_locked = FALSE;
+ rl_write_locked = false;
} else
up_read(&ni->runlist.lock);
rl = NULL;
@@ -818,13 +818,13 @@ retry_remap:
*/
up_read(&ni->runlist.lock);
down_write(&ni->runlist.lock);
- rl_write_locked = TRUE;
+ rl_write_locked = true;
goto retry_remap;
}
err = ntfs_map_runlist_nolock(ni, bh_cpos,
NULL);
if (likely(!err)) {
- is_retry = TRUE;
+ is_retry = true;
goto retry_remap;
}
/*
@@ -903,7 +903,7 @@ rl_not_mapped_enoent:
if (!rl_write_locked) {
up_read(&ni->runlist.lock);
down_write(&ni->runlist.lock);
- rl_write_locked = TRUE;
+ rl_write_locked = true;
goto retry_remap;
}
/* Find the previous last allocated cluster. */
@@ -917,7 +917,7 @@ rl_not_mapped_enoent:
}
}
rl2 = ntfs_cluster_alloc(vol, bh_cpos, 1, lcn, DATA_ZONE,
- FALSE);
+ false);
if (IS_ERR(rl2)) {
err = PTR_ERR(rl2);
ntfs_debug("Failed to allocate cluster, error code %i.",
@@ -1093,7 +1093,7 @@ rl_not_mapped_enoent:
status.mft_attr_mapped = 0;
status.mp_rebuilt = 0;
/* Setup the map cache and use that to deal with the buffer. */
- was_hole = TRUE;
+ was_hole = true;
vcn = bh_cpos;
vcn_len = 1;
lcn_block = lcn << (vol->cluster_size_bits - blocksize_bits);
@@ -1105,7 +1105,7 @@ rl_not_mapped_enoent:
*/
if (likely(vcn + vcn_len >= cend)) {
up_write(&ni->runlist.lock);
- rl_write_locked = FALSE;
+ rl_write_locked = false;
rl = NULL;
}
goto map_buffer_cached;
@@ -1117,7 +1117,7 @@ rl_not_mapped_enoent:
if (likely(!err)) {
if (unlikely(rl_write_locked)) {
up_write(&ni->runlist.lock);
- rl_write_locked = FALSE;
+ rl_write_locked = false;
} else if (unlikely(rl))
up_read(&ni->runlist.lock);
rl = NULL;
@@ -1528,19 +1528,19 @@ static inline int ntfs_commit_pages_after_non_resident_write(
do {
s64 bh_pos;
struct page *page;
- BOOL partial;
+ bool partial;
page = pages[u];
bh_pos = (s64)page->index << PAGE_CACHE_SHIFT;
bh = head = page_buffers(page);
- partial = FALSE;
+ partial = false;
do {
s64 bh_end;
bh_end = bh_pos + blocksize;
if (bh_end <= pos || bh_pos >= end) {
if (!buffer_uptodate(bh))
- partial = TRUE;
+ partial = true;
} else {
set_buffer_uptodate(bh);
mark_buffer_dirty(bh);
@@ -1997,7 +1997,7 @@ static ssize_t ntfs_file_buffered_write(struct kiocb *iocb,
*/
down_read(&ni->runlist.lock);
lcn = ntfs_attr_vcn_to_lcn_nolock(ni, pos >>
- vol->cluster_size_bits, FALSE);
+ vol->cluster_size_bits, false);
up_read(&ni->runlist.lock);
if (unlikely(lcn < LCN_HOLE)) {
status = -EIO;