author      David Chinner <dgc@sgi.com>    2007-02-10 18:36:17 +1100
committer   Tim Shimmin <tes@sgi.com>      2007-02-10 18:36:17 +1100
commit      dbcabad19aa91dc9bc7176fd2853fa74f724cd2f
tree        b65139c6c19541503444817af740ba265f8b838f /fs
parent      20f4ebf2bf2f57c1a9abb3655391336cc90314b3
[XFS] Fix block reservation mechanism.
The block reservation mechanism has been broken since the per-cpu
superblock counters were introduced. Make the block reservation code work
with the per-cpu counters by syncing the counters, snapshotting the amount
of available space and then doing a modification of the counter state
according to the result. Continue in a loop until we either have no space
available or we reserve some space.
SGI-PV: 956323
SGI-Modid: xfs-linux-melb:xfs-kern:27895a
Signed-off-by: David Chinner <dgc@sgi.com>
Signed-off-by: Christoph Hellwig <hch@infradead.org>
Signed-off-by: Tim Shimmin <tes@sgi.com>
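
The retry scheme described above can be illustrated with a small, self-contained C sketch. The struct pool, mod_free_blocks() and reserve_blocks() names below are stand-ins invented for illustration, not the real XFS interfaces; the actual patch uses XFS_SB_LOCK(), xfs_icsb_sync_counters_flags() and xfs_mod_incore_sb(), as shown in the diff that follows.

/*
 * Simplified sketch of the snapshot-then-modify-and-retry pattern:
 * take a lock, snapshot how much space looks free, drop the lock,
 * then try to apply the change through a call that may fail with
 * ENOSPC if the counters moved in the meantime, and loop until we
 * either reserve something or nothing is left.
 */
#include <errno.h>
#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

struct pool {
	pthread_mutex_t	lock;
	int64_t		free_blocks;	/* stand-in for sb_fdblocks */
};

/* Stand-in for xfs_mod_incore_sb(): apply delta or fail with ENOSPC. */
static int mod_free_blocks(struct pool *p, int64_t delta)
{
	int ret = 0;

	pthread_mutex_lock(&p->lock);
	if (p->free_blocks + delta < 0)
		ret = ENOSPC;		/* space vanished since our snapshot */
	else
		p->free_blocks += delta;
	pthread_mutex_unlock(&p->lock);
	return ret;
}

/* Reserve up to 'request' blocks; returns how many were actually taken. */
static int64_t reserve_blocks(struct pool *p, int64_t request)
{
	int64_t want;

retry:
	/* Snapshot the amount of available space under the lock. */
	pthread_mutex_lock(&p->lock);
	want = request < p->free_blocks ? request : p->free_blocks;
	pthread_mutex_unlock(&p->lock);

	if (want <= 0)
		return 0;	/* no space available at all */

	/*
	 * Modify the counter according to the snapshot. ENOSPC here
	 * means the free count changed underneath us, so recompute
	 * and try again rather than giving up.
	 */
	if (mod_free_blocks(p, -want) == ENOSPC)
		goto retry;

	return want;
}

int main(void)
{
	struct pool p = { .lock = PTHREAD_MUTEX_INITIALIZER, .free_blocks = 100 };

	printf("reserved %lld of 64 requested blocks\n",
	       (long long)reserve_blocks(&p, 64));
	return 0;
}

In the patch itself the "snapshot" step is xfs_icsb_sync_counters_flags() under XFS_SB_LOCK() and the "modify" step is xfs_mod_incore_sb(), whose ENOSPC return sends the code back to the retry: label.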
Diffstat (limited to 'fs')
-rw-r--r--	fs/xfs/xfs_fsops.c	| 54
-rw-r--r--	fs/xfs/xfs_mount.c	| 16
-rw-r--r--	fs/xfs/xfs_mount.h	|  2
-rw-r--r--	fs/xfs/xfs_vfsops.c	|  2
4 files changed, 54 insertions, 20 deletions
diff --git a/fs/xfs/xfs_fsops.c b/fs/xfs/xfs_fsops.c
index c064e72ada9..bfde9e6d67e 100644
--- a/fs/xfs/xfs_fsops.c
+++ b/fs/xfs/xfs_fsops.c
@@ -460,7 +460,7 @@ xfs_fs_counts(
 {
 	unsigned long	s;
 
-	xfs_icsb_sync_counters_lazy(mp);
+	xfs_icsb_sync_counters_flags(mp, XFS_ICSB_LAZY_COUNT);
 	s = XFS_SB_LOCK(mp);
 	cnt->freedata = mp->m_sb.sb_fdblocks - XFS_ALLOC_SET_ASIDE(mp);
 	cnt->freertx = mp->m_sb.sb_frextents;
@@ -491,7 +491,7 @@ xfs_reserve_blocks(
 	__uint64_t		*inval,
 	xfs_fsop_resblks_t	*outval)
 {
-	__int64_t		lcounter, delta;
+	__int64_t		lcounter, delta, fdblks_delta;
 	__uint64_t		request;
 	unsigned long		s;
 
@@ -504,17 +504,35 @@ xfs_reserve_blocks(
 	}
 
 	request = *inval;
+
+	/*
+	 * With per-cpu counters, this becomes an interesting
+	 * problem. we needto work out if we are freeing or allocation
+	 * blocks first, then we can do the modification as necessary.
+	 *
+	 * We do this under the XFS_SB_LOCK so that if we are near
+	 * ENOSPC, we will hold out any changes while we work out
+	 * what to do. This means that the amount of free space can
+	 * change while we do this, so we need to retry if we end up
+	 * trying to reserve more space than is available.
+	 *
+	 * We also use the xfs_mod_incore_sb() interface so that we
+	 * don't have to care about whether per cpu counter are
+	 * enabled, disabled or even compiled in....
+	 */
+retry:
 	s = XFS_SB_LOCK(mp);
+	xfs_icsb_sync_counters_flags(mp, XFS_ICSB_SB_LOCKED);
 
 	/*
 	 * If our previous reservation was larger than the current value,
 	 * then move any unused blocks back to the free pool.
 	 */
-
+	fdblks_delta = 0;
 	if (mp->m_resblks > request) {
 		lcounter = mp->m_resblks_avail - request;
 		if (lcounter > 0) {		/* release unused blocks */
-			mp->m_sb.sb_fdblocks += lcounter;
+			fdblks_delta = lcounter;
 			mp->m_resblks_avail -= lcounter;
 		}
 		mp->m_resblks = request;
@@ -522,24 +540,50 @@ xfs_reserve_blocks(
 		__int64_t	free;
 
 		free = mp->m_sb.sb_fdblocks - XFS_ALLOC_SET_ASIDE(mp);
+		if (!free)
+			goto out; /* ENOSPC and fdblks_delta = 0 */
+
 		delta = request - mp->m_resblks;
 		lcounter = free - delta;
 		if (lcounter < 0) {
 			/* We can't satisfy the request, just get what we can */
 			mp->m_resblks += free;
 			mp->m_resblks_avail += free;
+			fdblks_delta = -free;
 			mp->m_sb.sb_fdblocks = XFS_ALLOC_SET_ASIDE(mp);
 		} else {
+			fdblks_delta = -delta;
 			mp->m_sb.sb_fdblocks =
 				lcounter + XFS_ALLOC_SET_ASIDE(mp);
 			mp->m_resblks = request;
 			mp->m_resblks_avail += delta;
 		}
 	}
-
+out:
 	outval->resblks = mp->m_resblks;
 	outval->resblks_avail = mp->m_resblks_avail;
 	XFS_SB_UNLOCK(mp, s);
+
+	if (fdblks_delta) {
+		/*
+		 * If we are putting blocks back here, m_resblks_avail is
+		 * already at it's max so this will put it in the free pool.
+		 *
+		 * If we need space, we'll either succeed in getting it
+		 * from the free block count or we'll get an enospc. If
+		 * we get a ENOSPC, it means things changed while we were
+		 * calculating fdblks_delta and so we should try again to
+		 * see if there is anything left to reserve.
+		 *
+		 * Don't set the reserved flag here - we don't want to reserve
+		 * the extra reserve blocks from the reserve.....
+		 */
+		int error;
+		error = xfs_mod_incore_sb(mp, XFS_SBS_FDBLOCKS, fdblks_delta, 0);
+		if (error == ENOSPC)
+			goto retry;
+	}
+
 	return 0;
 }
diff --git a/fs/xfs/xfs_mount.c b/fs/xfs/xfs_mount.c
index 0df07c1df76..30a5781a46d 100644
--- a/fs/xfs/xfs_mount.c
+++ b/fs/xfs/xfs_mount.c
@@ -1979,8 +1979,8 @@ xfs_icsb_enable_counter(
 	xfs_icsb_unlock_all_counters(mp);
 }
 
-STATIC void
-xfs_icsb_sync_counters_int(
+void
+xfs_icsb_sync_counters_flags(
 	xfs_mount_t	*mp,
 	int		flags)
 {
@@ -2012,17 +2012,7 @@ STATIC void
 xfs_icsb_sync_counters(
 	xfs_mount_t	*mp)
 {
-	xfs_icsb_sync_counters_int(mp, 0);
-}
-
-/*
- * lazy addition used for things like df, background sb syncs, etc
- */
-void
-xfs_icsb_sync_counters_lazy(
-	xfs_mount_t	*mp)
-{
-	xfs_icsb_sync_counters_int(mp, XFS_ICSB_LAZY_COUNT);
+	xfs_icsb_sync_counters_flags(mp, 0);
 }
 
 /*
diff --git a/fs/xfs/xfs_mount.h b/fs/xfs/xfs_mount.h
index 9a8e7151b65..b65dae61eb8 100644
--- a/fs/xfs/xfs_mount.h
+++ b/fs/xfs/xfs_mount.h
@@ -307,7 +307,7 @@ typedef struct xfs_icsb_cnts {
 #define XFS_ICSB_LAZY_COUNT	(1 << 1)	/* accuracy not needed */
 
 extern int	xfs_icsb_init_counters(struct xfs_mount *);
-extern void	xfs_icsb_sync_counters_lazy(struct xfs_mount *);
+extern void	xfs_icsb_sync_counters_flags(struct xfs_mount *, int);
 
 #else
 #define xfs_icsb_init_counters(mp)	(0)
diff --git a/fs/xfs/xfs_vfsops.c b/fs/xfs/xfs_vfsops.c
index aec4e8d9cba..f5ea74b999b 100644
--- a/fs/xfs/xfs_vfsops.c
+++ b/fs/xfs/xfs_vfsops.c
@@ -806,7 +806,7 @@ xfs_statvfs(
 
 	statp->f_type = XFS_SB_MAGIC;
 
-	xfs_icsb_sync_counters_lazy(mp);
+	xfs_icsb_sync_counters_flags(mp, XFS_ICSB_LAZY_COUNT);
 	s = XFS_SB_LOCK(mp);
 	statp->f_bsize = sbp->sb_blocksize;
 	lsize = sbp->sb_logstart ? sbp->sb_logblocks : 0;
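
For context on how this code path is normally reached: user space drives xfs_reserve_blocks() through the XFS_IOC_GET_RESBLKS/XFS_IOC_SET_RESBLKS ioctls. The following is a rough sketch, not part of the patch, assuming the xfsprogs development headers are installed (the <xfs/xfs.h> include path and required privileges may differ by distribution).

/*
 * Rough user-space illustration: ask XFS to keep 1024 blocks in its
 * reserve pool for the filesystem mounted at the given path.
 * Needs CAP_SYS_ADMIN; error handling is minimal.
 */
#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <xfs/xfs.h>	/* xfs_fsop_resblks_t, XFS_IOC_SET_RESBLKS (xfsprogs headers) */

int main(int argc, char **argv)
{
	xfs_fsop_resblks_t res = { .resblks = 1024 };
	int fd;

	if (argc != 2) {
		fprintf(stderr, "usage: %s <xfs-mountpoint>\n", argv[0]);
		return 1;
	}

	fd = open(argv[1], O_RDONLY);
	if (fd < 0) {
		perror("open");
		return 1;
	}

	/* This ioctl ends up in xfs_reserve_blocks(), patched above. */
	if (ioctl(fd, XFS_IOC_SET_RESBLKS, &res) < 0) {
		perror("XFS_IOC_SET_RESBLKS");
		close(fd);
		return 1;
	}

	printf("reserved: %llu blocks, available: %llu\n",
	       (unsigned long long)res.resblks,
	       (unsigned long long)res.resblks_avail);
	close(fd);
	return 0;
}

On return the kernel fills the struct with the reservation it actually granted, which is where the retry loop matters: near ENOSPC the granted amount can be smaller than what was requested.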