author		David Chinner <david@fromorbit.com>	2008-10-30 17:39:12 +1100
committer	Lachlan McIlroy <lachlan@sgi.com>	2008-10-30 17:39:12 +1100
commit		7b2e2a31f5c23b5f028af8c895137b4c512cc1c8 (patch)
tree		c37aa7117a16ebcd01d0ac2687b944e5dd8a7361 /fs
parent		5b00f14fbd60d42441f78c0e414a539cbfba5cb9 (diff)
[XFS] Allow 64 bit machines to avoid the AIL lock during flushes
When copying lsn's from the log item to the inode or dquot flush lsn, we currently grab the AIL lock. We do this because the LSN is a 64 bit quantity and it needs to be read atomically. The lock is used to guarantee atomicity for 32 bit platforms.

Make the LSN copying a small function, and make the function used conditional on BITS_PER_LONG so that 64 bit machines don't need to take the AIL lock in these places.

SGI-PV: 988143
SGI-Modid: xfs-linux-melb:xfs-kern:32349a

Signed-off-by: David Chinner <david@fromorbit.com>
Signed-off-by: Lachlan McIlroy <lachlan@sgi.com>
Signed-off-by: Christoph Hellwig <hch@infradead.org>
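The change rests on the observation that an aligned 64 bit load or store is a single access on a 64 bit machine, but may be split into two 32 bit accesses on a 32 bit one, so only the latter needs a lock to avoid a torn copy. The sketch below is a minimal userspace illustration of that pattern, not the XFS code itself: the names (copy_lsn, ail_ctx, lsn_t) are hypothetical, a pthread mutex stands in for the AIL spinlock, and a ULONG_MAX check stands in for the kernel's BITS_PER_LONG.

/*
 * Standalone sketch of the "lock only on 32-bit" LSN copy pattern.
 * Hypothetical names; compile with: cc -pthread sketch.c
 */
#include <limits.h>
#include <pthread.h>
#include <stdint.h>

typedef int64_t lsn_t;			/* stand-in for xfs_lsn_t */

struct ail_ctx {			/* stand-in for struct xfs_ail */
	pthread_mutex_t	lock;		/* stand-in for m_ail_lock */
};

#if ULONG_MAX == 0xffffffffUL
/* 32-bit long: the 64-bit copy may be two accesses, so guard it. */
static inline void
copy_lsn(struct ail_ctx *ailp, lsn_t *dst, lsn_t *src)
{
	pthread_mutex_lock(&ailp->lock);
	*dst = *src;
	pthread_mutex_unlock(&ailp->lock);
}
#else
/* 64-bit long: an aligned 64-bit copy is a single access, no lock needed. */
static inline void
copy_lsn(struct ail_ctx *ailp, lsn_t *dst, lsn_t *src)
{
	(void)ailp;
	*dst = *src;
}
#endif

int main(void)
{
	struct ail_ctx ail = { .lock = PTHREAD_MUTEX_INITIALIZER };
	lsn_t item_lsn = 0x100000001LL;	/* value spanning both 32-bit halves */
	lsn_t flush_lsn = 0;

	copy_lsn(&ail, &flush_lsn, &item_lsn);
	return flush_lsn == item_lsn ? 0 : 1;
}

On a 64 bit build the conditional compiles the copy down to a plain assignment, which is exactly the saving the patch makes for the inode and dquot flush paths below.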
Diffstat (limited to 'fs')
-rw-r--r--	fs/xfs/quota/xfs_dquot.c	|  6
-rw-r--r--	fs/xfs/xfs_inode.c		| 17
-rw-r--r--	fs/xfs/xfs_trans_priv.h		| 23
3 files changed, 32 insertions, 14 deletions
diff --git a/fs/xfs/quota/xfs_dquot.c b/fs/xfs/quota/xfs_dquot.c
index 1e6bf392564..59c1081412e 100644
--- a/fs/xfs/quota/xfs_dquot.c
+++ b/fs/xfs/quota/xfs_dquot.c
@@ -1272,10 +1272,8 @@ xfs_qm_dqflush(
dqp->dq_flags &= ~(XFS_DQ_DIRTY);
mp = dqp->q_mount;
- /* lsn is 64 bits */
- spin_lock(&mp->m_ail_lock);
- dqp->q_logitem.qli_flush_lsn = dqp->q_logitem.qli_item.li_lsn;
- spin_unlock(&mp->m_ail_lock);
+ xfs_trans_ail_copy_lsn(mp->m_ail, &dqp->q_logitem.qli_flush_lsn,
+ &dqp->q_logitem.qli_item.li_lsn);
/*
* Attach an iodone routine so that we can remove this dquot from the
diff --git a/fs/xfs/xfs_inode.c b/fs/xfs/xfs_inode.c
index 4eb629f0513..2951ffd8306 100644
--- a/fs/xfs/xfs_inode.c
+++ b/fs/xfs/xfs_inode.c
@@ -2214,9 +2214,9 @@ xfs_ifree_cluster(
iip = (xfs_inode_log_item_t *)lip;
ASSERT(iip->ili_logged == 1);
lip->li_cb = (void(*)(xfs_buf_t*,xfs_log_item_t*)) xfs_istale_done;
- spin_lock(&mp->m_ail_lock);
- iip->ili_flush_lsn = iip->ili_item.li_lsn;
- spin_unlock(&mp->m_ail_lock);
+ xfs_trans_ail_copy_lsn(mp->m_ail,
+ &iip->ili_flush_lsn,
+ &iip->ili_item.li_lsn);
xfs_iflags_set(iip->ili_inode, XFS_ISTALE);
pre_flushed++;
}
@@ -2237,9 +2237,8 @@ xfs_ifree_cluster(
iip->ili_last_fields = iip->ili_format.ilf_fields;
iip->ili_format.ilf_fields = 0;
iip->ili_logged = 1;
- spin_lock(&mp->m_ail_lock);
- iip->ili_flush_lsn = iip->ili_item.li_lsn;
- spin_unlock(&mp->m_ail_lock);
+ xfs_trans_ail_copy_lsn(mp->m_ail, &iip->ili_flush_lsn,
+ &iip->ili_item.li_lsn);
xfs_buf_attach_iodone(bp,
(void(*)(xfs_buf_t*,xfs_log_item_t*))
@@ -3476,10 +3475,8 @@ xfs_iflush_int(
iip->ili_format.ilf_fields = 0;
iip->ili_logged = 1;
- ASSERT(sizeof(xfs_lsn_t) == 8); /* don't lock if it shrinks */
- spin_lock(&mp->m_ail_lock);
- iip->ili_flush_lsn = iip->ili_item.li_lsn;
- spin_unlock(&mp->m_ail_lock);
+ xfs_trans_ail_copy_lsn(mp->m_ail, &iip->ili_flush_lsn,
+ &iip->ili_item.li_lsn);
/*
* Attach the function xfs_iflush_done to the inode's
diff --git a/fs/xfs/xfs_trans_priv.h b/fs/xfs/xfs_trans_priv.h
index aa585350252..708cff72d20 100644
--- a/fs/xfs/xfs_trans_priv.h
+++ b/fs/xfs/xfs_trans_priv.h
@@ -106,4 +106,27 @@ void xfsaild_wakeup(struct xfs_ail *, xfs_lsn_t);
int xfsaild_start(struct xfs_ail *);
void xfsaild_stop(struct xfs_ail *);
+#if BITS_PER_LONG != 64
+static inline void
+xfs_trans_ail_copy_lsn(
+ struct xfs_ail *ailp,
+ xfs_lsn_t *dst,
+ xfs_lsn_t *src)
+{
+ ASSERT(sizeof(xfs_lsn_t) == 8); /* don't lock if it shrinks */
+ spin_lock(&ailp->xa_mount->m_ail_lock);
+ *dst = *src;
+ spin_unlock(&ailp->xa_mount->m_ail_lock);
+}
+#else
+static inline void
+xfs_trans_ail_copy_lsn(
+ struct xfs_ail *ailp,
+ xfs_lsn_t *dst,
+ xfs_lsn_t *src)
+{
+ ASSERT(sizeof(xfs_lsn_t) == 8);
+ *dst = *src;
+}
+#endif
#endif /* __XFS_TRANS_PRIV_H__ */