From 0bfefc46dc028df60120acdb92062169c9328769 Mon Sep 17 00:00:00 2001
From: David Chinner
Date: Mon, 14 May 2007 18:24:23 +1000
Subject: [XFS] Barriers need to be dynamically checked and switched off

If the underlying block device suddenly stops supporting barriers, we
need to handle the -EOPNOTSUPP error in a sane manner rather than
shutting down the filesystem. If we get this error, clear the barrier
flag, reissue the I/O, and tell the world bad things are occurring.

SGI-PV: 964544
SGI-Modid: xfs-linux-melb:xfs-kern:28568a

Signed-off-by: David Chinner
Signed-off-by: Christoph Hellwig
Signed-off-by: Tim Shimmin
---
 fs/xfs/linux-2.6/xfs_buf.c | 13 ++++++++++++-
 1 file changed, 12 insertions(+), 1 deletion(-)

(limited to 'fs/xfs/linux-2.6/xfs_buf.c')

diff --git a/fs/xfs/linux-2.6/xfs_buf.c b/fs/xfs/linux-2.6/xfs_buf.c
index b0f0e58866d..8d9298c9976 100644
--- a/fs/xfs/linux-2.6/xfs_buf.c
+++ b/fs/xfs/linux-2.6/xfs_buf.c
@@ -997,7 +997,18 @@ xfs_buf_iodone_work(
 	xfs_buf_t		*bp =
 		container_of(work, xfs_buf_t, b_iodone_work);
 
-	if (bp->b_iodone)
+	/*
+	 * We can get an EOPNOTSUPP to ordered writes.  Here we clear the
+	 * ordered flag and reissue them.  Because we can't tell the higher
+	 * layers directly that they should not issue ordered I/O anymore, they
+	 * need to check if the ordered flag was cleared during I/O completion.
+	 */
+	if ((bp->b_error == EOPNOTSUPP) &&
+	    (bp->b_flags & (XBF_ORDERED|XBF_ASYNC)) == (XBF_ORDERED|XBF_ASYNC)) {
+		XB_TRACE(bp, "ordered_retry", bp->b_iodone);
+		bp->b_flags &= ~XBF_ORDERED;
+		xfs_buf_iorequest(bp);
+	} else if (bp->b_iodone)
 		(*(bp->b_iodone))(bp);
 	else if (bp->b_flags & XBF_ASYNC)
 		xfs_buf_relse(bp);
--
cgit v1.2.3

From 7f015072348a14f16d548be557ee58c5c55df0aa Mon Sep 17 00:00:00 2001
From: Jeremy Fitzhardinge
Date: Wed, 17 Oct 2007 13:55:03 +1000
Subject: [XFS] eagerly remove vmap mappings to avoid upsetting Xen

XFS leaves stray mappings around when it vmaps memory to make it
virtually contiguous. This upsets Xen if one of those pages is being
recycled into a pagetable, since it finds an extra writable mapping of
the page.

This patch solves the problem in a brute-force way by making XFS
always eagerly unmap its mappings.

SGI-PV: 971902
SGI-Modid: xfs-linux-melb:xfs-kern:29886a

Signed-off-by: Jeremy Fitzhardinge
Signed-off-by: David Chinner
Signed-off-by: Tim Shimmin
---
 fs/xfs/linux-2.6/xfs_buf.c | 13 +++++++++++++
 1 file changed, 13 insertions(+)

(limited to 'fs/xfs/linux-2.6/xfs_buf.c')

diff --git a/fs/xfs/linux-2.6/xfs_buf.c b/fs/xfs/linux-2.6/xfs_buf.c
index 8d9298c9976..d5b2d2bbf5f 100644
--- a/fs/xfs/linux-2.6/xfs_buf.c
+++ b/fs/xfs/linux-2.6/xfs_buf.c
@@ -187,6 +187,19 @@ free_address(
 {
 	a_list_t	*aentry;
 
+#ifdef CONFIG_XEN
+	/*
+	 * Xen needs to be able to make sure it can get an exclusive
+	 * RO mapping of pages it wants to turn into a pagetable.  If
+	 * a newly allocated page is also still being vmap()ed by xfs,
+	 * it will cause pagetable construction to fail.  This is a
+	 * quick workaround to always eagerly unmap pages so that Xen
+	 * is happy.
+	 */
+	vunmap(addr);
+	return;
+#endif
+
 	aentry = kmalloc(sizeof(a_list_t), GFP_NOWAIT);
 	if (likely(aentry)) {
 		spin_lock(&as_lock);
--
cgit v1.2.3
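
The first patch's commit message points out that xfs_buf_iodone_work()
cannot tell the higher layers directly that barriers stopped working;
those layers have to notice on their own that XBF_ORDERED was cleared
while the I/O completed. Below is a minimal, self-contained C model of
that hand-shake. All names in it (struct buf, BUF_ORDERED, log_iodone,
mount_flags) are illustrative stand-ins, not the XFS API, and the real
consumer of the cleared flag lives outside the single file shown in
this limited view.

/*
 * Hypothetical model of the EOPNOTSUPP retry hand-shake described in
 * the first patch.  All names are stand-ins, not the XFS API.
 */
#include <stdio.h>
#include <errno.h>

#define BUF_ORDERED	(1 << 0)	/* barrier (ordered) write */
#define BUF_ASYNC	(1 << 1)	/* asynchronous I/O */
#define MNT_BARRIER	(1 << 0)	/* mount-wide "barriers on" flag */

struct buf {
	int	flags;
	int	error;
	void	(*iodone)(struct buf *);
};

static int mount_flags = MNT_BARRIER;
static int device_has_barriers;		/* 0: device just lost barriers */

/* Stand-in for xfs_buf_iorequest(): (re)issue the buffer to the device. */
static void buf_iorequest(struct buf *bp)
{
	bp->error = ((bp->flags & BUF_ORDERED) && !device_has_barriers)
			? EOPNOTSUPP : 0;
}

/*
 * Completion work, shaped like the patched xfs_buf_iodone_work(): on
 * EOPNOTSUPP for an async ordered write, clear the ordered flag and
 * reissue instead of failing; only a completed I/O reaches ->iodone.
 */
static void buf_iodone_work(struct buf *bp)
{
	if (bp->error == EOPNOTSUPP &&
	    (bp->flags & (BUF_ORDERED | BUF_ASYNC)) ==
	    (BUF_ORDERED | BUF_ASYNC)) {
		bp->flags &= ~BUF_ORDERED;
		buf_iorequest(bp);	/* reissue as a plain write... */
		buf_iodone_work(bp);	/* ...which completes again later */
	} else if (bp->iodone) {
		bp->iodone(bp);
	}
}

/*
 * Higher layer's completion callback: it issued the buffer ordered, so
 * a cleared BUF_ORDERED means barriers died underneath it, and it must
 * stop issuing ordered I/O mount-wide.
 */
static void log_iodone(struct buf *bp)
{
	if ((mount_flags & MNT_BARRIER) && !(bp->flags & BUF_ORDERED)) {
		mount_flags &= ~MNT_BARRIER;
		fprintf(stderr, "barriers no longer supported, disabling\n");
	}
}

int main(void)
{
	struct buf bp = { BUF_ORDERED | BUF_ASYNC, 0, log_iodone };

	buf_iorequest(&bp);	/* fails with EOPNOTSUPP */
	buf_iodone_work(&bp);	/* retries, then notifies log_iodone */
	return mount_flags & MNT_BARRIER ? 1 : 0;	/* 0: switched off */
}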
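
The second patch short-circuits a deferred-unmap scheme: normally
free_address() only queues the address on a list and the actual
vunmap() happens later in a batched purge, which is exactly why stray
mappings linger long enough to bother Xen. The sketch below models
that deferral and the eager CONFIG_XEN bypass; it mirrors the shape of
free_address()/purge_addresses() in this file, but the list handling
and the vunmap() stub are simplified illustrations, not the kernel
code.

/*
 * Simplified model of deferred vs. eager unmapping.  The helpers
 * mirror free_address()/purge_addresses() in shape only; everything
 * here is an illustrative stand-in for the kernel primitives.
 */
#include <stdio.h>
#include <stdlib.h>

struct a_list {
	struct a_list	*next;
	void		*addr;
};

static struct a_list *as_free_head;	/* mappings awaiting teardown */

/* Stand-in for vunmap(): tears the virtual mapping down immediately. */
static void vunmap_stub(void *addr)
{
	printf("unmapped %p\n", addr);
}

static void free_address(void *addr)
{
	struct a_list *aentry;

#ifdef CONFIG_XEN
	/* Eager path from the patch: never leave a stray mapping behind. */
	vunmap_stub(addr);
	return;
#endif
	/* Deferred path: queue the address; the mapping stays live. */
	aentry = malloc(sizeof(*aentry));
	if (aentry) {
		aentry->next = as_free_head;
		aentry->addr = addr;
		as_free_head = aentry;
	} else {
		vunmap_stub(addr);	/* cannot defer, unmap right away */
	}
}

/* Batched teardown, normally run some time later from a work item. */
static void purge_addresses(void)
{
	struct a_list *aentry = as_free_head;

	as_free_head = NULL;
	while (aentry) {
		struct a_list *old = aentry;

		vunmap_stub(aentry->addr);
		aentry = aentry->next;
		free(old);
	}
}

int main(void)
{
	free_address((void *)0x1000);	/* with CONFIG_XEN: unmapped now */
	free_address((void *)0x2000);
	purge_addresses();	/* without CONFIG_XEN: unmapped only here */
	return 0;
}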