author     Christoph Hellwig <hch@infradead.org>  2009-12-14 23:14:59 +0000
committer  Alex Elder <aelder@sgi.com>            2009-12-14 23:08:16 -0600
commit     0b1b213fcf3a8486ada99a2bab84ab8c6f51b264
tree       661fd8da7487231224c77b95c33986cb7d7b41ef
parent     6ef3554422e2c7e7aa424ba63737da498881dff4
xfs: event tracing support
Convert the old xfs tracing support, which could only be used with the
out-of-tree kdb and xfsidbg patches, to use the generic event tracer.
To use it, make sure CONFIG_EVENT_TRACING is enabled and then enable
all xfs trace channels by:
echo 1 > /sys/kernel/debug/tracing/events/xfs/enable
or alternatively enable single events by just doing the same in one
event subdirectory, e.g.
echo 1 > /sys/kernel/debug/tracing/events/xfs/xfs_ihold/enable
or set more complex filters, etc. All of this is described in more
detail in Documentation/trace/events.txt. To read the events do a
cat /sys/kernel/debug/tracing/trace
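As a rough sketch of a filter, the standard event filter file can be used to
restrict an event to matching records; the ino field below is defined for the
lock events in xfs_trace.h, and the inode number is just a placeholder:
echo 'ino == 0x85' > /sys/kernel/debug/tracing/events/xfs/xfs_ilock/filter
Writing 0 to the same file clears the filter again.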
Compared to the last posting this patch converts the tracing mostly to
the one-tracepoint-per-callsite model that other users of the new
tracing facility also employ. This allows very fine-grained control
of the tracing, cleaner trace output, and also enables the
perf tool to use each tracepoint as a virtual performance counter,
allowing us to e.g. count how often certain workloads hit various
spots in XFS. Take a look at
http://lwn.net/Articles/346470/
for some examples.
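As a rough illustration (the exact flags may vary with the perf version; the
event name comes from the tracepoints added in this patch), counting buffer
reads system-wide for ten seconds would look like:
perf stat -a -e xfs:xfs_buf_read sleep 10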
Also, the btree tracing isn't included at all yet, as it will require
additional core tracing features that are not in mainline yet; I plan to
deliver it later.
And the really nice thing about this patch is that it actually removes
many lines of code while adding this functionality:
fs/xfs/Makefile | 8
fs/xfs/linux-2.6/xfs_acl.c | 1
fs/xfs/linux-2.6/xfs_aops.c | 52 -
fs/xfs/linux-2.6/xfs_aops.h | 2
fs/xfs/linux-2.6/xfs_buf.c | 117 +--
fs/xfs/linux-2.6/xfs_buf.h | 33
fs/xfs/linux-2.6/xfs_fs_subr.c | 3
fs/xfs/linux-2.6/xfs_ioctl.c | 1
fs/xfs/linux-2.6/xfs_ioctl32.c | 1
fs/xfs/linux-2.6/xfs_iops.c | 1
fs/xfs/linux-2.6/xfs_linux.h | 1
fs/xfs/linux-2.6/xfs_lrw.c | 87 --
fs/xfs/linux-2.6/xfs_lrw.h | 45 -
fs/xfs/linux-2.6/xfs_super.c | 104 ---
fs/xfs/linux-2.6/xfs_super.h | 7
fs/xfs/linux-2.6/xfs_sync.c | 1
fs/xfs/linux-2.6/xfs_trace.c | 75 ++
fs/xfs/linux-2.6/xfs_trace.h | 1369 +++++++++++++++++++++++++++++++++++++++++
fs/xfs/linux-2.6/xfs_vnode.h | 4
fs/xfs/quota/xfs_dquot.c | 110 ---
fs/xfs/quota/xfs_dquot.h | 21
fs/xfs/quota/xfs_qm.c | 40 -
fs/xfs/quota/xfs_qm_syscalls.c | 4
fs/xfs/support/ktrace.c | 323 ---------
fs/xfs/support/ktrace.h | 85 --
fs/xfs/xfs.h | 16
fs/xfs/xfs_ag.h | 14
fs/xfs/xfs_alloc.c | 230 +-----
fs/xfs/xfs_alloc.h | 27
fs/xfs/xfs_alloc_btree.c | 1
fs/xfs/xfs_attr.c | 107 ---
fs/xfs/xfs_attr.h | 10
fs/xfs/xfs_attr_leaf.c | 14
fs/xfs/xfs_attr_sf.h | 40 -
fs/xfs/xfs_bmap.c | 507 +++------------
fs/xfs/xfs_bmap.h | 49 -
fs/xfs/xfs_bmap_btree.c | 6
fs/xfs/xfs_btree.c | 5
fs/xfs/xfs_btree_trace.h | 17
fs/xfs/xfs_buf_item.c | 87 --
fs/xfs/xfs_buf_item.h | 20
fs/xfs/xfs_da_btree.c | 3
fs/xfs/xfs_da_btree.h | 7
fs/xfs/xfs_dfrag.c | 2
fs/xfs/xfs_dir2.c | 8
fs/xfs/xfs_dir2_block.c | 20
fs/xfs/xfs_dir2_leaf.c | 21
fs/xfs/xfs_dir2_node.c | 27
fs/xfs/xfs_dir2_sf.c | 26
fs/xfs/xfs_dir2_trace.c | 216 ------
fs/xfs/xfs_dir2_trace.h | 72 --
fs/xfs/xfs_filestream.c | 8
fs/xfs/xfs_fsops.c | 2
fs/xfs/xfs_iget.c | 111 ---
fs/xfs/xfs_inode.c | 67 --
fs/xfs/xfs_inode.h | 76 --
fs/xfs/xfs_inode_item.c | 5
fs/xfs/xfs_iomap.c | 85 --
fs/xfs/xfs_iomap.h | 8
fs/xfs/xfs_log.c | 181 +----
fs/xfs/xfs_log_priv.h | 20
fs/xfs/xfs_log_recover.c | 1
fs/xfs/xfs_mount.c | 2
fs/xfs/xfs_quota.h | 8
fs/xfs/xfs_rename.c | 1
fs/xfs/xfs_rtalloc.c | 1
fs/xfs/xfs_rw.c | 3
fs/xfs/xfs_trans.h | 47 +
fs/xfs/xfs_trans_buf.c | 62 -
fs/xfs/xfs_vnodeops.c | 8
70 files changed, 2151 insertions(+), 2592 deletions(-)
Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Alex Elder <aelder@sgi.com>
diff --git a/fs/xfs/Makefile b/fs/xfs/Makefile index 7a59daed178..56641fe52a2 100644 --- a/fs/xfs/Makefile +++ b/fs/xfs/Makefile @@ -26,6 +26,8 @@ endif obj-$(CONFIG_XFS_FS) += xfs.o +xfs-y += linux-2.6/xfs_trace.o + xfs-$(CONFIG_XFS_QUOTA) += $(addprefix quota/, \ xfs_dquot.o \ xfs_dquot_item.o \ @@ -90,8 +92,7 @@ xfs-y += xfs_alloc.o \ xfs_rw.o \ xfs_dmops.o -xfs-$(CONFIG_XFS_TRACE) += xfs_btree_trace.o \ - xfs_dir2_trace.o +xfs-$(CONFIG_XFS_TRACE) += xfs_btree_trace.o # Objects in linux/ xfs-y += $(addprefix $(XFS_LINUX)/, \ @@ -113,6 +114,3 @@ xfs-y += $(addprefix $(XFS_LINUX)/, \ xfs-y += $(addprefix support/, \ debug.o \ uuid.o) - -xfs-$(CONFIG_XFS_TRACE) += support/ktrace.o - diff --git a/fs/xfs/linux-2.6/xfs_acl.c b/fs/xfs/linux-2.6/xfs_acl.c index b23a5450644..69e598b6986 100644 --- a/fs/xfs/linux-2.6/xfs_acl.c +++ b/fs/xfs/linux-2.6/xfs_acl.c @@ -21,6 +21,7 @@ #include "xfs_bmap_btree.h" #include "xfs_inode.h" #include "xfs_vnodeops.h" +#include "xfs_trace.h" #include <linux/xattr.h> #include <linux/posix_acl_xattr.h> diff --git a/fs/xfs/linux-2.6/xfs_aops.c b/fs/xfs/linux-2.6/xfs_aops.c index 87813e405ce..d798c54296e 100644 --- a/fs/xfs/linux-2.6/xfs_aops.c +++ b/fs/xfs/linux-2.6/xfs_aops.c @@ -38,6 +38,7 @@ #include "xfs_rw.h" #include "xfs_iomap.h" #include "xfs_vnodeops.h" +#include "xfs_trace.h" #include <linux/mpage.h> #include <linux/pagevec.h> #include <linux/writeback.h> @@ -76,7 +77,7 @@ xfs_ioend_wake( wake_up(to_ioend_wq(ip)); } -STATIC void +void xfs_count_page_state( struct page *page, int *delalloc, @@ -98,48 +99,6 @@ xfs_count_page_state( } while ((bh = bh->b_this_page) != head); } -#if defined(XFS_RW_TRACE) -void -xfs_page_trace( - int tag, - struct inode *inode, - struct page *page, - unsigned long pgoff) -{ - xfs_inode_t *ip; - loff_t isize = i_size_read(inode); - loff_t offset = page_offset(page); - int delalloc = -1, unmapped = -1, unwritten = -1; - - if (page_has_buffers(page)) - xfs_count_page_state(page, &delalloc, &unmapped, &unwritten); - - ip = XFS_I(inode); - if (!ip->i_rwtrace) - return; - - ktrace_enter(ip->i_rwtrace, - (void *)((unsigned long)tag), - (void *)ip, - (void *)inode, - (void *)page, - (void *)pgoff, - (void *)((unsigned long)((ip->i_d.di_size >> 32) & 0xffffffff)), - (void *)((unsigned long)(ip->i_d.di_size & 0xffffffff)), - (void *)((unsigned long)((isize >> 32) & 0xffffffff)), - (void *)((unsigned long)(isize & 0xffffffff)), - (void *)((unsigned long)((offset >> 32) & 0xffffffff)), - (void *)((unsigned long)(offset & 0xffffffff)), - (void *)((unsigned long)delalloc), - (void *)((unsigned long)unmapped), - (void *)((unsigned long)unwritten), - (void *)((unsigned long)current_pid()), - (void *)NULL); -} -#else -#define xfs_page_trace(tag, inode, page, pgoff) -#endif - STATIC struct block_device * xfs_find_bdev_for_inode( struct xfs_inode *ip) @@ -1202,7 +1161,7 @@ xfs_vm_writepage( int delalloc, unmapped, unwritten; struct inode *inode = page->mapping->host; - xfs_page_trace(XFS_WRITEPAGE_ENTER, inode, page, 0); + trace_xfs_writepage(inode, page, 0); /* * We need a transaction if: @@ -1307,7 +1266,7 @@ xfs_vm_releasepage( .nr_to_write = 1, }; - xfs_page_trace(XFS_RELEASEPAGE_ENTER, inode, page, 0); + trace_xfs_releasepage(inode, page, 0); if (!page_has_buffers(page)) return 0; @@ -1587,8 +1546,7 @@ xfs_vm_invalidatepage( struct page *page, unsigned long offset) { - xfs_page_trace(XFS_INVALIDPAGE_ENTER, - page->mapping->host, page, offset); + trace_xfs_invalidatepage(page->mapping->host, page, offset); block_invalidatepage(page, offset); 
} diff --git a/fs/xfs/linux-2.6/xfs_aops.h b/fs/xfs/linux-2.6/xfs_aops.h index 221b3e66cee..4cfc6ea87df 100644 --- a/fs/xfs/linux-2.6/xfs_aops.h +++ b/fs/xfs/linux-2.6/xfs_aops.h @@ -45,4 +45,6 @@ extern int xfs_get_blocks(struct inode *, sector_t, struct buffer_head *, int); extern void xfs_ioend_init(void); extern void xfs_ioend_wait(struct xfs_inode *); +extern void xfs_count_page_state(struct page *, int *, int *, int *); + #endif /* __XFS_AOPS_H__ */ diff --git a/fs/xfs/linux-2.6/xfs_buf.c b/fs/xfs/linux-2.6/xfs_buf.c index 4ddc973aea7..b4c7d4248aa 100644 --- a/fs/xfs/linux-2.6/xfs_buf.c +++ b/fs/xfs/linux-2.6/xfs_buf.c @@ -39,6 +39,7 @@ #include "xfs_ag.h" #include "xfs_dmapi.h" #include "xfs_mount.h" +#include "xfs_trace.h" static kmem_zone_t *xfs_buf_zone; STATIC int xfsbufd(void *); @@ -53,34 +54,6 @@ static struct workqueue_struct *xfslogd_workqueue; struct workqueue_struct *xfsdatad_workqueue; struct workqueue_struct *xfsconvertd_workqueue; -#ifdef XFS_BUF_TRACE -void -xfs_buf_trace( - xfs_buf_t *bp, - char *id, - void *data, - void *ra) -{ - ktrace_enter(xfs_buf_trace_buf, - bp, id, - (void *)(unsigned long)bp->b_flags, - (void *)(unsigned long)bp->b_hold.counter, - (void *)(unsigned long)bp->b_sema.count, - (void *)current, - data, ra, - (void *)(unsigned long)((bp->b_file_offset>>32) & 0xffffffff), - (void *)(unsigned long)(bp->b_file_offset & 0xffffffff), - (void *)(unsigned long)bp->b_buffer_length, - NULL, NULL, NULL, NULL, NULL); -} -ktrace_t *xfs_buf_trace_buf; -#define XFS_BUF_TRACE_SIZE 4096 -#define XB_TRACE(bp, id, data) \ - xfs_buf_trace(bp, id, (void *)data, (void *)__builtin_return_address(0)) -#else -#define XB_TRACE(bp, id, data) do { } while (0) -#endif - #ifdef XFS_BUF_LOCK_TRACKING # define XB_SET_OWNER(bp) ((bp)->b_last_holder = current->pid) # define XB_CLEAR_OWNER(bp) ((bp)->b_last_holder = -1) @@ -279,7 +252,8 @@ _xfs_buf_initialize( init_waitqueue_head(&bp->b_waiters); XFS_STATS_INC(xb_create); - XB_TRACE(bp, "initialize", target); + + trace_xfs_buf_init(bp, _RET_IP_); } /* @@ -332,7 +306,7 @@ void xfs_buf_free( xfs_buf_t *bp) { - XB_TRACE(bp, "free", 0); + trace_xfs_buf_free(bp, _RET_IP_); ASSERT(list_empty(&bp->b_hash_list)); @@ -445,7 +419,6 @@ _xfs_buf_lookup_pages( if (page_count == bp->b_page_count) bp->b_flags |= XBF_DONE; - XB_TRACE(bp, "lookup_pages", (long)page_count); return error; } @@ -548,7 +521,6 @@ found: if (down_trylock(&bp->b_sema)) { if (!(flags & XBF_TRYLOCK)) { /* wait for buffer ownership */ - XB_TRACE(bp, "get_lock", 0); xfs_buf_lock(bp); XFS_STATS_INC(xb_get_locked_waited); } else { @@ -571,7 +543,8 @@ found: ASSERT((bp->b_flags & _XBF_DELWRI_Q) == 0); bp->b_flags &= XBF_MAPPED; } - XB_TRACE(bp, "got_lock", 0); + + trace_xfs_buf_find(bp, flags, _RET_IP_); XFS_STATS_INC(xb_get_locked); return bp; } @@ -627,7 +600,7 @@ xfs_buf_get( bp->b_bn = ioff; bp->b_count_desired = bp->b_buffer_length; - XB_TRACE(bp, "get", (unsigned long)flags); + trace_xfs_buf_get(bp, flags, _RET_IP_); return bp; no_buffer: @@ -644,8 +617,6 @@ _xfs_buf_read( { int status; - XB_TRACE(bp, "_xfs_buf_read", (unsigned long)flags); - ASSERT(!(flags & (XBF_DELWRI|XBF_WRITE))); ASSERT(bp->b_bn != XFS_BUF_DADDR_NULL); @@ -673,19 +644,18 @@ xfs_buf_read( bp = xfs_buf_get(target, ioff, isize, flags); if (bp) { + trace_xfs_buf_read(bp, flags, _RET_IP_); + if (!XFS_BUF_ISDONE(bp)) { - XB_TRACE(bp, "read", (unsigned long)flags); XFS_STATS_INC(xb_get_read); _xfs_buf_read(bp, flags); } else if (flags & XBF_ASYNC) { - XB_TRACE(bp, "read_async", (unsigned long)flags); /* * 
Read ahead call which is already satisfied, * drop the buffer */ goto no_buffer; } else { - XB_TRACE(bp, "read_done", (unsigned long)flags); /* We do not want read in the flags */ bp->b_flags &= ~XBF_READ; } @@ -823,7 +793,7 @@ xfs_buf_get_noaddr( xfs_buf_unlock(bp); - XB_TRACE(bp, "no_daddr", len); + trace_xfs_buf_get_noaddr(bp, _RET_IP_); return bp; fail_free_mem: @@ -845,8 +815,8 @@ void xfs_buf_hold( xfs_buf_t *bp) { + trace_xfs_buf_hold(bp, _RET_IP_); atomic_inc(&bp->b_hold); - XB_TRACE(bp, "hold", 0); } /* @@ -859,7 +829,7 @@ xfs_buf_rele( { xfs_bufhash_t *hash = bp->b_hash; - XB_TRACE(bp, "rele", bp->b_relse); + trace_xfs_buf_rele(bp, _RET_IP_); if (unlikely(!hash)) { ASSERT(!bp->b_relse); @@ -909,21 +879,19 @@ xfs_buf_cond_lock( int locked; locked = down_trylock(&bp->b_sema) == 0; - if (locked) { + if (locked) XB_SET_OWNER(bp); - } - XB_TRACE(bp, "cond_lock", (long)locked); + + trace_xfs_buf_cond_lock(bp, _RET_IP_); return locked ? 0 : -EBUSY; } -#if defined(DEBUG) || defined(XFS_BLI_TRACE) int xfs_buf_lock_value( xfs_buf_t *bp) { return bp->b_sema.count; } -#endif /* * Locks a buffer object. @@ -935,12 +903,14 @@ void xfs_buf_lock( xfs_buf_t *bp) { - XB_TRACE(bp, "lock", 0); + trace_xfs_buf_lock(bp, _RET_IP_); + if (atomic_read(&bp->b_io_remaining)) blk_run_address_space(bp->b_target->bt_mapping); down(&bp->b_sema); XB_SET_OWNER(bp); - XB_TRACE(bp, "locked", 0); + + trace_xfs_buf_lock_done(bp, _RET_IP_); } /* @@ -962,7 +932,8 @@ xfs_buf_unlock( XB_CLEAR_OWNER(bp); up(&bp->b_sema); - XB_TRACE(bp, "unlock", 0); + + trace_xfs_buf_unlock(bp, _RET_IP_); } @@ -974,17 +945,18 @@ void xfs_buf_pin( xfs_buf_t *bp) { + trace_xfs_buf_pin(bp, _RET_IP_); atomic_inc(&bp->b_pin_count); - XB_TRACE(bp, "pin", (long)bp->b_pin_count.counter); } void xfs_buf_unpin( xfs_buf_t *bp) { + trace_xfs_buf_unpin(bp, _RET_IP_); + if (atomic_dec_and_test(&bp->b_pin_count)) wake_up_all(&bp->b_waiters); - XB_TRACE(bp, "unpin", (long)bp->b_pin_count.counter); } int @@ -1035,7 +1007,7 @@ xfs_buf_iodone_work( */ if ((bp->b_error == EOPNOTSUPP) && (bp->b_flags & (XBF_ORDERED|XBF_ASYNC)) == (XBF_ORDERED|XBF_ASYNC)) { - XB_TRACE(bp, "ordered_retry", bp->b_iodone); + trace_xfs_buf_ordered_retry(bp, _RET_IP_); bp->b_flags &= ~XBF_ORDERED; bp->b_flags |= _XFS_BARRIER_FAILED; xfs_buf_iorequest(bp); @@ -1050,12 +1022,12 @@ xfs_buf_ioend( xfs_buf_t *bp, int schedule) { + trace_xfs_buf_iodone(bp, _RET_IP_); + bp->b_flags &= ~(XBF_READ | XBF_WRITE | XBF_READ_AHEAD); if (bp->b_error == 0) bp->b_flags |= XBF_DONE; - XB_TRACE(bp, "iodone", bp->b_iodone); - if ((bp->b_iodone) || (bp->b_flags & XBF_ASYNC)) { if (schedule) { INIT_WORK(&bp->b_iodone_work, xfs_buf_iodone_work); @@ -1075,7 +1047,7 @@ xfs_buf_ioerror( { ASSERT(error >= 0 && error <= 0xffff); bp->b_error = (unsigned short)error; - XB_TRACE(bp, "ioerror", (unsigned long)error); + trace_xfs_buf_ioerror(bp, error, _RET_IP_); } int @@ -1083,7 +1055,7 @@ xfs_bawrite( void *mp, struct xfs_buf *bp) { - XB_TRACE(bp, "bawrite", 0); + trace_xfs_buf_bawrite(bp, _RET_IP_); ASSERT(bp->b_bn != XFS_BUF_DADDR_NULL); @@ -1102,7 +1074,7 @@ xfs_bdwrite( void *mp, struct xfs_buf *bp) { - XB_TRACE(bp, "bdwrite", 0); + trace_xfs_buf_bdwrite(bp, _RET_IP_); bp->b_strat = xfs_bdstrat_cb; bp->b_mount = mp; @@ -1253,7 +1225,7 @@ int xfs_buf_iorequest( xfs_buf_t *bp) { - XB_TRACE(bp, "iorequest", 0); + trace_xfs_buf_iorequest(bp, _RET_IP_); if (bp->b_flags & XBF_DELWRI) { xfs_buf_delwri_queue(bp, 1); @@ -1287,11 +1259,13 @@ int xfs_buf_iowait( xfs_buf_t *bp) { - XB_TRACE(bp, "iowait", 0); + 
trace_xfs_buf_iowait(bp, _RET_IP_); + if (atomic_read(&bp->b_io_remaining)) blk_run_address_space(bp->b_target->bt_mapping); wait_for_completion(&bp->b_iowait); - XB_TRACE(bp, "iowaited", (long)bp->b_error); + + trace_xfs_buf_iowait_done(bp, _RET_IP_); return bp->b_error; } @@ -1604,7 +1578,8 @@ xfs_buf_delwri_queue( struct list_head *dwq = &bp->b_target->bt_delwrite_queue; spinlock_t *dwlk = &bp->b_target->bt_delwrite_lock; - XB_TRACE(bp, "delwri_q", (long)unlock); + trace_xfs_buf_delwri_queue(bp, _RET_IP_); + ASSERT((bp->b_flags&(XBF_DELWRI|XBF_ASYNC)) == (XBF_DELWRI|XBF_ASYNC)); spin_lock(dwlk); @@ -1644,7 +1619,7 @@ xfs_buf_delwri_dequeue( if (dequeued) xfs_buf_rele(bp); - XB_TRACE(bp, "delwri_dq", (long)dequeued); + trace_xfs_buf_delwri_dequeue(bp, _RET_IP_); } STATIC void @@ -1692,7 +1667,7 @@ xfs_buf_delwri_split( INIT_LIST_HEAD(list); spin_lock(dwlk); list_for_each_entry_safe(bp, n, dwq, b_list) { - XB_TRACE(bp, "walkq1", (long)xfs_buf_ispin(bp)); + trace_xfs_buf_delwri_split(bp, _RET_IP_); ASSERT(bp->b_flags & XBF_DELWRI); if (!xfs_buf_ispin(bp) && !xfs_buf_cond_lock(bp)) { @@ -1816,14 +1791,10 @@ xfs_flush_buftarg( int __init xfs_buf_init(void) { -#ifdef XFS_BUF_TRACE - xfs_buf_trace_buf = ktrace_alloc(XFS_BUF_TRACE_SIZE, KM_NOFS); -#endif - xfs_buf_zone = kmem_zone_init_flags(sizeof(xfs_buf_t), "xfs_buf", KM_ZONE_HWALIGN, NULL); if (!xfs_buf_zone) - goto out_free_trace_buf; + goto out; xfslogd_workqueue = create_workqueue("xfslogd"); if (!xfslogd_workqueue) @@ -1846,10 +1817,7 @@ xfs_buf_init(void) destroy_workqueue(xfslogd_workqueue); out_free_buf_zone: kmem_zone_destroy(xfs_buf_zone); - out_free_trace_buf: -#ifdef XFS_BUF_TRACE - ktrace_free(xfs_buf_trace_buf); -#endif + out: return -ENOMEM; } @@ -1861,9 +1829,6 @@ xfs_buf_terminate(void) destroy_workqueue(xfsdatad_workqueue); destroy_workqueue(xfslogd_workqueue); kmem_zone_destroy(xfs_buf_zone); -#ifdef XFS_BUF_TRACE - ktrace_free(xfs_buf_trace_buf); -#endif } #ifdef CONFIG_KDB_MODULES diff --git a/fs/xfs/linux-2.6/xfs_buf.h b/fs/xfs/linux-2.6/xfs_buf.h index 5f07dd91c5f..a509f4addc2 100644 --- a/fs/xfs/linux-2.6/xfs_buf.h +++ b/fs/xfs/linux-2.6/xfs_buf.h @@ -95,6 +95,28 @@ typedef enum { _XFS_BARRIER_FAILED = (1 << 23), } xfs_buf_flags_t; +#define XFS_BUF_FLAGS \ + { XBF_READ, "READ" }, \ + { XBF_WRITE, "WRITE" }, \ + { XBF_MAPPED, "MAPPED" }, \ + { XBF_ASYNC, "ASYNC" }, \ + { XBF_DONE, "DONE" }, \ + { XBF_DELWRI, "DELWRI" }, \ + { XBF_STALE, "STALE" }, \ + { XBF_FS_MANAGED, "FS_MANAGED" }, \ + { XBF_ORDERED, "ORDERED" }, \ + { XBF_READ_AHEAD, "READ_AHEAD" }, \ + { XBF_LOCK, "LOCK" }, /* should never be set */\ + { XBF_TRYLOCK, "TRYLOCK" }, /* ditto */\ + { XBF_DONT_BLOCK, "DONT_BLOCK" }, /* ditto */\ + { _XBF_PAGE_CACHE, "PAGE_CACHE" }, \ + { _XBF_PAGES, "PAGES" }, \ + { _XBF_RUN_QUEUES, "RUN_QUEUES" }, \ + { _XBF_DELWRI_Q, "DELWRI_Q" }, \ + { _XBF_PAGE_LOCKED, "PAGE_LOCKED" }, \ + { _XFS_BARRIER_FAILED, "BARRIER_FAILED" } + + typedef enum { XBT_FORCE_SLEEP = 0, XBT_FORCE_FLUSH = 1, @@ -243,13 +265,6 @@ extern void xfs_buf_delwri_dequeue(xfs_buf_t *); extern int xfs_buf_init(void); extern void xfs_buf_terminate(void); -#ifdef XFS_BUF_TRACE -extern ktrace_t *xfs_buf_trace_buf; -extern void xfs_buf_trace(xfs_buf_t *, char *, void *, void *); -#else -#define xfs_buf_trace(bp,id,ptr,ra) do { } while (0) -#endif - #define xfs_buf_target_name(target) \ ({ char __b[BDEVNAME_SIZE]; bdevname((target)->bt_bdev, __b); __b; }) @@ -365,10 +380,6 @@ static inline void xfs_buf_relse(xfs_buf_t *bp) #define xfs_bpin(bp) xfs_buf_pin(bp) 
#define xfs_bunpin(bp) xfs_buf_unpin(bp) - -#define xfs_buftrace(id, bp) \ - xfs_buf_trace(bp, id, NULL, (void *)__builtin_return_address(0)) - #define xfs_biodone(bp) xfs_buf_ioend(bp, 0) #define xfs_biomove(bp, off, len, data, rw) \ diff --git a/fs/xfs/linux-2.6/xfs_fs_subr.c b/fs/xfs/linux-2.6/xfs_fs_subr.c index 08be36d7326..7501b85fd86 100644 --- a/fs/xfs/linux-2.6/xfs_fs_subr.c +++ b/fs/xfs/linux-2.6/xfs_fs_subr.c @@ -19,6 +19,7 @@ #include "xfs_vnodeops.h" #include "xfs_bmap_btree.h" #include "xfs_inode.h" +#include "xfs_trace.h" int fs_noerr(void) { return 0; } int fs_nosys(void) { return ENOSYS; } @@ -51,6 +52,8 @@ xfs_flushinval_pages( struct address_space *mapping = VFS_I(ip)->i_mapping; int ret = 0; + trace_xfs_pagecache_inval(ip, first, last); + if (mapping->nrpages) { xfs_iflags_clear(ip, XFS_ITRUNCATED); ret = filemap_write_and_wait(mapping); diff --git a/fs/xfs/linux-2.6/xfs_ioctl.c b/fs/xfs/linux-2.6/xfs_ioctl.c index 5bb523d7f37..a034cf62443 100644 --- a/fs/xfs/linux-2.6/xfs_ioctl.c +++ b/fs/xfs/linux-2.6/xfs_ioctl.c @@ -51,6 +51,7 @@ #include "xfs_quota.h" #include "xfs_inode_item.h" #include "xfs_export.h" +#include "xfs_trace.h" #include <linux/capability.h> #include <linux/dcache.h> diff --git a/fs/xfs/linux-2.6/xfs_ioctl32.c b/fs/xfs/linux-2.6/xfs_ioctl32.c index eafcc7c1870..be1527b1670 100644 --- a/fs/xfs/linux-2.6/xfs_ioctl32.c +++ b/fs/xfs/linux-2.6/xfs_ioctl32.c @@ -46,6 +46,7 @@ #include "xfs_attr.h" #include "xfs_ioctl.h" #include "xfs_ioctl32.h" +#include "xfs_trace.h" #define _NATIVE_IOC(cmd, type) \ _IOC(_IOC_DIR(cmd), _IOC_TYPE(cmd), _IOC_NR(cmd), sizeof(type)) diff --git a/fs/xfs/linux-2.6/xfs_iops.c b/fs/xfs/linux-2.6/xfs_iops.c index 1f3b4b8f7dd..1d5b298ba8b 100644 --- a/fs/xfs/linux-2.6/xfs_iops.c +++ b/fs/xfs/linux-2.6/xfs_iops.c @@ -47,6 +47,7 @@ #include "xfs_buf_item.h" #include "xfs_utils.h" #include "xfs_vnodeops.h" +#include "xfs_trace.h" #include <linux/capability.h> #include <linux/xattr.h> diff --git a/fs/xfs/linux-2.6/xfs_linux.h b/fs/xfs/linux-2.6/xfs_linux.h index 6127e24062d..5af0c81ca1a 100644 --- a/fs/xfs/linux-2.6/xfs_linux.h +++ b/fs/xfs/linux-2.6/xfs_linux.h @@ -40,7 +40,6 @@ #include <sv.h> #include <time.h> -#include <support/ktrace.h> #include <support/debug.h> #include <support/uuid.h> diff --git a/fs/xfs/linux-2.6/xfs_lrw.c b/fs/xfs/linux-2.6/xfs_lrw.c index 1bf47f219c9..0d32457abef 100644 --- a/fs/xfs/linux-2.6/xfs_lrw.c +++ b/fs/xfs/linux-2.6/xfs_lrw.c @@ -48,73 +48,12 @@ #include "xfs_utils.h" #include "xfs_iomap.h" #include "xfs_vnodeops.h" +#include "xfs_trace.h" #include <linux/capability.h> #include <linux/writeback.h> -#if defined(XFS_RW_TRACE) -void -xfs_rw_enter_trace( - int tag, - xfs_inode_t *ip, - void *data, - size_t segs, - loff_t offset, - int ioflags) -{ - if (ip->i_rwtrace == NULL) - return; - ktrace_enter(ip->i_rwtrace, - (void *)(unsigned long)tag, - (void *)ip, - (void *)((unsigned long)((ip->i_d.di_size >> 32) & 0xffffffff)), - (void *)((unsigned long)(ip->i_d.di_size & 0xffffffff)), - (void *)data, - (void *)((unsigned long)segs), - (void *)((unsigned long)((offset >> 32) & 0xffffffff)), - (void *)((unsigned long)(offset & 0xffffffff)), - (void *)((unsigned long)ioflags), - (void *)((unsigned long)((ip->i_new_size >> 32) & 0xffffffff)), - (void *)((unsigned long)(ip->i_new_size & 0xffffffff)), - (void *)((unsigned long)current_pid()), - (void *)NULL, - (void *)NULL, - (void *)NULL, - (void *)NULL); -} - -void -xfs_inval_cached_trace( - xfs_inode_t *ip, - xfs_off_t offset, - xfs_off_t len, - xfs_off_t 
first, - xfs_off_t last) -{ - - if (ip->i_rwtrace == NULL) - return; - ktrace_enter(ip->i_rwtrace, - (void *)(__psint_t)XFS_INVAL_CACHED, - (void *)ip, - (void *)((unsigned long)((offset >> 32) & 0xffffffff)), - (void *)((unsigned long)(offset & 0xffffffff)), - (void *)((unsigned long)((len >> 32) & 0xffffffff)), - (void *)((unsigned long)(len & 0xffffffff)), - (void *)((unsigned long)((first >> 32) & 0xffffffff)), - (void *)((unsigned long)(first & 0xffffffff)), - (void *)((unsigned long)((last >> 32) & 0xffffffff)), - (void *)((unsigned long)(last & 0xffffffff)), - (void *)((unsigned long)current_pid()), - (void *)NULL, - (void *)NULL, - (void *)NULL, - (void *)NULL, - (void *)NULL); -} -#endif - /* * xfs_iozero * @@ -250,8 +189,7 @@ xfs_read( } } - xfs_rw_enter_trace(XFS_READ_ENTER, ip, - (void *)iovp, segs, *offset, ioflags); + trace_xfs_file_read(ip, size, *offset, ioflags); iocb->ki_pos = *offset; ret = generic_file_aio_read(iocb, iovp, segs, *offset); @@ -292,8 +230,9 @@ xfs_splice_read( return -error; } } - xfs_rw_enter_trace(XFS_SPLICE_READ_ENTER, ip, - pipe, count, *ppos, ioflags); + + trace_xfs_file_splice_read(ip, count, *ppos, ioflags); + ret = generic_file_splice_read(infilp, ppos, pipe, count, flags); if (ret > 0) XFS_STATS_ADD(xs_read_bytes, ret); @@ -342,8 +281,8 @@ xfs_splice_write( ip->i_new_size = new_size; xfs_iunlock(ip, XFS_ILOCK_EXCL); - xfs_rw_enter_trace(XFS_SPLICE_WRITE_ENTER, ip, - pipe, count, *ppos, ioflags); + trace_xfs_file_splice_write(ip, count, *ppos, ioflags); + ret = generic_file_splice_write(pipe, outfilp, ppos, count, flags); if (ret > 0) XFS_STATS_ADD(xs_write_bytes, ret); @@ -710,8 +649,6 @@ start: if ((ioflags & IO_ISDIRECT)) { if (mapping->nrpages) { WARN_ON(need_i_mutex == 0); - xfs_inval_cached_trace(xip, pos, -1, - (pos & PAGE_CACHE_MASK), -1); error = xfs_flushinval_pages(xip, (pos & PAGE_CACHE_MASK), -1, FI_REMAPF_LOCKED); @@ -728,8 +665,7 @@ start: need_i_mutex = 0; } - xfs_rw_enter_trace(XFS_DIOWR_ENTER, xip, (void *)iovp, segs, - *offset, ioflags); + trace_xfs_file_direct_write(xip, count, *offset, ioflags); ret = generic_file_direct_write(iocb, iovp, &segs, pos, offset, count, ocount); @@ -752,8 +688,7 @@ start: ssize_t ret2 = 0; write_retry: - xfs_rw_enter_trace(XFS_WRITE_ENTER, xip, (void *)iovp, segs, - *offset, ioflags); + trace_xfs_file_buffered_write(xip, count, *offset, ioflags); ret2 = generic_file_buffered_write(iocb, iovp, segs, pos, offset, count, ret); /* @@ -858,7 +793,7 @@ int xfs_bdstrat_cb(struct xfs_buf *bp) { if (XFS_FORCED_SHUTDOWN(bp->b_mount)) { - xfs_buftrace("XFS__BDSTRAT IOERROR", bp); + trace_xfs_bdstrat_shut(bp, _RET_IP_); /* * Metadata write that didn't get logged but * written delayed anyway. These aren't associated @@ -891,7 +826,7 @@ xfsbdstrat( return; } - xfs_buftrace("XFSBDSTRAT IOERROR", bp); + trace_xfs_bdstrat_shut(bp, _RET_IP_); xfs_bioerror_relse(bp); } diff --git a/fs/xfs/linux-2.6/xfs_lrw.h b/fs/xfs/linux-2.6/xfs_lrw.h index e6be37dbd0e..d1f7789c7ff 100644 --- a/fs/xfs/linux-2.6/xfs_lrw.h +++ b/fs/xfs/linux-2.6/xfs_lrw.h @@ -20,52 +20,7 @@ struct xfs_mount; struct xfs_inode; -struct xfs_bmbt_irec; struct xfs_buf; -struct xfs_iomap; - -#if defined(XFS_RW_TRACE) -/* - * Defines for the trace mechanisms in xfs_lrw.c. 
- */ -#define XFS_RW_KTRACE_SIZE 128 - -#define XFS_READ_ENTER 1 -#define XFS_WRITE_ENTER 2 -#define XFS_IOMAP_READ_ENTER 3 -#define XFS_IOMAP_WRITE_ENTER 4 -#define XFS_IOMAP_READ_MAP 5 -#define XFS_IOMAP_WRITE_MAP 6 -#define XFS_IOMAP_WRITE_NOSPACE 7 -#define XFS_ITRUNC_START 8 -#define XFS_ITRUNC_FINISH1 9 -#define XFS_ITRUNC_FINISH2 10 -#define XFS_CTRUNC1 11 -#define XFS_CTRUNC2 12 -#define XFS_CTRUNC3 13 -#define XFS_CTRUNC4 14 -#define XFS_CTRUNC5 15 -#define XFS_CTRUNC6 16 -#define XFS_BUNMAP 17 -#define XFS_INVAL_CACHED 18 -#define XFS_DIORD_ENTER 19 -#define XFS_DIOWR_ENTER 20 -#define XFS_WRITEPAGE_ENTER 22 -#define XFS_RELEASEPAGE_ENTER 23 -#define XFS_INVALIDPAGE_ENTER 24 -#define XFS_IOMAP_ALLOC_ENTER 25 -#define XFS_IOMAP_ALLOC_MAP 26 -#define XFS_IOMAP_UNWRITTEN 27 -#define XFS_SPLICE_READ_ENTER 28 -#define XFS_SPLICE_WRITE_ENTER 29 -extern void xfs_rw_enter_trace(int, struct xfs_inode *, - void *, size_t, loff_t, int); -extern void xfs_inval_cached_trace(struct xfs_inode *, - xfs_off_t, xfs_off_t, xfs_off_t, xfs_off_t); -#else -#define xfs_rw_enter_trace(tag, ip, data, size, offset, ioflags) -#define xfs_inval_cached_trace(ip, offset, len, first, last) -#endif /* errors from xfsbdstrat() must be extracted from the buffer */ extern void xfsbdstrat(struct xfs_mount *, struct xfs_buf *); diff --git a/fs/xfs/linux-2.6/xfs_super.c b/fs/xfs/linux-2.6/xfs_super.c index 1bfb0e98019..09783cc444a 100644 --- a/fs/xfs/linux-2.6/xfs_super.c +++ b/fs/xfs/linux-2.6/xfs_super.c @@ -15,6 +15,7 @@ * along with this program; if not, write the Free Software Foundation, * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */ + #include "xfs.h" #include "xfs_bit.h" #include "xfs_log.h" @@ -52,11 +53,11 @@ #include "xfs_trans_priv.h" #include "xfs_filestream.h" #include "xfs_da_btree.h" -#include "xfs_dir2_trace.h" #include "xfs_extfree_item.h" #include "xfs_mru_cache.h" #include "xfs_inode_item.h" #include "xfs_sync.h" +#include "xfs_trace.h" #include <linux/namei.h> #include <linux/init.h> @@ -1525,8 +1526,6 @@ xfs_fs_fill_super( goto fail_vnrele; kfree(mtpt); - - xfs_itrace_exit(XFS_I(sb->s_root->d_inode)); return 0; out_filestream_unmount: @@ -1602,94 +1601,6 @@ static struct file_system_type xfs_fs_type = { }; STATIC int __init -xfs_alloc_trace_bufs(void) -{ -#ifdef XFS_ALLOC_TRACE - xfs_alloc_trace_buf = ktrace_alloc(XFS_ALLOC_TRACE_SIZE, KM_MAYFAIL); - if (!xfs_alloc_trace_buf) - goto out; -#endif -#ifdef XFS_BMAP_TRACE - xfs_bmap_trace_buf = ktrace_alloc(XFS_BMAP_TRACE_SIZE, KM_MAYFAIL); - if (!xfs_bmap_trace_buf) - goto out_free_alloc_trace; -#endif -#ifdef XFS_BTREE_TRACE - xfs_allocbt_trace_buf = ktrace_alloc(XFS_ALLOCBT_TRACE_SIZE, - KM_MAYFAIL); - if (!xfs_allocbt_trace_buf) - goto out_free_bmap_trace; - - xfs_inobt_trace_buf = ktrace_alloc(XFS_INOBT_TRACE_SIZE, KM_MAYFAIL); - if (!xfs_inobt_trace_buf) - goto out_free_allocbt_trace; - - xfs_bmbt_trace_buf = ktrace_alloc(XFS_BMBT_TRACE_SIZE, KM_MAYFAIL); - if (!xfs_bmbt_trace_buf) - goto out_free_inobt_trace; -#endif -#ifdef XFS_ATTR_TRACE - xfs_attr_trace_buf = ktrace_alloc(XFS_ATTR_TRACE_SIZE, KM_MAYFAIL); - if (!xfs_attr_trace_buf) - goto out_free_bmbt_trace; -#endif -#ifdef XFS_DIR2_TRACE - xfs_dir2_trace_buf = ktrace_alloc(XFS_DIR2_GTRACE_SIZE, KM_MAYFAIL); - if (!xfs_dir2_trace_buf) - goto out_free_attr_trace; -#endif - - return 0; - -#ifdef XFS_DIR2_TRACE - out_free_attr_trace: -#endif -#ifdef XFS_ATTR_TRACE - ktrace_free(xfs_attr_trace_buf); - out_free_bmbt_trace: -#endif -#ifdef XFS_BTREE_TRACE - 
ktrace_free(xfs_bmbt_trace_buf); - out_free_inobt_trace: - ktrace_free(xfs_inobt_trace_buf); - out_free_allocbt_trace: - ktrace_free(xfs_allocbt_trace_buf); - out_free_bmap_trace: -#endif -#ifdef XFS_BMAP_TRACE - ktrace_free(xfs_bmap_trace_buf); - out_free_alloc_trace: -#endif -#ifdef XFS_ALLOC_TRACE - ktrace_free(xfs_alloc_trace_buf); - out: -#endif - return -ENOMEM; -} - -STATIC void -xfs_free_trace_bufs(void) -{ -#ifdef XFS_DIR2_TRACE - ktrace_free(xfs_dir2_trace_buf); -#endif -#ifdef XFS_ATTR_TRACE - ktrace_free(xfs_attr_trace_buf); -#endif -#ifdef XFS_BTREE_TRACE - ktrace_free(xfs_bmbt_trace_buf); - ktrace_free(xfs_inobt_trace_buf); - ktrace_free(xfs_allocbt_trace_buf); -#endif -#ifdef XFS_BMAP_TRACE - ktrace_free(xfs_bmap_trace_buf); -#endif -#ifdef XFS_ALLOC_TRACE - ktrace_free(xfs_alloc_trace_buf); -#endif -} - -STATIC int __init xfs_init_zones(void) { @@ -1830,7 +1741,6 @@ init_xfs_fs(void) printk(KERN_INFO XFS_VERSION_STRING " with " XFS_BUILD_OPTIONS " enabled\n"); - ktrace_init(64); xfs_ioend_init(); xfs_dir_startup(); @@ -1838,13 +1748,9 @@ init_xfs_fs(void) if (error) goto out; - error = xfs_alloc_trace_bufs(); - if (error) - goto out_destroy_zones; - error = xfs_mru_cache_init(); if (error) - goto out_free_trace_buffers; + goto out_destroy_zones; error = xfs_filestream_init(); if (error) @@ -1879,8 +1785,6 @@ init_xfs_fs(void) xfs_filestream_uninit(); out_mru_cache_uninit: xfs_mru_cache_uninit(); - out_free_trace_buffers: - xfs_free_trace_bufs(); out_destroy_zones: xfs_destroy_zones(); out: @@ -1897,9 +1801,7 @@ exit_xfs_fs(void) xfs_buf_terminate(); xfs_filestream_uninit(); xfs_mru_cache_uninit(); - xfs_free_trace_bufs(); xfs_destroy_zones(); - ktrace_uninit(); } module_init(init_xfs_fs); diff --git a/fs/xfs/linux-2.6/xfs_super.h b/fs/xfs/linux-2.6/xfs_super.h index 18175ebd58e..233d4b9881b 100644 --- a/fs/xfs/linux-2.6/xfs_super.h +++ b/fs/xfs/linux-2.6/xfs_super.h @@ -56,12 +56,6 @@ extern void xfs_qm_exit(void); # define XFS_BIGFS_STRING #endif -#ifdef CONFIG_XFS_TRACE -# define XFS_TRACE_STRING "tracing, " -#else -# define XFS_TRACE_STRING -#endif - #ifdef CONFIG_XFS_DMAPI # define XFS_DMAPI_STRING "dmapi support, " #else @@ -78,7 +72,6 @@ extern void xfs_qm_exit(void); XFS_SECURITY_STRING \ XFS_REALTIME_STRING \ XFS_BIGFS_STRING \ - XFS_TRACE_STRING \ XFS_DMAPI_STRING \ XFS_DBG_STRING /* DBG must be last */ diff --git a/fs/xfs/linux-2.6/xfs_sync.c b/fs/xfs/linux-2.6/xfs_sync.c index d895a3a960f..6fed97a8cd3 100644 --- a/fs/xfs/linux-2.6/xfs_sync.c +++ b/fs/xfs/linux-2.6/xfs_sync.c @@ -44,6 +44,7 @@ #include "xfs_inode_item.h" #include "xfs_rw.h" #include "xfs_quota.h" +#include "xfs_trace.h" #include <linux/kthread.h> #include <linux/freezer.h> diff --git a/fs/xfs/linux-2.6/xfs_trace.c b/fs/xfs/linux-2.6/xfs_trace.c new file mode 100644 index 00000000000..856eb3c8d60 --- /dev/null +++ b/fs/xfs/linux-2.6/xfs_trace.c @@ -0,0 +1,75 @@ +/* + * Copyright (c) 2009, Christoph Hellwig + * All Rights Reserved. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it would be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + */ +#include "xfs.h" +#include "xfs_fs.h" +#include "xfs_types.h" +#include "xfs_bit.h" +#include "xfs_log.h" +#include "xfs_inum.h" +#include "xfs_trans.h" +#include "xfs_sb.h" +#include "xfs_ag.h" +#include "xfs_dir2.h" +#include "xfs_da_btree.h" +#include "xfs_bmap_btree.h" +#include "xfs_alloc_btree.h" +#include "xfs_ialloc_btree.h" +#include "xfs_dir2_sf.h" +#include "xfs_attr_sf.h" +#include "xfs_dinode.h" +#include "xfs_inode.h" +#include "xfs_btree.h" +#include "xfs_dmapi.h" +#include "xfs_mount.h" +#include "xfs_ialloc.h" +#include "xfs_itable.h" +#include "xfs_alloc.h" +#include "xfs_bmap.h" +#include "xfs_attr.h" +#include "xfs_attr_sf.h" +#include "xfs_attr_leaf.h" +#include "xfs_log_priv.h" +#include "xfs_buf_item.h" +#include "xfs_quota.h" +#include "xfs_iomap.h" +#include "xfs_aops.h" +#include "quota/xfs_dquot_item.h" +#include "quota/xfs_dquot.h" + +/* + * Format fsblock number into a static buffer & return it. + */ +STATIC char *xfs_fmtfsblock(xfs_fsblock_t bno) +{ + static char rval[50]; + + if (bno == NULLFSBLOCK) + sprintf(rval, "NULLFSBLOCK"); + else if (isnullstartblock(bno)) + sprintf(rval, "NULLSTARTBLOCK(%lld)", startblockval(bno)); + else + sprintf(rval, "%lld", (xfs_dfsbno_t)bno); + return rval; +} + +/* + * We include this last to have the helpers above available for the trace + * event implementations. + */ +#define CREATE_TRACE_POINTS +#include "xfs_trace.h" diff --git a/fs/xfs/linux-2.6/xfs_trace.h b/fs/xfs/linux-2.6/xfs_trace.h new file mode 100644 index 00000000000..c40834bdee5 --- /dev/null +++ b/fs/xfs/linux-2.6/xfs_trace.h @@ -0,0 +1,1369 @@ +/* + * Copyright (c) 2009, Christoph Hellwig + * All Rights Reserved. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it would be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + */ +#undef TRACE_SYSTEM +#define TRACE_SYSTEM xfs + +#if !defined(_TRACE_XFS_H) || defined(TRACE_HEADER_MULTI_READ) +#define _TRACE_XFS_H + +#include <linux/tracepoint.h> + +struct xfs_agf; +struct xfs_alloc_arg; +struct xfs_attr_list_context; +struct xfs_buf_log_item; +struct xfs_da_args; +struct xfs_da_node_entry; +struct xfs_dquot; +struct xlog_ticket; +struct log; + +#define DEFINE_ATTR_LIST_EVENT(name) \ +TRACE_EVENT(name, \ + TP_PROTO(struct xfs_attr_list_context *ctx), \ + TP_ARGS(ctx), \ + TP_STRUCT__entry( \ + __field(dev_t, dev) \ + __field(xfs_ino_t, ino) \ + __field(u32, hashval) \ + __field(u32, blkno) \ + __field(u32, offset) \ + __field(void *, alist) \ + __field(int, bufsize) \ + __field(int, count) \ + __field(int, firstu) \ + __field(int, dupcnt) \ + __field(int, flags) \ + ), \ + TP_fast_assign( \ + __entry->dev = VFS_I(ctx->dp)->i_sb->s_dev; \ + __entry->ino = ctx->dp->i_ino; \ + __entry->hashval = ctx->cursor->hashval; \ + __entry->blkno = ctx->cursor->blkno; \ + __entry->offset = ctx->cursor->offset; \ + __entry->alist = ctx->alist; \ + __entry->bufsize = ctx->bufsize; \ + __entry->count = ctx->count; \ + __entry->firstu = ctx->firstu; \ + __entry->flags = ctx->flags; \ + ), \ + TP_printk("dev %d:%d ino 0x%llx cursor h/b/o 0x%x/0x%x/%u dupcnt %u " \ + "alist 0x%p size %u count %u firstu %u flags %d %s", \ + MAJOR(__entry->dev), MINOR(__entry->dev), \ + __entry->ino, \ + __entry->hashval, \ + __entry->blkno, \ + __entry->offset, \ + __entry->dupcnt, \ + __entry->alist, \ + __entry->bufsize, \ + __entry->count, \ + __entry->firstu, \ + __entry->flags, \ + __print_flags(__entry->flags, "|", XFS_ATTR_FLAGS) \ + ) \ +) +DEFINE_ATTR_LIST_EVENT(xfs_attr_list_sf); +DEFINE_ATTR_LIST_EVENT(xfs_attr_list_sf_all); +DEFINE_ATTR_LIST_EVENT(xfs_attr_list_leaf); +DEFINE_ATTR_LIST_EVENT(xfs_attr_list_leaf_end); +DEFINE_ATTR_LIST_EVENT(xfs_attr_list_full); +DEFINE_ATTR_LIST_EVENT(xfs_attr_list_add); +DEFINE_ATTR_LIST_EVENT(xfs_attr_list_wrong_blk); +DEFINE_ATTR_LIST_EVENT(xfs_attr_list_notfound); + +TRACE_EVENT(xfs_attr_list_node_descend, + TP_PROTO(struct xfs_attr_list_context *ctx, + struct xfs_da_node_entry *btree), + TP_ARGS(ctx, btree), + TP_STRUCT__entry( + __field(dev_t, dev) + __field(xfs_ino_t, ino) + __field(u32, hashval) + __field(u32, blkno) + __field(u32, offset) + __field(void *, alist) + __field(int, bufsize) + __field(int, count) + __field(int, firstu) + __field(int, dupcnt) + __field(int, flags) + __field(u32, bt_hashval) + __field(u32, bt_before) + ), + TP_fast_assign( + __entry->dev = VFS_I(ctx->dp)->i_sb->s_dev; + __entry->ino = ctx->dp->i_ino; + __entry->hashval = ctx->cursor->hashval; + __entry->blkno = ctx->cursor->blkno; + __entry->offset = ctx->cursor->offset; + __entry->alist = ctx->alist; + __entry->bufsize = ctx->bufsize; + __entry->count = ctx->count; + __entry->firstu = ctx->firstu; + __entry->flags = ctx->flags; + __entry->bt_hashval = be32_to_cpu(btree->hashval); + __entry->bt_before = be32_to_cpu(btree->before); + ), + TP_printk("dev %d:%d ino 0x%llx cursor h/b/o 0x%x/0x%x/%u dupcnt %u " + "alist 0x%p size %u count %u firstu %u flags %d %s " + "node hashval %u, node before %u", + MAJOR(__entry->dev), MINOR(__entry->dev), + __entry->ino, + __entry->hashval, + __entry->blkno, + __entry->offset, + __entry->dupcnt, + __entry->alist, + 
__entry->bufsize, + __entry->count, + __entry->firstu, + __entry->flags, + __print_flags(__entry->flags, "|", XFS_ATTR_FLAGS), + __entry->bt_hashval, + __entry->bt_before) +); + +TRACE_EVENT(xfs_iext_insert, + TP_PROTO(struct xfs_inode *ip, xfs_extnum_t idx, + struct xfs_bmbt_irec *r, int state, unsigned long caller_ip), + TP_ARGS(ip, idx, r, state, caller_ip), + TP_STRUCT__entry( + __field(dev_t, dev) + __field(xfs_ino_t, ino) + __field(xfs_extnum_t, idx) + __field(xfs_fileoff_t, startoff) + __field(xfs_fsblock_t, startblock) + __field(xfs_filblks_t, blockcount) + __field(xfs_exntst_t, state) + __field(int, bmap_state) + __field(unsigned long, caller_ip) + ), + TP_fast_assign( + __entry->dev = VFS_I(ip)->i_sb->s_dev; + __entry->ino = ip->i_ino; + __entry->idx = idx; + __entry->startoff = r->br_startoff; + __entry->startblock = r->br_startblock; + __entry->blockcount = r->br_blockcount; + __entry->state = r->br_state; + __entry->bmap_state = state; + __entry->caller_ip = caller_ip; + ), + TP_printk("dev %d:%d ino 0x%llx state %s idx %ld " + "offset %lld block %s count %lld flag %d caller %pf", + MAJOR(__entry->dev), MINOR(__entry->dev), + __entry->ino, + __print_flags(__entry->bmap_state, "|", XFS_BMAP_EXT_FLAGS), + (long)__entry->idx, + __entry->startoff, + xfs_fmtfsblock(__entry->startblock), + __entry->blockcount, + __entry->state, + (char *)__entry->caller_ip) +); + +#define DEFINE_BMAP_EVENT(name) \ +TRACE_EVENT(name, \ + TP_PROTO(struct xfs_inode *ip, xfs_extnum_t idx, int state, \ + unsigned long caller_ip), \ + TP_ARGS(ip, idx, state, caller_ip), \ + TP_STRUCT__entry( \ + __field(dev_t, dev) \ + __field(xfs_ino_t, ino) \ + __field(xfs_extnum_t, idx) \ + __field(xfs_fileoff_t, startoff) \ + __field(xfs_fsblock_t, startblock) \ + __field(xfs_filblks_t, blockcount) \ + __field(xfs_exntst_t, state) \ + __field(int, bmap_state) \ + __field(unsigned long, caller_ip) \ + ), \ + TP_fast_assign( \ + struct xfs_ifork *ifp = (state & BMAP_ATTRFORK) ? 
\ + ip->i_afp : &ip->i_df; \ + struct xfs_bmbt_irec r; \ + \ + xfs_bmbt_get_all(xfs_iext_get_ext(ifp, idx), &r); \ + __entry->dev = VFS_I(ip)->i_sb->s_dev; \ + __entry->ino = ip->i_ino; \ + __entry->idx = idx; \ + __entry->startoff = r.br_startoff; \ + __entry->startblock = r.br_startblock; \ + __entry->blockcount = r.br_blockcount; \ + __entry->state = r.br_state; \ + __entry->bmap_state = state; \ + __entry->caller_ip = caller_ip; \ + ), \ + TP_printk("dev %d:%d ino 0x%llx state %s idx %ld " \ + "offset %lld block %s count %lld flag %d caller %pf", \ + MAJOR(__entry->dev), MINOR(__entry->dev), \ + __entry->ino, \ + __print_flags(__entry->bmap_state, "|", XFS_BMAP_EXT_FLAGS), \ + (long)__entry->idx, \ + __entry->startoff, \ + xfs_fmtfsblock(__entry->startblock), \ + __entry->blockcount, \ + __entry->state, \ + (char *)__entry->caller_ip) \ +) + +DEFINE_BMAP_EVENT(xfs_iext_remove); +DEFINE_BMAP_EVENT(xfs_bmap_pre_update); +DEFINE_BMAP_EVENT(xfs_bmap_post_update); +DEFINE_BMAP_EVENT(xfs_extlist); + +#define DEFINE_BUF_EVENT(tname) \ +TRACE_EVENT(tname, \ + TP_PROTO(struct xfs_buf *bp, unsigned long caller_ip), \ + TP_ARGS(bp, caller_ip), \ + TP_STRUCT__entry( \ + __field(dev_t, dev) \ + __field(xfs_daddr_t, bno) \ + __field(size_t, buffer_length) \ + __field(int, hold) \ + __field(int, pincount) \ + __field(unsigned, lockval) \ + __field(unsigned, flags) \ + __field(unsigned long, caller_ip) \ + ), \ + TP_fast_assign( \ + __entry->dev = bp->b_target->bt_dev; \ + __entry->bno = bp->b_bn; \ + __entry->buffer_length = bp->b_buffer_length; \ + __entry->hold = atomic_read(&bp->b_hold); \ + __entry->pincount = atomic_read(&bp->b_pin_count); \ + __entry->lockval = xfs_buf_lock_value(bp); \ + __entry->flags = bp->b_flags; \ + __entry->caller_ip = caller_ip; \ + ), \ + TP_printk("dev %d:%d bno 0x%llx len 0x%zx hold %d pincount %d " \ + "lock %d flags %s caller %pf", \ + MAJOR(__entry->dev), MINOR(__entry->dev), \ + (unsigned long long)__entry->bno, \ + __entry->buffer_length, \ + __entry->hold, \ + __entry->pincount, \ + __entry->lockval, \ + __print_flags(__entry->flags, "|", XFS_BUF_FLAGS), \ + (void *)__entry->caller_ip) \ +) +DEFINE_BUF_EVENT(xfs_buf_init); +DEFINE_BUF_EVENT(xfs_buf_free); +DEFINE_BUF_EVENT(xfs_buf_hold); +DEFINE_BUF_EVENT(xfs_buf_rele); +DEFINE_BUF_EVENT(xfs_buf_pin); +DEFINE_BUF_EVENT(xfs_buf_unpin); +DEFINE_BUF_EVENT(xfs_buf_iodone); +DEFINE_BUF_EVENT(xfs_buf_iorequest); +DEFINE_BUF_EVENT(xfs_buf_bawrite); +DEFINE_BUF_EVENT(xfs_buf_bdwrite); +DEFINE_BUF_EVENT(xfs_buf_lock); +DEFINE_BUF_EVENT(xfs_buf_lock_done); +DEFINE_BUF_EVENT(xfs_buf_cond_lock); +DEFINE_BUF_EVENT(xfs_buf_unlock); +DEFINE_BUF_EVENT(xfs_buf_ordered_retry); +DEFINE_BUF_EVENT(xfs_buf_iowait); +DEFINE_BUF_EVENT(xfs_buf_iowait_done); +DEFINE_BUF_EVENT(xfs_buf_delwri_queue); +DEFINE_BUF_EVENT(xfs_buf_delwri_dequeue); +DEFINE_BUF_EVENT(xfs_buf_delwri_split); +DEFINE_BUF_EVENT(xfs_buf_get_noaddr); +DEFINE_BUF_EVENT(xfs_bdstrat_shut); +DEFINE_BUF_EVENT(xfs_buf_item_relse); +DEFINE_BUF_EVENT(xfs_buf_item_iodone); +DEFINE_BUF_EVENT(xfs_buf_item_iodone_async); +DEFINE_BUF_EVENT(xfs_buf_error_relse); +DEFINE_BUF_EVENT(xfs_trans_read_buf_io); +DEFINE_BUF_EVENT(xfs_trans_read_buf_shut); + +/* not really buffer traces, but the buf provides useful information */ +DEFINE_BUF_EVENT(xfs_btree_corrupt); +DEFINE_BUF_EVENT(xfs_da_btree_corrupt); +DEFINE_BUF_EVENT(xfs_reset_dqcounts); +DEFINE_BUF_EVENT(xfs_inode_item_push); + +/* pass flags explicitly */ +#define DEFINE_BUF_FLAGS_EVENT(tname) \ +TRACE_EVENT(tname, \ + 
TP_PROTO(struct xfs_buf *bp, unsigned flags, unsigned long caller_ip), \ + TP_ARGS(bp, flags, caller_ip), \ + TP_STRUCT__entry( \ + __field(dev_t, dev) \ + __field(xfs_daddr_t, bno) \ + __field(size_t, buffer_length) \ + __field(int, hold) \ + __field(int, pincount) \ + __field(unsigned, lockval) \ + __field(unsigned, flags) \ + __field(unsigned long, caller_ip) \ + ), \ + TP_fast_assign( \ + __entry->dev = bp->b_target->bt_dev; \ + __entry->bno = bp->b_bn; \ + __entry->buffer_length = bp->b_buffer_length; \ + __entry->flags = flags; \ + __entry->hold = atomic_read(&bp->b_hold); \ + __entry->pincount = atomic_read(&bp->b_pin_count); \ + __entry->lockval = xfs_buf_lock_value(bp); \ + __entry->caller_ip = caller_ip; \ + ), \ + TP_printk("dev %d:%d bno 0x%llx len 0x%zx hold %d pincount %d " \ + "lock %d flags %s caller %pf", \ + MAJOR(__entry->dev), MINOR(__entry->dev), \ + (unsigned long long)__entry->bno, \ + __entry->buffer_length, \ + __entry->hold, \ + __entry->pincount, \ + __entry->lockval, \ + __print_flags(__entry->flags, "|", XFS_BUF_FLAGS), \ + (void *)__entry->caller_ip) \ +) +DEFINE_BUF_FLAGS_EVENT(xfs_buf_find); +DEFINE_BUF_FLAGS_EVENT(xfs_buf_get); +DEFINE_BUF_FLAGS_EVENT(xfs_buf_read); + +TRACE_EVENT(xfs_buf_ioerror, + TP_PROTO(struct xfs_buf *bp, int error, unsigned long caller_ip), + TP_ARGS(bp, error, caller_ip), + TP_STRUCT__entry( + __field(dev_t, dev) + __field(xfs_daddr_t, bno) + __field(size_t, buffer_length) + __field(unsigned, flags) + __field(int, hold) + __field(int, pincount) + __field(unsigned, lockval) + __field(int, error) + __field(unsigned long, caller_ip) + ), + TP_fast_assign( + __entry->dev = bp->b_target->bt_dev; + __entry->bno = bp->b_bn; + __entry->buffer_length = bp->b_buffer_length; + __entry->hold = atomic_read(&bp->b_hold); + __entry->pincount = atomic_read(&bp->b_pin_count); + __entry->lockval = xfs_buf_lock_value(bp); + __entry->error = error; + __entry->flags = bp->b_flags; + __entry->caller_ip = caller_ip; + ), + TP_printk("dev %d:%d bno 0x%llx len 0x%zx hold %d pincount %d " + "lock %d error %d flags %s caller %pf", + MAJOR(__entry->dev), MINOR(__entry->dev), + (unsigned long long)__entry->bno, + __entry->buffer_length, + __entry->hold, + __entry->pincount, + __entry->lockval, + __entry->error, + __print_flags(__entry->flags, "|", XFS_BUF_FLAGS), + (void *)__entry->caller_ip) +); + +#define DEFINE_BUF_ITEM_EVENT(tname) \ +TRACE_EVENT(tname, \ + TP_PROTO(struct xfs_buf_log_item *bip), \ + TP_ARGS(bip), \ + TP_STRUCT__entry( \ + __field(dev_t, dev) \ + __field(xfs_daddr_t, buf_bno) \ + __field(size_t, buf_len) \ + __field(int, buf_hold) \ + __field(int, buf_pincount) \ + __field(int, buf_lockval) \ + __field(unsigned, buf_flags) \ + __field(unsigned, bli_recur) \ + __field(int, bli_refcount) \ + __field(unsigned, bli_flags) \ + __field(void *, li_desc) \ + __field(unsigned, li_flags) \ + ), \ + TP_fast_assign( \ + __entry->dev = bip->bli_buf->b_target->bt_dev; \ + __entry->bli_flags = bip->bli_flags; \ + __entry->bli_recur = bip->bli_recur; \ + __entry->bli_refcount = atomic_read(&bip->bli_refcount); \ + __entry->buf_bno = bip->bli_buf->b_bn; \ + __entry->buf_len = bip->bli_buf->b_buffer_length; \ + __entry->buf_flags = bip->bli_buf->b_flags; \ + __entry->buf_hold = atomic_read(&bip->bli_buf->b_hold); \ + __entry->buf_pincount = \ + atomic_read(&bip->bli_buf->b_pin_count); \ + __entry->buf_lockval = xfs_buf_lock_value(bip->bli_buf); \ + __entry->li_desc = bip->bli_item.li_desc; \ + __entry->li_flags = bip->bli_item.li_flags; \ + ), \ + 
TP_printk("dev %d:%d bno 0x%llx len 0x%zx hold %d pincount %d " \ + "lock %d flags %s recur %d refcount %d bliflags %s " \ + "lidesc 0x%p liflags %s", \ + MAJOR(__entry->dev), MINOR(__entry->dev), \ + (unsigned long long)__entry->buf_bno, \ + __entry->buf_len, \ + __entry->buf_hold, \ + __entry->buf_pincount, \ + __entry->buf_lockval, \ + __print_flags(__entry->buf_flags, "|", XFS_BUF_FLAGS), \ + __entry->bli_recur, \ + __entry->bli_refcount, \ + __print_flags(__entry->bli_flags, "|", XFS_BLI_FLAGS), \ + __entry->li_desc, \ + __print_flags(__entry->li_flags, "|", XFS_LI_FLAGS)) \ +) +DEFINE_BUF_ITEM_EVENT(xfs_buf_item_size); +DEFINE_BUF_ITEM_EVENT(xfs_buf_item_size_stale); +DEFINE_BUF_ITEM_EVENT(xfs_buf_item_format); +DEFINE_BUF_ITEM_EVENT(xfs_buf_item_format_stale); +DEFINE_BUF_ITEM_EVENT(xfs_buf_item_pin); +DEFINE_BUF_ITEM_EVENT(xfs_buf_item_unpin); +DEFINE_BUF_ITEM_EVENT(xfs_buf_item_unpin_stale); +DEFINE_BUF_ITEM_EVENT(xfs_buf_item_trylock); +DEFINE_BUF_ITEM_EVENT(xfs_buf_item_unlock); +DEFINE_BUF_ITEM_EVENT(xfs_buf_item_unlock_stale); +DEFINE_BUF_ITEM_EVENT(xfs_buf_item_committed); +DEFINE_BUF_ITEM_EVENT(xfs_buf_item_push); +DEFINE_BUF_ITEM_EVENT(xfs_trans_get_buf); +DEFINE_BUF_ITEM_EVENT(xfs_trans_get_buf_recur); +DEFINE_BUF_ITEM_EVENT(xfs_trans_getsb); +DEFINE_BUF_ITEM_EVENT(xfs_trans_getsb_recur); +DEFINE_BUF_ITEM_EVENT(xfs_trans_read_buf); +DEFINE_BUF_ITEM_EVENT(xfs_trans_read_buf_recur); +DEFINE_BUF_ITEM_EVENT(xfs_trans_log_buf); +DEFINE_BUF_ITEM_EVENT(xfs_trans_brelse); +DEFINE_BUF_ITEM_EVENT(xfs_trans_bjoin); +DEFINE_BUF_ITEM_EVENT(xfs_trans_bhold); +DEFINE_BUF_ITEM_EVENT(xfs_trans_bhold_release); +DEFINE_BUF_ITEM_EVENT(xfs_trans_binval); + +#define DEFINE_LOCK_EVENT(name) \ +TRACE_EVENT(name, \ + TP_PROTO(struct xfs_inode *ip, unsigned lock_flags, \ + unsigned long caller_ip), \ + TP_ARGS(ip, lock_flags, caller_ip), \ + TP_STRUCT__entry( \ + __field(dev_t, dev) \ + __field(xfs_ino_t, ino) \ + __field(int, lock_flags) \ + __field(unsigned long, caller_ip) \ + ), \ + TP_fast_assign( \ + __entry->dev = VFS_I(ip)->i_sb->s_dev; \ + __entry->ino = ip->i_ino; \ + __entry->lock_flags = lock_flags; \ + __entry->caller_ip = caller_ip; \ + ), \ + TP_printk("dev %d:%d ino 0x%llx flags %s caller %pf", \ + MAJOR(__entry->dev), MINOR(__entry->dev), \ + __entry->ino, \ + __print_flags(__entry->lock_flags, "|", XFS_LOCK_FLAGS), \ + (void *)__entry->caller_ip) \ +) + +DEFINE_LOCK_EVENT(xfs_ilock); +DEFINE_LOCK_EVENT(xfs_ilock_nowait); +DEFINE_LOCK_EVENT(xfs_ilock_demote); +DEFINE_LOCK_EVENT(xfs_iunlock); + +#define DEFINE_IGET_EVENT(name) \ +TRACE_EVENT(name, \ + TP_PROTO(struct xfs_inode *ip), \ + TP_ARGS(ip), \ + TP_STRUCT__entry( \ + __field(dev_t, dev) \ + __field(xfs_ino_t, ino) \ + ), \ + TP_fast_assign( \ + __entry->dev = VFS_I(ip)->i_sb->s_dev; \ + __entry->ino = ip->i_ino; \ + ), \ + TP_printk("dev %d:%d ino 0x%llx", \ + MAJOR(__entry->dev), MINOR(__entry->dev), \ + __entry->ino) \ +) +DEFINE_IGET_EVENT(xfs_iget_skip); +DEFINE_IGET_EVENT(xfs_iget_reclaim); +DEFINE_IGET_EVENT(xfs_iget_found); +DEFINE_IGET_EVENT(xfs_iget_alloc); + +#define DEFINE_INODE_EVENT(name) \ +TRACE_EVENT(name, \ + TP_PROTO(struct xfs_inode *ip, unsigned long caller_ip), \ + TP_ARGS(ip, caller_ip), \ + TP_STRUCT__entry( \ + __field(dev_t, dev) \ + __field(xfs_ino_t, ino) \ + __field(int, count) \ + __field(unsigned long, caller_ip) \ + ), \ + TP_fast_assign( \ + __entry->dev = VFS_I(ip)->i_sb->s_dev; \ + __entry->ino = ip->i_ino; \ + __entry->count = atomic_read(&VFS_I(ip)->i_count); \ + __entry->caller_ip = 
caller_ip; \ + ), \ + TP_printk("dev %d:%d ino 0x%llx count %d caller %pf", \ + MAJOR(__entry->dev), MINOR(__entry->dev), \ + __entry->ino, \ + __entry->count, \ + (char *)__entry->caller_ip) \ +) +DEFINE_INODE_EVENT(xfs_ihold); +DEFINE_INODE_EVENT(xfs_irele); +/* the old xfs_itrace_entry tracer - to be replaced by s.th. in the VFS */ +DEFINE_INODE_EVENT(xfs_inode); +#define xfs_itrace_entry(ip) \ + trace_xfs_inode(ip, _THIS_IP_) + +#define DEFINE_DQUOT_EVENT(tname) \ +TRACE_EVENT(tname, \ + TP_PROTO(struct xfs_dquot *dqp), \ + TP_ARGS(dqp), \ + TP_STRUCT__entry( \ + __field(dev_t, dev) \ + __field(__be32, id) \ + __field(unsigned, flags) \ + __field(unsigned, nrefs) \ + __field(unsigned long long, res_bcount) \ + __field(unsigned long long, bcount) \ + __field(unsigned long long, icount) \ + __field(unsigned long long, blk_hardlimit) \ + __field(unsigned long long, blk_softlimit) \ + __field(unsigned long long, ino_hardlimit) \ + __field(unsigned long long, ino_softlimit) \ + ), \ + TP_fast_assign( \ + __entry->dev = dqp->q_mount->m_super->s_dev; \ + __entry->id = dqp->q_core.d_id; \ + __entry->flags = dqp->dq_flags; \ + __entry->nrefs = dqp->q_nrefs; \ + __entry->res_bcount = dqp->q_res_bcount; \ + __entry->bcount = be64_to_cpu(dqp->q_core.d_bcount); \ + __entry->icount = be64_to_cpu(dqp->q_core.d_icount); \ + __entry->blk_hardlimit = \ + be64_to_cpu(dqp->q_core.d_blk_hardlimit); \ + __entry->blk_softlimit = \ + be64_to_cpu(dqp->q_core.d_blk_softlimit); \ + __entry->ino_hardlimit = \ + be64_to_cpu(dqp->q_core.d_ino_hardlimit); \ + __entry->ino_softlimit = \ + be64_to_cpu(dqp->q_core.d_ino_softlimit); \ + ), \ + TP_printk("dev %d:%d id 0x%x flags %s nrefs %u res_bc 0x%llx " \ + "bcnt 0x%llx [hard 0x%llx | soft 0x%llx] " \ + "icnt 0x%llx [hard 0x%llx | soft 0x%llx]", \ + MAJOR(__entry->dev), MINOR(__entry->dev), \ + be32_to_cpu(__entry->id), \ + __print_flags(__entry->flags, "|", XFS_DQ_FLAGS), \ + __entry->nrefs, \ + __entry->res_bcount, \ + __entry->bcount, \ + __entry->blk_hardlimit, \ + __entry->blk_softlimit, \ + __entry->icount, \ + __entry->ino_hardlimit, \ + __entry->ino_softlimit) \ +) +DEFINE_DQUOT_EVENT(xfs_dqadjust); +DEFINE_DQUOT_EVENT(xfs_dqshake_dirty); +DEFINE_DQUOT_EVENT(xfs_dqshake_unlink); +DEFINE_DQUOT_EVENT(xfs_dqreclaim_want); +DEFINE_DQUOT_EVENT(xfs_dqreclaim_dirty); +DEFINE_DQUOT_EVENT(xfs_dqreclaim_unlink); +DEFINE_DQUOT_EVENT(xfs_dqattach_found); +DEFINE_DQUOT_EVENT(xfs_dqattach_get); +DEFINE_DQUOT_EVENT(xfs_dqinit); +DEFINE_DQUOT_EVENT(xfs_dqreuse); +DEFINE_DQUOT_EVENT(xfs_dqalloc); +DEFINE_DQUOT_EVENT(xfs_dqtobp_read); +DEFINE_DQUOT_EVENT(xfs_dqread); +DEFINE_DQUOT_EVENT(xfs_dqread_fail); +DEFINE_DQUOT_EVENT(xfs_dqlookup_found); +DEFINE_DQUOT_EVENT(xfs_dqlookup_want); +DEFINE_DQUOT_EVENT(xfs_dqlookup_freelist); +DEFINE_DQUOT_EVENT(xfs_dqlookup_move); +DEFINE_DQUOT_EVENT(xfs_dqlookup_done); +DEFINE_DQUOT_EVENT(xfs_dqget_hit); +DEFINE_DQUOT_EVENT(xfs_dqget_miss); +DEFINE_DQUOT_EVENT(xfs_dqput); +DEFINE_DQUOT_EVENT(xfs_dqput_wait); +DEFINE_DQUOT_EVENT(xfs_dqput_free); +DEFINE_DQUOT_EVENT(xfs_dqrele); +DEFINE_DQUOT_EVENT(xfs_dqflush); +DEFINE_DQUOT_EVENT(xfs_dqflush_force); +DEFINE_DQUOT_EVENT(xfs_dqflush_done); +/* not really iget events, but we re-use the format */ +DEFINE_IGET_EVENT(xfs_dquot_dqalloc); +DEFINE_IGET_EVENT(xfs_dquot_dqdetach); + + +#define DEFINE_LOGGRANT_EVENT(tname) \ +TRACE_EVENT(tname, \ + TP_PROTO(struct log *log, struct xlog_ticket *tic), \ + TP_ARGS(log, tic), \ + TP_STRUCT__entry( \ + __field(dev_t, dev) \ + __field(unsigned, trans_type) \ 
+ __field(char, ocnt) \ + __field(char, cnt) \ + __field(int, curr_res) \ + __field(int, unit_res) \ + __field(unsigned int, flags) \ + __field(void *, reserve_headq) \ + __field(void *, write_headq) \ + __field(int, grant_reserve_cycle) \ + __field(int, grant_reserve_bytes) \ + __field(int, grant_write_cycle) \ + __field(int, grant_write_bytes) \ + __field(int, curr_cycle) \ + __field(int, curr_block) \ + __field(xfs_lsn_t, tail_lsn) \ + ), \ + TP_fast_assign( \ + __entry->dev = log->l_mp->m_super->s_dev; \ + __entry->trans_type = tic->t_trans_type; \ + __entry->ocnt = tic->t_ocnt; \ + __entry->cnt = tic->t_cnt; \ + __entry->curr_res = tic->t_curr_res; \ + __entry->unit_res = tic->t_unit_res; \ + __entry->flags = tic->t_flags; \ + __entry->reserve_headq = log->l_reserve_headq; \ + __entry->write_headq = log->l_write_headq; \ + __entry->grant_reserve_cycle = log->l_grant_reserve_cycle; \ + __entry->grant_reserve_bytes = log->l_grant_reserve_bytes; \ + __entry->grant_write_cycle = log->l_grant_write_cycle; \ + __entry->grant_write_bytes = log->l_grant_write_bytes; \ + __entry->curr_cycle = log->l_curr_cycle; \ + __entry->curr_block = log->l_curr_block; \ + __entry->tail_lsn = log->l_tail_lsn; \ + ), \ + TP_printk("dev %d:%d type %s t_ocnt %u t_cnt %u t_curr_res %u " \ + "t_unit_res %u t_flags %s reserve_headq 0x%p " \ + "write_headq 0x%p grant_reserve_cycle %d " \ + "grant_reserve_bytes %d grant_write_cycle %d " \ + "grant_write_bytes %d curr_cycle %d curr_block %d " \ + "tail_cycle %d tail_block %d", \ + MAJOR(__entry->dev), MINOR(__entry->dev), \ + __print_symbolic(__entry->trans_type, XFS_TRANS_TYPES), \ + __entry->ocnt, \ + __entry->cnt, \ + __entry->curr_res, \ + __entry->unit_res, \ + __print_flags(__entry->flags, "|", XLOG_TIC_FLAGS), \ + __entry->reserve_headq, \ + __entry->write_headq, \ + __entry->grant_reserve_cycle, \ + __entry->grant_reserve_bytes, \ + __entry->grant_write_cycle, \ + __entry->grant_write_bytes, \ + __entry->curr_cycle, \ + __entry->curr_block, \ + CYCLE_LSN(__entry->tail_lsn), \ + BLOCK_LSN(__entry->tail_lsn) \ + ) \ +) +DEFINE_LOGGRANT_EVENT(xfs_log_done_nonperm); +DEFINE_LOGGRANT_EVENT(xfs_log_done_perm); +DEFINE_LOGGRANT_EVENT(xfs_log_reserve); +DEFINE_LOGGRANT_EVENT(xfs_log_umount_write); +DEFINE_LOGGRANT_EVENT(xfs_log_grant_enter); +DEFINE_LOGGRANT_EVENT(xfs_log_grant_exit); +DEFINE_LOGGRANT_EVENT(xfs_log_grant_error); +DEFINE_LOGGRANT_EVENT(xfs_log_grant_sleep1); +DEFINE_LOGGRANT_EVENT(xfs_log_grant_wake1); +DEFINE_LOGGRANT_EVENT(xfs_log_grant_sleep2); +DEFINE_LOGGRANT_EVENT(xfs_log_grant_wake2); +DEFINE_LOGGRANT_EVENT(xfs_log_regrant_write_enter); +DEFINE_LOGGRANT_EVENT(xfs_log_regrant_write_exit); +DEFINE_LOGGRANT_EVENT(xfs_log_regrant_write_error); +DEFINE_LOGGRANT_EVENT(xfs_log_regrant_write_sleep1); +DEFINE_LOGGRANT_EVENT(xfs_log_regrant_write_wake1); +DEFINE_LOGGRANT_EVENT(xfs_log_regrant_write_sleep2); +DEFINE_LOGGRANT_EVENT(xfs_log_regrant_write_wake2); +DEFINE_LOGGRANT_EVENT(xfs_log_regrant_reserve_enter); +DEFINE_LOGGRANT_EVENT(xfs_log_regrant_reserve_exit); +DEFINE_LOGGRANT_EVENT(xfs_log_regrant_reserve_sub); +DEFINE_LOGGRANT_EVENT(xfs_log_ungrant_enter); +DEFINE_LOGGRANT_EVENT(xfs_log_ungrant_exit); +DEFINE_LOGGRANT_EVENT(xfs_log_ungrant_sub); + +#define DEFINE_RW_EVENT(name) \ +TRACE_EVENT(name, \ + TP_PROTO(struct xfs_inode *ip, size_t count, loff_t offset, int flags), \ + TP_ARGS(ip, count, offset, flags), \ + TP_STRUCT__entry( \ + __field(dev_t, dev) \ + __field(xfs_ino_t, ino) \ + __field(xfs_fsize_t, size) \ + __field(xfs_fsize_t, 
new_size) \ + __field(loff_t, offset) \ + __field(size_t, count) \ + __field(int, flags) \ + ), \ + TP_fast_assign( \ + __entry->dev = VFS_I(ip)->i_sb->s_dev; \ + __entry->ino = ip->i_ino; \ + __entry->size = ip->i_d.di_size; \ + __entry->new_size = ip->i_new_size; \ + __entry->offset = offset; \ + __entry->count = count; \ + __entry->flags = flags; \ + ), \ + TP_printk("dev %d:%d ino 0x%llx size 0x%llx new_size 0x%llx " \ + "offset 0x%llx count 0x%zx ioflags %s", \ + MAJOR(__entry->dev), MINOR(__entry->dev), \ + __entry->ino, \ + __entry->size, \ + __entry->new_size, \ + __entry->offset, \ + __entry->count, \ + __print_flags(__entry->flags, "|", XFS_IO_FLAGS)) \ +) +DEFINE_RW_EVENT(xfs_file_read); +DEFINE_RW_EVENT(xfs_file_buffered_write); +DEFINE_RW_EVENT(xfs_file_direct_write); +DEFINE_RW_EVENT(xfs_file_splice_read); +DEFINE_RW_EVENT(xfs_file_splice_write); + + +#define DEFINE_PAGE_EVENT(name) \ +TRACE_EVENT(name, \ + TP_PROTO(struct inode *inode, struct page *page, unsigned long off), \ + TP_ARGS(inode, page, off), \ + TP_STRUCT__entry( \ + __field(dev_t, dev) \ + __field(xfs_ino_t, ino) \ + __field(pgoff_t, pgoff) \ + __field(loff_t, size) \ + __field(unsigned long, offset) \ + __field(int, delalloc) \ + __field(int, unmapped) \ + __field(int, unwritten) \ + ), \ + TP_fast_assign( \ + int delalloc = -1, unmapped = -1, unwritten = -1; \ + \ + if (page_has_buffers(page)) \ + xfs_count_page_state(page, &delalloc, \ + &unmapped, &unwritten); \ + __entry->dev = inode->i_sb->s_dev; \ + __entry->ino = XFS_I(inode)->i_ino; \ + __entry->pgoff = page_offset(page); \ + __entry->size = i_size_read(inode); \ + __entry->offset = off; \ + __entry->delalloc = delalloc; \ + __entry->unmapped = unmapped; \ + __entry->unwritten = unwritten; \ + ), \ + TP_printk("dev %d:%d ino 0x%llx pgoff 0x%lx size 0x%llx offset %lx " \ + "delalloc %d unmapped %d unwritten %d", \ + MAJOR(__entry->dev), MINOR(__entry->dev), \ + __entry->ino, \ + __entry->pgoff, \ + __entry->size, \ + __entry->offset, \ + __entry->delalloc, \ + __entry->unmapped, \ + __entry->unwritten) \ +) +DEFINE_PAGE_EVENT(xfs_writepage); +DEFINE_PAGE_EVENT(xfs_releasepage); +DEFINE_PAGE_EVENT(xfs_invalidatepage); + +#define DEFINE_IOMAP_EVENT(name) \ +TRACE_EVENT(name, \ + TP_PROTO(struct xfs_inode *ip, xfs_off_t offset, ssize_t count, \ + int flags, struct xfs_bmbt_irec *irec), \ + TP_ARGS(ip, offset, count, flags, irec), \ + TP_STRUCT__entry( \ + __field(dev_t, dev) \ + __field(xfs_ino_t, ino) \ + __field(loff_t, size) \ + __field(loff_t, new_size) \ + __field(loff_t, offset) \ + __field(size_t, count) \ + __field(int, flags) \ + __field(xfs_fileoff_t, startoff) \ + __field(xfs_fsblock_t, startblock) \ + __field(xfs_filblks_t, blockcount) \ + ), \ + TP_fast_assign( \ + __entry->dev = VFS_I(ip)->i_sb->s_dev; \ + __entry->ino = ip->i_ino; \ + __entry->size = ip->i_d.di_size; \ + __entry->new_size = ip->i_new_size; \ + __entry->offset = offset; \ + __entry->count = count; \ + __entry->flags = flags; \ + __entry->startoff = irec ? irec->br_startoff : 0; \ + __entry->startblock = irec ? irec->br_startblock : 0; \ + __entry->blockcount = irec ? 
irec->br_blockcount : 0; \ + ), \ + TP_printk("dev %d:%d ino 0x%llx size 0x%llx new_size 0x%llx " \ + "offset 0x%llx count %zd flags %s " \ + "startoff 0x%llx startblock 0x%llx blockcount 0x%llx", \ + MAJOR(__entry->dev), MINOR(__entry->dev), \ + __entry->ino, \ + __entry->size, \ + __entry->new_size, \ + __entry->offset, \ + __entry->count, \ + __print_flags(__entry->flags, "|", BMAPI_FLAGS), \ + __entry->startoff, \ + __entry->startblock, \ + __entry->blockcount) \ +) +DEFINE_IOMAP_EVENT(xfs_iomap_enter); +DEFINE_IOMAP_EVENT(xfs_iomap_found); +DEFINE_IOMAP_EVENT(xfs_iomap_alloc); + +#define DEFINE_SIMPLE_IO_EVENT(name) \ +TRACE_EVENT(name, \ + TP_PROTO(struct xfs_inode *ip, xfs_off_t offset, ssize_t count), \ + TP_ARGS(ip, offset, count), \ + TP_STRUCT__entry( \ + __field(dev_t, dev) \ + __field(xfs_ino_t, ino) \ + __field(loff_t, size) \ + __field(loff_t, new_size) \ + __field(loff_t, offset) \ + __field(size_t, count) \ + ), \ + TP_fast_assign( \ + __entry->dev = VFS_I(ip)->i_sb->s_dev; \ + __entry->ino = ip->i_ino; \ + __entry->size = ip->i_d.di_size; \ + __entry->new_size = ip->i_new_size; \ + __entry->offset = offset; \ + __entry->count = count; \ + ), \ + TP_printk("dev %d:%d ino 0x%llx size 0x%llx new_size 0x%llx " \ + "offset 0x%llx count %zd", \ + MAJOR(__entry->dev), MINOR(__entry->dev), \ + __entry->ino, \ + __entry->size, \ + __entry->new_size, \ + __entry->offset, \ + __entry->count) \ +); +DEFINE_SIMPLE_IO_EVENT(xfs_delalloc_enospc); +DEFINE_SIMPLE_IO_EVENT(xfs_unwritten_convert); + + +TRACE_EVENT(xfs_itruncate_start, + TP_PROTO(struct xfs_inode *ip, xfs_fsize_t new_size, int flag, + xfs_off_t toss_start, xfs_off_t toss_finish), + TP_ARGS(ip, new_size, flag, toss_start, toss_finish), + TP_STRUCT__entry( + __field(dev_t, dev) + __field(xfs_ino_t, ino) + __field(xfs_fsize_t, size) + __field(xfs_fsize_t, new_size) + __field(xfs_off_t, toss_start) + __field(xfs_off_t, toss_finish) + __field(int, flag) + ), + TP_fast_assign( + __entry->dev = VFS_I(ip)->i_sb->s_dev; + __entry->ino = ip->i_ino; + __entry->size = ip->i_d.di_size; + __entry->new_size = new_size; + __entry->toss_start = toss_start; + __entry->toss_finish = toss_finish; + __entry->flag = flag; + ), + TP_printk("dev %d:%d ino 0x%llx %s size 0x%llx new_size 0x%llx " + "toss start 0x%llx toss finish 0x%llx", + MAJOR(__entry->dev), MINOR(__entry->dev), + __entry->ino, + __print_flags(__entry->flag, "|", XFS_ITRUNC_FLAGS), + __entry->size, + __entry->new_size, + __entry->toss_start, + __entry->toss_finish) +); + +#define DEFINE_ITRUNC_EVENT(name) \ +TRACE_EVENT(name, \ + TP_PROTO(struct xfs_inode *ip, xfs_fsize_t new_size), \ + TP_ARGS(ip, new_size), \ + TP_STRUCT__entry( \ + __field(dev_t, dev) \ + __field(xfs_ino_t, ino) \ + __field(xfs_fsize_t, size) \ + __field(xfs_fsize_t, new_size) \ + ), \ + TP_fast_assign( \ + __entry->dev = VFS_I(ip)->i_sb->s_dev; \ + __entry->ino = ip->i_ino; \ + __entry->size = ip->i_d.di_size; \ + __entry->new_size = new_size; \ + ), \ + TP_printk("dev %d:%d ino 0x%llx size 0x%llx new_size 0x%llx", \ + MAJOR(__entry->dev), MINOR(__entry->dev), \ + __entry->ino, \ + __entry->size, \ + __entry->new_size) \ +) +DEFINE_ITRUNC_EVENT(xfs_itruncate_finish_start); +DEFINE_ITRUNC_EVENT(xfs_itruncate_finish_end); + +TRACE_EVENT(xfs_pagecache_inval, + TP_PROTO(struct xfs_inode *ip, xfs_off_t start, xfs_off_t finish), + TP_ARGS(ip, start, finish), + TP_STRUCT__entry( + __field(dev_t, dev) + __field(xfs_ino_t, ino) + __field(xfs_fsize_t, size) + __field(xfs_off_t, start) + __field(xfs_off_t, finish) + ), + 
TP_fast_assign( + __entry->dev = VFS_I(ip)->i_sb->s_dev; + __entry->ino = ip->i_ino; + __entry->size = ip->i_d.di_size; + __entry->start = start; + __entry->finish = finish; + ), + TP_printk("dev %d:%d ino 0x%llx size 0x%llx start 0x%llx finish 0x%llx", + MAJOR(__entry->dev), MINOR(__entry->dev), + __entry->ino, + __entry->size, + __entry->start, + __entry->finish) +); + +TRACE_EVENT(xfs_bunmap, + TP_PROTO(struct xfs_inode *ip, xfs_fileoff_t bno, xfs_filblks_t len, + int flags, unsigned long caller_ip), + TP_ARGS(ip, bno, len, flags, caller_ip), + TP_STRUCT__entry( + __field(dev_t, dev) + __field(xfs_ino_t, ino) + __field(xfs_fsize_t, size) + __field(xfs_fileoff_t, bno) + __field(xfs_filblks_t, len) + __field(unsigned long, caller_ip) + __field(int, flags) + ), + TP_fast_assign( + __entry->dev = VFS_I(ip)->i_sb->s_dev; + __entry->ino = ip->i_ino; + __entry->size = ip->i_d.di_size; + __entry->bno = bno; + __entry->len = len; + __entry->caller_ip = caller_ip; + __entry->flags = flags; + ), + TP_printk("dev %d:%d ino 0x%llx size 0x%llx bno 0x%llx len 0x%llx " + "flags %s caller %pf", + MAJOR(__entry->dev), MINOR(__entry->dev), + __entry->ino, + __entry->size, + __entry->bno, + __entry->len, + __print_flags(__entry->flags, "|", XFS_BMAPI_FLAGS), + (void *)__entry->caller_ip) + +); + +TRACE_EVENT(xfs_alloc_busy, + TP_PROTO(struct xfs_mount *mp, xfs_agnumber_t agno, xfs_agblock_t agbno, + xfs_extlen_t len, int slot), + TP_ARGS(mp, agno, agbno, len, slot), + TP_STRUCT__entry( + __field(dev_t, dev) + __field(xfs_agnumber_t, agno) + __field(xfs_agblock_t, agbno) + __field(xfs_extlen_t, len) + __field(int, slot) + ), + TP_fast_assign( + __entry->dev = mp->m_super->s_dev; + __entry->agno = agno; + __entry->agbno = agbno; + __entry->len = len; + __entry->slot = slot; + ), + TP_printk("dev %d:%d agno %u agbno %u len %u slot %d", + MAJOR(__entry->dev), MINOR(__entry->dev), + __entry->agno, + __entry->agbno, + __entry->len, + __entry->slot) + +); + +#define XFS_BUSY_STATES \ + { 0, "found" }, \ + { 1, "missing" } + +TRACE_EVENT(xfs_alloc_unbusy, + TP_PROTO(struct xfs_mount *mp, xfs_agnumber_t agno, + int slot, int found), + TP_ARGS(mp, agno, slot, found), + TP_STRUCT__entry( + __field(dev_t, dev) + __field(xfs_agnumber_t, agno) + __field(int, slot) + __field(int, found) + ), + TP_fast_assign( + __entry->dev = mp->m_super->s_dev; + __entry->agno = agno; + __entry->slot = slot; + __entry->found = found; + ), + TP_printk("dev %d:%d agno %u slot %d %s", + MAJOR(__entry->dev), MINOR(__entry->dev), + __entry->agno, + __entry->slot, + __print_symbolic(__entry->found, XFS_BUSY_STATES)) +); + +TRACE_EVENT(xfs_alloc_busysearch, + TP_PROTO(struct xfs_mount *mp, xfs_agnumber_t agno, xfs_agblock_t agbno, + xfs_extlen_t len, int found), + TP_ARGS(mp, agno, agbno, len, found), + TP_STRUCT__entry( + __field(dev_t, dev) + __field(xfs_agnumber_t, agno) + __field(xfs_agblock_t, agbno) + __field(xfs_extlen_t, len) + __field(int, found) + ), + TP_fast_assign( + __entry->dev = mp->m_super->s_dev; + __entry->agno = agno; + __entry->agbno = agbno; + __entry->len = len; + __entry->found = found; + ), + TP_printk("dev %d:%d agno %u agbno %u len %u %s", + MAJOR(__entry->dev), MINOR(__entry->dev), + __entry->agno, + __entry->agbno, + __entry->len, + __print_symbolic(__entry->found, XFS_BUSY_STATES)) +); + +TRACE_EVENT(xfs_agf, + TP_PROTO(struct xfs_mount *mp, struct xfs_agf *agf, int flags, + unsigned long caller_ip), + TP_ARGS(mp, agf, flags, caller_ip), + TP_STRUCT__entry( + __field(dev_t, dev) + __field(xfs_agnumber_t, agno) + 
__field(int, flags) + __field(__u32, length) + __field(__u32, bno_root) + __field(__u32, cnt_root) + __field(__u32, bno_level) + __field(__u32, cnt_level) + __field(__u32, flfirst) + __field(__u32, fllast) + __field(__u32, flcount) + __field(__u32, freeblks) + __field(__u32, longest) + __field(unsigned long, caller_ip) + ), + TP_fast_assign( + __entry->dev = mp->m_super->s_dev; + __entry->agno = be32_to_cpu(agf->agf_seqno), + __entry->flags = flags; + __entry->length = be32_to_cpu(agf->agf_length), + __entry->bno_root = be32_to_cpu(agf->agf_roots[XFS_BTNUM_BNO]), + __entry->cnt_root = be32_to_cpu(agf->agf_roots[XFS_BTNUM_CNT]), + __entry->bno_level = + be32_to_cpu(agf->agf_levels[XFS_BTNUM_BNO]), + __entry->cnt_level = + be32_to_cpu(agf->agf_levels[XFS_BTNUM_CNT]), + __entry->flfirst = be32_to_cpu(agf->agf_flfirst), + __entry->fllast = be32_to_cpu(agf->agf_fllast), + __entry->flcount = be32_to_cpu(agf->agf_flcount), + __entry->freeblks = be32_to_cpu(agf->agf_freeblks), + __entry->longest = be32_to_cpu(agf->agf_longest); + __entry->caller_ip = caller_ip; + ), + TP_printk("dev %d:%d agno %u flags %s length %u roots b %u c %u " + "levels b %u c %u flfirst %u fllast %u flcount %u " + "freeblks %u longest %u caller %pf", + MAJOR(__entry->dev), MINOR(__entry->dev), + __entry->agno, + __print_flags(__entry->flags, "|", XFS_AGF_FLAGS), + __entry->length, + __entry->bno_root, + __entry->cnt_root, + __entry->bno_level, + __entry->cnt_level, + __entry->flfirst, + __entry->fllast, + __entry->flcount, + __entry->freeblks, + __entry->longest, + (void *)__entry->caller_ip) +); + +TRACE_EVENT(xfs_free_extent, + TP_PROTO(struct xfs_mount *mp, xfs_agnumber_t agno, xfs_agblock_t agbno, + xfs_extlen_t len, bool isfl, int haveleft, int haveright), + TP_ARGS(mp, agno, agbno, len, isfl, haveleft, haveright), + TP_STRUCT__entry( + __field(dev_t, dev) + __field(xfs_agnumber_t, agno) + __field(xfs_agblock_t, agbno) + __field(xfs_extlen_t, len) + __field(int, isfl) + __field(int, haveleft) + __field(int, haveright) + ), + TP_fast_assign( + __entry->dev = mp->m_super->s_dev; + __entry->agno = agno; + __entry->agbno = agbno; + __entry->len = len; + __entry->isfl = isfl; + __entry->haveleft = haveleft; + __entry->haveright = haveright; + ), + TP_printk("dev %d:%d agno %u agbno %u len %u isfl %d %s", + MAJOR(__entry->dev), MINOR(__entry->dev), + __entry->agno, + __entry->agbno, + __entry->len, + __entry->isfl, + __entry->haveleft ? + (__entry->haveright ? "both" : "left") : + (__entry->haveright ? 
"right" : "none")) + +); + +#define DEFINE_ALLOC_EVENT(name) \ +TRACE_EVENT(name, \ + TP_PROTO(struct xfs_alloc_arg *args), \ + TP_ARGS(args), \ + TP_STRUCT__entry( \ + __field(dev_t, dev) \ + __field(xfs_agnumber_t, agno) \ + __field(xfs_agblock_t, agbno) \ + __field(xfs_extlen_t, minlen) \ + __field(xfs_extlen_t, maxlen) \ + __field(xfs_extlen_t, mod) \ + __field(xfs_extlen_t, prod) \ + __field(xfs_extlen_t, minleft) \ + __field(xfs_extlen_t, total) \ + __field(xfs_extlen_t, alignment) \ + __field(xfs_extlen_t, minalignslop) \ + __field(xfs_extlen_t, len) \ + __field(short, type) \ + __field(short, otype) \ + __field(char, wasdel) \ + __field(char, wasfromfl) \ + __field(char, isfl) \ + __field(char, userdata) \ + __field(xfs_fsblock_t, firstblock) \ + ), \ + TP_fast_assign( \ + __entry->dev = args->mp->m_super->s_dev; \ + __entry->agno = args->agno; \ + __entry->agbno = args->agbno; \ + __entry->minlen = args->minlen; \ + __entry->maxlen = args->maxlen; \ + __entry->mod = args->mod; \ + __entry->prod = args->prod; \ + __entry->minleft = args->minleft; \ + __entry->total = args->total; \ + __entry->alignment = args->alignment; \ + __entry->minalignslop = args->minalignslop; \ + __entry->len = args->len; \ + __entry->type = args->type; \ + __entry->otype = args->otype; \ + __entry->wasdel = args->wasdel; \ + __entry->wasfromfl = args->wasfromfl; \ + __entry->isfl = args->isfl; \ + __entry->userdata = args->userdata; \ + __entry->firstblock = args->firstblock; \ + ), \ + TP_printk("dev %d:%d agno %u agbno %u minlen %u maxlen %u mod %u " \ + "prod %u minleft %u total %u alignment %u minalignslop %u " \ + "len %u type %s otype %s wasdel %d wasfromfl %d isfl %d " \ + "userdata %d firstblock 0x%llx", \ + MAJOR(__entry->dev), MINOR(__entry->dev), \ + __entry->agno, \ + __entry->agbno, \ + __entry->minlen, \ + __entry->maxlen, \ + __entry->mod, \ + __entry->prod, \ + __entry->minleft, \ + __entry->total, \ + __entry->alignment, \ + __entry->minalignslop, \ + __entry->len, \ + __print_symbolic(__entry->type, XFS_ALLOC_TYPES), \ + __print_symbolic(__entry->otype, XFS_ALLOC_TYPES), \ + __entry->wasdel, \ + __entry->wasfromfl, \ + __entry->isfl, \ + __entry->userdata, \ + __entry->firstblock) \ +) + +DEFINE_ALLOC_EVENT(xfs_alloc_exact_done); +DEFINE_ALLOC_EVENT(xfs_alloc_exact_error); +DEFINE_ALLOC_EVENT(xfs_alloc_near_nominleft); +DEFINE_ALLOC_EVENT(xfs_alloc_near_first); +DEFINE_ALLOC_EVENT(xfs_alloc_near_greater); +DEFINE_ALLOC_EVENT(xfs_alloc_near_lesser); +DEFINE_ALLOC_EVENT(xfs_alloc_near_error); +DEFINE_ALLOC_EVENT(xfs_alloc_size_neither); +DEFINE_ALLOC_EVENT(xfs_alloc_size_noentry); +DEFINE_ALLOC_EVENT(xfs_alloc_size_nominleft); +DEFINE_ALLOC_EVENT(xfs_alloc_size_done); +DEFINE_ALLOC_EVENT(xfs_alloc_size_error); +DEFINE_ALLOC_EVENT(xfs_alloc_small_freelist); +DEFINE_ALLOC_EVENT(xfs_alloc_small_notenough); +DEFINE_ALLOC_EVENT(xfs_alloc_small_done); +DEFINE_ALLOC_EVENT(xfs_alloc_small_error); +DEFINE_ALLOC_EVENT(xfs_alloc_vextent_badargs); +DEFINE_ALLOC_EVENT(xfs_alloc_vextent_nofix); +DEFINE_ALLOC_EVENT(xfs_alloc_vextent_noagbp); +DEFINE_ALLOC_EVENT(xfs_alloc_vextent_loopfailed); +DEFINE_ALLOC_EVENT(xfs_alloc_vextent_allfailed); + +#define DEFINE_DIR2_TRACE(tname) \ +TRACE_EVENT(tname, \ + TP_PROTO(struct xfs_da_args *args), \ + TP_ARGS(args), \ + TP_STRUCT__entry( \ + __field(dev_t, dev) \ + __field(xfs_ino_t, ino) \ + __dynamic_array(char, name, args->namelen) \ + __field(int, namelen) \ + __field(xfs_dahash_t, hashval) \ + __field(xfs_ino_t, inumber) \ + __field(int, op_flags) \ + ), \ + 
TP_fast_assign( \ + __entry->dev = VFS_I(args->dp)->i_sb->s_dev; \ + __entry->ino = args->dp->i_ino; \ + if (args->namelen) \ + memcpy(__get_str(name), args->name, args->namelen); \ + __entry->namelen = args->namelen; \ + __entry->hashval = args->hashval; \ + __entry->inumber = args->inumber; \ + __entry->op_flags = args->op_flags; \ + ), \ + TP_printk("dev %d:%d ino 0x%llx name %.*s namelen %d hashval 0x%x " \ + "inumber 0x%llx op_flags %s", \ + MAJOR(__entry->dev), MINOR(__entry->dev), \ + __entry->ino, \ + __entry->namelen, \ + __entry->namelen ? __get_str(name) : NULL, \ + __entry->namelen, \ + __entry->hashval, \ + __entry->inumber, \ + __print_flags(__entry->op_flags, "|", XFS_DA_OP_FLAGS)) \ +) +DEFINE_DIR2_TRACE(xfs_dir2_sf_addname); +DEFINE_DIR2_TRACE(xfs_dir2_sf_create); +DEFINE_DIR2_TRACE(xfs_dir2_sf_lookup); +DEFINE_DIR2_TRACE(xfs_dir2_sf_replace); +DEFINE_DIR2_TRACE(xfs_dir2_sf_removename); +DEFINE_DIR2_TRACE(xfs_dir2_sf_toino4); +DEFINE_DIR2_TRACE(xfs_dir2_sf_toino8); +DEFINE_DIR2_TRACE(xfs_dir2_sf_to_block); +DEFINE_DIR2_TRACE(xfs_dir2_block_addname); +DEFINE_DIR2_TRACE(xfs_dir2_block_lookup); +DEFINE_DIR2_TRACE(xfs_dir2_block_replace); +DEFINE_DIR2_TRACE(xfs_dir2_block_removename); +DEFINE_DIR2_TRACE(xfs_dir2_block_to_sf); +DEFINE_DIR2_TRACE(xfs_dir2_block_to_leaf); +DEFINE_DIR2_TRACE(xfs_dir2_leaf_addname); +DEFINE_DIR2_TRACE(xfs_dir2_leaf_lookup); +DEFINE_DIR2_TRACE(xfs_dir2_leaf_replace); +DEFINE_DIR2_TRACE(xfs_dir2_leaf_removename); +DEFINE_DIR2_TRACE(xfs_dir2_leaf_to_block); +DEFINE_DIR2_TRACE(xfs_dir2_leaf_to_node); +DEFINE_DIR2_TRACE(xfs_dir2_node_addname); +DEFINE_DIR2_TRACE(xfs_dir2_node_lookup); +DEFINE_DIR2_TRACE(xfs_dir2_node_replace); +DEFINE_DIR2_TRACE(xfs_dir2_node_removename); +DEFINE_DIR2_TRACE(xfs_dir2_node_to_leaf); + +#define DEFINE_DIR2_SPACE_TRACE(tname) \ +TRACE_EVENT(tname, \ + TP_PROTO(struct xfs_da_args *args, int idx), \ + TP_ARGS(args, idx), \ + TP_STRUCT__entry( \ + __field(dev_t, dev) \ + __field(xfs_ino_t, ino) \ + __field(int, op_flags) \ + __field(int, idx) \ + ), \ + TP_fast_assign( \ + __entry->dev = VFS_I(args->dp)->i_sb->s_dev; \ + __entry->ino = args->dp->i_ino; \ + __entry->op_flags = args->op_flags; \ + __entry->idx = idx; \ + ), \ + TP_printk("dev %d:%d ino 0x%llx op_flags %s index %d", \ + MAJOR(__entry->dev), MINOR(__entry->dev), \ + __entry->ino, \ + __print_flags(__entry->op_flags, "|", XFS_DA_OP_FLAGS), \ + __entry->idx) \ +) +DEFINE_DIR2_SPACE_TRACE(xfs_dir2_leafn_add); +DEFINE_DIR2_SPACE_TRACE(xfs_dir2_leafn_remove); +DEFINE_DIR2_SPACE_TRACE(xfs_dir2_grow_inode); +DEFINE_DIR2_SPACE_TRACE(xfs_dir2_shrink_inode); + +TRACE_EVENT(xfs_dir2_leafn_moveents, + TP_PROTO(struct xfs_da_args *args, int src_idx, int dst_idx, int count), + TP_ARGS(args, src_idx, dst_idx, count), + TP_STRUCT__entry( + __field(dev_t, dev) + __field(xfs_ino_t, ino) + __field(int, op_flags) + __field(int, src_idx) + __field(int, dst_idx) + __field(int, count) + ), + TP_fast_assign( + __entry->dev = VFS_I(args->dp)->i_sb->s_dev; + __entry->ino = args->dp->i_ino; + __entry->op_flags = args->op_flags; + __entry->src_idx = src_idx; + __entry->dst_idx = dst_idx; + __entry->count = count; + ), + TP_printk("dev %d:%d ino 0x%llx op_flags %s " + "src_idx %d dst_idx %d count %d", + MAJOR(__entry->dev), MINOR(__entry->dev), + __entry->ino, + __print_flags(__entry->op_flags, "|", XFS_DA_OP_FLAGS), + __entry->src_idx, + __entry->dst_idx, + __entry->count) +); + +#endif /* _TRACE_XFS_H */ + +#undef TRACE_INCLUDE_PATH +#define TRACE_INCLUDE_PATH . 
+#define TRACE_INCLUDE_FILE xfs_trace +#include <trace/define_trace.h> diff --git a/fs/xfs/linux-2.6/xfs_vnode.h b/fs/xfs/linux-2.6/xfs_vnode.h index 00cabf5354d..7c220b4227b 100644 --- a/fs/xfs/linux-2.6/xfs_vnode.h +++ b/fs/xfs/linux-2.6/xfs_vnode.h @@ -39,6 +39,10 @@ struct attrlist_cursor_kern; #define IO_ISDIRECT 0x00004 /* bypass page cache */ #define IO_INVIS 0x00020 /* don't update inode timestamps */ +#define XFS_IO_FLAGS \ + { IO_ISDIRECT, "DIRECT" }, \ + { IO_INVIS, "INVIS"} + /* * Flush/Invalidate options for vop_toss/flush/flushinval_pages. */ diff --git a/fs/xfs/quota/xfs_dquot.c b/fs/xfs/quota/xfs_dquot.c index 2f3f2229eaa..d7c7eea09fc 100644 --- a/fs/xfs/quota/xfs_dquot.c +++ b/fs/xfs/quota/xfs_dquot.c @@ -47,6 +47,7 @@ #include "xfs_trans_space.h" #include "xfs_trans_priv.h" #include "xfs_qm.h" +#include "xfs_trace.h" /* @@ -112,10 +113,7 @@ xfs_qm_dqinit( init_completion(&dqp->q_flush); complete(&dqp->q_flush); -#ifdef XFS_DQUOT_TRACE - dqp->q_trace = ktrace_alloc(DQUOT_TRACE_SIZE, KM_NOFS); - xfs_dqtrace_entry(dqp, "DQINIT"); -#endif + trace_xfs_dqinit(dqp); } else { /* * Only the q_core portion was zeroed in dqreclaim_one(). @@ -136,10 +134,7 @@ xfs_qm_dqinit( dqp->q_hash = NULL; ASSERT(dqp->dq_flnext == dqp->dq_flprev); -#ifdef XFS_DQUOT_TRACE - ASSERT(dqp->q_trace); - xfs_dqtrace_entry(dqp, "DQRECLAIMED_INIT"); -#endif + trace_xfs_dqreuse(dqp); } /* @@ -167,13 +162,8 @@ xfs_qm_dqdestroy( mutex_destroy(&dqp->q_qlock); sv_destroy(&dqp->q_pinwait); - -#ifdef XFS_DQUOT_TRACE - if (dqp->q_trace) - ktrace_free(dqp->q_trace); - dqp->q_trace = NULL; -#endif kmem_zone_free(xfs_Gqm->qm_dqzone, dqp); + atomic_dec(&xfs_Gqm->qm_totaldquots); } @@ -195,49 +185,6 @@ xfs_qm_dqinit_core( d->dd_diskdq.d_flags = type; } - -#ifdef XFS_DQUOT_TRACE -/* - * Dquot tracing for debugging. - */ -/* ARGSUSED */ -void -__xfs_dqtrace_entry( - xfs_dquot_t *dqp, - char *func, - void *retaddr, - xfs_inode_t *ip) -{ - xfs_dquot_t *udqp = NULL; - xfs_ino_t ino = 0; - - ASSERT(dqp->q_trace); - if (ip) { - ino = ip->i_ino; - udqp = ip->i_udquot; - } - ktrace_enter(dqp->q_trace, - (void *)(__psint_t)DQUOT_KTRACE_ENTRY, - (void *)func, - (void *)(__psint_t)dqp->q_nrefs, - (void *)(__psint_t)dqp->dq_flags, - (void *)(__psint_t)dqp->q_res_bcount, - (void *)(__psint_t)be64_to_cpu(dqp->q_core.d_bcount), - (void *)(__psint_t)be64_to_cpu(dqp->q_core.d_icount), - (void *)(__psint_t)be64_to_cpu(dqp->q_core.d_blk_hardlimit), - (void *)(__psint_t)be64_to_cpu(dqp->q_core.d_blk_softlimit), - (void *)(__psint_t)be64_to_cpu(dqp->q_core.d_ino_hardlimit), - (void *)(__psint_t)be64_to_cpu(dqp->q_core.d_ino_softlimit), - (void *)(__psint_t)be32_to_cpu(dqp->q_core.d_id), - (void *)(__psint_t)current_pid(), - (void *)(__psint_t)ino, - (void *)(__psint_t)retaddr, - (void *)(__psint_t)udqp); - return; -} -#endif - - /* * If default limits are in force, push them into the dquot now. * We overwrite the dquot limits only if they are zero and this @@ -425,7 +372,8 @@ xfs_qm_dqalloc( xfs_trans_t *tp = *tpp; ASSERT(tp != NULL); - xfs_dqtrace_entry(dqp, "DQALLOC"); + + trace_xfs_dqalloc(dqp); /* * Initialize the bmap freelist prior to calling bmapi code. @@ -612,7 +560,8 @@ xfs_qm_dqtobp( * (in which case we already have the buf). */ if (! 
newdquot) { - xfs_dqtrace_entry(dqp, "DQTOBP READBUF"); + trace_xfs_dqtobp_read(dqp); + if ((error = xfs_trans_read_buf(mp, tp, mp->m_ddev_targp, dqp->q_blkno, XFS_QI_DQCHUNKLEN(mp), @@ -670,11 +619,12 @@ xfs_qm_dqread( ASSERT(tpp); + trace_xfs_dqread(dqp); + /* * get a pointer to the on-disk dquot and the buffer containing it * dqp already knows its own type (GROUP/USER). */ - xfs_dqtrace_entry(dqp, "DQREAD"); if ((error = xfs_qm_dqtobp(tpp, dqp, &ddqp, &bp, flags))) { return (error); } @@ -763,7 +713,7 @@ xfs_qm_idtodq( * or if the dquot didn't exist on disk and we ask to * allocate (ENOENT). */ - xfs_dqtrace_entry(dqp, "DQREAD FAIL"); + trace_xfs_dqread_fail(dqp); cancelflags |= XFS_TRANS_ABORT; goto error0; } @@ -817,7 +767,8 @@ xfs_qm_dqlookup( * id can't be modified without the hashlock anyway. */ if (be32_to_cpu(dqp->q_core.d_id) == id && dqp->q_mount == mp) { - xfs_dqtrace_entry(dqp, "DQFOUND BY LOOKUP"); + trace_xfs_dqlookup_found(dqp); + /* * All in core dquots must be on the dqlist of mp */ @@ -827,7 +778,7 @@ xfs_qm_dqlookup( if (dqp->q_nrefs == 0) { ASSERT (XFS_DQ_IS_ON_FREELIST(dqp)); if (! xfs_qm_freelist_lock_nowait(xfs_Gqm)) { - xfs_dqtrace_entry(dqp, "DQLOOKUP: WANT"); + trace_xfs_dqlookup_want(dqp); /* * We may have raced with dqreclaim_one() @@ -857,8 +808,7 @@ xfs_qm_dqlookup( /* * take it off the freelist */ - xfs_dqtrace_entry(dqp, - "DQLOOKUP: TAKEOFF FL"); + trace_xfs_dqlookup_freelist(dqp); XQM_FREELIST_REMOVE(dqp); /* xfs_qm_freelist_print(&(xfs_Gqm-> qm_dqfreelist), @@ -878,8 +828,7 @@ xfs_qm_dqlookup( */ ASSERT(mutex_is_locked(&qh->qh_lock)); if (dqp->HL_PREVP != &qh->qh_next) { - xfs_dqtrace_entry(dqp, - "DQLOOKUP: HASH MOVETOFRONT"); + trace_xfs_dqlookup_move(dqp); if ((d = dqp->HL_NEXT)) d->HL_PREVP = dqp->HL_PREVP; *(dqp->HL_PREVP) = d; @@ -889,7 +838,7 @@ xfs_qm_dqlookup( dqp->HL_PREVP = &qh->qh_next; qh->qh_next = dqp; } - xfs_dqtrace_entry(dqp, "LOOKUP END"); + trace_xfs_dqlookup_done(dqp); *O_dqpp = dqp; ASSERT(mutex_is_locked(&qh->qh_lock)); return (0); @@ -971,7 +920,7 @@ xfs_qm_dqget( ASSERT(*O_dqpp); ASSERT(XFS_DQ_IS_LOCKED(*O_dqpp)); mutex_unlock(&h->qh_lock); - xfs_dqtrace_entry(*O_dqpp, "DQGET DONE (FROM CACHE)"); + trace_xfs_dqget_hit(*O_dqpp); return (0); /* success */ } XQM_STATS_INC(xqmstats.xs_qm_dqcachemisses); @@ -1104,7 +1053,7 @@ xfs_qm_dqget( mutex_unlock(&h->qh_lock); dqret: ASSERT((ip == NULL) || xfs_isilocked(ip, XFS_ILOCK_EXCL)); - xfs_dqtrace_entry(dqp, "DQGET DONE"); + trace_xfs_dqget_miss(dqp); *O_dqpp = dqp; return (0); } @@ -1124,7 +1073,8 @@ xfs_qm_dqput( ASSERT(dqp->q_nrefs > 0); ASSERT(XFS_DQ_IS_LOCKED(dqp)); - xfs_dqtrace_entry(dqp, "DQPUT"); + + trace_xfs_dqput(dqp); if (dqp->q_nrefs != 1) { dqp->q_nrefs--; @@ -1137,7 +1087,7 @@ xfs_qm_dqput( * in the right order; but try to get it out-of-order first */ if (! xfs_qm_freelist_lock_nowait(xfs_Gqm)) { - xfs_dqtrace_entry(dqp, "DQPUT: FLLOCK-WAIT"); + trace_xfs_dqput_wait(dqp); xfs_dqunlock(dqp); xfs_qm_freelist_lock(xfs_Gqm); xfs_dqlock(dqp); @@ -1148,7 +1098,8 @@ xfs_qm_dqput( /* We can't depend on nrefs being == 1 here */ if (--dqp->q_nrefs == 0) { - xfs_dqtrace_entry(dqp, "DQPUT: ON FREELIST"); + trace_xfs_dqput_free(dqp); + /* * insert at end of the freelist. 
*/ @@ -1196,7 +1147,7 @@ xfs_qm_dqrele( if (!dqp) return; - xfs_dqtrace_entry(dqp, "DQRELE"); + trace_xfs_dqrele(dqp); xfs_dqlock(dqp); /* @@ -1229,7 +1180,7 @@ xfs_qm_dqflush( ASSERT(XFS_DQ_IS_LOCKED(dqp)); ASSERT(!completion_done(&dqp->q_flush)); - xfs_dqtrace_entry(dqp, "DQFLUSH"); + trace_xfs_dqflush(dqp); /* * If not dirty, or it's pinned and we are not supposed to @@ -1259,7 +1210,6 @@ xfs_qm_dqflush( * the ondisk-dquot has already been allocated for. */ if ((error = xfs_qm_dqtobp(NULL, dqp, &ddqp, &bp, XFS_QMOPT_DOWARN))) { - xfs_dqtrace_entry(dqp, "DQTOBP FAIL"); ASSERT(error != ENOENT); /* * Quotas could have gotten turned off (ESRCH) @@ -1297,7 +1247,7 @@ xfs_qm_dqflush( * get stuck waiting in the write for too long. */ if (XFS_BUF_ISPINNED(bp)) { - xfs_dqtrace_entry(dqp, "DQFLUSH LOG FORCE"); + trace_xfs_dqflush_force(dqp); xfs_log_force(mp, (xfs_lsn_t)0, XFS_LOG_FORCE); } @@ -1308,7 +1258,9 @@ xfs_qm_dqflush( } else { error = xfs_bwrite(mp, bp); } - xfs_dqtrace_entry(dqp, "DQFLUSH END"); + + trace_xfs_dqflush_done(dqp); + /* * dqp is still locked, but caller is free to unlock it now. */ @@ -1483,7 +1435,7 @@ xfs_qm_dqpurge( */ if (XFS_DQ_IS_DIRTY(dqp)) { int error; - xfs_dqtrace_entry(dqp, "DQPURGE ->DQFLUSH: DQDIRTY"); + /* dqflush unlocks dqflock */ /* * Given that dqpurge is a very rare occurrence, it is OK diff --git a/fs/xfs/quota/xfs_dquot.h b/fs/xfs/quota/xfs_dquot.h index a2c16bcee90..a0f7da586d1 100644 --- a/fs/xfs/quota/xfs_dquot.h +++ b/fs/xfs/quota/xfs_dquot.h @@ -85,9 +85,6 @@ typedef struct xfs_dquot { struct completion q_flush; /* flush completion queue */ atomic_t q_pincount; /* dquot pin count */ wait_queue_head_t q_pinwait; /* dquot pinning wait queue */ -#ifdef XFS_DQUOT_TRACE - struct ktrace *q_trace; /* trace header structure */ -#endif } xfs_dquot_t; @@ -144,24 +141,6 @@ static inline void xfs_dqfunlock(xfs_dquot_t *dqp) (XFS_IS_UQUOTA_ON((d)->q_mount)) : \ (XFS_IS_OQUOTA_ON((d)->q_mount)))) -#ifdef XFS_DQUOT_TRACE -/* - * Dquot Tracing stuff. - */ -#define DQUOT_TRACE_SIZE 64 -#define DQUOT_KTRACE_ENTRY 1 - -extern void __xfs_dqtrace_entry(xfs_dquot_t *dqp, char *func, - void *, xfs_inode_t *); -#define xfs_dqtrace_entry_ino(a,b,ip) \ - __xfs_dqtrace_entry((a), (b), (void*)__return_address, (ip)) -#define xfs_dqtrace_entry(a,b) \ - __xfs_dqtrace_entry((a), (b), (void*)__return_address, NULL) -#else -#define xfs_dqtrace_entry(a,b) -#define xfs_dqtrace_entry_ino(a,b,ip) -#endif - #ifdef QUOTADEBUG extern void xfs_qm_dqprint(xfs_dquot_t *); #else diff --git a/fs/xfs/quota/xfs_qm.c b/fs/xfs/quota/xfs_qm.c index 45b1bfef738..9e627a8b5b0 100644 --- a/fs/xfs/quota/xfs_qm.c +++ b/fs/xfs/quota/xfs_qm.c @@ -47,6 +47,7 @@ #include "xfs_trans_space.h" #include "xfs_utils.h" #include "xfs_qm.h" +#include "xfs_trace.h" /* * The global quota manager. 
There is only one of these for the entire @@ -453,7 +454,7 @@ again: xfs_dqunlock(dqp); continue; } - xfs_dqtrace_entry(dqp, "FLUSHALL: DQDIRTY"); + /* XXX a sentinel would be better */ recl = XFS_QI_MPLRECLAIMS(mp); if (!xfs_dqflock_nowait(dqp)) { @@ -651,7 +652,7 @@ xfs_qm_dqattach_one( */ dqp = *IO_idqpp; if (dqp) { - xfs_dqtrace_entry(dqp, "DQATTACH: found in ip"); + trace_xfs_dqattach_found(dqp); return 0; } @@ -704,7 +705,7 @@ xfs_qm_dqattach_one( if (error) return error; - xfs_dqtrace_entry(dqp, "DQATTACH: found by dqget"); + trace_xfs_dqattach_get(dqp); /* * dqget may have dropped and re-acquired the ilock, but it guarantees @@ -890,15 +891,15 @@ xfs_qm_dqdetach( if (!(ip->i_udquot || ip->i_gdquot)) return; + trace_xfs_dquot_dqdetach(ip); + ASSERT(ip->i_ino != ip->i_mount->m_sb.sb_uquotino); ASSERT(ip->i_ino != ip->i_mount->m_sb.sb_gquotino); if (ip->i_udquot) { - xfs_dqtrace_entry_ino(ip->i_udquot, "DQDETTACH", ip); xfs_qm_dqrele(ip->i_udquot); ip->i_udquot = NULL; } if (ip->i_gdquot) { - xfs_dqtrace_entry_ino(ip->i_gdquot, "DQDETTACH", ip); xfs_qm_dqrele(ip->i_gdquot); ip->i_gdquot = NULL; } @@ -977,7 +978,6 @@ xfs_qm_sync( * across a disk write */ xfs_qm_mplist_unlock(mp); - xfs_dqtrace_entry(dqp, "XQM_SYNC: DQFLUSH"); error = xfs_qm_dqflush(dqp, flush_flags); xfs_dqunlock(dqp); if (error && XFS_FORCED_SHUTDOWN(mp)) @@ -1350,7 +1350,8 @@ xfs_qm_reset_dqcounts( xfs_disk_dquot_t *ddq; int j; - xfs_buftrace("RESET DQUOTS", bp); + trace_xfs_reset_dqcounts(bp, _RET_IP_); + /* * Reset all counters and timers. They'll be * started afresh by xfs_qm_quotacheck. @@ -1543,7 +1544,9 @@ xfs_qm_quotacheck_dqadjust( xfs_qcnt_t rtblks) { ASSERT(XFS_DQ_IS_LOCKED(dqp)); - xfs_dqtrace_entry(dqp, "QCHECK DQADJUST"); + + trace_xfs_dqadjust(dqp); + /* * Adjust the inode count and the block count to reflect this inode's * resource usage. @@ -1994,7 +1997,9 @@ xfs_qm_shake_freelist( */ if (XFS_DQ_IS_DIRTY(dqp)) { int error; - xfs_dqtrace_entry(dqp, "DQSHAKE: DQDIRTY"); + + trace_xfs_dqshake_dirty(dqp); + /* * We flush it delayed write, so don't bother * releasing the mplock. @@ -2038,7 +2043,9 @@ xfs_qm_shake_freelist( return nreclaimed; goto tryagain; } - xfs_dqtrace_entry(dqp, "DQSHAKE: UNLINKING"); + + trace_xfs_dqshake_unlink(dqp); + #ifdef QUOTADEBUG cmn_err(CE_DEBUG, "Shake 0x%p, ID 0x%x\n", dqp, be32_to_cpu(dqp->q_core.d_id)); @@ -2125,7 +2132,9 @@ xfs_qm_dqreclaim_one(void) */ if (dqp->dq_flags & XFS_DQ_WANT) { ASSERT(! (dqp->dq_flags & XFS_DQ_INACTIVE)); - xfs_dqtrace_entry(dqp, "DQRECLAIM: DQWANT"); + + trace_xfs_dqreclaim_want(dqp); + xfs_dqunlock(dqp); xfs_qm_freelist_unlock(xfs_Gqm); if (++restarts >= XFS_QM_RECLAIM_MAX_RESTARTS) @@ -2171,7 +2180,9 @@ xfs_qm_dqreclaim_one(void) */ if (XFS_DQ_IS_DIRTY(dqp)) { int error; - xfs_dqtrace_entry(dqp, "DQRECLAIM: DQDIRTY"); + + trace_xfs_dqreclaim_dirty(dqp); + /* * We flush it delayed write, so don't bother * releasing the freelist lock. 
@@ -2194,8 +2205,9 @@ xfs_qm_dqreclaim_one(void) if (!mutex_trylock(&dqp->q_hash->qh_lock)) goto mplistunlock; + trace_xfs_dqreclaim_unlink(dqp); + ASSERT(dqp->q_nrefs == 0); - xfs_dqtrace_entry(dqp, "DQRECLAIM: UNLINKING"); XQM_MPLIST_REMOVE(&(XFS_QI_MPL_LIST(dqp->q_mount)), dqp); XQM_HASHLIST_REMOVE(dqp->q_hash, dqp); XQM_FREELIST_REMOVE(dqp); @@ -2430,7 +2442,7 @@ xfs_qm_vop_dqalloc( } } if (uq) - xfs_dqtrace_entry_ino(uq, "DQALLOC", ip); + trace_xfs_dquot_dqalloc(ip); xfs_iunlock(ip, lockflags); if (O_udqpp) diff --git a/fs/xfs/quota/xfs_qm_syscalls.c b/fs/xfs/quota/xfs_qm_syscalls.c index 5d1a3b98a6e..71af76fe8a2 100644 --- a/fs/xfs/quota/xfs_qm_syscalls.c +++ b/fs/xfs/quota/xfs_qm_syscalls.c @@ -49,6 +49,7 @@ #include "xfs_buf_item.h" #include "xfs_utils.h" #include "xfs_qm.h" +#include "xfs_trace.h" #ifdef DEBUG # define qdprintk(s, args...) cmn_err(CE_DEBUG, s, ## args) @@ -496,7 +497,6 @@ xfs_qm_scall_setqlim( ASSERT(error != ENOENT); return (error); } - xfs_dqtrace_entry(dqp, "Q_SETQLIM: AFT DQGET"); xfs_trans_dqjoin(tp, dqp); ddq = &dqp->q_core; @@ -602,7 +602,6 @@ xfs_qm_scall_setqlim( dqp->dq_flags |= XFS_DQ_DIRTY; xfs_trans_log_dquot(tp, dqp); - xfs_dqtrace_entry(dqp, "Q_SETQLIM: COMMIT"); error = xfs_trans_commit(tp, 0); xfs_qm_dqprint(dqp); xfs_qm_dqrele(dqp); @@ -630,7 +629,6 @@ xfs_qm_scall_getquota( return (error); } - xfs_dqtrace_entry(dqp, "Q_GETQUOTA SUCCESS"); /* * If everything's NULL, this dquot doesn't quite exist as far as * our utility programs are concerned. diff --git a/fs/xfs/support/ktrace.c b/fs/xfs/support/ktrace.c deleted file mode 100644 index 2d494c26717..00000000000 --- a/fs/xfs/support/ktrace.c +++ /dev/null @@ -1,323 +0,0 @@ -/* - * Copyright (c) 2000-2003,2005 Silicon Graphics, Inc. - * All Rights Reserved. - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it would be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - */ -#include <xfs.h> - -static kmem_zone_t *ktrace_hdr_zone; -static kmem_zone_t *ktrace_ent_zone; -static int ktrace_zentries; - -void __init -ktrace_init(int zentries) -{ - ktrace_zentries = roundup_pow_of_two(zentries); - - ktrace_hdr_zone = kmem_zone_init(sizeof(ktrace_t), - "ktrace_hdr"); - ASSERT(ktrace_hdr_zone); - - ktrace_ent_zone = kmem_zone_init(ktrace_zentries - * sizeof(ktrace_entry_t), - "ktrace_ent"); - ASSERT(ktrace_ent_zone); -} - -void __exit -ktrace_uninit(void) -{ - kmem_zone_destroy(ktrace_hdr_zone); - kmem_zone_destroy(ktrace_ent_zone); -} - -/* - * ktrace_alloc() - * - * Allocate a ktrace header and enough buffering for the given - * number of entries. Round the number of entries up to a - * power of 2 so we can do fast masking to get the index from - * the atomic index counter. - */ -ktrace_t * -ktrace_alloc(int nentries, unsigned int __nocast sleep) -{ - ktrace_t *ktp; - ktrace_entry_t *ktep; - int entries; - - ktp = (ktrace_t*)kmem_zone_alloc(ktrace_hdr_zone, sleep); - - if (ktp == (ktrace_t*)NULL) { - /* - * KM_SLEEP callers don't expect failure. 
- */ - if (sleep & KM_SLEEP) - panic("ktrace_alloc: NULL memory on KM_SLEEP request!"); - - return NULL; - } - - /* - * Special treatment for buffers with the ktrace_zentries entries - */ - entries = roundup_pow_of_two(nentries); - if (entries == ktrace_zentries) { - ktep = (ktrace_entry_t*)kmem_zone_zalloc(ktrace_ent_zone, - sleep); - } else { - ktep = (ktrace_entry_t*)kmem_zalloc((entries * sizeof(*ktep)), - sleep | KM_LARGE); - } - - if (ktep == NULL) { - /* - * KM_SLEEP callers don't expect failure. - */ - if (sleep & KM_SLEEP) - panic("ktrace_alloc: NULL memory on KM_SLEEP request!"); - - kmem_free(ktp); - - return NULL; - } - - ktp->kt_entries = ktep; - ktp->kt_nentries = entries; - ASSERT(is_power_of_2(entries)); - ktp->kt_index_mask = entries - 1; - atomic_set(&ktp->kt_index, 0); - ktp->kt_rollover = 0; - return ktp; -} - - -/* - * ktrace_free() - * - * Free up the ktrace header and buffer. It is up to the caller - * to ensure that no-one is referencing it. - */ -void -ktrace_free(ktrace_t *ktp) -{ - if (ktp == (ktrace_t *)NULL) - return; - - /* - * Special treatment for the Vnode trace buffer. - */ - if (ktp->kt_nentries == ktrace_zentries) - kmem_zone_free(ktrace_ent_zone, ktp->kt_entries); - else - kmem_free(ktp->kt_entries); - - kmem_zone_free(ktrace_hdr_zone, ktp); -} - - -/* - * Enter the given values into the "next" entry in the trace buffer. - * kt_index is always the index of the next entry to be filled. - */ -void -ktrace_enter( - ktrace_t *ktp, - void *val0, - void *val1, - void *val2, - void *val3, - void *val4, - void *val5, - void *val6, - void *val7, - void *val8, - void *val9, - void *val10, - void *val11, - void *val12, - void *val13, - void *val14, - void *val15) -{ - int index; - ktrace_entry_t *ktep; - - ASSERT(ktp != NULL); - - /* - * Grab an entry by pushing the index up to the next one. - */ - index = atomic_add_return(1, &ktp->kt_index); - index = (index - 1) & ktp->kt_index_mask; - if (!ktp->kt_rollover && index == ktp->kt_nentries - 1) - ktp->kt_rollover = 1; - - ASSERT((index >= 0) && (index < ktp->kt_nentries)); - - ktep = &(ktp->kt_entries[index]); - - ktep->val[0] = val0; - ktep->val[1] = val1; - ktep->val[2] = val2; - ktep->val[3] = val3; - ktep->val[4] = val4; - ktep->val[5] = val5; - ktep->val[6] = val6; - ktep->val[7] = val7; - ktep->val[8] = val8; - ktep->val[9] = val9; - ktep->val[10] = val10; - ktep->val[11] = val11; - ktep->val[12] = val12; - ktep->val[13] = val13; - ktep->val[14] = val14; - ktep->val[15] = val15; -} - -/* - * Return the number of entries in the trace buffer. - */ -int -ktrace_nentries( - ktrace_t *ktp) -{ - int index; - if (ktp == NULL) - return 0; - - index = atomic_read(&ktp->kt_index) & ktp->kt_index_mask; - return (ktp->kt_rollover ? ktp->kt_nentries : index); -} - -/* - * ktrace_first() - * - * This is used to find the start of the trace buffer. - * In conjunction with ktrace_next() it can be used to - * iterate through the entire trace buffer. This code does - * not do any locking because it is assumed that it is called - * from the debugger. - * - * The caller must pass in a pointer to a ktrace_snap - * structure in which we will keep some state used to - * iterate through the buffer. This state must not touched - * by any code outside of this module. 
- */ -ktrace_entry_t * -ktrace_first(ktrace_t *ktp, ktrace_snap_t *ktsp) -{ - ktrace_entry_t *ktep; - int index; - int nentries; - - if (ktp->kt_rollover) - index = atomic_read(&ktp->kt_index) & ktp->kt_index_mask; - else - index = 0; - - ktsp->ks_start = index; - ktep = &(ktp->kt_entries[index]); - - nentries = ktrace_nentries(ktp); - index++; - if (index < nentries) { - ktsp->ks_index = index; - } else { - ktsp->ks_index = 0; - if (index > nentries) - ktep = NULL; - } - return ktep; -} - -/* - * ktrace_next() - * - * This is used to iterate through the entries of the given - * trace buffer. The caller must pass in the ktrace_snap_t - * structure initialized by ktrace_first(). The return value - * will be either a pointer to the next ktrace_entry or NULL - * if all of the entries have been traversed. - */ -ktrace_entry_t * -ktrace_next( - ktrace_t *ktp, - ktrace_snap_t *ktsp) -{ - int index; - ktrace_entry_t *ktep; - - index = ktsp->ks_index; - if (index == ktsp->ks_start) { - ktep = NULL; - } else { - ktep = &ktp->kt_entries[index]; - } - - index++; - if (index == ktrace_nentries(ktp)) { - ktsp->ks_index = 0; - } else { - ktsp->ks_index = index; - } - - return ktep; -} - -/* - * ktrace_skip() - * - * Skip the next "count" entries and return the entry after that. - * Return NULL if this causes us to iterate past the beginning again. - */ -ktrace_entry_t * -ktrace_skip( - ktrace_t *ktp, - int count, - ktrace_snap_t *ktsp) -{ - int index; - int new_index; - ktrace_entry_t *ktep; - int nentries = ktrace_nentries(ktp); - - index = ktsp->ks_index; - new_index = index + count; - while (new_index >= nentries) { - new_index -= nentries; - } - if (index == ktsp->ks_start) { - /* - * We've iterated around to the start, so we're done. - */ - ktep = NULL; - } else if ((new_index < index) && (index < ktsp->ks_index)) { - /* - * We've skipped past the start again, so we're done. - */ - ktep = NULL; - ktsp->ks_index = ktsp->ks_start; - } else { - ktep = &(ktp->kt_entries[new_index]); - new_index++; - if (new_index == nentries) { - ktsp->ks_index = 0; - } else { - ktsp->ks_index = new_index; - } - } - return ktep; -} diff --git a/fs/xfs/support/ktrace.h b/fs/xfs/support/ktrace.h deleted file mode 100644 index 741d6947ca6..00000000000 --- a/fs/xfs/support/ktrace.h +++ /dev/null @@ -1,85 +0,0 @@ -/* - * Copyright (c) 2000-2003,2005 Silicon Graphics, Inc. - * All Rights Reserved. - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it would be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - */ -#ifndef __XFS_SUPPORT_KTRACE_H__ -#define __XFS_SUPPORT_KTRACE_H__ - -/* - * Trace buffer entry structure. - */ -typedef struct ktrace_entry { - void *val[16]; -} ktrace_entry_t; - -/* - * Trace buffer header structure. 
- */ -typedef struct ktrace { - int kt_nentries; /* number of entries in trace buf */ - atomic_t kt_index; /* current index in entries */ - unsigned int kt_index_mask; - int kt_rollover; - ktrace_entry_t *kt_entries; /* buffer of entries */ -} ktrace_t; - -/* - * Trace buffer snapshot structure. - */ -typedef struct ktrace_snap { - int ks_start; /* kt_index at time of snap */ - int ks_index; /* current index */ -} ktrace_snap_t; - - -#ifdef CONFIG_XFS_TRACE - -extern void ktrace_init(int zentries); -extern void ktrace_uninit(void); - -extern ktrace_t *ktrace_alloc(int, unsigned int __nocast); -extern void ktrace_free(ktrace_t *); - -extern void ktrace_enter( - ktrace_t *, - void *, - void *, - void *, - void *, - void *, - void *, - void *, - void *, - void *, - void *, - void *, - void *, - void *, - void *, - void *, - void *); - -extern ktrace_entry_t *ktrace_first(ktrace_t *, ktrace_snap_t *); -extern int ktrace_nentries(ktrace_t *); -extern ktrace_entry_t *ktrace_next(ktrace_t *, ktrace_snap_t *); -extern ktrace_entry_t *ktrace_skip(ktrace_t *, int, ktrace_snap_t *); - -#else -#define ktrace_init(x) do { } while (0) -#define ktrace_uninit() do { } while (0) -#endif /* CONFIG_XFS_TRACE */ - -#endif /* __XFS_SUPPORT_KTRACE_H__ */ diff --git a/fs/xfs/xfs.h b/fs/xfs/xfs.h index 17254b529c5..5ad8ad3a1dc 100644 --- a/fs/xfs/xfs.h +++ b/fs/xfs/xfs.h @@ -25,21 +25,5 @@ /* #define QUOTADEBUG 1 */ #endif -#ifdef CONFIG_XFS_TRACE -#define XFS_ALLOC_TRACE 1 -#define XFS_ATTR_TRACE 1 -#define XFS_BLI_TRACE 1 -#define XFS_BMAP_TRACE 1 -#define XFS_BTREE_TRACE 1 -#define XFS_DIR2_TRACE 1 -#define XFS_DQUOT_TRACE 1 -#define XFS_ILOCK_TRACE 1 -#define XFS_LOG_TRACE 1 -#define XFS_RW_TRACE 1 -#define XFS_BUF_TRACE 1 -#define XFS_INODE_TRACE 1 -#define XFS_FILESTREAMS_TRACE 1 -#endif - #include <linux-2.6/xfs_linux.h> #endif /* __XFS_H__ */ diff --git a/fs/xfs/xfs_ag.h b/fs/xfs/xfs_ag.h index a5d54bf4931..6702bd86581 100644 --- a/fs/xfs/xfs_ag.h +++ b/fs/xfs/xfs_ag.h @@ -86,6 +86,20 @@ typedef struct xfs_agf { #define XFS_AGF_NUM_BITS 12 #define XFS_AGF_ALL_BITS ((1 << XFS_AGF_NUM_BITS) - 1) +#define XFS_AGF_FLAGS \ + { XFS_AGF_MAGICNUM, "MAGICNUM" }, \ + { XFS_AGF_VERSIONNUM, "VERSIONNUM" }, \ + { XFS_AGF_SEQNO, "SEQNO" }, \ + { XFS_AGF_LENGTH, "LENGTH" }, \ + { XFS_AGF_ROOTS, "ROOTS" }, \ + { XFS_AGF_LEVELS, "LEVELS" }, \ + { XFS_AGF_FLFIRST, "FLFIRST" }, \ + { XFS_AGF_FLLAST, "FLLAST" }, \ + { XFS_AGF_FLCOUNT, "FLCOUNT" }, \ + { XFS_AGF_FREEBLKS, "FREEBLKS" }, \ + { XFS_AGF_LONGEST, "LONGEST" }, \ + { XFS_AGF_BTREEBLKS, "BTREEBLKS" } + /* disk block (xfs_daddr_t) in the AG */ #define XFS_AGF_DADDR(mp) ((xfs_daddr_t)(1 << (mp)->m_sectbb_log)) #define XFS_AGF_BLOCK(mp) XFS_HDR_BLOCK(mp, XFS_AGF_DADDR(mp)) diff --git a/fs/xfs/xfs_alloc.c b/fs/xfs/xfs_alloc.c index 2cf944eb796..a1c65fc6d9c 100644 --- a/fs/xfs/xfs_alloc.c +++ b/fs/xfs/xfs_alloc.c @@ -38,6 +38,7 @@ #include "xfs_ialloc.h" #include "xfs_alloc.h" #include "xfs_error.h" +#include "xfs_trace.h" #define XFS_ABSDIFF(a,b) (((a) <= (b)) ? 
((b) - (a)) : ((a) - (b))) @@ -51,30 +52,6 @@ xfs_alloc_search_busy(xfs_trans_t *tp, xfs_agblock_t bno, xfs_extlen_t len); -#if defined(XFS_ALLOC_TRACE) -ktrace_t *xfs_alloc_trace_buf; - -#define TRACE_ALLOC(s,a) \ - xfs_alloc_trace_alloc(__func__, s, a, __LINE__) -#define TRACE_FREE(s,a,b,x,f) \ - xfs_alloc_trace_free(__func__, s, mp, a, b, x, f, __LINE__) -#define TRACE_MODAGF(s,a,f) \ - xfs_alloc_trace_modagf(__func__, s, mp, a, f, __LINE__) -#define TRACE_BUSY(__func__,s,ag,agb,l,sl,tp) \ - xfs_alloc_trace_busy(__func__, s, mp, ag, agb, l, sl, tp, XFS_ALLOC_KTRACE_BUSY, __LINE__) -#define TRACE_UNBUSY(__func__,s,ag,sl,tp) \ - xfs_alloc_trace_busy(__func__, s, mp, ag, -1, -1, sl, tp, XFS_ALLOC_KTRACE_UNBUSY, __LINE__) -#define TRACE_BUSYSEARCH(__func__,s,ag,agb,l,tp) \ - xfs_alloc_trace_busy(__func__, s, mp, ag, agb, l, 0, tp, XFS_ALLOC_KTRACE_BUSYSEARCH, __LINE__) -#else -#define TRACE_ALLOC(s,a) -#define TRACE_FREE(s,a,b,x,f) -#define TRACE_MODAGF(s,a,f) -#define TRACE_BUSY(s,a,ag,agb,l,sl,tp) -#define TRACE_UNBUSY(fname,s,ag,sl,tp) -#define TRACE_BUSYSEARCH(fname,s,ag,agb,l,tp) -#endif /* XFS_ALLOC_TRACE */ - /* * Prototypes for per-ag allocation routines */ @@ -498,124 +475,6 @@ xfs_alloc_read_agfl( return 0; } -#if defined(XFS_ALLOC_TRACE) -/* - * Add an allocation trace entry for an alloc call. - */ -STATIC void -xfs_alloc_trace_alloc( - const char *name, /* function tag string */ - char *str, /* additional string */ - xfs_alloc_arg_t *args, /* allocation argument structure */ - int line) /* source line number */ -{ - ktrace_enter(xfs_alloc_trace_buf, - (void *)(__psint_t)(XFS_ALLOC_KTRACE_ALLOC | (line << 16)), - (void *)name, - (void *)str, - (void *)args->mp, - (void *)(__psunsigned_t)args->agno, - (void *)(__psunsigned_t)args->agbno, - (void *)(__psunsigned_t)args->minlen, - (void *)(__psunsigned_t)args->maxlen, - (void *)(__psunsigned_t)args->mod, - (void *)(__psunsigned_t)args->prod, - (void *)(__psunsigned_t)args->minleft, - (void *)(__psunsigned_t)args->total, - (void *)(__psunsigned_t)args->alignment, - (void *)(__psunsigned_t)args->len, - (void *)((((__psint_t)args->type) << 16) | - (__psint_t)args->otype), - (void *)(__psint_t)((args->wasdel << 3) | - (args->wasfromfl << 2) | - (args->isfl << 1) | - (args->userdata << 0))); -} - -/* - * Add an allocation trace entry for a free call. - */ -STATIC void -xfs_alloc_trace_free( - const char *name, /* function tag string */ - char *str, /* additional string */ - xfs_mount_t *mp, /* file system mount point */ - xfs_agnumber_t agno, /* allocation group number */ - xfs_agblock_t agbno, /* a.g. relative block number */ - xfs_extlen_t len, /* length of extent */ - int isfl, /* set if is freelist allocation/free */ - int line) /* source line number */ -{ - ktrace_enter(xfs_alloc_trace_buf, - (void *)(__psint_t)(XFS_ALLOC_KTRACE_FREE | (line << 16)), - (void *)name, - (void *)str, - (void *)mp, - (void *)(__psunsigned_t)agno, - (void *)(__psunsigned_t)agbno, - (void *)(__psunsigned_t)len, - (void *)(__psint_t)isfl, - NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL); -} - -/* - * Add an allocation trace entry for modifying an agf. 
- */ -STATIC void -xfs_alloc_trace_modagf( - const char *name, /* function tag string */ - char *str, /* additional string */ - xfs_mount_t *mp, /* file system mount point */ - xfs_agf_t *agf, /* new agf value */ - int flags, /* logging flags for agf */ - int line) /* source line number */ -{ - ktrace_enter(xfs_alloc_trace_buf, - (void *)(__psint_t)(XFS_ALLOC_KTRACE_MODAGF | (line << 16)), - (void *)name, - (void *)str, - (void *)mp, - (void *)(__psint_t)flags, - (void *)(__psunsigned_t)be32_to_cpu(agf->agf_seqno), - (void *)(__psunsigned_t)be32_to_cpu(agf->agf_length), - (void *)(__psunsigned_t)be32_to_cpu(agf->agf_roots[XFS_BTNUM_BNO]), - (void *)(__psunsigned_t)be32_to_cpu(agf->agf_roots[XFS_BTNUM_CNT]), - (void *)(__psunsigned_t)be32_to_cpu(agf->agf_levels[XFS_BTNUM_BNO]), - (void *)(__psunsigned_t)be32_to_cpu(agf->agf_levels[XFS_BTNUM_CNT]), - (void *)(__psunsigned_t)be32_to_cpu(agf->agf_flfirst), - (void *)(__psunsigned_t)be32_to_cpu(agf->agf_fllast), - (void *)(__psunsigned_t)be32_to_cpu(agf->agf_flcount), - (void *)(__psunsigned_t)be32_to_cpu(agf->agf_freeblks), - (void *)(__psunsigned_t)be32_to_cpu(agf->agf_longest)); -} - -STATIC void -xfs_alloc_trace_busy( - const char *name, /* function tag string */ - char *str, /* additional string */ - xfs_mount_t *mp, /* file system mount point */ - xfs_agnumber_t agno, /* allocation group number */ - xfs_agblock_t agbno, /* a.g. relative block number */ - xfs_extlen_t len, /* length of extent */ - int slot, /* perag Busy slot */ - xfs_trans_t *tp, - int trtype, /* type: add, delete, search */ - int line) /* source line number */ -{ - ktrace_enter(xfs_alloc_trace_buf, - (void *)(__psint_t)(trtype | (line << 16)), - (void *)name, - (void *)str, - (void *)mp, - (void *)(__psunsigned_t)agno, - (void *)(__psunsigned_t)agbno, - (void *)(__psunsigned_t)len, - (void *)(__psint_t)slot, - (void *)tp, - NULL, NULL, NULL, NULL, NULL, NULL, NULL); -} -#endif /* XFS_ALLOC_TRACE */ - /* * Allocation group level functions. 
*/ @@ -665,9 +524,6 @@ xfs_alloc_ag_vextent( */ if (args->agbno != NULLAGBLOCK) { xfs_agf_t *agf; /* allocation group freelist header */ -#ifdef XFS_ALLOC_TRACE - xfs_mount_t *mp = args->mp; -#endif long slen = (long)args->len; ASSERT(args->len >= args->minlen && args->len <= args->maxlen); @@ -682,7 +538,6 @@ xfs_alloc_ag_vextent( args->pag->pagf_freeblks -= args->len; ASSERT(be32_to_cpu(agf->agf_freeblks) <= be32_to_cpu(agf->agf_length)); - TRACE_MODAGF(NULL, agf, XFS_AGF_FREEBLKS); xfs_alloc_log_agf(args->tp, args->agbp, XFS_AGF_FREEBLKS); /* search the busylist for these blocks */ @@ -792,13 +647,14 @@ xfs_alloc_ag_vextent_exact( } xfs_btree_del_cursor(bno_cur, XFS_BTREE_NOERROR); xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR); - TRACE_ALLOC("normal", args); + + trace_xfs_alloc_exact_done(args); args->wasfromfl = 0; return 0; error0: xfs_btree_del_cursor(bno_cur, XFS_BTREE_ERROR); - TRACE_ALLOC("error", args); + trace_xfs_alloc_exact_error(args); return error; } @@ -958,7 +814,7 @@ xfs_alloc_ag_vextent_near( args->len = blen; if (!xfs_alloc_fix_minleft(args)) { xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR); - TRACE_ALLOC("nominleft", args); + trace_xfs_alloc_near_nominleft(args); return 0; } blen = args->len; @@ -981,7 +837,8 @@ xfs_alloc_ag_vextent_near( goto error0; xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR); xfs_btree_del_cursor(bno_cur_lt, XFS_BTREE_NOERROR); - TRACE_ALLOC("first", args); + + trace_xfs_alloc_near_first(args); return 0; } /* @@ -1272,7 +1129,7 @@ xfs_alloc_ag_vextent_near( * If we couldn't get anything, give up. */ if (bno_cur_lt == NULL && bno_cur_gt == NULL) { - TRACE_ALLOC("neither", args); + trace_xfs_alloc_size_neither(args); args->agbno = NULLAGBLOCK; return 0; } @@ -1299,7 +1156,7 @@ xfs_alloc_ag_vextent_near( args->len = XFS_EXTLEN_MIN(ltlena, args->maxlen); xfs_alloc_fix_len(args); if (!xfs_alloc_fix_minleft(args)) { - TRACE_ALLOC("nominleft", args); + trace_xfs_alloc_near_nominleft(args); xfs_btree_del_cursor(bno_cur_lt, XFS_BTREE_NOERROR); xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR); return 0; @@ -1314,13 +1171,18 @@ xfs_alloc_ag_vextent_near( if ((error = xfs_alloc_fixup_trees(cnt_cur, bno_cur_lt, ltbno, ltlen, ltnew, rlen, XFSA_FIXUP_BNO_OK))) goto error0; - TRACE_ALLOC(j ? 
"gt" : "lt", args); + + if (j) + trace_xfs_alloc_near_greater(args); + else + trace_xfs_alloc_near_lesser(args); + xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR); xfs_btree_del_cursor(bno_cur_lt, XFS_BTREE_NOERROR); return 0; error0: - TRACE_ALLOC("error", args); + trace_xfs_alloc_near_error(args); if (cnt_cur != NULL) xfs_btree_del_cursor(cnt_cur, XFS_BTREE_ERROR); if (bno_cur_lt != NULL) @@ -1371,7 +1233,7 @@ xfs_alloc_ag_vextent_size( goto error0; if (i == 0 || flen == 0) { xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR); - TRACE_ALLOC("noentry", args); + trace_xfs_alloc_size_noentry(args); return 0; } ASSERT(i == 1); @@ -1448,7 +1310,7 @@ xfs_alloc_ag_vextent_size( xfs_alloc_fix_len(args); if (rlen < args->minlen || !xfs_alloc_fix_minleft(args)) { xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR); - TRACE_ALLOC("nominleft", args); + trace_xfs_alloc_size_nominleft(args); args->agbno = NULLAGBLOCK; return 0; } @@ -1471,11 +1333,11 @@ xfs_alloc_ag_vextent_size( args->agbno + args->len <= be32_to_cpu(XFS_BUF_TO_AGF(args->agbp)->agf_length), error0); - TRACE_ALLOC("normal", args); + trace_xfs_alloc_size_done(args); return 0; error0: - TRACE_ALLOC("error", args); + trace_xfs_alloc_size_error(args); if (cnt_cur) xfs_btree_del_cursor(cnt_cur, XFS_BTREE_ERROR); if (bno_cur) @@ -1534,7 +1396,7 @@ xfs_alloc_ag_vextent_small( be32_to_cpu(XFS_BUF_TO_AGF(args->agbp)->agf_length), error0); args->wasfromfl = 1; - TRACE_ALLOC("freelist", args); + trace_xfs_alloc_small_freelist(args); *stat = 0; return 0; } @@ -1556,17 +1418,17 @@ xfs_alloc_ag_vextent_small( */ if (flen < args->minlen) { args->agbno = NULLAGBLOCK; - TRACE_ALLOC("notenough", args); + trace_xfs_alloc_small_notenough(args); flen = 0; } *fbnop = fbno; *flenp = flen; *stat = 1; - TRACE_ALLOC("normal", args); + trace_xfs_alloc_small_done(args); return 0; error0: - TRACE_ALLOC("error", args); + trace_xfs_alloc_small_error(args); return error; } @@ -1809,17 +1671,14 @@ xfs_free_ag_extent( be32_to_cpu(agf->agf_freeblks) <= be32_to_cpu(agf->agf_length), error0); - TRACE_MODAGF(NULL, agf, XFS_AGF_FREEBLKS); xfs_alloc_log_agf(tp, agbp, XFS_AGF_FREEBLKS); if (!isfl) xfs_trans_mod_sb(tp, XFS_TRANS_SB_FDBLOCKS, (long)len); XFS_STATS_INC(xs_freex); XFS_STATS_ADD(xs_freeb, len); } - TRACE_FREE(haveleft ? - (haveright ? "both" : "left") : - (haveright ? 
"right" : "none"), - agno, bno, len, isfl); + + trace_xfs_free_extent(mp, agno, bno, len, isfl, haveleft, haveright); /* * Since blocks move to the free list without the coordination @@ -1836,7 +1695,7 @@ xfs_free_ag_extent( return 0; error0: - TRACE_FREE("error", agno, bno, len, isfl); + trace_xfs_free_extent(mp, agno, bno, len, isfl, -1, -1); if (bno_cur) xfs_btree_del_cursor(bno_cur, XFS_BTREE_ERROR); if (cnt_cur) @@ -2122,7 +1981,6 @@ xfs_alloc_get_freelist( logflags |= XFS_AGF_BTREEBLKS; } - TRACE_MODAGF(NULL, agf, logflags); xfs_alloc_log_agf(tp, agbp, logflags); *bnop = bno; @@ -2165,6 +2023,8 @@ xfs_alloc_log_agf( sizeof(xfs_agf_t) }; + trace_xfs_agf(tp->t_mountp, XFS_BUF_TO_AGF(bp), fields, _RET_IP_); + xfs_btree_offsets(fields, offsets, XFS_AGF_NUM_BITS, &first, &last); xfs_trans_log_buf(tp, bp, (uint)first, (uint)last); } @@ -2230,13 +2090,11 @@ xfs_alloc_put_freelist( logflags |= XFS_AGF_BTREEBLKS; } - TRACE_MODAGF(NULL, agf, logflags); xfs_alloc_log_agf(tp, agbp, logflags); ASSERT(be32_to_cpu(agf->agf_flcount) <= XFS_AGFL_SIZE(mp)); blockp = &agfl->agfl_bno[be32_to_cpu(agf->agf_fllast)]; *blockp = cpu_to_be32(bno); - TRACE_MODAGF(NULL, agf, logflags); xfs_alloc_log_agf(tp, agbp, logflags); xfs_trans_log_buf(tp, agflbp, (int)((xfs_caddr_t)blockp - (xfs_caddr_t)agfl), @@ -2399,7 +2257,7 @@ xfs_alloc_vextent( args->minlen > args->maxlen || args->minlen > agsize || args->mod >= args->prod) { args->fsbno = NULLFSBLOCK; - TRACE_ALLOC("badargs", args); + trace_xfs_alloc_vextent_badargs(args); return 0; } minleft = args->minleft; @@ -2418,12 +2276,12 @@ xfs_alloc_vextent( error = xfs_alloc_fix_freelist(args, 0); args->minleft = minleft; if (error) { - TRACE_ALLOC("nofix", args); + trace_xfs_alloc_vextent_nofix(args); goto error0; } if (!args->agbp) { up_read(&mp->m_peraglock); - TRACE_ALLOC("noagbp", args); + trace_xfs_alloc_vextent_noagbp(args); break; } args->agbno = XFS_FSB_TO_AGBNO(mp, args->fsbno); @@ -2488,7 +2346,7 @@ xfs_alloc_vextent( error = xfs_alloc_fix_freelist(args, flags); args->minleft = minleft; if (error) { - TRACE_ALLOC("nofix", args); + trace_xfs_alloc_vextent_nofix(args); goto error0; } /* @@ -2499,7 +2357,9 @@ xfs_alloc_vextent( goto error0; break; } - TRACE_ALLOC("loopfailed", args); + + trace_xfs_alloc_vextent_loopfailed(args); + /* * Didn't work, figure out the next iteration. */ @@ -2526,7 +2386,7 @@ xfs_alloc_vextent( if (args->agno == sagno) { if (no_min == 1) { args->agbno = NULLAGBLOCK; - TRACE_ALLOC("allfailed", args); + trace_xfs_alloc_vextent_allfailed(args); break; } if (flags == 0) { @@ -2642,16 +2502,16 @@ xfs_alloc_mark_busy(xfs_trans_t *tp, } } + trace_xfs_alloc_busy(mp, agno, bno, len, n); + if (n < XFS_PAGB_NUM_SLOTS) { bsy = &mp->m_perag[agno].pagb_list[n]; mp->m_perag[agno].pagb_count++; - TRACE_BUSY("xfs_alloc_mark_busy", "got", agno, bno, len, n, tp); bsy->busy_start = bno; bsy->busy_length = len; bsy->busy_tp = tp; xfs_trans_add_busy(tp, agno, n); } else { - TRACE_BUSY("xfs_alloc_mark_busy", "FULL", agno, bno, len, -1, tp); /* * The busy list is full! 
Since it is now not possible to * track the free block, make this a synchronous transaction @@ -2678,12 +2538,12 @@ xfs_alloc_clear_busy(xfs_trans_t *tp, list = mp->m_perag[agno].pagb_list; ASSERT(idx < XFS_PAGB_NUM_SLOTS); + + trace_xfs_alloc_unbusy(mp, agno, idx, list[idx].busy_tp == tp); + if (list[idx].busy_tp == tp) { - TRACE_UNBUSY("xfs_alloc_clear_busy", "found", agno, idx, tp); list[idx].busy_tp = NULL; mp->m_perag[agno].pagb_count--; - } else { - TRACE_UNBUSY("xfs_alloc_clear_busy", "missing", agno, idx, tp); } spin_unlock(&mp->m_perag[agno].pagb_lock); @@ -2724,24 +2584,22 @@ xfs_alloc_search_busy(xfs_trans_t *tp, if ((bno > bend) || (uend < bsy->busy_start)) { cnt--; } else { - TRACE_BUSYSEARCH("xfs_alloc_search_busy", - "found1", agno, bno, len, tp); break; } } } + trace_xfs_alloc_busysearch(mp, agno, bno, len, !!cnt); + /* * If a block was found, force the log through the LSN of the * transaction that freed the block */ if (cnt) { - TRACE_BUSYSEARCH("xfs_alloc_search_busy", "found", agno, bno, len, tp); lsn = bsy->busy_tp->t_commit_lsn; spin_unlock(&mp->m_perag[agno].pagb_lock); xfs_log_force(mp, lsn, XFS_LOG_FORCE|XFS_LOG_SYNC); } else { - TRACE_BUSYSEARCH("xfs_alloc_search_busy", "not-found", agno, bno, len, tp); spin_unlock(&mp->m_perag[agno].pagb_lock); } } diff --git a/fs/xfs/xfs_alloc.h b/fs/xfs/xfs_alloc.h index e704caee10d..599bffa3978 100644 --- a/fs/xfs/xfs_alloc.h +++ b/fs/xfs/xfs_alloc.h @@ -37,6 +37,15 @@ typedef enum xfs_alloctype XFS_ALLOCTYPE_THIS_BNO /* at exactly this block */ } xfs_alloctype_t; +#define XFS_ALLOC_TYPES \ + { XFS_ALLOCTYPE_ANY_AG, "ANY_AG" }, \ + { XFS_ALLOCTYPE_FIRST_AG, "FIRST_AG" }, \ + { XFS_ALLOCTYPE_START_AG, "START_AG" }, \ + { XFS_ALLOCTYPE_THIS_AG, "THIS_AG" }, \ + { XFS_ALLOCTYPE_START_BNO, "START_BNO" }, \ + { XFS_ALLOCTYPE_NEAR_BNO, "NEAR_BNO" }, \ + { XFS_ALLOCTYPE_THIS_BNO, "THIS_BNO" } + /* * Flags for xfs_alloc_fix_freelist. */ @@ -109,24 +118,6 @@ xfs_alloc_longest_free_extent(struct xfs_mount *mp, #ifdef __KERNEL__ -#if defined(XFS_ALLOC_TRACE) -/* - * Allocation tracing buffer size. - */ -#define XFS_ALLOC_TRACE_SIZE 4096 -extern ktrace_t *xfs_alloc_trace_buf; - -/* - * Types for alloc tracing. 
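With each TRACE_ALLOC()/TRACE_BUSY() call site becoming a named tracepoint, the old free-form description string disappears; the event name itself says which branch fired. For readers new to the generic tracer, here is a minimal sketch of what one of these allocation events could look like in xfs_trace.h. It is illustrative only, not the literal definition from this patch, and it assumes the usual struct xfs_alloc_arg fields (agno, agbno, minlen, maxlen, len, type). The XFS_ALLOC_TYPES table added above exists precisely so a TP_printk() format can decode the raw type value with __print_symbolic():

#undef TRACE_SYSTEM
#define TRACE_SYSTEM xfs

/* the usual TRACE_HEADER_MULTI_READ include guard is elided here */
#include <linux/tracepoint.h>

TRACE_EVENT(xfs_alloc_vextent_loopfailed,
	TP_PROTO(struct xfs_alloc_arg *args),
	TP_ARGS(args),
	TP_STRUCT__entry(
		__field(xfs_agnumber_t, agno)
		__field(xfs_agblock_t, agbno)
		__field(xfs_extlen_t, minlen)
		__field(xfs_extlen_t, maxlen)
		__field(xfs_extlen_t, len)
		__field(int, type)
	),
	TP_fast_assign(
		__entry->agno = args->agno;
		__entry->agbno = args->agbno;
		__entry->minlen = args->minlen;
		__entry->maxlen = args->maxlen;
		__entry->len = args->len;
		__entry->type = args->type;
	),
	TP_printk("agno %u agbno %u minlen %u maxlen %u len %u type %s",
		  __entry->agno, __entry->agbno, __entry->minlen,
		  __entry->maxlen, __entry->len,
		  __print_symbolic(__entry->type, XFS_ALLOC_TYPES))
);

#include <trace/define_trace.h>

Since many of the new allocation events share exactly this shape, in practice the body is written once and stamped out per event name, either through a small local macro or, on kernels that provide them, DECLARE_EVENT_CLASS()/DEFINE_EVENT().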
- */ -#define XFS_ALLOC_KTRACE_ALLOC 1 -#define XFS_ALLOC_KTRACE_FREE 2 -#define XFS_ALLOC_KTRACE_MODAGF 3 -#define XFS_ALLOC_KTRACE_BUSY 4 -#define XFS_ALLOC_KTRACE_UNBUSY 5 -#define XFS_ALLOC_KTRACE_BUSYSEARCH 6 -#endif - void xfs_alloc_mark_busy(xfs_trans_t *tp, xfs_agnumber_t agno, diff --git a/fs/xfs/xfs_alloc_btree.c b/fs/xfs/xfs_alloc_btree.c index c10c3a292d3..adbd9141aea 100644 --- a/fs/xfs/xfs_alloc_btree.c +++ b/fs/xfs/xfs_alloc_btree.c @@ -39,6 +39,7 @@ #include "xfs_ialloc.h" #include "xfs_alloc.h" #include "xfs_error.h" +#include "xfs_trace.h" STATIC struct xfs_btree_cur * diff --git a/fs/xfs/xfs_attr.c b/fs/xfs/xfs_attr.c index 8fe6f6b78a4..e953b6cfb2a 100644 --- a/fs/xfs/xfs_attr.c +++ b/fs/xfs/xfs_attr.c @@ -47,6 +47,7 @@ #include "xfs_trans_space.h" #include "xfs_rw.h" #include "xfs_vnodeops.h" +#include "xfs_trace.h" /* * xfs_attr.c @@ -89,10 +90,6 @@ STATIC int xfs_attr_rmtval_remove(xfs_da_args_t *args); #define ATTR_RMTVALUE_MAPSIZE 1 /* # of map entries at once */ -#if defined(XFS_ATTR_TRACE) -ktrace_t *xfs_attr_trace_buf; -#endif - STATIC int xfs_attr_name_to_xname( struct xfs_name *xname, @@ -640,7 +637,6 @@ xfs_attr_list_int(xfs_attr_list_context_t *context) return EIO; xfs_ilock(dp, XFS_ILOCK_SHARED); - xfs_attr_trace_l_c("syscall start", context); /* * Decide on what work routines to call based on the inode size. @@ -656,7 +652,6 @@ xfs_attr_list_int(xfs_attr_list_context_t *context) } xfs_iunlock(dp, XFS_ILOCK_SHARED); - xfs_attr_trace_l_c("syscall end", context); return error; } @@ -702,7 +697,7 @@ xfs_attr_put_listent(xfs_attr_list_context_t *context, int flags, context->count * sizeof(alist->al_offset[0]); context->firstu -= ATTR_ENTSIZE(namelen); if (context->firstu < arraytop) { - xfs_attr_trace_l_c("buffer full", context); + trace_xfs_attr_list_full(context); alist->al_more = 1; context->seen_enough = 1; return 1; @@ -714,7 +709,7 @@ xfs_attr_put_listent(xfs_attr_list_context_t *context, int flags, aep->a_name[namelen] = 0; alist->al_offset[context->count++] = context->firstu; alist->al_count = context->count; - xfs_attr_trace_l_c("add", context); + trace_xfs_attr_list_add(context); return 0; } @@ -1853,7 +1848,7 @@ xfs_attr_node_list(xfs_attr_list_context_t *context) node = bp->data; switch (be16_to_cpu(node->hdr.info.magic)) { case XFS_DA_NODE_MAGIC: - xfs_attr_trace_l_cn("wrong blk", context, node); + trace_xfs_attr_list_wrong_blk(context); xfs_da_brelse(NULL, bp); bp = NULL; break; @@ -1861,20 +1856,18 @@ xfs_attr_node_list(xfs_attr_list_context_t *context) leaf = bp->data; if (cursor->hashval > be32_to_cpu(leaf->entries[ be16_to_cpu(leaf->hdr.count)-1].hashval)) { - xfs_attr_trace_l_cl("wrong blk", - context, leaf); + trace_xfs_attr_list_wrong_blk(context); xfs_da_brelse(NULL, bp); bp = NULL; } else if (cursor->hashval <= be32_to_cpu(leaf->entries[0].hashval)) { - xfs_attr_trace_l_cl("maybe wrong blk", - context, leaf); + trace_xfs_attr_list_wrong_blk(context); xfs_da_brelse(NULL, bp); bp = NULL; } break; default: - xfs_attr_trace_l_c("wrong blk - ??", context); + trace_xfs_attr_list_wrong_blk(context); xfs_da_brelse(NULL, bp); bp = NULL; } @@ -1919,8 +1912,8 @@ xfs_attr_node_list(xfs_attr_list_context_t *context) if (cursor->hashval <= be32_to_cpu(btree->hashval)) { cursor->blkno = be32_to_cpu(btree->before); - xfs_attr_trace_l_cb("descending", - context, btree); + trace_xfs_attr_list_node_descend(context, + btree); break; } } @@ -2270,85 +2263,3 @@ xfs_attr_rmtval_remove(xfs_da_args_t *args) } return(0); } - -#if defined(XFS_ATTR_TRACE) -/* - * Add 
a trace buffer entry for an attr_list context structure. - */ -void -xfs_attr_trace_l_c(char *where, struct xfs_attr_list_context *context) -{ - xfs_attr_trace_enter(XFS_ATTR_KTRACE_L_C, where, context, - (__psunsigned_t)NULL, - (__psunsigned_t)NULL, - (__psunsigned_t)NULL); -} - -/* - * Add a trace buffer entry for a context structure and a Btree node. - */ -void -xfs_attr_trace_l_cn(char *where, struct xfs_attr_list_context *context, - struct xfs_da_intnode *node) -{ - xfs_attr_trace_enter(XFS_ATTR_KTRACE_L_CN, where, context, - (__psunsigned_t)be16_to_cpu(node->hdr.count), - (__psunsigned_t)be32_to_cpu(node->btree[0].hashval), - (__psunsigned_t)be32_to_cpu(node->btree[ - be16_to_cpu(node->hdr.count)-1].hashval)); -} - -/* - * Add a trace buffer entry for a context structure and a Btree element. - */ -void -xfs_attr_trace_l_cb(char *where, struct xfs_attr_list_context *context, - struct xfs_da_node_entry *btree) -{ - xfs_attr_trace_enter(XFS_ATTR_KTRACE_L_CB, where, context, - (__psunsigned_t)be32_to_cpu(btree->hashval), - (__psunsigned_t)be32_to_cpu(btree->before), - (__psunsigned_t)NULL); -} - -/* - * Add a trace buffer entry for a context structure and a leaf block. - */ -void -xfs_attr_trace_l_cl(char *where, struct xfs_attr_list_context *context, - struct xfs_attr_leafblock *leaf) -{ - xfs_attr_trace_enter(XFS_ATTR_KTRACE_L_CL, where, context, - (__psunsigned_t)be16_to_cpu(leaf->hdr.count), - (__psunsigned_t)be32_to_cpu(leaf->entries[0].hashval), - (__psunsigned_t)be32_to_cpu(leaf->entries[ - be16_to_cpu(leaf->hdr.count)-1].hashval)); -} - -/* - * Add a trace buffer entry for the arguments given to the routine, - * generic form. - */ -void -xfs_attr_trace_enter(int type, char *where, - struct xfs_attr_list_context *context, - __psunsigned_t a13, __psunsigned_t a14, - __psunsigned_t a15) -{ - ASSERT(xfs_attr_trace_buf); - ktrace_enter(xfs_attr_trace_buf, (void *)((__psunsigned_t)type), - (void *)((__psunsigned_t)where), - (void *)((__psunsigned_t)context->dp), - (void *)((__psunsigned_t)context->cursor->hashval), - (void *)((__psunsigned_t)context->cursor->blkno), - (void *)((__psunsigned_t)context->cursor->offset), - (void *)((__psunsigned_t)context->alist), - (void *)((__psunsigned_t)context->bufsize), - (void *)((__psunsigned_t)context->count), - (void *)((__psunsigned_t)context->firstu), - NULL, - (void *)((__psunsigned_t)context->dupcnt), - (void *)((__psunsigned_t)context->flags), - (void *)a13, (void *)a14, (void *)a15); -} -#endif /* XFS_ATTR_TRACE */ diff --git a/fs/xfs/xfs_attr.h b/fs/xfs/xfs_attr.h index 12f0be3a73d..59b410ce69a 100644 --- a/fs/xfs/xfs_attr.h +++ b/fs/xfs/xfs_attr.h @@ -48,6 +48,16 @@ struct xfs_attr_list_context; #define ATTR_KERNOTIME 0x1000 /* [kernel] don't update inode timestamps */ #define ATTR_KERNOVAL 0x2000 /* [kernel] get attr size only, not value */ +#define XFS_ATTR_FLAGS \ + { ATTR_DONTFOLLOW, "DONTFOLLOW" }, \ + { ATTR_ROOT, "ROOT" }, \ + { ATTR_TRUST, "TRUST" }, \ + { ATTR_SECURE, "SECURE" }, \ + { ATTR_CREATE, "CREATE" }, \ + { ATTR_REPLACE, "REPLACE" }, \ + { ATTR_KERNOTIME, "KERNOTIME" }, \ + { ATTR_KERNOVAL, "KERNOVAL" } + /* * The maximum size (into the kernel or returned from the kernel) of an * attribute value or the buffer used for an attr_list() call. 
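The four xfs_attr_trace_l_* variants removed above all recorded the same attr_list context, just with different trailing values, which is why they collapse into a handful of events that take the context directly. A rough sketch of one of them follows, using the fields the old ktrace_enter() call packed into untyped slots and the new XFS_ATTR_FLAGS table for the flags word; this is illustrative rather than the literal xfs_trace.h definition, and the 32-bit cursor field types are assumed:

TRACE_EVENT(xfs_attr_list_add,
	TP_PROTO(struct xfs_attr_list_context *ctx),
	TP_ARGS(ctx),
	TP_STRUCT__entry(
		__field(u64, ino)
		__field(u32, hashval)
		__field(u32, blkno)
		__field(u32, offset)
		__field(int, bufsize)
		__field(int, count)
		__field(int, firstu)
		__field(int, flags)
	),
	TP_fast_assign(
		__entry->ino = ctx->dp->i_ino;
		__entry->hashval = ctx->cursor->hashval;
		__entry->blkno = ctx->cursor->blkno;
		__entry->offset = ctx->cursor->offset;
		__entry->bufsize = ctx->bufsize;
		__entry->count = ctx->count;
		__entry->firstu = ctx->firstu;
		__entry->flags = ctx->flags;
	),
	TP_printk("ino 0x%llx cursor h/b/o 0x%x/%u/%u bufsize %d count %d "
		  "firstu %d flags %s",
		  (unsigned long long)__entry->ino,
		  __entry->hashval, __entry->blkno, __entry->offset,
		  __entry->bufsize, __entry->count, __entry->firstu,
		  __print_flags(__entry->flags, "|", XFS_ATTR_FLAGS))
);

The typed fields are what make the filtering and perf counting mentioned earlier possible; the old void-pointer ktrace slots carried no such structure.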
Larger diff --git a/fs/xfs/xfs_attr_leaf.c b/fs/xfs/xfs_attr_leaf.c index 0b687351293..baf41b5af75 100644 --- a/fs/xfs/xfs_attr_leaf.c +++ b/fs/xfs/xfs_attr_leaf.c @@ -42,6 +42,7 @@ #include "xfs_attr.h" #include "xfs_attr_leaf.h" #include "xfs_error.h" +#include "xfs_trace.h" /* * xfs_attr_leaf.c @@ -594,7 +595,7 @@ xfs_attr_shortform_list(xfs_attr_list_context_t *context) cursor = context->cursor; ASSERT(cursor != NULL); - xfs_attr_trace_l_c("sf start", context); + trace_xfs_attr_list_sf(context); /* * If the buffer is large enough and the cursor is at the start, @@ -627,7 +628,7 @@ xfs_attr_shortform_list(xfs_attr_list_context_t *context) return error; sfe = XFS_ATTR_SF_NEXTENTRY(sfe); } - xfs_attr_trace_l_c("sf big-gulp", context); + trace_xfs_attr_list_sf_all(context); return(0); } @@ -653,7 +654,6 @@ xfs_attr_shortform_list(xfs_attr_list_context_t *context) XFS_CORRUPTION_ERROR("xfs_attr_shortform_list", XFS_ERRLEVEL_LOW, context->dp->i_mount, sfe); - xfs_attr_trace_l_c("sf corrupted", context); kmem_free(sbuf); return XFS_ERROR(EFSCORRUPTED); } @@ -693,7 +693,6 @@ xfs_attr_shortform_list(xfs_attr_list_context_t *context) } if (i == nsbuf) { kmem_free(sbuf); - xfs_attr_trace_l_c("blk end", context); return(0); } @@ -719,7 +718,6 @@ xfs_attr_shortform_list(xfs_attr_list_context_t *context) } kmem_free(sbuf); - xfs_attr_trace_l_c("sf E-O-F", context); return(0); } @@ -2323,7 +2321,7 @@ xfs_attr_leaf_list_int(xfs_dabuf_t *bp, xfs_attr_list_context_t *context) cursor = context->cursor; cursor->initted = 1; - xfs_attr_trace_l_cl("blk start", context, leaf); + trace_xfs_attr_list_leaf(context); /* * Re-find our place in the leaf block if this is a new syscall. @@ -2344,7 +2342,7 @@ xfs_attr_leaf_list_int(xfs_dabuf_t *bp, xfs_attr_list_context_t *context) } } if (i == be16_to_cpu(leaf->hdr.count)) { - xfs_attr_trace_l_c("not found", context); + trace_xfs_attr_list_notfound(context); return(0); } } else { @@ -2419,7 +2417,7 @@ xfs_attr_leaf_list_int(xfs_dabuf_t *bp, xfs_attr_list_context_t *context) break; cursor->offset++; } - xfs_attr_trace_l_cl("blk end", context, leaf); + trace_xfs_attr_list_leaf_end(context); return(retval); } diff --git a/fs/xfs/xfs_attr_sf.h b/fs/xfs/xfs_attr_sf.h index ea22839caed..76ab7b0cbb3 100644 --- a/fs/xfs/xfs_attr_sf.h +++ b/fs/xfs/xfs_attr_sf.h @@ -25,8 +25,6 @@ * to fit into the literal area of the inode. */ -struct xfs_inode; - /* * Entries are packed toward the top as tight as possible. */ @@ -69,42 +67,4 @@ typedef struct xfs_attr_sf_sort { (be16_to_cpu(((xfs_attr_shortform_t *) \ ((dp)->i_afp->if_u1.if_data))->hdr.totsize)) -#if defined(XFS_ATTR_TRACE) -/* - * Kernel tracing support for attribute lists - */ -struct xfs_attr_list_context; -struct xfs_da_intnode; -struct xfs_da_node_entry; -struct xfs_attr_leafblock; - -#define XFS_ATTR_TRACE_SIZE 4096 /* size of global trace buffer */ -extern ktrace_t *xfs_attr_trace_buf; - -/* - * Trace record types. 
- */ -#define XFS_ATTR_KTRACE_L_C 1 /* context */ -#define XFS_ATTR_KTRACE_L_CN 2 /* context, node */ -#define XFS_ATTR_KTRACE_L_CB 3 /* context, btree */ -#define XFS_ATTR_KTRACE_L_CL 4 /* context, leaf */ - -void xfs_attr_trace_l_c(char *where, struct xfs_attr_list_context *context); -void xfs_attr_trace_l_cn(char *where, struct xfs_attr_list_context *context, - struct xfs_da_intnode *node); -void xfs_attr_trace_l_cb(char *where, struct xfs_attr_list_context *context, - struct xfs_da_node_entry *btree); -void xfs_attr_trace_l_cl(char *where, struct xfs_attr_list_context *context, - struct xfs_attr_leafblock *leaf); -void xfs_attr_trace_enter(int type, char *where, - struct xfs_attr_list_context *context, - __psunsigned_t a13, __psunsigned_t a14, - __psunsigned_t a15); -#else -#define xfs_attr_trace_l_c(w,c) -#define xfs_attr_trace_l_cn(w,c,n) -#define xfs_attr_trace_l_cb(w,c,b) -#define xfs_attr_trace_l_cl(w,c,l) -#endif /* XFS_ATTR_TRACE */ - #endif /* __XFS_ATTR_SF_H__ */ diff --git a/fs/xfs/xfs_bmap.c b/fs/xfs/xfs_bmap.c index 5874098ca9c..98251cdc52a 100644 --- a/fs/xfs/xfs_bmap.c +++ b/fs/xfs/xfs_bmap.c @@ -54,6 +54,7 @@ #include "xfs_buf_item.h" #include "xfs_filestream.h" #include "xfs_vnodeops.h" +#include "xfs_trace.h" #ifdef DEBUG @@ -272,71 +273,6 @@ xfs_bmap_isaeof( int whichfork, /* data or attribute fork */ char *aeof); /* return value */ -#ifdef XFS_BMAP_TRACE -/* - * Add bmap trace entry prior to a call to xfs_iext_remove. - */ -STATIC void -xfs_bmap_trace_delete( - const char *fname, /* function name */ - char *desc, /* operation description */ - xfs_inode_t *ip, /* incore inode pointer */ - xfs_extnum_t idx, /* index of entry(entries) deleted */ - xfs_extnum_t cnt, /* count of entries deleted, 1 or 2 */ - int whichfork); /* data or attr fork */ - -/* - * Add bmap trace entry prior to a call to xfs_iext_insert, or - * reading in the extents list from the disk (in the btree). - */ -STATIC void -xfs_bmap_trace_insert( - const char *fname, /* function name */ - char *desc, /* operation description */ - xfs_inode_t *ip, /* incore inode pointer */ - xfs_extnum_t idx, /* index of entry(entries) inserted */ - xfs_extnum_t cnt, /* count of entries inserted, 1 or 2 */ - xfs_bmbt_irec_t *r1, /* inserted record 1 */ - xfs_bmbt_irec_t *r2, /* inserted record 2 or null */ - int whichfork); /* data or attr fork */ - -/* - * Add bmap trace entry after updating an extent record in place. - */ -STATIC void -xfs_bmap_trace_post_update( - const char *fname, /* function name */ - char *desc, /* operation description */ - xfs_inode_t *ip, /* incore inode pointer */ - xfs_extnum_t idx, /* index of entry updated */ - int whichfork); /* data or attr fork */ - -/* - * Add bmap trace entry prior to updating an extent record in place. 
- */ -STATIC void -xfs_bmap_trace_pre_update( - const char *fname, /* function name */ - char *desc, /* operation description */ - xfs_inode_t *ip, /* incore inode pointer */ - xfs_extnum_t idx, /* index of entry to be updated */ - int whichfork); /* data or attr fork */ - -#define XFS_BMAP_TRACE_DELETE(d,ip,i,c,w) \ - xfs_bmap_trace_delete(__func__,d,ip,i,c,w) -#define XFS_BMAP_TRACE_INSERT(d,ip,i,c,r1,r2,w) \ - xfs_bmap_trace_insert(__func__,d,ip,i,c,r1,r2,w) -#define XFS_BMAP_TRACE_POST_UPDATE(d,ip,i,w) \ - xfs_bmap_trace_post_update(__func__,d,ip,i,w) -#define XFS_BMAP_TRACE_PRE_UPDATE(d,ip,i,w) \ - xfs_bmap_trace_pre_update(__func__,d,ip,i,w) -#else -#define XFS_BMAP_TRACE_DELETE(d,ip,i,c,w) -#define XFS_BMAP_TRACE_INSERT(d,ip,i,c,r1,r2,w) -#define XFS_BMAP_TRACE_POST_UPDATE(d,ip,i,w) -#define XFS_BMAP_TRACE_PRE_UPDATE(d,ip,i,w) -#endif /* XFS_BMAP_TRACE */ - /* * Compute the worst-case number of indirect blocks that will be used * for ip's delayed extent of length "len". @@ -363,18 +299,6 @@ xfs_bmap_validate_ret( #define xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap) #endif /* DEBUG */ -#if defined(XFS_RW_TRACE) -STATIC void -xfs_bunmap_trace( - xfs_inode_t *ip, - xfs_fileoff_t bno, - xfs_filblks_t len, - int flags, - inst_t *ra); -#else -#define xfs_bunmap_trace(ip, bno, len, flags, ra) -#endif /* XFS_RW_TRACE */ - STATIC int xfs_bmap_count_tree( xfs_mount_t *mp, @@ -590,8 +514,6 @@ xfs_bmap_add_extent( * already extents in the list. */ if (nextents == 0) { - XFS_BMAP_TRACE_INSERT("insert empty", ip, 0, 1, new, NULL, - whichfork); xfs_iext_insert(ip, 0, 1, new, whichfork == XFS_ATTR_FORK ? BMAP_ATTRFORK : 0); @@ -843,14 +765,12 @@ xfs_bmap_add_extent_delay_real( * Filling in all of a previously delayed allocation extent. * The left and right neighbors are both contiguous with new. */ - XFS_BMAP_TRACE_PRE_UPDATE("LF|RF|LC|RC", ip, idx - 1, - XFS_DATA_FORK); + trace_xfs_bmap_pre_update(ip, idx - 1, state, _THIS_IP_); xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, idx - 1), LEFT.br_blockcount + PREV.br_blockcount + RIGHT.br_blockcount); - XFS_BMAP_TRACE_POST_UPDATE("LF|RF|LC|RC", ip, idx - 1, - XFS_DATA_FORK); - XFS_BMAP_TRACE_DELETE("LF|RF|LC|RC", ip, idx, 2, XFS_DATA_FORK); + trace_xfs_bmap_post_update(ip, idx - 1, state, _THIS_IP_); + xfs_iext_remove(ip, idx, 2, state); ip->i_df.if_lastex = idx - 1; ip->i_d.di_nextents--; @@ -889,14 +809,12 @@ xfs_bmap_add_extent_delay_real( * Filling in all of a previously delayed allocation extent. * The left neighbor is contiguous, the right is not. */ - XFS_BMAP_TRACE_PRE_UPDATE("LF|RF|LC", ip, idx - 1, - XFS_DATA_FORK); + trace_xfs_bmap_pre_update(ip, idx - 1, state, _THIS_IP_); xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, idx - 1), LEFT.br_blockcount + PREV.br_blockcount); - XFS_BMAP_TRACE_POST_UPDATE("LF|RF|LC", ip, idx - 1, - XFS_DATA_FORK); + trace_xfs_bmap_post_update(ip, idx - 1, state, _THIS_IP_); + ip->i_df.if_lastex = idx - 1; - XFS_BMAP_TRACE_DELETE("LF|RF|LC", ip, idx, 1, XFS_DATA_FORK); xfs_iext_remove(ip, idx, 1, state); if (cur == NULL) rval = XFS_ILOG_DEXT; @@ -925,13 +843,13 @@ xfs_bmap_add_extent_delay_real( * Filling in all of a previously delayed allocation extent. * The right neighbor is contiguous, the left is not. 
*/ - XFS_BMAP_TRACE_PRE_UPDATE("LF|RF|RC", ip, idx, XFS_DATA_FORK); + trace_xfs_bmap_pre_update(ip, idx, state, _THIS_IP_); xfs_bmbt_set_startblock(ep, new->br_startblock); xfs_bmbt_set_blockcount(ep, PREV.br_blockcount + RIGHT.br_blockcount); - XFS_BMAP_TRACE_POST_UPDATE("LF|RF|RC", ip, idx, XFS_DATA_FORK); + trace_xfs_bmap_post_update(ip, idx, state, _THIS_IP_); + ip->i_df.if_lastex = idx; - XFS_BMAP_TRACE_DELETE("LF|RF|RC", ip, idx + 1, 1, XFS_DATA_FORK); xfs_iext_remove(ip, idx + 1, 1, state); if (cur == NULL) rval = XFS_ILOG_DEXT; @@ -961,9 +879,10 @@ xfs_bmap_add_extent_delay_real( * Neither the left nor right neighbors are contiguous with * the new one. */ - XFS_BMAP_TRACE_PRE_UPDATE("LF|RF", ip, idx, XFS_DATA_FORK); + trace_xfs_bmap_pre_update(ip, idx, state, _THIS_IP_); xfs_bmbt_set_startblock(ep, new->br_startblock); - XFS_BMAP_TRACE_POST_UPDATE("LF|RF", ip, idx, XFS_DATA_FORK); + trace_xfs_bmap_post_update(ip, idx, state, _THIS_IP_); + ip->i_df.if_lastex = idx; ip->i_d.di_nextents++; if (cur == NULL) @@ -991,14 +910,15 @@ xfs_bmap_add_extent_delay_real( * Filling in the first part of a previous delayed allocation. * The left neighbor is contiguous. */ - XFS_BMAP_TRACE_PRE_UPDATE("LF|LC", ip, idx - 1, XFS_DATA_FORK); + trace_xfs_bmap_pre_update(ip, idx - 1, state, _THIS_IP_); xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, idx - 1), LEFT.br_blockcount + new->br_blockcount); xfs_bmbt_set_startoff(ep, PREV.br_startoff + new->br_blockcount); - XFS_BMAP_TRACE_POST_UPDATE("LF|LC", ip, idx - 1, XFS_DATA_FORK); + trace_xfs_bmap_post_update(ip, idx - 1, state, _THIS_IP_); + temp = PREV.br_blockcount - new->br_blockcount; - XFS_BMAP_TRACE_PRE_UPDATE("LF|LC", ip, idx, XFS_DATA_FORK); + trace_xfs_bmap_pre_update(ip, idx, state, _THIS_IP_); xfs_bmbt_set_blockcount(ep, temp); ip->i_df.if_lastex = idx - 1; if (cur == NULL) @@ -1020,7 +940,7 @@ xfs_bmap_add_extent_delay_real( temp = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip, temp), startblockval(PREV.br_startblock)); xfs_bmbt_set_startblock(ep, nullstartblock((int)temp)); - XFS_BMAP_TRACE_POST_UPDATE("LF|LC", ip, idx, XFS_DATA_FORK); + trace_xfs_bmap_post_update(ip, idx, state, _THIS_IP_); *dnew = temp; /* DELTA: The boundary between two in-core extents moved. */ temp = LEFT.br_startoff; @@ -1033,12 +953,10 @@ xfs_bmap_add_extent_delay_real( * Filling in the first part of a previous delayed allocation. * The left neighbor is not contiguous. */ - XFS_BMAP_TRACE_PRE_UPDATE("LF", ip, idx, XFS_DATA_FORK); + trace_xfs_bmap_pre_update(ip, idx, state, _THIS_IP_); xfs_bmbt_set_startoff(ep, new_endoff); temp = PREV.br_blockcount - new->br_blockcount; xfs_bmbt_set_blockcount(ep, temp); - XFS_BMAP_TRACE_INSERT("LF", ip, idx, 1, new, NULL, - XFS_DATA_FORK); xfs_iext_insert(ip, idx, 1, new, state); ip->i_df.if_lastex = idx; ip->i_d.di_nextents++; @@ -1070,7 +988,7 @@ xfs_bmap_add_extent_delay_real( (cur ? cur->bc_private.b.allocated : 0)); ep = xfs_iext_get_ext(ifp, idx + 1); xfs_bmbt_set_startblock(ep, nullstartblock((int)temp)); - XFS_BMAP_TRACE_POST_UPDATE("LF", ip, idx + 1, XFS_DATA_FORK); + trace_xfs_bmap_post_update(ip, idx + 1, state, _THIS_IP_); *dnew = temp; /* DELTA: One in-core extent is split in two. */ temp = PREV.br_startoff; @@ -1083,14 +1001,14 @@ xfs_bmap_add_extent_delay_real( * The right neighbor is contiguous with the new allocation. 
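Note what replaces the old pre/post update macros: instead of a hand-built state string like "LF|RF|LC" and an explicit fork argument, each call site passes the state bitmask it already maintains plus _THIS_IP_. Both helpers come from <linux/kernel.h> and are, approximately:

#define _RET_IP_	(unsigned long)__builtin_return_address(0)
#define _THIS_IP_	({ __label__ __here; __here: (unsigned long)&&__here; })

That is, _THIS_IP_ is the address of the call site itself and _RET_IP_ is the caller's return address, which is also why the (inst_t *)__return_address arguments elsewhere in this patch become _RET_IP_. Inside the event, the recorded address can be rendered as a symbol with a %pS conversion in TP_printk(), and the state bitmask can be decoded with __print_flags() against the XFS_BMAP_EXT_FLAGS table, reproducing the old LF/RF/LC/RC annotations without any hand-written strings.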
*/ temp = PREV.br_blockcount - new->br_blockcount; - XFS_BMAP_TRACE_PRE_UPDATE("RF|RC", ip, idx, XFS_DATA_FORK); - XFS_BMAP_TRACE_PRE_UPDATE("RF|RC", ip, idx + 1, XFS_DATA_FORK); + trace_xfs_bmap_pre_update(ip, idx, state, _THIS_IP_); + trace_xfs_bmap_pre_update(ip, idx + 1, state, _THIS_IP_); xfs_bmbt_set_blockcount(ep, temp); xfs_bmbt_set_allf(xfs_iext_get_ext(ifp, idx + 1), new->br_startoff, new->br_startblock, new->br_blockcount + RIGHT.br_blockcount, RIGHT.br_state); - XFS_BMAP_TRACE_POST_UPDATE("RF|RC", ip, idx + 1, XFS_DATA_FORK); + trace_xfs_bmap_post_update(ip, idx + 1, state, _THIS_IP_); ip->i_df.if_lastex = idx + 1; if (cur == NULL) rval = XFS_ILOG_DEXT; @@ -1111,7 +1029,7 @@ xfs_bmap_add_extent_delay_real( temp = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip, temp), startblockval(PREV.br_startblock)); xfs_bmbt_set_startblock(ep, nullstartblock((int)temp)); - XFS_BMAP_TRACE_POST_UPDATE("RF|RC", ip, idx, XFS_DATA_FORK); + trace_xfs_bmap_post_update(ip, idx, state, _THIS_IP_); *dnew = temp; /* DELTA: The boundary between two in-core extents moved. */ temp = PREV.br_startoff; @@ -1125,10 +1043,8 @@ xfs_bmap_add_extent_delay_real( * The right neighbor is not contiguous. */ temp = PREV.br_blockcount - new->br_blockcount; - XFS_BMAP_TRACE_PRE_UPDATE("RF", ip, idx, XFS_DATA_FORK); + trace_xfs_bmap_pre_update(ip, idx, state, _THIS_IP_); xfs_bmbt_set_blockcount(ep, temp); - XFS_BMAP_TRACE_INSERT("RF", ip, idx + 1, 1, new, NULL, - XFS_DATA_FORK); xfs_iext_insert(ip, idx + 1, 1, new, state); ip->i_df.if_lastex = idx + 1; ip->i_d.di_nextents++; @@ -1160,7 +1076,7 @@ xfs_bmap_add_extent_delay_real( (cur ? cur->bc_private.b.allocated : 0)); ep = xfs_iext_get_ext(ifp, idx); xfs_bmbt_set_startblock(ep, nullstartblock((int)temp)); - XFS_BMAP_TRACE_POST_UPDATE("RF", ip, idx, XFS_DATA_FORK); + trace_xfs_bmap_post_update(ip, idx, state, _THIS_IP_); *dnew = temp; /* DELTA: One in-core extent is split in two. */ temp = PREV.br_startoff; @@ -1174,7 +1090,7 @@ xfs_bmap_add_extent_delay_real( * This case is avoided almost all the time. */ temp = new->br_startoff - PREV.br_startoff; - XFS_BMAP_TRACE_PRE_UPDATE("0", ip, idx, XFS_DATA_FORK); + trace_xfs_bmap_pre_update(ip, idx, 0, _THIS_IP_); xfs_bmbt_set_blockcount(ep, temp); r[0] = *new; r[1].br_state = PREV.br_state; @@ -1182,8 +1098,6 @@ xfs_bmap_add_extent_delay_real( r[1].br_startoff = new_endoff; temp2 = PREV.br_startoff + PREV.br_blockcount - new_endoff; r[1].br_blockcount = temp2; - XFS_BMAP_TRACE_INSERT("0", ip, idx + 1, 2, &r[0], &r[1], - XFS_DATA_FORK); xfs_iext_insert(ip, idx + 1, 2, &r[0], state); ip->i_df.if_lastex = idx + 1; ip->i_d.di_nextents++; @@ -1241,11 +1155,11 @@ xfs_bmap_add_extent_delay_real( } ep = xfs_iext_get_ext(ifp, idx); xfs_bmbt_set_startblock(ep, nullstartblock((int)temp)); - XFS_BMAP_TRACE_POST_UPDATE("0", ip, idx, XFS_DATA_FORK); - XFS_BMAP_TRACE_PRE_UPDATE("0", ip, idx + 2, XFS_DATA_FORK); + trace_xfs_bmap_post_update(ip, idx, state, _THIS_IP_); + trace_xfs_bmap_pre_update(ip, idx + 2, state, _THIS_IP_); xfs_bmbt_set_startblock(xfs_iext_get_ext(ifp, idx + 2), nullstartblock((int)temp2)); - XFS_BMAP_TRACE_POST_UPDATE("0", ip, idx + 2, XFS_DATA_FORK); + trace_xfs_bmap_post_update(ip, idx + 2, state, _THIS_IP_); *dnew = temp + temp2; /* DELTA: One in-core extent is split in three. */ temp = PREV.br_startoff; @@ -1391,14 +1305,12 @@ xfs_bmap_add_extent_unwritten_real( * Setting all of a previous oldext extent to newext. * The left and right neighbors are both contiguous with new. 
*/ - XFS_BMAP_TRACE_PRE_UPDATE("LF|RF|LC|RC", ip, idx - 1, - XFS_DATA_FORK); + trace_xfs_bmap_pre_update(ip, idx - 1, state, _THIS_IP_); xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, idx - 1), LEFT.br_blockcount + PREV.br_blockcount + RIGHT.br_blockcount); - XFS_BMAP_TRACE_POST_UPDATE("LF|RF|LC|RC", ip, idx - 1, - XFS_DATA_FORK); - XFS_BMAP_TRACE_DELETE("LF|RF|LC|RC", ip, idx, 2, XFS_DATA_FORK); + trace_xfs_bmap_post_update(ip, idx - 1, state, _THIS_IP_); + xfs_iext_remove(ip, idx, 2, state); ip->i_df.if_lastex = idx - 1; ip->i_d.di_nextents -= 2; @@ -1441,14 +1353,12 @@ xfs_bmap_add_extent_unwritten_real( * Setting all of a previous oldext extent to newext. * The left neighbor is contiguous, the right is not. */ - XFS_BMAP_TRACE_PRE_UPDATE("LF|RF|LC", ip, idx - 1, - XFS_DATA_FORK); + trace_xfs_bmap_pre_update(ip, idx - 1, state, _THIS_IP_); xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, idx - 1), LEFT.br_blockcount + PREV.br_blockcount); - XFS_BMAP_TRACE_POST_UPDATE("LF|RF|LC", ip, idx - 1, - XFS_DATA_FORK); + trace_xfs_bmap_post_update(ip, idx - 1, state, _THIS_IP_); + ip->i_df.if_lastex = idx - 1; - XFS_BMAP_TRACE_DELETE("LF|RF|LC", ip, idx, 1, XFS_DATA_FORK); xfs_iext_remove(ip, idx, 1, state); ip->i_d.di_nextents--; if (cur == NULL) @@ -1483,15 +1393,12 @@ xfs_bmap_add_extent_unwritten_real( * Setting all of a previous oldext extent to newext. * The right neighbor is contiguous, the left is not. */ - XFS_BMAP_TRACE_PRE_UPDATE("LF|RF|RC", ip, idx, - XFS_DATA_FORK); + trace_xfs_bmap_pre_update(ip, idx, state, _THIS_IP_); xfs_bmbt_set_blockcount(ep, PREV.br_blockcount + RIGHT.br_blockcount); xfs_bmbt_set_state(ep, newext); - XFS_BMAP_TRACE_POST_UPDATE("LF|RF|RC", ip, idx, - XFS_DATA_FORK); + trace_xfs_bmap_post_update(ip, idx, state, _THIS_IP_); ip->i_df.if_lastex = idx; - XFS_BMAP_TRACE_DELETE("LF|RF|RC", ip, idx + 1, 1, XFS_DATA_FORK); xfs_iext_remove(ip, idx + 1, 1, state); ip->i_d.di_nextents--; if (cur == NULL) @@ -1527,11 +1434,10 @@ xfs_bmap_add_extent_unwritten_real( * Neither the left nor right neighbors are contiguous with * the new one. */ - XFS_BMAP_TRACE_PRE_UPDATE("LF|RF", ip, idx, - XFS_DATA_FORK); + trace_xfs_bmap_pre_update(ip, idx, state, _THIS_IP_); xfs_bmbt_set_state(ep, newext); - XFS_BMAP_TRACE_POST_UPDATE("LF|RF", ip, idx, - XFS_DATA_FORK); + trace_xfs_bmap_post_update(ip, idx, state, _THIS_IP_); + ip->i_df.if_lastex = idx; if (cur == NULL) rval = XFS_ILOG_DEXT; @@ -1557,22 +1463,20 @@ xfs_bmap_add_extent_unwritten_real( * Setting the first part of a previous oldext extent to newext. * The left neighbor is contiguous. 
*/ - XFS_BMAP_TRACE_PRE_UPDATE("LF|LC", ip, idx - 1, - XFS_DATA_FORK); + trace_xfs_bmap_pre_update(ip, idx - 1, state, _THIS_IP_); xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, idx - 1), LEFT.br_blockcount + new->br_blockcount); xfs_bmbt_set_startoff(ep, PREV.br_startoff + new->br_blockcount); - XFS_BMAP_TRACE_POST_UPDATE("LF|LC", ip, idx - 1, - XFS_DATA_FORK); - XFS_BMAP_TRACE_PRE_UPDATE("LF|LC", ip, idx, - XFS_DATA_FORK); + trace_xfs_bmap_post_update(ip, idx - 1, state, _THIS_IP_); + + trace_xfs_bmap_pre_update(ip, idx, state, _THIS_IP_); xfs_bmbt_set_startblock(ep, new->br_startblock + new->br_blockcount); xfs_bmbt_set_blockcount(ep, PREV.br_blockcount - new->br_blockcount); - XFS_BMAP_TRACE_POST_UPDATE("LF|LC", ip, idx, - XFS_DATA_FORK); + trace_xfs_bmap_post_update(ip, idx, state, _THIS_IP_); + ip->i_df.if_lastex = idx - 1; if (cur == NULL) rval = XFS_ILOG_DEXT; @@ -1608,16 +1512,15 @@ xfs_bmap_add_extent_unwritten_real( * Setting the first part of a previous oldext extent to newext. * The left neighbor is not contiguous. */ - XFS_BMAP_TRACE_PRE_UPDATE("LF", ip, idx, XFS_DATA_FORK); + trace_xfs_bmap_pre_update(ip, idx, state, _THIS_IP_); ASSERT(ep && xfs_bmbt_get_state(ep) == oldext); xfs_bmbt_set_startoff(ep, new_endoff); xfs_bmbt_set_blockcount(ep, PREV.br_blockcount - new->br_blockcount); xfs_bmbt_set_startblock(ep, new->br_startblock + new->br_blockcount); - XFS_BMAP_TRACE_POST_UPDATE("LF", ip, idx, XFS_DATA_FORK); - XFS_BMAP_TRACE_INSERT("LF", ip, idx, 1, new, NULL, - XFS_DATA_FORK); + trace_xfs_bmap_post_update(ip, idx, state, _THIS_IP_); + xfs_iext_insert(ip, idx, 1, new, state); ip->i_df.if_lastex = idx; ip->i_d.di_nextents++; @@ -1651,19 +1554,16 @@ xfs_bmap_add_extent_unwritten_real( * Setting the last part of a previous oldext extent to newext. * The right neighbor is contiguous with the new allocation. */ - XFS_BMAP_TRACE_PRE_UPDATE("RF|RC", ip, idx, - XFS_DATA_FORK); - XFS_BMAP_TRACE_PRE_UPDATE("RF|RC", ip, idx + 1, - XFS_DATA_FORK); + trace_xfs_bmap_pre_update(ip, idx, state, _THIS_IP_); + trace_xfs_bmap_pre_update(ip, idx + 1, state, _THIS_IP_); xfs_bmbt_set_blockcount(ep, PREV.br_blockcount - new->br_blockcount); - XFS_BMAP_TRACE_POST_UPDATE("RF|RC", ip, idx, - XFS_DATA_FORK); + trace_xfs_bmap_post_update(ip, idx, state, _THIS_IP_); xfs_bmbt_set_allf(xfs_iext_get_ext(ifp, idx + 1), new->br_startoff, new->br_startblock, new->br_blockcount + RIGHT.br_blockcount, newext); - XFS_BMAP_TRACE_POST_UPDATE("RF|RC", ip, idx + 1, - XFS_DATA_FORK); + trace_xfs_bmap_post_update(ip, idx + 1, state, _THIS_IP_); + ip->i_df.if_lastex = idx + 1; if (cur == NULL) rval = XFS_ILOG_DEXT; @@ -1698,12 +1598,11 @@ xfs_bmap_add_extent_unwritten_real( * Setting the last part of a previous oldext extent to newext. * The right neighbor is not contiguous. */ - XFS_BMAP_TRACE_PRE_UPDATE("RF", ip, idx, XFS_DATA_FORK); + trace_xfs_bmap_pre_update(ip, idx, state, _THIS_IP_); xfs_bmbt_set_blockcount(ep, PREV.br_blockcount - new->br_blockcount); - XFS_BMAP_TRACE_POST_UPDATE("RF", ip, idx, XFS_DATA_FORK); - XFS_BMAP_TRACE_INSERT("RF", ip, idx + 1, 1, new, NULL, - XFS_DATA_FORK); + trace_xfs_bmap_post_update(ip, idx, state, _THIS_IP_); + xfs_iext_insert(ip, idx + 1, 1, new, state); ip->i_df.if_lastex = idx + 1; ip->i_d.di_nextents++; @@ -1742,18 +1641,17 @@ xfs_bmap_add_extent_unwritten_real( * newext. Contiguity is impossible here. * One extent becomes three extents. 
*/ - XFS_BMAP_TRACE_PRE_UPDATE("0", ip, idx, XFS_DATA_FORK); + trace_xfs_bmap_pre_update(ip, idx, state, _THIS_IP_); xfs_bmbt_set_blockcount(ep, new->br_startoff - PREV.br_startoff); - XFS_BMAP_TRACE_POST_UPDATE("0", ip, idx, XFS_DATA_FORK); + trace_xfs_bmap_post_update(ip, idx, state, _THIS_IP_); + r[0] = *new; r[1].br_startoff = new_endoff; r[1].br_blockcount = PREV.br_startoff + PREV.br_blockcount - new_endoff; r[1].br_startblock = new->br_startblock + new->br_blockcount; r[1].br_state = oldext; - XFS_BMAP_TRACE_INSERT("0", ip, idx + 1, 2, &r[0], &r[1], - XFS_DATA_FORK); xfs_iext_insert(ip, idx + 1, 2, &r[0], state); ip->i_df.if_lastex = idx + 1; ip->i_d.di_nextents += 2; @@ -1908,8 +1806,8 @@ xfs_bmap_add_extent_hole_delay( */ temp = left.br_blockcount + new->br_blockcount + right.br_blockcount; - XFS_BMAP_TRACE_PRE_UPDATE("LC|RC", ip, idx - 1, - XFS_DATA_FORK); + + trace_xfs_bmap_pre_update(ip, idx - 1, state, _THIS_IP_); xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, idx - 1), temp); oldlen = startblockval(left.br_startblock) + startblockval(new->br_startblock) + @@ -1917,9 +1815,8 @@ xfs_bmap_add_extent_hole_delay( newlen = xfs_bmap_worst_indlen(ip, temp); xfs_bmbt_set_startblock(xfs_iext_get_ext(ifp, idx - 1), nullstartblock((int)newlen)); - XFS_BMAP_TRACE_POST_UPDATE("LC|RC", ip, idx - 1, - XFS_DATA_FORK); - XFS_BMAP_TRACE_DELETE("LC|RC", ip, idx, 1, XFS_DATA_FORK); + trace_xfs_bmap_post_update(ip, idx - 1, state, _THIS_IP_); + xfs_iext_remove(ip, idx, 1, state); ip->i_df.if_lastex = idx - 1; /* DELTA: Two in-core extents were replaced by one. */ @@ -1934,16 +1831,15 @@ xfs_bmap_add_extent_hole_delay( * Merge the new allocation with the left neighbor. */ temp = left.br_blockcount + new->br_blockcount; - XFS_BMAP_TRACE_PRE_UPDATE("LC", ip, idx - 1, - XFS_DATA_FORK); + trace_xfs_bmap_pre_update(ip, idx - 1, state, _THIS_IP_); xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, idx - 1), temp); oldlen = startblockval(left.br_startblock) + startblockval(new->br_startblock); newlen = xfs_bmap_worst_indlen(ip, temp); xfs_bmbt_set_startblock(xfs_iext_get_ext(ifp, idx - 1), nullstartblock((int)newlen)); - XFS_BMAP_TRACE_POST_UPDATE("LC", ip, idx - 1, - XFS_DATA_FORK); + trace_xfs_bmap_post_update(ip, idx - 1, state, _THIS_IP_); + ip->i_df.if_lastex = idx - 1; /* DELTA: One in-core extent grew into a hole. */ temp2 = temp; @@ -1956,14 +1852,15 @@ xfs_bmap_add_extent_hole_delay( * on the right. * Merge the new allocation with the right neighbor. */ - XFS_BMAP_TRACE_PRE_UPDATE("RC", ip, idx, XFS_DATA_FORK); + trace_xfs_bmap_pre_update(ip, idx, state, _THIS_IP_); temp = new->br_blockcount + right.br_blockcount; oldlen = startblockval(new->br_startblock) + startblockval(right.br_startblock); newlen = xfs_bmap_worst_indlen(ip, temp); xfs_bmbt_set_allf(ep, new->br_startoff, nullstartblock((int)newlen), temp, right.br_state); - XFS_BMAP_TRACE_POST_UPDATE("RC", ip, idx, XFS_DATA_FORK); + trace_xfs_bmap_post_update(ip, idx, state, _THIS_IP_); + ip->i_df.if_lastex = idx; /* DELTA: One in-core extent grew into a hole. */ temp2 = temp; @@ -1977,8 +1874,6 @@ xfs_bmap_add_extent_hole_delay( * Insert a new entry. */ oldlen = newlen = 0; - XFS_BMAP_TRACE_INSERT("0", ip, idx, 1, new, NULL, - XFS_DATA_FORK); xfs_iext_insert(ip, idx, 1, new, state); ip->i_df.if_lastex = idx; /* DELTA: A new in-core extent was added in a hole. */ @@ -2091,14 +1986,12 @@ xfs_bmap_add_extent_hole_real( * left and on the right. * Merge all three into a single extent record. 
*/ - XFS_BMAP_TRACE_PRE_UPDATE("LC|RC", ip, idx - 1, - whichfork); + trace_xfs_bmap_pre_update(ip, idx - 1, state, _THIS_IP_); xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, idx - 1), left.br_blockcount + new->br_blockcount + right.br_blockcount); - XFS_BMAP_TRACE_POST_UPDATE("LC|RC", ip, idx - 1, - whichfork); - XFS_BMAP_TRACE_DELETE("LC|RC", ip, idx, 1, whichfork); + trace_xfs_bmap_post_update(ip, idx - 1, state, _THIS_IP_); + xfs_iext_remove(ip, idx, 1, state); ifp->if_lastex = idx - 1; XFS_IFORK_NEXT_SET(ip, whichfork, @@ -2140,10 +2033,11 @@ xfs_bmap_add_extent_hole_real( * on the left. * Merge the new allocation with the left neighbor. */ - XFS_BMAP_TRACE_PRE_UPDATE("LC", ip, idx - 1, whichfork); + trace_xfs_bmap_pre_update(ip, idx - 1, state, _THIS_IP_); xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, idx - 1), left.br_blockcount + new->br_blockcount); - XFS_BMAP_TRACE_POST_UPDATE("LC", ip, idx - 1, whichfork); + trace_xfs_bmap_post_update(ip, idx - 1, state, _THIS_IP_); + ifp->if_lastex = idx - 1; if (cur == NULL) { rval = xfs_ilog_fext(whichfork); @@ -2174,11 +2068,12 @@ xfs_bmap_add_extent_hole_real( * on the right. * Merge the new allocation with the right neighbor. */ - XFS_BMAP_TRACE_PRE_UPDATE("RC", ip, idx, whichfork); + trace_xfs_bmap_pre_update(ip, idx, state, _THIS_IP_); xfs_bmbt_set_allf(ep, new->br_startoff, new->br_startblock, new->br_blockcount + right.br_blockcount, right.br_state); - XFS_BMAP_TRACE_POST_UPDATE("RC", ip, idx, whichfork); + trace_xfs_bmap_post_update(ip, idx, state, _THIS_IP_); + ifp->if_lastex = idx; if (cur == NULL) { rval = xfs_ilog_fext(whichfork); @@ -2209,7 +2104,6 @@ xfs_bmap_add_extent_hole_real( * real allocation. * Insert a new entry. */ - XFS_BMAP_TRACE_INSERT("0", ip, idx, 1, new, NULL, whichfork); xfs_iext_insert(ip, idx, 1, new, state); ifp->if_lastex = idx; XFS_IFORK_NEXT_SET(ip, whichfork, @@ -3070,8 +2964,13 @@ xfs_bmap_del_extent( uint qfield; /* quota field to update */ xfs_filblks_t temp; /* for indirect length calculations */ xfs_filblks_t temp2; /* for indirect length calculations */ + int state = 0; XFS_STATS_INC(xs_del_exlist); + + if (whichfork == XFS_ATTR_FORK) + state |= BMAP_ATTRFORK; + mp = ip->i_mount; ifp = XFS_IFORK_PTR(ip, whichfork); ASSERT((idx >= 0) && (idx < ifp->if_bytes / @@ -3151,7 +3050,6 @@ xfs_bmap_del_extent( /* * Matches the whole extent. Delete the entry. */ - XFS_BMAP_TRACE_DELETE("3", ip, idx, 1, whichfork); xfs_iext_remove(ip, idx, 1, whichfork == XFS_ATTR_FORK ? BMAP_ATTRFORK : 0); ifp->if_lastex = idx; @@ -3173,7 +3071,7 @@ xfs_bmap_del_extent( /* * Deleting the first part of the extent. */ - XFS_BMAP_TRACE_PRE_UPDATE("2", ip, idx, whichfork); + trace_xfs_bmap_pre_update(ip, idx, state, _THIS_IP_); xfs_bmbt_set_startoff(ep, del_endoff); temp = got.br_blockcount - del->br_blockcount; xfs_bmbt_set_blockcount(ep, temp); @@ -3182,13 +3080,12 @@ xfs_bmap_del_extent( temp = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip, temp), da_old); xfs_bmbt_set_startblock(ep, nullstartblock((int)temp)); - XFS_BMAP_TRACE_POST_UPDATE("2", ip, idx, - whichfork); + trace_xfs_bmap_post_update(ip, idx, state, _THIS_IP_); da_new = temp; break; } xfs_bmbt_set_startblock(ep, del_endblock); - XFS_BMAP_TRACE_POST_UPDATE("2", ip, idx, whichfork); + trace_xfs_bmap_post_update(ip, idx, state, _THIS_IP_); if (!cur) { flags |= xfs_ilog_fext(whichfork); break; @@ -3204,19 +3101,18 @@ xfs_bmap_del_extent( * Deleting the last part of the extent. 
*/ temp = got.br_blockcount - del->br_blockcount; - XFS_BMAP_TRACE_PRE_UPDATE("1", ip, idx, whichfork); + trace_xfs_bmap_pre_update(ip, idx, state, _THIS_IP_); xfs_bmbt_set_blockcount(ep, temp); ifp->if_lastex = idx; if (delay) { temp = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip, temp), da_old); xfs_bmbt_set_startblock(ep, nullstartblock((int)temp)); - XFS_BMAP_TRACE_POST_UPDATE("1", ip, idx, - whichfork); + trace_xfs_bmap_post_update(ip, idx, state, _THIS_IP_); da_new = temp; break; } - XFS_BMAP_TRACE_POST_UPDATE("1", ip, idx, whichfork); + trace_xfs_bmap_post_update(ip, idx, state, _THIS_IP_); if (!cur) { flags |= xfs_ilog_fext(whichfork); break; @@ -3233,7 +3129,7 @@ xfs_bmap_del_extent( * Deleting the middle of the extent. */ temp = del->br_startoff - got.br_startoff; - XFS_BMAP_TRACE_PRE_UPDATE("0", ip, idx, whichfork); + trace_xfs_bmap_pre_update(ip, idx, state, _THIS_IP_); xfs_bmbt_set_blockcount(ep, temp); new.br_startoff = del_endoff; temp2 = got_endoff - del_endoff; @@ -3320,11 +3216,8 @@ xfs_bmap_del_extent( } } } - XFS_BMAP_TRACE_POST_UPDATE("0", ip, idx, whichfork); - XFS_BMAP_TRACE_INSERT("0", ip, idx + 1, 1, &new, NULL, - whichfork); - xfs_iext_insert(ip, idx + 1, 1, &new, - whichfork == XFS_ATTR_FORK ? BMAP_ATTRFORK : 0); + trace_xfs_bmap_post_update(ip, idx, state, _THIS_IP_); + xfs_iext_insert(ip, idx + 1, 1, &new, state); ifp->if_lastex = idx + 1; break; } @@ -3644,7 +3537,9 @@ xfs_bmap_local_to_extents( xfs_iext_add(ifp, 0, 1); ep = xfs_iext_get_ext(ifp, 0); xfs_bmbt_set_allf(ep, 0, args.fsbno, 1, XFS_EXT_NORM); - XFS_BMAP_TRACE_POST_UPDATE("new", ip, 0, whichfork); + trace_xfs_bmap_post_update(ip, 0, + whichfork == XFS_ATTR_FORK ? BMAP_ATTRFORK : 0, + _THIS_IP_); XFS_IFORK_NEXT_SET(ip, whichfork, 1); ip->i_d.di_nblocks = 1; xfs_trans_mod_dquot_byino(tp, ip, @@ -3757,158 +3652,6 @@ xfs_bmap_search_extents( return ep; } - -#ifdef XFS_BMAP_TRACE -ktrace_t *xfs_bmap_trace_buf; - -/* - * Add a bmap trace buffer entry. Base routine for the others. 
- */ -STATIC void -xfs_bmap_trace_addentry( - int opcode, /* operation */ - const char *fname, /* function name */ - char *desc, /* operation description */ - xfs_inode_t *ip, /* incore inode pointer */ - xfs_extnum_t idx, /* index of entry(ies) */ - xfs_extnum_t cnt, /* count of entries, 1 or 2 */ - xfs_bmbt_rec_host_t *r1, /* first record */ - xfs_bmbt_rec_host_t *r2, /* second record or null */ - int whichfork) /* data or attr fork */ -{ - xfs_bmbt_rec_host_t tr2; - - ASSERT(cnt == 1 || cnt == 2); - ASSERT(r1 != NULL); - if (cnt == 1) { - ASSERT(r2 == NULL); - r2 = &tr2; - memset(&tr2, 0, sizeof(tr2)); - } else - ASSERT(r2 != NULL); - ktrace_enter(xfs_bmap_trace_buf, - (void *)(__psint_t)(opcode | (whichfork << 16)), - (void *)fname, (void *)desc, (void *)ip, - (void *)(__psint_t)idx, - (void *)(__psint_t)cnt, - (void *)(__psunsigned_t)(ip->i_ino >> 32), - (void *)(__psunsigned_t)(unsigned)ip->i_ino, - (void *)(__psunsigned_t)(r1->l0 >> 32), - (void *)(__psunsigned_t)(unsigned)(r1->l0), - (void *)(__psunsigned_t)(r1->l1 >> 32), - (void *)(__psunsigned_t)(unsigned)(r1->l1), - (void *)(__psunsigned_t)(r2->l0 >> 32), - (void *)(__psunsigned_t)(unsigned)(r2->l0), - (void *)(__psunsigned_t)(r2->l1 >> 32), - (void *)(__psunsigned_t)(unsigned)(r2->l1) - ); - ASSERT(ip->i_xtrace); - ktrace_enter(ip->i_xtrace, - (void *)(__psint_t)(opcode | (whichfork << 16)), - (void *)fname, (void *)desc, (void *)ip, - (void *)(__psint_t)idx, - (void *)(__psint_t)cnt, - (void *)(__psunsigned_t)(ip->i_ino >> 32), - (void *)(__psunsigned_t)(unsigned)ip->i_ino, - (void *)(__psunsigned_t)(r1->l0 >> 32), - (void *)(__psunsigned_t)(unsigned)(r1->l0), - (void *)(__psunsigned_t)(r1->l1 >> 32), - (void *)(__psunsigned_t)(unsigned)(r1->l1), - (void *)(__psunsigned_t)(r2->l0 >> 32), - (void *)(__psunsigned_t)(unsigned)(r2->l0), - (void *)(__psunsigned_t)(r2->l1 >> 32), - (void *)(__psunsigned_t)(unsigned)(r2->l1) - ); -} - -/* - * Add bmap trace entry prior to a call to xfs_iext_remove. - */ -STATIC void -xfs_bmap_trace_delete( - const char *fname, /* function name */ - char *desc, /* operation description */ - xfs_inode_t *ip, /* incore inode pointer */ - xfs_extnum_t idx, /* index of entry(entries) deleted */ - xfs_extnum_t cnt, /* count of entries deleted, 1 or 2 */ - int whichfork) /* data or attr fork */ -{ - xfs_ifork_t *ifp; /* inode fork pointer */ - - ifp = XFS_IFORK_PTR(ip, whichfork); - xfs_bmap_trace_addentry(XFS_BMAP_KTRACE_DELETE, fname, desc, ip, idx, - cnt, xfs_iext_get_ext(ifp, idx), - cnt == 2 ? xfs_iext_get_ext(ifp, idx + 1) : NULL, - whichfork); -} - -/* - * Add bmap trace entry prior to a call to xfs_iext_insert, or - * reading in the extents list from the disk (in the btree). - */ -STATIC void -xfs_bmap_trace_insert( - const char *fname, /* function name */ - char *desc, /* operation description */ - xfs_inode_t *ip, /* incore inode pointer */ - xfs_extnum_t idx, /* index of entry(entries) inserted */ - xfs_extnum_t cnt, /* count of entries inserted, 1 or 2 */ - xfs_bmbt_irec_t *r1, /* inserted record 1 */ - xfs_bmbt_irec_t *r2, /* inserted record 2 or null */ - int whichfork) /* data or attr fork */ -{ - xfs_bmbt_rec_host_t tr1; /* compressed record 1 */ - xfs_bmbt_rec_host_t tr2; /* compressed record 2 if needed */ - - xfs_bmbt_set_all(&tr1, r1); - if (cnt == 2) { - ASSERT(r2 != NULL); - xfs_bmbt_set_all(&tr2, r2); - } else { - ASSERT(cnt == 1); - ASSERT(r2 == NULL); - } - xfs_bmap_trace_addentry(XFS_BMAP_KTRACE_INSERT, fname, desc, ip, idx, - cnt, &tr1, cnt == 2 ? 
&tr2 : NULL, whichfork); -} - -/* - * Add bmap trace entry after updating an extent record in place. - */ -STATIC void -xfs_bmap_trace_post_update( - const char *fname, /* function name */ - char *desc, /* operation description */ - xfs_inode_t *ip, /* incore inode pointer */ - xfs_extnum_t idx, /* index of entry updated */ - int whichfork) /* data or attr fork */ -{ - xfs_ifork_t *ifp; /* inode fork pointer */ - - ifp = XFS_IFORK_PTR(ip, whichfork); - xfs_bmap_trace_addentry(XFS_BMAP_KTRACE_POST_UP, fname, desc, ip, idx, - 1, xfs_iext_get_ext(ifp, idx), NULL, whichfork); -} - -/* - * Add bmap trace entry prior to updating an extent record in place. - */ -STATIC void -xfs_bmap_trace_pre_update( - const char *fname, /* function name */ - char *desc, /* operation description */ - xfs_inode_t *ip, /* incore inode pointer */ - xfs_extnum_t idx, /* index of entry to be updated */ - int whichfork) /* data or attr fork */ -{ - xfs_ifork_t *ifp; /* inode fork pointer */ - - ifp = XFS_IFORK_PTR(ip, whichfork); - xfs_bmap_trace_addentry(XFS_BMAP_KTRACE_PRE_UP, fname, desc, ip, idx, 1, - xfs_iext_get_ext(ifp, idx), NULL, whichfork); -} -#endif /* XFS_BMAP_TRACE */ - /* * Compute the worst-case number of indirect blocks that will be used * for ip's delayed extent of length "len". @@ -3940,37 +3683,6 @@ xfs_bmap_worst_indlen( return rval; } -#if defined(XFS_RW_TRACE) -STATIC void -xfs_bunmap_trace( - xfs_inode_t *ip, - xfs_fileoff_t bno, - xfs_filblks_t len, - int flags, - inst_t *ra) -{ - if (ip->i_rwtrace == NULL) - return; - ktrace_enter(ip->i_rwtrace, - (void *)(__psint_t)XFS_BUNMAP, - (void *)ip, - (void *)(__psint_t)((ip->i_d.di_size >> 32) & 0xffffffff), - (void *)(__psint_t)(ip->i_d.di_size & 0xffffffff), - (void *)(__psint_t)(((xfs_dfiloff_t)bno >> 32) & 0xffffffff), - (void *)(__psint_t)((xfs_dfiloff_t)bno & 0xffffffff), - (void *)(__psint_t)len, - (void *)(__psint_t)flags, - (void *)(unsigned long)current_cpu(), - (void *)ra, - (void *)0, - (void *)0, - (void *)0, - (void *)0, - (void *)0, - (void *)0); -} -#endif - /* * Convert inode from non-attributed to attributed. * Must not be in a transaction, ip must not be locked. @@ -4659,34 +4371,30 @@ error0: return XFS_ERROR(EFSCORRUPTED); } -#ifdef XFS_BMAP_TRACE +#ifdef DEBUG /* * Add bmap trace insert entries for all the contents of the extent records. */ void xfs_bmap_trace_exlist( - const char *fname, /* function name */ xfs_inode_t *ip, /* incore inode pointer */ xfs_extnum_t cnt, /* count of entries in the list */ - int whichfork) /* data or attr fork */ + int whichfork, /* data or attr fork */ + unsigned long caller_ip) { - xfs_bmbt_rec_host_t *ep; /* current extent record */ xfs_extnum_t idx; /* extent record index */ xfs_ifork_t *ifp; /* inode fork pointer */ - xfs_bmbt_irec_t s; /* file extent record */ + int state = 0; + + if (whichfork == XFS_ATTR_FORK) + state |= BMAP_ATTRFORK; ifp = XFS_IFORK_PTR(ip, whichfork); ASSERT(cnt == (ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t))); - for (idx = 0; idx < cnt; idx++) { - ep = xfs_iext_get_ext(ifp, idx); - xfs_bmbt_get_all(ep, &s); - XFS_BMAP_TRACE_INSERT("exlist", ip, idx, 1, &s, NULL, - whichfork); - } + for (idx = 0; idx < cnt; idx++) + trace_xfs_extlist(ip, idx, whichfork, caller_ip); } -#endif -#ifdef DEBUG /* * Validate that the bmbt_irecs being returned from bmapi are valid * given the callers original parameters. 
Specifically check the @@ -5435,7 +5143,8 @@ xfs_bunmapi( int rsvd; /* OK to allocate reserved blocks */ xfs_fsblock_t sum; - xfs_bunmap_trace(ip, bno, len, flags, (inst_t *)__return_address); + trace_xfs_bunmap(ip, bno, len, flags, _RET_IP_); + whichfork = (flags & XFS_BMAPI_ATTRFORK) ? XFS_ATTR_FORK : XFS_DATA_FORK; ifp = XFS_IFORK_PTR(ip, whichfork); diff --git a/fs/xfs/xfs_bmap.h b/fs/xfs/xfs_bmap.h index 3e84e8e8d79..419dafb9d87 100644 --- a/fs/xfs/xfs_bmap.h +++ b/fs/xfs/xfs_bmap.h @@ -95,6 +95,21 @@ typedef struct xfs_bmap_free /* need write cache flushing and no */ /* additional allocation alignments */ +#define XFS_BMAPI_FLAGS \ + { XFS_BMAPI_WRITE, "WRITE" }, \ + { XFS_BMAPI_DELAY, "DELAY" }, \ + { XFS_BMAPI_ENTIRE, "ENTIRE" }, \ + { XFS_BMAPI_METADATA, "METADATA" }, \ + { XFS_BMAPI_EXACT, "EXACT" }, \ + { XFS_BMAPI_ATTRFORK, "ATTRFORK" }, \ + { XFS_BMAPI_ASYNC, "ASYNC" }, \ + { XFS_BMAPI_RSVBLOCKS, "RSVBLOCKS" }, \ + { XFS_BMAPI_PREALLOC, "PREALLOC" }, \ + { XFS_BMAPI_IGSTATE, "IGSTATE" }, \ + { XFS_BMAPI_CONTIG, "CONTIG" }, \ + { XFS_BMAPI_CONVERT, "CONVERT" } + + static inline int xfs_bmapi_aflag(int w) { return (w == XFS_ATTR_FORK ? XFS_BMAPI_ATTRFORK : 0); @@ -148,36 +163,30 @@ typedef struct xfs_bmalloca { #define BMAP_RIGHT_VALID (1 << 7) #define BMAP_ATTRFORK (1 << 8) -#if defined(__KERNEL__) && defined(XFS_BMAP_TRACE) -/* - * Trace operations for bmap extent tracing - */ -#define XFS_BMAP_KTRACE_DELETE 1 -#define XFS_BMAP_KTRACE_INSERT 2 -#define XFS_BMAP_KTRACE_PRE_UP 3 -#define XFS_BMAP_KTRACE_POST_UP 4 - -#define XFS_BMAP_TRACE_SIZE 4096 /* size of global trace buffer */ -#define XFS_BMAP_KTRACE_SIZE 32 /* size of per-inode trace buffer */ -extern ktrace_t *xfs_bmap_trace_buf; +#define XFS_BMAP_EXT_FLAGS \ + { BMAP_LEFT_CONTIG, "LC" }, \ + { BMAP_RIGHT_CONTIG, "RC" }, \ + { BMAP_LEFT_FILLING, "LF" }, \ + { BMAP_RIGHT_FILLING, "RF" }, \ + { BMAP_ATTRFORK, "ATTR" } /* * Add bmap trace insert entries for all the contents of the extent list. + * + * Quite excessive tracing. Only do this for debug builds. */ +#if defined(__KERNEL__) && defined(DEBUG) void xfs_bmap_trace_exlist( - const char *fname, /* function name */ struct xfs_inode *ip, /* incore inode pointer */ xfs_extnum_t cnt, /* count of entries in list */ - int whichfork); /* data or attr fork */ + int whichfork, + unsigned long caller_ip); /* data or attr fork */ #define XFS_BMAP_TRACE_EXLIST(ip,c,w) \ - xfs_bmap_trace_exlist(__func__,ip,c,w) - -#else /* __KERNEL__ && XFS_BMAP_TRACE */ - + xfs_bmap_trace_exlist(ip,c,w, _THIS_IP_) +#else #define XFS_BMAP_TRACE_EXLIST(ip,c,w) - -#endif /* __KERNEL__ && XFS_BMAP_TRACE */ +#endif /* * Convert inode from non-attributed to attributed. 
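Because the converted events record typed fields rather than preformatted strings, the tracer's per-event filters can select records at collection time. For example, to log only larger unmaps through the xfs_bunmap event added above, something like the following would do; this assumes the event stores its len argument in a field named len, so check the event's format file for the real field names first:

cat /sys/kernel/debug/tracing/events/xfs/xfs_bunmap/format
echo 'len > 1024' > /sys/kernel/debug/tracing/events/xfs/xfs_bunmap/filter
echo 0 > /sys/kernel/debug/tracing/events/xfs/xfs_bunmap/filter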
diff --git a/fs/xfs/xfs_bmap_btree.c b/fs/xfs/xfs_bmap_btree.c index 6f5ccede63f..38751d5fac6 100644 --- a/fs/xfs/xfs_bmap_btree.c +++ b/fs/xfs/xfs_bmap_btree.c @@ -768,12 +768,6 @@ xfs_bmbt_trace_enter( (void *)a0, (void *)a1, (void *)a2, (void *)a3, (void *)a4, (void *)a5, (void *)a6, (void *)a7, (void *)a8, (void *)a9, (void *)a10); - ktrace_enter(ip->i_btrace, - (void *)((__psint_t)type | (whichfork << 8) | (line << 16)), - (void *)func, (void *)s, (void *)ip, (void *)cur, - (void *)a0, (void *)a1, (void *)a2, (void *)a3, - (void *)a4, (void *)a5, (void *)a6, (void *)a7, - (void *)a8, (void *)a9, (void *)a10); } STATIC void diff --git a/fs/xfs/xfs_btree.c b/fs/xfs/xfs_btree.c index 52b5f14d0c3..36a0992dd66 100644 --- a/fs/xfs/xfs_btree.c +++ b/fs/xfs/xfs_btree.c @@ -39,6 +39,7 @@ #include "xfs_btree_trace.h" #include "xfs_ialloc.h" #include "xfs_error.h" +#include "xfs_trace.h" /* * Cursor allocation zone. @@ -81,7 +82,7 @@ xfs_btree_check_lblock( XFS_ERRTAG_BTREE_CHECK_LBLOCK, XFS_RANDOM_BTREE_CHECK_LBLOCK))) { if (bp) - xfs_buftrace("LBTREE ERROR", bp); + trace_xfs_btree_corrupt(bp, _RET_IP_); XFS_ERROR_REPORT("xfs_btree_check_lblock", XFS_ERRLEVEL_LOW, mp); return XFS_ERROR(EFSCORRUPTED); @@ -119,7 +120,7 @@ xfs_btree_check_sblock( XFS_ERRTAG_BTREE_CHECK_SBLOCK, XFS_RANDOM_BTREE_CHECK_SBLOCK))) { if (bp) - xfs_buftrace("SBTREE ERROR", bp); + trace_xfs_btree_corrupt(bp, _RET_IP_); XFS_CORRUPTION_ERROR("xfs_btree_check_sblock", XFS_ERRLEVEL_LOW, cur->bc_mp, block); return XFS_ERROR(EFSCORRUPTED); diff --git a/fs/xfs/xfs_btree_trace.h b/fs/xfs/xfs_btree_trace.h index b3f5eb3c3c6..2d8a309873e 100644 --- a/fs/xfs/xfs_btree_trace.h +++ b/fs/xfs/xfs_btree_trace.h @@ -58,8 +58,6 @@ void xfs_btree_trace_argbi(const char *, struct xfs_btree_cur *, struct xfs_buf *, int, int); void xfs_btree_trace_argbii(const char *, struct xfs_btree_cur *, struct xfs_buf *, int, int, int); -void xfs_btree_trace_argfffi(const char *, struct xfs_btree_cur *, - xfs_dfiloff_t, xfs_dfsbno_t, xfs_dfilblks_t, int, int); void xfs_btree_trace_argi(const char *, struct xfs_btree_cur *, int, int); void xfs_btree_trace_argipk(const char *, struct xfs_btree_cur *, int, union xfs_btree_ptr, union xfs_btree_key *, int); @@ -71,24 +69,10 @@ void xfs_btree_trace_argr(const char *, struct xfs_btree_cur *, union xfs_btree_rec *, int); void xfs_btree_trace_cursor(const char *, struct xfs_btree_cur *, int, int); - -#define XFS_ALLOCBT_TRACE_SIZE 4096 /* size of global trace buffer */ -extern ktrace_t *xfs_allocbt_trace_buf; - -#define XFS_INOBT_TRACE_SIZE 4096 /* size of global trace buffer */ -extern ktrace_t *xfs_inobt_trace_buf; - -#define XFS_BMBT_TRACE_SIZE 4096 /* size of global trace buffer */ -#define XFS_BMBT_KTRACE_SIZE 32 /* size of per-inode trace buffer */ -extern ktrace_t *xfs_bmbt_trace_buf; - - #define XFS_BTREE_TRACE_ARGBI(c, b, i) \ xfs_btree_trace_argbi(__func__, c, b, i, __LINE__) #define XFS_BTREE_TRACE_ARGBII(c, b, i, j) \ xfs_btree_trace_argbii(__func__, c, b, i, j, __LINE__) -#define XFS_BTREE_TRACE_ARGFFFI(c, o, b, i, j) \ - xfs_btree_trace_argfffi(__func__, c, o, b, i, j, __LINE__) #define XFS_BTREE_TRACE_ARGI(c, i) \ xfs_btree_trace_argi(__func__, c, i, __LINE__) #define XFS_BTREE_TRACE_ARGIPK(c, i, p, k) \ @@ -104,7 +88,6 @@ extern ktrace_t *xfs_bmbt_trace_buf; #else #define XFS_BTREE_TRACE_ARGBI(c, b, i) #define XFS_BTREE_TRACE_ARGBII(c, b, i, j) -#define XFS_BTREE_TRACE_ARGFFFI(c, o, b, i, j) #define XFS_BTREE_TRACE_ARGI(c, i) #define XFS_BTREE_TRACE_ARGIPK(c, i, p, s) #define 
XFS_BTREE_TRACE_ARGIPR(c, i, p, r) diff --git a/fs/xfs/xfs_buf_item.c b/fs/xfs/xfs_buf_item.c index 92af4098c7e..a30f7e9eb2b 100644 --- a/fs/xfs/xfs_buf_item.c +++ b/fs/xfs/xfs_buf_item.c @@ -29,6 +29,7 @@ #include "xfs_buf_item.h" #include "xfs_trans_priv.h" #include "xfs_error.h" +#include "xfs_trace.h" kmem_zone_t *xfs_buf_item_zone; @@ -164,7 +165,7 @@ xfs_buf_item_size( * is the buf log format structure with the * cancel flag in it. */ - xfs_buf_item_trace("SIZE STALE", bip); + trace_xfs_buf_item_size_stale(bip); ASSERT(bip->bli_format.blf_flags & XFS_BLI_CANCEL); return 1; } @@ -206,7 +207,7 @@ xfs_buf_item_size( } } - xfs_buf_item_trace("SIZE NORM", bip); + trace_xfs_buf_item_size(bip); return nvecs; } @@ -259,7 +260,7 @@ xfs_buf_item_format( * is the buf log format structure with the * cancel flag in it. */ - xfs_buf_item_trace("FORMAT STALE", bip); + trace_xfs_buf_item_format_stale(bip); ASSERT(bip->bli_format.blf_flags & XFS_BLI_CANCEL); bip->bli_format.blf_size = nvecs; return; @@ -335,7 +336,7 @@ xfs_buf_item_format( /* * Check to make sure everything is consistent. */ - xfs_buf_item_trace("FORMAT NORM", bip); + trace_xfs_buf_item_format(bip); xfs_buf_item_log_check(bip); } @@ -355,8 +356,7 @@ xfs_buf_item_pin( ASSERT(atomic_read(&bip->bli_refcount) > 0); ASSERT((bip->bli_flags & XFS_BLI_LOGGED) || (bip->bli_flags & XFS_BLI_STALE)); - xfs_buf_item_trace("PIN", bip); - xfs_buftrace("XFS_PIN", bp); + trace_xfs_buf_item_pin(bip); xfs_bpin(bp); } @@ -383,8 +383,7 @@ xfs_buf_item_unpin( ASSERT(bp != NULL); ASSERT(XFS_BUF_FSPRIVATE(bp, xfs_buf_log_item_t *) == bip); ASSERT(atomic_read(&bip->bli_refcount) > 0); - xfs_buf_item_trace("UNPIN", bip); - xfs_buftrace("XFS_UNPIN", bp); + trace_xfs_buf_item_unpin(bip); freed = atomic_dec_and_test(&bip->bli_refcount); ailp = bip->bli_item.li_ailp; @@ -395,8 +394,8 @@ xfs_buf_item_unpin( ASSERT(!(XFS_BUF_ISDELAYWRITE(bp))); ASSERT(XFS_BUF_ISSTALE(bp)); ASSERT(bip->bli_format.blf_flags & XFS_BLI_CANCEL); - xfs_buf_item_trace("UNPIN STALE", bip); - xfs_buftrace("XFS_UNPIN STALE", bp); + trace_xfs_buf_item_unpin_stale(bip); + /* * If we get called here because of an IO error, we may * or may not have the item on the AIL. xfs_trans_ail_delete() @@ -440,8 +439,8 @@ xfs_buf_item_unpin_remove( if ((atomic_read(&bip->bli_refcount) == 1) && (bip->bli_flags & XFS_BLI_STALE)) { ASSERT(XFS_BUF_VALUSEMA(bip->bli_buf) <= 0); - xfs_buf_item_trace("UNPIN REMOVE", bip); - xfs_buftrace("XFS_UNPIN_REMOVE", bp); + trace_xfs_buf_item_unpin_stale(bip); + /* * yes -- clear the xaction descriptor in-use flag * and free the chunk if required. We can safely @@ -495,7 +494,7 @@ xfs_buf_item_trylock( XFS_BUF_HOLD(bp); ASSERT(!(bip->bli_flags & XFS_BLI_STALE)); - xfs_buf_item_trace("TRYLOCK SUCCESS", bip); + trace_xfs_buf_item_trylock(bip); return XFS_ITEM_SUCCESS; } @@ -524,7 +523,6 @@ xfs_buf_item_unlock( uint hold; bp = bip->bli_buf; - xfs_buftrace("XFS_UNLOCK", bp); /* * Clear the buffer's association with this transaction. @@ -547,7 +545,7 @@ xfs_buf_item_unlock( */ if (bip->bli_flags & XFS_BLI_STALE) { bip->bli_flags &= ~XFS_BLI_LOGGED; - xfs_buf_item_trace("UNLOCK STALE", bip); + trace_xfs_buf_item_unlock_stale(bip); ASSERT(bip->bli_format.blf_flags & XFS_BLI_CANCEL); if (!aborted) return; @@ -574,7 +572,7 @@ xfs_buf_item_unlock( * release the buffer at the end of this routine. */ hold = bip->bli_flags & XFS_BLI_HOLD; - xfs_buf_item_trace("UNLOCK", bip); + trace_xfs_buf_item_unlock(bip); /* * If the buf item isn't tracking any data, free it. 
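The buffer log item events converted here all take the same single argument, and the fields worth recording are the ones the old xfs_buf_item_trace() stuffed into its ktrace slots: bli_flags, bli_recur, bli_refcount and a few properties of the underlying buffer. When many events share one shape like this, the usual approach on kernels that provide DECLARE_EVENT_CLASS()/DEFINE_EVENT() is to write the body once and instantiate it per name; a rough sketch with an illustrative field selection, decoded through the new XFS_BLI_FLAGS table:

DECLARE_EVENT_CLASS(xfs_buf_item_class,
	TP_PROTO(struct xfs_buf_log_item *bip),
	TP_ARGS(bip),
	TP_STRUCT__entry(
		__field(unsigned int, bli_flags)
		__field(unsigned int, bli_recur)
		__field(int, bli_refcount)
	),
	TP_fast_assign(
		__entry->bli_flags = bip->bli_flags;
		__entry->bli_recur = bip->bli_recur;
		__entry->bli_refcount = atomic_read(&bip->bli_refcount);
	),
	TP_printk("flags %s recur %u refcount %d",
		  __print_flags(__entry->bli_flags, "|", XFS_BLI_FLAGS),
		  __entry->bli_recur, __entry->bli_refcount)
);

DEFINE_EVENT(xfs_buf_item_class, xfs_buf_item_pin,
	TP_PROTO(struct xfs_buf_log_item *bip),
	TP_ARGS(bip));
DEFINE_EVENT(xfs_buf_item_class, xfs_buf_item_unpin,
	TP_PROTO(struct xfs_buf_log_item *bip),
	TP_ARGS(bip));

On kernels without the class macros the same effect can be had with a small local macro that expands to a full TRACE_EVENT() for each event name.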
@@ -618,7 +616,8 @@ xfs_buf_item_committed( xfs_buf_log_item_t *bip, xfs_lsn_t lsn) { - xfs_buf_item_trace("COMMITTED", bip); + trace_xfs_buf_item_committed(bip); + if ((bip->bli_flags & XFS_BLI_INODE_ALLOC_BUF) && (bip->bli_item.li_lsn != 0)) { return bip->bli_item.li_lsn; @@ -640,7 +639,7 @@ xfs_buf_item_push( xfs_buf_t *bp; ASSERT(!(bip->bli_flags & XFS_BLI_STALE)); - xfs_buf_item_trace("PUSH", bip); + trace_xfs_buf_item_push(bip); bp = bip->bli_buf; @@ -738,9 +737,6 @@ xfs_buf_item_init( bip->bli_format.blf_blkno = (__int64_t)XFS_BUF_ADDR(bp); bip->bli_format.blf_len = (ushort)BTOBB(XFS_BUF_COUNT(bp)); bip->bli_format.blf_map_size = map_size; -#ifdef XFS_BLI_TRACE - bip->bli_trace = ktrace_alloc(XFS_BLI_TRACE_SIZE, KM_NOFS); -#endif #ifdef XFS_TRANS_DEBUG /* @@ -878,9 +874,6 @@ xfs_buf_item_free( kmem_free(bip->bli_logged); #endif /* XFS_TRANS_DEBUG */ -#ifdef XFS_BLI_TRACE - ktrace_free(bip->bli_trace); -#endif kmem_zone_free(xfs_buf_item_zone, bip); } @@ -897,7 +890,8 @@ xfs_buf_item_relse( { xfs_buf_log_item_t *bip; - xfs_buftrace("XFS_RELSE", bp); + trace_xfs_buf_item_relse(bp, _RET_IP_); + bip = XFS_BUF_FSPRIVATE(bp, xfs_buf_log_item_t*); XFS_BUF_SET_FSPRIVATE(bp, bip->bli_item.li_bio_list); if ((XFS_BUF_FSPRIVATE(bp, void *) == NULL) && @@ -994,7 +988,7 @@ xfs_buf_iodone_callbacks( if (XFS_FORCED_SHUTDOWN(mp)) { ASSERT(XFS_BUF_TARGET(bp) == mp->m_ddev_targp); XFS_BUF_SUPER_STALE(bp); - xfs_buftrace("BUF_IODONE_CB", bp); + trace_xfs_buf_item_iodone(bp, _RET_IP_); xfs_buf_do_callbacks(bp, lip); XFS_BUF_SET_FSPRIVATE(bp, NULL); XFS_BUF_CLR_IODONE_FUNC(bp); @@ -1030,7 +1024,7 @@ xfs_buf_iodone_callbacks( XFS_BUF_SET_START(bp); } ASSERT(XFS_BUF_IODONE_FUNC(bp)); - xfs_buftrace("BUF_IODONE ASYNC", bp); + trace_xfs_buf_item_iodone_async(bp, _RET_IP_); xfs_buf_relse(bp); } else { /* @@ -1053,9 +1047,7 @@ xfs_buf_iodone_callbacks( } return; } -#ifdef XFSERRORDEBUG - xfs_buftrace("XFS BUFCB NOERR", bp); -#endif + xfs_buf_do_callbacks(bp, lip); XFS_BUF_SET_FSPRIVATE(bp, NULL); XFS_BUF_CLR_IODONE_FUNC(bp); @@ -1081,7 +1073,9 @@ xfs_buf_error_relse( XFS_BUF_DONE(bp); XFS_BUF_UNDELAYWRITE(bp); XFS_BUF_ERROR(bp,0); - xfs_buftrace("BUF_ERROR_RELSE", bp); + + trace_xfs_buf_error_relse(bp, _RET_IP_); + if (! 
XFS_FORCED_SHUTDOWN(mp)) xfs_force_shutdown(mp, SHUTDOWN_META_IO_ERROR); /* @@ -1128,34 +1122,3 @@ xfs_buf_iodone( xfs_trans_ail_delete(ailp, (xfs_log_item_t *)bip); xfs_buf_item_free(bip); } - -#if defined(XFS_BLI_TRACE) -void -xfs_buf_item_trace( - char *id, - xfs_buf_log_item_t *bip) -{ - xfs_buf_t *bp; - ASSERT(bip->bli_trace != NULL); - - bp = bip->bli_buf; - ktrace_enter(bip->bli_trace, - (void *)id, - (void *)bip->bli_buf, - (void *)((unsigned long)bip->bli_flags), - (void *)((unsigned long)bip->bli_recur), - (void *)((unsigned long)atomic_read(&bip->bli_refcount)), - (void *)((unsigned long) - (0xFFFFFFFF & XFS_BUF_ADDR(bp) >> 32)), - (void *)((unsigned long)(0xFFFFFFFF & XFS_BUF_ADDR(bp))), - (void *)((unsigned long)XFS_BUF_COUNT(bp)), - (void *)((unsigned long)XFS_BUF_BFLAGS(bp)), - XFS_BUF_FSPRIVATE(bp, void *), - XFS_BUF_FSPRIVATE2(bp, void *), - (void *)(unsigned long)XFS_BUF_ISPINNED(bp), - (void *)XFS_BUF_IODONE_FUNC(bp), - (void *)((unsigned long)(XFS_BUF_VALUSEMA(bp))), - (void *)bip->bli_item.li_desc, - (void *)((unsigned long)bip->bli_item.li_flags)); -} -#endif /* XFS_BLI_TRACE */ diff --git a/fs/xfs/xfs_buf_item.h b/fs/xfs/xfs_buf_item.h index 5a41c348bb1..217f34af00c 100644 --- a/fs/xfs/xfs_buf_item.h +++ b/fs/xfs/xfs_buf_item.h @@ -70,22 +70,21 @@ typedef struct xfs_buf_log_format_t { #define XFS_BLI_INODE_ALLOC_BUF 0x10 #define XFS_BLI_STALE_INODE 0x20 +#define XFS_BLI_FLAGS \ + { XFS_BLI_HOLD, "HOLD" }, \ + { XFS_BLI_DIRTY, "DIRTY" }, \ + { XFS_BLI_STALE, "STALE" }, \ + { XFS_BLI_LOGGED, "LOGGED" }, \ + { XFS_BLI_INODE_ALLOC_BUF, "INODE_ALLOC" }, \ + { XFS_BLI_STALE_INODE, "STALE_INODE" } + #ifdef __KERNEL__ struct xfs_buf; -struct ktrace; struct xfs_mount; struct xfs_buf_log_item; -#if defined(XFS_BLI_TRACE) -#define XFS_BLI_TRACE_SIZE 32 - -void xfs_buf_item_trace(char *, struct xfs_buf_log_item *); -#else -#define xfs_buf_item_trace(id, bip) -#endif - /* * This is the in core log item structure used to track information * needed to log buffers. 
It tracks how many times the lock has been @@ -97,9 +96,6 @@ typedef struct xfs_buf_log_item { unsigned int bli_flags; /* misc flags */ unsigned int bli_recur; /* lock recursion count */ atomic_t bli_refcount; /* cnt of tp refs */ -#ifdef XFS_BLI_TRACE - struct ktrace *bli_trace; /* event trace buf */ -#endif #ifdef XFS_TRANS_DEBUG char *bli_orig; /* original buffer copy */ char *bli_logged; /* bytes logged (bitmap) */ diff --git a/fs/xfs/xfs_da_btree.c b/fs/xfs/xfs_da_btree.c index 2847bbc1c53..c0c8869115b 100644 --- a/fs/xfs/xfs_da_btree.c +++ b/fs/xfs/xfs_da_btree.c @@ -46,6 +46,7 @@ #include "xfs_dir2_block.h" #include "xfs_dir2_node.h" #include "xfs_error.h" +#include "xfs_trace.h" /* * xfs_da_btree.c @@ -2107,7 +2108,7 @@ xfs_da_do_buf( (be32_to_cpu(free->hdr.magic) != XFS_DIR2_FREE_MAGIC), mp, XFS_ERRTAG_DA_READ_BUF, XFS_RANDOM_DA_READ_BUF))) { - xfs_buftrace("DA READ ERROR", rbp->bps[0]); + trace_xfs_da_btree_corrupt(rbp->bps[0], _RET_IP_); XFS_CORRUPTION_ERROR("xfs_da_do_buf(2)", XFS_ERRLEVEL_LOW, mp, info); error = XFS_ERROR(EFSCORRUPTED); diff --git a/fs/xfs/xfs_da_btree.h b/fs/xfs/xfs_da_btree.h index 8c536167bf7..30cd08f56a3 100644 --- a/fs/xfs/xfs_da_btree.h +++ b/fs/xfs/xfs_da_btree.h @@ -125,6 +125,13 @@ typedef struct xfs_da_args { #define XFS_DA_OP_OKNOENT 0x0008 /* lookup/add op, ENOENT ok, else die */ #define XFS_DA_OP_CILOOKUP 0x0010 /* lookup to return CI name if found */ +#define XFS_DA_OP_FLAGS \ + { XFS_DA_OP_JUSTCHECK, "JUSTCHECK" }, \ + { XFS_DA_OP_RENAME, "RENAME" }, \ + { XFS_DA_OP_ADDNAME, "ADDNAME" }, \ + { XFS_DA_OP_OKNOENT, "OKNOENT" }, \ + { XFS_DA_OP_CILOOKUP, "CILOOKUP" } + /* * Structure to describe buffer(s) for a block. * This is needed in the directory version 2 format case, when diff --git a/fs/xfs/xfs_dfrag.c b/fs/xfs/xfs_dfrag.c index ab89a7e94a0..d1483a4f71b 100644 --- a/fs/xfs/xfs_dfrag.c +++ b/fs/xfs/xfs_dfrag.c @@ -43,6 +43,7 @@ #include "xfs_error.h" #include "xfs_rw.h" #include "xfs_vnodeops.h" +#include "xfs_trace.h" /* * Syssgi interface for swapext @@ -168,7 +169,6 @@ xfs_swap_extents( } if (VN_CACHED(VFS_I(tip)) != 0) { - xfs_inval_cached_trace(tip, 0, -1, 0, -1); error = xfs_flushinval_pages(tip, 0, -1, FI_REMAPF_LOCKED); if (error) diff --git a/fs/xfs/xfs_dir2.c b/fs/xfs/xfs_dir2.c index bb1d58eb398..93634a7e90e 100644 --- a/fs/xfs/xfs_dir2.c +++ b/fs/xfs/xfs_dir2.c @@ -40,9 +40,9 @@ #include "xfs_dir2_leaf.h" #include "xfs_dir2_block.h" #include "xfs_dir2_node.h" -#include "xfs_dir2_trace.h" #include "xfs_error.h" #include "xfs_vnodeops.h" +#include "xfs_trace.h" struct xfs_name xfs_name_dotdot = {"..", 2}; @@ -525,7 +525,8 @@ xfs_dir2_grow_inode( xfs_trans_t *tp; xfs_drfsbno_t nblks; - xfs_dir2_trace_args_s("grow_inode", args, space); + trace_xfs_dir2_grow_inode(args, space); + dp = args->dp; tp = args->trans; mp = dp->i_mount; @@ -703,7 +704,8 @@ xfs_dir2_shrink_inode( xfs_mount_t *mp; xfs_trans_t *tp; - xfs_dir2_trace_args_db("shrink_inode", args, db, bp); + trace_xfs_dir2_shrink_inode(args, db); + dp = args->dp; mp = dp->i_mount; tp = args->trans; diff --git a/fs/xfs/xfs_dir2_block.c b/fs/xfs/xfs_dir2_block.c index ab52e9e1c1e..ddc4ecc7807 100644 --- a/fs/xfs/xfs_dir2_block.c +++ b/fs/xfs/xfs_dir2_block.c @@ -36,8 +36,8 @@ #include "xfs_dir2_data.h" #include "xfs_dir2_leaf.h" #include "xfs_dir2_block.h" -#include "xfs_dir2_trace.h" #include "xfs_error.h" +#include "xfs_trace.h" /* * Local function prototypes. 
@@ -94,7 +94,8 @@ xfs_dir2_block_addname( __be16 *tagp; /* pointer to tag value */ xfs_trans_t *tp; /* transaction structure */ - xfs_dir2_trace_args("block_addname", args); + trace_xfs_dir2_block_addname(args); + dp = args->dp; tp = args->trans; mp = dp->i_mount; @@ -590,7 +591,8 @@ xfs_dir2_block_lookup( int error; /* error return value */ xfs_mount_t *mp; /* filesystem mount point */ - xfs_dir2_trace_args("block_lookup", args); + trace_xfs_dir2_block_lookup(args); + /* * Get the buffer, look up the entry. * If not found (ENOENT) then return, have no buffer. @@ -747,7 +749,8 @@ xfs_dir2_block_removename( int size; /* shortform size */ xfs_trans_t *tp; /* transaction pointer */ - xfs_dir2_trace_args("block_removename", args); + trace_xfs_dir2_block_removename(args); + /* * Look up the entry in the block. Gets the buffer and entry index. * It will always be there, the vnodeops level does a lookup first. @@ -823,7 +826,8 @@ xfs_dir2_block_replace( int error; /* error return value */ xfs_mount_t *mp; /* filesystem mount point */ - xfs_dir2_trace_args("block_replace", args); + trace_xfs_dir2_block_replace(args); + /* * Lookup the entry in the directory. Get buffer and entry index. * This will always succeed since the caller has already done a lookup. @@ -897,7 +901,8 @@ xfs_dir2_leaf_to_block( int to; /* block/leaf to index */ xfs_trans_t *tp; /* transaction pointer */ - xfs_dir2_trace_args_bb("leaf_to_block", args, lbp, dbp); + trace_xfs_dir2_leaf_to_block(args); + dp = args->dp; tp = args->trans; mp = dp->i_mount; @@ -1044,7 +1049,8 @@ xfs_dir2_sf_to_block( xfs_trans_t *tp; /* transaction pointer */ struct xfs_name name; - xfs_dir2_trace_args("sf_to_block", args); + trace_xfs_dir2_sf_to_block(args); + dp = args->dp; tp = args->trans; mp = dp->i_mount; diff --git a/fs/xfs/xfs_dir2_leaf.c b/fs/xfs/xfs_dir2_leaf.c index 41ad537c49e..29f484c11b3 100644 --- a/fs/xfs/xfs_dir2_leaf.c +++ b/fs/xfs/xfs_dir2_leaf.c @@ -38,8 +38,8 @@ #include "xfs_dir2_leaf.h" #include "xfs_dir2_block.h" #include "xfs_dir2_node.h" -#include "xfs_dir2_trace.h" #include "xfs_error.h" +#include "xfs_trace.h" /* * Local function declarations. @@ -80,7 +80,8 @@ xfs_dir2_block_to_leaf( int needscan; /* need to rescan bestfree */ xfs_trans_t *tp; /* transaction pointer */ - xfs_dir2_trace_args_b("block_to_leaf", args, dbp); + trace_xfs_dir2_block_to_leaf(args); + dp = args->dp; mp = dp->i_mount; tp = args->trans; @@ -188,7 +189,8 @@ xfs_dir2_leaf_addname( xfs_trans_t *tp; /* transaction pointer */ xfs_dir2_db_t use_block; /* data block number */ - xfs_dir2_trace_args("leaf_addname", args); + trace_xfs_dir2_leaf_addname(args); + dp = args->dp; tp = args->trans; mp = dp->i_mount; @@ -1266,7 +1268,8 @@ xfs_dir2_leaf_lookup( xfs_dir2_leaf_entry_t *lep; /* leaf entry */ xfs_trans_t *tp; /* transaction pointer */ - xfs_dir2_trace_args("leaf_lookup", args); + trace_xfs_dir2_leaf_lookup(args); + /* * Look up name in the leaf block, returning both buffers and index. */ @@ -1454,7 +1457,8 @@ xfs_dir2_leaf_removename( xfs_dir2_data_off_t oldbest; /* old value of best free */ xfs_trans_t *tp; /* transaction pointer */ - xfs_dir2_trace_args("leaf_removename", args); + trace_xfs_dir2_leaf_removename(args); + /* * Lookup the leaf entry, get the leaf and data blocks read in. */ @@ -1586,7 +1590,8 @@ xfs_dir2_leaf_replace( xfs_dir2_leaf_entry_t *lep; /* leaf entry */ xfs_trans_t *tp; /* transaction pointer */ - xfs_dir2_trace_args("leaf_replace", args); + trace_xfs_dir2_leaf_replace(args); + /* * Look up the entry. 
*/ @@ -1766,7 +1771,9 @@ xfs_dir2_node_to_leaf( if (state->path.active > 1) return 0; args = state->args; - xfs_dir2_trace_args("node_to_leaf", args); + + trace_xfs_dir2_node_to_leaf(args); + mp = state->mp; dp = args->dp; tp = args->trans; diff --git a/fs/xfs/xfs_dir2_node.c b/fs/xfs/xfs_dir2_node.c index 5a81ccd1045..ce6e355199b 100644 --- a/fs/xfs/xfs_dir2_node.c +++ b/fs/xfs/xfs_dir2_node.c @@ -37,8 +37,8 @@ #include "xfs_dir2_leaf.h" #include "xfs_dir2_block.h" #include "xfs_dir2_node.h" -#include "xfs_dir2_trace.h" #include "xfs_error.h" +#include "xfs_trace.h" /* * Function declarations. @@ -123,7 +123,8 @@ xfs_dir2_leaf_to_node( __be16 *to; /* pointer to freespace entry */ xfs_trans_t *tp; /* transaction pointer */ - xfs_dir2_trace_args_b("leaf_to_node", args, lbp); + trace_xfs_dir2_leaf_to_node(args); + dp = args->dp; mp = dp->i_mount; tp = args->trans; @@ -196,7 +197,8 @@ xfs_dir2_leafn_add( xfs_mount_t *mp; /* filesystem mount point */ xfs_trans_t *tp; /* transaction pointer */ - xfs_dir2_trace_args_sb("leafn_add", args, index, bp); + trace_xfs_dir2_leafn_add(args, index); + dp = args->dp; mp = dp->i_mount; tp = args->trans; @@ -711,8 +713,8 @@ xfs_dir2_leafn_moveents( int stale; /* count stale leaves copied */ xfs_trans_t *tp; /* transaction pointer */ - xfs_dir2_trace_args_bibii("leafn_moveents", args, bp_s, start_s, bp_d, - start_d, count); + trace_xfs_dir2_leafn_moveents(args, start_s, start_d, count); + /* * Silently return if nothing to do. */ @@ -933,7 +935,8 @@ xfs_dir2_leafn_remove( int needscan; /* need to rescan data frees */ xfs_trans_t *tp; /* transaction pointer */ - xfs_dir2_trace_args_sb("leafn_remove", args, index, bp); + trace_xfs_dir2_leafn_remove(args, index); + dp = args->dp; tp = args->trans; mp = dp->i_mount; @@ -1363,7 +1366,8 @@ xfs_dir2_node_addname( int rval; /* sub-return value */ xfs_da_state_t *state; /* btree cursor */ - xfs_dir2_trace_args("node_addname", args); + trace_xfs_dir2_node_addname(args); + /* * Allocate and initialize the state (btree cursor). */ @@ -1822,7 +1826,8 @@ xfs_dir2_node_lookup( int rval; /* operation return value */ xfs_da_state_t *state; /* btree cursor */ - xfs_dir2_trace_args("node_lookup", args); + trace_xfs_dir2_node_lookup(args); + /* * Allocate and initialize the btree cursor. */ @@ -1875,7 +1880,8 @@ xfs_dir2_node_removename( int rval; /* operation return value */ xfs_da_state_t *state; /* btree cursor */ - xfs_dir2_trace_args("node_removename", args); + trace_xfs_dir2_node_removename(args); + /* * Allocate and initialize the btree cursor. */ @@ -1944,7 +1950,8 @@ xfs_dir2_node_replace( int rval; /* internal return value */ xfs_da_state_t *state; /* btree cursor */ - xfs_dir2_trace_args("node_replace", args); + trace_xfs_dir2_node_replace(args); + /* * Allocate and initialize the btree cursor. */ diff --git a/fs/xfs/xfs_dir2_sf.c b/fs/xfs/xfs_dir2_sf.c index e89734e8464..9d4f17a6967 100644 --- a/fs/xfs/xfs_dir2_sf.c +++ b/fs/xfs/xfs_dir2_sf.c @@ -37,7 +37,7 @@ #include "xfs_dir2_data.h" #include "xfs_dir2_leaf.h" #include "xfs_dir2_block.h" -#include "xfs_dir2_trace.h" +#include "xfs_trace.h" /* * Prototypes for internal functions. 
@@ -169,7 +169,8 @@ xfs_dir2_block_to_sf( xfs_dir2_sf_t *sfp; /* shortform structure */ xfs_ino_t temp; - xfs_dir2_trace_args_sb("block_to_sf", args, size, bp); + trace_xfs_dir2_block_to_sf(args); + dp = args->dp; mp = dp->i_mount; @@ -281,7 +282,8 @@ xfs_dir2_sf_addname( xfs_dir2_sf_t *sfp; /* shortform structure */ xfs_dir2_sf_entry_t *sfep = NULL; /* shortform entry */ - xfs_dir2_trace_args("sf_addname", args); + trace_xfs_dir2_sf_addname(args); + ASSERT(xfs_dir2_sf_lookup(args) == ENOENT); dp = args->dp; ASSERT(dp->i_df.if_flags & XFS_IFINLINE); @@ -654,7 +656,8 @@ xfs_dir2_sf_create( xfs_dir2_sf_t *sfp; /* shortform structure */ int size; /* directory size */ - xfs_dir2_trace_args_i("sf_create", args, pino); + trace_xfs_dir2_sf_create(args); + dp = args->dp; ASSERT(dp != NULL); @@ -808,7 +811,8 @@ xfs_dir2_sf_lookup( enum xfs_dacmp cmp; /* comparison result */ xfs_dir2_sf_entry_t *ci_sfep; /* case-insens. entry */ - xfs_dir2_trace_args("sf_lookup", args); + trace_xfs_dir2_sf_lookup(args); + xfs_dir2_sf_check(args); dp = args->dp; @@ -891,7 +895,8 @@ xfs_dir2_sf_removename( xfs_dir2_sf_entry_t *sfep; /* shortform directory entry */ xfs_dir2_sf_t *sfp; /* shortform structure */ - xfs_dir2_trace_args("sf_removename", args); + trace_xfs_dir2_sf_removename(args); + dp = args->dp; ASSERT(dp->i_df.if_flags & XFS_IFINLINE); @@ -982,7 +987,8 @@ xfs_dir2_sf_replace( xfs_dir2_sf_entry_t *sfep; /* shortform directory entry */ xfs_dir2_sf_t *sfp; /* shortform structure */ - xfs_dir2_trace_args("sf_replace", args); + trace_xfs_dir2_sf_replace(args); + dp = args->dp; ASSERT(dp->i_df.if_flags & XFS_IFINLINE); @@ -1125,7 +1131,8 @@ xfs_dir2_sf_toino4( xfs_dir2_sf_entry_t *sfep; /* new sf entry */ xfs_dir2_sf_t *sfp; /* new sf directory */ - xfs_dir2_trace_args("sf_toino4", args); + trace_xfs_dir2_sf_toino4(args); + dp = args->dp; /* @@ -1202,7 +1209,8 @@ xfs_dir2_sf_toino8( xfs_dir2_sf_entry_t *sfep; /* new sf entry */ xfs_dir2_sf_t *sfp; /* new sf directory */ - xfs_dir2_trace_args("sf_toino8", args); + trace_xfs_dir2_sf_toino8(args); + dp = args->dp; /* diff --git a/fs/xfs/xfs_dir2_trace.c b/fs/xfs/xfs_dir2_trace.c deleted file mode 100644 index 6cc7c0c681a..00000000000 --- a/fs/xfs/xfs_dir2_trace.c +++ /dev/null @@ -1,216 +0,0 @@ -/* - * Copyright (c) 2000-2003,2005 Silicon Graphics, Inc. - * All Rights Reserved. - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it would be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - */ -#include "xfs.h" -#include "xfs_fs.h" -#include "xfs_types.h" -#include "xfs_inum.h" -#include "xfs_dir2.h" -#include "xfs_da_btree.h" -#include "xfs_bmap_btree.h" -#include "xfs_dir2_sf.h" -#include "xfs_attr_sf.h" -#include "xfs_dinode.h" -#include "xfs_inode.h" -#include "xfs_dir2_trace.h" - -#ifdef XFS_DIR2_TRACE -ktrace_t *xfs_dir2_trace_buf; - -/* - * Enter something in the trace buffers. 
- */ -static void -xfs_dir2_trace_enter( - xfs_inode_t *dp, - int type, - char *where, - char *name, - int namelen, - void *a0, - void *a1, - void *a2, - void *a3, - void *a4, - void *a5, - void *a6, - void *a7) -{ - void *n[5]; - - ASSERT(xfs_dir2_trace_buf); - ASSERT(dp->i_dir_trace); - if (name) - memcpy(n, name, min((int)sizeof(n), namelen)); - else - memset((char *)n, 0, sizeof(n)); - ktrace_enter(xfs_dir2_trace_buf, - (void *)(long)type, (void *)where, - (void *)a0, (void *)a1, (void *)a2, (void *)a3, - (void *)a4, (void *)a5, (void *)a6, (void *)a7, - (void *)(long)namelen, - (void *)n[0], (void *)n[1], (void *)n[2], - (void *)n[3], (void *)n[4]); - ktrace_enter(dp->i_dir_trace, - (void *)(long)type, (void *)where, - (void *)a0, (void *)a1, (void *)a2, (void *)a3, - (void *)a4, (void *)a5, (void *)a6, (void *)a7, - (void *)(long)namelen, - (void *)n[0], (void *)n[1], (void *)n[2], - (void *)n[3], (void *)n[4]); -} - -void -xfs_dir2_trace_args( - char *where, - xfs_da_args_t *args) -{ - xfs_dir2_trace_enter(args->dp, XFS_DIR2_KTRACE_ARGS, where, - (char *)args->name, (int)args->namelen, - (void *)(unsigned long)args->hashval, - (void *)((unsigned long)(args->inumber >> 32)), - (void *)((unsigned long)(args->inumber & 0xFFFFFFFF)), - (void *)args->dp, (void *)args->trans, - (void *)(unsigned long)(args->op_flags & XFS_DA_OP_JUSTCHECK), - NULL, NULL); -} - -void -xfs_dir2_trace_args_b( - char *where, - xfs_da_args_t *args, - xfs_dabuf_t *bp) -{ - xfs_dir2_trace_enter(args->dp, XFS_DIR2_KTRACE_ARGS_B, where, - (char *)args->name, (int)args->namelen, - (void *)(unsigned long)args->hashval, - (void *)((unsigned long)(args->inumber >> 32)), - (void *)((unsigned long)(args->inumber & 0xFFFFFFFF)), - (void *)args->dp, (void *)args->trans, - (void *)(unsigned long)(args->op_flags & XFS_DA_OP_JUSTCHECK), - (void *)(bp ? bp->bps[0] : NULL), NULL); -} - -void -xfs_dir2_trace_args_bb( - char *where, - xfs_da_args_t *args, - xfs_dabuf_t *lbp, - xfs_dabuf_t *dbp) -{ - xfs_dir2_trace_enter(args->dp, XFS_DIR2_KTRACE_ARGS_BB, where, - (char *)args->name, (int)args->namelen, - (void *)(unsigned long)args->hashval, - (void *)((unsigned long)(args->inumber >> 32)), - (void *)((unsigned long)(args->inumber & 0xFFFFFFFF)), - (void *)args->dp, (void *)args->trans, - (void *)(unsigned long)(args->op_flags & XFS_DA_OP_JUSTCHECK), - (void *)(lbp ? lbp->bps[0] : NULL), - (void *)(dbp ? dbp->bps[0] : NULL)); -} - -void -xfs_dir2_trace_args_bibii( - char *where, - xfs_da_args_t *args, - xfs_dabuf_t *bs, - int ss, - xfs_dabuf_t *bd, - int sd, - int c) -{ - xfs_buf_t *bpbs = bs ? bs->bps[0] : NULL; - xfs_buf_t *bpbd = bd ? bd->bps[0] : NULL; - - xfs_dir2_trace_enter(args->dp, XFS_DIR2_KTRACE_ARGS_BIBII, where, - (char *)args->name, (int)args->namelen, - (void *)args->dp, (void *)args->trans, - (void *)bpbs, (void *)(long)ss, (void *)bpbd, (void *)(long)sd, - (void *)(long)c, NULL); -} - -void -xfs_dir2_trace_args_db( - char *where, - xfs_da_args_t *args, - xfs_dir2_db_t db, - xfs_dabuf_t *bp) -{ - xfs_buf_t *dbp = bp ? 
bp->bps[0] : NULL; - - xfs_dir2_trace_enter(args->dp, XFS_DIR2_KTRACE_ARGS_DB, where, - (char *)args->name, (int)args->namelen, - (void *)(unsigned long)args->hashval, - (void *)((unsigned long)(args->inumber >> 32)), - (void *)((unsigned long)(args->inumber & 0xFFFFFFFF)), - (void *)args->dp, (void *)args->trans, - (void *)(unsigned long)(args->op_flags & XFS_DA_OP_JUSTCHECK), - (void *)(long)db, (void *)dbp); -} - -void -xfs_dir2_trace_args_i( - char *where, - xfs_da_args_t *args, - xfs_ino_t i) -{ - xfs_dir2_trace_enter(args->dp, XFS_DIR2_KTRACE_ARGS_I, where, - (char *)args->name, (int)args->namelen, - (void *)(unsigned long)args->hashval, - (void *)((unsigned long)(args->inumber >> 32)), - (void *)((unsigned long)(args->inumber & 0xFFFFFFFF)), - (void *)args->dp, (void *)args->trans, - (void *)(unsigned long)(args->op_flags & XFS_DA_OP_JUSTCHECK), - (void *)((unsigned long)(i >> 32)), - (void *)((unsigned long)(i & 0xFFFFFFFF))); -} - -void -xfs_dir2_trace_args_s( - char *where, - xfs_da_args_t *args, - int s) -{ - xfs_dir2_trace_enter(args->dp, XFS_DIR2_KTRACE_ARGS_S, where, - (char *)args->name, (int)args->namelen, - (void *)(unsigned long)args->hashval, - (void *)((unsigned long)(args->inumber >> 32)), - (void *)((unsigned long)(args->inumber & 0xFFFFFFFF)), - (void *)args->dp, (void *)args->trans, - (void *)(unsigned long)(args->op_flags & XFS_DA_OP_JUSTCHECK), - (void *)(long)s, NULL); -} - -void -xfs_dir2_trace_args_sb( - char *where, - xfs_da_args_t *args, - int s, - xfs_dabuf_t *bp) -{ - xfs_buf_t *dbp = bp ? bp->bps[0] : NULL; - - xfs_dir2_trace_enter(args->dp, XFS_DIR2_KTRACE_ARGS_SB, where, - (char *)args->name, (int)args->namelen, - (void *)(unsigned long)args->hashval, - (void *)((unsigned long)(args->inumber >> 32)), - (void *)((unsigned long)(args->inumber & 0xFFFFFFFF)), - (void *)args->dp, (void *)args->trans, - (void *)(unsigned long)(args->op_flags & XFS_DA_OP_JUSTCHECK), - (void *)(long)s, (void *)dbp); -} -#endif /* XFS_DIR2_TRACE */ diff --git a/fs/xfs/xfs_dir2_trace.h b/fs/xfs/xfs_dir2_trace.h deleted file mode 100644 index ca3c754f482..00000000000 --- a/fs/xfs/xfs_dir2_trace.h +++ /dev/null @@ -1,72 +0,0 @@ -/* - * Copyright (c) 2000,2005 Silicon Graphics, Inc. - * All Rights Reserved. - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it would be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - */ -#ifndef __XFS_DIR2_TRACE_H__ -#define __XFS_DIR2_TRACE_H__ - -/* - * Tracing for xfs v2 directories. 
- */ - -#if defined(XFS_DIR2_TRACE) - -struct ktrace; -struct xfs_dabuf; -struct xfs_da_args; - -#define XFS_DIR2_GTRACE_SIZE 4096 /* global buffer */ -#define XFS_DIR2_KTRACE_SIZE 32 /* per-inode buffer */ -extern struct ktrace *xfs_dir2_trace_buf; - -#define XFS_DIR2_KTRACE_ARGS 1 /* args only */ -#define XFS_DIR2_KTRACE_ARGS_B 2 /* args + buffer */ -#define XFS_DIR2_KTRACE_ARGS_BB 3 /* args + 2 buffers */ -#define XFS_DIR2_KTRACE_ARGS_DB 4 /* args, db, buffer */ -#define XFS_DIR2_KTRACE_ARGS_I 5 /* args, inum */ -#define XFS_DIR2_KTRACE_ARGS_S 6 /* args, int */ -#define XFS_DIR2_KTRACE_ARGS_SB 7 /* args, int, buffer */ -#define XFS_DIR2_KTRACE_ARGS_BIBII 8 /* args, buf/int/buf/int/int */ - -void xfs_dir2_trace_args(char *where, struct xfs_da_args *args); -void xfs_dir2_trace_args_b(char *where, struct xfs_da_args *args, - struct xfs_dabuf *bp); -void xfs_dir2_trace_args_bb(char *where, struct xfs_da_args *args, - struct xfs_dabuf *lbp, struct xfs_dabuf *dbp); -void xfs_dir2_trace_args_bibii(char *where, struct xfs_da_args *args, - struct xfs_dabuf *bs, int ss, - struct xfs_dabuf *bd, int sd, int c); -void xfs_dir2_trace_args_db(char *where, struct xfs_da_args *args, - xfs_dir2_db_t db, struct xfs_dabuf *bp); -void xfs_dir2_trace_args_i(char *where, struct xfs_da_args *args, xfs_ino_t i); -void xfs_dir2_trace_args_s(char *where, struct xfs_da_args *args, int s); -void xfs_dir2_trace_args_sb(char *where, struct xfs_da_args *args, int s, - struct xfs_dabuf *bp); - -#else /* XFS_DIR2_TRACE */ - -#define xfs_dir2_trace_args(where, args) -#define xfs_dir2_trace_args_b(where, args, bp) -#define xfs_dir2_trace_args_bb(where, args, lbp, dbp) -#define xfs_dir2_trace_args_bibii(where, args, bs, ss, bd, sd, c) -#define xfs_dir2_trace_args_db(where, args, db, bp) -#define xfs_dir2_trace_args_i(where, args, i) -#define xfs_dir2_trace_args_s(where, args, s) -#define xfs_dir2_trace_args_sb(where, args, s, bp) - -#endif /* XFS_DIR2_TRACE */ - -#endif /* __XFS_DIR2_TRACE_H__ */ diff --git a/fs/xfs/xfs_filestream.c b/fs/xfs/xfs_filestream.c index edf8bdf4141..a631e1451ab 100644 --- a/fs/xfs/xfs_filestream.c +++ b/fs/xfs/xfs_filestream.c @@ -34,6 +34,7 @@ #include "xfs_utils.h" #include "xfs_mru_cache.h" #include "xfs_filestream.h" +#include "xfs_trace.h" #ifdef XFS_FILESTREAMS_TRACE @@ -394,9 +395,7 @@ xfs_filestream_init(void) item_zone = kmem_zone_init(sizeof(fstrm_item_t), "fstrm_item"); if (!item_zone) return -ENOMEM; -#ifdef XFS_FILESTREAMS_TRACE - xfs_filestreams_trace_buf = ktrace_alloc(XFS_FSTRM_KTRACE_SIZE, KM_NOFS); -#endif + return 0; } @@ -407,9 +406,6 @@ xfs_filestream_init(void) void xfs_filestream_uninit(void) { -#ifdef XFS_FILESTREAMS_TRACE - ktrace_free(xfs_filestreams_trace_buf); -#endif kmem_zone_destroy(item_zone); } diff --git a/fs/xfs/xfs_fsops.c b/fs/xfs/xfs_fsops.c index 36079aa9134..a13919a6a36 100644 --- a/fs/xfs/xfs_fsops.c +++ b/fs/xfs/xfs_fsops.c @@ -45,6 +45,7 @@ #include "xfs_rtalloc.h" #include "xfs_rw.h" #include "xfs_filestream.h" +#include "xfs_trace.h" /* * File system operations @@ -347,6 +348,7 @@ xfs_growfs_data_private( be32_add_cpu(&agf->agf_length, new); ASSERT(be32_to_cpu(agf->agf_length) == be32_to_cpu(agi->agi_length)); + xfs_alloc_log_agf(tp, bp, XFS_AGF_LENGTH); /* * Free the new space. 
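[Note: the { value, "name" } tables this patch adds to the headers (XFS_BLI_FLAGS and XFS_DA_OP_FLAGS above, plus XFS_LOCK_FLAGS, XFS_ITRUNC_FLAGS, BMAPI_FLAGS, XFS_DQ_FLAGS, XLOG_TIC_FLAGS, XFS_LI_FLAGS and XFS_TRANS_TYPES further down) use the pair layout taken by the generic tracer's __print_flags()/__print_symbolic() helpers, which is presumably how the new tracepoints in xfs_trace.h render flag words symbolically. A minimal sketch of a trace header consuming one such table follows; the event name, header guard, include path and file name are illustrative assumptions and are not part of this patch.]

#undef TRACE_SYSTEM
#define TRACE_SYSTEM xfs

#if !defined(_XFS_FLAGS_EXAMPLE_H) || defined(TRACE_HEADER_MULTI_READ)
#define _XFS_FLAGS_EXAMPLE_H

#include <linux/tracepoint.h>
/* Assumes the XFS headers that define struct xfs_buf_log_item and
 * XFS_BLI_FLAGS (xfs.h, xfs_buf_item.h) are already in scope, as they
 * would be when built inside fs/xfs/. */

TRACE_EVENT(xfs_buf_item_flags_example,	/* hypothetical event name */
	TP_PROTO(struct xfs_buf_log_item *bip),
	TP_ARGS(bip),
	TP_STRUCT__entry(
		__field(unsigned int, flags)
	),
	TP_fast_assign(
		__entry->flags = bip->bli_flags;
	),
	/* XFS_BLI_FLAGS expands to { XFS_BLI_HOLD, "HOLD" }, ..., so the
	 * formatted event reads e.g. "flags HOLD|DIRTY" instead of 0x3. */
	TP_printk("flags %s",
		  __print_flags(__entry->flags, "|", XFS_BLI_FLAGS))
);

#endif /* _XFS_FLAGS_EXAMPLE_H */

/* These must stay outside the multi-read guard; the path and file name
 * here are placeholders for wherever such a header actually lives. */
#undef TRACE_INCLUDE_PATH
#define TRACE_INCLUDE_PATH .
#define TRACE_INCLUDE_FILE xfs_flags_example
#include <trace/define_trace.h>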
diff --git a/fs/xfs/xfs_iget.c b/fs/xfs/xfs_iget.c index 073bb4a26b1..f5c904a10c1 100644 --- a/fs/xfs/xfs_iget.c +++ b/fs/xfs/xfs_iget.c @@ -43,7 +43,7 @@ #include "xfs_inode_item.h" #include "xfs_bmap.h" #include "xfs_btree_trace.h" -#include "xfs_dir2_trace.h" +#include "xfs_trace.h" /* @@ -90,28 +90,6 @@ xfs_inode_alloc( ip->i_size = 0; ip->i_new_size = 0; - /* - * Initialize inode's trace buffers. - */ -#ifdef XFS_INODE_TRACE - ip->i_trace = ktrace_alloc(INODE_TRACE_SIZE, KM_NOFS); -#endif -#ifdef XFS_BMAP_TRACE - ip->i_xtrace = ktrace_alloc(XFS_BMAP_KTRACE_SIZE, KM_NOFS); -#endif -#ifdef XFS_BTREE_TRACE - ip->i_btrace = ktrace_alloc(XFS_BMBT_KTRACE_SIZE, KM_NOFS); -#endif -#ifdef XFS_RW_TRACE - ip->i_rwtrace = ktrace_alloc(XFS_RW_KTRACE_SIZE, KM_NOFS); -#endif -#ifdef XFS_ILOCK_TRACE - ip->i_lock_trace = ktrace_alloc(XFS_ILOCK_KTRACE_SIZE, KM_NOFS); -#endif -#ifdef XFS_DIR2_TRACE - ip->i_dir_trace = ktrace_alloc(XFS_DIR2_KTRACE_SIZE, KM_NOFS); -#endif - /* prevent anyone from using this yet */ VFS_I(ip)->i_state = I_NEW|I_LOCK; @@ -133,25 +111,6 @@ xfs_inode_free( if (ip->i_afp) xfs_idestroy_fork(ip, XFS_ATTR_FORK); -#ifdef XFS_INODE_TRACE - ktrace_free(ip->i_trace); -#endif -#ifdef XFS_BMAP_TRACE - ktrace_free(ip->i_xtrace); -#endif -#ifdef XFS_BTREE_TRACE - ktrace_free(ip->i_btrace); -#endif -#ifdef XFS_RW_TRACE - ktrace_free(ip->i_rwtrace); -#endif -#ifdef XFS_ILOCK_TRACE - ktrace_free(ip->i_lock_trace); -#endif -#ifdef XFS_DIR2_TRACE - ktrace_free(ip->i_dir_trace); -#endif - if (ip->i_itemp) { /* * Only if we are shutting down the fs will we see an @@ -210,6 +169,7 @@ xfs_iget_cache_hit( * instead of polling for it. */ if (ip->i_flags & (XFS_INEW|XFS_IRECLAIM)) { + trace_xfs_iget_skip(ip); XFS_STATS_INC(xs_ig_frecycle); error = EAGAIN; goto out_error; @@ -228,7 +188,7 @@ xfs_iget_cache_hit( * Need to carefully get it back into useable state. 
*/ if (ip->i_flags & XFS_IRECLAIMABLE) { - xfs_itrace_exit_tag(ip, "xfs_iget.alloc"); + trace_xfs_iget_reclaim(ip); /* * We need to set XFS_INEW atomically with clearing the @@ -254,6 +214,7 @@ xfs_iget_cache_hit( ip->i_flags &= ~XFS_INEW; ip->i_flags |= XFS_IRECLAIMABLE; __xfs_inode_set_reclaim_tag(pag, ip); + trace_xfs_iget_reclaim(ip); goto out_error; } inode->i_state = I_LOCK|I_NEW; @@ -273,8 +234,9 @@ xfs_iget_cache_hit( xfs_ilock(ip, lock_flags); xfs_iflags_clear(ip, XFS_ISTALE); - xfs_itrace_exit_tag(ip, "xfs_iget.found"); XFS_STATS_INC(xs_ig_found); + + trace_xfs_iget_found(ip); return 0; out_error: @@ -308,7 +270,7 @@ xfs_iget_cache_miss( if (error) goto out_destroy; - xfs_itrace_exit_tag(ip, "xfs_iget.alloc"); + xfs_itrace_entry(ip); if ((ip->i_d.di_mode == 0) && !(flags & XFS_IGET_CREATE)) { error = ENOENT; @@ -353,6 +315,8 @@ xfs_iget_cache_miss( write_unlock(&pag->pag_ici_lock); radix_tree_preload_end(); + + trace_xfs_iget_alloc(ip); *ipp = ip; return 0; @@ -639,7 +603,7 @@ xfs_ilock( else if (lock_flags & XFS_ILOCK_SHARED) mraccess_nested(&ip->i_lock, XFS_ILOCK_DEP(lock_flags)); - xfs_ilock_trace(ip, 1, lock_flags, (inst_t *)__return_address); + trace_xfs_ilock(ip, lock_flags, _RET_IP_); } /* @@ -684,7 +648,7 @@ xfs_ilock_nowait( if (!mrtryaccess(&ip->i_lock)) goto out_undo_iolock; } - xfs_ilock_trace(ip, 2, lock_flags, (inst_t *)__return_address); + trace_xfs_ilock_nowait(ip, lock_flags, _RET_IP_); return 1; out_undo_iolock: @@ -746,7 +710,7 @@ xfs_iunlock( xfs_trans_unlocked_item(ip->i_itemp->ili_item.li_ailp, (xfs_log_item_t*)(ip->i_itemp)); } - xfs_ilock_trace(ip, 3, lock_flags, (inst_t *)__return_address); + trace_xfs_iunlock(ip, lock_flags, _RET_IP_); } /* @@ -765,6 +729,8 @@ xfs_ilock_demote( mrdemote(&ip->i_lock); if (lock_flags & XFS_IOLOCK_EXCL) mrdemote(&ip->i_iolock); + + trace_xfs_ilock_demote(ip, lock_flags, _RET_IP_); } #ifdef DEBUG @@ -795,52 +761,3 @@ xfs_isilocked( return 1; } #endif - -#ifdef XFS_INODE_TRACE - -#define KTRACE_ENTER(ip, vk, s, line, ra) \ - ktrace_enter((ip)->i_trace, \ -/* 0 */ (void *)(__psint_t)(vk), \ -/* 1 */ (void *)(s), \ -/* 2 */ (void *)(__psint_t) line, \ -/* 3 */ (void *)(__psint_t)atomic_read(&VFS_I(ip)->i_count), \ -/* 4 */ (void *)(ra), \ -/* 5 */ NULL, \ -/* 6 */ (void *)(__psint_t)current_cpu(), \ -/* 7 */ (void *)(__psint_t)current_pid(), \ -/* 8 */ (void *)__return_address, \ -/* 9 */ NULL, NULL, NULL, NULL, NULL, NULL, NULL) - -/* - * Vnode tracing code. 
- */ -void -_xfs_itrace_entry(xfs_inode_t *ip, const char *func, inst_t *ra) -{ - KTRACE_ENTER(ip, INODE_KTRACE_ENTRY, func, 0, ra); -} - -void -_xfs_itrace_exit(xfs_inode_t *ip, const char *func, inst_t *ra) -{ - KTRACE_ENTER(ip, INODE_KTRACE_EXIT, func, 0, ra); -} - -void -xfs_itrace_hold(xfs_inode_t *ip, char *file, int line, inst_t *ra) -{ - KTRACE_ENTER(ip, INODE_KTRACE_HOLD, file, line, ra); -} - -void -_xfs_itrace_ref(xfs_inode_t *ip, char *file, int line, inst_t *ra) -{ - KTRACE_ENTER(ip, INODE_KTRACE_REF, file, line, ra); -} - -void -xfs_itrace_rele(xfs_inode_t *ip, char *file, int line, inst_t *ra) -{ - KTRACE_ENTER(ip, INODE_KTRACE_RELE, file, line, ra); -} -#endif /* XFS_INODE_TRACE */ diff --git a/fs/xfs/xfs_inode.c b/fs/xfs/xfs_inode.c index 614acd508b3..ce278b3ae7f 100644 --- a/fs/xfs/xfs_inode.c +++ b/fs/xfs/xfs_inode.c @@ -47,10 +47,10 @@ #include "xfs_rw.h" #include "xfs_error.h" #include "xfs_utils.h" -#include "xfs_dir2_trace.h" #include "xfs_quota.h" #include "xfs_filestream.h" #include "xfs_vnodeops.h" +#include "xfs_trace.h" kmem_zone_t *xfs_ifork_zone; kmem_zone_t *xfs_inode_zone; @@ -1291,42 +1291,6 @@ xfs_file_last_byte( return last_byte; } -#if defined(XFS_RW_TRACE) -STATIC void -xfs_itrunc_trace( - int tag, - xfs_inode_t *ip, - int flag, - xfs_fsize_t new_size, - xfs_off_t toss_start, - xfs_off_t toss_finish) -{ - if (ip->i_rwtrace == NULL) { - return; - } - - ktrace_enter(ip->i_rwtrace, - (void*)((long)tag), - (void*)ip, - (void*)(unsigned long)((ip->i_d.di_size >> 32) & 0xffffffff), - (void*)(unsigned long)(ip->i_d.di_size & 0xffffffff), - (void*)((long)flag), - (void*)(unsigned long)((new_size >> 32) & 0xffffffff), - (void*)(unsigned long)(new_size & 0xffffffff), - (void*)(unsigned long)((toss_start >> 32) & 0xffffffff), - (void*)(unsigned long)(toss_start & 0xffffffff), - (void*)(unsigned long)((toss_finish >> 32) & 0xffffffff), - (void*)(unsigned long)(toss_finish & 0xffffffff), - (void*)(unsigned long)current_cpu(), - (void*)(unsigned long)current_pid(), - (void*)NULL, - (void*)NULL, - (void*)NULL); -} -#else -#define xfs_itrunc_trace(tag, ip, flag, new_size, toss_start, toss_finish) -#endif - /* * Start the truncation of the file to new_size. The new size * must be smaller than the current size. This routine will @@ -1409,8 +1373,7 @@ xfs_itruncate_start( return 0; } last_byte = xfs_file_last_byte(ip); - xfs_itrunc_trace(XFS_ITRUNC_START, ip, flags, new_size, toss_start, - last_byte); + trace_xfs_itruncate_start(ip, flags, new_size, toss_start, last_byte); if (last_byte > toss_start) { if (flags & XFS_ITRUNC_DEFINITE) { xfs_tosspages(ip, toss_start, @@ -1514,7 +1477,8 @@ xfs_itruncate_finish( new_size = 0LL; } first_unmap_block = XFS_B_TO_FSB(mp, (xfs_ufsize_t)new_size); - xfs_itrunc_trace(XFS_ITRUNC_FINISH1, ip, 0, new_size, 0, 0); + trace_xfs_itruncate_finish_start(ip, new_size); + /* * The first thing we do is set the size to new_size permanently * on disk. 
This way we don't have to worry about anyone ever @@ -1731,7 +1695,7 @@ xfs_itruncate_finish( ASSERT((new_size != 0) || (fork == XFS_ATTR_FORK) || (ip->i_d.di_nextents == 0)); - xfs_itrunc_trace(XFS_ITRUNC_FINISH2, ip, 0, new_size, 0, 0); + trace_xfs_itruncate_finish_end(ip, new_size); return 0; } @@ -3252,23 +3216,6 @@ corrupt_out: return XFS_ERROR(EFSCORRUPTED); } - - -#ifdef XFS_ILOCK_TRACE -void -xfs_ilock_trace(xfs_inode_t *ip, int lock, unsigned int lockflags, inst_t *ra) -{ - ktrace_enter(ip->i_lock_trace, - (void *)ip, - (void *)(unsigned long)lock, /* 1 = LOCK, 3=UNLOCK, etc */ - (void *)(unsigned long)lockflags, /* XFS_ILOCK_EXCL etc */ - (void *)ra, /* caller of ilock */ - (void *)(unsigned long)current_cpu(), - (void *)(unsigned long)current_pid(), - NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL); -} -#endif - /* * Return a pointer to the extent record at file index idx. */ @@ -3309,6 +3256,8 @@ xfs_iext_insert( xfs_ifork_t *ifp = (state & BMAP_ATTRFORK) ? ip->i_afp : &ip->i_df; xfs_extnum_t i; /* extent record index */ + trace_xfs_iext_insert(ip, idx, new, state, _RET_IP_); + ASSERT(ifp->if_flags & XFS_IFEXTENTS); xfs_iext_add(ifp, idx, count); for (i = idx; i < idx + count; i++, new++) @@ -3560,6 +3509,8 @@ xfs_iext_remove( xfs_extnum_t nextents; /* number of extents in file */ int new_size; /* size of extents after removal */ + trace_xfs_iext_remove(ip, idx, state, _RET_IP_); + ASSERT(ext_diff > 0); nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t); new_size = (nextents - ext_diff) * sizeof(xfs_bmbt_rec_t); diff --git a/fs/xfs/xfs_inode.h b/fs/xfs/xfs_inode.h index 946a8ca5d9c..ec1f28c4fc4 100644 --- a/fs/xfs/xfs_inode.h +++ b/fs/xfs/xfs_inode.h @@ -213,7 +213,6 @@ typedef struct xfs_icdinode { struct bhv_desc; struct cred; -struct ktrace; struct xfs_buf; struct xfs_bmap_free; struct xfs_bmbt_irec; @@ -222,13 +221,6 @@ struct xfs_mount; struct xfs_trans; struct xfs_dquot; -#if defined(XFS_ILOCK_TRACE) -#define XFS_ILOCK_KTRACE_SIZE 32 -extern void xfs_ilock_trace(struct xfs_inode *, int, unsigned int, inst_t *); -#else -#define xfs_ilock_trace(i,n,f,ra) -#endif - typedef struct dm_attrs_s { __uint32_t da_dmevmask; /* DMIG event mask */ __uint16_t da_dmstate; /* DMIG state info */ @@ -271,26 +263,6 @@ typedef struct xfs_inode { /* VFS inode */ struct inode i_vnode; /* embedded VFS inode */ - - /* Trace buffers per inode. */ -#ifdef XFS_INODE_TRACE - struct ktrace *i_trace; /* general inode trace */ -#endif -#ifdef XFS_BMAP_TRACE - struct ktrace *i_xtrace; /* inode extent list trace */ -#endif -#ifdef XFS_BTREE_TRACE - struct ktrace *i_btrace; /* inode bmap btree trace */ -#endif -#ifdef XFS_RW_TRACE - struct ktrace *i_rwtrace; /* inode read/write trace */ -#endif -#ifdef XFS_ILOCK_TRACE - struct ktrace *i_lock_trace; /* inode lock/unlock trace */ -#endif -#ifdef XFS_DIR2_TRACE - struct ktrace *i_dir_trace; /* inode directory trace */ -#endif } xfs_inode_t; #define XFS_ISIZE(ip) (((ip)->i_d.di_mode & S_IFMT) == S_IFREG) ? \ @@ -406,6 +378,14 @@ static inline void xfs_ifunlock(xfs_inode_t *ip) #define XFS_LOCK_MASK (XFS_IOLOCK_EXCL | XFS_IOLOCK_SHARED \ | XFS_ILOCK_EXCL | XFS_ILOCK_SHARED) +#define XFS_LOCK_FLAGS \ + { XFS_IOLOCK_EXCL, "IOLOCK_EXCL" }, \ + { XFS_IOLOCK_SHARED, "IOLOCK_SHARED" }, \ + { XFS_ILOCK_EXCL, "ILOCK_EXCL" }, \ + { XFS_ILOCK_SHARED, "ILOCK_SHARED" }, \ + { XFS_IUNLOCK_NONOTIFY, "IUNLOCK_NONOTIFY" } + + /* * Flags for lockdep annotations. 
* @@ -455,6 +435,10 @@ static inline void xfs_ifunlock(xfs_inode_t *ip) #define XFS_ITRUNC_DEFINITE 0x1 #define XFS_ITRUNC_MAYBE 0x2 +#define XFS_ITRUNC_FLAGS \ + { XFS_ITRUNC_DEFINITE, "DEFINITE" }, \ + { XFS_ITRUNC_MAYBE, "MAYBE" } + /* * For multiple groups support: if S_ISGID bit is set in the parent * directory, group of new file is set to that of the parent, and @@ -507,48 +491,16 @@ void xfs_lock_two_inodes(xfs_inode_t *, xfs_inode_t *, uint); void xfs_synchronize_times(xfs_inode_t *); void xfs_mark_inode_dirty_sync(xfs_inode_t *); -#if defined(XFS_INODE_TRACE) - -#define INODE_TRACE_SIZE 16 /* number of trace entries */ -#define INODE_KTRACE_ENTRY 1 -#define INODE_KTRACE_EXIT 2 -#define INODE_KTRACE_HOLD 3 -#define INODE_KTRACE_REF 4 -#define INODE_KTRACE_RELE 5 - -extern void _xfs_itrace_entry(struct xfs_inode *, const char *, inst_t *); -extern void _xfs_itrace_exit(struct xfs_inode *, const char *, inst_t *); -extern void xfs_itrace_hold(struct xfs_inode *, char *, int, inst_t *); -extern void _xfs_itrace_ref(struct xfs_inode *, char *, int, inst_t *); -extern void xfs_itrace_rele(struct xfs_inode *, char *, int, inst_t *); -#define xfs_itrace_entry(ip) \ - _xfs_itrace_entry(ip, __func__, (inst_t *)__return_address) -#define xfs_itrace_exit(ip) \ - _xfs_itrace_exit(ip, __func__, (inst_t *)__return_address) -#define xfs_itrace_exit_tag(ip, tag) \ - _xfs_itrace_exit(ip, tag, (inst_t *)__return_address) -#define xfs_itrace_ref(ip) \ - _xfs_itrace_ref(ip, __FILE__, __LINE__, (inst_t *)__return_address) - -#else -#define xfs_itrace_entry(a) -#define xfs_itrace_exit(a) -#define xfs_itrace_exit_tag(a, b) -#define xfs_itrace_hold(a, b, c, d) -#define xfs_itrace_ref(a) -#define xfs_itrace_rele(a, b, c, d) -#endif - #define IHOLD(ip) \ do { \ ASSERT(atomic_read(&VFS_I(ip)->i_count) > 0) ; \ atomic_inc(&(VFS_I(ip)->i_count)); \ - xfs_itrace_hold((ip), __FILE__, __LINE__, (inst_t *)__return_address); \ + trace_xfs_ihold(ip, _THIS_IP_); \ } while (0) #define IRELE(ip) \ do { \ - xfs_itrace_rele((ip), __FILE__, __LINE__, (inst_t *)__return_address); \ + trace_xfs_irele(ip, _THIS_IP_); \ iput(VFS_I(ip)); \ } while (0) diff --git a/fs/xfs/xfs_inode_item.c b/fs/xfs/xfs_inode_item.c index 9794b876d6f..f38855d21ea 100644 --- a/fs/xfs/xfs_inode_item.c +++ b/fs/xfs/xfs_inode_item.c @@ -41,6 +41,7 @@ #include "xfs_ialloc.h" #include "xfs_rw.h" #include "xfs_error.h" +#include "xfs_trace.h" kmem_zone_t *xfs_ili_zone; /* inode log item zone */ @@ -800,7 +801,9 @@ xfs_inode_item_pushbuf( !completion_done(&ip->i_flush)); iip->ili_pushbuf_flag = 0; xfs_iunlock(ip, XFS_ILOCK_SHARED); - xfs_buftrace("INODE ITEM PUSH", bp); + + trace_xfs_inode_item_push(bp, _RET_IP_); + if (XFS_BUF_ISPINNED(bp)) { xfs_log_force(mp, (xfs_lsn_t)0, XFS_LOG_FORCE); diff --git a/fs/xfs/xfs_iomap.c b/fs/xfs/xfs_iomap.c index 7294abce6ef..0b65039951a 100644 --- a/fs/xfs/xfs_iomap.c +++ b/fs/xfs/xfs_iomap.c @@ -47,72 +47,8 @@ #include "xfs_trans_space.h" #include "xfs_utils.h" #include "xfs_iomap.h" +#include "xfs_trace.h" -#if defined(XFS_RW_TRACE) -void -xfs_iomap_enter_trace( - int tag, - xfs_inode_t *ip, - xfs_off_t offset, - ssize_t count) -{ - if (!ip->i_rwtrace) - return; - - ktrace_enter(ip->i_rwtrace, - (void *)((unsigned long)tag), - (void *)ip, - (void *)((unsigned long)((ip->i_d.di_size >> 32) & 0xffffffff)), - (void *)((unsigned long)(ip->i_d.di_size & 0xffffffff)), - (void *)((unsigned long)((offset >> 32) & 0xffffffff)), - (void *)((unsigned long)(offset & 0xffffffff)), - (void *)((unsigned long)count), - (void 
*)((unsigned long)((ip->i_new_size >> 32) & 0xffffffff)), - (void *)((unsigned long)(ip->i_new_size & 0xffffffff)), - (void *)((unsigned long)current_pid()), - (void *)NULL, - (void *)NULL, - (void *)NULL, - (void *)NULL, - (void *)NULL, - (void *)NULL); -} - -void -xfs_iomap_map_trace( - int tag, - xfs_inode_t *ip, - xfs_off_t offset, - ssize_t count, - xfs_iomap_t *iomapp, - xfs_bmbt_irec_t *imapp, - int flags) -{ - if (!ip->i_rwtrace) - return; - - ktrace_enter(ip->i_rwtrace, - (void *)((unsigned long)tag), - (void *)ip, - (void *)((unsigned long)((ip->i_d.di_size >> 32) & 0xffffffff)), - (void *)((unsigned long)(ip->i_d.di_size & 0xffffffff)), - (void *)((unsigned long)((offset >> 32) & 0xffffffff)), - (void *)((unsigned long)(offset & 0xffffffff)), - (void *)((unsigned long)count), - (void *)((unsigned long)flags), - (void *)((unsigned long)((iomapp->iomap_offset >> 32) & 0xffffffff)), - (void *)((unsigned long)(iomapp->iomap_offset & 0xffffffff)), - (void *)((unsigned long)(iomapp->iomap_delta)), - (void *)((unsigned long)(iomapp->iomap_bsize)), - (void *)((unsigned long)(iomapp->iomap_bn)), - (void *)(__psint_t)(imapp->br_startoff), - (void *)((unsigned long)(imapp->br_blockcount)), - (void *)(__psint_t)(imapp->br_startblock)); -} -#else -#define xfs_iomap_enter_trace(tag, io, offset, count) -#define xfs_iomap_map_trace(tag, io, offset, count, iomapp, imapp, flags) -#endif #define XFS_WRITEIO_ALIGN(mp,off) (((off) >> mp->m_writeio_log) \ << mp->m_writeio_log) @@ -187,21 +123,20 @@ xfs_iomap( if (XFS_FORCED_SHUTDOWN(mp)) return XFS_ERROR(EIO); + trace_xfs_iomap_enter(ip, offset, count, flags, NULL); + switch (flags & (BMAPI_READ | BMAPI_WRITE | BMAPI_ALLOCATE)) { case BMAPI_READ: - xfs_iomap_enter_trace(XFS_IOMAP_READ_ENTER, ip, offset, count); lockmode = xfs_ilock_map_shared(ip); bmapi_flags = XFS_BMAPI_ENTIRE; break; case BMAPI_WRITE: - xfs_iomap_enter_trace(XFS_IOMAP_WRITE_ENTER, ip, offset, count); lockmode = XFS_ILOCK_EXCL; if (flags & BMAPI_IGNSTATE) bmapi_flags |= XFS_BMAPI_IGSTATE|XFS_BMAPI_ENTIRE; xfs_ilock(ip, lockmode); break; case BMAPI_ALLOCATE: - xfs_iomap_enter_trace(XFS_IOMAP_ALLOC_ENTER, ip, offset, count); lockmode = XFS_ILOCK_SHARED; bmapi_flags = XFS_BMAPI_ENTIRE; @@ -237,8 +172,7 @@ xfs_iomap( if (nimaps && (imap.br_startblock != HOLESTARTBLOCK) && (imap.br_startblock != DELAYSTARTBLOCK)) { - xfs_iomap_map_trace(XFS_IOMAP_WRITE_MAP, ip, - offset, count, iomapp, &imap, flags); + trace_xfs_iomap_found(ip, offset, count, flags, &imap); break; } @@ -250,8 +184,7 @@ xfs_iomap( &imap, &nimaps); } if (!error) { - xfs_iomap_map_trace(XFS_IOMAP_ALLOC_MAP, ip, - offset, count, iomapp, &imap, flags); + trace_xfs_iomap_alloc(ip, offset, count, flags, &imap); } iomap_flags = IOMAP_NEW; break; @@ -261,8 +194,7 @@ xfs_iomap( lockmode = 0; if (nimaps && !isnullstartblock(imap.br_startblock)) { - xfs_iomap_map_trace(XFS_IOMAP_WRITE_MAP, ip, - offset, count, iomapp, &imap, flags); + trace_xfs_iomap_found(ip, offset, count, flags, &imap); break; } @@ -623,8 +555,7 @@ retry: * delalloc blocks and retry without EOF preallocation. 
*/ if (nimaps == 0) { - xfs_iomap_enter_trace(XFS_IOMAP_WRITE_NOSPACE, - ip, offset, count); + trace_xfs_delalloc_enospc(ip, offset, count); if (flushed) return XFS_ERROR(ENOSPC); @@ -837,7 +768,7 @@ xfs_iomap_write_unwritten( int committed; int error; - xfs_iomap_enter_trace(XFS_IOMAP_UNWRITTEN, ip, offset, count); + trace_xfs_unwritten_convert(ip, offset, count); offset_fsb = XFS_B_TO_FSBT(mp, offset); count_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)offset + count); diff --git a/fs/xfs/xfs_iomap.h b/fs/xfs/xfs_iomap.h index fdcf7b82747..174f2999099 100644 --- a/fs/xfs/xfs_iomap.h +++ b/fs/xfs/xfs_iomap.h @@ -43,6 +43,14 @@ typedef enum { BMAPI_TRYLOCK = (1 << 7), /* non-blocking request */ } bmapi_flags_t; +#define BMAPI_FLAGS \ + { BMAPI_READ, "READ" }, \ + { BMAPI_WRITE, "WRITE" }, \ + { BMAPI_ALLOCATE, "ALLOCATE" }, \ + { BMAPI_IGNSTATE, "IGNSTATE" }, \ + { BMAPI_DIRECT, "DIRECT" }, \ + { BMAPI_MMAP, "MMAP" }, \ + { BMAPI_TRYLOCK, "TRYLOCK" } /* * xfs_iomap_t: File system I/O map diff --git a/fs/xfs/xfs_log.c b/fs/xfs/xfs_log.c index 9dbdff3ea48..4cb1792040e 100644 --- a/fs/xfs/xfs_log.c +++ b/fs/xfs/xfs_log.c @@ -40,6 +40,7 @@ #include "xfs_dinode.h" #include "xfs_inode.h" #include "xfs_rw.h" +#include "xfs_trace.h" kmem_zone_t *xfs_log_ticket_zone; @@ -122,85 +123,6 @@ STATIC void xlog_verify_tail_lsn(xlog_t *log, xlog_in_core_t *iclog, STATIC int xlog_iclogs_empty(xlog_t *log); -#if defined(XFS_LOG_TRACE) - -#define XLOG_TRACE_LOGGRANT_SIZE 2048 -#define XLOG_TRACE_ICLOG_SIZE 256 - -void -xlog_trace_loggrant_alloc(xlog_t *log) -{ - log->l_grant_trace = ktrace_alloc(XLOG_TRACE_LOGGRANT_SIZE, KM_NOFS); -} - -void -xlog_trace_loggrant_dealloc(xlog_t *log) -{ - ktrace_free(log->l_grant_trace); -} - -void -xlog_trace_loggrant(xlog_t *log, xlog_ticket_t *tic, xfs_caddr_t string) -{ - unsigned long cnts; - - /* ticket counts are 1 byte each */ - cnts = ((unsigned long)tic->t_ocnt) | ((unsigned long)tic->t_cnt) << 8; - - ktrace_enter(log->l_grant_trace, - (void *)tic, - (void *)log->l_reserve_headq, - (void *)log->l_write_headq, - (void *)((unsigned long)log->l_grant_reserve_cycle), - (void *)((unsigned long)log->l_grant_reserve_bytes), - (void *)((unsigned long)log->l_grant_write_cycle), - (void *)((unsigned long)log->l_grant_write_bytes), - (void *)((unsigned long)log->l_curr_cycle), - (void *)((unsigned long)log->l_curr_block), - (void *)((unsigned long)CYCLE_LSN(log->l_tail_lsn)), - (void *)((unsigned long)BLOCK_LSN(log->l_tail_lsn)), - (void *)string, - (void *)((unsigned long)tic->t_trans_type), - (void *)cnts, - (void *)((unsigned long)tic->t_curr_res), - (void *)((unsigned long)tic->t_unit_res)); -} - -void -xlog_trace_iclog_alloc(xlog_in_core_t *iclog) -{ - iclog->ic_trace = ktrace_alloc(XLOG_TRACE_ICLOG_SIZE, KM_NOFS); -} - -void -xlog_trace_iclog_dealloc(xlog_in_core_t *iclog) -{ - ktrace_free(iclog->ic_trace); -} - -void -xlog_trace_iclog(xlog_in_core_t *iclog, uint state) -{ - ktrace_enter(iclog->ic_trace, - (void *)((unsigned long)state), - (void *)((unsigned long)current_pid()), - (void *)NULL, (void *)NULL, (void *)NULL, (void *)NULL, - (void *)NULL, (void *)NULL, (void *)NULL, (void *)NULL, - (void *)NULL, (void *)NULL, (void *)NULL, (void *)NULL, - (void *)NULL, (void *)NULL); -} -#else - -#define xlog_trace_loggrant_alloc(log) -#define xlog_trace_loggrant_dealloc(log) -#define xlog_trace_loggrant(log,tic,string) - -#define xlog_trace_iclog_alloc(iclog) -#define xlog_trace_iclog_dealloc(iclog) -#define xlog_trace_iclog(iclog,state) - -#endif /* XFS_LOG_TRACE */ - static void 
xlog_ins_ticketq(struct xlog_ticket **qp, struct xlog_ticket *tic) @@ -353,15 +275,17 @@ xfs_log_done(xfs_mount_t *mp, if ((ticket->t_flags & XLOG_TIC_PERM_RESERV) == 0 || (flags & XFS_LOG_REL_PERM_RESERV)) { + trace_xfs_log_done_nonperm(log, ticket); + /* * Release ticket if not permanent reservation or a specific * request has been made to release a permanent reservation. */ - xlog_trace_loggrant(log, ticket, "xfs_log_done: (non-permanent)"); xlog_ungrant_log_space(log, ticket); xfs_log_ticket_put(ticket); } else { - xlog_trace_loggrant(log, ticket, "xfs_log_done: (permanent)"); + trace_xfs_log_done_perm(log, ticket); + xlog_regrant_reserve_log_space(log, ticket); /* If this ticket was a permanent reservation and we aren't * trying to release it, reset the inited flags; so next time @@ -505,10 +429,13 @@ xfs_log_reserve(xfs_mount_t *mp, XFS_STATS_INC(xs_try_logspace); + if (*ticket != NULL) { ASSERT(flags & XFS_LOG_PERM_RESERV); internal_ticket = (xlog_ticket_t *)*ticket; - xlog_trace_loggrant(log, internal_ticket, "xfs_log_reserve: existing ticket (permanent trans)"); + + trace_xfs_log_reserve(log, internal_ticket); + xlog_grant_push_ail(mp, internal_ticket->t_unit_res); retval = xlog_regrant_write_log_space(log, internal_ticket); } else { @@ -519,10 +446,9 @@ xfs_log_reserve(xfs_mount_t *mp, return XFS_ERROR(ENOMEM); internal_ticket->t_trans_type = t_type; *ticket = internal_ticket; - xlog_trace_loggrant(log, internal_ticket, - (internal_ticket->t_flags & XLOG_TIC_PERM_RESERV) ? - "xfs_log_reserve: create new ticket (permanent trans)" : - "xfs_log_reserve: create new ticket"); + + trace_xfs_log_reserve(log, internal_ticket); + xlog_grant_push_ail(mp, (internal_ticket->t_unit_res * internal_ticket->t_cnt)); @@ -734,7 +660,7 @@ xfs_log_unmount_write(xfs_mount_t *mp) spin_unlock(&log->l_icloglock); } if (tic) { - xlog_trace_loggrant(log, tic, "unmount rec"); + trace_xfs_log_umount_write(log, tic); xlog_ungrant_log_space(log, tic); xfs_log_ticket_put(tic); } @@ -1030,7 +956,6 @@ xlog_iodone(xfs_buf_t *bp) xfs_fs_cmn_err(CE_WARN, l->l_mp, "xlog_iodone: Barriers are no longer supported" " by device. 
Disabling barriers\n"); - xfs_buftrace("XLOG_IODONE BARRIERS OFF", bp); } /* @@ -1085,13 +1010,10 @@ xlog_bdstrat_cb(struct xfs_buf *bp) return 0; } - xfs_buftrace("XLOG__BDSTRAT IOERROR", bp); XFS_BUF_ERROR(bp, EIO); XFS_BUF_STALE(bp); xfs_biodone(bp); return XFS_ERROR(EIO); - - } /* @@ -1246,7 +1168,6 @@ xlog_alloc_log(xfs_mount_t *mp, spin_lock_init(&log->l_grant_lock); sv_init(&log->l_flush_wait, 0, "flush_wait"); - xlog_trace_loggrant_alloc(log); /* log record size must be multiple of BBSIZE; see xlog_rec_header_t */ ASSERT((XFS_BUF_SIZE(bp) & BBMASK) == 0); @@ -1305,8 +1226,6 @@ xlog_alloc_log(xfs_mount_t *mp, sv_init(&iclog->ic_force_wait, SV_DEFAULT, "iclog-force"); sv_init(&iclog->ic_write_wait, SV_DEFAULT, "iclog-write"); - xlog_trace_iclog_alloc(iclog); - iclogp = &iclog->ic_next; } *iclogp = log->l_iclog; /* complete ring */ @@ -1321,13 +1240,11 @@ out_free_iclog: sv_destroy(&iclog->ic_force_wait); sv_destroy(&iclog->ic_write_wait); xfs_buf_free(iclog->ic_bp); - xlog_trace_iclog_dealloc(iclog); } kmem_free(iclog); } spinlock_destroy(&log->l_icloglock); spinlock_destroy(&log->l_grant_lock); - xlog_trace_loggrant_dealloc(log); xfs_buf_free(log->l_xbuf); out_free_log: kmem_free(log); @@ -1607,7 +1524,6 @@ xlog_dealloc_log(xlog_t *log) sv_destroy(&iclog->ic_force_wait); sv_destroy(&iclog->ic_write_wait); xfs_buf_free(iclog->ic_bp); - xlog_trace_iclog_dealloc(iclog); next_iclog = iclog->ic_next; kmem_free(iclog); iclog = next_iclog; @@ -1616,7 +1532,6 @@ xlog_dealloc_log(xlog_t *log) spinlock_destroy(&log->l_grant_lock); xfs_buf_free(log->l_xbuf); - xlog_trace_loggrant_dealloc(log); log->l_mp->m_log = NULL; kmem_free(log); } /* xlog_dealloc_log */ @@ -2414,7 +2329,6 @@ restart: iclog = log->l_iclog; if (iclog->ic_state != XLOG_STATE_ACTIVE) { - xlog_trace_iclog(iclog, XLOG_TRACE_SLEEP_FLUSH); XFS_STATS_INC(xs_log_noiclogs); /* Wait for log writes to have flushed */ @@ -2520,13 +2434,15 @@ xlog_grant_log_space(xlog_t *log, /* Is there space or do we need to sleep? */ spin_lock(&log->l_grant_lock); - xlog_trace_loggrant(log, tic, "xlog_grant_log_space: enter"); + + trace_xfs_log_grant_enter(log, tic); /* something is already sleeping; insert new transaction at end */ if (log->l_reserve_headq) { xlog_ins_ticketq(&log->l_reserve_headq, tic); - xlog_trace_loggrant(log, tic, - "xlog_grant_log_space: sleep 1"); + + trace_xfs_log_grant_sleep1(log, tic); + /* * Gotta check this before going to sleep, while we're * holding the grant lock. @@ -2540,8 +2456,7 @@ xlog_grant_log_space(xlog_t *log, * If we got an error, and the filesystem is shutting down, * we'll catch it down below. So just continue... 
*/ - xlog_trace_loggrant(log, tic, - "xlog_grant_log_space: wake 1"); + trace_xfs_log_grant_wake1(log, tic); spin_lock(&log->l_grant_lock); } if (tic->t_flags & XFS_LOG_PERM_RESERV) @@ -2558,8 +2473,9 @@ redo: if (free_bytes < need_bytes) { if ((tic->t_flags & XLOG_TIC_IN_Q) == 0) xlog_ins_ticketq(&log->l_reserve_headq, tic); - xlog_trace_loggrant(log, tic, - "xlog_grant_log_space: sleep 2"); + + trace_xfs_log_grant_sleep2(log, tic); + spin_unlock(&log->l_grant_lock); xlog_grant_push_ail(log->l_mp, need_bytes); spin_lock(&log->l_grant_lock); @@ -2571,8 +2487,8 @@ redo: if (XLOG_FORCED_SHUTDOWN(log)) goto error_return; - xlog_trace_loggrant(log, tic, - "xlog_grant_log_space: wake 2"); + trace_xfs_log_grant_wake2(log, tic); + goto redo; } else if (tic->t_flags & XLOG_TIC_IN_Q) xlog_del_ticketq(&log->l_reserve_headq, tic); @@ -2592,7 +2508,7 @@ redo: ASSERT(log->l_grant_write_bytes <= BBTOB(BLOCK_LSN(tail_lsn))); } #endif - xlog_trace_loggrant(log, tic, "xlog_grant_log_space: exit"); + trace_xfs_log_grant_exit(log, tic); xlog_verify_grant_head(log, 1); spin_unlock(&log->l_grant_lock); return 0; @@ -2600,7 +2516,9 @@ redo: error_return: if (tic->t_flags & XLOG_TIC_IN_Q) xlog_del_ticketq(&log->l_reserve_headq, tic); - xlog_trace_loggrant(log, tic, "xlog_grant_log_space: err_ret"); + + trace_xfs_log_grant_error(log, tic); + /* * If we are failing, make sure the ticket doesn't have any * current reservations. We don't want to add this back when @@ -2640,7 +2558,8 @@ xlog_regrant_write_log_space(xlog_t *log, #endif spin_lock(&log->l_grant_lock); - xlog_trace_loggrant(log, tic, "xlog_regrant_write_log_space: enter"); + + trace_xfs_log_regrant_write_enter(log, tic); if (XLOG_FORCED_SHUTDOWN(log)) goto error_return; @@ -2669,8 +2588,8 @@ xlog_regrant_write_log_space(xlog_t *log, if ((tic->t_flags & XLOG_TIC_IN_Q) == 0) xlog_ins_ticketq(&log->l_write_headq, tic); - xlog_trace_loggrant(log, tic, - "xlog_regrant_write_log_space: sleep 1"); + trace_xfs_log_regrant_write_sleep1(log, tic); + spin_unlock(&log->l_grant_lock); xlog_grant_push_ail(log->l_mp, need_bytes); spin_lock(&log->l_grant_lock); @@ -2685,8 +2604,7 @@ xlog_regrant_write_log_space(xlog_t *log, if (XLOG_FORCED_SHUTDOWN(log)) goto error_return; - xlog_trace_loggrant(log, tic, - "xlog_regrant_write_log_space: wake 1"); + trace_xfs_log_regrant_write_wake1(log, tic); } } @@ -2704,6 +2622,8 @@ redo: spin_lock(&log->l_grant_lock); XFS_STATS_INC(xs_sleep_logspace); + trace_xfs_log_regrant_write_sleep2(log, tic); + sv_wait(&tic->t_wait, PINOD|PLTWAIT, &log->l_grant_lock, s); /* If we're shutting down, this tic is already off the queue */ @@ -2711,8 +2631,7 @@ redo: if (XLOG_FORCED_SHUTDOWN(log)) goto error_return; - xlog_trace_loggrant(log, tic, - "xlog_regrant_write_log_space: wake 2"); + trace_xfs_log_regrant_write_wake2(log, tic); goto redo; } else if (tic->t_flags & XLOG_TIC_IN_Q) xlog_del_ticketq(&log->l_write_headq, tic); @@ -2727,7 +2646,8 @@ redo: } #endif - xlog_trace_loggrant(log, tic, "xlog_regrant_write_log_space: exit"); + trace_xfs_log_regrant_write_exit(log, tic); + xlog_verify_grant_head(log, 1); spin_unlock(&log->l_grant_lock); return 0; @@ -2736,7 +2656,9 @@ redo: error_return: if (tic->t_flags & XLOG_TIC_IN_Q) xlog_del_ticketq(&log->l_reserve_headq, tic); - xlog_trace_loggrant(log, tic, "xlog_regrant_write_log_space: err_ret"); + + trace_xfs_log_regrant_write_error(log, tic); + /* * If we are failing, make sure the ticket doesn't have any * current reservations. 
We don't want to add this back when @@ -2760,8 +2682,8 @@ STATIC void xlog_regrant_reserve_log_space(xlog_t *log, xlog_ticket_t *ticket) { - xlog_trace_loggrant(log, ticket, - "xlog_regrant_reserve_log_space: enter"); + trace_xfs_log_regrant_reserve_enter(log, ticket); + if (ticket->t_cnt > 0) ticket->t_cnt--; @@ -2769,8 +2691,9 @@ xlog_regrant_reserve_log_space(xlog_t *log, xlog_grant_sub_space(log, ticket->t_curr_res); ticket->t_curr_res = ticket->t_unit_res; xlog_tic_reset_res(ticket); - xlog_trace_loggrant(log, ticket, - "xlog_regrant_reserve_log_space: sub current res"); + + trace_xfs_log_regrant_reserve_sub(log, ticket); + xlog_verify_grant_head(log, 1); /* just return if we still have some of the pre-reserved space */ @@ -2780,8 +2703,9 @@ xlog_regrant_reserve_log_space(xlog_t *log, } xlog_grant_add_space_reserve(log, ticket->t_unit_res); - xlog_trace_loggrant(log, ticket, - "xlog_regrant_reserve_log_space: exit"); + + trace_xfs_log_regrant_reserve_exit(log, ticket); + xlog_verify_grant_head(log, 0); spin_unlock(&log->l_grant_lock); ticket->t_curr_res = ticket->t_unit_res; @@ -2811,11 +2735,11 @@ xlog_ungrant_log_space(xlog_t *log, ticket->t_cnt--; spin_lock(&log->l_grant_lock); - xlog_trace_loggrant(log, ticket, "xlog_ungrant_log_space: enter"); + trace_xfs_log_ungrant_enter(log, ticket); xlog_grant_sub_space(log, ticket->t_curr_res); - xlog_trace_loggrant(log, ticket, "xlog_ungrant_log_space: sub current"); + trace_xfs_log_ungrant_sub(log, ticket); /* If this is a permanent reservation ticket, we may be able to free * up more space based on the remaining count. @@ -2825,7 +2749,8 @@ xlog_ungrant_log_space(xlog_t *log, xlog_grant_sub_space(log, ticket->t_unit_res*ticket->t_cnt); } - xlog_trace_loggrant(log, ticket, "xlog_ungrant_log_space: exit"); + trace_xfs_log_ungrant_exit(log, ticket); + xlog_verify_grant_head(log, 1); spin_unlock(&log->l_grant_lock); xfs_log_move_tail(log->l_mp, 1); diff --git a/fs/xfs/xfs_log_priv.h b/fs/xfs/xfs_log_priv.h index 679c7c4926a..d55662db707 100644 --- a/fs/xfs/xfs_log_priv.h +++ b/fs/xfs/xfs_log_priv.h @@ -19,7 +19,6 @@ #define __XFS_LOG_PRIV_H__ struct xfs_buf; -struct ktrace; struct log; struct xlog_ticket; struct xfs_buf_cancel; @@ -135,6 +134,12 @@ static inline uint xlog_get_client_id(__be32 i) #define XLOG_TIC_INITED 0x1 /* has been initialized */ #define XLOG_TIC_PERM_RESERV 0x2 /* permanent reservation */ #define XLOG_TIC_IN_Q 0x4 + +#define XLOG_TIC_FLAGS \ + { XLOG_TIC_INITED, "XLOG_TIC_INITED" }, \ + { XLOG_TIC_PERM_RESERV, "XLOG_TIC_PERM_RESERV" }, \ + { XLOG_TIC_IN_Q, "XLOG_TIC_IN_Q" } + #endif /* __KERNEL__ */ #define XLOG_UNMOUNT_TYPE 0x556e /* Un for Unmount */ @@ -361,9 +366,6 @@ typedef struct xlog_in_core { int ic_bwritecnt; unsigned short ic_state; char *ic_datap; /* pointer to iclog data */ -#ifdef XFS_LOG_TRACE - struct ktrace *ic_trace; -#endif /* Callback structures need their own cacheline */ spinlock_t ic_callback_lock ____cacheline_aligned_in_smp; @@ -429,10 +431,6 @@ typedef struct log { int l_grant_write_cycle; int l_grant_write_bytes; -#ifdef XFS_LOG_TRACE - struct ktrace *l_grant_trace; -#endif - /* The following field are used for debugging; need to hold icloglock */ #ifdef DEBUG char *l_iclog_bak[XLOG_MAX_ICLOGS]; @@ -456,12 +454,6 @@ extern void xlog_put_bp(struct xfs_buf *); extern kmem_zone_t *xfs_log_ticket_zone; -/* iclog tracing */ -#define XLOG_TRACE_GRAB_FLUSH 1 -#define XLOG_TRACE_REL_FLUSH 2 -#define XLOG_TRACE_SLEEP_FLUSH 3 -#define XLOG_TRACE_WAKE_FLUSH 4 - /* * Unmount record type is used as a pseudo 
diff --git a/fs/xfs/xfs_log_recover.c b/fs/xfs/xfs_log_recover.c
index 1ec98ed914d..abc2034d83e 100644
--- a/fs/xfs/xfs_log_recover.c
+++ b/fs/xfs/xfs_log_recover.c
@@ -46,6 +46,7 @@
 #include "xfs_quota.h"
 #include "xfs_rw.h"
 #include "xfs_utils.h"
+#include "xfs_trace.h"
 
 STATIC int	xlog_find_zeroed(xlog_t *, xfs_daddr_t *);
 STATIC int	xlog_clear_stale_blocks(xlog_t *, xfs_lsn_t);
diff --git a/fs/xfs/xfs_mount.c b/fs/xfs/xfs_mount.c
index bfffd6334ab..eb403b40e12 100644
--- a/fs/xfs/xfs_mount.c
+++ b/fs/xfs/xfs_mount.c
@@ -44,6 +44,8 @@
 #include "xfs_quota.h"
 #include "xfs_fsops.h"
 #include "xfs_utils.h"
+#include "xfs_trace.h"
+
 
 STATIC void	xfs_unmountfs_wait(xfs_mount_t *);
diff --git a/fs/xfs/xfs_quota.h b/fs/xfs/xfs_quota.h
index 3ec91ac74c2..91bfd60f4c7 100644
--- a/fs/xfs/xfs_quota.h
+++ b/fs/xfs/xfs_quota.h
@@ -92,6 +92,14 @@ typedef struct xfs_dqblk {
 
 #define XFS_DQ_ALLTYPES		(XFS_DQ_USER|XFS_DQ_PROJ|XFS_DQ_GROUP)
 
+#define XFS_DQ_FLAGS \
+	{ XFS_DQ_USER,		"USER" }, \
+	{ XFS_DQ_PROJ,		"PROJ" }, \
+	{ XFS_DQ_GROUP,		"GROUP" }, \
+	{ XFS_DQ_DIRTY,		"DIRTY" }, \
+	{ XFS_DQ_WANT,		"WANT" }, \
+	{ XFS_DQ_INACTIVE,	"INACTIVE" }
+
 /*
  * In the worst case, when both user and group quotas are on,
  * we can have a max of three dquots changing in a single transaction.
diff --git a/fs/xfs/xfs_rename.c b/fs/xfs/xfs_rename.c
index b81deea0ce1..fc1cda23b81 100644
--- a/fs/xfs/xfs_rename.c
+++ b/fs/xfs/xfs_rename.c
@@ -39,6 +39,7 @@
 #include "xfs_utils.h"
 #include "xfs_trans_space.h"
 #include "xfs_vnodeops.h"
+#include "xfs_trace.h"
 
 /*
diff --git a/fs/xfs/xfs_rtalloc.c b/fs/xfs/xfs_rtalloc.c
index 385f6dceba5..9e15a118536 100644
--- a/fs/xfs/xfs_rtalloc.c
+++ b/fs/xfs/xfs_rtalloc.c
@@ -45,6 +45,7 @@
 #include "xfs_inode_item.h"
 #include "xfs_trans_space.h"
 #include "xfs_utils.h"
+#include "xfs_trace.h"
 
 /*
diff --git a/fs/xfs/xfs_rw.c b/fs/xfs/xfs_rw.c
index 4c199d18f85..5aa07caea5f 100644
--- a/fs/xfs/xfs_rw.c
+++ b/fs/xfs/xfs_rw.c
@@ -44,6 +44,7 @@
 #include "xfs_error.h"
 #include "xfs_buf_item.h"
 #include "xfs_rw.h"
+#include "xfs_trace.h"
 
 /*
  * This is a subroutine for xfs_write() and other writers (xfs_ioctl)
@@ -171,7 +172,6 @@ xfs_bioerror(
 	 * No need to wait until the buffer is unpinned.
 	 * We aren't flushing it.
 	 */
-	xfs_buftrace("XFS IOERROR", bp);
 	XFS_BUF_ERROR(bp, EIO);
 	/*
 	 * We're calling biodone, so delete B_DONE flag. Either way
@@ -205,7 +205,6 @@ xfs_bioerror_relse(
 	ASSERT(XFS_BUF_IODONE_FUNC(bp) != xfs_buf_iodone_callbacks);
 	ASSERT(XFS_BUF_IODONE_FUNC(bp) != xlog_iodone);
 
-	xfs_buftrace("XFS IOERRELSE", bp);
 	fl = XFS_BUF_BFLAGS(bp);
 	/*
 	 * No need to wait until the buffer is unpinned.
diff --git a/fs/xfs/xfs_trans.h b/fs/xfs/xfs_trans.h
index a0574f593f5..ca64f33c63a 100644
--- a/fs/xfs/xfs_trans.h
+++ b/fs/xfs/xfs_trans.h
@@ -100,6 +100,49 @@ typedef struct xfs_trans_header {
 #define XFS_TRANS_TYPE_MAX		41
 /* new transaction types need to be reflected in xfs_logprint(8) */
 
+#define XFS_TRANS_TYPES \
+	{ XFS_TRANS_SETATTR_NOT_SIZE,	"SETATTR_NOT_SIZE" }, \
+	{ XFS_TRANS_SETATTR_SIZE,	"SETATTR_SIZE" }, \
+	{ XFS_TRANS_INACTIVE,		"INACTIVE" }, \
+	{ XFS_TRANS_CREATE,		"CREATE" }, \
+	{ XFS_TRANS_CREATE_TRUNC,	"CREATE_TRUNC" }, \
+	{ XFS_TRANS_TRUNCATE_FILE,	"TRUNCATE_FILE" }, \
+	{ XFS_TRANS_REMOVE,		"REMOVE" }, \
+	{ XFS_TRANS_LINK,		"LINK" }, \
+	{ XFS_TRANS_RENAME,		"RENAME" }, \
+	{ XFS_TRANS_MKDIR,		"MKDIR" }, \
+	{ XFS_TRANS_RMDIR,		"RMDIR" }, \
+	{ XFS_TRANS_SYMLINK,		"SYMLINK" }, \
+	{ XFS_TRANS_SET_DMATTRS,	"SET_DMATTRS" }, \
+	{ XFS_TRANS_GROWFS,		"GROWFS" }, \
+	{ XFS_TRANS_STRAT_WRITE,	"STRAT_WRITE" }, \
+	{ XFS_TRANS_DIOSTRAT,		"DIOSTRAT" }, \
+	{ XFS_TRANS_WRITEID,		"WRITEID" }, \
+	{ XFS_TRANS_ADDAFORK,		"ADDAFORK" }, \
+	{ XFS_TRANS_ATTRINVAL,		"ATTRINVAL" }, \
+	{ XFS_TRANS_ATRUNCATE,		"ATRUNCATE" }, \
+	{ XFS_TRANS_ATTR_SET,		"ATTR_SET" }, \
+	{ XFS_TRANS_ATTR_RM,		"ATTR_RM" }, \
+	{ XFS_TRANS_ATTR_FLAG,		"ATTR_FLAG" }, \
+	{ XFS_TRANS_CLEAR_AGI_BUCKET,	"CLEAR_AGI_BUCKET" }, \
+	{ XFS_TRANS_QM_SBCHANGE,	"QM_SBCHANGE" }, \
+	{ XFS_TRANS_QM_QUOTAOFF,	"QM_QUOTAOFF" }, \
+	{ XFS_TRANS_QM_DQALLOC,		"QM_DQALLOC" }, \
+	{ XFS_TRANS_QM_SETQLIM,		"QM_SETQLIM" }, \
+	{ XFS_TRANS_QM_DQCLUSTER,	"QM_DQCLUSTER" }, \
+	{ XFS_TRANS_QM_QINOCREATE,	"QM_QINOCREATE" }, \
+	{ XFS_TRANS_QM_QUOTAOFF_END,	"QM_QOFF_END" }, \
+	{ XFS_TRANS_SB_UNIT,		"SB_UNIT" }, \
+	{ XFS_TRANS_FSYNC_TS,		"FSYNC_TS" }, \
+	{ XFS_TRANS_GROWFSRT_ALLOC,	"GROWFSRT_ALLOC" }, \
+	{ XFS_TRANS_GROWFSRT_ZERO,	"GROWFSRT_ZERO" }, \
+	{ XFS_TRANS_GROWFSRT_FREE,	"GROWFSRT_FREE" }, \
+	{ XFS_TRANS_SWAPEXT,		"SWAPEXT" }, \
+	{ XFS_TRANS_SB_COUNT,		"SB_COUNT" }, \
+	{ XFS_TRANS_DUMMY1,		"DUMMY1" }, \
+	{ XFS_TRANS_DUMMY2,		"DUMMY2" }, \
+	{ XLOG_UNMOUNT_REC_TYPE,	"UNMOUNT" }
+
 /*
  * This structure is used to track log items associated with
  * a transaction.  It points to the log item and keeps some
@@ -782,6 +825,10 @@ typedef struct xfs_log_item {
 #define	XFS_LI_IN_AIL	0x1
 #define	XFS_LI_ABORTED	0x2
 
+#define XFS_LI_FLAGS \
+	{ XFS_LI_IN_AIL,	"IN_AIL" }, \
+	{ XFS_LI_ABORTED,	"ABORTED" }
+
 typedef struct xfs_item_ops {
 	uint (*iop_size)(xfs_log_item_t *);
 	void (*iop_format)(xfs_log_item_t *, struct xfs_log_iovec *);
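XFS_LI_FLAGS above is another bitmask table for __print_flags(), while XFS_TRANS_TYPES maps a single numeric transaction type to a name and therefore pairs with __print_symbolic(). Again an illustrative sketch only, under assumed event name and fields, not code from this patch:

TRACE_EVENT(xfs_trans_type_demo,
	TP_PROTO(struct xfs_trans *tp),
	TP_ARGS(tp),
	TP_STRUCT__entry(
		__field(unsigned int, type)
	),
	TP_fast_assign(
		__entry->type = tp->t_type;
	),
	/* __print_symbolic() prints e.g. "RENAME" instead of a raw number */
	TP_printk("type %s",
		  __print_symbolic(__entry->type, XFS_TRANS_TYPES))
);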
diff --git a/fs/xfs/xfs_trans_buf.c b/fs/xfs/xfs_trans_buf.c
index 03a1f701fea..49130628d5e 100644
--- a/fs/xfs/xfs_trans_buf.c
+++ b/fs/xfs/xfs_trans_buf.c
@@ -38,6 +38,7 @@
 #include "xfs_trans_priv.h"
 #include "xfs_error.h"
 #include "xfs_rw.h"
+#include "xfs_trace.h"
 
 STATIC xfs_buf_t *xfs_trans_buf_item_match(xfs_trans_t *, xfs_buftarg_t *,
@@ -95,26 +96,23 @@ xfs_trans_get_buf(xfs_trans_t	*tp,
 	}
 	if (bp != NULL) {
 		ASSERT(XFS_BUF_VALUSEMA(bp) <= 0);
-		if (XFS_FORCED_SHUTDOWN(tp->t_mountp)) {
-			xfs_buftrace("TRANS GET RECUR SHUT", bp);
+		if (XFS_FORCED_SHUTDOWN(tp->t_mountp))
 			XFS_BUF_SUPER_STALE(bp);
-		}
+
 		/*
 		 * If the buffer is stale then it was binval'ed
 		 * since last read. This doesn't matter since the
 		 * caller isn't allowed to use the data anyway.
 		 */
-		else if (XFS_BUF_ISSTALE(bp)) {
-			xfs_buftrace("TRANS GET RECUR STALE", bp);
+		else if (XFS_BUF_ISSTALE(bp))
 			ASSERT(!XFS_BUF_ISDELAYWRITE(bp));
-		}
+
 		ASSERT(XFS_BUF_FSPRIVATE2(bp, xfs_trans_t *) == tp);
 		bip = XFS_BUF_FSPRIVATE(bp, xfs_buf_log_item_t *);
 		ASSERT(bip != NULL);
 		ASSERT(atomic_read(&bip->bli_refcount) > 0);
 		bip->bli_recur++;
-		xfs_buftrace("TRANS GET RECUR", bp);
-		xfs_buf_item_trace("GET RECUR", bip);
+		trace_xfs_trans_get_buf_recur(bip);
 		return (bp);
 	}
 
@@ -166,8 +164,7 @@ xfs_trans_get_buf(xfs_trans_t	*tp,
 	 */
 	XFS_BUF_SET_FSPRIVATE2(bp, tp);
 
-	xfs_buftrace("TRANS GET", bp);
-	xfs_buf_item_trace("GET", bip);
+	trace_xfs_trans_get_buf(bip);
 	return (bp);
 }
 
@@ -207,7 +204,7 @@ xfs_trans_getsb(xfs_trans_t	*tp,
 		ASSERT(bip != NULL);
 		ASSERT(atomic_read(&bip->bli_refcount) > 0);
 		bip->bli_recur++;
-		xfs_buf_item_trace("GETSB RECUR", bip);
+		trace_xfs_trans_getsb_recur(bip);
 		return (bp);
 	}
 
@@ -249,7 +246,7 @@ xfs_trans_getsb(xfs_trans_t	*tp,
 	 */
 	XFS_BUF_SET_FSPRIVATE2(bp, tp);
 
-	xfs_buf_item_trace("GETSB", bip);
+	trace_xfs_trans_getsb(bip);
 	return (bp);
 }
 
@@ -347,7 +344,7 @@ xfs_trans_read_buf(
 		ASSERT(XFS_BUF_FSPRIVATE(bp, void *) != NULL);
 		ASSERT((XFS_BUF_ISERROR(bp)) == 0);
 		if (!(XFS_BUF_ISDONE(bp))) {
-			xfs_buftrace("READ_BUF_INCORE !DONE", bp);
+			trace_xfs_trans_read_buf_io(bp, _RET_IP_);
 			ASSERT(!XFS_BUF_ISASYNC(bp));
 			XFS_BUF_READ(bp);
 			xfsbdstrat(tp->t_mountp, bp);
@@ -372,7 +369,7 @@ xfs_trans_read_buf(
 		 * brelse it either. Just get out.
 		 */
 		if (XFS_FORCED_SHUTDOWN(mp)) {
-			xfs_buftrace("READ_BUF_INCORE XFSSHUTDN", bp);
+			trace_xfs_trans_read_buf_shut(bp, _RET_IP_);
 			*bpp = NULL;
 			return XFS_ERROR(EIO);
 		}
@@ -382,7 +379,7 @@ xfs_trans_read_buf(
 		bip->bli_recur++;
 		ASSERT(atomic_read(&bip->bli_refcount) > 0);
 
-		xfs_buf_item_trace("READ RECUR", bip);
+		trace_xfs_trans_read_buf_recur(bip);
 		*bpp = bp;
 		return 0;
 	}
@@ -402,7 +399,6 @@ xfs_trans_read_buf(
 	}
 	if (XFS_BUF_GETERROR(bp) != 0) {
 		XFS_BUF_SUPER_STALE(bp);
-		xfs_buftrace("READ ERROR", bp);
 		error = XFS_BUF_GETERROR(bp);
 
 		xfs_ioerror_alert("xfs_trans_read_buf", mp,
@@ -461,8 +457,7 @@ xfs_trans_read_buf(
 	 */
 	XFS_BUF_SET_FSPRIVATE2(bp, tp);
 
-	xfs_buftrace("TRANS READ", bp);
-	xfs_buf_item_trace("READ", bip);
+	trace_xfs_trans_read_buf(bip);
 	*bpp = bp;
 	return 0;
 
@@ -480,7 +475,7 @@ shutdown_abort:
 	ASSERT((XFS_BUF_BFLAGS(bp) & (XFS_B_STALE|XFS_B_DELWRI)) !=
 				(XFS_B_STALE|XFS_B_DELWRI));
 
-	xfs_buftrace("READ_BUF XFSSHUTDN", bp);
+	trace_xfs_trans_read_buf_shut(bp, _RET_IP_);
 	xfs_buf_relse(bp);
 	*bpp = NULL;
 	return XFS_ERROR(EIO);
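Events such as xfs_trans_read_buf_io and xfs_trans_read_buf_shut above are passed _RET_IP_, i.e. the caller's return address, so a single event can still tell its several call sites apart in the trace output. A minimal sketch of such an event, with an assumed name and field layout rather than the definition from this patch:

TRACE_EVENT(xfs_buf_caller_demo,
	TP_PROTO(struct xfs_buf *bp, unsigned long caller_ip),
	TP_ARGS(bp, caller_ip),
	TP_STRUCT__entry(
		__field(xfs_daddr_t, blkno)
		__field(unsigned long, caller_ip)
	),
	TP_fast_assign(
		__entry->blkno = XFS_BUF_ADDR(bp);
		__entry->caller_ip = caller_ip;
	),
	/* %pS resolves the recorded return address to a symbol name */
	TP_printk("blkno 0x%llx caller %pS",
		  (unsigned long long)__entry->blkno,
		  (void *)__entry->caller_ip)
);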
@@ -546,13 +541,14 @@ xfs_trans_brelse(xfs_trans_t	*tp,
 	lidp = xfs_trans_find_item(tp, (xfs_log_item_t*)bip);
 	ASSERT(lidp != NULL);
 
+	trace_xfs_trans_brelse(bip);
+
 	/*
 	 * If the release is just for a recursive lock,
 	 * then decrement the count and return.
 	 */
 	if (bip->bli_recur > 0) {
 		bip->bli_recur--;
-		xfs_buf_item_trace("RELSE RECUR", bip);
 		return;
 	}
 
@@ -560,10 +556,8 @@ xfs_trans_brelse(xfs_trans_t	*tp,
 	 * If the buffer is dirty within this transaction, we can't
 	 * release it until we commit.
 	 */
-	if (lidp->lid_flags & XFS_LID_DIRTY) {
-		xfs_buf_item_trace("RELSE DIRTY", bip);
+	if (lidp->lid_flags & XFS_LID_DIRTY)
 		return;
-	}
 
 	/*
 	 * If the buffer has been invalidated, then we can't release
 	 * as part of this transaction. This prevents us from pulling
 	 * the item from the AIL before we should.
 	 */
-	if (bip->bli_flags & XFS_BLI_STALE) {
-		xfs_buf_item_trace("RELSE STALE", bip);
+	if (bip->bli_flags & XFS_BLI_STALE)
 		return;
-	}
 
 	ASSERT(!(bip->bli_flags & XFS_BLI_LOGGED));
-	xfs_buf_item_trace("RELSE", bip);
 
 	/*
 	 * Free up the log item descriptor tracking the released item.
@@ -674,7 +665,7 @@ xfs_trans_bjoin(xfs_trans_t	*tp,
 	 */
 	XFS_BUF_SET_FSPRIVATE2(bp, tp);
 
-	xfs_buf_item_trace("BJOIN", bip);
+	trace_xfs_trans_bjoin(bip);
 }
 
 /*
@@ -698,7 +689,7 @@ xfs_trans_bhold(xfs_trans_t	*tp,
 	ASSERT(!(bip->bli_format.blf_flags & XFS_BLI_CANCEL));
 	ASSERT(atomic_read(&bip->bli_refcount) > 0);
 	bip->bli_flags |= XFS_BLI_HOLD;
-	xfs_buf_item_trace("BHOLD", bip);
+	trace_xfs_trans_bhold(bip);
 }
 
 /*
@@ -721,7 +712,8 @@ xfs_trans_bhold_release(xfs_trans_t	*tp,
 	ASSERT(atomic_read(&bip->bli_refcount) > 0);
 	ASSERT(bip->bli_flags & XFS_BLI_HOLD);
 	bip->bli_flags &= ~XFS_BLI_HOLD;
-	xfs_buf_item_trace("BHOLD RELEASE", bip);
+
+	trace_xfs_trans_bhold_release(bip);
 }
 
 /*
@@ -767,6 +759,8 @@ xfs_trans_log_buf(xfs_trans_t	*tp,
 	XFS_BUF_SET_IODONE_FUNC(bp, xfs_buf_iodone_callbacks);
 	bip->bli_item.li_cb = (void(*)(xfs_buf_t*,xfs_log_item_t*))xfs_buf_iodone;
 
+	trace_xfs_trans_log_buf(bip);
+
 	/*
 	 * If we invalidated the buffer within this transaction, then
 	 * cancel the invalidation now that we're dirtying the buffer
@@ -774,7 +768,6 @@ xfs_trans_log_buf(xfs_trans_t	*tp,
 	 * because we have a reference to the buffer this entire time.
 	 */
 	if (bip->bli_flags & XFS_BLI_STALE) {
-		xfs_buf_item_trace("BLOG UNSTALE", bip);
 		bip->bli_flags &= ~XFS_BLI_STALE;
 		ASSERT(XFS_BUF_ISSTALE(bp));
 		XFS_BUF_UNSTALE(bp);
@@ -789,7 +782,6 @@ xfs_trans_log_buf(xfs_trans_t	*tp,
 	lidp->lid_flags &= ~XFS_LID_BUF_STALE;
 	bip->bli_flags |= XFS_BLI_LOGGED;
 	xfs_buf_item_log(bip, first, last);
-	xfs_buf_item_trace("BLOG", bip);
 }
 
 
@@ -828,6 +820,8 @@ xfs_trans_binval(
 	ASSERT(lidp != NULL);
 	ASSERT(atomic_read(&bip->bli_refcount) > 0);
 
+	trace_xfs_trans_binval(bip);
+
 	if (bip->bli_flags & XFS_BLI_STALE) {
 		/*
 		 * If the buffer is already invalidated, then
@@ -840,8 +834,6 @@ xfs_trans_binval(
 		ASSERT(bip->bli_format.blf_flags & XFS_BLI_CANCEL);
 		ASSERT(lidp->lid_flags & XFS_LID_DIRTY);
 		ASSERT(tp->t_flags & XFS_TRANS_DIRTY);
-		xfs_buftrace("XFS_BINVAL RECUR", bp);
-		xfs_buf_item_trace("BINVAL RECUR", bip);
 		return;
 	}
 
@@ -875,8 +867,6 @@ xfs_trans_binval(
 		(bip->bli_format.blf_map_size * sizeof(uint)));
 	lidp->lid_flags |= XFS_LID_DIRTY|XFS_LID_BUF_STALE;
 	tp->t_flags |= XFS_TRANS_DIRTY;
-	xfs_buftrace("XFS_BINVAL", bp);
-	xfs_buf_item_trace("BINVAL", bip);
 }
 
 /*
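The one-tracepoint-per-callsite events used throughout xfs_trans_buf.c above (xfs_trans_get_buf, xfs_trans_brelse, xfs_trans_binval, ...) all take the same xfs_buf_log_item argument, so they would naturally share a single event class and differ only in name. The class name and fields below are assumptions for illustration, not the definitions this patch adds to xfs_trace.h:

DECLARE_EVENT_CLASS(xfs_buf_item_demo_class,
	TP_PROTO(struct xfs_buf_log_item *bip),
	TP_ARGS(bip),
	TP_STRUCT__entry(
		__field(xfs_daddr_t, blkno)
		__field(unsigned, bli_flags)
	),
	TP_fast_assign(
		__entry->blkno = XFS_BUF_ADDR(bip->bli_buf);
		__entry->bli_flags = bip->bli_flags;
	),
	TP_printk("blkno 0x%llx bli_flags 0x%x",
		  (unsigned long long)__entry->blkno, __entry->bli_flags)
);

#define DEFINE_BUF_ITEM_DEMO_EVENT(name) \
DEFINE_EVENT(xfs_buf_item_demo_class, name, \
	TP_PROTO(struct xfs_buf_log_item *bip), \
	TP_ARGS(bip))
DEFINE_BUF_ITEM_DEMO_EVENT(xfs_trans_get_buf_demo);
DEFINE_BUF_ITEM_DEMO_EVENT(xfs_trans_brelse_demo);

Defining each call site's event this way keeps the per-event cost down to a name, while the record layout and format string live in one place.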
*/ - xfs_itrace_ref(ip); ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL)); /* @@ -2003,9 +2002,6 @@ xfs_remove( if (!is_dir && link_zero && xfs_inode_is_filestream(ip)) xfs_filestream_deassociate(ip); - xfs_itrace_exit(ip); - xfs_itrace_exit(dp); - std_return: if (DM_EVENT_ENABLED(dp, DM_EVENT_POSTREMOVE)) { XFS_SEND_NAMESP(mp, DM_EVENT_POSTREMOVE, dp, DM_RIGHT_NULL, @@ -2302,7 +2298,6 @@ xfs_symlink( goto error_return; goto error1; } - xfs_itrace_ref(ip); /* * An error after we've joined dp to the transaction will result in the @@ -2845,7 +2840,6 @@ xfs_free_file_space( ioffset = offset & ~(rounding - 1); if (VN_CACHED(VFS_I(ip)) != 0) { - xfs_inval_cached_trace(ip, ioffset, -1, ioffset, -1); error = xfs_flushinval_pages(ip, ioffset, -1, FI_REMAPF_LOCKED); if (error) goto out_unlock_iolock; |