author      Nathan Scott <nathans@sgi.com>   2006-09-28 11:03:05 +1000
committer   Tim Shimmin <tes@sgi.com>        2006-09-28 11:03:05 +1000
commit      efb8ad7e9431a430a75d44288614cf6047ff4baa
tree        cbebf7d8bf1b5f25235756c0aa7ff610d4cb4055 /fs/xfs
parent      3f89243c5b987dd55f8eec6fd54be05887d69bc6
[XFS] Add a debug flag for allocations which are known to be larger than one page.
SGI-PV: 955302
SGI-Modid: xfs-linux-melb:xfs-kern:26800a
Signed-off-by: Nathan Scott <nathans@sgi.com>
Signed-off-by: Tim Shimmin <tes@sgi.com>
Diffstat (limited to 'fs/xfs')
-rw-r--r--  fs/xfs/linux-2.6/kmem.c     8
-rw-r--r--  fs/xfs/linux-2.6/kmem.h     3
-rw-r--r--  fs/xfs/linux-2.6/xfs_buf.c  2
-rw-r--r--  fs/xfs/quota/xfs_qm.c       6
-rw-r--r--  fs/xfs/support/ktrace.c     2
-rw-r--r--  fs/xfs/xfs_iget.c           4
-rw-r--r--  fs/xfs/xfs_log.c            2
7 files changed, 18 insertions, 9 deletions
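
For orientation, a caller-side sketch of how the new KM_LARGE flag in the hunks below is meant to be used. This is a hypothetical call site, not part of the patch; the function and parameter names are made up. A caller that knowingly allocates more than a page ORs KM_LARGE into its kmem_alloc()/kmem_zalloc() flags, which keeps the new DEBUG-only check in kmem_alloc() quiet, while an unannotated over-page request would hit the printk()/dump_stack() warning on DEBUG kernels.

/*
 * Hypothetical call site, for illustration only (not from this patch).
 * The allocation is known to exceed PAGE_SIZE, so it is annotated with
 * KM_LARGE; kmem_zalloc() passes the flags through to kmem_alloc(),
 * where the new DEBUG check sees KM_LARGE and stays silent.
 */
static void *
example_alloc_big_table(size_t nentries, size_t entry_size)
{
	size_t	size = nentries * entry_size;	/* assumed > PAGE_SIZE */

	/* Annotated as an intentionally large allocation: no debug warning. */
	return kmem_zalloc(size, KM_SLEEP | KM_LARGE);
}

Call sites whose allocations stay at or below a page keep passing KM_SLEEP and friends unchanged; only the known-large sites listed in the diffstat above gain the flag.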
diff --git a/fs/xfs/linux-2.6/kmem.c b/fs/xfs/linux-2.6/kmem.c
index aba7fcf881a..f77fe5c8fcc 100644
--- a/fs/xfs/linux-2.6/kmem.c
+++ b/fs/xfs/linux-2.6/kmem.c
@@ -34,6 +34,14 @@ kmem_alloc(size_t size, unsigned int __nocast flags)
 	gfp_t		lflags = kmem_flags_convert(flags);
 	void		*ptr;
 
+#ifdef DEBUG
+	if (unlikely(!(flags & KM_LARGE) && (size > PAGE_SIZE))) {
+		printk(KERN_WARNING "Large %s attempt, size=%ld\n",
+			__FUNCTION__, (long)size);
+		dump_stack();
+	}
+#endif
+
 	do {
 		if (size < MAX_SLAB_SIZE || retries > MAX_VMALLOCS)
 			ptr = kmalloc(size, lflags);
diff --git a/fs/xfs/linux-2.6/kmem.h b/fs/xfs/linux-2.6/kmem.h
index 0e8293c5a32..6d24274fb3c 100644
--- a/fs/xfs/linux-2.6/kmem.h
+++ b/fs/xfs/linux-2.6/kmem.h
@@ -30,6 +30,7 @@
 #define KM_NOSLEEP	0x0002u
 #define KM_NOFS		0x0004u
 #define KM_MAYFAIL	0x0008u
+#define KM_LARGE	0x0010u
 
 /*
  * We use a special process flag to avoid recursive callbacks into
@@ -41,7 +42,7 @@ kmem_flags_convert(unsigned int __nocast flags)
 {
 	gfp_t	lflags;
 
-	BUG_ON(flags & ~(KM_SLEEP|KM_NOSLEEP|KM_NOFS|KM_MAYFAIL));
+	BUG_ON(flags & ~(KM_SLEEP|KM_NOSLEEP|KM_NOFS|KM_MAYFAIL|KM_LARGE));
 
 	if (flags & KM_NOSLEEP) {
 		lflags = GFP_ATOMIC | __GFP_NOWARN;
diff --git a/fs/xfs/linux-2.6/xfs_buf.c b/fs/xfs/linux-2.6/xfs_buf.c
index 247adced6e1..58b6599de61 100644
--- a/fs/xfs/linux-2.6/xfs_buf.c
+++ b/fs/xfs/linux-2.6/xfs_buf.c
@@ -768,7 +768,7 @@ xfs_buf_get_noaddr(
 	_xfs_buf_initialize(bp, target, 0, len, 0);
 
  try_again:
-	data = kmem_alloc(malloc_len, KM_SLEEP | KM_MAYFAIL);
+	data = kmem_alloc(malloc_len, KM_SLEEP | KM_MAYFAIL | KM_LARGE);
 	if (unlikely(data == NULL))
 		goto fail_free_buf;
diff --git a/fs/xfs/quota/xfs_qm.c b/fs/xfs/quota/xfs_qm.c
index e23e45535c4..3f86c7c0464 100644
--- a/fs/xfs/quota/xfs_qm.c
+++ b/fs/xfs/quota/xfs_qm.c
@@ -112,17 +112,17 @@ xfs_Gqm_init(void)
 {
 	xfs_dqhash_t	*udqhash, *gdqhash;
 	xfs_qm_t	*xqm;
-	uint		i, hsize, flags = KM_SLEEP | KM_MAYFAIL;
+	uint		i, hsize, flags = KM_SLEEP | KM_MAYFAIL | KM_LARGE;
 
 	/*
 	 * Initialize the dquot hash tables.
 	 */
 	hsize = XFS_QM_HASHSIZE_HIGH;
-	while (!(udqhash = kmem_zalloc(hsize * sizeof(xfs_dqhash_t), flags))) {
+	while (!(udqhash = kmem_zalloc(hsize * sizeof(*udqhash), flags))) {
 		if ((hsize >>= 1) <= XFS_QM_HASHSIZE_LOW)
 			flags = KM_SLEEP;
 	}
-	gdqhash = kmem_zalloc(hsize * sizeof(xfs_dqhash_t), KM_SLEEP);
+	gdqhash = kmem_zalloc(hsize * sizeof(*gdqhash), KM_SLEEP | KM_LARGE);
 	ndquot = hsize << 8;
 
 	xqm = kmem_zalloc(sizeof(xfs_qm_t), KM_SLEEP);
diff --git a/fs/xfs/support/ktrace.c b/fs/xfs/support/ktrace.c
index addf5a7ea06..5cf2e86caa7 100644
--- a/fs/xfs/support/ktrace.c
+++ b/fs/xfs/support/ktrace.c
@@ -75,7 +75,7 @@ ktrace_alloc(int nentries, unsigned int __nocast sleep)
 						sleep);
 	} else {
 		ktep = (ktrace_entry_t*)kmem_zalloc((nentries * sizeof(*ktep)),
-						    sleep);
+						    sleep | KM_LARGE);
 	}
 
 	if (ktep == NULL) {
diff --git a/fs/xfs/xfs_iget.c b/fs/xfs/xfs_iget.c
index 109000a4bc8..30eebc2fd90 100644
--- a/fs/xfs/xfs_iget.c
+++ b/fs/xfs/xfs_iget.c
@@ -50,7 +50,7 @@ void
 xfs_ihash_init(xfs_mount_t *mp)
 {
 	__uint64_t	icount;
-	uint		i, flags = KM_SLEEP | KM_MAYFAIL;
+	uint		i, flags = KM_SLEEP | KM_MAYFAIL | KM_LARGE;
 
 	if (!mp->m_ihsize) {
 		icount = mp->m_maxicount ? mp->m_maxicount :
@@ -95,7 +95,7 @@ xfs_chash_init(xfs_mount_t *mp)
 	mp->m_chsize = min_t(uint, mp->m_chsize, mp->m_ihsize);
 	mp->m_chash = (xfs_chash_t *)kmem_zalloc(mp->m_chsize
 						 * sizeof(xfs_chash_t),
-						 KM_SLEEP);
+						 KM_SLEEP | KM_LARGE);
 	for (i = 0; i < mp->m_chsize; i++) {
 		spinlock_init(&mp->m_chash[i].ch_lock,"xfshash");
 	}
diff --git a/fs/xfs/xfs_log.c b/fs/xfs/xfs_log.c
index 2a46919110f..ac999789a44 100644
--- a/fs/xfs/xfs_log.c
+++ b/fs/xfs/xfs_log.c
@@ -1196,7 +1196,7 @@ xlog_alloc_log(xfs_mount_t	*mp,
 			kmem_zalloc(sizeof(xlog_in_core_t), KM_SLEEP);
 		iclog = *iclogp;
 		iclog->hic_data = (xlog_in_core_2_t *)
-			kmem_zalloc(iclogsize, KM_SLEEP);
+			kmem_zalloc(iclogsize, KM_SLEEP | KM_LARGE);
 
 		iclog->ic_prev = prev_iclog;
 		prev_iclog = iclog;