author		Ben Dooks <ben-linux@fluff.org>	2008-10-21 11:12:00 +0100
committer	Ben Dooks <ben-linux@fluff.org>	2008-10-21 11:12:00 +0100
commit		c3380942e6699ed5b3e3f37b49ceb724b7699813 (patch)
tree		516d791cdfd1c7f99a29f71e1a7bba8f75af96e1 /include/linux/pagemap.h
parent		0ffda6cca2c66e42d0ad65719f58c637ed180b05 (diff)
parent		2515ddc6db8eb49a79f0fe5e67ff09ac7c81eab4 (diff)
Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux-2.6 into s3c64xx
Diffstat (limited to 'include/linux/pagemap.h')
-rw-r--r--	include/linux/pagemap.h	44
1 file changed, 36 insertions, 8 deletions
diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
index 5da31c12101..709742be02f 100644
--- a/include/linux/pagemap.h
+++ b/include/linux/pagemap.h
@@ -32,6 +32,34 @@ static inline void mapping_set_error(struct address_space *mapping, int error)
}
}
+#ifdef CONFIG_UNEVICTABLE_LRU
+#define AS_UNEVICTABLE (__GFP_BITS_SHIFT + 2) /* e.g., ramdisk, SHM_LOCK */
+
+static inline void mapping_set_unevictable(struct address_space *mapping)
+{
+ set_bit(AS_UNEVICTABLE, &mapping->flags);
+}
+
+static inline void mapping_clear_unevictable(struct address_space *mapping)
+{
+ clear_bit(AS_UNEVICTABLE, &mapping->flags);
+}
+
+static inline int mapping_unevictable(struct address_space *mapping)
+{
+ if (likely(mapping))
+ return test_bit(AS_UNEVICTABLE, &mapping->flags);
+ return !!mapping;
+}
+#else
+static inline void mapping_set_unevictable(struct address_space *mapping) { }
+static inline void mapping_clear_unevictable(struct address_space *mapping) { }
+static inline int mapping_unevictable(struct address_space *mapping)
+{
+ return 0;
+}
+#endif
+
static inline gfp_t mapping_gfp_mask(struct address_space * mapping)
{
return (__force gfp_t)mapping->flags & __GFP_BITS_MASK;
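
The new AS_UNEVICTABLE bit lives in the same flags word that mapping_gfp_mask() reads: bits below __GFP_BITS_SHIFT hold the mapping's allocation mask, while the bits above it (AS_EIO, AS_ENOSPC, and now AS_UNEVICTABLE) serve as status flags, which is why the new flag sits at __GFP_BITS_SHIFT + 2. A minimal sketch of how a caller such as a ramdisk driver or the SHM_LOCK path might use the helpers; the function names below are hypothetical and not part of this patch:

	#include <linux/pagemap.h>

	/* Hypothetical helpers, for illustration only: pin a mapping's
	 * pages so that vmscan skips them, then make them reclaimable
	 * again. */
	static void pin_mapping_in_memory(struct address_space *mapping)
	{
		/* Pages of this mapping are now skipped by the LRU scanner. */
		mapping_set_unevictable(mapping);
	}

	static void unpin_mapping(struct address_space *mapping)
	{
		mapping_clear_unevictable(mapping);
		/* Reclaim may evict these pages again from now on. */
	}

With CONFIG_UNEVICTABLE_LRU disabled, both calls compile away to nothing and mapping_unevictable() is constant 0, so callers need no #ifdef of their own.
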
@@ -271,19 +299,19 @@ extern int __lock_page_killable(struct page *page);
extern void __lock_page_nosync(struct page *page);
extern void unlock_page(struct page *page);
-static inline void set_page_locked(struct page *page)
+static inline void __set_page_locked(struct page *page)
{
- set_bit(PG_locked, &page->flags);
+ __set_bit(PG_locked, &page->flags);
}
-static inline void clear_page_locked(struct page *page)
+static inline void __clear_page_locked(struct page *page)
{
- clear_bit(PG_locked, &page->flags);
+ __clear_bit(PG_locked, &page->flags);
}
static inline int trylock_page(struct page *page)
{
- return !test_and_set_bit(PG_locked, &page->flags);
+ return (likely(!test_and_set_bit_lock(PG_locked, &page->flags)));
}
/*
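
Two distinct changes meet in this hunk. The set/clear helpers gain a double-underscore prefix because they now use the non-atomic __set_bit()/__clear_bit(), which is only safe while no other thread can observe the page. Separately, trylock_page() switches to test_and_set_bit_lock(), which provides acquire ordering on success and can be cheaper than a fully ordered test_and_set_bit() on some architectures. A sketch of typical trylock usage, illustrative rather than taken from this patch:

	#include <linux/pagemap.h>

	static void lock_page_example(struct page *page)
	{
		if (!trylock_page(page))	/* acquire semantics on success */
			lock_page(page);	/* otherwise sleep until PG_locked clears */

		/* ... operate on the locked page here ... */

		unlock_page(page);	/* drops PG_locked with release semantics */
	}
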
@@ -410,17 +438,17 @@ extern void __remove_from_page_cache(struct page *page);
/*
* Like add_to_page_cache_locked, but used to add newly allocated pages:
- * the page is new, so we can just run set_page_locked() against it.
+ * the page is new, so we can just run __set_page_locked() against it.
*/
static inline int add_to_page_cache(struct page *page,
struct address_space *mapping, pgoff_t offset, gfp_t gfp_mask)
{
int error;
- set_page_locked(page);
+ __set_page_locked(page);
error = add_to_page_cache_locked(page, mapping, offset, gfp_mask);
if (unlikely(error))
- clear_page_locked(page);
+ __clear_page_locked(page);
return error;
}
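
The comment above captures why the non-atomic variants suffice here: a freshly allocated page is not yet reachable through the page cache or the LRU, so no other CPU can race on its flags word; the page only becomes visible once add_to_page_cache_locked() succeeds. A sketch of a typical caller, with a hypothetical function name but using only interfaces from this header and its contemporaries:

	#include <linux/pagemap.h>

	static struct page *grab_new_cache_page(struct address_space *mapping,
						pgoff_t index)
	{
		/* page_cache_alloc() applies mapping_gfp_mask() for us. */
		struct page *page = page_cache_alloc(mapping);

		if (!page)
			return NULL;

		/* The page is invisible to everyone else, so the non-atomic
		 * __set_page_locked() inside add_to_page_cache() is safe. */
		if (add_to_page_cache(page, mapping, index,
				      mapping_gfp_mask(mapping))) {
			page_cache_release(page);	/* insertion failed; drop it */
			return NULL;
		}

		return page;	/* locked and in the page cache */
	}

On the error path above, __clear_page_locked() has already run inside add_to_page_cache(), so the caller only needs to drop its reference.
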