 include/linux/pagemap.h | 14 +++++++-------
 mm/swap_state.c         |  4 ++--
 2 files changed, 9 insertions(+), 9 deletions(-)
diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
index 4b6c4d8d26b..7334b2b6c4c 100644
--- a/include/linux/pagemap.h
+++ b/include/linux/pagemap.h
@@ -299,14 +299,14 @@ extern int __lock_page_killable(struct page *page);
extern void __lock_page_nosync(struct page *page);
extern void unlock_page(struct page *page);
-static inline void set_page_locked(struct page *page)
+static inline void __set_page_locked(struct page *page)
{
- set_bit(PG_locked, &page->flags);
+ __set_bit(PG_locked, &page->flags);
}
-static inline void clear_page_locked(struct page *page)
+static inline void __clear_page_locked(struct page *page)
{
- clear_bit(PG_locked, &page->flags);
+ __clear_bit(PG_locked, &page->flags);
}
static inline int trylock_page(struct page *page)
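
The hunk above is the core of the change: set_page_locked()/clear_page_locked() become __set_page_locked()/__clear_page_locked() and switch from the atomic set_bit()/clear_bit() to the non-atomic __set_bit()/__clear_bit(), which is only valid when no other CPU can update page->flags concurrently. Below is a minimal userspace sketch of that distinction, not kernel code; set_bit_atomic(), set_bit_plain(), and PG_LOCKED_BIT are hypothetical stand-ins for set_bit(), __set_bit(), and PG_locked:

/* Sketch: atomic vs. plain bit set on a flags word. */
#include <stdatomic.h>
#include <stdio.h>

#define PG_LOCKED_BIT 0

/* set_bit() analogue: atomic OR, safe under concurrent flag updates. */
static void set_bit_atomic(unsigned int bit, _Atomic unsigned long *flags)
{
	atomic_fetch_or(flags, 1UL << bit);
}

/* __set_bit() analogue: plain OR, valid only while *flags is private,
 * e.g. a page that has not yet been added to the page cache. */
static void set_bit_plain(unsigned int bit, unsigned long *flags)
{
	*flags |= 1UL << bit;
}

int main(void)
{
	_Atomic unsigned long shared = 0;
	unsigned long private = 0;	/* "newly allocated page" */

	set_bit_atomic(PG_LOCKED_BIT, &shared);
	set_bit_plain(PG_LOCKED_BIT, &private);

	printf("shared=%#lx private=%#lx\n", (unsigned long)shared, private);
	return 0;
}

The plain variant skips a locked read-modify-write on a hot path; the cost is that every caller must guarantee exclusive access to the flags word, which the rest of this patch establishes.
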
@@ -438,17 +438,17 @@ extern void __remove_from_page_cache(struct page *page);
/*
* Like add_to_page_cache_locked, but used to add newly allocated pages:
- * the page is new, so we can just run set_page_locked() against it.
+ * the page is new, so we can just run __set_page_locked() against it.
*/
static inline int add_to_page_cache(struct page *page,
struct address_space *mapping, pgoff_t offset, gfp_t gfp_mask)
{
int error;
- set_page_locked(page);
+ __set_page_locked(page);
error = add_to_page_cache_locked(page, mapping, offset, gfp_mask);
if (unlikely(error))
- clear_page_locked(page);
+ __clear_page_locked(page);
return error;
}
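
The comment and code above spell out the invariant: add_to_page_cache() may lock the page with a plain store because the page is not yet visible in the page cache, and may clear the bit the same way on failure because it never became visible. A small userspace sketch of this publish-then-rollback pattern; struct page is a one-field toy here, and table_insert() and FLAG_LOCKED are hypothetical stand-ins for add_to_page_cache_locked() and PG_locked:

/* Sketch: lock privately, publish, roll back privately on failure. */
#include <errno.h>
#include <stdio.h>

#define FLAG_LOCKED (1UL << 0)

struct page { unsigned long flags; };

/* Stand-in for add_to_page_cache_locked(); always fails here so the
 * rollback path below is exercised. */
static int table_insert(struct page *page)
{
	(void)page;
	return -ENOMEM;
}

static int add_to_table(struct page *page)
{
	int error;

	page->flags |= FLAG_LOCKED;		/* __set_page_locked() analogue */
	error = table_insert(page);		/* publication point */
	if (error)
		page->flags &= ~FLAG_LOCKED;	/* __clear_page_locked() analogue */
	return error;
}

int main(void)
{
	struct page page = { 0 };

	if (add_to_table(&page) != 0)
		printf("insert failed, flags rolled back to %#lx\n", page.flags);
	return 0;
}
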
diff --git a/mm/swap_state.c b/mm/swap_state.c
index 43cda7b4b80..3353c9029ce 100644
--- a/mm/swap_state.c
+++ b/mm/swap_state.c
@@ -303,7 +303,7 @@ struct page *read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
* re-using the just freed swap entry for an existing page.
* May fail (-ENOMEM) if radix-tree node allocation failed.
*/
- set_page_locked(new_page);
+ __set_page_locked(new_page);
SetPageSwapBacked(new_page);
err = add_to_swap_cache(new_page, entry, gfp_mask & GFP_KERNEL);
if (likely(!err)) {
@@ -315,7 +315,7 @@ struct page *read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
return new_page;
}
ClearPageSwapBacked(new_page);
- clear_page_locked(new_page);
+ __clear_page_locked(new_page);
swap_free(entry);
} while (err != -ENOMEM);
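
read_swap_cache_async() applies the same rule inside its retry loop: PG_locked and PG_swapbacked are set with plain stores while new_page is still private, rolled back the same way when add_to_swap_cache() fails, and the loop retries (e.g. after losing a race to another inserter) unless the error was -ENOMEM. A userspace sketch of that control flow; swap_cache_insert() and the FLAG_* constants are hypothetical stand-ins:

/* Sketch: set flags privately, try the shared insert, undo and retry. */
#include <errno.h>
#include <stdio.h>

#define FLAG_LOCKED	(1UL << 0)
#define FLAG_SWAPBACKED	(1UL << 1)

struct page { unsigned long flags; };

static int attempts;

/* Stand-in for add_to_swap_cache(): fails once with -EEXIST (lost a
 * race), then succeeds, so the loop body runs twice. */
static int swap_cache_insert(struct page *page)
{
	(void)page;
	return attempts++ == 0 ? -EEXIST : 0;
}

int main(void)
{
	struct page page = { 0 };
	int err;

	do {
		/* Plain stores: the page is still private here. */
		page.flags |= FLAG_LOCKED | FLAG_SWAPBACKED;

		err = swap_cache_insert(&page);
		if (!err) {
			printf("inserted after %d attempt(s)\n", attempts);
			return 0;
		}

		/* Roll back with plain stores; the page never became visible. */
		page.flags &= ~(FLAG_LOCKED | FLAG_SWAPBACKED);
	} while (err != -ENOMEM);	/* give up only on allocation failure */

	return 1;
}
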