mm: remove fastcall from mm/

fastcall is always defined to be empty, so removing the annotation from
these functions changes no generated code.
-rw-r--r--  mm/filemap.c         10
-rw-r--r--  mm/highmem.c          4
-rw-r--r--  mm/internal.h         2
-rw-r--r--  mm/memory.c           3
-rw-r--r--  mm/page-writeback.c   2
-rw-r--r--  mm/page_alloc.c      16
-rw-r--r--  mm/swap.c            10
7 files changed, 24 insertions(+), 23 deletions(-)
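
For context: fastcall historically told gcc to pass up to three arguments
in registers on i386 and expanded to nothing on every other architecture.
A sketch of the old definition (illustrative only; the exact header and
guard varied by kernel version):

	#ifdef CONFIG_X86_32
	# define fastcall __attribute__((regparm(3)))	/* args in %eax, %edx, %ecx */
	#else
	# define fastcall				/* no-op elsewhere */
	#endif

Since i386 kernels are now built with -mregparm=3 globally, the
per-function annotation no longer changes the calling convention, and the
macro is defined empty everywhere, so deleting it is a pure cleanup.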
diff --git a/mm/filemap.c b/mm/filemap.c
index 96920f84056..81fb9bff0d4 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -527,7 +527,7 @@ static inline void wake_up_page(struct page *page, int bit)
__wake_up_bit(page_waitqueue(page), &page->flags, bit);
}
-void fastcall wait_on_page_bit(struct page *page, int bit_nr)
+void wait_on_page_bit(struct page *page, int bit_nr)
{
DEFINE_WAIT_BIT(wait, &page->flags, bit_nr);
@@ -551,7 +551,7 @@ EXPORT_SYMBOL(wait_on_page_bit);
* the clear_bit and the read of the waitqueue (to avoid SMP races with a
* parallel wait_on_page_locked()).
*/
-void fastcall unlock_page(struct page *page)
+void unlock_page(struct page *page)
{
smp_mb__before_clear_bit();
if (!TestClearPageLocked(page))
@@ -585,7 +585,7 @@ EXPORT_SYMBOL(end_page_writeback);
* chances are that on the second loop, the block layer's plug list is empty,
* so sync_page() will then return in state TASK_UNINTERRUPTIBLE.
*/
-void fastcall __lock_page(struct page *page)
+void __lock_page(struct page *page)
{
DEFINE_WAIT_BIT(wait, &page->flags, PG_locked);
@@ -606,7 +606,7 @@ int fastcall __lock_page_killable(struct page *page)
* Variant of lock_page that does not require the caller to hold a reference
* on the page's mapping.
*/
-void fastcall __lock_page_nosync(struct page *page)
+void __lock_page_nosync(struct page *page)
{
DEFINE_WAIT_BIT(wait, &page->flags, PG_locked);
__wait_on_bit_lock(page_waitqueue(page), &wait, __sleep_on_page_lock,
@@ -1276,7 +1276,7 @@ asmlinkage ssize_t sys_readahead(int fd, loff_t offset, size_t count)
* This adds the requested page to the page cache if it isn't already there,
* and schedules an I/O to read in its contents from disk.
*/
-static int fastcall page_cache_read(struct file * file, pgoff_t offset)
+static int page_cache_read(struct file *file, pgoff_t offset)
{
struct address_space *mapping = file->f_mapping;
struct page *page;
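
These filemap changes are mechanical; the locking protocol around
PG_locked is untouched. A typical caller still looks like this (sketch,
not taken from this patch):

	struct page *page = find_get_page(mapping, index);
	if (page) {
		lock_page(page);		/* may sleep in __lock_page() */
		/* ... work on the page while PG_locked is held ... */
		unlock_page(page);		/* clear PG_locked, then wake waiters */
		page_cache_release(page);	/* drop the find_get_page() reference */
	}

As the unlock_page() comment above notes, the lock bit is cleared before
the wait queue is examined, with a barrier in between, so a concurrent
wait_on_page_locked() cannot miss its wakeup.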
diff --git a/mm/highmem.c b/mm/highmem.c
index 7a967bc3515..35d47733cde 100644
--- a/mm/highmem.c
+++ b/mm/highmem.c
@@ -163,7 +163,7 @@ start:
return vaddr;
}
-void fastcall *kmap_high(struct page *page)
+void *kmap_high(struct page *page)
{
unsigned long vaddr;
@@ -185,7 +185,7 @@ void fastcall *kmap_high(struct page *page)
EXPORT_SYMBOL(kmap_high);
-void fastcall kunmap_high(struct page *page)
+void kunmap_high(struct page *page)
{
unsigned long vaddr;
unsigned long nr;
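
kmap_high() and kunmap_high() are the slow paths behind the kmap()/kunmap()
wrappers, which only reach them for genuinely highmem pages. Usage is
unchanged (sketch; buf and len are hypothetical):

	void *vaddr = kmap(page);	/* kmap_high() for highmem pages; may sleep */
	memcpy(vaddr, buf, len);
	kunmap(page);			/* kunmap_high() drops the pkmap reference */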
diff --git a/mm/internal.h b/mm/internal.h
index 953f941ea86..1e34d2462a4 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -34,7 +34,7 @@ static inline void __put_page(struct page *page)
atomic_dec(&page->_count);
}
-extern void fastcall __init __free_pages_bootmem(struct page *page,
+extern void __init __free_pages_bootmem(struct page *page,
unsigned int order);
/*
diff --git a/mm/memory.c b/mm/memory.c
index 1c81fc2174c..6a9c048f601 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -1109,7 +1109,8 @@ int get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
}
EXPORT_SYMBOL(get_user_pages);
-pte_t * fastcall get_locked_pte(struct mm_struct *mm, unsigned long addr, spinlock_t **ptl)
+pte_t *get_locked_pte(struct mm_struct *mm, unsigned long addr,
+ spinlock_t **ptl)
{
pgd_t * pgd = pgd_offset(mm, addr);
pud_t * pud = pud_alloc(mm, pgd, addr);
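
get_locked_pte() walks (and if needed allocates) the page tables down to
the PTE for addr and returns it mapped, with the page-table lock held and
reported through *ptl. The caller pattern, modeled on existing users such
as insert_page() (sketch only):

	spinlock_t *ptl;
	pte_t *pte = get_locked_pte(mm, addr, &ptl);
	if (!pte)
		return -ENOMEM;
	/* ... read or update the PTE while the lock is held ... */
	pte_unmap_unlock(pte, ptl);	/* unmap the PTE and release *ptl */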
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index c689b60af00..a4ca162666c 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -1073,7 +1073,7 @@ static int __set_page_dirty(struct page *page)
return 0;
}
-int fastcall set_page_dirty(struct page *page)
+int set_page_dirty(struct page *page)
{
int ret = __set_page_dirty(page);
if (ret)
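
set_page_dirty() is the usual entry point for marking a page-cache page
dirty. A caller that modifies a page through a kernel mapping would use it
like this (sketch only):

	void *kaddr = kmap(page);
	memset(kaddr, 0, PAGE_SIZE);	/* hypothetical in-place modification */
	kunmap(page);
	set_page_dirty(page);		/* tell writeback the contents changed */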
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 55fe57cd99a..d73c133fdbe 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -537,7 +537,7 @@ static void __free_pages_ok(struct page *page, unsigned int order)
/*
* permit the bootmem allocator to evade page validation on high-order frees
*/
-void fastcall __init __free_pages_bootmem(struct page *page, unsigned int order)
+void __init __free_pages_bootmem(struct page *page, unsigned int order)
{
if (order == 0) {
__ClearPageReserved(page);
@@ -974,7 +974,7 @@ void mark_free_pages(struct zone *zone)
/*
* Free a 0-order page
*/
-static void fastcall free_hot_cold_page(struct page *page, int cold)
+static void free_hot_cold_page(struct page *page, int cold)
{
struct zone *zone = page_zone(page);
struct per_cpu_pages *pcp;
@@ -1007,12 +1007,12 @@ static void fastcall free_hot_cold_page(struct page *page, int cold)
put_cpu();
}
-void fastcall free_hot_page(struct page *page)
+void free_hot_page(struct page *page)
{
free_hot_cold_page(page, 0);
}
-void fastcall free_cold_page(struct page *page)
+void free_cold_page(struct page *page)
{
free_hot_cold_page(page, 1);
}
@@ -1641,7 +1641,7 @@ EXPORT_SYMBOL(__alloc_pages);
/*
* Common helper functions.
*/
-fastcall unsigned long __get_free_pages(gfp_t gfp_mask, unsigned int order)
+unsigned long __get_free_pages(gfp_t gfp_mask, unsigned int order)
{
struct page * page;
page = alloc_pages(gfp_mask, order);
@@ -1652,7 +1652,7 @@ fastcall unsigned long __get_free_pages(gfp_t gfp_mask, unsigned int order)
EXPORT_SYMBOL(__get_free_pages);
-fastcall unsigned long get_zeroed_page(gfp_t gfp_mask)
+unsigned long get_zeroed_page(gfp_t gfp_mask)
{
struct page * page;
@@ -1678,7 +1678,7 @@ void __pagevec_free(struct pagevec *pvec)
free_hot_cold_page(pvec->pages[i], pvec->cold);
}
-fastcall void __free_pages(struct page *page, unsigned int order)
+void __free_pages(struct page *page, unsigned int order)
{
if (put_page_testzero(page)) {
if (order == 0)
@@ -1690,7 +1690,7 @@ fastcall void __free_pages(struct page *page, unsigned int order)
EXPORT_SYMBOL(__free_pages);
-fastcall void free_pages(unsigned long addr, unsigned int order)
+void free_pages(unsigned long addr, unsigned int order)
{
if (addr != 0) {
VM_BUG_ON(!virt_addr_valid((void *)addr));
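
The __get_free_pages()/free_pages() pair works on kernel virtual addresses
rather than struct page pointers, and the order passed to the free must
match the allocation (sketch, not from this patch):

	unsigned long buf = __get_free_pages(GFP_KERNEL, 2);	/* 1 << 2 = 4 pages */
	if (!buf)
		return -ENOMEM;
	/* ... use the physically contiguous buffer ... */
	free_pages(buf, 2);					/* same order as above */

get_zeroed_page() is the order-0 convenience variant returning an
already-zeroed page, released with free_page().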
diff --git a/mm/swap.c b/mm/swap.c
index 9ac88323d23..57b7e25a939 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -41,7 +41,7 @@ static DEFINE_PER_CPU(struct pagevec, lru_rotate_pvecs) = { 0, };
* This path almost never happens for VM activity - pages are normally
* freed via pagevecs. But it gets used by networking.
*/
-static void fastcall __page_cache_release(struct page *page)
+static void __page_cache_release(struct page *page)
{
if (PageLRU(page)) {
unsigned long flags;
@@ -165,7 +165,7 @@ int rotate_reclaimable_page(struct page *page)
/*
* FIXME: speed this up?
*/
-void fastcall activate_page(struct page *page)
+void activate_page(struct page *page)
{
struct zone *zone = page_zone(page);
@@ -186,7 +186,7 @@ void fastcall activate_page(struct page *page)
* inactive,referenced -> active,unreferenced
* active,unreferenced -> active,referenced
*/
-void fastcall mark_page_accessed(struct page *page)
+void mark_page_accessed(struct page *page)
{
if (!PageActive(page) && PageReferenced(page) && PageLRU(page)) {
activate_page(page);
@@ -202,7 +202,7 @@ EXPORT_SYMBOL(mark_page_accessed);
* lru_cache_add: add a page to the page lists
* @page: the page to add
*/
-void fastcall lru_cache_add(struct page *page)
+void lru_cache_add(struct page *page)
{
struct pagevec *pvec = &get_cpu_var(lru_add_pvecs);
@@ -212,7 +212,7 @@ void fastcall lru_cache_add(struct page *page)
put_cpu_var(lru_add_pvecs);
}
-void fastcall lru_cache_add_active(struct page *page)
+void lru_cache_add_active(struct page *page)
{
struct pagevec *pvec = &get_cpu_var(lru_add_active_pvecs);
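
lru_cache_add() and lru_cache_add_active() do not touch the zone LRU lists
directly; they stage pages in per-CPU pagevecs and drain them in batches,
keeping zone->lru_lock acquisitions rare. A typical caller adding a new
page-cache page, in the style of add_to_page_cache_lru() (sketch only):

	int err = add_to_page_cache(page, mapping, offset, GFP_KERNEL);
	if (err == 0)
		lru_cache_add(page);	/* queued per-CPU, drained in batches */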