Diffstat (limited to 'arch/frv/mm')
 arch/frv/mm/dma-alloc.c   | 4 +---
 arch/frv/mm/init.c        | 6 +++---
 arch/frv/mm/mmu-context.c | 6 +++---
 3 files changed, 7 insertions(+), 9 deletions(-)
diff --git a/arch/frv/mm/dma-alloc.c b/arch/frv/mm/dma-alloc.c
index 342823aad75..636b2f8b5d9 100644
--- a/arch/frv/mm/dma-alloc.c
+++ b/arch/frv/mm/dma-alloc.c
@@ -115,9 +115,7 @@ void *consistent_alloc(gfp_t gfp, size_t size, dma_addr_t *dma_handle)
*/
if (order > 0) {
struct page *rpage = virt_to_page(page);
-
- for (i = 1; i < (1 << order); i++)
- set_page_count(rpage + i, 1);
+ split_page(rpage, order);
}
err = 0;
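
The dma-alloc.c hunk drops the hand-rolled loop that set the reference count of each tail page to 1 and calls split_page() instead, which turns a higher-order allocation into 1 << order independently refcounted order-0 pages. A minimal sketch of that pattern, using a hypothetical helper name rather than the consistent_alloc() code itself:

#include <linux/gfp.h>
#include <linux/mm.h>

/* Hypothetical helper, not part of this patch: allocate 1 << order pages
 * and split them so each page can be mapped and freed on its own. */
static void *alloc_split_pages(gfp_t gfp, unsigned int order)
{
	struct page *page = alloc_pages(gfp, order);

	if (!page)
		return NULL;

	if (order > 0)
		split_page(page, order);	/* every order-0 page now has count 1 */

	return page_address(page);
}
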
diff --git a/arch/frv/mm/init.c b/arch/frv/mm/init.c
index 765088ea8a5..8899aa1a4f0 100644
--- a/arch/frv/mm/init.c
+++ b/arch/frv/mm/init.c
@@ -169,7 +169,7 @@ void __init mem_init(void)
struct page *page = &mem_map[pfn];
ClearPageReserved(page);
- set_page_count(page, 1);
+ init_page_count(page);
__free_page(page);
totalram_pages++;
}
@@ -210,7 +210,7 @@ void __init free_initmem(void)
/* next to check that the page we free is not a partial page */
for (addr = start; addr < end; addr += PAGE_SIZE) {
ClearPageReserved(virt_to_page(addr));
- set_page_count(virt_to_page(addr), 1);
+ init_page_count(virt_to_page(addr));
free_page(addr);
totalram_pages++;
}
@@ -230,7 +230,7 @@ void __init free_initrd_mem(unsigned long start, unsigned long end)
int pages = 0;
for (; start < end; start += PAGE_SIZE) {
ClearPageReserved(virt_to_page(start));
- set_page_count(virt_to_page(start), 1);
+ init_page_count(virt_to_page(start));
free_page(start);
totalram_pages++;
pages++;
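
The three init.c hunks are a mechanical conversion: when a reserved page (boot memory, init text, or the initrd) is handed back to the page allocator, its count is initialised with init_page_count() rather than set_page_count(page, 1), which signals that the count is being set up for the first time. The loops they all follow look roughly like this sketch, with an illustrative function name:

#include <linux/mm.h>
#include <asm/page.h>

/* Illustrative only: release a reserved virtual address range to the
 * buddy allocator, one page at a time, and return how many were freed. */
static unsigned long release_reserved_range(unsigned long start, unsigned long end)
{
	unsigned long addr, pages = 0;

	for (addr = start; addr < end; addr += PAGE_SIZE) {
		ClearPageReserved(virt_to_page(addr));
		init_page_count(virt_to_page(addr));	/* replaces set_page_count(..., 1) */
		free_page(addr);
		pages++;
	}
	return pages;
}
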
diff --git a/arch/frv/mm/mmu-context.c b/arch/frv/mm/mmu-context.c
index f2c6866fc88..1530a4111e6 100644
--- a/arch/frv/mm/mmu-context.c
+++ b/arch/frv/mm/mmu-context.c
@@ -54,9 +54,9 @@ static unsigned get_cxn(mm_context_t *ctx)
/* find the first unallocated context number
* - 0 is reserved for the kernel
*/
- cxn = find_next_zero_bit(&cxn_bitmap, NR_CXN, 1);
+ cxn = find_next_zero_bit(cxn_bitmap, NR_CXN, 1);
if (cxn < NR_CXN) {
- set_bit(cxn, &cxn_bitmap);
+ set_bit(cxn, cxn_bitmap);
}
else {
/* none remaining - need to steal someone else's cxn */
@@ -138,7 +138,7 @@ void destroy_context(struct mm_struct *mm)
cxn_pinned = -1;
list_del_init(&ctx->id_link);
- clear_bit(ctx->id, &cxn_bitmap);
+ clear_bit(ctx->id, cxn_bitmap);
__flush_tlb_mm(ctx->id);
ctx->id = 0;
}
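
The mmu-context.c hunks drop the '&' in front of cxn_bitmap because the bitop helpers (find_next_zero_bit(), set_bit(), clear_bit()) expect an unsigned long *; assuming cxn_bitmap is declared as an array of unsigned long, the array name already decays to that pointer type, while &cxn_bitmap yields a pointer to the whole array. A sketch of the intended usage, with an illustrative bitmap size:

#include <linux/bitops.h>
#include <linux/types.h>

#define NR_CXN		4096			/* illustrative size, not frv's actual value */

static DECLARE_BITMAP(cxn_bitmap, NR_CXN);	/* an array of unsigned long */

/* Illustrative only: grab the first free context number, keeping 0 for the kernel. */
static unsigned alloc_cxn(void)
{
	unsigned cxn = find_next_zero_bit(cxn_bitmap, NR_CXN, 1);

	if (cxn < NR_CXN)
		set_bit(cxn, cxn_bitmap);	/* the array decays to unsigned long *, no '&' needed */
	return cxn;
}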