Diffstat (limited to 'mm')
-rw-r--r-- | mm/memory.c | 47
-rw-r--r-- | mm/shmem.c  | 43
2 files changed, 26 insertions, 64 deletions
diff --git a/mm/memory.c b/mm/memory.c
index 1b8ca160f1d..1d803c2d018 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -1998,45 +1998,26 @@ int vmtruncate_range(struct inode *inode, loff_t offset, loff_t end)
  */
 void swapin_readahead(swp_entry_t entry, unsigned long addr,struct vm_area_struct *vma)
 {
-#ifdef CONFIG_NUMA
-	struct vm_area_struct *next_vma = vma ? vma->vm_next : NULL;
-#endif
-	int i, num;
-	struct page *new_page;
+	int nr_pages;
+	struct page *page;
 	unsigned long offset;
+	unsigned long end_offset;
 
 	/*
-	 * Get the number of handles we should do readahead io to.
+	 * Get starting offset for readaround, and number of pages to read.
+	 * Adjust starting address by readbehind (for NUMA interleave case)?
+	 * No, it's very unlikely that swap layout would follow vma layout,
+	 * more likely that neighbouring swap pages came from the same node:
+	 * so use the same "addr" to choose the same node for each swap read.
 	 */
-	num = valid_swaphandles(entry, &offset);
-	for (i = 0; i < num; offset++, i++) {
+	nr_pages = valid_swaphandles(entry, &offset);
+	for (end_offset = offset + nr_pages; offset < end_offset; offset++) {
 		/* Ok, do the async read-ahead now */
-		new_page = read_swap_cache_async(swp_entry(swp_type(entry),
-							offset), vma, addr);
-		if (!new_page)
+		page = read_swap_cache_async(swp_entry(swp_type(entry), offset),
+						vma, addr);
+		if (!page)
 			break;
-		page_cache_release(new_page);
-#ifdef CONFIG_NUMA
-		/*
-		 * Find the next applicable VMA for the NUMA policy.
-		 */
-		addr += PAGE_SIZE;
-		if (addr == 0)
-			vma = NULL;
-		if (vma) {
-			if (addr >= vma->vm_end) {
-				vma = next_vma;
-				next_vma = vma ? vma->vm_next : NULL;
-			}
-			if (vma && addr < vma->vm_start)
-				vma = NULL;
-		} else {
-			if (next_vma && addr >= next_vma->vm_start) {
-				vma = next_vma;
-				next_vma = vma->vm_next;
-			}
-		}
-#endif
+		page_cache_release(page);
 	}
 	lru_add_drain();	/* Push any new pages onto the LRU now */
 }
diff --git a/mm/shmem.c b/mm/shmem.c
index 51b3d6ccdda..88c6685f16b 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -1025,53 +1025,34 @@ out:
 	return err;
 }
 
-static struct page *shmem_swapin_async(struct shared_policy *p,
+static struct page *shmem_swapin(struct shmem_inode_info *info,
 			swp_entry_t entry, unsigned long idx)
 {
-	struct page *page;
 	struct vm_area_struct pvma;
+	struct page *page;
 
 	/* Create a pseudo vma that just contains the policy */
-	memset(&pvma, 0, sizeof(struct vm_area_struct));
-	pvma.vm_end = PAGE_SIZE;
+	pvma.vm_start = 0;
 	pvma.vm_pgoff = idx;
-	pvma.vm_policy = mpol_shared_policy_lookup(p, idx);
+	pvma.vm_ops = NULL;
+	pvma.vm_policy = mpol_shared_policy_lookup(&info->policy, idx);
+	swapin_readahead(entry, 0, &pvma);
 	page = read_swap_cache_async(entry, &pvma, 0);
 	mpol_free(pvma.vm_policy);
 	return page;
 }
 
-static struct page *shmem_swapin(struct shmem_inode_info *info,
-			swp_entry_t entry, unsigned long idx)
-{
-	struct shared_policy *p = &info->policy;
-	int i, num;
-	struct page *page;
-	unsigned long offset;
-
-	num = valid_swaphandles(entry, &offset);
-	for (i = 0; i < num; offset++, i++) {
-		page = shmem_swapin_async(p,
-				swp_entry(swp_type(entry), offset), idx);
-		if (!page)
-			break;
-		page_cache_release(page);
-	}
-	lru_add_drain();	/* Push any new pages onto the LRU now */
-	return shmem_swapin_async(p, entry, idx);
-}
-
-static struct page *
-shmem_alloc_page(gfp_t gfp, struct shmem_inode_info *info,
-		unsigned long idx)
+static struct page *shmem_alloc_page(gfp_t gfp, struct shmem_inode_info *info,
+			unsigned long idx)
 {
 	struct vm_area_struct pvma;
 	struct page *page;
 
-	memset(&pvma, 0, sizeof(struct vm_area_struct));
-	pvma.vm_policy = mpol_shared_policy_lookup(&info->policy, idx);
+	/* Create a pseudo vma that just contains the policy */
+	pvma.vm_start = 0;
 	pvma.vm_pgoff = idx;
-	pvma.vm_end = PAGE_SIZE;
+	pvma.vm_ops = NULL;
+	pvma.vm_policy = mpol_shared_policy_lookup(&info->policy, idx);
 	page = alloc_page_vma(gfp, &pvma, 0);
 	mpol_free(pvma.vm_policy);
 	return page;
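
Note on the mm/memory.c hunk: swapin_readahead() now walks a single offset up to end_offset instead of pairing a counter i with offset, and the CONFIG_NUMA block that chased vma->vm_next for every page is gone; per the new comment, the same "addr" is deliberately reused so neighbouring swap pages are read to the same node. Below is a minimal userspace sketch of that loop shape only; valid_swaphandles_stub() is a hypothetical stand-in for the kernel's valid_swaphandles(), assumed to return how many consecutive swap slots are worth reading and to set *offset to the first of them.

#include <stdio.h>

/* Hypothetical stub: reports a cluster of 8 slots starting at slot 100. */
static int valid_swaphandles_stub(unsigned long *offset)
{
	*offset = 100;
	return 8;
}

int main(void)
{
	unsigned long offset, end_offset;
	int nr_pages = valid_swaphandles_stub(&offset);

	/*
	 * Walk by offset alone, as the patched swapin_readahead() does,
	 * rather than carrying a separate loop counter beside offset.
	 */
	for (end_offset = offset + nr_pages; offset < end_offset; offset++)
		printf("would read swap slot %lu\n", offset);
	return 0;
}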
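Note on the mm/shmem.c hunk: the old per-page shmem_swapin_async() loop is folded away, and shmem_swapin() now delegates readaround to swapin_readahead() through a pseudo vma that carries only the shared mempolicy, setting vm_start, vm_pgoff, vm_ops and vm_policy explicitly rather than memset()ing the whole struct. The sketch below mocks that pattern in userspace; the struct, lookup_policy_stub() and alloc_for() are hypothetical stand-ins for vm_area_struct, mpol_shared_policy_lookup() and alloc_page_vma(), and the pattern assumes the callees consult no other fields.

#include <stdio.h>

struct mempolicy { int nid; };

struct vm_area_struct {
	unsigned long vm_start;
	unsigned long vm_pgoff;
	const void *vm_ops;
	struct mempolicy *vm_policy;
};

/* Hypothetical stub: pretend every index maps to a policy on node 0. */
static struct mempolicy *lookup_policy_stub(unsigned long idx)
{
	static struct mempolicy pol = { .nid = 0 };
	return &pol;
}

/* A callee that, like the real ones, only reads the fields set below. */
static void alloc_for(struct vm_area_struct *vma)
{
	printf("pgoff %lu -> node %d\n", vma->vm_pgoff, vma->vm_policy->nid);
}

int main(void)
{
	struct vm_area_struct pvma;

	/* Create a pseudo vma that just contains the policy, setting only
	 * the fields the callees consult, as the patched helpers do. */
	pvma.vm_start = 0;
	pvma.vm_pgoff = 42;
	pvma.vm_ops = NULL;
	pvma.vm_policy = lookup_policy_stub(pvma.vm_pgoff);
	alloc_for(&pvma);
	return 0;
}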