author     Haavard Skinnemoen <haavard.skinnemoen@atmel.com>   2008-07-27 13:54:08 +0200
committer  Haavard Skinnemoen <haavard.skinnemoen@atmel.com>   2008-07-27 13:54:08 +0200
commit     eda3d8f5604860aae1bb9996bb5efc4213778369 (patch)
tree       9d3887d2665bcc5f5abf200758794545c7b2c69b /include/linux/mm.h
parent     87a9f704658a40940e740b1d73d861667e9164d3 (diff)
parent     8be1a6d6c77ab4532e4476fdb8177030ef48b52c (diff)
Merge commit 'upstream/master'
Diffstat (limited to 'include/linux/mm.h')
-rw-r--r--   include/linux/mm.h   64
1 file changed, 48 insertions(+), 16 deletions(-)
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 2128ef7780c..6e695eaab4c 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -41,6 +41,9 @@ extern unsigned long mmap_min_addr;
 
 #define nth_page(page,n) pfn_to_page(page_to_pfn((page)) + (n))
 
+/* to align the pointer to the (next) page boundary */
+#define PAGE_ALIGN(addr) ALIGN(addr, PAGE_SIZE)
+
 /*
  * Linux kernel virtual memory manager primitives.
  * The idea being to have a "virtual" mm in the same way
@@ -100,6 +103,7 @@ extern unsigned int kobjsize(const void *objp);
 #define VM_DONTEXPAND	0x00040000	/* Cannot expand with mremap() */
 #define VM_RESERVED	0x00080000	/* Count as reserved_vm like IO */
 #define VM_ACCOUNT	0x00100000	/* Is a VM accounted object */
+#define VM_NORESERVE	0x00200000	/* should the VM suppress accounting */
 #define VM_HUGETLB	0x00400000	/* Huge TLB Page VM */
 #define VM_NONLINEAR	0x00800000	/* Is non-linear (remap_file_pages) */
 #define VM_MAPPED_COPY	0x01000000	/* T if mapped copy of data (nommu mmap) */
@@ -166,12 +170,16 @@ struct vm_operations_struct {
 	void (*open)(struct vm_area_struct * area);
 	void (*close)(struct vm_area_struct * area);
 	int (*fault)(struct vm_area_struct *vma, struct vm_fault *vmf);
-	unsigned long (*nopfn)(struct vm_area_struct *area,
-			unsigned long address);
 
 	/* notification that a previously read-only page is about to become
 	 * writable, if an error is returned it will cause a SIGBUS */
 	int (*page_mkwrite)(struct vm_area_struct *vma, struct page *page);
+
+	/* called by access_process_vm when get_user_pages() fails, typically
+	 * for use by special VMAs that can switch between memory and hardware
+	 */
+	int (*access)(struct vm_area_struct *vma, unsigned long addr,
+		      void *buf, int len, int write);
 #ifdef CONFIG_NUMA
 	/*
 	 * set_policy() op must add a reference to any non-NULL @new mempolicy
@@ -675,13 +683,6 @@ static inline int page_mapped(struct page *page)
 }
 
 /*
- * Error return values for the *_nopfn functions
- */
-#define NOPFN_SIGBUS	((unsigned long) -1)
-#define NOPFN_OOM	((unsigned long) -2)
-#define NOPFN_REFAULT	((unsigned long) -3)
-
-/*
  * Different kinds of faults, as returned by handle_mm_fault().
  * Used to decide whether a process gets delivered SIGBUS or
  * just gets major/minor fault counters bumped up.
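Note on the `VM_NORESERVE` bit added above: it marks a vma whose mapping should skip strict overcommit (swap-space) accounting, and userspace requests it through mmap's `MAP_NORESERVE` flag. A hedged illustration in plain userspace C, not part of this diff:

```c
#include <stdio.h>
#include <sys/mman.h>

int main(void)
{
	/* MAP_NORESERVE asks the kernel to set VM_NORESERVE on the vma,
	 * suppressing swap-space accounting for this large, mostly
	 * untouched anonymous mapping. */
	size_t len = 1UL << 30;	/* 1 GiB, sparsely used */
	void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE, -1, 0);

	if (p == MAP_FAILED) {
		perror("mmap");
		return 1;
	}
	/* backing pages are committed only as they are first touched */
	munmap(p, len);
	return 0;
}
```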
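The new `->access` method is what lets `access_process_vm()` (and therefore ptrace and `/proc/<pid>/mem`) reach VM_IO/VM_PFNMAP areas where `get_user_pages()` fails; `generic_access_phys()`, declared in a hunk below, is the stock implementation, and in-tree `drivers/char/mem.c` wires it into `/dev/mem`'s vm_ops. A rough sketch of how a driver might use it; the `mydev_*` names are invented and the code is illustrative, not from this commit:

```c
#include <linux/mm.h>
#include <linux/fs.h>

static struct vm_operations_struct mydev_vm_ops = {
#ifdef CONFIG_HAVE_IOREMAP_PROT
	/* generic_access_phys() follows the pfn behind the address,
	 * ioremaps the frame and copies through the temporary mapping;
	 * it is only built on architectures providing
	 * CONFIG_HAVE_IOREMAP_PROT. */
	.access = generic_access_phys,
#endif
};

static int mydev_mmap(struct file *file, struct vm_area_struct *vma)
{
	unsigned long size = vma->vm_end - vma->vm_start;

	/* PAGE_ALIGN(), now available from <linux/mm.h> (first hunk),
	 * rounds a size up to the next page boundary. */
	if (size != PAGE_ALIGN(size))
		return -EINVAL;

	vma->vm_flags |= VM_IO | VM_RESERVED;
	vma->vm_ops = &mydev_vm_ops;
	return io_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
				  size, vma->vm_page_prot);
}
```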
@@ -772,14 +773,14 @@ struct mm_walk {
 int walk_page_range(unsigned long addr, unsigned long end,
 		struct mm_walk *walk);
 
-void free_pgd_range(struct mmu_gather **tlb, unsigned long addr,
+void free_pgd_range(struct mmu_gather *tlb, unsigned long addr,
 		unsigned long end, unsigned long floor, unsigned long ceiling);
-void free_pgtables(struct mmu_gather **tlb, struct vm_area_struct *start_vma,
-		unsigned long floor, unsigned long ceiling);
 int copy_page_range(struct mm_struct *dst, struct mm_struct *src,
 			struct vm_area_struct *vma);
 void unmap_mapping_range(struct address_space *mapping,
 		loff_t const holebegin, loff_t const holelen, int even_cows);
+int generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
+			void *buf, int len, int write);
 
 static inline void unmap_shared_mapping_range(struct address_space *mapping,
 		loff_t const holebegin, loff_t const holelen)
@@ -809,7 +810,6 @@ extern int access_process_vm(struct task_struct *tsk, unsigned long addr, void *
 int get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
 			unsigned long start, int len, int write, int force,
 			struct page **pages, struct vm_area_struct **vmas);
-void print_bad_pte(struct vm_area_struct *, pte_t, unsigned long);
 
 extern int try_to_release_page(struct page * page, gfp_t gfp_mask);
 extern void do_invalidatepage(struct page *page, unsigned long offset);
@@ -832,6 +832,39 @@ extern int mprotect_fixup(struct vm_area_struct *vma,
 			  struct vm_area_struct **pprev, unsigned long start,
 			  unsigned long end, unsigned long newflags);
 
+#ifdef CONFIG_HAVE_GET_USER_PAGES_FAST
+/*
+ * get_user_pages_fast provides equivalent functionality to get_user_pages,
+ * operating on current and current->mm (force=0 and doesn't return any vmas).
+ *
+ * get_user_pages_fast may take mmap_sem and page tables, so no assumptions
+ * can be made about locking. get_user_pages_fast is to be implemented in a
+ * way that is advantageous (vs get_user_pages()) when the user memory area is
+ * already faulted in and present in ptes. However if the pages have to be
+ * faulted in, it may turn out to be slightly slower).
+ */
+int get_user_pages_fast(unsigned long start, int nr_pages, int write,
+			struct page **pages);
+
+#else
+/*
+ * Should probably be moved to asm-generic, and architectures can include it if
+ * they don't implement their own get_user_pages_fast.
+ */
+#define get_user_pages_fast(start, nr_pages, write, pages)	\
+({								\
+	struct mm_struct *mm = current->mm;			\
+	int ret;						\
+								\
+	down_read(&mm->mmap_sem);				\
+	ret = get_user_pages(current, mm, start, nr_pages,	\
+					write, 0, pages, NULL);	\
+	up_read(&mm->mmap_sem);					\
+								\
+	ret;							\
+})
+#endif
+
 /*
  * A callback you can register to apply pressure to ageable caches.
  *
@@ -965,9 +998,8 @@ static inline void pgtable_page_dtor(struct page *page)
 		NULL: pte_offset_kernel(pmd, address))
 
 extern void free_area_init(unsigned long * zones_size);
-extern void free_area_init_node(int nid, pg_data_t *pgdat,
-	unsigned long * zones_size, unsigned long zone_start_pfn,
-	unsigned long *zholes_size);
+extern void free_area_init_node(int nid, unsigned long * zones_size,
+		unsigned long zone_start_pfn, unsigned long *zholes_size);
 #ifdef CONFIG_ARCH_POPULATES_NODE_MAP
 /*
  * With CONFIG_ARCH_POPULATES_NODE_MAP set, an architecture may initialise its
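Usage note on the new `get_user_pages_fast()` entry point: a caller pins the pages backing a user buffer (locklessly where the architecture provides a fast path, via the `mmap_sem` fallback macro above otherwise) and drops each page with `put_page()` when the I/O completes. A minimal, hypothetical sketch using only the signature declared in this diff; `pin_user_buffer` is an invented name, not from this commit:

```c
#include <linux/mm.h>
#include <linux/errno.h>

/* Pin the user pages spanning [uaddr, uaddr + len), e.g. for DMA.
 * Returns the number of pages pinned, or a negative errno. Every
 * successfully pinned page must later be released with put_page(). */
static int pin_user_buffer(unsigned long uaddr, size_t len, int write,
			   struct page **pages)
{
	int nr_pages = (PAGE_ALIGN(uaddr + len) - (uaddr & PAGE_MASK))
				>> PAGE_SHIFT;
	int pinned, i;

	pinned = get_user_pages_fast(uaddr & PAGE_MASK, nr_pages, write,
				     pages);
	if (pinned == nr_pages)
		return pinned;

	/* partial (or failed) pin: release what we did get and bail out */
	for (i = 0; i < pinned; i++)
		put_page(pages[i]);
	return pinned < 0 ? pinned : -EFAULT;
}
```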