path: root/mm/sparse.c
blob: f888385b9e14ef46438d79974c24fdbc873af820
/*
 * sparse memory mappings.
 */
#include <linux/config.h>
#include <linux/mm.h>
#include <linux/mmzone.h>
#include <linux/bootmem.h>
#include <linux/module.h>
#include <asm/dma.h>

/*
 * Permanent SPARSEMEM data:
 *
 * 1) mem_section	- memory sections, mem_map's for valid memory
 */
struct mem_section mem_section[NR_MEM_SECTIONS];
EXPORT_SYMBOL(mem_section);
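
/*
 * Illustrative sketch (not part of the original file): a pfn selects its
 * mem_section[] slot by a simple shift, so the lookup is O(1).  The helper
 * name below is hypothetical; the real conversion is the
 * pfn_to_section_nr() macro from linux/mmzone.h.
 */
static inline struct mem_section *sparse_example_pfn_to_section(unsigned long pfn)
{
	return &mem_section[pfn_to_section_nr(pfn)];
}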

/* Record a memory area against a node. */
void memory_present(int nid, unsigned long start, unsigned long end)
{
	unsigned long pfn;

	start &= PAGE_SECTION_MASK;
	for (pfn = start; pfn < end; pfn += PAGES_PER_SECTION) {
		unsigned long section = pfn_to_section_nr(pfn);
		if (!mem_section[section].section_mem_map)
			mem_section[section].section_mem_map = (void *) -1;
	}
}
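
/*
 * Usage sketch (an assumption for illustration, not taken from this file):
 * architecture setup code is expected to call memory_present() once per
 * node with that node's pfn range before sparse_init() runs, e.g.:
 *
 *	for_each_online_node(nid)
 *		memory_present(nid, node_start_pfn, node_end_pfn);
 *
 * where node_start_pfn/node_end_pfn stand in for however the architecture
 * describes its memory layout.
 */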

/*
 * Only used by the i386 NUMA architectures, but relatively
 * generic code.
 */
unsigned long __init node_memmap_size_bytes(int nid, unsigned long start_pfn,
						     unsigned long end_pfn)
{
	unsigned long pfn;
	unsigned long nr_pages = 0;

	for (pfn = start_pfn; pfn < end_pfn; pfn += PAGES_PER_SECTION) {
		if (nid != early_pfn_to_nid(pfn))
			continue;

		if (pfn_valid(pfn))
			nr_pages += PAGES_PER_SECTION;
	}

	return nr_pages * sizeof(struct page);
}
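
/*
 * Worked example (assumed numbers, for illustration only): with 4K pages
 * and 64MB sections, PAGES_PER_SECTION is 16384, so if sizeof(struct page)
 * is 32 bytes each present section in the node adds 16384 * 32 = 512KB to
 * the returned mem_map size.
 */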

/*
 * Allocate the accumulated non-linear sections, allocate a mem_map
 * for each and record the physical to section mapping.
 */
void sparse_init(void)
{
	unsigned long pnum;
	struct page *map;
	int nid;

	for (pnum = 0; pnum < NR_MEM_SECTIONS; pnum++) {
		if (!mem_section[pnum].section_mem_map)
			continue;

		nid = early_pfn_to_nid(section_nr_to_pfn(pnum));
		map = alloc_remap(nid, sizeof(struct page) * PAGES_PER_SECTION);
		if (!map)
			map = alloc_bootmem_node(NODE_DATA(nid),
				sizeof(struct page) * PAGES_PER_SECTION);
		if (!map) {
			mem_section[pnum].section_mem_map = 0;
			continue;
		}

		/*
		 * Subtle: we store mem_map minus the section's first pfn, so
		 * that section_mem_map + pfn yields the struct page for pfn
		 * and page - section_mem_map recovers the actual physical
		 * page frame number.
		 */
		mem_section[pnum].section_mem_map = map -
						section_nr_to_pfn(pnum);
	}
}
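
/*
 * Minimal sketch of how the encoding above is consumed (the helper names
 * are hypothetical; the real SPARSEMEM pfn_to_page()/page_to_pfn() are
 * defined elsewhere in the kernel headers).  Because section_mem_map
 * already has the section's first pfn subtracted out, both directions
 * reduce to plain pointer arithmetic:
 */
static inline struct page *sparse_example_pfn_to_page(unsigned long pfn)
{
	/* map + (pfn - section_start_pfn), by construction in sparse_init() */
	return mem_section[pfn_to_section_nr(pfn)].section_mem_map + pfn;
}

static inline unsigned long sparse_example_page_to_pfn(struct page *page,
							unsigned long section_nr)
{
	/* inverse identity: page - section_mem_map recovers the pfn */
	return page - mem_section[section_nr].section_mem_map;
}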