From d9b2b2a277219d4812311d995054ce4f95067725 Mon Sep 17 00:00:00 2001
From: "David S. Miller"
Date: Wed, 13 Feb 2008 16:56:49 -0800
Subject: [LIB]: Make PowerPC LMB code generic so sparc64 can use it too.

Signed-off-by: David S. Miller
---
 lib/Kconfig  |   3 +
 lib/Makefile |   2 +
 lib/lmb.c    | 354 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
 3 files changed, 359 insertions(+)
 create mode 100644 lib/lmb.c

diff --git a/lib/Kconfig b/lib/Kconfig
index ba3d104994d..2d53dc092e8 100644
--- a/lib/Kconfig
+++ b/lib/Kconfig
@@ -141,4 +141,7 @@ config HAS_DMA
 config CHECK_SIGNATURE
         bool
 
+config HAVE_LMB
+        boolean
+
 endmenu
diff --git a/lib/Makefile b/lib/Makefile
index 23de261a4c8..61bba16a0a2 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -70,6 +70,8 @@ obj-$(CONFIG_FAULT_INJECTION) += fault-inject.o
 
 lib-$(CONFIG_GENERIC_BUG) += bug.o
 
+obj-$(CONFIG_HAVE_LMB) += lmb.o
+
 hostprogs-y := gen_crc32table
 clean-files := crc32table.h
diff --git a/lib/lmb.c b/lib/lmb.c
new file mode 100644
index 00000000000..98078b4ec20
--- /dev/null
+++ b/lib/lmb.c
@@ -0,0 +1,354 @@
+/*
+ * Procedures for maintaining information about logical memory blocks.
+ *
+ * Peter Bergner, IBM Corp.     June 2001.
+ * Copyright (C) 2001 Peter Bergner.
+ *
+ *      This program is free software; you can redistribute it and/or
+ *      modify it under the terms of the GNU General Public License
+ *      as published by the Free Software Foundation; either version
+ *      2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/bitops.h>
+#include <linux/lmb.h>
+
+#undef DEBUG
+
+#ifdef DEBUG
+#define DBG(fmt...) LMB_DBG(fmt)
+#else
+#define DBG(fmt...)
+#endif
+
+#define LMB_ALLOC_ANYWHERE 0
+
+struct lmb lmb;
+
+void lmb_dump_all(void)
+{
+#ifdef DEBUG
+        unsigned long i;
+
+        DBG("lmb_dump_all:\n");
+        DBG("    memory.cnt  = 0x%lx\n", lmb.memory.cnt);
+        DBG("    memory.size = 0x%lx\n", lmb.memory.size);
+        for (i=0; i < lmb.memory.cnt ;i++) {
+                DBG("    memory.region[0x%x].base = 0x%lx\n",
+                    i, lmb.memory.region[i].base);
+                DBG("                      .size = 0x%lx\n",
+                    lmb.memory.region[i].size);
+        }
+
+        DBG("\n    reserved.cnt  = 0x%lx\n", lmb.reserved.cnt);
+        DBG("    reserved.size = 0x%lx\n", lmb.reserved.size);
+        for (i=0; i < lmb.reserved.cnt ;i++) {
+                DBG("    reserved.region[0x%x].base = 0x%lx\n",
+                    i, lmb.reserved.region[i].base);
+                DBG("                      .size = 0x%lx\n",
+                    lmb.reserved.region[i].size);
+        }
+#endif /* DEBUG */
+}
+
+static unsigned long __init lmb_addrs_overlap(unsigned long base1,
+                unsigned long size1, unsigned long base2, unsigned long size2)
+{
+        return ((base1 < (base2+size2)) && (base2 < (base1+size1)));
+}
+
+static long __init lmb_addrs_adjacent(unsigned long base1, unsigned long size1,
+                unsigned long base2, unsigned long size2)
+{
+        if (base2 == base1 + size1)
+                return 1;
+        else if (base1 == base2 + size2)
+                return -1;
+
+        return 0;
+}
+
+static long __init lmb_regions_adjacent(struct lmb_region *rgn,
+                unsigned long r1, unsigned long r2)
+{
+        unsigned long base1 = rgn->region[r1].base;
+        unsigned long size1 = rgn->region[r1].size;
+        unsigned long base2 = rgn->region[r2].base;
+        unsigned long size2 = rgn->region[r2].size;
+
+        return lmb_addrs_adjacent(base1, size1, base2, size2);
+}
+
+static void __init lmb_remove_region(struct lmb_region *rgn, unsigned long r)
+{
+        unsigned long i;
+
+        for (i = r; i < rgn->cnt - 1; i++) {
+                rgn->region[i].base = rgn->region[i + 1].base;
+                rgn->region[i].size = rgn->region[i + 1].size;
+        }
+        rgn->cnt--;
+}
+
+/* Assumption: base addr of region 1 < base addr of region 2 */
+static void __init lmb_coalesce_regions(struct lmb_region *rgn,
+                unsigned long r1, unsigned long r2)
+{
+        rgn->region[r1].size += rgn->region[r2].size;
+        lmb_remove_region(rgn, r2);
+}
+
+/* This routine called with relocation disabled. */
+void __init lmb_init(void)
+{
+        /* Create a dummy zero size LMB which will get coalesced away later.
+         * This simplifies the lmb_add() code below...
+         */
+        lmb.memory.region[0].base = 0;
+        lmb.memory.region[0].size = 0;
+        lmb.memory.cnt = 1;
+
+        /* Ditto. */
+        lmb.reserved.region[0].base = 0;
+        lmb.reserved.region[0].size = 0;
+        lmb.reserved.cnt = 1;
+}
+
+/* This routine may be called with relocation disabled. */
+void __init lmb_analyze(void)
+{
+        int i;
+
+        lmb.memory.size = 0;
+
+        for (i = 0; i < lmb.memory.cnt; i++)
+                lmb.memory.size += lmb.memory.region[i].size;
+}
+
+/* This routine called with relocation disabled. */
+static long __init lmb_add_region(struct lmb_region *rgn, unsigned long base,
+                                  unsigned long size)
+{
+        unsigned long coalesced = 0;
+        long adjacent, i;
+
+        /* First try and coalesce this LMB with another. */
+        for (i=0; i < rgn->cnt; i++) {
+                unsigned long rgnbase = rgn->region[i].base;
+                unsigned long rgnsize = rgn->region[i].size;
+
+                if ((rgnbase == base) && (rgnsize == size))
+                        /* Already have this region, so we're done */
+                        return 0;
+
+                adjacent = lmb_addrs_adjacent(base,size,rgnbase,rgnsize);
+                if ( adjacent > 0 ) {
+                        rgn->region[i].base -= size;
+                        rgn->region[i].size += size;
+                        coalesced++;
+                        break;
+                }
+                else if ( adjacent < 0 ) {
+                        rgn->region[i].size += size;
+                        coalesced++;
+                        break;
+                }
+        }
+
+        if ((i < rgn->cnt-1) && lmb_regions_adjacent(rgn, i, i+1) ) {
+                lmb_coalesce_regions(rgn, i, i+1);
+                coalesced++;
+        }
+
+        if (coalesced)
+                return coalesced;
+        if (rgn->cnt >= MAX_LMB_REGIONS)
+                return -1;
+
+        /* Couldn't coalesce the LMB, so add it to the sorted table. */
+        for (i = rgn->cnt-1; i >= 0; i--) {
+                if (base < rgn->region[i].base) {
+                        rgn->region[i+1].base = rgn->region[i].base;
+                        rgn->region[i+1].size = rgn->region[i].size;
+                } else {
+                        rgn->region[i+1].base = base;
+                        rgn->region[i+1].size = size;
+                        break;
+                }
+        }
+        rgn->cnt++;
+
+        return 0;
+}
+
+/* This routine may be called with relocation disabled. */
+long __init lmb_add(unsigned long base, unsigned long size)
+{
+        struct lmb_region *_rgn = &(lmb.memory);
+
+        /* On pSeries LPAR systems, the first LMB is our RMO region. */
+        if (base == 0)
+                lmb.rmo_size = size;
+
+        return lmb_add_region(_rgn, base, size);
+
+}
+
+long __init lmb_reserve(unsigned long base, unsigned long size)
+{
+        struct lmb_region *_rgn = &(lmb.reserved);
+
+        BUG_ON(0 == size);
+
+        return lmb_add_region(_rgn, base, size);
+}
+
+long __init lmb_overlaps_region(struct lmb_region *rgn, unsigned long base,
+                                unsigned long size)
+{
+        unsigned long i;
+
+        for (i=0; i < rgn->cnt; i++) {
+                unsigned long rgnbase = rgn->region[i].base;
+                unsigned long rgnsize = rgn->region[i].size;
+                if ( lmb_addrs_overlap(base,size,rgnbase,rgnsize) ) {
+                        break;
+                }
+        }
+
+        return (i < rgn->cnt) ? i : -1;
+}
+
+unsigned long __init lmb_alloc(unsigned long size, unsigned long align)
+{
+        return lmb_alloc_base(size, align, LMB_ALLOC_ANYWHERE);
+}
+
+unsigned long __init lmb_alloc_base(unsigned long size, unsigned long align,
+                                    unsigned long max_addr)
+{
+        unsigned long alloc;
+
+        alloc = __lmb_alloc_base(size, align, max_addr);
+
+        if (alloc == 0)
+                panic("ERROR: Failed to allocate 0x%lx bytes below 0x%lx.\n",
+                      size, max_addr);
+
+        return alloc;
+}
+
+static unsigned long lmb_align_down(unsigned long addr, unsigned long size)
+{
+        return addr & ~(size - 1);
+}
+
+unsigned long __init __lmb_alloc_base(unsigned long size, unsigned long align,
+                                      unsigned long max_addr)
+{
+        long i, j;
+        unsigned long base = 0;
+
+        BUG_ON(0 == size);
+
+        /* On some platforms, make sure we allocate lowmem */
+        if (max_addr == LMB_ALLOC_ANYWHERE)
+                max_addr = LMB_REAL_LIMIT;
+
+        for (i = lmb.memory.cnt-1; i >= 0; i--) {
+                unsigned long lmbbase = lmb.memory.region[i].base;
+                unsigned long lmbsize = lmb.memory.region[i].size;
+
+                if (max_addr == LMB_ALLOC_ANYWHERE)
+                        base = lmb_align_down(lmbbase + lmbsize - size, align);
+                else if (lmbbase < max_addr) {
+                        base = min(lmbbase + lmbsize, max_addr);
+                        base = lmb_align_down(base - size, align);
+                } else
+                        continue;
+
+                while ((lmbbase <= base) &&
+                       ((j = lmb_overlaps_region(&lmb.reserved, base, size)) >= 0) )
+                        base = lmb_align_down(lmb.reserved.region[j].base - size,
+                                              align);
+
+                if ((base != 0) && (lmbbase <= base))
+                        break;
+        }
+
+        if (i < 0)
+                return 0;
+
+        lmb_add_region(&lmb.reserved, base, size);
+
+        return base;
+}
+
+/* You must call lmb_analyze() before this. */
+unsigned long __init lmb_phys_mem_size(void)
+{
+        return lmb.memory.size;
+}
+
+unsigned long __init lmb_end_of_DRAM(void)
+{
+        int idx = lmb.memory.cnt - 1;
+
+        return (lmb.memory.region[idx].base + lmb.memory.region[idx].size);
+}
+
+/* You must call lmb_analyze() after this. */
+void __init lmb_enforce_memory_limit(unsigned long memory_limit)
+{
+        unsigned long i, limit;
+        struct lmb_property *p;
+
+        if (!memory_limit)
+                return;
+
+        /* Truncate the lmb regions to satisfy the memory limit. */
+        limit = memory_limit;
+        for (i = 0; i < lmb.memory.cnt; i++) {
+                if (limit > lmb.memory.region[i].size) {
+                        limit -= lmb.memory.region[i].size;
+                        continue;
+                }
+
+                lmb.memory.region[i].size = limit;
+                lmb.memory.cnt = i + 1;
+                break;
+        }
+
+        if (lmb.memory.region[0].size < lmb.rmo_size)
+                lmb.rmo_size = lmb.memory.region[0].size;
+
+        /* And truncate any reserves above the limit also. */
+        for (i = 0; i < lmb.reserved.cnt; i++) {
+                p = &lmb.reserved.region[i];
+
+                if (p->base > memory_limit)
+                        p->size = 0;
+                else if ((p->base + p->size) > memory_limit)
+                        p->size = memory_limit - p->base;
+
+                if (p->size == 0) {
+                        lmb_remove_region(&lmb.reserved, i);
+                        i--;
+                }
+        }
+}
+
+int __init lmb_is_reserved(unsigned long addr)
+{
+        int i;
+
+        for (i = 0; i < lmb.reserved.cnt; i++) {
+                unsigned long upper = lmb.reserved.region[i].base +
+                        lmb.reserved.region[i].size - 1;
+                if ((addr >= lmb.reserved.region[i].base) && (addr <= upper))
+                        return 1;
+        }
+        return 0;
+}
-- 
cgit v1.2.3
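
The patch above only introduces the library; the intended call sequence is easiest to see in one place. Below is a minimal sketch of how a platform's early setup code might drive the new API, assuming the declarations exported by linux/lmb.h; the function name, bank addresses, and sizes are invented for illustration and are not part of the patch.

static unsigned long __init example_early_mem_setup(void)
{
        unsigned long scratch;

        lmb_init();                             /* installs the dummy zero-size region */
        lmb_add(0x00000000UL, 0x10000000UL);    /* hypothetical bank 0: 256MB at 0 */
        lmb_add(0x20000000UL, 0x10000000UL);    /* hypothetical bank 1: 256MB at 512MB */
        lmb_analyze();                          /* recomputes lmb.memory.size */

        lmb_reserve(0, 0x00400000UL);           /* e.g. protect a 4MB kernel image in low RAM */

        /* 1MB-aligned, 1MB allocation below 256MB; panics on failure, and the
         * chosen range is recorded in lmb.reserved by __lmb_alloc_base(). */
        scratch = lmb_alloc_base(0x00100000UL, 0x00100000UL, 0x10000000UL);

        return scratch;
}

This is the sequence the later patches in the series exercise when they adjust lmb_add_region() and __lmb_alloc_base().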

From eea89e13a9c61d3928223d2f9bf2295e22e0efb6 Mon Sep 17 00:00:00 2001
From: "David S. Miller"
Date: Wed, 13 Feb 2008 16:57:09 -0800
Subject: [LMB]: Fix bug in __lmb_alloc_base().

We need to check lmb_add_region() for errors; it can run out of
regions, etc.

Also, the size needs to be padded to the given alignment, or else the
lmb.reserved regions don't get expanded; instead we get tons of holes
and eventually run out of regions prematurely.

Signed-off-by: David S. Miller
---
 lib/lmb.c | 8 +++++++-
 1 file changed, 7 insertions(+), 1 deletion(-)

diff --git a/lib/lmb.c b/lib/lmb.c
index 98078b4ec20..6390d63a2a0 100644
--- a/lib/lmb.c
+++ b/lib/lmb.c
@@ -245,6 +245,11 @@ static unsigned long lmb_align_down(unsigned long addr, unsigned long size)
         return addr & ~(size - 1);
 }
 
+static unsigned long lmb_align_up(unsigned long addr, unsigned long size)
+{
+        return (addr + (size - 1)) & ~(size - 1);
+}
+
 unsigned long __init __lmb_alloc_base(unsigned long size, unsigned long align,
                                       unsigned long max_addr)
 {
@@ -281,7 +286,8 @@ unsigned long __init __lmb_alloc_base(unsigned long size, unsigned long align,
         if (i < 0)
                 return 0;
 
-        lmb_add_region(&lmb.reserved, base, size);
+        if (lmb_add_region(&lmb.reserved, base, lmb_align_up(size, align)) < 0)
+                return 0;
 
         return base;
 }
-- 
cgit v1.2.3
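
The hole problem the commit message describes is easiest to see with concrete numbers. The user-space sketch below mirrors the lmb_align_down()/lmb_align_up() arithmetic for two back-to-back allocations; the size 0x2345, alignment 0x1000, and limit 0x10000000 are made-up values chosen only for illustration. Without padding, the two reservations never touch, so lmb_coalesce_regions() never fires.

#include <stdio.h>

static unsigned long align_down(unsigned long a, unsigned long sz) { return a & ~(sz - 1); }
static unsigned long align_up(unsigned long a, unsigned long sz)   { return (a + sz - 1) & ~(sz - 1); }

int main(void)
{
        unsigned long limit = 0x10000000UL, size = 0x2345UL, align = 0x1000UL;
        unsigned long first  = align_down(limit - size, align);   /* 0x0FFFD000 */
        unsigned long second = align_down(first - size, align);   /* 0x0FFFA000 */

        /* Unpadded: second ends at 0x0FFFC345, first starts at 0x0FFFD000. */
        printf("gap left between reservations: 0x%lx bytes\n", first - (second + size));
        /* Padded: second + align_up(size, align) == first, so they coalesce. */
        printf("padded end meets first region? %d\n", second + align_up(size, align) == first);
        return 0;
}

Each unpadded allocation therefore costs a fresh slot in lmb.reserved, which is how MAX_LMB_REGIONS gets exhausted prematurely.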

From 27e6672bb9912d3e3a41cf88e6142d3ae5e534b3 Mon Sep 17 00:00:00 2001
From: Kumar Gala
Date: Wed, 13 Feb 2008 16:58:11 -0800
Subject: [LMB]: Fix initial lmb add region with a non-zero base

If we add to an empty lmb region with a non-zero base, we will not
coalesce the number of regions down to one. This causes problems on
ppc32 for the memory region, as it's assumed to have only one region.

We can fix this easily by special-casing the initial add to just
replace the dummy region.

Signed-off-by: Kumar Gala
Signed-off-by: David S. Miller
---
 lib/lmb.c | 6 ++++++
 1 file changed, 6 insertions(+)

diff --git a/lib/lmb.c b/lib/lmb.c
index 6390d63a2a0..e34a9e586c4 100644
--- a/lib/lmb.c
+++ b/lib/lmb.c
@@ -134,6 +134,12 @@ static long __init lmb_add_region(struct lmb_region *rgn, unsigned long base,
         unsigned long coalesced = 0;
         long adjacent, i;
 
+        if ((rgn->cnt == 1) && (rgn->region[0].size == 0)) {
+                rgn->region[0].base = base;
+                rgn->region[0].size = size;
+                return 0;
+        }
+
         /* First try and coalesce this LMB with another. */
         for (i=0; i < rgn->cnt; i++) {
                 unsigned long rgnbase = rgn->region[i].base;
-- 
cgit v1.2.3
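
The coalescing failure described above falls straight out of lmb_addrs_adjacent(): the zero-size dummy at base 0 is only "adjacent" to a region that also starts at 0. A small user-space reproduction of just that check, using a hypothetical first bank at 1GB (the addresses are invented):

#include <stdio.h>

static long adjacent(unsigned long b1, unsigned long s1,
                     unsigned long b2, unsigned long s2)
{
        if (b2 == b1 + s1) return 1;
        if (b1 == b2 + s2) return -1;
        return 0;
}

int main(void)
{
        /* new region at 1GB vs. the {0, 0} dummy left behind by lmb_init() */
        printf("%ld\n", adjacent(0x40000000UL, 0x10000000UL, 0UL, 0UL));
        /* prints 0: not adjacent, so the old code kept both regions (cnt == 2) */
        return 0;
}

A first bank at base 0 returns -1 ("adjacent below") and gets merged into the dummy, which is why the problem only shows up when the first region starts above zero.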

From e5f270954364a4add74e8445b1db925ac534fcfb Mon Sep 17 00:00:00 2001
From: Becky Bruce
Date: Wed, 13 Feb 2008 16:58:39 -0800
Subject: [LMB]: Make lmb support large physical addressing

Convert the lmb code to use u64 instead of unsigned long for physical
addresses and sizes. This is needed to support large amounts of RAM
on 32-bit systems that support 36-bit physical addressing.

Signed-off-by: Becky Bruce
Signed-off-by: David S. Miller
---
 lib/lmb.c | 93 +++++++++++++++++++++++++++++++--------------------------------
 1 file changed, 46 insertions(+), 47 deletions(-)

diff --git a/lib/lmb.c b/lib/lmb.c
index e34a9e586c4..e3c8dcb04b4 100644
--- a/lib/lmb.c
+++ b/lib/lmb.c
@@ -34,33 +34,34 @@ void lmb_dump_all(void)
 
         DBG("lmb_dump_all:\n");
         DBG("    memory.cnt  = 0x%lx\n", lmb.memory.cnt);
-        DBG("    memory.size = 0x%lx\n", lmb.memory.size);
+        DBG("    memory.size = 0x%llx\n",
+            (unsigned long long)lmb.memory.size);
         for (i=0; i < lmb.memory.cnt ;i++) {
-                DBG("    memory.region[0x%x].base = 0x%lx\n",
-                    i, lmb.memory.region[i].base);
-                DBG("                      .size = 0x%lx\n",
-                    lmb.memory.region[i].size);
+                DBG("    memory.region[0x%x].base = 0x%llx\n",
+                    i, (unsigned long long)lmb.memory.region[i].base);
+                DBG("                      .size = 0x%llx\n",
+                    (unsigned long long)lmb.memory.region[i].size);
         }
 
         DBG("\n    reserved.cnt  = 0x%lx\n", lmb.reserved.cnt);
         DBG("    reserved.size = 0x%lx\n", lmb.reserved.size);
         for (i=0; i < lmb.reserved.cnt ;i++) {
-                DBG("    reserved.region[0x%x].base = 0x%lx\n",
-                    i, lmb.reserved.region[i].base);
-                DBG("                      .size = 0x%lx\n",
-                    lmb.reserved.region[i].size);
+                DBG("    reserved.region[0x%x].base = 0x%llx\n",
+                    i, (unsigned long long)lmb.reserved.region[i].base);
+                DBG("                      .size = 0x%llx\n",
+                    (unsigned long long)lmb.reserved.region[i].size);
         }
 #endif /* DEBUG */
 }
 
-static unsigned long __init lmb_addrs_overlap(unsigned long base1,
-                unsigned long size1, unsigned long base2, unsigned long size2)
+static unsigned long __init lmb_addrs_overlap(u64 base1,
+                u64 size1, u64 base2, u64 size2)
 {
         return ((base1 < (base2+size2)) && (base2 < (base1+size1)));
 }
 
-static long __init lmb_addrs_adjacent(unsigned long base1, unsigned long size1,
-                unsigned long base2, unsigned long size2)
+static long __init lmb_addrs_adjacent(u64 base1, u64 size1,
+                u64 base2, u64 size2)
 {
         if (base2 == base1 + size1)
                 return 1;
@@ -73,10 +74,10 @@ static long __init lmb_addrs_adjacent(unsigned long base1, unsigned long size1,
 static long __init lmb_regions_adjacent(struct lmb_region *rgn,
                 unsigned long r1, unsigned long r2)
 {
-        unsigned long base1 = rgn->region[r1].base;
-        unsigned long size1 = rgn->region[r1].size;
-        unsigned long base2 = rgn->region[r2].base;
-        unsigned long size2 = rgn->region[r2].size;
+        u64 base1 = rgn->region[r1].base;
+        u64 size1 = rgn->region[r1].size;
+        u64 base2 = rgn->region[r2].base;
+        u64 size2 = rgn->region[r2].size;
 
         return lmb_addrs_adjacent(base1, size1, base2, size2);
 }
@@ -128,8 +129,7 @@ void __init lmb_analyze(void)
 }
 
 /* This routine called with relocation disabled. */
-static long __init lmb_add_region(struct lmb_region *rgn, unsigned long base,
-                                  unsigned long size)
+static long __init lmb_add_region(struct lmb_region *rgn, u64 base, u64 size)
 {
         unsigned long coalesced = 0;
         long adjacent, i;
@@ -142,8 +142,8 @@ static long __init lmb_add_region(struct lmb_region *rgn, unsigned long base,
 
         /* First try and coalesce this LMB with another. */
         for (i=0; i < rgn->cnt; i++) {
-                unsigned long rgnbase = rgn->region[i].base;
-                unsigned long rgnsize = rgn->region[i].size;
+                u64 rgnbase = rgn->region[i].base;
+                u64 rgnsize = rgn->region[i].size;
 
                 if ((rgnbase == base) && (rgnsize == size))
                         /* Already have this region, so we're done */
                         return 0;
@@ -190,7 +190,7 @@ static long __init lmb_add_region(struct lmb_region *rgn, unsigned long base,
 }
 
 /* This routine may be called with relocation disabled. */
-long __init lmb_add(unsigned long base, unsigned long size)
+long __init lmb_add(u64 base, u64 size)
 {
         struct lmb_region *_rgn = &(lmb.memory);
 
@@ -202,7 +202,7 @@ long __init lmb_add(unsigned long base, unsigned long size)
 
 }
 
-long __init lmb_reserve(unsigned long base, unsigned long size)
+long __init lmb_reserve(u64 base, u64 size)
 {
         struct lmb_region *_rgn = &(lmb.reserved);
 
@@ -211,14 +211,14 @@ long __init lmb_reserve(unsigned long base, unsigned long size)
         return lmb_add_region(_rgn, base, size);
 }
 
-long __init lmb_overlaps_region(struct lmb_region *rgn, unsigned long base,
-                                unsigned long size)
+long __init lmb_overlaps_region(struct lmb_region *rgn, u64 base,
+                                u64 size)
 {
         unsigned long i;
 
         for (i=0; i < rgn->cnt; i++) {
-                unsigned long rgnbase = rgn->region[i].base;
-                unsigned long rgnsize = rgn->region[i].size;
+                u64 rgnbase = rgn->region[i].base;
+                u64 rgnsize = rgn->region[i].size;
                 if ( lmb_addrs_overlap(base,size,rgnbase,rgnsize) ) {
                         break;
                 }
@@ -227,40 +227,38 @@ long __init lmb_overlaps_region(struct lmb_region *rgn, unsigned long base,
         return (i < rgn->cnt) ? i : -1;
 }
 
-unsigned long __init lmb_alloc(unsigned long size, unsigned long align)
+u64 __init lmb_alloc(u64 size, u64 align)
 {
         return lmb_alloc_base(size, align, LMB_ALLOC_ANYWHERE);
 }
 
-unsigned long __init lmb_alloc_base(unsigned long size, unsigned long align,
-                                    unsigned long max_addr)
+u64 __init lmb_alloc_base(u64 size, u64 align, u64 max_addr)
 {
-        unsigned long alloc;
+        u64 alloc;
 
         alloc = __lmb_alloc_base(size, align, max_addr);
 
         if (alloc == 0)
-                panic("ERROR: Failed to allocate 0x%lx bytes below 0x%lx.\n",
-                      size, max_addr);
+                panic("ERROR: Failed to allocate 0x%llx bytes below 0x%llx.\n",
+                      (unsigned long long) size, (unsigned long long) max_addr);
 
         return alloc;
 }
 
-static unsigned long lmb_align_down(unsigned long addr, unsigned long size)
+static u64 lmb_align_down(u64 addr, u64 size)
 {
         return addr & ~(size - 1);
 }
 
-static unsigned long lmb_align_up(unsigned long addr, unsigned long size)
+static u64 lmb_align_up(u64 addr, u64 size)
 {
         return (addr + (size - 1)) & ~(size - 1);
 }
 
-unsigned long __init __lmb_alloc_base(unsigned long size, unsigned long align,
-                                      unsigned long max_addr)
+u64 __init __lmb_alloc_base(u64 size, u64 align, u64 max_addr)
 {
         long i, j;
-        unsigned long base = 0;
+        u64 base = 0;
 
         BUG_ON(0 == size);
 
@@ -269,8 +267,8 @@ unsigned long __init __lmb_alloc_base(unsigned long size, unsigned long align,
                 max_addr = LMB_REAL_LIMIT;
 
         for (i = lmb.memory.cnt-1; i >= 0; i--) {
-                unsigned long lmbbase = lmb.memory.region[i].base;
-                unsigned long lmbsize = lmb.memory.region[i].size;
+                u64 lmbbase = lmb.memory.region[i].base;
+                u64 lmbsize = lmb.memory.region[i].size;
 
                 if (max_addr == LMB_ALLOC_ANYWHERE)
                         base = lmb_align_down(lmbbase + lmbsize - size, align);
@@ -299,12 +297,12 @@ unsigned long __init __lmb_alloc_base(unsigned long size, unsigned long align,
 }
 
 /* You must call lmb_analyze() before this. */
-unsigned long __init lmb_phys_mem_size(void)
+u64 __init lmb_phys_mem_size(void)
 {
         return lmb.memory.size;
 }
 
-unsigned long __init lmb_end_of_DRAM(void)
+u64 __init lmb_end_of_DRAM(void)
 {
         int idx = lmb.memory.cnt - 1;
 
@@ -312,9 +310,10 @@ unsigned long __init lmb_end_of_DRAM(void)
 }
 
 /* You must call lmb_analyze() after this. */
-void __init lmb_enforce_memory_limit(unsigned long memory_limit)
+void __init lmb_enforce_memory_limit(u64 memory_limit)
 {
-        unsigned long i, limit;
+        unsigned long i;
+        u64 limit;
         struct lmb_property *p;
 
         if (!memory_limit)
@@ -352,13 +351,13 @@ void __init lmb_enforce_memory_limit(unsigned long memory_limit)
         }
 }
 
-int __init lmb_is_reserved(unsigned long addr)
+int __init lmb_is_reserved(u64 addr)
 {
         int i;
 
         for (i = 0; i < lmb.reserved.cnt; i++) {
-                unsigned long upper = lmb.reserved.region[i].base +
-                        lmb.reserved.region[i].size - 1;
+                u64 upper = lmb.reserved.region[i].base +
+                        lmb.reserved.region[i].size - 1;
                 if ((addr >= lmb.reserved.region[i].base) && (addr <= upper))
                         return 1;
         }
-- 
cgit v1.2.3
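
The motivation for the u64 conversion is simply that a 36-bit physical address does not fit in a 32-bit unsigned long. A tiny user-space illustration follows; the 8GB bank address is made up, and the truncation it demonstrates only occurs on a build where sizeof(long) == 4.

#include <stdio.h>

int main(void)
{
        unsigned long long bank_base = 0x200000000ULL;  /* hypothetical bank at 8GB */
        unsigned long truncated = (unsigned long)bank_base;

        /* On a 32-bit build this prints "0x200000000 -> 0x0". */
        printf("0x%llx -> 0x%lx\n", bank_base, truncated);
        return 0;
}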

From 74b20dad1c4cc0fd13ceca62fbab808919e1a7ea Mon Sep 17 00:00:00 2001
From: Kumar Gala
Date: Tue, 19 Feb 2008 21:28:18 -0800
Subject: [LMB]: Fix lmb_add_region if region should be added at the head

We introduced a bug in fixing lmb_add_region to handle an initial region
being non-zero. Before that fix it was impossible to insert a region at
the head of the list, since the first region always started at zero.

Now that it's possible for the first region to be non-zero, we need to
check whether the new region should be added at the head of the list
and, if so, actually add it.

Signed-off-by: Kumar Gala
Signed-off-by: David S. Miller
---
 lib/lmb.c | 5 +++++
 1 file changed, 5 insertions(+)

diff --git a/lib/lmb.c b/lib/lmb.c
index e3c8dcb04b4..3c43b95fef4 100644
--- a/lib/lmb.c
+++ b/lib/lmb.c
@@ -184,6 +184,11 @@ static long __init lmb_add_region(struct lmb_region *rgn, u64 base, u64 size)
                         break;
                 }
         }
+
+        if (base < rgn->region[0].base) {
+                rgn->region[0].base = base;
+                rgn->region[0].size = size;
+        }
         rgn->cnt++;
 
         return 0;
-- 
cgit v1.2.3
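
The case this last fix covers is worth spelling out. The sketch below is a walk-through of lmb_add_region() using the API as patched above, not standalone runnable code; the bank addresses are invented for illustration.

        /* One real region exists (it replaced the dummy), then a lower,
         * non-adjacent bank is registered. */
        lmb_init();
        lmb_add(0x40000000UL, 0x10000000UL);    /* region[0] = {1GB, 256MB} */
        lmb_add(0x10000000UL, 0x10000000UL);    /* lower bank, not adjacent to region[0] */

        /* The sorted-insert loop only copies region[0] up into region[1].  Without
         * the new check, region[0] still holds {1GB, 256MB}, region[1] is a duplicate
         * of it, and the 256MB bank at 0x10000000 is silently lost even though cnt
         * is bumped to 2.  With the check, region[0] = {0x10000000, 256MB} and
         * region[1] = {0x40000000, 256MB}, as intended. */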