path: root/kernel/power/swsusp.c
author	Rafael J. Wysocki <rjw@sisk.pl>	2007-05-06 14:50:47 -0700
committer	Linus Torvalds <torvalds@woody.linux-foundation.org>	2007-05-07 12:12:59 -0700
commit	d1d241cc2c5feec057c370aa71637380b1b945d5
tree	69c07d4c7a8b52b9ee6efba1511f3b4f8f79e6bb	/kernel/power/swsusp.c
parent	726162b5dad154a90dad51c0185b891312de5757
swsusp: use rbtree for tracking allocated swap
Make swsusp use extents instead of a bitmap to trace swap pages allocated for saving the image (the tracking is only needed in case there's an error, so that the allocated swap pages can be released). This should allow us to reduce the memory usage, practically always, and improve performance.

Signed-off-by: Rafael J. Wysocki <rjw@sisk.pl>
Cc: Nigel Cunningham <nigel@nigel.suspend2.net>
Cc: Pavel Machek <pavel@ucw.cz>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'kernel/power/swsusp.c')
-rw-r--r--	kernel/power/swsusp.c	137
1 file changed, 73 insertions(+), 64 deletions(-)
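The diff below is the actual change; for orientation, here is a minimal user-space sketch of the extent-merging scheme the commit message describes. It follows the same insert logic as swsusp_extents_insert(), but uses a plain, unbalanced binary search tree rather than the kernel's rbtree, and every identifier in it is hypothetical.

/*
 * Minimal user-space sketch of the extent-merging idea this patch introduces.
 * It mirrors swsusp_extents_insert(), but with a plain (unbalanced) binary
 * search tree instead of the kernel's rbtree; the program only prints the
 * extents it ends up with.
 */
#include <stdio.h>
#include <stdlib.h>

struct extent {
	unsigned long start, end;
	struct extent *left, *right;
};

static struct extent *root;

/* Insert one offset, merging it into an adjacent extent when possible. */
static int extent_insert(unsigned long offset)
{
	struct extent **link = &root;
	struct extent *ext;

	while ((ext = *link)) {
		if (offset < ext->start) {
			if (offset == ext->start - 1) {	/* extend downwards */
				ext->start--;
				return 0;
			}
			link = &ext->left;
		} else if (offset > ext->end) {
			if (offset == ext->end + 1) {	/* extend upwards */
				ext->end++;
				return 0;
			}
			link = &ext->right;
		} else {
			return -1;	/* offset is already tracked */
		}
	}
	ext = calloc(1, sizeof(*ext));
	if (!ext)
		return -1;
	ext->start = ext->end = offset;
	*link = ext;
	return 0;
}

/* In-order walk: prints the extents from lowest to highest offset. */
static void extent_dump(const struct extent *ext)
{
	if (!ext)
		return;
	extent_dump(ext->left);
	printf("[%lu..%lu]\n", ext->start, ext->end);
	extent_dump(ext->right);
}

/* Post-order teardown, loosely analogous to free_all_swap_pages(). */
static void extent_free_all(struct extent *ext)
{
	if (!ext)
		return;
	extent_free_all(ext->left);
	extent_free_all(ext->right);
	free(ext);
}

int main(void)
{
	unsigned long offsets[] = { 10, 11, 12, 40, 41, 7 };
	size_t i;

	for (i = 0; i < sizeof(offsets) / sizeof(offsets[0]); i++)
		extent_insert(offsets[i]);
	extent_dump(root);	/* prints [7..7], [10..12], [40..41] */
	extent_free_all(root);
	return 0;
}

Consecutive offsets collapse into a single node per contiguous run, which is where the memory saving over a per-page bitmap comes from; note that, like the kernel code, the sketch does not coalesce two existing extents that become adjacent after an extension.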
diff --git a/kernel/power/swsusp.c b/kernel/power/swsusp.c
index 175370824f3..1109023d835 100644
--- a/kernel/power/swsusp.c
+++ b/kernel/power/swsusp.c
@@ -50,6 +50,7 @@
#include <linux/syscalls.h>
#include <linux/highmem.h>
#include <linux/time.h>
+#include <linux/rbtree.h>
#include "power.h"
@@ -74,72 +75,69 @@ static inline unsigned int count_highmem_pages(void) { return 0; }
/**
* The following functions are used for tracing the allocated
* swap pages, so that they can be freed in case of an error.
- *
- * The functions operate on a linked bitmap structure defined
- * in power.h
*/
-void free_bitmap(struct bitmap_page *bitmap)
-{
- struct bitmap_page *bp;
+struct swsusp_extent {
+ struct rb_node node;
+ unsigned long start;
+ unsigned long end;
+};
- while (bitmap) {
- bp = bitmap->next;
- free_page((unsigned long)bitmap);
- bitmap = bp;
- }
-}
+static struct rb_root swsusp_extents = RB_ROOT;
-struct bitmap_page *alloc_bitmap(unsigned int nr_bits)
+static int swsusp_extents_insert(unsigned long swap_offset)
{
- struct bitmap_page *bitmap, *bp;
- unsigned int n;
-
- if (!nr_bits)
- return NULL;
-
- bitmap = (struct bitmap_page *)get_zeroed_page(GFP_KERNEL);
- bp = bitmap;
- for (n = BITMAP_PAGE_BITS; n < nr_bits; n += BITMAP_PAGE_BITS) {
- bp->next = (struct bitmap_page *)get_zeroed_page(GFP_KERNEL);
- bp = bp->next;
- if (!bp) {
- free_bitmap(bitmap);
- return NULL;
+ struct rb_node **new = &(swsusp_extents.rb_node);
+ struct rb_node *parent = NULL;
+ struct swsusp_extent *ext;
+
+ /* Figure out where to put the new node */
+ while (*new) {
+ ext = container_of(*new, struct swsusp_extent, node);
+ parent = *new;
+ if (swap_offset < ext->start) {
+ /* Try to merge */
+ if (swap_offset == ext->start - 1) {
+ ext->start--;
+ return 0;
+ }
+ new = &((*new)->rb_left);
+ } else if (swap_offset > ext->end) {
+ /* Try to merge */
+ if (swap_offset == ext->end + 1) {
+ ext->end++;
+ return 0;
+ }
+ new = &((*new)->rb_right);
+ } else {
+ /* It already is in the tree */
+ return -EINVAL;
}
}
- return bitmap;
-}
-
-static int bitmap_set(struct bitmap_page *bitmap, unsigned long bit)
-{
- unsigned int n;
-
- n = BITMAP_PAGE_BITS;
- while (bitmap && n <= bit) {
- n += BITMAP_PAGE_BITS;
- bitmap = bitmap->next;
- }
- if (!bitmap)
- return -EINVAL;
- n -= BITMAP_PAGE_BITS;
- bit -= n;
- n = 0;
- while (bit >= BITS_PER_CHUNK) {
- bit -= BITS_PER_CHUNK;
- n++;
- }
- bitmap->chunks[n] |= (1UL << bit);
+ /* Add the new node and rebalance the tree. */
+ ext = kzalloc(sizeof(struct swsusp_extent), GFP_KERNEL);
+ if (!ext)
+ return -ENOMEM;
+
+ ext->start = swap_offset;
+ ext->end = swap_offset;
+ rb_link_node(&ext->node, parent, new);
+ rb_insert_color(&ext->node, &swsusp_extents);
return 0;
}
-sector_t alloc_swapdev_block(int swap, struct bitmap_page *bitmap)
+/**
+ * alloc_swapdev_block - allocate a swap page and register that it has
+ * been allocated, so that it can be freed in case of an error.
+ */
+
+sector_t alloc_swapdev_block(int swap)
{
unsigned long offset;
offset = swp_offset(get_swap_page_of_type(swap));
if (offset) {
- if (bitmap_set(bitmap, offset))
+ if (swsusp_extents_insert(offset))
swap_free(swp_entry(swap, offset));
else
return swapdev_block(swap, offset);
@@ -147,23 +145,34 @@ sector_t alloc_swapdev_block(int swap, struct bitmap_page *bitmap)
return 0;
}
-void free_all_swap_pages(int swap, struct bitmap_page *bitmap)
+/**
+ * free_all_swap_pages - free swap pages allocated for saving image data.
+ * It also frees the extents used to register which swap entries had been
+ * allocated.
+ */
+
+void free_all_swap_pages(int swap)
{
- unsigned int bit, n;
- unsigned long test;
-
- bit = 0;
- while (bitmap) {
- for (n = 0; n < BITMAP_PAGE_CHUNKS; n++)
- for (test = 1UL; test; test <<= 1) {
- if (bitmap->chunks[n] & test)
- swap_free(swp_entry(swap, bit));
- bit++;
- }
- bitmap = bitmap->next;
+ struct rb_node *node;
+
+ while ((node = swsusp_extents.rb_node)) {
+ struct swsusp_extent *ext;
+ unsigned long offset;
+
+ ext = container_of(node, struct swsusp_extent, node);
+ rb_erase(node, &swsusp_extents);
+ for (offset = ext->start; offset <= ext->end; offset++)
+ swap_free(swp_entry(swap, offset));
+
+ kfree(ext);
}
}
+int swsusp_swap_in_use(void)
+{
+ return (swsusp_extents.rb_node != NULL);
+}
+
/**
* swsusp_show_speed - print the time elapsed between two events represented by
* @start and @stop