aboutsummaryrefslogtreecommitdiffstats
path: root/mm
diff options
context:
space:
mode:
authorPavel Machek <pavel@ucw.cz>2005-01-03 05:00:51 -0800
committerLinus Torvalds <torvalds@ppc970.osdl.org>2005-01-03 05:00:51 -0800
commiteaefadeb9eda1611f34b186fcea3465387032ebc (patch)
treee118590101dd10c1d443688515f0f728777ba45f /mm
parented4a04b50ed5b7958d79b10cae085933a61b665c (diff)
downloadhistory-eaefadeb9eda1611f34b186fcea3465387032ebc.tar.gz
[PATCH] swsusp: Kill O(n^2) algorithm in swsusp
Some machines are spending minutes of CPU time during suspend in a stupid
O(n^2) algorithm. This patch replaces it with an O(n) algorithm, making
swsusp usable for some people.

Signed-off-by: Pavel Machek <pavel@suse.cz>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
Diffstat (limited to 'mm')
-rw-r--r--  mm/page_alloc.c  30
1 file changed, 17 insertions(+), 13 deletions(-)
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index ab96578eb29de2..5c0b0f827c58bd 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -442,26 +442,30 @@ static void __drain_pages(unsigned int cpu)
#endif /* CONFIG_PM || CONFIG_HOTPLUG_CPU */
#ifdef CONFIG_PM
-int is_head_of_free_region(struct page *page)
+
+void mark_free_pages(struct zone *zone)
{
- struct zone *zone = page_zone(page);
- unsigned long flags;
+ unsigned long zone_pfn, flags;
int order;
struct list_head *curr;
- /*
- * Should not matter as we need quiescent system for
- * suspend anyway, but...
- */
+ if (!zone->spanned_pages)
+ return;
+
spin_lock_irqsave(&zone->lock, flags);
+ for (zone_pfn = 0; zone_pfn < zone->spanned_pages; ++zone_pfn)
+ ClearPageNosaveFree(pfn_to_page(zone_pfn + zone->zone_start_pfn));
+
for (order = MAX_ORDER - 1; order >= 0; --order)
- list_for_each(curr, &zone->free_area[order].free_list)
- if (page == list_entry(curr, struct page, lru)) {
- spin_unlock_irqrestore(&zone->lock, flags);
- return 1 << order;
- }
+ list_for_each(curr, &zone->free_area[order].free_list) {
+ unsigned long start_pfn, i;
+
+ start_pfn = page_to_pfn(list_entry(curr, struct page, lru));
+
+ for (i=0; i < (1<<order); i++)
+ SetPageNosaveFree(pfn_to_page(start_pfn+i));
+ }
spin_unlock_irqrestore(&zone->lock, flags);
- return 0;
}
/*