/*
 * linux/kernel/power/swsusp.c
 *
 * This file provides code to write suspend image to swap and read it back.
 *
 * Copyright (C) 1998-2001 Gabor Kuti <seasons@fornax.hu>
 * Copyright (C) 1998,2001-2005 Pavel Machek <pavel@suse.cz>
 *
 * This file is released under the GPLv2.
 *
 * I'd like to thank the following people for their work:
 *
 * Pavel Machek <pavel@ucw.cz>:
 * Modifications, defectiveness pointing, being with me at the very beginning,
 * suspend to swap space, stop all tasks. Port to 2.4.18-ac and 2.5.17.
 *
 * Steve Doddi <dirk@loth.demon.co.uk>:
 * Support the possibility of hardware state restoring.
 *
 * Raph <grey.havens@earthling.net>:
 * Support for preserving states of network devices and virtual console
 * (including X and svgatextmode)
 *
 * Kurt Garloff <garloff@suse.de>:
 * Straightened the critical function in order to prevent compilers from
 * playing tricks with local variables.
 *
 * Andreas Mohr <a.mohr@mailto.de>
 *
 * Alex Badea <vampire@go.ro>:
 * Fixed runaway init
 *
 * Rafael J. Wysocki <rjw@sisk.pl>
 * Reworked the freeing of memory and the handling of swap
 *
 * More state savers are welcome. Especially for the scsi layer...
 *
 * For TODOs, FIXMEs also look in Documentation/power/swsusp.txt
 */

#include <linux/mm.h>
#include <linux/suspend.h>
#include <linux/spinlock.h>
#include <linux/kernel.h>
#include <linux/major.h>
#include <linux/swap.h>
#include <linux/pm.h>
#include <linux/swapops.h>
#include <linux/bootmem.h>
#include <linux/syscalls.h>
#include <linux/highmem.h>
#include <linux/time.h>
#include <linux/rbtree.h>
#include <linux/io.h>

#include "power.h"

/*
 * Preferred image size in bytes (tunable via /sys/power/image_size).
 * When it is set to N, swsusp will do its best to ensure the image
 * size will not exceed N bytes, but if that is impossible, it will
 * try to create the smallest image possible.
 */
unsigned long image_size = 500 * 1024 * 1024;

int in_suspend __nosavedata = 0;

/**
 * The following functions are used for tracking the allocated
 * swap pages, so that they can be freed in case of an error.
 */

struct swsusp_extent {
	struct rb_node node;
	unsigned long start;
	unsigned long end;
};

static struct rb_root swsusp_extents = RB_ROOT;

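/**
 * swsusp_extents_insert - register a swap offset in the tree of allocated
 * extents, merging it into an adjacent extent whenever possible.
 *
 * Returns 0 on success, -EINVAL if the offset is already registered and
 * -ENOMEM if a new extent cannot be allocated.
 */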
static int swsusp_extents_insert(unsigned long swap_offset)
{
	struct rb_node **new = &(swsusp_extents.rb_node);
	struct rb_node *parent = NULL;
	struct swsusp_extent *ext;

	/* Figure out where to put the new node */
	while (*new) {
		ext = container_of(*new, struct swsusp_extent, node);
		parent = *new;
		if (swap_offset < ext->start) {
			/* Try to merge */
			if (swap_offset == ext->start - 1) {
				ext->start--;
				return 0;
			}
			new = &((*new)->rb_left);
		} else if (swap_offset > ext->end) {
			/* Try to merge */
			if (swap_offset == ext->end + 1) {
				ext->end++;
				return 0;
			}
			new = &((*new)->rb_right);
		} else {
			/* It already is in the tree */
			return -EINVAL;
		}
	}
	/* Add the new node and rebalance the tree. */
	ext = kzalloc(sizeof(struct swsusp_extent), GFP_KERNEL);
	if (!ext)
		return -ENOMEM;

	ext->start = swap_offset;
	ext->end = swap_offset;
	rb_link_node(&ext->node, parent, new);
	rb_insert_color(&ext->node, &swsusp_extents);
	return 0;
}

/**
 * alloc_swapdev_block - allocate a swap page and register that it has
 * been allocated, so that it can be freed in case of an error.
 */

sector_t alloc_swapdev_block(int swap)
{
	unsigned long offset;

	offset = swp_offset(get_swap_page_of_type(swap));
	if (offset) {
		if (swsusp_extents_insert(offset))
			swap_free(swp_entry(swap, offset));
		else
			return swapdev_block(swap, offset);
	}
	return 0;
}

/**
 * free_all_swap_pages - free swap pages allocated for saving image data.
 * It also frees the extents used to register which swap entries had been
 * allocated.
 */

void free_all_swap_pages(int swap)
{
	struct rb_node *node;

	while ((node = swsusp_extents.rb_node)) {
		struct swsusp_extent *ext;
		unsigned long offset;

		ext = container_of(node, struct swsusp_extent, node);
		rb_erase(node, &swsusp_extents);
		for (offset = ext->start; offset <= ext->end; offset++)
			swap_free(swp_entry(swap, offset));

		kfree(ext);
	}
}

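/**
 * swsusp_swap_in_use - report whether any swap pages are still registered
 * in the extents tree, i.e. still allocated for the hibernation image.
 */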
int swsusp_swap_in_use(void)
{
	return (swsusp_extents.rb_node != NULL);
}

/**
 * swsusp_show_speed - print the time elapsed between two events represented by
 * @start and @stop
 *
 * @nr_pages - number of pages processed between @start and @stop
 * @msg - introductory message to print
 */

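/*
 * Example output (illustrative, made-up numbers):
 *	PM: Freed 100000 kbytes in 1.00 seconds (100.00 MB/s)
 */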
void swsusp_show_speed(struct timeval *start, struct timeval *stop,
			unsigned nr_pages, char *msg)
{
	s64 elapsed_centisecs64;
	int centisecs;
	int k;
	int kps;

	elapsed_centisecs64 = timeval_to_ns(stop) - timeval_to_ns(start);
	do_div(elapsed_centisecs64, NSEC_PER_SEC / 100);
	centisecs = elapsed_centisecs64;
	if (centisecs == 0)
		centisecs = 1;	/* avoid div-by-zero */
	k = nr_pages * (PAGE_SIZE / 1024);
	kps = (k * 100) / centisecs;
	printk(KERN_INFO "PM: %s %d kbytes in %d.%02d seconds (%d.%02d MB/s)\n",
			msg, k,
			centisecs / 100, centisecs % 100,
			kps / 1000, (kps % 1000) / 10);
}

/**
 * swsusp_shrink_memory - Try to free as much memory as needed
 *
 * ... but do not OOM-kill anyone
 *
 * Notice: all userland tasks should be stopped before this function is
 * called, or livelock is possible.
 */

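/*
 * Memory is reclaimed in chunks of at most SHRINK_BITE pages per call to
 * shrink_all_memory(), which lets the loop in swsusp_shrink_memory()
 * re-evaluate its target and update its progress indicator between calls.
 */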
#define SHRINK_BITE	10000
static inline unsigned long __shrink_memory(long tmp)
{
	if (tmp > SHRINK_BITE)
		tmp = SHRINK_BITE;
	return shrink_all_memory(tmp);
}

int swsusp_shrink_memory(void)
{
	long tmp;
	struct zone *zone;
	unsigned long pages = 0;
	unsigned int i = 0;
	char *p = "-\\|/";
	struct timeval start, stop;

	printk(KERN_INFO "PM: Shrinking memory... ");
	do_gettimeofday(&start);
	do {
		long size, highmem_size;

		highmem_size = count_highmem_pages();
		size = count_data_pages() + PAGES_FOR_IO + SPARE_PAGES;
		tmp = size;
		size += highmem_size;
		for_each_populated_zone(zone) {
			tmp += snapshot_additional_pages(zone);
			if (is_highmem(zone)) {
				highmem_size -=
					zone_page_state(zone, NR_FREE_PAGES);
			} else {
				tmp -= zone_page_state(zone, NR_FREE_PAGES);
				tmp += zone->lowmem_reserve[ZONE_NORMAL];
			}
		}

		if (highmem_size < 0)
			highmem_size = 0;

		tmp += highmem_size;
		if (tmp > 0) {
			/* Not enough free memory to hold the image data */
			tmp = __shrink_memory(tmp);
			if (!tmp)
				return -ENOMEM;
			pages += tmp;
		} else if (size > image_size / PAGE_SIZE) {
			/* Enough memory, but the image exceeds image_size */
			tmp = __shrink_memory(size - (image_size / PAGE_SIZE));
			pages += tmp;
		}
		printk("\b%c", p[i++ % 4]);
	} while (tmp > 0);
	do_gettimeofday(&stop);
	printk("\bdone (%lu pages freed)\n", pages);
	swsusp_show_speed(&start, &stop, pages, "Freed");

	return 0;
}

/*
 * Platforms, like ACPI, may want us to save some memory used by them during
 * hibernation and to restore the contents of this memory during the subsequent
 * resume.  The code below implements a mechanism allowing us to do that.
 */

struct nvs_page {
	unsigned long phys_start;	/* physical address of this chunk */
	unsigned int size;		/* number of bytes in this chunk */
	void *kaddr;			/* ioremap()ed address, set when saving */
	void *data;			/* RAM page holding the saved contents */
	struct list_head node;
};

static LIST_HEAD(nvs_list);

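/*
 * Expected call order (a sketch inferred from the functions below; the
 * actual call sites live in the platform code, e.g. ACPI):
 *
 *	hibernate_nvs_register()	-- register each NVS region
 *	hibernate_nvs_alloc()		-- allocate pages before hibernation
 *	hibernate_nvs_save()		-- copy the regions into those pages
 *	hibernate_nvs_restore()		-- copy them back during resume
 *	hibernate_nvs_free()		-- release the pages and mappings
 */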
/**
 * hibernate_nvs_register - register platform NVS memory region to save
 * @start - physical address of the region
 * @size - size of the region
 *
 * The NVS region need not be page-aligned (both ends) and we arrange
 * things so that the data from page-aligned addresses in this region will
 * be copied into separate RAM pages.
 */
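/*
 * For example (assuming 4 KB pages, made-up addresses): registering
 * start = 0x1ff0, size = 0x20 creates two 16-byte entries, one for the
 * bytes at 0x1ff0 and one for the bytes at 0x2000.
 */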
int hibernate_nvs_register(unsigned long start, unsigned long size)
{
	struct nvs_page *entry, *next;

	while (size > 0) {
		unsigned int nr_bytes;

		entry = kzalloc(sizeof(struct nvs_page), GFP_KERNEL);
		if (!entry)
			goto Error;

		list_add_tail(&entry->node, &nvs_list);
		entry->phys_start = start;
		nr_bytes = PAGE_SIZE - (start & ~PAGE_MASK);
		entry->size = (size < nr_bytes) ? size : nr_bytes;

		start += entry->size;
		size -= entry->size;
	}
	return 0;

 Error:
	list_for_each_entry_safe(entry, next, &nvs_list, node) {
		list_del(&entry->node);
		kfree(entry);
	}
	return -ENOMEM;
}

/**
 * hibernate_nvs_free - free data pages allocated for saving NVS regions
 */
void hibernate_nvs_free(void)
{
	struct nvs_page *entry;

	list_for_each_entry(entry, &nvs_list, node)
		if (entry->data) {
			free_page((unsigned long)entry->data);
			entry->data = NULL;
			if (entry->kaddr) {
				iounmap(entry->kaddr);
				entry->kaddr = NULL;
			}
		}
}

/**
 * hibernate_nvs_alloc - allocate memory necessary for saving NVS regions
 */
int hibernate_nvs_alloc(void)
{
	struct nvs_page *entry;

	list_for_each_entry(entry, &nvs_list, node) {
		entry->data = (void *)__get_free_page(GFP_KERNEL);
		if (!entry->data) {
			hibernate_nvs_free();
			return -ENOMEM;
		}
	}
	return 0;
}

/**
 * hibernate_nvs_save - save NVS memory regions
 */
void hibernate_nvs_save(void)
{
	struct nvs_page *entry;

	printk(KERN_INFO "PM: Saving platform NVS memory\n");

	list_for_each_entry(entry, &nvs_list, node)
		if (entry->data) {
			entry->kaddr = ioremap(entry->phys_start, entry->size);
			memcpy(entry->data, entry->kaddr, entry->size);
		}
}

370
371/**
372 * hibernate_nvs_restore - restore NVS memory regions
373 *
374 * This function is going to be called with interrupts disabled, so it
375 * cannot iounmap the virtual addresses used to access the NVS region.
376 */
377void hibernate_nvs_restore(void)
378{
379 struct nvs_page *entry;
380
381 printk(KERN_INFO "PM: Restoring platform NVS memory\n");
382
383 list_for_each_entry(entry, &nvs_list, node)
384 if (entry->data)
385 memcpy(entry->kaddr, entry->data, entry->size);
386}