/*
 * linux/kernel/power/swsusp.c
 *
 * This file provides code to write suspend image to swap and read it back.
 *
 * Copyright (C) 1998-2001 Gabor Kuti <seasons@fornax.hu>
 * Copyright (C) 1998,2001-2005 Pavel Machek <pavel@suse.cz>
 *
 * This file is released under the GPLv2.
 *
 * I'd like to thank the following people for their work:
 *
 * Pavel Machek <pavel@ucw.cz>:
 * Modifications, defectiveness pointing, being with me at the very beginning,
 * suspend to swap space, stop all tasks. Port to 2.4.18-ac and 2.5.17.
 *
 * Steve Doddi <dirk@loth.demon.co.uk>:
 * Support the possibility of hardware state restoring.
 *
 * Raph <grey.havens@earthling.net>:
 * Support for preserving states of network devices and virtual console
 * (including X and svgatextmode)
 *
 * Kurt Garloff <garloff@suse.de>:
 * Straightened the critical function in order to prevent compilers from
 * playing tricks with local variables.
 *
 * Andreas Mohr <a.mohr@mailto.de>
 *
 * Alex Badea <vampire@go.ro>:
 * Fixed runaway init
 *
 * Rafael J. Wysocki <rjw@sisk.pl>
 * Reworked the freeing of memory and the handling of swap
 *
 * More state savers are welcome. Especially for the scsi layer...
 *
 * For TODOs, FIXMEs also look in Documentation/power/swsusp.txt
 */

#include <linux/mm.h>
#include <linux/suspend.h>
#include <linux/spinlock.h>
#include <linux/kernel.h>
#include <linux/major.h>
#include <linux/swap.h>
#include <linux/pm.h>
#include <linux/swapops.h>
#include <linux/bootmem.h>
#include <linux/syscalls.h>
#include <linux/highmem.h>
#include <linux/time.h>
#include <linux/rbtree.h>
#include <linux/io.h>

#include "power.h"

/*
 * Preferred image size in bytes (tunable via /sys/power/image_size).
 * When it is set to N, swsusp will do its best to ensure the image
 * size will not exceed N bytes, but if that is impossible, it will
 * try to create the smallest image possible.
 */
unsigned long image_size = 500 * 1024 * 1024;

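/*
 * in_suspend is marked __nosavedata so that it lives in a page that is not
 * part of the hibernation image; its value therefore survives the image
 * being restored and can be used to tell the suspend path apart from the
 * resume path.
 */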
int in_suspend __nosavedata = 0;

/**
 * The following functions are used for tracking the allocated
 * swap pages, so that they can be freed in case of an error.
 */

struct swsusp_extent {
	struct rb_node node;
	unsigned long start;
	unsigned long end;
};

static struct rb_root swsusp_extents = RB_ROOT;

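/*
 * swsusp_extents_insert - insert the given swap offset into the tree of
 * allocated swap extents, merging it with an adjacent extent when possible.
 */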
static int swsusp_extents_insert(unsigned long swap_offset)
{
	struct rb_node **new = &(swsusp_extents.rb_node);
	struct rb_node *parent = NULL;
	struct swsusp_extent *ext;

	/* Figure out where to put the new node */
	while (*new) {
		ext = container_of(*new, struct swsusp_extent, node);
		parent = *new;
		if (swap_offset < ext->start) {
			/* Try to merge */
			if (swap_offset == ext->start - 1) {
				ext->start--;
				return 0;
			}
			new = &((*new)->rb_left);
		} else if (swap_offset > ext->end) {
			/* Try to merge */
			if (swap_offset == ext->end + 1) {
				ext->end++;
				return 0;
			}
			new = &((*new)->rb_right);
		} else {
			/* It already is in the tree */
			return -EINVAL;
		}
	}
	/* Add the new node and rebalance the tree. */
	ext = kzalloc(sizeof(struct swsusp_extent), GFP_KERNEL);
	if (!ext)
		return -ENOMEM;

	ext->start = swap_offset;
	ext->end = swap_offset;
	rb_link_node(&ext->node, parent, new);
	rb_insert_color(&ext->node, &swsusp_extents);
	return 0;
}

/**
 * alloc_swapdev_block - allocate a swap page and register that it has
 * been allocated, so that it can be freed in case of an error.
 */

sector_t alloc_swapdev_block(int swap)
{
	unsigned long offset;

	offset = swp_offset(get_swap_page_of_type(swap));
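	/* A zero offset means no swap page could be allocated. */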
	if (offset) {
		if (swsusp_extents_insert(offset))
			swap_free(swp_entry(swap, offset));
		else
			return swapdev_block(swap, offset);
	}
	return 0;
}

/**
 * free_all_swap_pages - free swap pages allocated for saving image data.
 * It also frees the extents used to register which swap entries had been
 * allocated.
 */

void free_all_swap_pages(int swap)
{
	struct rb_node *node;

	while ((node = swsusp_extents.rb_node)) {
		struct swsusp_extent *ext;
		unsigned long offset;

		ext = container_of(node, struct swsusp_extent, node);
		rb_erase(node, &swsusp_extents);
		for (offset = ext->start; offset <= ext->end; offset++)
			swap_free(swp_entry(swap, offset));

		kfree(ext);
	}
}

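/**
 * swsusp_swap_in_use - report whether any swap pages are still registered
 * in the extents tree, i.e. have been allocated for the image.
 */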
int swsusp_swap_in_use(void)
{
	return (swsusp_extents.rb_node != NULL);
}

/**
 * swsusp_show_speed - print the time elapsed between two events represented by
 * @start and @stop
 *
 * @nr_pages - number of pages processed between @start and @stop
 * @msg - introductory message to print
 */

void swsusp_show_speed(struct timeval *start, struct timeval *stop,
			unsigned nr_pages, char *msg)
{
	s64 elapsed_centisecs64;
	int centisecs;
	int k;
	int kps;

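	/* Convert the elapsed time from nanoseconds to centiseconds. */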
	elapsed_centisecs64 = timeval_to_ns(stop) - timeval_to_ns(start);
	do_div(elapsed_centisecs64, NSEC_PER_SEC / 100);
	centisecs = elapsed_centisecs64;
	if (centisecs == 0)
		centisecs = 1;	/* avoid div-by-zero */
	k = nr_pages * (PAGE_SIZE / 1024);
	kps = (k * 100) / centisecs;
	printk(KERN_INFO "PM: %s %d kbytes in %d.%02d seconds (%d.%02d MB/s)\n",
			msg, k,
			centisecs / 100, centisecs % 100,
			kps / 1000, (kps % 1000) / 10);
}

/**
 * swsusp_shrink_memory - Try to free as much memory as needed
 *
 * ... but do not OOM-kill anyone
 *
 * Note: all userland tasks should be stopped before this function is
 * called, or a livelock is possible.
 */

#define SHRINK_BITE 10000
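/* Ask shrink_all_memory() for at most SHRINK_BITE pages per call. */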
static inline unsigned long __shrink_memory(long tmp)
{
	if (tmp > SHRINK_BITE)
		tmp = SHRINK_BITE;
	return shrink_all_memory(tmp);
}

int swsusp_shrink_memory(void)
{
	long tmp;
	struct zone *zone;
	unsigned long pages = 0;
	unsigned int i = 0;
	char *p = "-\\|/";
	struct timeval start, stop;

	printk(KERN_INFO "PM: Shrinking memory... ");
	do_gettimeofday(&start);
	do {
		long size, highmem_size;

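		/*
		 * Estimate how many more pages must be freed: tmp ends up
		 * as the image size estimate (plus I/O and snapshot
		 * metadata headroom) minus the memory that is currently
		 * free.
		 */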
		highmem_size = count_highmem_pages();
		size = count_data_pages() + PAGES_FOR_IO + SPARE_PAGES;
		tmp = size;
		size += highmem_size;
		for_each_populated_zone(zone) {
			tmp += snapshot_additional_pages(zone);
			if (is_highmem(zone)) {
				highmem_size -=
					zone_page_state(zone, NR_FREE_PAGES);
			} else {
				tmp -= zone_page_state(zone, NR_FREE_PAGES);
				tmp += zone->lowmem_reserve[ZONE_NORMAL];
			}
		}

		if (highmem_size < 0)
			highmem_size = 0;

		tmp += highmem_size;
		if (tmp > 0) {
			tmp = __shrink_memory(tmp);
			if (!tmp)
				return -ENOMEM;
			pages += tmp;
		} else if (size > image_size / PAGE_SIZE) {
			tmp = __shrink_memory(size - (image_size / PAGE_SIZE));
			pages += tmp;
		}
		printk("\b%c", p[i++%4]);
	} while (tmp > 0);
	do_gettimeofday(&stop);
	printk("\bdone (%lu pages freed)\n", pages);
	swsusp_show_speed(&start, &stop, pages, "Freed");

	return 0;
}

/*
 * Platforms, like ACPI, may want us to save some memory used by them during
 * hibernation and to restore the contents of this memory during the subsequent
 * resume.  The code below implements a mechanism allowing us to do that.
 */

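/*
 * struct nvs_page - describes a single page-sized chunk of an NVS region
 * @phys_start:	physical address of the chunk
 * @size:	number of bytes of the chunk to save
 * @kaddr:	virtual address the chunk is mapped at while it is accessed
 * @data:	RAM page holding the saved contents of the chunk
 * @node:	entry in nvs_list
 */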
struct nvs_page {
	unsigned long phys_start;
	unsigned int size;
	void *kaddr;
	void *data;
	struct list_head node;
};

static LIST_HEAD(nvs_list);

/**
 * hibernate_nvs_register - register platform NVS memory region to save
 * @start - physical address of the region
 * @size - size of the region
 *
 * The NVS region need not be page-aligned (both ends) and we arrange
 * things so that the data from page-aligned addresses in this region will
 * be copied into separate RAM pages.
 */
int hibernate_nvs_register(unsigned long start, unsigned long size)
{
	struct nvs_page *entry, *next;

	while (size > 0) {
		unsigned int nr_bytes;

		entry = kzalloc(sizeof(struct nvs_page), GFP_KERNEL);
		if (!entry)
			goto Error;

		list_add_tail(&entry->node, &nvs_list);
		entry->phys_start = start;
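		/* Bytes from 'start' to the end of the page containing it. */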
		nr_bytes = PAGE_SIZE - (start & ~PAGE_MASK);
		entry->size = (size < nr_bytes) ? size : nr_bytes;

		start += entry->size;
		size -= entry->size;
	}
	return 0;

 Error:
	list_for_each_entry_safe(entry, next, &nvs_list, node) {
		list_del(&entry->node);
		kfree(entry);
	}
	return -ENOMEM;
}

/**
 * hibernate_nvs_free - free data pages allocated for saving NVS regions
 */
void hibernate_nvs_free(void)
{
	struct nvs_page *entry;

	list_for_each_entry(entry, &nvs_list, node)
		if (entry->data) {
			free_page((unsigned long)entry->data);
			entry->data = NULL;
			if (entry->kaddr) {
				iounmap(entry->kaddr);
				entry->kaddr = NULL;
			}
		}
}

/**
 * hibernate_nvs_alloc - allocate memory necessary for saving NVS regions
 */
int hibernate_nvs_alloc(void)
{
	struct nvs_page *entry;

	list_for_each_entry(entry, &nvs_list, node) {
		entry->data = (void *)__get_free_page(GFP_KERNEL);
		if (!entry->data) {
			hibernate_nvs_free();
			return -ENOMEM;
		}
	}
	return 0;
}

/**
 * hibernate_nvs_save - save NVS memory regions
 */
void hibernate_nvs_save(void)
{
	struct nvs_page *entry;

	printk(KERN_INFO "PM: Saving platform NVS memory\n");

	list_for_each_entry(entry, &nvs_list, node)
		if (entry->data) {
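			/* Map the region so it can be copied to the data page. */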
			entry->kaddr = ioremap(entry->phys_start, entry->size);
			memcpy(entry->data, entry->kaddr, entry->size);
		}
}

/**
 * hibernate_nvs_restore - restore NVS memory regions
 *
 * This function is going to be called with interrupts disabled, so it
 * cannot iounmap the virtual addresses used to access the NVS region.
 */
void hibernate_nvs_restore(void)
{
	struct nvs_page *entry;

	printk(KERN_INFO "PM: Restoring platform NVS memory\n");

	list_for_each_entry(entry, &nvs_list, node)
		if (entry->data)
			memcpy(entry->kaddr, entry->data, entry->size);
}