/*
 * Copyright IBM Corp. 2006
 * Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>
 */

#include <linux/bootmem.h>
#include <linux/pfn.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/hugetlb.h>
#include <linux/slab.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/setup.h>
#include <asm/tlbflush.h>
#include <asm/sections.h>

static DEFINE_MUTEX(vmem_mutex);

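/*
 * A memory segment describes a contiguous physical memory range that is
 * part of the 1:1 mapping. All segments are kept on mem_segs so that
 * overlapping mappings can be rejected.
 */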
struct memory_segment {
	struct list_head list;
	unsigned long start;
	unsigned long size;
};

static LIST_HEAD(mem_segs);

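/*
 * Allocate 2^order pages. Fall back to the bootmem allocator as long as
 * the slab allocator is not yet available (early boot).
 */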
static void __ref *vmem_alloc_pages(unsigned int order)
{
	if (slab_is_available())
		return (void *)__get_free_pages(GFP_KERNEL, order);
	return alloc_bootmem_pages((1 << order) * PAGE_SIZE);
}

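/*
 * Allocate and clear a region-third table (pud). Only needed on 64 bit;
 * on 31 bit the upper page table levels are folded and no table exists.
 */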
static inline pud_t *vmem_pud_alloc(void)
{
	pud_t *pud = NULL;

#ifdef CONFIG_64BIT
	pud = vmem_alloc_pages(2);
	if (!pud)
		return NULL;
	clear_table((unsigned long *) pud, _REGION3_ENTRY_EMPTY, PAGE_SIZE * 4);
#endif
	return pud;
}

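/*
 * Allocate and clear a segment table (pmd), again needed on 64 bit only.
 */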
static inline pmd_t *vmem_pmd_alloc(void)
{
	pmd_t *pmd = NULL;

#ifdef CONFIG_64BIT
	pmd = vmem_alloc_pages(2);
	if (!pmd)
		return NULL;
	clear_table((unsigned long *) pmd, _SEGMENT_ENTRY_EMPTY, PAGE_SIZE * 4);
#endif
	return pmd;
}

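/*
 * Allocate a page table (pte). Once the slab allocator is up the table
 * comes from the init_mm page table allocator, otherwise from bootmem.
 * All entries are initialized as invalid.
 */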
static pte_t __ref *vmem_pte_alloc(unsigned long address)
{
	pte_t *pte;

	if (slab_is_available())
		pte = (pte_t *) page_table_alloc(&init_mm, address);
	else
		pte = alloc_bootmem(PTRS_PER_PTE * sizeof(pte_t));
	if (!pte)
		return NULL;
	clear_table((unsigned long *) pte, _PAGE_TYPE_EMPTY,
		    PTRS_PER_PTE * sizeof(pte_t));
	return pte;
}

/*
 * Add a physical memory range to the 1:1 mapping.
 * Large frames (2 GB with EDAT2, 1 MB with EDAT1) are used where the
 * hardware supports them and the range is suitably aligned, unless
 * CONFIG_DEBUG_PAGEALLOC is set.
 */
static int vmem_add_mem(unsigned long start, unsigned long size, int ro)
{
	unsigned long end = start + size;
	unsigned long address = start;
	pgd_t *pg_dir;
	pud_t *pu_dir;
	pmd_t *pm_dir;
	pte_t *pt_dir;
	int ret = -ENOMEM;

	while (address < end) {
		pg_dir = pgd_offset_k(address);
		if (pgd_none(*pg_dir)) {
			pu_dir = vmem_pud_alloc();
			if (!pu_dir)
				goto out;
			pgd_populate(&init_mm, pg_dir, pu_dir);
		}
		pu_dir = pud_offset(pg_dir, address);
#if defined(CONFIG_64BIT) && !defined(CONFIG_DEBUG_PAGEALLOC)
		if (MACHINE_HAS_EDAT2 && pud_none(*pu_dir) && address &&
		    !(address & ~PUD_MASK) && (address + PUD_SIZE <= end)) {
			pud_val(*pu_dir) = __pa(address) |
				_REGION_ENTRY_TYPE_R3 | _REGION3_ENTRY_LARGE |
				(ro ? _REGION_ENTRY_RO : 0);
			address += PUD_SIZE;
			continue;
		}
#endif
		if (pud_none(*pu_dir)) {
			pm_dir = vmem_pmd_alloc();
			if (!pm_dir)
				goto out;
			pud_populate(&init_mm, pu_dir, pm_dir);
		}
		pm_dir = pmd_offset(pu_dir, address);
#if defined(CONFIG_64BIT) && !defined(CONFIG_DEBUG_PAGEALLOC)
		if (MACHINE_HAS_EDAT1 && pmd_none(*pm_dir) && address &&
		    !(address & ~PMD_MASK) && (address + PMD_SIZE <= end)) {
			pmd_val(*pm_dir) = __pa(address) |
				_SEGMENT_ENTRY | _SEGMENT_ENTRY_LARGE |
				(ro ? _SEGMENT_ENTRY_RO : 0);
			address += PMD_SIZE;
			continue;
		}
#endif
		if (pmd_none(*pm_dir)) {
			pt_dir = vmem_pte_alloc(address);
			if (!pt_dir)
				goto out;
			pmd_populate(&init_mm, pm_dir, pt_dir);
		}

		pt_dir = pte_offset_kernel(pm_dir, address);
		pte_val(*pt_dir) = __pa(address) | (ro ? _PAGE_RO : 0);
		address += PAGE_SIZE;
	}
	ret = 0;
out:
	flush_tlb_kernel_range(start, end);
	return ret;
}

/*
 * Remove a physical memory range from the 1:1 mapping.
 * Currently only invalidates the page table entries, the page tables
 * themselves are not freed.
 */
static void vmem_remove_range(unsigned long start, unsigned long size)
{
	unsigned long end = start + size;
	unsigned long address = start;
	pgd_t *pg_dir;
	pud_t *pu_dir;
	pmd_t *pm_dir;
	pte_t *pt_dir;
	pte_t  pte;

	pte_val(pte) = _PAGE_TYPE_EMPTY;
	while (address < end) {
		pg_dir = pgd_offset_k(address);
		if (pgd_none(*pg_dir)) {
			address += PGDIR_SIZE;
			continue;
		}
		pu_dir = pud_offset(pg_dir, address);
		if (pud_none(*pu_dir)) {
			address += PUD_SIZE;
			continue;
		}
		if (pud_large(*pu_dir)) {
			pud_clear(pu_dir);
			address += PUD_SIZE;
			continue;
		}
		pm_dir = pmd_offset(pu_dir, address);
		if (pmd_none(*pm_dir)) {
			address += PMD_SIZE;
			continue;
		}
		if (pmd_large(*pm_dir)) {
			pmd_clear(pm_dir);
			address += PMD_SIZE;
			continue;
		}
		pt_dir = pte_offset_kernel(pm_dir, address);
		*pt_dir = pte;
		address += PAGE_SIZE;
	}
	flush_tlb_kernel_range(start, end);
}

/*
 * Add a backed mem_map array to the virtual mem_map array.
 */
int __meminit vmemmap_populate(struct page *start, unsigned long nr, int node)
{
	unsigned long address, start_addr, end_addr;
	pgd_t *pg_dir;
	pud_t *pu_dir;
	pmd_t *pm_dir;
	pte_t *pt_dir;
	int ret = -ENOMEM;

	start_addr = (unsigned long) start;
	end_addr = (unsigned long) (start + nr);

	for (address = start_addr; address < end_addr;) {
		pg_dir = pgd_offset_k(address);
		if (pgd_none(*pg_dir)) {
			pu_dir = vmem_pud_alloc();
			if (!pu_dir)
				goto out;
			pgd_populate(&init_mm, pg_dir, pu_dir);
		}

		pu_dir = pud_offset(pg_dir, address);
		if (pud_none(*pu_dir)) {
			pm_dir = vmem_pmd_alloc();
			if (!pm_dir)
				goto out;
			pud_populate(&init_mm, pu_dir, pm_dir);
		}

		pm_dir = pmd_offset(pu_dir, address);
		if (pmd_none(*pm_dir)) {
#ifdef CONFIG_64BIT
			/* Use 1MB frames for vmemmap if available. We
			 * always use large frames even if they are only
			 * partially used. Otherwise we would also need
			 * page tables, since vmemmap_populate gets called
			 * for each section separately. */
			if (MACHINE_HAS_EDAT1) {
				void *new_page;

				new_page = vmemmap_alloc_block(PMD_SIZE, node);
				if (!new_page)
					goto out;
				pmd_val(*pm_dir) = __pa(new_page) |
					_SEGMENT_ENTRY | _SEGMENT_ENTRY_LARGE;
				address = (address + PMD_SIZE) & PMD_MASK;
				continue;
			}
#endif
			pt_dir = vmem_pte_alloc(address);
			if (!pt_dir)
				goto out;
			pmd_populate(&init_mm, pm_dir, pt_dir);
		} else if (pmd_large(*pm_dir)) {
			address = (address + PMD_SIZE) & PMD_MASK;
			continue;
		}

		pt_dir = pte_offset_kernel(pm_dir, address);
		if (pte_none(*pt_dir)) {
			unsigned long new_page;

			new_page = __pa(vmem_alloc_pages(0));
			if (!new_page)
				goto out;
			pte_val(*pt_dir) = __pa(new_page);
		}
		address += PAGE_SIZE;
	}
	memset(start, 0, nr * sizeof(struct page));
	ret = 0;
out:
	flush_tlb_kernel_range(start_addr, end_addr);
	return ret;
}

/*
 * Add memory segment to the segment list if it doesn't overlap with
 * an already present segment.
 */
static int insert_memory_segment(struct memory_segment *seg)
{
	struct memory_segment *tmp;

	if (seg->start + seg->size > VMEM_MAX_PHYS ||
	    seg->start + seg->size < seg->start)
		return -ERANGE;

	list_for_each_entry(tmp, &mem_segs, list) {
		if (seg->start >= tmp->start + tmp->size)
			continue;
		if (seg->start + seg->size <= tmp->start)
			continue;
		return -ENOSPC;
	}
	list_add(&seg->list, &mem_segs);
	return 0;
}

/*
 * Remove memory segment from the segment list.
 */
static void remove_memory_segment(struct memory_segment *seg)
{
	list_del(&seg->list);
}

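/*
 * Unlink a segment and remove its pages from the 1:1 mapping.
 * The caller must hold vmem_mutex.
 */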
static void __remove_shared_memory(struct memory_segment *seg)
{
	remove_memory_segment(seg);
	vmem_remove_range(seg->start, seg->size);
}

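/*
 * Remove a previously added mapping. The range must match an existing
 * segment exactly, otherwise -ENOENT is returned.
 */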
int vmem_remove_mapping(unsigned long start, unsigned long size)
{
	struct memory_segment *seg;
	int ret;

	mutex_lock(&vmem_mutex);

	ret = -ENOENT;
	list_for_each_entry(seg, &mem_segs, list) {
		if (seg->start == start && seg->size == size)
			break;
	}

	if (seg->start != start || seg->size != size)
		goto out;

	ret = 0;
	__remove_shared_memory(seg);
	kfree(seg);
out:
	mutex_unlock(&vmem_mutex);
	return ret;
}

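/*
 * Add a memory range to the 1:1 mapping and register it as a segment.
 * Returns -ENOSPC if the range overlaps an existing segment and -ERANGE
 * if it extends beyond VMEM_MAX_PHYS.
 */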
int vmem_add_mapping(unsigned long start, unsigned long size)
{
	struct memory_segment *seg;
	int ret;

	mutex_lock(&vmem_mutex);
	ret = -ENOMEM;
	seg = kzalloc(sizeof(*seg), GFP_KERNEL);
	if (!seg)
		goto out;
	seg->start = start;
	seg->size = size;

	ret = insert_memory_segment(seg);
	if (ret)
		goto out_free;

	ret = vmem_add_mem(start, size, 0);
	if (ret)
		goto out_remove;
	goto out;

out_remove:
	__remove_shared_memory(seg);
out_free:
	kfree(seg);
out:
	mutex_unlock(&vmem_mutex);
	return ret;
}

/*
 * Map the whole physical memory to virtual memory (identity mapping).
 * Enough space is reserved in the vmalloc area for the vmemmap to allow
 * hotplugging additional memory segments later on.
 */
void __init vmem_map_init(void)
{
	unsigned long ro_start, ro_end;
	unsigned long start, end;
	int i;

	ro_start = PFN_ALIGN((unsigned long)&_stext);
	ro_end = (unsigned long)&_eshared & PAGE_MASK;
	for (i = 0; i < MEMORY_CHUNKS && memory_chunk[i].size > 0; i++) {
		if (memory_chunk[i].type == CHUNK_CRASHK ||
		    memory_chunk[i].type == CHUNK_OLDMEM)
			continue;
		start = memory_chunk[i].addr;
		end = memory_chunk[i].addr + memory_chunk[i].size;
		if (start >= ro_end || end <= ro_start)
			vmem_add_mem(start, end - start, 0);
		else if (start >= ro_start && end <= ro_end)
			vmem_add_mem(start, end - start, 1);
		else if (start >= ro_start) {
			vmem_add_mem(start, ro_end - start, 1);
			vmem_add_mem(ro_end, end - ro_end, 0);
		} else if (end < ro_end) {
			vmem_add_mem(start, ro_start - start, 0);
			vmem_add_mem(ro_start, end - ro_start, 1);
		} else {
			vmem_add_mem(start, ro_start - start, 0);
			vmem_add_mem(ro_start, ro_end - ro_start, 1);
			vmem_add_mem(ro_end, end - ro_end, 0);
		}
	}
}

/*
 * Convert memory chunk array to a memory segment list so there is a single
 * list that contains both r/w memory and shared memory segments.
 */
static int __init vmem_convert_memory_chunk(void)
{
	struct memory_segment *seg;
	int i;

	mutex_lock(&vmem_mutex);
	for (i = 0; i < MEMORY_CHUNKS; i++) {
		if (!memory_chunk[i].size)
			continue;
		if (memory_chunk[i].type == CHUNK_CRASHK ||
		    memory_chunk[i].type == CHUNK_OLDMEM)
			continue;
		seg = kzalloc(sizeof(*seg), GFP_KERNEL);
		if (!seg)
			panic("Out of memory...\n");
		seg->start = memory_chunk[i].addr;
		seg->size = memory_chunk[i].size;
		insert_memory_segment(seg);
	}
	mutex_unlock(&vmem_mutex);
	return 0;
}

core_initcall(vmem_convert_memory_chunk);