s390/mm,vmem: fix vmem_add_mem()/vmem_remove_range()
vmem_add_mem() should only insert a large page if pmd_none() is true for the
specific entry, since there might be a leftover from a previous mapping.
In addition make vmem_remove_range()'s page table walk code more complete
and fix a couple of potential endless loops (which can never happen :) --
the pgd_none()/pud_none()/pmd_none() branches used to continue the loop
without advancing the address.
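
To illustrate the loop-progress issue outside of the kernel, here is a
minimal user-space sketch with made-up region sizes and a fake entry_none()
helper (none of these names or values exist in the kernel); it only
demonstrates that each empty-entry branch must bump the address before
continuing, otherwise the walk never terminates:

  /*
   * Hypothetical user-space sketch (not the kernel code below): it mimics
   * the corrected vmem_remove_range() walk with toy region sizes and a fake
   * "entry is empty" check, to show why every *_none() branch has to
   * advance the address before continuing.
   */
  #include <stdio.h>

  #define PMD_SIZE_TOY   0x1000UL    /* toy values, not the s390 ones */
  #define PUD_SIZE_TOY   0x10000UL
  #define PGDIR_SIZE_TOY 0x100000UL

  /* fake presence check: pretend every second slot at each level is empty */
  static int entry_none(unsigned long address, unsigned long size)
  {
  	return (address / size) & 1;
  }

  static void remove_range_sketch(unsigned long address, unsigned long end)
  {
  	while (address < end) {
  		if (entry_none(address, PGDIR_SIZE_TOY)) {
  			address += PGDIR_SIZE_TOY;	/* skip whole pgd slot */
  			continue;
  		}
  		if (entry_none(address, PUD_SIZE_TOY)) {
  			address += PUD_SIZE_TOY;	/* skip whole pud slot */
  			continue;
  		}
  		if (entry_none(address, PMD_SIZE_TOY)) {
  			address += PMD_SIZE_TOY;	/* skip whole pmd slot */
  			continue;
  		}
  		/* "unmap" one pmd-sized chunk and move on */
  		printf("clearing 0x%lx\n", address);
  		address += PMD_SIZE_TOY;
  	}
  }

  int main(void)
  {
  	remove_range_sketch(0, 4 * PGDIR_SIZE_TOY);
  	return 0;
  }
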
Signed-off-by: Heiko Carstens <heiko.carstens@de.ibm.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
diff --git a/arch/s390/mm/vmem.c b/arch/s390/mm/vmem.c
index d402b19..387c7c6 100644
--- a/arch/s390/mm/vmem.c
+++ b/arch/s390/mm/vmem.c
@@ -109,8 +109,8 @@
pm_dir = pmd_offset(pu_dir, address);
#if defined(CONFIG_64BIT) && !defined(CONFIG_DEBUG_PAGEALLOC)
- if (MACHINE_HAS_EDAT1 && address && !(address & ~PMD_MASK) &&
- (address + PMD_SIZE <= end)) {
+ if (MACHINE_HAS_EDAT1 && pmd_none(*pm_dir) && address &&
+ !(address & ~PMD_MASK) && (address + PMD_SIZE <= end)) {
pte_val(pte) |= _SEGMENT_ENTRY_LARGE;
pmd_val(*pm_dir) = pte_val(pte);
address += PMD_SIZE;
@@ -151,12 +151,20 @@
pte_val(pte) = _PAGE_TYPE_EMPTY;
while (address < end) {
pg_dir = pgd_offset_k(address);
+ if (pgd_none(*pg_dir)) {
+ address += PGDIR_SIZE;
+ continue;
+ }
pu_dir = pud_offset(pg_dir, address);
- if (pud_none(*pu_dir))
+ if (pud_none(*pu_dir)) {
+ address += PUD_SIZE;
continue;
+ }
pm_dir = pmd_offset(pu_dir, address);
- if (pmd_none(*pm_dir))
+ if (pmd_none(*pm_dir)) {
+ address += PMD_SIZE;
continue;
+ }
if (pmd_large(*pm_dir)) {
pmd_clear(pm_dir);
address += PMD_SIZE;