Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/s390/linux

Pull s390 update from Martin Schwidefsky:
 "The most prominent change in this patch set is the software dirty bit
  patch for s390.  It removes __HAVE_ARCH_PAGE_TEST_AND_CLEAR_DIRTY and
  the page_test_and_clear_dirty primitive which makes the common memory
  management code a bit less obscure.

  Heiko fixed most of the PCI-related fallout, more often than not
  missing GENERIC_HARDIRQS dependencies.  Also notable is one of the
  3270 patches, which adds an export to tty_io so that a tty can be
  resized.

  The rest is the usual bunch of cleanups and bug fixes."
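
As a rough illustration of the software dirty bit scheme: a minimal,
editor-written user-space model (not the kernel code itself; only the
_PAGE_SWC/_PAGE_SWW bit values are taken from the pgtable.h hunks below,
_PAGE_RO is simplified).  A writable pte stays hardware write-protected
until the first write fault marks it dirty, so the dirty state is
tracked in a software pte bit instead of the storage key.

	#include <stdio.h>

	#define _PAGE_RO	0x200UL	/* HW page protection bit (simplified) */
	#define _PAGE_SWC	0x004UL	/* SW pte changed (dirty) bit, from the patch */
	#define _PAGE_SWW	0x010UL	/* SW pte write bit, from the patch */

	typedef unsigned long pte_t;

	static pte_t pte_mkwrite(pte_t pte)
	{
		pte |= _PAGE_SWW;
		if (pte & _PAGE_SWC)		/* already dirty: allow HW writes */
			pte &= ~_PAGE_RO;
		return pte;
	}

	static pte_t pte_mkdirty(pte_t pte)
	{
		pte |= _PAGE_SWC;
		if (pte & _PAGE_SWW)		/* writable and dirty: drop protection */
			pte &= ~_PAGE_RO;
		return pte;
	}

	static pte_t pte_wrprotect(pte_t pte)
	{
		pte &= ~_PAGE_SWW;
		return pte | _PAGE_RO;
	}

	int main(void)
	{
		pte_t pte = _PAGE_RO;		/* clean, read-only mapping */

		pte = pte_mkwrite(pte);		/* writable, but still HW protected */
		printf("after mkwrite:   dirty=%d hw-protected=%d\n",
		       !!(pte & _PAGE_SWC), !!(pte & _PAGE_RO));

		pte = pte_mkdirty(pte);		/* first write fault marks it dirty */
		printf("after mkdirty:   dirty=%d hw-protected=%d\n",
		       !!(pte & _PAGE_SWC), !!(pte & _PAGE_RO));

		pte = pte_wrprotect(pte);	/* e.g. fork(): re-protect, dirty stays */
		printf("after wrprotect: dirty=%d hw-protected=%d\n",
		       !!(pte & _PAGE_SWC), !!(pte & _PAGE_RO));
		return 0;
	}

In the actual patch the same transitions are applied to the s390 pte
(see the pte_mkwrite/pte_mkdirty/pte_wrprotect hunks in
arch/s390/include/asm/pgtable.h), which is what allows the common code
to drop the storage-key based page_test_and_clear_dirty primitive.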

* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/s390/linux: (42 commits)
  s390/module: Add missing R_390_NONE relocation type
  drivers/gpio: add missing GENERIC_HARDIRQ dependency
  drivers/input: add couple of missing GENERIC_HARDIRQS dependencies
  s390/cleanup: rename SPP to LPP
  s390/mm: implement software dirty bits
  s390/mm: Fix crst upgrade of mmap with MAP_FIXED
  s390/linker skript: discard exit.data at runtime
  drivers/media: add missing GENERIC_HARDIRQS dependency
  s390/bpf,jit: add vlan tag support
  drivers/net,AT91RM9200: add missing GENERIC_HARDIRQS dependency
  iucv: fix kernel panic at reboot
  s390/Kconfig: sort list of arch selected config options
  phylib: remove !S390 dependeny from Kconfig
  uio: remove !S390 dependency from Kconfig
  dasd: fix sysfs cleanup in dasd_generic_remove
  s390/pci: fix hotplug module init
  s390/pci: cleanup clp page allocation
  s390/pci: cleanup clp inline assembly
  s390/perf: cpum_cf: fallback to software sampling events
  s390/mm: provide PAGE_SHARED define
  ...
diff --git a/MAINTAINERS b/MAINTAINERS
index db061e9..73b7d50 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -6519,7 +6519,7 @@
 F:	drivers/s390/net/
 
 S390 ZCRYPT DRIVER
-M:	Holger Dengler <hd@linux.vnet.ibm.com>
+M:	Ingo Tuchscherer <ingo.tuchscherer@de.ibm.com>
 M:	linux390@de.ibm.com
 L:	linux-s390@vger.kernel.org
 W:	http://www.ibm.com/developerworks/linux/linux390/
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
index 27c91c3..b220e15 100644
--- a/arch/s390/Kconfig
+++ b/arch/s390/Kconfig
@@ -60,85 +60,86 @@
 
 config S390
 	def_bool y
-	select USE_GENERIC_SMP_HELPERS if SMP
-	select GENERIC_CPU_DEVICES if !SMP
-	select HAVE_SYSCALL_WRAPPERS
-	select HAVE_FUNCTION_TRACER
-	select HAVE_FUNCTION_TRACE_MCOUNT_TEST
-	select HAVE_FTRACE_MCOUNT_RECORD
-	select HAVE_C_RECORDMCOUNT
-	select HAVE_SYSCALL_TRACEPOINTS
-	select SYSCTL_EXCEPTION_TRACE
-	select HAVE_DYNAMIC_FTRACE
-	select HAVE_FUNCTION_GRAPH_TRACER
-	select HAVE_REGS_AND_STACK_ACCESS_API
-	select HAVE_OPROFILE
-	select HAVE_KPROBES
-	select HAVE_KRETPROBES
-	select HAVE_KVM if 64BIT
-	select HAVE_ARCH_TRACEHOOK
-	select INIT_ALL_POSSIBLE
-	select HAVE_PERF_EVENTS
-	select ARCH_HAVE_NMI_SAFE_CMPXCHG
-	select HAVE_DEBUG_KMEMLEAK
-	select HAVE_KERNEL_GZIP
-	select HAVE_KERNEL_BZIP2
-	select HAVE_KERNEL_LZMA
-	select HAVE_KERNEL_LZO
-	select HAVE_KERNEL_XZ
-	select HAVE_ARCH_MUTEX_CPU_RELAX
-	select HAVE_ARCH_JUMP_LABEL if !MARCH_G5
-	select HAVE_BPF_JIT if 64BIT && PACK_STACK
-	select ARCH_SAVE_PAGE_KEYS if HIBERNATION
-	select ARCH_HAS_ATOMIC64_DEC_IF_POSITIVE
-	select HAVE_MEMBLOCK
-	select HAVE_MEMBLOCK_NODE_MAP
-	select HAVE_CMPXCHG_LOCAL
-	select HAVE_CMPXCHG_DOUBLE
-	select HAVE_ALIGNED_STRUCT_PAGE if SLUB
-	select HAVE_VIRT_CPU_ACCOUNTING
-	select VIRT_CPU_ACCOUNTING
 	select ARCH_DISCARD_MEMBLOCK
-	select BUILDTIME_EXTABLE_SORT
-	select ARCH_INLINE_SPIN_TRYLOCK
-	select ARCH_INLINE_SPIN_TRYLOCK_BH
-	select ARCH_INLINE_SPIN_LOCK
-	select ARCH_INLINE_SPIN_LOCK_BH
-	select ARCH_INLINE_SPIN_LOCK_IRQ
-	select ARCH_INLINE_SPIN_LOCK_IRQSAVE
-	select ARCH_INLINE_SPIN_UNLOCK
-	select ARCH_INLINE_SPIN_UNLOCK_BH
-	select ARCH_INLINE_SPIN_UNLOCK_IRQ
-	select ARCH_INLINE_SPIN_UNLOCK_IRQRESTORE
-	select ARCH_INLINE_READ_TRYLOCK
+	select ARCH_HAS_ATOMIC64_DEC_IF_POSITIVE
+	select ARCH_HAVE_NMI_SAFE_CMPXCHG
 	select ARCH_INLINE_READ_LOCK
 	select ARCH_INLINE_READ_LOCK_BH
 	select ARCH_INLINE_READ_LOCK_IRQ
 	select ARCH_INLINE_READ_LOCK_IRQSAVE
+	select ARCH_INLINE_READ_TRYLOCK
 	select ARCH_INLINE_READ_UNLOCK
 	select ARCH_INLINE_READ_UNLOCK_BH
 	select ARCH_INLINE_READ_UNLOCK_IRQ
 	select ARCH_INLINE_READ_UNLOCK_IRQRESTORE
-	select ARCH_INLINE_WRITE_TRYLOCK
+	select ARCH_INLINE_SPIN_LOCK
+	select ARCH_INLINE_SPIN_LOCK_BH
+	select ARCH_INLINE_SPIN_LOCK_IRQ
+	select ARCH_INLINE_SPIN_LOCK_IRQSAVE
+	select ARCH_INLINE_SPIN_TRYLOCK
+	select ARCH_INLINE_SPIN_TRYLOCK_BH
+	select ARCH_INLINE_SPIN_UNLOCK
+	select ARCH_INLINE_SPIN_UNLOCK_BH
+	select ARCH_INLINE_SPIN_UNLOCK_IRQ
+	select ARCH_INLINE_SPIN_UNLOCK_IRQRESTORE
 	select ARCH_INLINE_WRITE_LOCK
 	select ARCH_INLINE_WRITE_LOCK_BH
 	select ARCH_INLINE_WRITE_LOCK_IRQ
 	select ARCH_INLINE_WRITE_LOCK_IRQSAVE
+	select ARCH_INLINE_WRITE_TRYLOCK
 	select ARCH_INLINE_WRITE_UNLOCK
 	select ARCH_INLINE_WRITE_UNLOCK_BH
 	select ARCH_INLINE_WRITE_UNLOCK_IRQ
 	select ARCH_INLINE_WRITE_UNLOCK_IRQRESTORE
-	select HAVE_UID16 if 32BIT
+	select ARCH_SAVE_PAGE_KEYS if HIBERNATION
 	select ARCH_WANT_IPC_PARSE_VERSION
-	select HAVE_ARCH_TRANSPARENT_HUGEPAGE if 64BIT
+	select BUILDTIME_EXTABLE_SORT
+	select CLONE_BACKWARDS2
+	select GENERIC_CLOCKEVENTS
+	select GENERIC_CPU_DEVICES if !SMP
+	select GENERIC_KERNEL_THREAD
 	select GENERIC_SMP_IDLE_THREAD
 	select GENERIC_TIME_VSYSCALL_OLD
-	select GENERIC_CLOCKEVENTS
-	select KTIME_SCALAR if 32BIT
+	select HAVE_ALIGNED_STRUCT_PAGE if SLUB
+	select HAVE_ARCH_JUMP_LABEL if !MARCH_G5
+	select HAVE_ARCH_MUTEX_CPU_RELAX
 	select HAVE_ARCH_SECCOMP_FILTER
+	select HAVE_ARCH_TRACEHOOK
+	select HAVE_ARCH_TRANSPARENT_HUGEPAGE if 64BIT
+	select HAVE_BPF_JIT if 64BIT && PACK_STACK
+	select HAVE_CMPXCHG_DOUBLE
+	select HAVE_CMPXCHG_LOCAL
+	select HAVE_C_RECORDMCOUNT
+	select HAVE_DEBUG_KMEMLEAK
+	select HAVE_DYNAMIC_FTRACE
+	select HAVE_FTRACE_MCOUNT_RECORD
+	select HAVE_FUNCTION_GRAPH_TRACER
+	select HAVE_FUNCTION_TRACER
+	select HAVE_FUNCTION_TRACE_MCOUNT_TEST
+	select HAVE_KERNEL_BZIP2
+	select HAVE_KERNEL_GZIP
+	select HAVE_KERNEL_LZMA
+	select HAVE_KERNEL_LZO
+	select HAVE_KERNEL_XZ
+	select HAVE_KPROBES
+	select HAVE_KRETPROBES
+	select HAVE_KVM if 64BIT
+	select HAVE_MEMBLOCK
+	select HAVE_MEMBLOCK_NODE_MAP
 	select HAVE_MOD_ARCH_SPECIFIC
+	select HAVE_OPROFILE
+	select HAVE_PERF_EVENTS
+	select HAVE_REGS_AND_STACK_ACCESS_API
+	select HAVE_SYSCALL_TRACEPOINTS
+	select HAVE_SYSCALL_WRAPPERS
+	select HAVE_UID16 if 32BIT
+	select HAVE_VIRT_CPU_ACCOUNTING
+	select INIT_ALL_POSSIBLE
+	select KTIME_SCALAR if 32BIT
 	select MODULES_USE_ELF_RELA
-	select CLONE_BACKWARDS2
+	select SYSCTL_EXCEPTION_TRACE
+	select USE_GENERIC_SMP_HELPERS if SMP
+	select VIRT_CPU_ACCOUNTING
 
 config SCHED_OMIT_FRAME_POINTER
 	def_bool y
diff --git a/arch/s390/appldata/appldata_mem.c b/arch/s390/appldata/appldata_mem.c
index 02d9a1c..7ef60b5 100644
--- a/arch/s390/appldata/appldata_mem.c
+++ b/arch/s390/appldata/appldata_mem.c
@@ -108,7 +108,7 @@
 	mem_data->totalswap = P2K(val.totalswap);
 	mem_data->freeswap  = P2K(val.freeswap);
 
-	mem_data->timestamp = get_clock();
+	mem_data->timestamp = get_tod_clock();
 	mem_data->sync_count_2++;
 }
 
diff --git a/arch/s390/appldata/appldata_net_sum.c b/arch/s390/appldata/appldata_net_sum.c
index 1370e35..2d224b9 100644
--- a/arch/s390/appldata/appldata_net_sum.c
+++ b/arch/s390/appldata/appldata_net_sum.c
@@ -111,7 +111,7 @@
 	net_data->tx_dropped = tx_dropped;
 	net_data->collisions = collisions;
 
-	net_data->timestamp = get_clock();
+	net_data->timestamp = get_tod_clock();
 	net_data->sync_count_2++;
 }
 
diff --git a/arch/s390/appldata/appldata_os.c b/arch/s390/appldata/appldata_os.c
index 87521ba..de8e2b3 100644
--- a/arch/s390/appldata/appldata_os.c
+++ b/arch/s390/appldata/appldata_os.c
@@ -156,7 +156,7 @@
 		}
 		ops.size = new_size;
 	}
-	os_data->timestamp = get_clock();
+	os_data->timestamp = get_tod_clock();
 	os_data->sync_count_2++;
 }
 
diff --git a/arch/s390/hypfs/hypfs_vm.c b/arch/s390/hypfs/hypfs_vm.c
index 4f6afaa..f364dcf 100644
--- a/arch/s390/hypfs/hypfs_vm.c
+++ b/arch/s390/hypfs/hypfs_vm.c
@@ -245,7 +245,7 @@
 	d2fc = diag2fc_store(guest_query, &count, sizeof(d2fc->hdr));
 	if (IS_ERR(d2fc))
 		return PTR_ERR(d2fc);
-	get_clock_ext(d2fc->hdr.tod_ext);
+	get_tod_clock_ext(d2fc->hdr.tod_ext);
 	d2fc->hdr.len = count * sizeof(struct diag2fc_data);
 	d2fc->hdr.version = DBFS_D2FC_HDR_VERSION;
 	d2fc->hdr.count = count;
diff --git a/arch/s390/include/asm/barrier.h b/arch/s390/include/asm/barrier.h
index 10a5088..16760ee 100644
--- a/arch/s390/include/asm/barrier.h
+++ b/arch/s390/include/asm/barrier.h
@@ -13,15 +13,12 @@
  * to devices.
  */
 
-static inline void mb(void)
-{
 #ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
-	/* Fast-BCR without checkpoint synchronization */
-	asm volatile("bcr 14,0" : : : "memory");
+/* Fast-BCR without checkpoint synchronization */
+#define mb() do {  asm volatile("bcr 14,0" : : : "memory"); } while (0)
 #else
-	asm volatile("bcr 15,0" : : : "memory");
+#define mb() do {  asm volatile("bcr 15,0" : : : "memory"); } while (0)
 #endif
-}
 
 #define rmb()				mb()
 #define wmb()				mb()
diff --git a/arch/s390/include/asm/clp.h b/arch/s390/include/asm/clp.h
index 6c3aecc..a0e71a5 100644
--- a/arch/s390/include/asm/clp.h
+++ b/arch/s390/include/asm/clp.h
@@ -2,7 +2,7 @@
 #define _ASM_S390_CLP_H
 
 /* CLP common request & response block size */
-#define CLP_BLK_SIZE			(PAGE_SIZE * 2)
+#define CLP_BLK_SIZE			PAGE_SIZE
 
 struct clp_req_hdr {
 	u16 len;
diff --git a/arch/s390/include/asm/cpu_mf.h b/arch/s390/include/asm/cpu_mf.h
index 35f0020..f1eddd1 100644
--- a/arch/s390/include/asm/cpu_mf.h
+++ b/arch/s390/include/asm/cpu_mf.h
@@ -34,12 +34,12 @@
 /* CPU measurement facility support */
 static inline int cpum_cf_avail(void)
 {
-	return MACHINE_HAS_SPP && test_facility(67);
+	return MACHINE_HAS_LPP && test_facility(67);
 }
 
 static inline int cpum_sf_avail(void)
 {
-	return MACHINE_HAS_SPP && test_facility(68);
+	return MACHINE_HAS_LPP && test_facility(68);
 }
 
 
diff --git a/arch/s390/include/asm/dma-mapping.h b/arch/s390/include/asm/dma-mapping.h
index 8a32f7d..9411db65 100644
--- a/arch/s390/include/asm/dma-mapping.h
+++ b/arch/s390/include/asm/dma-mapping.h
@@ -19,9 +19,11 @@
 }
 
 extern int dma_set_mask(struct device *dev, u64 mask);
-extern int dma_is_consistent(struct device *dev, dma_addr_t dma_handle);
-extern void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
-			   enum dma_data_direction direction);
+
+static inline void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
+				  enum dma_data_direction direction)
+{
+}
 
 #define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
 #define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
diff --git a/arch/s390/include/asm/mman.h b/arch/s390/include/asm/mman.h
index 0e47a57..9977e08 100644
--- a/arch/s390/include/asm/mman.h
+++ b/arch/s390/include/asm/mman.h
@@ -9,7 +9,7 @@
 #include <uapi/asm/mman.h>
 
 #if !defined(__ASSEMBLY__) && defined(CONFIG_64BIT)
-int s390_mmap_check(unsigned long addr, unsigned long len);
-#define arch_mmap_check(addr,len,flags)	s390_mmap_check(addr,len)
+int s390_mmap_check(unsigned long addr, unsigned long len, unsigned long flags);
+#define arch_mmap_check(addr, len, flags) s390_mmap_check(addr, len, flags)
 #endif
 #endif /* __S390_MMAN_H__ */
diff --git a/arch/s390/include/asm/page.h b/arch/s390/include/asm/page.h
index a86ad40840..75ce9b0 100644
--- a/arch/s390/include/asm/page.h
+++ b/arch/s390/include/asm/page.h
@@ -155,28 +155,6 @@
 #define _PAGE_ACC_BITS		0xf0	/* HW access control bits	*/
 
 /*
- * Test and clear dirty bit in storage key.
- * We can't clear the changed bit atomically. This is a potential
- * race against modification of the referenced bit. This function
- * should therefore only be called if it is not mapped in any
- * address space.
- *
- * Note that the bit gets set whenever page content is changed. That means
- * also when the page is modified by DMA or from inside the kernel.
- */
-#define __HAVE_ARCH_PAGE_TEST_AND_CLEAR_DIRTY
-static inline int page_test_and_clear_dirty(unsigned long pfn, int mapped)
-{
-	unsigned char skey;
-
-	skey = page_get_storage_key(pfn << PAGE_SHIFT);
-	if (!(skey & _PAGE_CHANGED))
-		return 0;
-	page_set_storage_key(pfn << PAGE_SHIFT, skey & ~_PAGE_CHANGED, mapped);
-	return 1;
-}
-
-/*
  * Test and clear referenced bit in storage key.
  */
 #define __HAVE_ARCH_PAGE_TEST_AND_CLEAR_YOUNG
diff --git a/arch/s390/include/asm/pci.h b/arch/s390/include/asm/pci.h
index b1fa93c..05333b7 100644
--- a/arch/s390/include/asm/pci.h
+++ b/arch/s390/include/asm/pci.h
@@ -160,9 +160,14 @@
 int zpci_msihash_init(void);
 void zpci_msihash_exit(void);
 
+#ifdef CONFIG_PCI
 /* Error handling and recovery */
 void zpci_event_error(void *);
 void zpci_event_availability(void *);
+#else /* CONFIG_PCI */
+static inline void zpci_event_error(void *e) {}
+static inline void zpci_event_availability(void *e) {}
+#endif /* CONFIG_PCI */
 
 /* Helpers */
 struct zpci_dev *get_zdev(struct pci_dev *);
@@ -180,8 +185,10 @@
 /* Hotplug */
 extern struct mutex zpci_list_lock;
 extern struct list_head zpci_list;
-extern struct pci_hp_callback_ops hotplug_ops;
-extern unsigned int pci_probe;
+extern unsigned int s390_pci_probe;
+
+void zpci_register_hp_ops(struct pci_hp_callback_ops *);
+void zpci_deregister_hp_ops(void);
 
 /* FMB */
 int zpci_fmb_enable_device(struct zpci_dev *);
diff --git a/arch/s390/include/asm/pgtable.h b/arch/s390/include/asm/pgtable.h
index 098adbb..97de120 100644
--- a/arch/s390/include/asm/pgtable.h
+++ b/arch/s390/include/asm/pgtable.h
@@ -29,6 +29,7 @@
 #ifndef __ASSEMBLY__
 #include <linux/sched.h>
 #include <linux/mm_types.h>
+#include <linux/page-flags.h>
 #include <asm/bug.h>
 #include <asm/page.h>
 
@@ -221,13 +222,15 @@
 /* Software bits in the page table entry */
 #define _PAGE_SWT	0x001		/* SW pte type bit t */
 #define _PAGE_SWX	0x002		/* SW pte type bit x */
-#define _PAGE_SWC	0x004		/* SW pte changed bit (for KVM) */
-#define _PAGE_SWR	0x008		/* SW pte referenced bit (for KVM) */
-#define _PAGE_SPECIAL	0x010		/* SW associated with special page */
+#define _PAGE_SWC	0x004		/* SW pte changed bit */
+#define _PAGE_SWR	0x008		/* SW pte referenced bit */
+#define _PAGE_SWW	0x010		/* SW pte write bit */
+#define _PAGE_SPECIAL	0x020		/* SW associated with special page */
 #define __HAVE_ARCH_PTE_SPECIAL
 
 /* Set of bits not changed in pte_modify */
-#define _PAGE_CHG_MASK	(PAGE_MASK | _PAGE_SPECIAL | _PAGE_SWC | _PAGE_SWR)
+#define _PAGE_CHG_MASK		(PAGE_MASK | _PAGE_SPECIAL | _PAGE_CO | \
+				 _PAGE_SWC | _PAGE_SWR)
 
 /* Six different types of pages. */
 #define _PAGE_TYPE_EMPTY	0x400
@@ -321,6 +324,7 @@
 
 /* Bits in the region table entry */
 #define _REGION_ENTRY_ORIGIN	~0xfffUL/* region/segment table origin	    */
+#define _REGION_ENTRY_RO	0x200	/* region protection bit	    */
 #define _REGION_ENTRY_INV	0x20	/* invalid region table entry	    */
 #define _REGION_ENTRY_TYPE_MASK	0x0c	/* region/segment table type mask   */
 #define _REGION_ENTRY_TYPE_R1	0x0c	/* region first table type	    */
@@ -382,9 +386,11 @@
  */
 #define PAGE_NONE	__pgprot(_PAGE_TYPE_NONE)
 #define PAGE_RO		__pgprot(_PAGE_TYPE_RO)
-#define PAGE_RW		__pgprot(_PAGE_TYPE_RW)
+#define PAGE_RW		__pgprot(_PAGE_TYPE_RO | _PAGE_SWW)
+#define PAGE_RWC	__pgprot(_PAGE_TYPE_RW | _PAGE_SWW | _PAGE_SWC)
 
-#define PAGE_KERNEL	PAGE_RW
+#define PAGE_KERNEL	PAGE_RWC
+#define PAGE_SHARED	PAGE_KERNEL
 #define PAGE_COPY	PAGE_RO
 
 /*
@@ -631,23 +637,23 @@
 	bits = skey & (_PAGE_CHANGED | _PAGE_REFERENCED);
 	/* Clear page changed & referenced bit in the storage key */
 	if (bits & _PAGE_CHANGED)
-		page_set_storage_key(address, skey ^ bits, 1);
+		page_set_storage_key(address, skey ^ bits, 0);
 	else if (bits)
 		page_reset_referenced(address);
 	/* Transfer page changed & referenced bit to guest bits in pgste */
 	pgste_val(pgste) |= bits << 48;		/* RCP_GR_BIT & RCP_GC_BIT */
 	/* Get host changed & referenced bits from pgste */
 	bits |= (pgste_val(pgste) & (RCP_HR_BIT | RCP_HC_BIT)) >> 52;
-	/* Clear host bits in pgste. */
+	/* Transfer page changed & referenced bit to kvm user bits */
+	pgste_val(pgste) |= bits << 45;		/* KVM_UR_BIT & KVM_UC_BIT */
+	/* Clear relevant host bits in pgste. */
 	pgste_val(pgste) &= ~(RCP_HR_BIT | RCP_HC_BIT);
 	pgste_val(pgste) &= ~(RCP_ACC_BITS | RCP_FP_BIT);
 	/* Copy page access key and fetch protection bit to pgste */
 	pgste_val(pgste) |=
 		(unsigned long) (skey & (_PAGE_ACC_BITS | _PAGE_FP_BIT)) << 56;
-	/* Transfer changed and referenced to kvm user bits */
-	pgste_val(pgste) |= bits << 45;		/* KVM_UR_BIT & KVM_UC_BIT */
-	/* Transfer changed & referenced to pte sofware bits */
-	pte_val(*ptep) |= bits << 1;		/* _PAGE_SWR & _PAGE_SWC */
+	/* Transfer referenced bit to pte */
+	pte_val(*ptep) |= (bits & _PAGE_REFERENCED) << 1;
 #endif
 	return pgste;
 
@@ -660,20 +666,25 @@
 
 	if (!pte_present(*ptep))
 		return pgste;
+	/* Get referenced bit from storage key */
 	young = page_reset_referenced(pte_val(*ptep) & PAGE_MASK);
-	/* Transfer page referenced bit to pte software bit (host view) */
-	if (young || (pgste_val(pgste) & RCP_HR_BIT))
+	if (young)
+		pgste_val(pgste) |= RCP_GR_BIT;
+	/* Get host referenced bit from pgste */
+	if (pgste_val(pgste) & RCP_HR_BIT) {
+		pgste_val(pgste) &= ~RCP_HR_BIT;
+		young = 1;
+	}
+	/* Transfer referenced bit to kvm user bits and pte */
+	if (young) {
+		pgste_val(pgste) |= KVM_UR_BIT;
 		pte_val(*ptep) |= _PAGE_SWR;
-	/* Clear host referenced bit in pgste. */
-	pgste_val(pgste) &= ~RCP_HR_BIT;
-	/* Transfer page referenced bit to guest bit in pgste */
-	pgste_val(pgste) |= (unsigned long) young << 50; /* set RCP_GR_BIT */
+	}
 #endif
 	return pgste;
-
 }
 
-static inline void pgste_set_pte(pte_t *ptep, pgste_t pgste, pte_t entry)
+static inline void pgste_set_key(pte_t *ptep, pgste_t pgste, pte_t entry)
 {
 #ifdef CONFIG_PGSTE
 	unsigned long address;
@@ -687,10 +698,23 @@
 	/* Set page access key and fetch protection bit from pgste */
 	nkey |= (pgste_val(pgste) & (RCP_ACC_BITS | RCP_FP_BIT)) >> 56;
 	if (okey != nkey)
-		page_set_storage_key(address, nkey, 1);
+		page_set_storage_key(address, nkey, 0);
 #endif
 }
 
+static inline void pgste_set_pte(pte_t *ptep, pte_t entry)
+{
+	if (!MACHINE_HAS_ESOP && (pte_val(entry) & _PAGE_SWW)) {
+		/*
+		 * Without enhanced suppression-on-protection force
+		 * the dirty bit on for all writable ptes.
+		 */
+		pte_val(entry) |= _PAGE_SWC;
+		pte_val(entry) &= ~_PAGE_RO;
+	}
+	*ptep = entry;
+}
+
 /**
  * struct gmap_struct - guest address space
  * @mm: pointer to the parent mm_struct
@@ -749,11 +773,14 @@
 
 	if (mm_has_pgste(mm)) {
 		pgste = pgste_get_lock(ptep);
-		pgste_set_pte(ptep, pgste, entry);
-		*ptep = entry;
+		pgste_set_key(ptep, pgste, entry);
+		pgste_set_pte(ptep, entry);
 		pgste_set_unlock(ptep, pgste);
-	} else
+	} else {
+		if (!(pte_val(entry) & _PAGE_INVALID) && MACHINE_HAS_EDAT1)
+			pte_val(entry) |= _PAGE_CO;
 		*ptep = entry;
+	}
 }
 
 /*
@@ -762,16 +789,12 @@
  */
 static inline int pte_write(pte_t pte)
 {
-	return (pte_val(pte) & _PAGE_RO) == 0;
+	return (pte_val(pte) & _PAGE_SWW) != 0;
 }
 
 static inline int pte_dirty(pte_t pte)
 {
-#ifdef CONFIG_PGSTE
-	if (pte_val(pte) & _PAGE_SWC)
-		return 1;
-#endif
-	return 0;
+	return (pte_val(pte) & _PAGE_SWC) != 0;
 }
 
 static inline int pte_young(pte_t pte)
@@ -821,11 +844,14 @@
 {
 	pte_val(pte) &= _PAGE_CHG_MASK;
 	pte_val(pte) |= pgprot_val(newprot);
+	if ((pte_val(pte) & _PAGE_SWC) && (pte_val(pte) & _PAGE_SWW))
+		pte_val(pte) &= ~_PAGE_RO;
 	return pte;
 }
 
 static inline pte_t pte_wrprotect(pte_t pte)
 {
+	pte_val(pte) &= ~_PAGE_SWW;
 	/* Do not clobber _PAGE_TYPE_NONE pages!  */
 	if (!(pte_val(pte) & _PAGE_INVALID))
 		pte_val(pte) |= _PAGE_RO;
@@ -834,20 +860,26 @@
 
 static inline pte_t pte_mkwrite(pte_t pte)
 {
-	pte_val(pte) &= ~_PAGE_RO;
+	pte_val(pte) |= _PAGE_SWW;
+	if (pte_val(pte) & _PAGE_SWC)
+		pte_val(pte) &= ~_PAGE_RO;
 	return pte;
 }
 
 static inline pte_t pte_mkclean(pte_t pte)
 {
-#ifdef CONFIG_PGSTE
 	pte_val(pte) &= ~_PAGE_SWC;
-#endif
+	/* Do not clobber _PAGE_TYPE_NONE pages!  */
+	if (!(pte_val(pte) & _PAGE_INVALID))
+		pte_val(pte) |= _PAGE_RO;
 	return pte;
 }
 
 static inline pte_t pte_mkdirty(pte_t pte)
 {
+	pte_val(pte) |= _PAGE_SWC;
+	if (pte_val(pte) & _PAGE_SWW)
+		pte_val(pte) &= ~_PAGE_RO;
 	return pte;
 }
 
@@ -885,10 +917,10 @@
 		pte_val(pte) |= _SEGMENT_ENTRY_INV;
 	}
 	/*
-	 * Clear SW pte bits SWT and SWX, there are no SW bits in a segment
-	 * table entry.
+	 * Clear SW pte bits, there are no SW bits in a segment table entry.
 	 */
-	pte_val(pte) &= ~(_PAGE_SWT | _PAGE_SWX);
+	pte_val(pte) &= ~(_PAGE_SWT | _PAGE_SWX | _PAGE_SWC |
+			  _PAGE_SWR | _PAGE_SWW);
 	/*
 	 * Also set the change-override bit because we don't need dirty bit
 	 * tracking for hugetlbfs pages.
@@ -1040,9 +1072,11 @@
 					   unsigned long address,
 					   pte_t *ptep, pte_t pte)
 {
-	*ptep = pte;
-	if (mm_has_pgste(mm))
+	if (mm_has_pgste(mm)) {
+		pgste_set_pte(ptep, pte);
 		pgste_set_unlock(ptep, *(pgste_t *)(ptep + PTRS_PER_PTE));
+	} else
+		*ptep = pte;
 }
 
 #define __HAVE_ARCH_PTEP_CLEAR_FLUSH
@@ -1110,10 +1144,13 @@
 
 		if (!mm_exclusive(mm))
 			__ptep_ipte(address, ptep);
-		*ptep = pte_wrprotect(pte);
+		pte = pte_wrprotect(pte);
 
-		if (mm_has_pgste(mm))
+		if (mm_has_pgste(mm)) {
+			pgste_set_pte(ptep, pte);
 			pgste_set_unlock(ptep, pgste);
+		} else
+			*ptep = pte;
 	}
 	return pte;
 }
@@ -1131,10 +1168,12 @@
 		pgste = pgste_get_lock(ptep);
 
 	__ptep_ipte(address, ptep);
-	*ptep = entry;
 
-	if (mm_has_pgste(vma->vm_mm))
+	if (mm_has_pgste(vma->vm_mm)) {
+		pgste_set_pte(ptep, entry);
 		pgste_set_unlock(ptep, pgste);
+	} else
+		*ptep = entry;
 	return 1;
 }
 
@@ -1152,8 +1191,13 @@
 static inline pte_t mk_pte(struct page *page, pgprot_t pgprot)
 {
 	unsigned long physpage = page_to_phys(page);
+	pte_t __pte = mk_pte_phys(physpage, pgprot);
 
-	return mk_pte_phys(physpage, pgprot);
+	if ((pte_val(__pte) & _PAGE_SWW) && PageDirty(page)) {
+		pte_val(__pte) |= _PAGE_SWC;
+		pte_val(__pte) &= ~_PAGE_RO;
+	}
+	return __pte;
 }
 
 #define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD-1))
@@ -1245,6 +1289,8 @@
 static inline void set_pmd_at(struct mm_struct *mm, unsigned long addr,
 			      pmd_t *pmdp, pmd_t entry)
 {
+	if (!(pmd_val(entry) & _SEGMENT_ENTRY_INV) && MACHINE_HAS_EDAT1)
+		pmd_val(entry) |= _SEGMENT_ENTRY_CO;
 	*pmdp = entry;
 }
 
diff --git a/arch/s390/include/asm/sclp.h b/arch/s390/include/asm/sclp.h
index 8337886..06a1361 100644
--- a/arch/s390/include/asm/sclp.h
+++ b/arch/s390/include/asm/sclp.h
@@ -46,7 +46,6 @@
 void sclp_facilities_detect(void);
 unsigned long long sclp_get_rnmax(void);
 unsigned long long sclp_get_rzm(void);
-u8 sclp_get_fac85(void);
 int sclp_sdias_blk_count(void);
 int sclp_sdias_copy(void *dest, int blk_num, int nr_blks);
 int sclp_chp_configure(struct chp_id chpid);
diff --git a/arch/s390/include/asm/setup.h b/arch/s390/include/asm/setup.h
index f69f76b..ff67d73 100644
--- a/arch/s390/include/asm/setup.h
+++ b/arch/s390/include/asm/setup.h
@@ -64,17 +64,18 @@
 
 #define MACHINE_FLAG_VM		(1UL << 0)
 #define MACHINE_FLAG_IEEE	(1UL << 1)
-#define MACHINE_FLAG_CSP	(1UL << 3)
-#define MACHINE_FLAG_MVPG	(1UL << 4)
-#define MACHINE_FLAG_DIAG44	(1UL << 5)
-#define MACHINE_FLAG_IDTE	(1UL << 6)
-#define MACHINE_FLAG_DIAG9C	(1UL << 7)
-#define MACHINE_FLAG_MVCOS	(1UL << 8)
-#define MACHINE_FLAG_KVM	(1UL << 9)
+#define MACHINE_FLAG_CSP	(1UL << 2)
+#define MACHINE_FLAG_MVPG	(1UL << 3)
+#define MACHINE_FLAG_DIAG44	(1UL << 4)
+#define MACHINE_FLAG_IDTE	(1UL << 5)
+#define MACHINE_FLAG_DIAG9C	(1UL << 6)
+#define MACHINE_FLAG_MVCOS	(1UL << 7)
+#define MACHINE_FLAG_KVM	(1UL << 8)
+#define MACHINE_FLAG_ESOP	(1UL << 9)
 #define MACHINE_FLAG_EDAT1	(1UL << 10)
 #define MACHINE_FLAG_EDAT2	(1UL << 11)
 #define MACHINE_FLAG_LPAR	(1UL << 12)
-#define MACHINE_FLAG_SPP	(1UL << 13)
+#define MACHINE_FLAG_LPP	(1UL << 13)
 #define MACHINE_FLAG_TOPOLOGY	(1UL << 14)
 #define MACHINE_FLAG_TE		(1UL << 15)
 #define MACHINE_FLAG_RRBM	(1UL << 16)
@@ -84,6 +85,7 @@
 #define MACHINE_IS_LPAR		(S390_lowcore.machine_flags & MACHINE_FLAG_LPAR)
 
 #define MACHINE_HAS_DIAG9C	(S390_lowcore.machine_flags & MACHINE_FLAG_DIAG9C)
+#define MACHINE_HAS_ESOP	(S390_lowcore.machine_flags & MACHINE_FLAG_ESOP)
 #define MACHINE_HAS_PFMF	MACHINE_HAS_EDAT1
 #define MACHINE_HAS_HPAGE	MACHINE_HAS_EDAT1
 
@@ -96,7 +98,7 @@
 #define MACHINE_HAS_MVCOS	(0)
 #define MACHINE_HAS_EDAT1	(0)
 #define MACHINE_HAS_EDAT2	(0)
-#define MACHINE_HAS_SPP		(0)
+#define MACHINE_HAS_LPP		(0)
 #define MACHINE_HAS_TOPOLOGY	(0)
 #define MACHINE_HAS_TE		(0)
 #define MACHINE_HAS_RRBM	(0)
@@ -109,7 +111,7 @@
 #define MACHINE_HAS_MVCOS	(S390_lowcore.machine_flags & MACHINE_FLAG_MVCOS)
 #define MACHINE_HAS_EDAT1	(S390_lowcore.machine_flags & MACHINE_FLAG_EDAT1)
 #define MACHINE_HAS_EDAT2	(S390_lowcore.machine_flags & MACHINE_FLAG_EDAT2)
-#define MACHINE_HAS_SPP		(S390_lowcore.machine_flags & MACHINE_FLAG_SPP)
+#define MACHINE_HAS_LPP		(S390_lowcore.machine_flags & MACHINE_FLAG_LPP)
 #define MACHINE_HAS_TOPOLOGY	(S390_lowcore.machine_flags & MACHINE_FLAG_TOPOLOGY)
 #define MACHINE_HAS_TE		(S390_lowcore.machine_flags & MACHINE_FLAG_TE)
 #define MACHINE_HAS_RRBM	(S390_lowcore.machine_flags & MACHINE_FLAG_RRBM)
diff --git a/arch/s390/include/asm/timex.h b/arch/s390/include/asm/timex.h
index 4c060bb..8ad8af9 100644
--- a/arch/s390/include/asm/timex.h
+++ b/arch/s390/include/asm/timex.h
@@ -15,7 +15,7 @@
 #define TOD_UNIX_EPOCH 0x7d91048bca000000ULL
 
 /* Inline functions for clock register access. */
-static inline int set_clock(__u64 time)
+static inline int set_tod_clock(__u64 time)
 {
 	int cc;
 
@@ -27,7 +27,7 @@
 	return cc;
 }
 
-static inline int store_clock(__u64 *time)
+static inline int store_tod_clock(__u64 *time)
 {
 	int cc;
 
@@ -71,7 +71,7 @@
 
 typedef unsigned long long cycles_t;
 
-static inline unsigned long long get_clock(void)
+static inline unsigned long long get_tod_clock(void)
 {
 	unsigned long long clk;
 
@@ -83,21 +83,21 @@
 	return clk;
 }
 
-static inline void get_clock_ext(char *clk)
+static inline void get_tod_clock_ext(char *clk)
 {
 	asm volatile("stcke %0" : "=Q" (*clk) : : "cc");
 }
 
-static inline unsigned long long get_clock_xt(void)
+static inline unsigned long long get_tod_clock_xt(void)
 {
 	unsigned char clk[16];
-	get_clock_ext(clk);
+	get_tod_clock_ext(clk);
 	return *((unsigned long long *)&clk[1]);
 }
 
 static inline cycles_t get_cycles(void)
 {
-	return (cycles_t) get_clock() >> 2;
+	return (cycles_t) get_tod_clock() >> 2;
 }
 
 int get_sync_clock(unsigned long long *clock);
@@ -123,9 +123,9 @@
  * function, otherwise the returned value is not guaranteed to
  * be monotonic.
  */
-static inline unsigned long long get_clock_monotonic(void)
+static inline unsigned long long get_tod_clock_monotonic(void)
 {
-	return get_clock_xt() - sched_clock_base_cc;
+	return get_tod_clock_xt() - sched_clock_base_cc;
 }
 
 /**
diff --git a/arch/s390/kernel/debug.c b/arch/s390/kernel/debug.c
index 4e8215e..09a94cd 100644
--- a/arch/s390/kernel/debug.c
+++ b/arch/s390/kernel/debug.c
@@ -867,7 +867,7 @@
 debug_finish_entry(debug_info_t * id, debug_entry_t* active, int level,
 			int exception)
 {
-	active->id.stck = get_clock();
+	active->id.stck = get_tod_clock();
 	active->id.fields.cpuid = smp_processor_id();
 	active->caller = __builtin_return_address(0);
 	active->id.fields.exception = exception;
diff --git a/arch/s390/kernel/dis.c b/arch/s390/kernel/dis.c
index a7f9abd..c50665f 100644
--- a/arch/s390/kernel/dis.c
+++ b/arch/s390/kernel/dis.c
@@ -840,7 +840,6 @@
 	{ "stcke", 0x78, INSTR_S_RD },
 	{ "sacf", 0x79, INSTR_S_RD },
 	{ "stsi", 0x7d, INSTR_S_RD },
-	{ "spp", 0x80, INSTR_S_RD },
 	{ "srnm", 0x99, INSTR_S_RD },
 	{ "stfpc", 0x9c, INSTR_S_RD },
 	{ "lfpc", 0x9d, INSTR_S_RD },
diff --git a/arch/s390/kernel/early.c b/arch/s390/kernel/early.c
index 1f0eee9..bda011e 100644
--- a/arch/s390/kernel/early.c
+++ b/arch/s390/kernel/early.c
@@ -47,10 +47,10 @@
 {
 	u64 time;
 
-	if (store_clock(&time) == 0)
+	if (store_tod_clock(&time) == 0)
 		return;
 	/* TOD clock not running. Set the clock to Unix Epoch. */
-	if (set_clock(TOD_UNIX_EPOCH) != 0 || store_clock(&time) != 0)
+	if (set_tod_clock(TOD_UNIX_EPOCH) != 0 || store_tod_clock(&time) != 0)
 		disabled_wait(0);
 
 	sched_clock_base_cc = TOD_UNIX_EPOCH;
@@ -173,7 +173,7 @@
 	}
 
 	/* re-initialize cputime accounting. */
-	sched_clock_base_cc = get_clock();
+	sched_clock_base_cc = get_tod_clock();
 	S390_lowcore.last_update_clock = sched_clock_base_cc;
 	S390_lowcore.last_update_timer = 0x7fffffffffffffffULL;
 	S390_lowcore.user_timer = 0;
@@ -381,7 +381,7 @@
 	if (test_facility(27))
 		S390_lowcore.machine_flags |= MACHINE_FLAG_MVCOS;
 	if (test_facility(40))
-		S390_lowcore.machine_flags |= MACHINE_FLAG_SPP;
+		S390_lowcore.machine_flags |= MACHINE_FLAG_LPP;
 	if (test_facility(50) && test_facility(73))
 		S390_lowcore.machine_flags |= MACHINE_FLAG_TE;
 	if (test_facility(66))
diff --git a/arch/s390/kernel/entry64.S b/arch/s390/kernel/entry64.S
index 6d34e0c..9c837c1 100644
--- a/arch/s390/kernel/entry64.S
+++ b/arch/s390/kernel/entry64.S
@@ -72,9 +72,9 @@
 #endif
 	.endm
 
-	.macro SPP newpp
+	.macro LPP newpp
 #if defined(CONFIG_KVM) || defined(CONFIG_KVM_MODULE)
-	tm	__LC_MACHINE_FLAGS+6,0x20	# MACHINE_FLAG_SPP
+	tm	__LC_MACHINE_FLAGS+6,0x20	# MACHINE_FLAG_LPP
 	jz	.+8
 	.insn	s,0xb2800000,\newpp
 #endif
@@ -96,7 +96,7 @@
 	jhe	.+22
 	.endif
 	lg	%r9,BASED(.Lsie_loop)
-	SPP	BASED(.Lhost_id)	# set host id
+	LPP	BASED(.Lhost_id)	# set host id
 #endif
 	.endm
 
@@ -967,10 +967,10 @@
 	lctlg	%c1,%c1,__GMAP_ASCE(%r14)	# load primary asce
 sie_gmap:
 	lg	%r14,__SF_EMPTY(%r15)		# get control block pointer
-	SPP	__SF_EMPTY(%r15)		# set guest id
+	LPP	__SF_EMPTY(%r15)		# set guest id
 	sie	0(%r14)
 sie_done:
-	SPP	__SF_EMPTY+16(%r15)		# set host id
+	LPP	__SF_EMPTY+16(%r15)		# set host id
 	lg	%r14,__LC_THREAD_INFO		# pointer thread_info struct
 sie_exit:
 	lctlg	%c1,%c1,__LC_USER_ASCE		# load primary asce
diff --git a/arch/s390/kernel/ipl.c b/arch/s390/kernel/ipl.c
index 6ffcd320..d8a6a38 100644
--- a/arch/s390/kernel/ipl.c
+++ b/arch/s390/kernel/ipl.c
@@ -1414,6 +1414,16 @@
 
 static struct kset *dump_kset;
 
+static void diag308_dump(void *dump_block)
+{
+	diag308(DIAG308_SET, dump_block);
+	while (1) {
+		if (diag308(DIAG308_DUMP, NULL) != 0x302)
+			break;
+		udelay_simple(USEC_PER_SEC);
+	}
+}
+
 static void __dump_run(void *unused)
 {
 	struct ccw_dev_id devid;
@@ -1432,12 +1442,10 @@
 		__cpcmd(buf, NULL, 0, NULL);
 		break;
 	case DUMP_METHOD_CCW_DIAG:
-		diag308(DIAG308_SET, dump_block_ccw);
-		diag308(DIAG308_DUMP, NULL);
+		diag308_dump(dump_block_ccw);
 		break;
 	case DUMP_METHOD_FCP_DIAG:
-		diag308(DIAG308_SET, dump_block_fcp);
-		diag308(DIAG308_DUMP, NULL);
+		diag308_dump(dump_block_fcp);
 		break;
 	default:
 		break;
diff --git a/arch/s390/kernel/module.c b/arch/s390/kernel/module.c
index 4610dea..f750bd7 100644
--- a/arch/s390/kernel/module.c
+++ b/arch/s390/kernel/module.c
@@ -65,8 +65,7 @@
 	vfree(module_region);
 }
 
-static void
-check_rela(Elf_Rela *rela, struct module *me)
+static void check_rela(Elf_Rela *rela, struct module *me)
 {
 	struct mod_arch_syminfo *info;
 
@@ -115,9 +114,8 @@
  * Account for GOT and PLT relocations. We can't add sections for
  * got and plt but we can increase the core module size.
  */
-int
-module_frob_arch_sections(Elf_Ehdr *hdr, Elf_Shdr *sechdrs,
-			  char *secstrings, struct module *me)
+int module_frob_arch_sections(Elf_Ehdr *hdr, Elf_Shdr *sechdrs,
+			      char *secstrings, struct module *me)
 {
 	Elf_Shdr *symtab;
 	Elf_Sym *symbols;
@@ -179,13 +177,52 @@
 	return 0;
 }
 
-static int
-apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab, 
-	   struct module *me)
+static int apply_rela_bits(Elf_Addr loc, Elf_Addr val,
+			   int sign, int bits, int shift)
+{
+	unsigned long umax;
+	long min, max;
+
+	if (val & ((1UL << shift) - 1))
+		return -ENOEXEC;
+	if (sign) {
+		val = (Elf_Addr)(((long) val) >> shift);
+		min = -(1L << (bits - 1));
+		max = (1L << (bits - 1)) - 1;
+		if ((long) val < min || (long) val > max)
+			return -ENOEXEC;
+	} else {
+		val >>= shift;
+		umax = ((1UL << (bits - 1)) << 1) - 1;
+		if ((unsigned long) val > umax)
+			return -ENOEXEC;
+	}
+
+	if (bits == 8)
+		*(unsigned char *) loc = val;
+	else if (bits == 12)
+		*(unsigned short *) loc = (val & 0xfff) |
+			(*(unsigned short *) loc & 0xf000);
+	else if (bits == 16)
+		*(unsigned short *) loc = val;
+	else if (bits == 20)
+		*(unsigned int *) loc = (val & 0xfff) << 16 |
+			(val & 0xff000) >> 4 |
+			(*(unsigned int *) loc & 0xf00000ff);
+	else if (bits == 32)
+		*(unsigned int *) loc = val;
+	else if (bits == 64)
+		*(unsigned long *) loc = val;
+	return 0;
+}
+
+static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
+		      const char *strtab, struct module *me)
 {
 	struct mod_arch_syminfo *info;
 	Elf_Addr loc, val;
 	int r_type, r_sym;
+	int rc;
 
 	/* This is where to make the change */
 	loc = base + rela->r_offset;
@@ -197,6 +234,9 @@
 	val = symtab[r_sym].st_value;
 
 	switch (r_type) {
+	case R_390_NONE:	/* No relocation.  */
+		rc = 0;
+		break;
 	case R_390_8:		/* Direct 8 bit.   */
 	case R_390_12:		/* Direct 12 bit.  */
 	case R_390_16:		/* Direct 16 bit.  */
@@ -205,20 +245,17 @@
 	case R_390_64:		/* Direct 64 bit.  */
 		val += rela->r_addend;
 		if (r_type == R_390_8)
-			*(unsigned char *) loc = val;
+			rc = apply_rela_bits(loc, val, 0, 8, 0);
 		else if (r_type == R_390_12)
-			*(unsigned short *) loc = (val & 0xfff) |
-				(*(unsigned short *) loc & 0xf000);
+			rc = apply_rela_bits(loc, val, 0, 12, 0);
 		else if (r_type == R_390_16)
-			*(unsigned short *) loc = val;
+			rc = apply_rela_bits(loc, val, 0, 16, 0);
 		else if (r_type == R_390_20)
-			*(unsigned int *) loc =
-				(*(unsigned int *) loc & 0xf00000ff) |
-				(val & 0xfff) << 16 | (val & 0xff000) >> 4;
+			rc = apply_rela_bits(loc, val, 1, 20, 0);
 		else if (r_type == R_390_32)
-			*(unsigned int *) loc = val;
+			rc = apply_rela_bits(loc, val, 0, 32, 0);
 		else if (r_type == R_390_64)
-			*(unsigned long *) loc = val;
+			rc = apply_rela_bits(loc, val, 0, 64, 0);
 		break;
 	case R_390_PC16:	/* PC relative 16 bit.  */
 	case R_390_PC16DBL:	/* PC relative 16 bit shifted by 1.  */
@@ -227,15 +264,15 @@
 	case R_390_PC64:	/* PC relative 64 bit.	*/
 		val += rela->r_addend - loc;
 		if (r_type == R_390_PC16)
-			*(unsigned short *) loc = val;
+			rc = apply_rela_bits(loc, val, 1, 16, 0);
 		else if (r_type == R_390_PC16DBL)
-			*(unsigned short *) loc = val >> 1;
+			rc = apply_rela_bits(loc, val, 1, 16, 1);
 		else if (r_type == R_390_PC32DBL)
-			*(unsigned int *) loc = val >> 1;
+			rc = apply_rela_bits(loc, val, 1, 32, 1);
 		else if (r_type == R_390_PC32)
-			*(unsigned int *) loc = val;
+			rc = apply_rela_bits(loc, val, 1, 32, 0);
 		else if (r_type == R_390_PC64)
-			*(unsigned long *) loc = val;
+			rc = apply_rela_bits(loc, val, 1, 64, 0);
 		break;
 	case R_390_GOT12:	/* 12 bit GOT offset.  */
 	case R_390_GOT16:	/* 16 bit GOT offset.  */
@@ -260,26 +297,24 @@
 		val = info->got_offset + rela->r_addend;
 		if (r_type == R_390_GOT12 ||
 		    r_type == R_390_GOTPLT12)
-			*(unsigned short *) loc = (val & 0xfff) |
-				(*(unsigned short *) loc & 0xf000);
+			rc = apply_rela_bits(loc, val, 0, 12, 0);
 		else if (r_type == R_390_GOT16 ||
 			 r_type == R_390_GOTPLT16)
-			*(unsigned short *) loc = val;
+			rc = apply_rela_bits(loc, val, 0, 16, 0);
 		else if (r_type == R_390_GOT20 ||
 			 r_type == R_390_GOTPLT20)
-			*(unsigned int *) loc =
-				(*(unsigned int *) loc & 0xf00000ff) |
-				(val & 0xfff) << 16 | (val & 0xff000) >> 4;
+			rc = apply_rela_bits(loc, val, 1, 20, 0);
 		else if (r_type == R_390_GOT32 ||
 			 r_type == R_390_GOTPLT32)
-			*(unsigned int *) loc = val;
-		else if (r_type == R_390_GOTENT ||
-			 r_type == R_390_GOTPLTENT)
-			*(unsigned int *) loc =
-				(val + (Elf_Addr) me->module_core - loc) >> 1;
+			rc = apply_rela_bits(loc, val, 0, 32, 0);
 		else if (r_type == R_390_GOT64 ||
 			 r_type == R_390_GOTPLT64)
-			*(unsigned long *) loc = val;
+			rc = apply_rela_bits(loc, val, 0, 64, 0);
+		else if (r_type == R_390_GOTENT ||
+			 r_type == R_390_GOTPLTENT) {
+			val += (Elf_Addr) me->module_core - loc;
+			rc = apply_rela_bits(loc, val, 1, 32, 1);
+		}
 		break;
 	case R_390_PLT16DBL:	/* 16 bit PC rel. PLT shifted by 1.  */
 	case R_390_PLT32DBL:	/* 32 bit PC rel. PLT shifted by 1.  */
@@ -321,17 +356,17 @@
 			val += rela->r_addend - loc;
 		}
 		if (r_type == R_390_PLT16DBL)
-			*(unsigned short *) loc = val >> 1;
+			rc = apply_rela_bits(loc, val, 1, 16, 1);
 		else if (r_type == R_390_PLTOFF16)
-			*(unsigned short *) loc = val;
+			rc = apply_rela_bits(loc, val, 0, 16, 0);
 		else if (r_type == R_390_PLT32DBL)
-			*(unsigned int *) loc = val >> 1;
+			rc = apply_rela_bits(loc, val, 1, 32, 1);
 		else if (r_type == R_390_PLT32 ||
 			 r_type == R_390_PLTOFF32)
-			*(unsigned int *) loc = val;
+			rc = apply_rela_bits(loc, val, 0, 32, 0);
 		else if (r_type == R_390_PLT64 ||
 			 r_type == R_390_PLTOFF64)
-			*(unsigned long *) loc = val;
+			rc = apply_rela_bits(loc, val, 0, 64, 0);
 		break;
 	case R_390_GOTOFF16:	/* 16 bit offset to GOT.  */
 	case R_390_GOTOFF32:	/* 32 bit offset to GOT.  */
@@ -339,20 +374,20 @@
 		val = val + rela->r_addend -
 			((Elf_Addr) me->module_core + me->arch.got_offset);
 		if (r_type == R_390_GOTOFF16)
-			*(unsigned short *) loc = val;
+			rc = apply_rela_bits(loc, val, 0, 16, 0);
 		else if (r_type == R_390_GOTOFF32)
-			*(unsigned int *) loc = val;
+			rc = apply_rela_bits(loc, val, 0, 32, 0);
 		else if (r_type == R_390_GOTOFF64)
-			*(unsigned long *) loc = val;
+			rc = apply_rela_bits(loc, val, 0, 64, 0);
 		break;
 	case R_390_GOTPC:	/* 32 bit PC relative offset to GOT. */
 	case R_390_GOTPCDBL:	/* 32 bit PC rel. off. to GOT shifted by 1. */
 		val = (Elf_Addr) me->module_core + me->arch.got_offset +
 			rela->r_addend - loc;
 		if (r_type == R_390_GOTPC)
-			*(unsigned int *) loc = val;
+			rc = apply_rela_bits(loc, val, 1, 32, 0);
 		else if (r_type == R_390_GOTPCDBL)
-			*(unsigned int *) loc = val >> 1;
+			rc = apply_rela_bits(loc, val, 1, 32, 1);
 		break;
 	case R_390_COPY:
 	case R_390_GLOB_DAT:	/* Create GOT entry.  */
@@ -360,19 +395,25 @@
 	case R_390_RELATIVE:	/* Adjust by program base.  */
 		/* Only needed if we want to support loading of 
 		   modules linked with -shared. */
-		break;
+		return -ENOEXEC;
 	default:
-		printk(KERN_ERR "module %s: Unknown relocation: %u\n",
+		printk(KERN_ERR "module %s: unknown relocation: %u\n",
 		       me->name, r_type);
 		return -ENOEXEC;
 	}
+	if (rc) {
+		printk(KERN_ERR "module %s: relocation error for symbol %s "
+		       "(r_type %i, value 0x%lx)\n",
+		       me->name, strtab + symtab[r_sym].st_name,
+		       r_type, (unsigned long) val);
+		return rc;
+	}
 	return 0;
 }
 
-int
-apply_relocate_add(Elf_Shdr *sechdrs, const char *strtab,
-		   unsigned int symindex, unsigned int relsec,
-		   struct module *me)
+int apply_relocate_add(Elf_Shdr *sechdrs, const char *strtab,
+		       unsigned int symindex, unsigned int relsec,
+		       struct module *me)
 {
 	Elf_Addr base;
 	Elf_Sym *symtab;
@@ -388,7 +429,7 @@
 	n = sechdrs[relsec].sh_size / sizeof(Elf_Rela);
 
 	for (i = 0; i < n; i++, rela++) {
-		rc = apply_rela(rela, base, symtab, me);
+		rc = apply_rela(rela, base, symtab, strtab, me);
 		if (rc)
 			return rc;
 	}
diff --git a/arch/s390/kernel/nmi.c b/arch/s390/kernel/nmi.c
index 7918fbe..504175e 100644
--- a/arch/s390/kernel/nmi.c
+++ b/arch/s390/kernel/nmi.c
@@ -293,7 +293,7 @@
 			 * retry this instruction.
 			 */
 			spin_lock(&ipd_lock);
-			tmp = get_clock();
+			tmp = get_tod_clock();
 			if (((tmp - last_ipd) >> 12) < MAX_IPD_TIME)
 				ipd_count++;
 			else
diff --git a/arch/s390/kernel/perf_cpum_cf.c b/arch/s390/kernel/perf_cpum_cf.c
index 86ec744..390d9ae 100644
--- a/arch/s390/kernel/perf_cpum_cf.c
+++ b/arch/s390/kernel/perf_cpum_cf.c
@@ -367,13 +367,6 @@
 	if (ev >= PERF_CPUM_CF_MAX_CTR)
 		return -EINVAL;
 
-	/* The CPU measurement counter facility does not have any interrupts
-	 * to do sampling.  Sampling must be provided by external means,
-	 * for example, by timers.
-	 */
-	if (hwc->sample_period)
-		return -EINVAL;
-
 	/* Use the hardware perf event structure to store the counter number
 	 * in 'config' member and the counter set to which the counter belongs
 	 * in the 'config_base'.  The counter set (config_base) is then used
@@ -418,6 +411,12 @@
 	case PERF_TYPE_HARDWARE:
 	case PERF_TYPE_HW_CACHE:
 	case PERF_TYPE_RAW:
+		/* The CPU measurement counter facility does not have overflow
+		 * interrupts to do sampling.  Sampling must be provided by
+		 * external means, for example, by timers.
+		 */
+		if (is_sampling_event(event))
+			return -ENOENT;
 		err = __hw_perf_event_init(event);
 		break;
 	default:
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
index 7433a2f..549c9d1 100644
--- a/arch/s390/kernel/smp.c
+++ b/arch/s390/kernel/smp.c
@@ -365,16 +365,16 @@
 	u64 end;
 	int cpu;
 
-	end = get_clock() + (1000000UL << 12);
+	end = get_tod_clock() + (1000000UL << 12);
 	for_each_cpu(cpu, cpumask) {
 		struct pcpu *pcpu = pcpu_devices + cpu;
 		set_bit(ec_stop_cpu, &pcpu->ec_mask);
 		while (__pcpu_sigp(pcpu->address, SIGP_EMERGENCY_SIGNAL,
 				   0, NULL) == SIGP_CC_BUSY &&
-		       get_clock() < end)
+		       get_tod_clock() < end)
 			cpu_relax();
 	}
-	while (get_clock() < end) {
+	while (get_tod_clock() < end) {
 		for_each_cpu(cpu, cpumask)
 			if (pcpu_stopped(pcpu_devices + cpu))
 				cpumask_clear_cpu(cpu, cpumask);
@@ -694,7 +694,7 @@
  */
 static void __cpuinit smp_start_secondary(void *cpuvoid)
 {
-	S390_lowcore.last_update_clock = get_clock();
+	S390_lowcore.last_update_clock = get_tod_clock();
 	S390_lowcore.restart_stack = (unsigned long) restart_stack;
 	S390_lowcore.restart_fn = (unsigned long) do_restart;
 	S390_lowcore.restart_data = 0;
@@ -947,7 +947,7 @@
 	unsigned int sequence;
 
 	do {
-		now = get_clock();
+		now = get_tod_clock();
 		sequence = ACCESS_ONCE(idle->sequence);
 		idle_time = ACCESS_ONCE(idle->idle_time);
 		idle_enter = ACCESS_ONCE(idle->clock_idle_enter);
diff --git a/arch/s390/kernel/time.c b/arch/s390/kernel/time.c
index 0aa98db..876546b 100644
--- a/arch/s390/kernel/time.c
+++ b/arch/s390/kernel/time.c
@@ -63,7 +63,7 @@
  */
 unsigned long long notrace __kprobes sched_clock(void)
 {
-	return tod_to_ns(get_clock_monotonic());
+	return tod_to_ns(get_tod_clock_monotonic());
 }
 
 /*
@@ -194,7 +194,7 @@
 
 void read_persistent_clock(struct timespec *ts)
 {
-	tod_to_timeval(get_clock() - TOD_UNIX_EPOCH, ts);
+	tod_to_timeval(get_tod_clock() - TOD_UNIX_EPOCH, ts);
 }
 
 void read_boot_clock(struct timespec *ts)
@@ -204,7 +204,7 @@
 
 static cycle_t read_tod_clock(struct clocksource *cs)
 {
-	return get_clock();
+	return get_tod_clock();
 }
 
 static struct clocksource clocksource_tod = {
@@ -342,7 +342,7 @@
 
 	sw_ptr = &get_cpu_var(clock_sync_word);
 	sw0 = atomic_read(sw_ptr);
-	*clock = get_clock();
+	*clock = get_tod_clock();
 	sw1 = atomic_read(sw_ptr);
 	put_cpu_var(clock_sync_word);
 	if (sw0 == sw1 && (sw0 & 0x80000000U))
@@ -486,7 +486,7 @@
 		.p0 = 0, .p1 = 0, ._pad1 = 0, .ea = 0,
 		.es = 0, .sl = 0 };
 	if (etr_setr(&etr_eacr) == 0) {
-		etr_tolec = get_clock();
+		etr_tolec = get_tod_clock();
 		set_bit(CLOCK_SYNC_HAS_ETR, &clock_sync_flags);
 		if (etr_port0_online && etr_port1_online)
 			set_bit(CLOCK_SYNC_ETR, &clock_sync_flags);
@@ -768,8 +768,8 @@
 	__ctl_set_bit(14, 21);
 	__ctl_set_bit(0, 29);
 	clock = ((unsigned long long) (aib->edf2.etv + 1)) << 32;
-	old_clock = get_clock();
-	if (set_clock(clock) == 0) {
+	old_clock = get_tod_clock();
+	if (set_tod_clock(clock) == 0) {
 		__udelay(1);	/* Wait for the clock to start. */
 		__ctl_clear_bit(0, 29);
 		__ctl_clear_bit(14, 21);
@@ -845,7 +845,7 @@
 			 * assume that this can have caused an stepping
 			 * port switch.
 			 */
-			etr_tolec = get_clock();
+			etr_tolec = get_tod_clock();
 		eacr.p0 = etr_port0_online;
 		if (!eacr.p0)
 			eacr.e0 = 0;
@@ -858,7 +858,7 @@
 			 * assume that this can have caused an stepping
 			 * port switch.
 			 */
-			etr_tolec = get_clock();
+			etr_tolec = get_tod_clock();
 		eacr.p1 = etr_port1_online;
 		if (!eacr.p1)
 			eacr.e1 = 0;
@@ -974,7 +974,7 @@
 	etr_eacr = eacr;
 	etr_setr(&etr_eacr);
 	if (dp_changed)
-		etr_tolec = get_clock();
+		etr_tolec = get_tod_clock();
 }
 
 /*
@@ -1012,7 +1012,7 @@
 	/* Store aib to get the current ETR status word. */
 	BUG_ON(etr_stetr(&aib) != 0);
 	etr_port0.esw = etr_port1.esw = aib.esw;	/* Copy status word. */
-	now = get_clock();
+	now = get_tod_clock();
 
 	/*
 	 * Update the port information if the last stepping port change
@@ -1537,10 +1537,10 @@
 	if (stp_info.todoff[0] || stp_info.todoff[1] ||
 	    stp_info.todoff[2] || stp_info.todoff[3] ||
 	    stp_info.tmd != 2) {
-		old_clock = get_clock();
+		old_clock = get_tod_clock();
 		rc = chsc_sstpc(stp_page, STP_OP_SYNC, 0);
 		if (rc == 0) {
-			delta = adjust_time(old_clock, get_clock(), 0);
+			delta = adjust_time(old_clock, get_tod_clock(), 0);
 			fixup_clock_comparator(delta);
 			rc = chsc_sstpi(stp_page, &stp_info,
 					sizeof(struct stp_sstpi));
diff --git a/arch/s390/kernel/vmlinux.lds.S b/arch/s390/kernel/vmlinux.lds.S
index 79cb51a..35b13ed 100644
--- a/arch/s390/kernel/vmlinux.lds.S
+++ b/arch/s390/kernel/vmlinux.lds.S
@@ -75,6 +75,10 @@
 		EXIT_TEXT
 	}
 
+	.exit.data : {
+		EXIT_DATA
+	}
+
 	/* early.c uses stsi, which requires page aligned data. */
 	. = ALIGN(PAGE_SIZE);
 	INIT_DATA_SECTION(0x100)
diff --git a/arch/s390/kernel/vtime.c b/arch/s390/kernel/vtime.c
index ce9cc5a..a0042ac 100644
--- a/arch/s390/kernel/vtime.c
+++ b/arch/s390/kernel/vtime.c
@@ -191,7 +191,7 @@
 	unsigned int sequence;
 
 	do {
-		now = get_clock();
+		now = get_tod_clock();
 		sequence = ACCESS_ONCE(idle->sequence);
 		idle_enter = ACCESS_ONCE(idle->clock_idle_enter);
 		idle_exit = ACCESS_ONCE(idle->clock_idle_exit);
diff --git a/arch/s390/kvm/interrupt.c b/arch/s390/kvm/interrupt.c
index 82c481d..87418b5 100644
--- a/arch/s390/kvm/interrupt.c
+++ b/arch/s390/kvm/interrupt.c
@@ -362,7 +362,7 @@
 	}
 
 	if ((!rc) && (vcpu->arch.sie_block->ckc <
-		get_clock() + vcpu->arch.sie_block->epoch)) {
+		get_tod_clock() + vcpu->arch.sie_block->epoch)) {
 		if ((!psw_extint_disabled(vcpu)) &&
 			(vcpu->arch.sie_block->gcr[0] & 0x800ul))
 			rc = 1;
@@ -402,7 +402,7 @@
 		goto no_timer;
 	}
 
-	now = get_clock() + vcpu->arch.sie_block->epoch;
+	now = get_tod_clock() + vcpu->arch.sie_block->epoch;
 	if (vcpu->arch.sie_block->ckc < now) {
 		__unset_cpu_idle(vcpu);
 		return 0;
@@ -492,7 +492,7 @@
 	}
 
 	if ((vcpu->arch.sie_block->ckc <
-		get_clock() + vcpu->arch.sie_block->epoch))
+		get_tod_clock() + vcpu->arch.sie_block->epoch))
 		__try_deliver_ckc_interrupt(vcpu);
 
 	if (atomic_read(&fi->active)) {
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index f090e81..2923781 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -147,7 +147,7 @@
 		r = KVM_MAX_VCPUS;
 		break;
 	case KVM_CAP_S390_COW:
-		r = sclp_get_fac85() & 0x2;
+		r = MACHINE_HAS_ESOP;
 		break;
 	default:
 		r = 0;
diff --git a/arch/s390/lib/delay.c b/arch/s390/lib/delay.c
index 42d0cf8..c61b9fa 100644
--- a/arch/s390/lib/delay.c
+++ b/arch/s390/lib/delay.c
@@ -32,7 +32,7 @@
 	unsigned long cr0, cr6, new;
 	u64 clock_saved, end;
 
-	end = get_clock() + (usecs << 12);
+	end = get_tod_clock() + (usecs << 12);
 	clock_saved = local_tick_disable();
 	__ctl_store(cr0, 0, 0);
 	__ctl_store(cr6, 6, 6);
@@ -45,7 +45,7 @@
 		set_clock_comparator(end);
 		vtime_stop_cpu();
 		local_irq_disable();
-	} while (get_clock() < end);
+	} while (get_tod_clock() < end);
 	lockdep_on();
 	__ctl_load(cr0, 0, 0);
 	__ctl_load(cr6, 6, 6);
@@ -56,7 +56,7 @@
 {
 	u64 clock_saved, end;
 
-	end = get_clock() + (usecs << 12);
+	end = get_tod_clock() + (usecs << 12);
 	do {
 		clock_saved = 0;
 		if (end < S390_lowcore.clock_comparator) {
@@ -67,7 +67,7 @@
 		local_irq_disable();
 		if (clock_saved)
 			local_tick_enable(clock_saved);
-	} while (get_clock() < end);
+	} while (get_tod_clock() < end);
 }
 
 /*
@@ -111,8 +111,8 @@
 {
 	u64 end;
 
-	end = get_clock() + (usecs << 12);
-	while (get_clock() < end)
+	end = get_tod_clock() + (usecs << 12);
+	while (get_tod_clock() < end)
 		cpu_relax();
 }
 
@@ -122,10 +122,10 @@
 
 	nsecs <<= 9;
 	do_div(nsecs, 125);
-	end = get_clock() + nsecs;
+	end = get_tod_clock() + nsecs;
 	if (nsecs & ~0xfffUL)
 		__udelay(nsecs >> 12);
-	while (get_clock() < end)
+	while (get_tod_clock() < end)
 		barrier();
 }
 EXPORT_SYMBOL(__ndelay);
diff --git a/arch/s390/lib/uaccess_pt.c b/arch/s390/lib/uaccess_pt.c
index 9017a63..a70ee84 100644
--- a/arch/s390/lib/uaccess_pt.c
+++ b/arch/s390/lib/uaccess_pt.c
@@ -50,7 +50,7 @@
 	ptep = pte_offset_map(pmd, addr);
 	if (!pte_present(*ptep))
 		return -0x11UL;
-	if (write && !pte_write(*ptep))
+	if (write && (!pte_write(*ptep) || !pte_dirty(*ptep)))
 		return -0x04UL;
 
 	return (pte_val(*ptep) & PAGE_MASK) + (addr & ~PAGE_MASK);
diff --git a/arch/s390/mm/mmap.c b/arch/s390/mm/mmap.c
index c59a5ef..06bafec 100644
--- a/arch/s390/mm/mmap.c
+++ b/arch/s390/mm/mmap.c
@@ -101,12 +101,15 @@
 
 #else
 
-int s390_mmap_check(unsigned long addr, unsigned long len)
+int s390_mmap_check(unsigned long addr, unsigned long len, unsigned long flags)
 {
 	int rc;
 
-	if (!is_compat_task() &&
-	    len >= TASK_SIZE && TASK_SIZE < (1UL << 53)) {
+	if (is_compat_task() || (TASK_SIZE >= (1UL << 53)))
+		return 0;
+	if (!(flags & MAP_FIXED))
+		addr = 0;
+	if ((addr + len) >= TASK_SIZE) {
 		rc = crst_table_upgrade(current->mm, 1UL << 53);
 		if (rc)
 			return rc;
diff --git a/arch/s390/mm/pageattr.c b/arch/s390/mm/pageattr.c
index 29ccee3..d21040e 100644
--- a/arch/s390/mm/pageattr.c
+++ b/arch/s390/mm/pageattr.c
@@ -127,7 +127,7 @@
 			pte_val(*pte) = _PAGE_TYPE_EMPTY;
 			continue;
 		}
-		*pte = mk_pte_phys(address, __pgprot(_PAGE_TYPE_RW));
+		pte_val(*pte) = __pa(address);
 	}
 }
 
diff --git a/arch/s390/mm/vmem.c b/arch/s390/mm/vmem.c
index 6ed1426..79699f46 100644
--- a/arch/s390/mm/vmem.c
+++ b/arch/s390/mm/vmem.c
@@ -85,11 +85,9 @@
 	pud_t *pu_dir;
 	pmd_t *pm_dir;
 	pte_t *pt_dir;
-	pte_t  pte;
 	int ret = -ENOMEM;
 
 	while (address < end) {
-		pte = mk_pte_phys(address, __pgprot(ro ? _PAGE_RO : 0));
 		pg_dir = pgd_offset_k(address);
 		if (pgd_none(*pg_dir)) {
 			pu_dir = vmem_pud_alloc();
@@ -101,9 +99,9 @@
 #if defined(CONFIG_64BIT) && !defined(CONFIG_DEBUG_PAGEALLOC)
 		if (MACHINE_HAS_EDAT2 && pud_none(*pu_dir) && address &&
 		    !(address & ~PUD_MASK) && (address + PUD_SIZE <= end)) {
-			pte_val(pte) |= _REGION3_ENTRY_LARGE;
-			pte_val(pte) |= _REGION_ENTRY_TYPE_R3;
-			pud_val(*pu_dir) = pte_val(pte);
+			pud_val(*pu_dir) = __pa(address) |
+				_REGION_ENTRY_TYPE_R3 | _REGION3_ENTRY_LARGE |
+				(ro ? _REGION_ENTRY_RO : 0);
 			address += PUD_SIZE;
 			continue;
 		}
@@ -118,8 +116,9 @@
 #if defined(CONFIG_64BIT) && !defined(CONFIG_DEBUG_PAGEALLOC)
 		if (MACHINE_HAS_EDAT1 && pmd_none(*pm_dir) && address &&
 		    !(address & ~PMD_MASK) && (address + PMD_SIZE <= end)) {
-			pte_val(pte) |= _SEGMENT_ENTRY_LARGE;
-			pmd_val(*pm_dir) = pte_val(pte);
+			pmd_val(*pm_dir) = __pa(address) |
+				_SEGMENT_ENTRY | _SEGMENT_ENTRY_LARGE |
+				(ro ? _SEGMENT_ENTRY_RO : 0);
 			address += PMD_SIZE;
 			continue;
 		}
@@ -132,7 +131,7 @@
 		}
 
 		pt_dir = pte_offset_kernel(pm_dir, address);
-		*pt_dir = pte;
+		pte_val(*pt_dir) = __pa(address) | (ro ? _PAGE_RO : 0);
 		address += PAGE_SIZE;
 	}
 	ret = 0;
@@ -199,7 +198,6 @@
 	pud_t *pu_dir;
 	pmd_t *pm_dir;
 	pte_t *pt_dir;
-	pte_t  pte;
 	int ret = -ENOMEM;
 
 	start_addr = (unsigned long) start;
@@ -237,9 +235,8 @@
 				new_page = vmemmap_alloc_block(PMD_SIZE, node);
 				if (!new_page)
 					goto out;
-				pte = mk_pte_phys(__pa(new_page), PAGE_RW);
-				pte_val(pte) |= _SEGMENT_ENTRY_LARGE;
-				pmd_val(*pm_dir) = pte_val(pte);
+				pmd_val(*pm_dir) = __pa(new_page) |
+					_SEGMENT_ENTRY | _SEGMENT_ENTRY_LARGE;
 				address = (address + PMD_SIZE) & PMD_MASK;
 				continue;
 			}
@@ -260,8 +257,7 @@
 			new_page =__pa(vmem_alloc_pages(0));
 			if (!new_page)
 				goto out;
-			pte = pfn_pte(new_page >> PAGE_SHIFT, PAGE_KERNEL);
-			*pt_dir = pte;
+			pte_val(*pt_dir) = __pa(new_page);
 		}
 		address += PAGE_SIZE;
 	}
diff --git a/arch/s390/net/bpf_jit_comp.c b/arch/s390/net/bpf_jit_comp.c
index bb28441..0972e91 100644
--- a/arch/s390/net/bpf_jit_comp.c
+++ b/arch/s390/net/bpf_jit_comp.c
@@ -7,6 +7,7 @@
  */
 #include <linux/moduleloader.h>
 #include <linux/netdevice.h>
+#include <linux/if_vlan.h>
 #include <linux/filter.h>
 #include <asm/cacheflush.h>
 #include <asm/processor.h>
@@ -254,6 +255,8 @@
 	case BPF_S_ANC_HATYPE:
 	case BPF_S_ANC_RXHASH:
 	case BPF_S_ANC_CPU:
+	case BPF_S_ANC_VLAN_TAG:
+	case BPF_S_ANC_VLAN_TAG_PRESENT:
 	case BPF_S_RET_K:
 		/* first instruction sets A register */
 		break;
@@ -699,6 +702,24 @@
 		/* l %r5,<d(rxhash)>(%r2) */
 		EMIT4_DISP(0x58502000, offsetof(struct sk_buff, rxhash));
 		break;
+	case BPF_S_ANC_VLAN_TAG:
+	case BPF_S_ANC_VLAN_TAG_PRESENT:
+		BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, vlan_tci) != 2);
+		BUILD_BUG_ON(VLAN_TAG_PRESENT != 0x1000);
+		/* lhi %r5,0 */
+		EMIT4(0xa7580000);
+		/* icm	%r5,3,<d(vlan_tci)>(%r2) */
+		EMIT4_DISP(0xbf532000, offsetof(struct sk_buff, vlan_tci));
+		if (filter->code == BPF_S_ANC_VLAN_TAG) {
+			/* nill %r5,0xefff */
+			EMIT4_IMM(0xa5570000, ~VLAN_TAG_PRESENT);
+		} else {
+			/* nill %r5,0x1000 */
+			EMIT4_IMM(0xa5570000, VLAN_TAG_PRESENT);
+			/* srl %r5,12 */
+			EMIT4_DISP(0x88500000, 12);
+		}
+		break;
 	case BPF_S_ANC_CPU: /* A = smp_processor_id() */
 #ifdef CONFIG_SMP
 		/* l %r5,<d(cpu_nr)> */
diff --git a/arch/s390/pci/pci.c b/arch/s390/pci/pci.c
index 60e0372..27b4c17 100644
--- a/arch/s390/pci/pci.c
+++ b/arch/s390/pci/pci.c
@@ -51,8 +51,7 @@
 DEFINE_MUTEX(zpci_list_lock);
 EXPORT_SYMBOL_GPL(zpci_list_lock);
 
-struct pci_hp_callback_ops hotplug_ops;
-EXPORT_SYMBOL_GPL(hotplug_ops);
+static struct pci_hp_callback_ops *hotplug_ops;
 
 static DECLARE_BITMAP(zpci_domain, ZPCI_NR_DEVICES);
 static DEFINE_SPINLOCK(zpci_domain_lock);
@@ -974,8 +973,8 @@
 
 	mutex_lock(&zpci_list_lock);
 	list_add_tail(&zdev->entry, &zpci_list);
-	if (hotplug_ops.create_slot)
-		hotplug_ops.create_slot(zdev);
+	if (hotplug_ops)
+		hotplug_ops->create_slot(zdev);
 	mutex_unlock(&zpci_list_lock);
 
 	if (zdev->state == ZPCI_FN_STATE_STANDBY)
@@ -989,8 +988,8 @@
 out_start:
 	mutex_lock(&zpci_list_lock);
 	list_del(&zdev->entry);
-	if (hotplug_ops.remove_slot)
-		hotplug_ops.remove_slot(zdev);
+	if (hotplug_ops)
+		hotplug_ops->remove_slot(zdev);
 	mutex_unlock(&zpci_list_lock);
 out_bus:
 	zpci_free_domain(zdev);
@@ -1072,13 +1071,29 @@
 	kmem_cache_destroy(zdev_fmb_cache);
 }
 
-unsigned int pci_probe = 1;
-EXPORT_SYMBOL_GPL(pci_probe);
+void zpci_register_hp_ops(struct pci_hp_callback_ops *ops)
+{
+	mutex_lock(&zpci_list_lock);
+	hotplug_ops = ops;
+	mutex_unlock(&zpci_list_lock);
+}
+EXPORT_SYMBOL_GPL(zpci_register_hp_ops);
+
+void zpci_deregister_hp_ops(void)
+{
+	mutex_lock(&zpci_list_lock);
+	hotplug_ops = NULL;
+	mutex_unlock(&zpci_list_lock);
+}
+EXPORT_SYMBOL_GPL(zpci_deregister_hp_ops);
+
+unsigned int s390_pci_probe = 1;
+EXPORT_SYMBOL_GPL(s390_pci_probe);
 
 char * __init pcibios_setup(char *str)
 {
 	if (!strcmp(str, "off")) {
-		pci_probe = 0;
+		s390_pci_probe = 0;
 		return NULL;
 	}
 	return str;
@@ -1088,7 +1103,7 @@
 {
 	int rc;
 
-	if (!pci_probe)
+	if (!s390_pci_probe)
 		return 0;
 
 	if (!test_facility(2) || !test_facility(69)
diff --git a/arch/s390/pci/pci_clp.c b/arch/s390/pci/pci_clp.c
index 2c847143..f339fe2f 100644
--- a/arch/s390/pci/pci_clp.c
+++ b/arch/s390/pci/pci_clp.c
@@ -19,25 +19,25 @@
  * Call Logical Processor
  * Retry logic is handled by the caller.
  */
-static inline u8 clp_instr(void *req)
+static inline u8 clp_instr(void *data)
 {
-	u64 ilpm;
+	struct { u8 _[CLP_BLK_SIZE]; } *req = data;
+	u64 ignored;
 	u8 cc;
 
 	asm volatile (
-		"	.insn	rrf,0xb9a00000,%[ilpm],%[req],0x0,0x2\n"
+		"	.insn	rrf,0xb9a00000,%[ign],%[req],0x0,0x2\n"
 		"	ipm	%[cc]\n"
 		"	srl	%[cc],28\n"
-		: [cc] "=d" (cc), [ilpm] "=d" (ilpm)
+		: [cc] "=d" (cc), [ign] "=d" (ignored), "+m" (*req)
 		: [req] "a" (req)
-		: "cc", "memory");
+		: "cc");
 	return cc;
 }
 
 static void *clp_alloc_block(void)
 {
-	struct page *page = alloc_pages(GFP_KERNEL, get_order(CLP_BLK_SIZE));
-	return (page) ? page_address(page) : NULL;
+	return (void *) __get_free_pages(GFP_KERNEL, get_order(CLP_BLK_SIZE));
 }
 
 static void clp_free_block(void *ptr)
diff --git a/drivers/ata/Kconfig b/drivers/ata/Kconfig
index cdadce2..4cfb720 100644
--- a/drivers/ata/Kconfig
+++ b/drivers/ata/Kconfig
@@ -14,7 +14,7 @@
 	tristate "Serial ATA and Parallel ATA drivers"
 	depends on HAS_IOMEM
 	depends on BLOCK
-	depends on !(M32R || M68K) || BROKEN
+	depends on !(M32R || M68K || S390) || BROKEN
 	select SCSI
 	---help---
 	  If you want to use a ATA hard disk, ATA tape drive, ATA CD-ROM or
diff --git a/drivers/gpio/Kconfig b/drivers/gpio/Kconfig
index 1855a6f..b89d250 100644
--- a/drivers/gpio/Kconfig
+++ b/drivers/gpio/Kconfig
@@ -277,7 +277,7 @@
 
 config GPIO_VX855
 	tristate "VIA VX855/VX875 GPIO"
-	depends on PCI
+	depends on PCI && GENERIC_HARDIRQS
 	select MFD_CORE
 	select MFD_VX855
 	help
@@ -599,7 +599,7 @@
 
 config GPIO_RDC321X
 	tristate "RDC R-321x GPIO support"
-	depends on PCI
+	depends on PCI && GENERIC_HARDIRQS
 	select MFD_CORE
 	select MFD_RDC321X
 	help
diff --git a/drivers/media/radio/Kconfig b/drivers/media/radio/Kconfig
index 8090b87..9e58016 100644
--- a/drivers/media/radio/Kconfig
+++ b/drivers/media/radio/Kconfig
@@ -180,7 +180,7 @@
 
 config RADIO_WL1273
 	tristate "Texas Instruments WL1273 I2C FM Radio"
-	depends on I2C && VIDEO_V4L2
+	depends on I2C && VIDEO_V4L2 && GENERIC_HARDIRQS
 	select MFD_CORE
 	select MFD_WL1273_CORE
 	select FW_LOADER
diff --git a/drivers/net/ethernet/cadence/Kconfig b/drivers/net/ethernet/cadence/Kconfig
index ceb0de0..1194446 100644
--- a/drivers/net/ethernet/cadence/Kconfig
+++ b/drivers/net/ethernet/cadence/Kconfig
@@ -22,6 +22,7 @@
 
 config ARM_AT91_ETHER
 	tristate "AT91RM9200 Ethernet support"
+	depends on GENERIC_HARDIRQS
 	select NET_CORE
 	select MACB
 	---help---
diff --git a/drivers/net/phy/Kconfig b/drivers/net/phy/Kconfig
index 961f0b2..4503452 100644
--- a/drivers/net/phy/Kconfig
+++ b/drivers/net/phy/Kconfig
@@ -4,7 +4,6 @@
 
 menuconfig PHYLIB
 	tristate "PHY Device support and infrastructure"
-	depends on !S390
 	depends on NETDEVICES
 	help
 	  Ethernet controllers are usually attached to PHY
diff --git a/drivers/parport/Kconfig b/drivers/parport/Kconfig
index 0e60438..24e12d4 100644
--- a/drivers/parport/Kconfig
+++ b/drivers/parport/Kconfig
@@ -35,7 +35,7 @@
 
 config PARPORT_PC
 	tristate "PC-style hardware"
-	depends on (!SPARC64 || PCI) && !SPARC32 && !M32R && !FRV && \
+	depends on (!SPARC64 || PCI) && !SPARC32 && !M32R && !FRV && !S390 && \
 		(!M68K || ISA) && !MN10300 && !AVR32 && !BLACKFIN && !XTENSA
 	---help---
 	  You should say Y here if you have a PC-style parallel port. All
diff --git a/drivers/pci/hotplug/s390_pci_hpc.c b/drivers/pci/hotplug/s390_pci_hpc.c
index dee68e0..7db249a 100644
--- a/drivers/pci/hotplug/s390_pci_hpc.c
+++ b/drivers/pci/hotplug/s390_pci_hpc.c
@@ -172,25 +172,6 @@
 	return -ENOMEM;
 }
 
-static int __init init_pci_slots(void)
-{
-	struct zpci_dev *zdev;
-	int device = 0;
-
-	/*
-	 * Create a structure for each slot, and register that slot
-	 * with the pci_hotplug subsystem.
-	 */
-	mutex_lock(&zpci_list_lock);
-	list_for_each_entry(zdev, &zpci_list, entry) {
-		init_pci_slot(zdev);
-		device++;
-	}
-
-	mutex_unlock(&zpci_list_lock);
-	return (device) ? 0 : -ENODEV;
-}
-
 static void exit_pci_slot(struct zpci_dev *zdev)
 {
 	struct list_head *tmp, *n;
@@ -205,6 +186,26 @@
 	}
 }
 
+static struct pci_hp_callback_ops hp_ops = {
+	.create_slot = init_pci_slot,
+	.remove_slot = exit_pci_slot,
+};
+
+static void __init init_pci_slots(void)
+{
+	struct zpci_dev *zdev;
+
+	/*
+	 * Create a structure for each slot, and register that slot
+	 * with the pci_hotplug subsystem.
+	 */
+	mutex_lock(&zpci_list_lock);
+	list_for_each_entry(zdev, &zpci_list, entry) {
+		init_pci_slot(zdev);
+	}
+	mutex_unlock(&zpci_list_lock);
+}
+
 static void __exit exit_pci_slots(void)
 {
 	struct list_head *tmp, *n;
@@ -224,28 +225,19 @@
 
 static int __init pci_hotplug_s390_init(void)
 {
-	/*
-	 * Do specific initialization stuff for your driver here
-	 * like initializing your controller hardware (if any) and
-	 * determining the number of slots you have in the system
-	 * right now.
-	 */
-
-	if (!pci_probe)
+	if (!s390_pci_probe)
 		return -EOPNOTSUPP;
 
-	/* register callbacks for slot handling from arch code */
-	mutex_lock(&zpci_list_lock);
-	hotplug_ops.create_slot = init_pci_slot;
-	hotplug_ops.remove_slot = exit_pci_slot;
-	mutex_unlock(&zpci_list_lock);
-	pr_info("registered hotplug slot callbacks\n");
-	return init_pci_slots();
+	zpci_register_hp_ops(&hp_ops);
+	init_pci_slots();
+
+	return 0;
 }
 
 static void __exit pci_hotplug_s390_exit(void)
 {
 	exit_pci_slots();
+	zpci_deregister_hp_ops();
 }
 
 module_init(pci_hotplug_s390_init);
diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c
index 29225e1c..f1b7fdc 100644
--- a/drivers/s390/block/dasd.c
+++ b/drivers/s390/block/dasd.c
@@ -1352,7 +1352,7 @@
 		switch (rc) {
 		case 0:	/* termination successful */
 			cqr->status = DASD_CQR_CLEAR_PENDING;
-			cqr->stopclk = get_clock();
+			cqr->stopclk = get_tod_clock();
 			cqr->starttime = 0;
 			DBF_DEV_EVENT(DBF_DEBUG, device,
 				      "terminate cqr %p successful",
@@ -1420,7 +1420,7 @@
 		cqr->status = DASD_CQR_ERROR;
 		return -EIO;
 	}
-	cqr->startclk = get_clock();
+	cqr->startclk = get_tod_clock();
 	cqr->starttime = jiffies;
 	cqr->retries--;
 	if (!test_bit(DASD_CQR_VERIFY_PATH, &cqr->flags)) {
@@ -1623,7 +1623,7 @@
 		return;
 	}
 
-	now = get_clock();
+	now = get_tod_clock();
 	cqr = (struct dasd_ccw_req *) intparm;
 	/* check for conditions that should be handled immediately */
 	if (!cqr ||
@@ -1963,7 +1963,7 @@
 			}
 			break;
 		case DASD_CQR_QUEUED:
-			cqr->stopclk = get_clock();
+			cqr->stopclk = get_tod_clock();
 			cqr->status = DASD_CQR_CLEARED;
 			break;
 		default: /* no need to modify the others */
@@ -2210,7 +2210,7 @@
 			wait_event(generic_waitq, _wait_for_wakeup(cqr));
 	}
 
-	maincqr->endclk = get_clock();
+	maincqr->endclk = get_tod_clock();
 	if ((maincqr->status != DASD_CQR_DONE) &&
 	    (maincqr->intrc != -ERESTARTSYS))
 		dasd_log_sense(maincqr, &maincqr->irb);
@@ -2340,7 +2340,7 @@
 				"Cancelling request %p failed with rc=%d\n",
 				cqr, rc);
 		} else {
-			cqr->stopclk = get_clock();
+			cqr->stopclk = get_tod_clock();
 		}
 		break;
 	default: /* already finished or clear pending - do nothing */
@@ -2568,7 +2568,7 @@
 		}
 
 		/* Rechain finished requests to final queue */
-		cqr->endclk = get_clock();
+		cqr->endclk = get_tod_clock();
 		list_move_tail(&cqr->blocklist, final_queue);
 	}
 }
@@ -2711,7 +2711,7 @@
 		}
 		/* call the callback function */
 		spin_lock_irq(&block->request_queue_lock);
-		cqr->endclk = get_clock();
+		cqr->endclk = get_tod_clock();
 		list_del_init(&cqr->blocklist);
 		__dasd_cleanup_cqr(cqr);
 		spin_unlock_irq(&block->request_queue_lock);
@@ -3042,12 +3042,15 @@
 	cdev->handler = NULL;
 
 	device = dasd_device_from_cdev(cdev);
-	if (IS_ERR(device))
+	if (IS_ERR(device)) {
+		dasd_remove_sysfs_files(cdev);
 		return;
+	}
 	if (test_and_set_bit(DASD_FLAG_OFFLINE, &device->flags) &&
 	    !test_bit(DASD_FLAG_SAFE_OFFLINE_RUNNING, &device->flags)) {
 		/* Already doing offline processing */
 		dasd_put_device(device);
+		dasd_remove_sysfs_files(cdev);
 		return;
 	}
 	/*
@@ -3504,7 +3507,7 @@
 	cqr->memdev = device;
 	cqr->expires = 10*HZ;
 	cqr->retries = 256;
-	cqr->buildclk = get_clock();
+	cqr->buildclk = get_tod_clock();
 	cqr->status = DASD_CQR_FILLED;
 	return cqr;
 }
diff --git a/drivers/s390/block/dasd_3990_erp.c b/drivers/s390/block/dasd_3990_erp.c
index f8212d5..d261347 100644
--- a/drivers/s390/block/dasd_3990_erp.c
+++ b/drivers/s390/block/dasd_3990_erp.c
@@ -229,7 +229,7 @@
 	dctl_cqr->expires = 5 * 60 * HZ;
 	dctl_cqr->retries = 2;
 
-	dctl_cqr->buildclk = get_clock();
+	dctl_cqr->buildclk = get_tod_clock();
 
 	dctl_cqr->status = DASD_CQR_FILLED;
 
@@ -1719,7 +1719,7 @@
 	erp->magic = default_erp->magic;
 	erp->expires = default_erp->expires;
 	erp->retries = 256;
-	erp->buildclk = get_clock();
+	erp->buildclk = get_tod_clock();
 	erp->status = DASD_CQR_FILLED;
 
 	/* remove the default erp */
@@ -2322,7 +2322,7 @@
 			DBF_DEV_EVENT(DBF_ERR, device, "%s",
 				    "Unable to allocate ERP request");
 			cqr->status = DASD_CQR_FAILED;
-                        cqr->stopclk = get_clock ();
+			cqr->stopclk = get_tod_clock();
 		} else {
 			DBF_DEV_EVENT(DBF_ERR, device,
                                      "Unable to allocate ERP request "
@@ -2364,7 +2364,7 @@
 	erp->magic    = cqr->magic;
 	erp->expires  = cqr->expires;
 	erp->retries  = 256;
-	erp->buildclk = get_clock();
+	erp->buildclk = get_tod_clock();
 	erp->status = DASD_CQR_FILLED;
 
 	return erp;
diff --git a/drivers/s390/block/dasd_alias.c b/drivers/s390/block/dasd_alias.c
index 6b55699..a2597e6 100644
--- a/drivers/s390/block/dasd_alias.c
+++ b/drivers/s390/block/dasd_alias.c
@@ -448,7 +448,7 @@
 	ccw->count = sizeof(*(lcu->uac));
 	ccw->cda = (__u32)(addr_t) lcu->uac;
 
-	cqr->buildclk = get_clock();
+	cqr->buildclk = get_tod_clock();
 	cqr->status = DASD_CQR_FILLED;
 
 	/* need to unset flag here to detect race with summary unit check */
@@ -733,7 +733,7 @@
 	cqr->memdev = device;
 	cqr->block = NULL;
 	cqr->expires = 5 * HZ;
-	cqr->buildclk = get_clock();
+	cqr->buildclk = get_tod_clock();
 	cqr->status = DASD_CQR_FILLED;
 
 	rc = dasd_sleep_on_immediatly(cqr);
diff --git a/drivers/s390/block/dasd_diag.c b/drivers/s390/block/dasd_diag.c
index 704488d..cc06033 100644
--- a/drivers/s390/block/dasd_diag.c
+++ b/drivers/s390/block/dasd_diag.c
@@ -184,14 +184,14 @@
 	private->iob.bio_list = dreq->bio;
 	private->iob.flaga = DASD_DIAG_FLAGA_DEFAULT;
 
-	cqr->startclk = get_clock();
+	cqr->startclk = get_tod_clock();
 	cqr->starttime = jiffies;
 	cqr->retries--;
 
 	rc = dia250(&private->iob, RW_BIO);
 	switch (rc) {
 	case 0: /* Synchronous I/O finished successfully */
-		cqr->stopclk = get_clock();
+		cqr->stopclk = get_tod_clock();
 		cqr->status = DASD_CQR_SUCCESS;
 		/* Indicate to calling function that only a dasd_schedule_bh()
 		   and no timer is needed */
@@ -222,7 +222,7 @@
 	mdsk_term_io(device);
 	mdsk_init_io(device, device->block->bp_block, 0, NULL);
 	cqr->status = DASD_CQR_CLEAR_PENDING;
-	cqr->stopclk = get_clock();
+	cqr->stopclk = get_tod_clock();
 	dasd_schedule_device_bh(device);
 	return 0;
 }
@@ -276,7 +276,7 @@
 		return;
 	}
 
-	cqr->stopclk = get_clock();
+	cqr->stopclk = get_tod_clock();
 
 	expires = 0;
 	if ((ext_code.subcode & 0xff) == 0) {
@@ -556,7 +556,7 @@
 		}
 	}
 	cqr->retries = DIAG_MAX_RETRIES;
-	cqr->buildclk = get_clock();
+	cqr->buildclk = get_tod_clock();
 	if (blk_noretry_request(req) ||
 	    block->base->features & DASD_FEATURE_FAILFAST)
 		set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
diff --git a/drivers/s390/block/dasd_eckd.c b/drivers/s390/block/dasd_eckd.c
index e37bc16..33f26bf 100644
--- a/drivers/s390/block/dasd_eckd.c
+++ b/drivers/s390/block/dasd_eckd.c
@@ -862,7 +862,7 @@
 	cqr->expires = 10*HZ;
 	cqr->lpm = lpm;
 	cqr->retries = 256;
-	cqr->buildclk = get_clock();
+	cqr->buildclk = get_tod_clock();
 	cqr->status = DASD_CQR_FILLED;
 	set_bit(DASD_CQR_VERIFY_PATH, &cqr->flags);
 }
@@ -1449,7 +1449,7 @@
 	ccw->count = sizeof(struct dasd_rssd_features);
 	ccw->cda = (__u32)(addr_t) features;
 
-	cqr->buildclk = get_clock();
+	cqr->buildclk = get_tod_clock();
 	cqr->status = DASD_CQR_FILLED;
 	rc = dasd_sleep_on(cqr);
 	if (rc == 0) {
@@ -1501,7 +1501,7 @@
 	cqr->block = NULL;
 	cqr->retries = 256;
 	cqr->expires = 10*HZ;
-	cqr->buildclk = get_clock();
+	cqr->buildclk = get_tod_clock();
 	cqr->status = DASD_CQR_FILLED;
 	return cqr;
 }
@@ -1841,7 +1841,7 @@
 	cqr->startdev = device;
 	cqr->memdev = device;
 	cqr->retries = 255;
-	cqr->buildclk = get_clock();
+	cqr->buildclk = get_tod_clock();
 	cqr->status = DASD_CQR_FILLED;
 	return cqr;
 }
@@ -2241,7 +2241,7 @@
 	fcp->startdev = device;
 	fcp->memdev = device;
 	fcp->retries = 256;
-	fcp->buildclk = get_clock();
+	fcp->buildclk = get_tod_clock();
 	fcp->status = DASD_CQR_FILLED;
 	return fcp;
 }
@@ -2530,7 +2530,7 @@
 	cqr->expires = startdev->default_expires * HZ;	/* default 5 minutes */
 	cqr->lpm = startdev->path_data.ppm;
 	cqr->retries = 256;
-	cqr->buildclk = get_clock();
+	cqr->buildclk = get_tod_clock();
 	cqr->status = DASD_CQR_FILLED;
 	return cqr;
 }
@@ -2705,7 +2705,7 @@
 	cqr->expires = startdev->default_expires * HZ;	/* default 5 minutes */
 	cqr->lpm = startdev->path_data.ppm;
 	cqr->retries = 256;
-	cqr->buildclk = get_clock();
+	cqr->buildclk = get_tod_clock();
 	cqr->status = DASD_CQR_FILLED;
 	return cqr;
 }
@@ -2998,7 +2998,7 @@
 	cqr->expires = startdev->default_expires * HZ;	/* default 5 minutes */
 	cqr->lpm = startdev->path_data.ppm;
 	cqr->retries = 256;
-	cqr->buildclk = get_clock();
+	cqr->buildclk = get_tod_clock();
 	cqr->status = DASD_CQR_FILLED;
 	return cqr;
 out_error:
@@ -3201,7 +3201,7 @@
 	cqr->expires = startdev->default_expires * HZ;
 	cqr->lpm = startdev->path_data.ppm;
 	cqr->retries = 256;
-	cqr->buildclk = get_clock();
+	cqr->buildclk = get_tod_clock();
 	cqr->status = DASD_CQR_FILLED;
 
 	if (IS_ERR(cqr) && PTR_ERR(cqr) != -EAGAIN)
@@ -3402,7 +3402,7 @@
 	set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
 	cqr->retries = 2;	/* set retry counter to enable basic ERP */
 	cqr->expires = 2 * HZ;
-	cqr->buildclk = get_clock();
+	cqr->buildclk = get_tod_clock();
 	cqr->status = DASD_CQR_FILLED;
 
 	rc = dasd_sleep_on_immediatly(cqr);
@@ -3457,7 +3457,7 @@
 	set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
 	cqr->retries = 2;	/* set retry counter to enable basic ERP */
 	cqr->expires = 2 * HZ;
-	cqr->buildclk = get_clock();
+	cqr->buildclk = get_tod_clock();
 	cqr->status = DASD_CQR_FILLED;
 
 	rc = dasd_sleep_on_immediatly(cqr);
@@ -3511,7 +3511,7 @@
 	set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
 	cqr->retries = 2;	/* set retry counter to enable basic ERP */
 	cqr->expires = 2 * HZ;
-	cqr->buildclk = get_clock();
+	cqr->buildclk = get_tod_clock();
 	cqr->status = DASD_CQR_FILLED;
 
 	rc = dasd_sleep_on_immediatly(cqr);
@@ -3572,7 +3572,7 @@
 	set_bit(DASD_CQR_ALLOW_SLOCK, &cqr->flags);
 	cqr->retries = 5;
 	cqr->expires = 10 * HZ;
-	cqr->buildclk = get_clock();
+	cqr->buildclk = get_tod_clock();
 	cqr->status = DASD_CQR_FILLED;
 	cqr->lpm = usrparm.path_mask;
 
@@ -3642,7 +3642,7 @@
 	ccw->count = sizeof(struct dasd_rssd_perf_stats_t);
 	ccw->cda = (__u32)(addr_t) stats;
 
-	cqr->buildclk = get_clock();
+	cqr->buildclk = get_tod_clock();
 	cqr->status = DASD_CQR_FILLED;
 	rc = dasd_sleep_on(cqr);
 	if (rc == 0) {
@@ -3768,7 +3768,7 @@
 	cqr->memdev = device;
 	cqr->retries = 3;
 	cqr->expires = 10 * HZ;
-	cqr->buildclk = get_clock();
+	cqr->buildclk = get_tod_clock();
 	cqr->status = DASD_CQR_FILLED;
 
 	/* Build the ccws */
diff --git a/drivers/s390/block/dasd_eer.c b/drivers/s390/block/dasd_eer.c
index ff901b5..21ef63c 100644
--- a/drivers/s390/block/dasd_eer.c
+++ b/drivers/s390/block/dasd_eer.c
@@ -481,7 +481,7 @@
 	ccw->flags = 0;
 	ccw->cda = (__u32)(addr_t) cqr->data;
 
-	cqr->buildclk = get_clock();
+	cqr->buildclk = get_tod_clock();
 	cqr->status = DASD_CQR_FILLED;
 	cqr->callback = dasd_eer_snss_cb;
 
diff --git a/drivers/s390/block/dasd_erp.c b/drivers/s390/block/dasd_erp.c
index d01ef82..3250cb4 100644
--- a/drivers/s390/block/dasd_erp.c
+++ b/drivers/s390/block/dasd_erp.c
@@ -102,7 +102,7 @@
 		pr_err("%s: default ERP has run out of retries and failed\n",
 		       dev_name(&device->cdev->dev));
 		cqr->status = DASD_CQR_FAILED;
-		cqr->stopclk = get_clock();
+		cqr->stopclk = get_tod_clock();
         }
         return cqr;
 }				/* end dasd_default_erp_action */
@@ -146,7 +146,7 @@
 		cqr->status = DASD_CQR_DONE;
 	else {
 		cqr->status = DASD_CQR_FAILED;
-		cqr->stopclk = get_clock();
+		cqr->stopclk = get_tod_clock();
 	}
 
 	return cqr;
diff --git a/drivers/s390/block/dasd_fba.c b/drivers/s390/block/dasd_fba.c
index 4146985..4dd0e2f 100644
--- a/drivers/s390/block/dasd_fba.c
+++ b/drivers/s390/block/dasd_fba.c
@@ -370,7 +370,7 @@
 	cqr->block = block;
 	cqr->expires = memdev->default_expires * HZ;	/* default 5 minutes */
 	cqr->retries = 32;
-	cqr->buildclk = get_clock();
+	cqr->buildclk = get_tod_clock();
 	cqr->status = DASD_CQR_FILLED;
 	return cqr;
 }
diff --git a/drivers/s390/block/scm_blk.h b/drivers/s390/block/scm_blk.h
index 7ac6bad..3c1ccf4 100644
--- a/drivers/s390/block/scm_blk.h
+++ b/drivers/s390/block/scm_blk.h
@@ -68,19 +68,34 @@
 void scm_cluster_request_irq(struct scm_request *);
 bool scm_test_cluster_request(struct scm_request *);
 bool scm_cluster_size_valid(void);
-#else
-#define __scm_free_rq_cluster(scmrq) {}
-#define __scm_alloc_rq_cluster(scmrq) 0
-#define scm_request_cluster_init(scmrq) {}
-#define scm_reserve_cluster(scmrq) true
-#define scm_release_cluster(scmrq) {}
-#define scm_blk_dev_cluster_setup(bdev) {}
-#define scm_need_cluster_request(scmrq) false
-#define scm_initiate_cluster_request(scmrq) {}
-#define scm_cluster_request_irq(scmrq) {}
-#define scm_test_cluster_request(scmrq) false
-#define scm_cluster_size_valid() true
-#endif
+#else /* CONFIG_SCM_BLOCK_CLUSTER_WRITE */
+static inline void __scm_free_rq_cluster(struct scm_request *scmrq) {}
+static inline int __scm_alloc_rq_cluster(struct scm_request *scmrq)
+{
+	return 0;
+}
+static inline void scm_request_cluster_init(struct scm_request *scmrq) {}
+static inline bool scm_reserve_cluster(struct scm_request *scmrq)
+{
+	return true;
+}
+static inline void scm_release_cluster(struct scm_request *scmrq) {}
+static inline void scm_blk_dev_cluster_setup(struct scm_blk_dev *bdev) {}
+static inline bool scm_need_cluster_request(struct scm_request *scmrq)
+{
+	return false;
+}
+static inline void scm_initiate_cluster_request(struct scm_request *scmrq) {}
+static inline void scm_cluster_request_irq(struct scm_request *scmrq) {}
+static inline bool scm_test_cluster_request(struct scm_request *scmrq)
+{
+	return false;
+}
+static inline bool scm_cluster_size_valid(void)
+{
+	return true;
+}
+#endif /* CONFIG_SCM_BLOCK_CLUSTER_WRITE */
 
 extern debug_info_t *scm_debug;
 
diff --git a/drivers/s390/char/fs3270.c b/drivers/s390/char/fs3270.c
index 9117045..230697a 100644
--- a/drivers/s390/char/fs3270.c
+++ b/drivers/s390/char/fs3270.c
@@ -443,7 +443,7 @@
 			tty_kref_put(tty);
 			return -ENODEV;
 		}
-		minor = tty->index + RAW3270_FIRSTMINOR;
+		minor = tty->index;
 		tty_kref_put(tty);
 	}
 	mutex_lock(&fs3270_mutex);
@@ -524,6 +524,25 @@
 	.llseek		= no_llseek,
 };
 
+void fs3270_create_cb(int minor)
+{
+	__register_chrdev(IBM_FS3270_MAJOR, minor, 1, "tub", &fs3270_fops);
+	device_create(class3270, NULL, MKDEV(IBM_FS3270_MAJOR, minor),
+		      NULL, "3270/tub%d", minor);
+}
+
+void fs3270_destroy_cb(int minor)
+{
+	device_destroy(class3270, MKDEV(IBM_FS3270_MAJOR, minor));
+	__unregister_chrdev(IBM_FS3270_MAJOR, minor, 1, "tub");
+}
+
+struct raw3270_notifier fs3270_notifier =
+{
+	.create = fs3270_create_cb,
+	.destroy = fs3270_destroy_cb,
+};
+
 /*
  * 3270 fullscreen driver initialization.
  */
@@ -532,16 +551,20 @@
 {
 	int rc;
 
-	rc = register_chrdev(IBM_FS3270_MAJOR, "fs3270", &fs3270_fops);
+	rc = __register_chrdev(IBM_FS3270_MAJOR, 0, 1, "fs3270", &fs3270_fops);
 	if (rc)
 		return rc;
+	device_create(class3270, NULL, MKDEV(IBM_FS3270_MAJOR, 0),
+		      NULL, "3270/tub");
+	raw3270_register_notifier(&fs3270_notifier);
 	return 0;
 }
 
 static void __exit
 fs3270_exit(void)
 {
-	unregister_chrdev(IBM_FS3270_MAJOR, "fs3270");
+	raw3270_unregister_notifier(&fs3270_notifier);
+	__unregister_chrdev(IBM_FS3270_MAJOR, 0, 1, "fs3270");
 }
 
 MODULE_LICENSE("GPL");
diff --git a/drivers/s390/char/raw3270.c b/drivers/s390/char/raw3270.c
index 9a6c140..4c9030a 100644
--- a/drivers/s390/char/raw3270.c
+++ b/drivers/s390/char/raw3270.c
@@ -28,7 +28,7 @@
 #include <linux/device.h>
 #include <linux/mutex.h>
 
-static struct class *class3270;
+struct class *class3270;
 
 /* The main 3270 data structure. */
 struct raw3270 {
@@ -37,6 +37,7 @@
 	int minor;
 
 	short model, rows, cols;
+	unsigned int state;
 	unsigned long flags;
 
 	struct list_head req_queue;	/* Request queue. */
@@ -46,20 +47,26 @@
 	struct timer_list timer;	/* Device timer. */
 
 	unsigned char *ascebc;		/* ascii -> ebcdic table */
-	struct device *clttydev;	/* 3270-class tty device ptr */
-	struct device *cltubdev;	/* 3270-class tub device ptr */
 
-	struct raw3270_request init_request;
+	struct raw3270_view init_view;
+	struct raw3270_request init_reset;
+	struct raw3270_request init_readpart;
+	struct raw3270_request init_readmod;
 	unsigned char init_data[256];
 };
 
+/* raw3270->state */
+#define RAW3270_STATE_INIT	0	/* Initial state */
+#define RAW3270_STATE_RESET	1	/* Reset command is pending */
+#define RAW3270_STATE_W4ATTN	2	/* Wait for attention interrupt */
+#define RAW3270_STATE_READMOD	3	/* Read partition is pending */
+#define RAW3270_STATE_READY	4	/* Device is usable by views */
+
 /* raw3270->flags */
 #define RAW3270_FLAGS_14BITADDR	0	/* 14-bit buffer addresses */
 #define RAW3270_FLAGS_BUSY	1	/* Device busy, leave it alone */
-#define RAW3270_FLAGS_ATTN	2	/* Device sent an ATTN interrupt */
-#define RAW3270_FLAGS_READY	4	/* Device is useable by views */
-#define RAW3270_FLAGS_CONSOLE	8	/* Device is the console. */
-#define RAW3270_FLAGS_FROZEN	16	/* set if 3270 is frozen for suspend */
+#define RAW3270_FLAGS_CONSOLE	2	/* Device is the console. */
+#define RAW3270_FLAGS_FROZEN	3	/* set if 3270 is frozen for suspend */
 
 /* Semaphore to protect global data of raw3270 (devices, views, etc). */
 static DEFINE_MUTEX(raw3270_mutex);
@@ -97,6 +104,17 @@
 	0xf8, 0xf9, 0x7a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f
 };
 
+static inline int raw3270_state_ready(struct raw3270 *rp)
+{
+	return rp->state == RAW3270_STATE_READY;
+}
+
+static inline int raw3270_state_final(struct raw3270 *rp)
+{
+	return rp->state == RAW3270_STATE_INIT ||
+		rp->state == RAW3270_STATE_READY;
+}
+
 void
 raw3270_buffer_address(struct raw3270 *rp, char *cp, unsigned short addr)
 {
@@ -214,7 +232,7 @@
  * Stop running ccw.
  */
 static int
-raw3270_halt_io_nolock(struct raw3270 *rp, struct raw3270_request *rq)
+__raw3270_halt_io(struct raw3270 *rp, struct raw3270_request *rq)
 {
 	int retries;
 	int rc;
@@ -233,18 +251,6 @@
 	return rc;
 }
 
-static int
-raw3270_halt_io(struct raw3270 *rp, struct raw3270_request *rq)
-{
-	unsigned long flags;
-	int rc;
-
-	spin_lock_irqsave(get_ccwdev_lock(rp->cdev), flags);
-	rc = raw3270_halt_io_nolock(rp, rq);
-	spin_unlock_irqrestore(get_ccwdev_lock(rp->cdev), flags);
-	return rc;
-}
-
 /*
  * Add the request to the request queue, try to start it if the
  * 3270 device is idle. Return without waiting for end of i/o.
@@ -281,8 +287,8 @@
 	if (!rp || rp->view != view ||
 	    test_bit(RAW3270_FLAGS_FROZEN, &rp->flags))
 		rc = -EACCES;
-	else if (!test_bit(RAW3270_FLAGS_READY, &rp->flags))
-		rc = -ENODEV;
+	else if (!raw3270_state_ready(rp))
+		rc = -EBUSY;
 	else
 		rc =  __raw3270_start(rp, view, rq);
 	spin_unlock_irqrestore(get_ccwdev_lock(view->dev->cdev), flags);
@@ -299,8 +305,8 @@
 	if (!rp || rp->view != view ||
 	    test_bit(RAW3270_FLAGS_FROZEN, &rp->flags))
 		rc = -EACCES;
-	else if (!test_bit(RAW3270_FLAGS_READY, &rp->flags))
-		rc = -ENODEV;
+	else if (!raw3270_state_ready(rp))
+		rc = -EBUSY;
 	else
 		rc =  __raw3270_start(rp, view, rq);
 	return rc;
@@ -378,7 +384,7 @@
 	case RAW3270_IO_STOP:
 		if (!rq)
 			break;
-		raw3270_halt_io_nolock(rp, rq);
+		__raw3270_halt_io(rp, rq);
 		rq->rc = -EIO;
 		break;
 	default:
@@ -413,9 +419,14 @@
 }
 
 /*
- * Size sensing.
+ * To determine the size of the 3270 device we need to do:
+ * 1) send a 'read partition' data stream to the device
+ * 2) wait for the attn interrupt that precedes the query reply
+ * 3) do a read modified to get the query reply
+ * To make things worse we have to cope with intervention
+ * required (3270 device switched to 'stand-by') and command
+ * rejects (old devices that can't do 'read partition').
  */
-
 struct raw3270_ua {	/* Query Reply structure for Usable Area */
 	struct {	/* Usable Area Query Reply Base */
 		short l;	/* Length of this structured field */
@@ -451,117 +462,21 @@
 	} __attribute__ ((packed)) aua;
 } __attribute__ ((packed));
 
-static struct diag210 raw3270_init_diag210;
-static DEFINE_MUTEX(raw3270_init_mutex);
-
-static int
-raw3270_init_irq(struct raw3270_view *view, struct raw3270_request *rq,
-		 struct irb *irb)
-{
-	/*
-	 * Unit-Check Processing:
-	 * Expect Command Reject or Intervention Required.
-	 */
-	if (irb->scsw.cmd.dstat & DEV_STAT_UNIT_CHECK) {
-		/* Request finished abnormally. */
-		if (irb->ecw[0] & SNS0_INTERVENTION_REQ) {
-			set_bit(RAW3270_FLAGS_BUSY, &view->dev->flags);
-			return RAW3270_IO_BUSY;
-		}
-	}
-	if (rq) {
-		if (irb->scsw.cmd.dstat & DEV_STAT_UNIT_CHECK) {
-			if (irb->ecw[0] & SNS0_CMD_REJECT)
-				rq->rc = -EOPNOTSUPP;
-			else
-				rq->rc = -EIO;
-		} else
-			/* Request finished normally. Copy residual count. */
-			rq->rescnt = irb->scsw.cmd.count;
-	}
-	if (irb->scsw.cmd.dstat & DEV_STAT_ATTENTION) {
-		set_bit(RAW3270_FLAGS_ATTN, &view->dev->flags);
-		wake_up(&raw3270_wait_queue);
-	}
-	return RAW3270_IO_DONE;
-}
-
-static struct raw3270_fn raw3270_init_fn = {
-	.intv = raw3270_init_irq
-};
-
-static struct raw3270_view raw3270_init_view = {
-	.fn = &raw3270_init_fn
-};
-
-/*
- * raw3270_wait/raw3270_wait_interruptible/__raw3270_wakeup
- * Wait for end of request. The request must have been started
- * with raw3270_start, rc = 0. The device lock may NOT have been
- * released between calling raw3270_start and raw3270_wait.
- */
 static void
-raw3270_wake_init(struct raw3270_request *rq, void *data)
-{
-	wake_up((wait_queue_head_t *) data);
-}
-
-/*
- * Special wait function that can cope with console initialization.
- */
-static int
-raw3270_start_init(struct raw3270 *rp, struct raw3270_view *view,
-		   struct raw3270_request *rq)
-{
-	unsigned long flags;
-	int rc;
-
-#ifdef CONFIG_TN3270_CONSOLE
-	if (raw3270_registered == 0) {
-		spin_lock_irqsave(get_ccwdev_lock(view->dev->cdev), flags);
-		rq->callback = NULL;
-		rc = __raw3270_start(rp, view, rq);
-		if (rc == 0)
-			while (!raw3270_request_final(rq)) {
-				wait_cons_dev();
-				barrier();
-			}
-		spin_unlock_irqrestore(get_ccwdev_lock(view->dev->cdev), flags);
-		return rq->rc;
-	}
-#endif
-	rq->callback = raw3270_wake_init;
-	rq->callback_data = &raw3270_wait_queue;
-	spin_lock_irqsave(get_ccwdev_lock(view->dev->cdev), flags);
-	rc = __raw3270_start(rp, view, rq);
-	spin_unlock_irqrestore(get_ccwdev_lock(view->dev->cdev), flags);
-	if (rc)
-		return rc;
-	/* Now wait for the completion. */
-	rc = wait_event_interruptible(raw3270_wait_queue,
-				      raw3270_request_final(rq));
-	if (rc == -ERESTARTSYS) {	/* Interrupted by a signal. */
-		raw3270_halt_io(view->dev, rq);
-		/* No wait for the halt to complete. */
-		wait_event(raw3270_wait_queue, raw3270_request_final(rq));
-		return -ERESTARTSYS;
-	}
-	return rq->rc;
-}
-
-static int
-__raw3270_size_device_vm(struct raw3270 *rp)
+raw3270_size_device_vm(struct raw3270 *rp)
 {
 	int rc, model;
 	struct ccw_dev_id dev_id;
+	struct diag210 diag_data;
 
 	ccw_device_get_id(rp->cdev, &dev_id);
-	raw3270_init_diag210.vrdcdvno = dev_id.devno;
-	raw3270_init_diag210.vrdclen = sizeof(struct diag210);
-	rc = diag210(&raw3270_init_diag210);
-	if (rc)
-		return rc;
-	model = raw3270_init_diag210.vrdccrmd;
+	diag_data.vrdcdvno = dev_id.devno;
+	diag_data.vrdclen = sizeof(struct diag210);
+	rc = diag210(&diag_data);
+	model = diag_data.vrdccrmd;
+	/* Use default model 2 if the size could not be detected */
+	if (rc || model < 2 || model > 5)
+		model = 2;
 	switch (model) {
 	case 2:
 		rp->model = model;
@@ -583,77 +498,25 @@
 		rp->rows = 27;
 		rp->cols = 132;
 		break;
-	default:
-		rc = -EOPNOTSUPP;
-		break;
 	}
-	return rc;
 }
 
-static int
-__raw3270_size_device(struct raw3270 *rp)
+static void
+raw3270_size_device(struct raw3270 *rp)
 {
-	static const unsigned char wbuf[] =
-		{ 0x00, 0x07, 0x01, 0xff, 0x03, 0x00, 0x81 };
 	struct raw3270_ua *uap;
-	int rc;
 
-	/*
-	 * To determine the size of the 3270 device we need to do:
-	 * 1) send a 'read partition' data stream to the device
-	 * 2) wait for the attn interrupt that precedes the query reply
-	 * 3) do a read modified to get the query reply
-	 * To make things worse we have to cope with intervention
-	 * required (3270 device switched to 'stand-by') and command
-	 * rejects (old devices that can't do 'read partition').
-	 */
-	memset(&rp->init_request, 0, sizeof(rp->init_request));
-	memset(&rp->init_data, 0, 256);
-	/* Store 'read partition' data stream to init_data */
-	memcpy(&rp->init_data, wbuf, sizeof(wbuf));
-	INIT_LIST_HEAD(&rp->init_request.list);
-	rp->init_request.ccw.cmd_code = TC_WRITESF;
-	rp->init_request.ccw.flags = CCW_FLAG_SLI;
-	rp->init_request.ccw.count = sizeof(wbuf);
-	rp->init_request.ccw.cda = (__u32) __pa(&rp->init_data);
-
-	rc = raw3270_start_init(rp, &raw3270_init_view, &rp->init_request);
-	if (rc)
-		/* Check error cases: -ERESTARTSYS, -EIO and -EOPNOTSUPP */
-		return rc;
-
-	/* Wait for attention interrupt. */
-#ifdef CONFIG_TN3270_CONSOLE
-	if (raw3270_registered == 0) {
-		unsigned long flags;
-
-		spin_lock_irqsave(get_ccwdev_lock(rp->cdev), flags);
-		while (!test_and_clear_bit(RAW3270_FLAGS_ATTN, &rp->flags))
-			wait_cons_dev();
-		spin_unlock_irqrestore(get_ccwdev_lock(rp->cdev), flags);
-	} else
-#endif
-		rc = wait_event_interruptible(raw3270_wait_queue,
-			test_and_clear_bit(RAW3270_FLAGS_ATTN, &rp->flags));
-	if (rc)
-		return rc;
-
-	/*
-	 * The device accepted the 'read partition' command. Now
-	 * set up a read ccw and issue it.
-	 */
-	rp->init_request.ccw.cmd_code = TC_READMOD;
-	rp->init_request.ccw.flags = CCW_FLAG_SLI;
-	rp->init_request.ccw.count = sizeof(rp->init_data);
-	rp->init_request.ccw.cda = (__u32) __pa(rp->init_data);
-	rc = raw3270_start_init(rp, &raw3270_init_view, &rp->init_request);
-	if (rc)
-		return rc;
 	/* Got a Query Reply */
 	uap = (struct raw3270_ua *) (rp->init_data + 1);
 	/* Paranoia check. */
-	if (rp->init_data[0] != 0x88 || uap->uab.qcode != 0x81)
-		return -EOPNOTSUPP;
+	if (rp->init_readmod.rc || rp->init_data[0] != 0x88 ||
+	    uap->uab.qcode != 0x81) {
+		/* Couldn't detect size. Use default model 2. */
+		rp->model = 2;
+		rp->rows = 24;
+		rp->cols = 80;
+		return;
+	}
 	/* Copy rows/columns of default Usable Area */
 	rp->rows = uap->uab.h;
 	rp->cols = uap->uab.w;
@@ -666,66 +529,131 @@
 		rp->rows = uap->aua.hauai;
 		rp->cols = uap->aua.wauai;
 	}
-	return 0;
+	/* Try to find a model. */
+	rp->model = 0;
+	if (rp->rows == 24 && rp->cols == 80)
+		rp->model = 2;
+	if (rp->rows == 32 && rp->cols == 80)
+		rp->model = 3;
+	if (rp->rows == 43 && rp->cols == 80)
+		rp->model = 4;
+	if (rp->rows == 27 && rp->cols == 132)
+		rp->model = 5;
+}
+
+static void
+raw3270_size_device_done(struct raw3270 *rp)
+{
+	struct raw3270_view *view;
+
+	rp->view = NULL;
+	rp->state = RAW3270_STATE_READY;
+	/* Notify views about new size */
+	list_for_each_entry(view, &rp->view_list, list)
+		if (view->fn->resize)
+			view->fn->resize(view, rp->model, rp->rows, rp->cols);
+	/* Setup processing done, now activate a view */
+	list_for_each_entry(view, &rp->view_list, list) {
+		rp->view = view;
+		if (view->fn->activate(view) == 0)
+			break;
+		rp->view = NULL;
+	}
+}
+
+static void
+raw3270_read_modified_cb(struct raw3270_request *rq, void *data)
+{
+	struct raw3270 *rp = rq->view->dev;
+
+	raw3270_size_device(rp);
+	raw3270_size_device_done(rp);
+}
+
+static void
+raw3270_read_modified(struct raw3270 *rp)
+{
+	if (rp->state != RAW3270_STATE_W4ATTN)
+		return;
+	/* Use 'read modified' to get the result of a read partition. */
+	memset(&rp->init_readmod, 0, sizeof(rp->init_readmod));
+	memset(&rp->init_data, 0, sizeof(rp->init_data));
+	rp->init_readmod.ccw.cmd_code = TC_READMOD;
+	rp->init_readmod.ccw.flags = CCW_FLAG_SLI;
+	rp->init_readmod.ccw.count = sizeof(rp->init_data);
+	rp->init_readmod.ccw.cda = (__u32) __pa(rp->init_data);
+	rp->init_readmod.callback = raw3270_read_modified_cb;
+	rp->state = RAW3270_STATE_READMOD;
+	raw3270_start_irq(&rp->init_view, &rp->init_readmod);
+}
+
+static void
+raw3270_writesf_readpart(struct raw3270 *rp)
+{
+	static const unsigned char wbuf[] =
+		{ 0x00, 0x07, 0x01, 0xff, 0x03, 0x00, 0x81 };
+
+	/* Store 'read partition' data stream to init_data */
+	memset(&rp->init_readpart, 0, sizeof(rp->init_readpart));
+	memset(&rp->init_data, 0, sizeof(rp->init_data));
+	memcpy(&rp->init_data, wbuf, sizeof(wbuf));
+	rp->init_readpart.ccw.cmd_code = TC_WRITESF;
+	rp->init_readpart.ccw.flags = CCW_FLAG_SLI;
+	rp->init_readpart.ccw.count = sizeof(wbuf);
+	rp->init_readpart.ccw.cda = (__u32) __pa(&rp->init_data);
+	rp->state = RAW3270_STATE_W4ATTN;
+	raw3270_start_irq(&rp->init_view, &rp->init_readpart);
+}
+
+/*
+ * Device reset
+ */
+static void
+raw3270_reset_device_cb(struct raw3270_request *rq, void *data)
+{
+	struct raw3270 *rp = rq->view->dev;
+
+	if (rp->state != RAW3270_STATE_RESET)
+		return;
+	if (rq && rq->rc) {
+		/* Reset command failed. */
+		rp->state = RAW3270_STATE_INIT;
+	} else if (0 && MACHINE_IS_VM) {
+		raw3270_size_device_vm(rp);
+		raw3270_size_device_done(rp);
+	} else
+		raw3270_writesf_readpart(rp);
 }
 
 static int
-raw3270_size_device(struct raw3270 *rp)
+__raw3270_reset_device(struct raw3270 *rp)
 {
 	int rc;
 
-	mutex_lock(&raw3270_init_mutex);
-	rp->view = &raw3270_init_view;
-	raw3270_init_view.dev = rp;
-	if (MACHINE_IS_VM)
-		rc = __raw3270_size_device_vm(rp);
-	else
-		rc = __raw3270_size_device(rp);
-	raw3270_init_view.dev = NULL;
-	rp->view = NULL;
-	mutex_unlock(&raw3270_init_mutex);
-	if (rc == 0) {	/* Found something. */
-		/* Try to find a model. */
-		rp->model = 0;
-		if (rp->rows == 24 && rp->cols == 80)
-			rp->model = 2;
-		if (rp->rows == 32 && rp->cols == 80)
-			rp->model = 3;
-		if (rp->rows == 43 && rp->cols == 80)
-			rp->model = 4;
-		if (rp->rows == 27 && rp->cols == 132)
-			rp->model = 5;
-	} else {
-		/* Couldn't detect size. Use default model 2. */
-		rp->model = 2;
-		rp->rows = 24;
-		rp->cols = 80;
-		return 0;
-	}
+	/* Store reset data stream to init_data/init_reset */
+	memset(&rp->init_reset, 0, sizeof(rp->init_reset));
+	memset(&rp->init_data, 0, sizeof(rp->init_data));
+	rp->init_data[0] = TW_KR;
+	rp->init_reset.ccw.cmd_code = TC_EWRITEA;
+	rp->init_reset.ccw.flags = CCW_FLAG_SLI;
+	rp->init_reset.ccw.count = 1;
+	rp->init_reset.ccw.cda = (__u32) __pa(rp->init_data);
+	rp->init_reset.callback = raw3270_reset_device_cb;
+	rc = __raw3270_start(rp, &rp->init_view, &rp->init_reset);
+	if (rc == 0 && rp->state == RAW3270_STATE_INIT)
+		rp->state = RAW3270_STATE_RESET;
 	return rc;
 }
 
 static int
 raw3270_reset_device(struct raw3270 *rp)
 {
+	unsigned long flags;
 	int rc;
 
-	mutex_lock(&raw3270_init_mutex);
-	memset(&rp->init_request, 0, sizeof(rp->init_request));
-	memset(&rp->init_data, 0, sizeof(rp->init_data));
-	/* Store reset data stream to init_data/init_request */
-	rp->init_data[0] = TW_KR;
-	INIT_LIST_HEAD(&rp->init_request.list);
-	rp->init_request.ccw.cmd_code = TC_EWRITEA;
-	rp->init_request.ccw.flags = CCW_FLAG_SLI;
-	rp->init_request.ccw.count = 1;
-	rp->init_request.ccw.cda = (__u32) __pa(rp->init_data);
-	rp->view = &raw3270_init_view;
-	raw3270_init_view.dev = rp;
-	rc = raw3270_start_init(rp, &raw3270_init_view, &rp->init_request);
-	raw3270_init_view.dev = NULL;
-	rp->view = NULL;
-	mutex_unlock(&raw3270_init_mutex);
+	spin_lock_irqsave(get_ccwdev_lock(rp->cdev), flags);
+	rc = __raw3270_reset_device(rp);
+	spin_unlock_irqrestore(get_ccwdev_lock(rp->cdev), flags);
 	return rc;
 }
 
@@ -739,13 +667,50 @@
 	if (!rp || rp->view != view ||
 	    test_bit(RAW3270_FLAGS_FROZEN, &rp->flags))
 		rc = -EACCES;
-	else if (!test_bit(RAW3270_FLAGS_READY, &rp->flags))
-		rc = -ENODEV;
+	else if (!raw3270_state_ready(rp))
+		rc = -EBUSY;
 	else
 		rc = raw3270_reset_device(view->dev);
 	return rc;
 }
 
+static int
+raw3270_init_irq(struct raw3270_view *view, struct raw3270_request *rq,
+		 struct irb *irb)
+{
+	struct raw3270 *rp;
+
+	/*
+	 * Unit-Check Processing:
+	 * Expect Command Reject or Intervention Required.
+	 */
+	if (irb->scsw.cmd.dstat & DEV_STAT_UNIT_CHECK) {
+		/* Request finished abnormally. */
+		if (irb->ecw[0] & SNS0_INTERVENTION_REQ) {
+			set_bit(RAW3270_FLAGS_BUSY, &view->dev->flags);
+			return RAW3270_IO_BUSY;
+		}
+	}
+	if (rq) {
+		if (irb->scsw.cmd.dstat & DEV_STAT_UNIT_CHECK) {
+			if (irb->ecw[0] & SNS0_CMD_REJECT)
+				rq->rc = -EOPNOTSUPP;
+			else
+				rq->rc = -EIO;
+		}
+	}
+	if (irb->scsw.cmd.dstat & DEV_STAT_ATTENTION) {
+		/* Queue read modified after attention interrupt */
+		rp = view->dev;
+		raw3270_read_modified(rp);
+	}
+	return RAW3270_IO_DONE;
+}
+
+static struct raw3270_fn raw3270_init_fn = {
+	.intv = raw3270_init_irq
+};
+
 /*
  * Setup new 3270 device.
  */
@@ -774,6 +739,10 @@
 	INIT_LIST_HEAD(&rp->req_queue);
 	INIT_LIST_HEAD(&rp->view_list);
 
+	rp->init_view.dev = rp;
+	rp->init_view.fn = &raw3270_init_fn;
+	rp->view = &rp->init_view;
+
 	/*
 	 * Add device to list and find the smallest unused minor
 	 * number for it. Note: there is no device with minor 0,
@@ -812,6 +781,7 @@
  */
 struct raw3270 __init *raw3270_setup_console(struct ccw_device *cdev)
 {
+	unsigned long flags;
 	struct raw3270 *rp;
 	char *ascebc;
 	int rc;
@@ -822,16 +792,15 @@
 	if (rc)
 		return ERR_PTR(rc);
 	set_bit(RAW3270_FLAGS_CONSOLE, &rp->flags);
-	rc = raw3270_reset_device(rp);
-	if (rc)
-		return ERR_PTR(rc);
-	rc = raw3270_size_device(rp);
-	if (rc)
-		return ERR_PTR(rc);
-	rc = raw3270_reset_device(rp);
-	if (rc)
-		return ERR_PTR(rc);
-	set_bit(RAW3270_FLAGS_READY, &rp->flags);
+	spin_lock_irqsave(get_ccwdev_lock(rp->cdev), flags);
+	do {
+		__raw3270_reset_device(rp);
+		while (!raw3270_state_final(rp)) {
+			wait_cons_dev();
+			barrier();
+		}
+	} while (rp->state != RAW3270_STATE_READY);
+	spin_unlock_irqrestore(get_ccwdev_lock(rp->cdev), flags);
 	return rp;
 }
 
@@ -893,13 +862,13 @@
 	spin_lock_irqsave(get_ccwdev_lock(rp->cdev), flags);
 	if (rp->view == view)
 		rc = 0;
-	else if (!test_bit(RAW3270_FLAGS_READY, &rp->flags))
-		rc = -ENODEV;
+	else if (!raw3270_state_ready(rp))
+		rc = -EBUSY;
 	else if (test_bit(RAW3270_FLAGS_FROZEN, &rp->flags))
 		rc = -EACCES;
 	else {
 		oldview = NULL;
-		if (rp->view) {
+		if (rp->view && rp->view->fn->deactivate) {
 			oldview = rp->view;
 			oldview->fn->deactivate(oldview);
 		}
@@ -944,7 +913,7 @@
 		list_del_init(&view->list);
 		list_add_tail(&view->list, &rp->view_list);
 		/* Try to activate another view. */
-		if (test_bit(RAW3270_FLAGS_READY, &rp->flags) &&
+		if (raw3270_state_ready(rp) &&
 		    !test_bit(RAW3270_FLAGS_FROZEN, &rp->flags)) {
 			list_for_each_entry(view, &rp->view_list, list) {
 				rp->view = view;
@@ -975,18 +944,16 @@
 		if (rp->minor != minor)
 			continue;
 		spin_lock_irqsave(get_ccwdev_lock(rp->cdev), flags);
-		if (test_bit(RAW3270_FLAGS_READY, &rp->flags)) {
-			atomic_set(&view->ref_count, 2);
-			view->dev = rp;
-			view->fn = fn;
-			view->model = rp->model;
-			view->rows = rp->rows;
-			view->cols = rp->cols;
-			view->ascebc = rp->ascebc;
-			spin_lock_init(&view->lock);
-			list_add(&view->list, &rp->view_list);
-			rc = 0;
-		}
+		atomic_set(&view->ref_count, 2);
+		view->dev = rp;
+		view->fn = fn;
+		view->model = rp->model;
+		view->rows = rp->rows;
+		view->cols = rp->cols;
+		view->ascebc = rp->ascebc;
+		spin_lock_init(&view->lock);
+		list_add(&view->list, &rp->view_list);
+		rc = 0;
 		spin_unlock_irqrestore(get_ccwdev_lock(rp->cdev), flags);
 		break;
 	}
@@ -1010,14 +977,11 @@
 		if (rp->minor != minor)
 			continue;
 		spin_lock_irqsave(get_ccwdev_lock(rp->cdev), flags);
-		if (test_bit(RAW3270_FLAGS_READY, &rp->flags)) {
-			view = ERR_PTR(-ENOENT);
-			list_for_each_entry(tmp, &rp->view_list, list) {
-				if (tmp->fn == fn) {
-					raw3270_get_view(tmp);
-					view = tmp;
-					break;
-				}
+		list_for_each_entry(tmp, &rp->view_list, list) {
+			if (tmp->fn == fn) {
+				raw3270_get_view(tmp);
+				view = tmp;
+				break;
 			}
 		}
 		spin_unlock_irqrestore(get_ccwdev_lock(rp->cdev), flags);
@@ -1044,7 +1008,7 @@
 		rp->view = NULL;
 	}
 	list_del_init(&view->list);
-	if (!rp->view && test_bit(RAW3270_FLAGS_READY, &rp->flags) &&
+	if (!rp->view && raw3270_state_ready(rp) &&
 	    !test_bit(RAW3270_FLAGS_FROZEN, &rp->flags)) {
 		/* Try to activate another view. */
 		list_for_each_entry(nv, &rp->view_list, list) {
@@ -1072,10 +1036,6 @@
 
 	/* Remove from device chain. */
 	mutex_lock(&raw3270_mutex);
-	if (rp->clttydev && !IS_ERR(rp->clttydev))
-		device_destroy(class3270, MKDEV(IBM_TTY3270_MAJOR, rp->minor));
-	if (rp->cltubdev && !IS_ERR(rp->cltubdev))
-		device_destroy(class3270, MKDEV(IBM_FS3270_MAJOR, rp->minor));
 	list_del_init(&rp->list);
 	mutex_unlock(&raw3270_mutex);
 
@@ -1139,75 +1099,34 @@
 
 static int raw3270_create_attributes(struct raw3270 *rp)
 {
-	int rc;
-
-	rc = sysfs_create_group(&rp->cdev->dev.kobj, &raw3270_attr_group);
-	if (rc)
-		goto out;
-
-	rp->clttydev = device_create(class3270, &rp->cdev->dev,
-				     MKDEV(IBM_TTY3270_MAJOR, rp->minor), NULL,
-				     "tty%s", dev_name(&rp->cdev->dev));
-	if (IS_ERR(rp->clttydev)) {
-		rc = PTR_ERR(rp->clttydev);
-		goto out_ttydev;
-	}
-
-	rp->cltubdev = device_create(class3270, &rp->cdev->dev,
-				     MKDEV(IBM_FS3270_MAJOR, rp->minor), NULL,
-				     "tub%s", dev_name(&rp->cdev->dev));
-	if (!IS_ERR(rp->cltubdev))
-		goto out;
-
-	rc = PTR_ERR(rp->cltubdev);
-	device_destroy(class3270, MKDEV(IBM_TTY3270_MAJOR, rp->minor));
-
-out_ttydev:
-	sysfs_remove_group(&rp->cdev->dev.kobj, &raw3270_attr_group);
-out:
-	return rc;
+	return sysfs_create_group(&rp->cdev->dev.kobj, &raw3270_attr_group);
 }
 
 /*
  * Notifier for device addition/removal
  */
-struct raw3270_notifier {
-	struct list_head list;
-	void (*notifier)(int, int);
-};
-
 static LIST_HEAD(raw3270_notifier);
 
-int raw3270_register_notifier(void (*notifier)(int, int))
+int raw3270_register_notifier(struct raw3270_notifier *notifier)
 {
-	struct raw3270_notifier *np;
 	struct raw3270 *rp;
 
-	np = kmalloc(sizeof(struct raw3270_notifier), GFP_KERNEL);
-	if (!np)
-		return -ENOMEM;
-	np->notifier = notifier;
 	mutex_lock(&raw3270_mutex);
-	list_add_tail(&np->list, &raw3270_notifier);
-	list_for_each_entry(rp, &raw3270_devices, list) {
-		get_device(&rp->cdev->dev);
-		notifier(rp->minor, 1);
-	}
+	list_add_tail(&notifier->list, &raw3270_notifier);
+	list_for_each_entry(rp, &raw3270_devices, list)
+		notifier->create(rp->minor);
 	mutex_unlock(&raw3270_mutex);
 	return 0;
 }
 
-void raw3270_unregister_notifier(void (*notifier)(int, int))
+void raw3270_unregister_notifier(struct raw3270_notifier *notifier)
 {
-	struct raw3270_notifier *np;
+	struct raw3270 *rp;
 
 	mutex_lock(&raw3270_mutex);
-	list_for_each_entry(np, &raw3270_notifier, list)
-		if (np->notifier == notifier) {
-			list_del(&np->list);
-			kfree(np);
-			break;
-		}
+	list_for_each_entry(rp, &raw3270_devices, list)
+		notifier->destroy(rp->minor);
+	list_del(&notifier->list);
 	mutex_unlock(&raw3270_mutex);
 }
 
@@ -1217,29 +1136,20 @@
 static int
 raw3270_set_online (struct ccw_device *cdev)
 {
-	struct raw3270 *rp;
 	struct raw3270_notifier *np;
+	struct raw3270 *rp;
 	int rc;
 
 	rp = raw3270_create_device(cdev);
 	if (IS_ERR(rp))
 		return PTR_ERR(rp);
-	rc = raw3270_reset_device(rp);
-	if (rc)
-		goto failure;
-	rc = raw3270_size_device(rp);
-	if (rc)
-		goto failure;
-	rc = raw3270_reset_device(rp);
-	if (rc)
-		goto failure;
 	rc = raw3270_create_attributes(rp);
 	if (rc)
 		goto failure;
-	set_bit(RAW3270_FLAGS_READY, &rp->flags);
+	raw3270_reset_device(rp);
 	mutex_lock(&raw3270_mutex);
 	list_for_each_entry(np, &raw3270_notifier, list)
-		np->notifier(rp->minor, 1);
+		np->create(rp->minor);
 	mutex_unlock(&raw3270_mutex);
 	return 0;
 
@@ -1268,14 +1178,14 @@
 	 */
 	if (rp == NULL)
 		return;
-	clear_bit(RAW3270_FLAGS_READY, &rp->flags);
 
 	sysfs_remove_group(&cdev->dev.kobj, &raw3270_attr_group);
 
 	/* Deactivate current view and remove all views. */
 	spin_lock_irqsave(get_ccwdev_lock(cdev), flags);
 	if (rp->view) {
-		rp->view->fn->deactivate(rp->view);
+		if (rp->view->fn->deactivate)
+			rp->view->fn->deactivate(rp->view);
 		rp->view = NULL;
 	}
 	while (!list_empty(&rp->view_list)) {
@@ -1290,7 +1200,7 @@
 
 	mutex_lock(&raw3270_mutex);
 	list_for_each_entry(np, &raw3270_notifier, list)
-		np->notifier(rp->minor, 0);
+		np->destroy(rp->minor);
 	mutex_unlock(&raw3270_mutex);
 
 	/* Reset 3270 device. */
@@ -1324,7 +1234,7 @@
 	if (!rp)
 		return 0;
 	spin_lock_irqsave(get_ccwdev_lock(rp->cdev), flags);
-	if (rp->view)
+	if (rp->view && rp->view->fn->deactivate)
 		rp->view->fn->deactivate(rp->view);
 	if (!test_bit(RAW3270_FLAGS_CONSOLE, &rp->flags)) {
 		/*
@@ -1351,7 +1261,7 @@
 		return 0;
 	spin_lock_irqsave(get_ccwdev_lock(rp->cdev), flags);
 	clear_bit(RAW3270_FLAGS_FROZEN, &rp->flags);
-	if (rp->view)
+	if (rp->view && rp->view->fn->activate)
 		rp->view->fn->activate(rp->view);
 	spin_unlock_irqrestore(get_ccwdev_lock(rp->cdev), flags);
 	return 0;
@@ -1434,6 +1344,7 @@
 module_init(raw3270_init);
 module_exit(raw3270_exit);
 
+EXPORT_SYMBOL(class3270);
 EXPORT_SYMBOL(raw3270_request_alloc);
 EXPORT_SYMBOL(raw3270_request_free);
 EXPORT_SYMBOL(raw3270_request_reset);
diff --git a/drivers/s390/char/raw3270.h b/drivers/s390/char/raw3270.h
index ed34eb2..7b73ff8 100644
--- a/drivers/s390/char/raw3270.h
+++ b/drivers/s390/char/raw3270.h
@@ -91,6 +91,7 @@
 
 struct raw3270;
 struct raw3270_view;
+extern struct class *class3270;
 
 /* 3270 CCW request */
 struct raw3270_request {
@@ -140,6 +141,7 @@
 		     struct raw3270_request *, struct irb *);
 	void (*release)(struct raw3270_view *);
 	void (*free)(struct raw3270_view *);
+	void (*resize)(struct raw3270_view *, int, int, int);
 };
 
 /*
@@ -192,8 +194,14 @@
 void raw3270_wait_cons_dev(struct raw3270 *);
 
 /* Notifier for device addition/removal */
-int raw3270_register_notifier(void (*notifier)(int, int));
-void raw3270_unregister_notifier(void (*notifier)(int, int));
+struct raw3270_notifier {
+	struct list_head list;
+	void (*create)(int minor);
+	void (*destroy)(int minor);
+};
+
+int raw3270_register_notifier(struct raw3270_notifier *);
+void raw3270_unregister_notifier(struct raw3270_notifier *);
 void raw3270_pm_unfreeze(struct raw3270_view *);
 
 /*
diff --git a/drivers/s390/char/sclp.c b/drivers/s390/char/sclp.c
index 12c16a6..bd6871b 100644
--- a/drivers/s390/char/sclp.c
+++ b/drivers/s390/char/sclp.c
@@ -450,7 +450,7 @@
 	timeout = 0;
 	if (timer_pending(&sclp_request_timer)) {
 		/* Get timeout TOD value */
-		timeout = get_clock() +
+		timeout = get_tod_clock() +
 			  sclp_tod_from_jiffies(sclp_request_timer.expires -
 						jiffies);
 	}
@@ -472,7 +472,7 @@
 	while (sclp_running_state != sclp_running_state_idle) {
 		/* Check for expired request timer */
 		if (timer_pending(&sclp_request_timer) &&
-		    get_clock() > timeout &&
+		    get_tod_clock() > timeout &&
 		    del_timer(&sclp_request_timer))
 			sclp_request_timer.function(sclp_request_timer.data);
 		cpu_relax();
diff --git a/drivers/s390/char/sclp_cmd.c b/drivers/s390/char/sclp_cmd.c
index c44d13f..30a2255 100644
--- a/drivers/s390/char/sclp_cmd.c
+++ b/drivers/s390/char/sclp_cmd.c
@@ -56,7 +56,6 @@
 
 u64 sclp_facilities;
 static u8 sclp_fac84;
-static u8 sclp_fac85;
 static unsigned long long rzm;
 static unsigned long long rnmax;
 
@@ -131,7 +130,8 @@
 	sccb = &early_read_info_sccb;
 	sclp_facilities = sccb->facilities;
 	sclp_fac84 = sccb->fac84;
-	sclp_fac85 = sccb->fac85;
+	if (sccb->fac85 & 0x02)
+		S390_lowcore.machine_flags |= MACHINE_FLAG_ESOP;
 	rnmax = sccb->rnmax ? sccb->rnmax : sccb->rnmax2;
 	rzm = sccb->rnsize ? sccb->rnsize : sccb->rnsize2;
 	rzm <<= 20;
@@ -171,12 +171,6 @@
 	return rzm;
 }
 
-u8 sclp_get_fac85(void)
-{
-	return sclp_fac85;
-}
-EXPORT_SYMBOL_GPL(sclp_get_fac85);
-
 /*
  * This function will be called after sclp_facilities_detect(), which gets
  * called from early.c code. Therefore the sccb should have valid contents.
diff --git a/drivers/s390/char/tty3270.c b/drivers/s390/char/tty3270.c
index 3860e79..b907dba 100644
--- a/drivers/s390/char/tty3270.c
+++ b/drivers/s390/char/tty3270.c
@@ -15,6 +15,7 @@
 #include <linux/init.h>
 #include <linux/console.h>
 #include <linux/interrupt.h>
+#include <linux/workqueue.h>
 
 #include <linux/slab.h>
 #include <linux/bootmem.h>
@@ -80,6 +81,8 @@
 	unsigned int highlight;		/* Blink/reverse/underscore */
 	unsigned int f_color;		/* Foreground color */
 	struct tty3270_line *screen;
+	unsigned int n_model, n_cols, n_rows;	/* New model & size */
+	struct work_struct resize_work;
 
 	/* Input stuff. */
 	struct string *prompt;		/* Output string for input area. */
@@ -115,6 +118,7 @@
 #define TTY_UPDATE_ALL		16	/* Recreate screen. */
 
 static void tty3270_update(struct tty3270 *);
+static void tty3270_resize_work(struct work_struct *work);
 
 /*
  * Setup timeout for a device. On timeout trigger an update.
@@ -683,12 +687,6 @@
 	INIT_LIST_HEAD(&tp->update);
 	INIT_LIST_HEAD(&tp->rcl_lines);
 	tp->rcl_max = 20;
-	tty_port_init(&tp->port);
-	setup_timer(&tp->timer, (void (*)(unsigned long)) tty3270_update,
-		    (unsigned long) tp);
-	tasklet_init(&tp->readlet,
-		     (void (*)(unsigned long)) tty3270_read_tasklet,
-		     (unsigned long) tp->read);
 
 	for (pages = 0; pages < TTY3270_STRING_PAGES; pages++) {
 		tp->freemem_pages[pages] = (void *)
@@ -710,6 +708,15 @@
 	tp->kbd = kbd_alloc();
 	if (!tp->kbd)
 		goto out_reset;
+
+	tty_port_init(&tp->port);
+	setup_timer(&tp->timer, (void (*)(unsigned long)) tty3270_update,
+		    (unsigned long) tp);
+	tasklet_init(&tp->readlet,
+		     (void (*)(unsigned long)) tty3270_read_tasklet,
+		     (unsigned long) tp->read);
+	INIT_WORK(&tp->resize_work, tty3270_resize_work);
+
 	return tp;
 
 out_reset:
@@ -752,42 +759,96 @@
 /*
  * Allocate tty3270 screen.
  */
-static int
-tty3270_alloc_screen(struct tty3270 *tp)
+static struct tty3270_line *
+tty3270_alloc_screen(unsigned int rows, unsigned int cols)
 {
+	struct tty3270_line *screen;
 	unsigned long size;
 	int lines;
 
-	size = sizeof(struct tty3270_line) * (tp->view.rows - 2);
-	tp->screen = kzalloc(size, GFP_KERNEL);
-	if (!tp->screen)
+	size = sizeof(struct tty3270_line) * (rows - 2);
+	screen = kzalloc(size, GFP_KERNEL);
+	if (!screen)
 		goto out_err;
-	for (lines = 0; lines < tp->view.rows - 2; lines++) {
-		size = sizeof(struct tty3270_cell) * tp->view.cols;
-		tp->screen[lines].cells = kzalloc(size, GFP_KERNEL);
-		if (!tp->screen[lines].cells)
+	for (lines = 0; lines < rows - 2; lines++) {
+		size = sizeof(struct tty3270_cell) * cols;
+		screen[lines].cells = kzalloc(size, GFP_KERNEL);
+		if (!screen[lines].cells)
 			goto out_screen;
 	}
-	return 0;
+	return screen;
 out_screen:
 	while (lines--)
-		kfree(tp->screen[lines].cells);
-	kfree(tp->screen);
+		kfree(screen[lines].cells);
+	kfree(screen);
 out_err:
-	return -ENOMEM;
+	return ERR_PTR(-ENOMEM);
 }
 
 /*
  * Free tty3270 screen.
  */
 static void
-tty3270_free_screen(struct tty3270 *tp)
+tty3270_free_screen(struct tty3270_line *screen, unsigned int rows)
 {
 	int lines;
 
-	for (lines = 0; lines < tp->view.rows - 2; lines++)
-		kfree(tp->screen[lines].cells);
-	kfree(tp->screen);
+	for (lines = 0; lines < rows - 2; lines++)
+		kfree(screen[lines].cells);
+	kfree(screen);
+}
+
+/*
+ * Resize tty3270 screen
+ */
+static void tty3270_resize_work(struct work_struct *work)
+{
+	struct tty3270 *tp = container_of(work, struct tty3270, resize_work);
+	struct tty3270_line *screen, *oscreen;
+	struct tty_struct *tty;
+	unsigned int orows;
+	struct winsize ws;
+
+	screen = tty3270_alloc_screen(tp->n_rows, tp->n_cols);
+	if (IS_ERR(screen))
+		return;
+	/* Switch to new output size */
+	spin_lock_bh(&tp->view.lock);
+	oscreen = tp->screen;
+	orows = tp->view.rows;
+	tp->view.model = tp->n_model;
+	tp->view.rows = tp->n_rows;
+	tp->view.cols = tp->n_cols;
+	tp->screen = screen;
+	free_string(&tp->freemem, tp->prompt);
+	free_string(&tp->freemem, tp->status);
+	tty3270_create_prompt(tp);
+	tty3270_create_status(tp);
+	tp->nr_up = 0;
+	while (tp->nr_lines < tp->view.rows - 2)
+		tty3270_blank_line(tp);
+	tp->update_flags = TTY_UPDATE_ALL;
+	spin_unlock_bh(&tp->view.lock);
+	tty3270_free_screen(oscreen, orows);
+	tty3270_set_timer(tp, 1);
+	/* Inform tty layer about new size */
+	tty = tty_port_tty_get(&tp->port);
+	if (!tty)
+		return;
+	ws.ws_row = tp->view.rows - 2;
+	ws.ws_col = tp->view.cols;
+	tty_do_resize(tty, &ws);
+}
+
+static void
+tty3270_resize(struct raw3270_view *view, int model, int rows, int cols)
+{
+	struct tty3270 *tp = container_of(view, struct tty3270, view);
+
+	tp->n_model = model;
+	tp->n_rows = rows;
+	tp->n_cols = cols;
+	schedule_work(&tp->resize_work);
 }
 
 /*
@@ -815,7 +876,8 @@
 tty3270_free(struct raw3270_view *view)
 {
 	struct tty3270 *tp = container_of(view, struct tty3270, view);
-	tty3270_free_screen(tp);
+
+	tty3270_free_screen(tp->screen, tp->view.rows);
 	tty3270_free_view(tp);
 }
 
@@ -827,9 +889,8 @@
 {
 	int i;
 
-	for (i = 0; i < tty3270_max_index; i++) {
-		struct raw3270_view *view =
-			raw3270_find_view(&tty3270_fn, i + RAW3270_FIRSTMINOR);
+	for (i = RAW3270_FIRSTMINOR; i <= tty3270_max_index; i++) {
+		struct raw3270_view *view = raw3270_find_view(&tty3270_fn, i);
 		if (!IS_ERR(view))
 			raw3270_del_view(view);
 	}
@@ -840,7 +901,8 @@
 	.deactivate = tty3270_deactivate,
 	.intv = (void *) tty3270_irq,
 	.release = tty3270_release,
-	.free = tty3270_free
+	.free = tty3270_free,
+	.resize = tty3270_resize
 };
 
 /*
@@ -853,8 +915,7 @@
 	int i, rc;
 
 	/* Check if the tty3270 is already there. */
-	view = raw3270_find_view(&tty3270_fn,
-				  tty->index + RAW3270_FIRSTMINOR);
+	view = raw3270_find_view(&tty3270_fn, tty->index);
 	if (!IS_ERR(view)) {
 		tp = container_of(view, struct tty3270, view);
 		tty->driver_data = tp;
@@ -866,29 +927,26 @@
 		tp->inattr = TF_INPUT;
 		return tty_port_install(&tp->port, driver, tty);
 	}
-	if (tty3270_max_index < tty->index + 1)
-		tty3270_max_index = tty->index + 1;
-
-	/* Quick exit if there is no device for tty->index. */
-	if (PTR_ERR(view) == -ENODEV)
-		return -ENODEV;
+	if (tty3270_max_index < tty->index)
+		tty3270_max_index = tty->index;
 
 	/* Allocate tty3270 structure on first open. */
 	tp = tty3270_alloc_view();
 	if (IS_ERR(tp))
 		return PTR_ERR(tp);
 
-	rc = raw3270_add_view(&tp->view, &tty3270_fn,
-			      tty->index + RAW3270_FIRSTMINOR);
+	rc = raw3270_add_view(&tp->view, &tty3270_fn, tty->index);
 	if (rc) {
 		tty3270_free_view(tp);
 		return rc;
 	}
 
-	rc = tty3270_alloc_screen(tp);
-	if (rc) {
+	tp->screen = tty3270_alloc_screen(tp->view.rows, tp->view.cols);
+	if (IS_ERR(tp->screen)) {
+		rc = PTR_ERR(tp->screen);
 		raw3270_put_view(&tp->view);
 		raw3270_del_view(&tp->view);
+		tty3270_free_view(tp);
 		return rc;
 	}
 
@@ -926,6 +984,20 @@
 }
 
 /*
+ * This routine is called whenever a 3270 tty is opened.
+ */
+static int
+tty3270_open(struct tty_struct *tty, struct file *filp)
+{
+	struct tty3270 *tp = tty->driver_data;
+	struct tty_port *port = &tp->port;
+
+	port->count++;
+	tty_port_tty_set(port, tty);
+	return 0;
+}
+
+/*
  * This routine is called when the 3270 tty is closed. We wait
  * for the remaining request to be completed. Then we clean up.
  */
@@ -1753,6 +1825,7 @@
 static const struct tty_operations tty3270_ops = {
 	.install = tty3270_install,
 	.cleanup = tty3270_cleanup,
+	.open = tty3270_open,
 	.close = tty3270_close,
 	.write = tty3270_write,
 	.put_char = tty3270_put_char,
@@ -1771,6 +1844,22 @@
 	.set_termios = tty3270_set_termios
 };
 
+void tty3270_create_cb(int minor)
+{
+	tty_register_device(tty3270_driver, minor, NULL);
+}
+
+void tty3270_destroy_cb(int minor)
+{
+	tty_unregister_device(tty3270_driver, minor);
+}
+
+struct raw3270_notifier tty3270_notifier =
+{
+	.create = tty3270_create_cb,
+	.destroy = tty3270_destroy_cb,
+};
+
 /*
  * 3270 tty registration code called from tty_init().
 * Most kernel services (incl. kmalloc) are available at this point.
@@ -1780,23 +1869,25 @@
 	struct tty_driver *driver;
 	int ret;
 
-	driver = alloc_tty_driver(RAW3270_MAXDEVS);
-	if (!driver)
-		return -ENOMEM;
+	driver = tty_alloc_driver(RAW3270_MAXDEVS,
+				  TTY_DRIVER_REAL_RAW |
+				  TTY_DRIVER_DYNAMIC_DEV |
+				  TTY_DRIVER_RESET_TERMIOS);
+	if (IS_ERR(driver))
+		return PTR_ERR(driver);
 
 	/*
 	 * Initialize the tty_driver structure
 	 * Entries in tty3270_driver that are NOT initialized:
 	 * proc_entry, set_termios, flush_buffer, set_ldisc, write_proc
 	 */
-	driver->driver_name = "ttyTUB";
-	driver->name = "ttyTUB";
+	driver->driver_name = "tty3270";
+	driver->name = "3270/tty";
 	driver->major = IBM_TTY3270_MAJOR;
-	driver->minor_start = RAW3270_FIRSTMINOR;
+	driver->minor_start = 0;
 	driver->type = TTY_DRIVER_TYPE_SYSTEM;
 	driver->subtype = SYSTEM_TYPE_TTY;
 	driver->init_termios = tty_std_termios;
-	driver->flags = TTY_DRIVER_RESET_TERMIOS;
 	tty_set_operations(driver, &tty3270_ops);
 	ret = tty_register_driver(driver);
 	if (ret) {
@@ -1804,6 +1895,7 @@
 		return ret;
 	}
 	tty3270_driver = driver;
+	raw3270_register_notifier(&tty3270_notifier);
 	return 0;
 }
 
@@ -1812,6 +1904,7 @@
 {
 	struct tty_driver *driver;
 
+	raw3270_unregister_notifier(&tty3270_notifier);
 	driver = tty3270_driver;
 	tty3270_driver = NULL;
 	tty_unregister_driver(driver);
diff --git a/drivers/s390/char/zcore.c b/drivers/s390/char/zcore.c
index e3b9308..1d61a01 100644
--- a/drivers/s390/char/zcore.c
+++ b/drivers/s390/char/zcore.c
@@ -62,6 +62,7 @@
 static struct dentry *zcore_file;
 static struct dentry *zcore_memmap_file;
 static struct dentry *zcore_reipl_file;
+static struct dentry *zcore_hsa_file;
 static struct ipl_parameter_block *ipl_block;
 
 /*
@@ -77,6 +78,8 @@
 	int offs, blk_num;
 	static char buf[PAGE_SIZE] __attribute__((__aligned__(PAGE_SIZE)));
 
+	if (!hsa_available)
+		return -ENODATA;
 	if (count == 0)
 		return 0;
 
@@ -278,6 +281,15 @@
 }
 
 /*
+ * Release the HSA
+ */
+static void release_hsa(void)
+{
+	diag308(DIAG308_REL_HSA, NULL);
+	hsa_available = 0;
+}
+
+/*
  * Read routine for zcore character device
  * First 4K are dump header
  * Next 32MB are HSA Memory
@@ -363,8 +375,8 @@
 
 static int zcore_release(struct inode *inode, struct file *filep)
 {
-	diag308(DIAG308_REL_HSA, NULL);
-	hsa_available = 0;
+	if (hsa_available)
+		release_hsa();
 	return 0;
 }
 
@@ -474,6 +486,41 @@
 	.llseek		= no_llseek,
 };
 
+static ssize_t zcore_hsa_read(struct file *filp, char __user *buf,
+			      size_t count, loff_t *ppos)
+{
+	static char str[18];
+
+	if (hsa_available)
+		snprintf(str, sizeof(str), "%lx\n", ZFCPDUMP_HSA_SIZE);
+	else
+		snprintf(str, sizeof(str), "0\n");
+	return simple_read_from_buffer(buf, count, ppos, str, strlen(str));
+}
+
+static ssize_t zcore_hsa_write(struct file *filp, const char __user *buf,
+			       size_t count, loff_t *ppos)
+{
+	char value;
+
+	if (*ppos != 0)
+		return -EPIPE;
+	if (copy_from_user(&value, buf, 1))
+		return -EFAULT;
+	if (value != '0')
+		return -EINVAL;
+	release_hsa();
+	return count;
+}
+
+static const struct file_operations zcore_hsa_fops = {
+	.owner		= THIS_MODULE,
+	.write		= zcore_hsa_write,
+	.read		= zcore_hsa_read,
+	.open		= nonseekable_open,
+	.llseek		= no_llseek,
+};
+
 #ifdef CONFIG_32BIT
 
 static void __init set_lc_mask(struct save_area *map)
@@ -590,7 +637,7 @@
 	hdr->rmem_size = memory;
 	hdr->mem_end = sys_info.mem_size;
 	hdr->num_pages = memory / PAGE_SIZE;
-	hdr->tod = get_clock();
+	hdr->tod = get_tod_clock();
 	get_cpu_id(&hdr->cpu_id);
 	for (i = 0; zfcpdump_save_areas[i]; i++) {
 		prefix = zfcpdump_save_areas[i]->pref_reg;
@@ -658,6 +705,7 @@
 	rc = check_sdias();
 	if (rc)
 		goto fail;
+	hsa_available = 1;
 
 	rc = memcpy_hsa_kernel(&arch, __LC_AR_MODE_ID, 1);
 	if (rc)
@@ -714,9 +762,16 @@
 		rc = -ENOMEM;
 		goto fail_memmap_file;
 	}
-	hsa_available = 1;
+	zcore_hsa_file = debugfs_create_file("hsa", S_IRUSR|S_IWUSR, zcore_dir,
+					     NULL, &zcore_hsa_fops);
+	if (!zcore_hsa_file) {
+		rc = -ENOMEM;
+		goto fail_reipl_file;
+	}
 	return 0;
 
+fail_reipl_file:
+	debugfs_remove(zcore_reipl_file);
 fail_memmap_file:
 	debugfs_remove(zcore_memmap_file);
 fail_file:
@@ -733,6 +788,7 @@
 	debug_unregister(zcore_dbf);
 	sclp_sdias_exit();
 	free_page((unsigned long) ipl_block);
+	debugfs_remove(zcore_hsa_file);
 	debugfs_remove(zcore_reipl_file);
 	debugfs_remove(zcore_memmap_file);
 	debugfs_remove(zcore_file);
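
zcore now tracks hsa_available explicitly: HSA reads fail with -ENODATA once the HSA has been released, and the new debugfs file lets the dump tool query the remaining HSA size and release the HSA early by writing '0', instead of waiting for zcore_release(). A hedged userspace sketch of that interface; the debugfs mount point (/sys/kernel/debug) is an assumption:

	#include <fcntl.h>
	#include <stdio.h>
	#include <unistd.h>

	static const char hsa_path[] = "/sys/kernel/debug/zcore/hsa";

	int main(void)
	{
		char buf[32];
		ssize_t n;
		int fd;

		fd = open(hsa_path, O_RDONLY);		/* query HSA size */
		if (fd < 0)
			return 1;
		n = read(fd, buf, sizeof(buf) - 1);
		close(fd);
		if (n > 0) {
			buf[n] = '\0';
			printf("HSA size: %s", buf);	/* "0\n" once released */
		}

		fd = open(hsa_path, O_WRONLY);		/* trigger release_hsa() */
		if (fd < 0)
			return 1;
		if (write(fd, "0", 1) != 1)
			return 1;
		close(fd);
		return 0;
	}
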
diff --git a/drivers/s390/cio/chsc.c b/drivers/s390/cio/chsc.c
index 10729bb..31ceef1 100644
--- a/drivers/s390/cio/chsc.c
+++ b/drivers/s390/cio/chsc.c
@@ -435,7 +435,6 @@
 
 static void chsc_process_sei_nt2(struct chsc_sei_nt2_area *sei_area)
 {
-#ifdef CONFIG_PCI
 	switch (sei_area->cc) {
 	case 1:
 		zpci_event_error(sei_area->ccdf);
@@ -444,11 +443,10 @@
 		zpci_event_availability(sei_area->ccdf);
 		break;
 	default:
-		CIO_CRW_EVENT(2, "chsc: unhandled sei content code %d\n",
+		CIO_CRW_EVENT(2, "chsc: sei nt2 unhandled cc=%d\n",
 			      sei_area->cc);
 		break;
 	}
-#endif
 }
 
 static void chsc_process_sei_nt0(struct chsc_sei_nt0_area *sei_area)
@@ -471,13 +469,19 @@
 		chsc_process_sei_scm_change(sei_area);
 		break;
 	default: /* other stuff */
-		CIO_CRW_EVENT(4, "chsc: unhandled sei content code %d\n",
+		CIO_CRW_EVENT(2, "chsc: sei nt0 unhandled cc=%d\n",
 			      sei_area->cc);
 		break;
 	}
+
+	/* Check if we might have lost some information. */
+	if (sei_area->flags & 0x40) {
+		CIO_CRW_EVENT(2, "chsc: event overflow\n");
+		css_schedule_eval_all();
+	}
 }
 
-static int __chsc_process_crw(struct chsc_sei *sei, u64 ntsm)
+static void chsc_process_event_information(struct chsc_sei *sei, u64 ntsm)
 {
 	do {
 		memset(sei, 0, sizeof(*sei));
@@ -488,40 +492,37 @@
 		if (chsc(sei))
 			break;
 
-		if (sei->response.code == 0x0001) {
-			CIO_CRW_EVENT(2, "chsc: sei successful\n");
-
-			/* Check if we might have lost some information. */
-			if (sei->u.nt0_area.flags & 0x40) {
-				CIO_CRW_EVENT(2, "chsc: event overflow\n");
-				css_schedule_eval_all();
-			}
-
-			switch (sei->nt) {
-			case 0:
-				chsc_process_sei_nt0(&sei->u.nt0_area);
-				break;
-			case 2:
-				chsc_process_sei_nt2(&sei->u.nt2_area);
-				break;
-			default:
-				CIO_CRW_EVENT(2, "chsc: unhandled nt=%d\n",
-					      sei->nt);
-				break;
-			}
-		} else {
+		if (sei->response.code != 0x0001) {
 			CIO_CRW_EVENT(2, "chsc: sei failed (rc=%04x)\n",
 				      sei->response.code);
 			break;
 		}
-	} while (sei->u.nt0_area.flags & 0x80);
 
-	return 0;
+		CIO_CRW_EVENT(2, "chsc: sei successful (nt=%d)\n", sei->nt);
+		switch (sei->nt) {
+		case 0:
+			chsc_process_sei_nt0(&sei->u.nt0_area);
+			break;
+		case 2:
+			chsc_process_sei_nt2(&sei->u.nt2_area);
+			break;
+		default:
+			CIO_CRW_EVENT(2, "chsc: unhandled nt: %d\n", sei->nt);
+			break;
+		}
+	} while (sei->u.nt0_area.flags & 0x80);
 }
 
+/*
+ * Handle channel subsystem related CRWs.
+ * Use store event information to find out what's going on.
+ *
+ * Note: Access to sei_page is serialized through machine check handler
+ * thread, so no need for locking.
+ */
 static void chsc_process_crw(struct crw *crw0, struct crw *crw1, int overflow)
 {
-	struct chsc_sei *sei;
+	struct chsc_sei *sei = sei_page;
 
 	if (overflow) {
 		css_schedule_eval_all();
@@ -531,14 +532,9 @@
 		      "chn=%d, rsc=%X, anc=%d, erc=%X, rsid=%X\n",
 		      crw0->slct, crw0->oflw, crw0->chn, crw0->rsc, crw0->anc,
 		      crw0->erc, crw0->rsid);
-	if (!sei_page)
-		return;
-	/* Access to sei_page is serialized through machine check handler
-	 * thread, so no need for locking. */
-	sei = sei_page;
 
 	CIO_TRACE_EVENT(2, "prcss");
-	__chsc_process_crw(sei, CHSC_SEI_NT0 | CHSC_SEI_NT2);
+	chsc_process_event_information(sei, CHSC_SEI_NT0 | CHSC_SEI_NT2);
 }
 
 void chsc_chp_online(struct chp_id chpid)
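
Dropping the #ifdef CONFIG_PCI around the NT2 handler only compiles cleanly if the zpci event functions remain visible in non-PCI builds; presumably the arch header provides no-op stubs along these lines (illustrative pattern only, not the verbatim arch/s390 header):

	/* Assumed stub pattern; with it, callers need no #ifdef CONFIG_PCI. */
	#ifdef CONFIG_PCI
	void zpci_event_error(void *data);
	void zpci_event_availability(void *data);
	#else
	static inline void zpci_event_error(void *data) { }
	static inline void zpci_event_availability(void *data) { }
	#endif
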
diff --git a/drivers/s390/cio/chsc.h b/drivers/s390/cio/chsc.h
index 662dab4..227e05f 100644
--- a/drivers/s390/cio/chsc.h
+++ b/drivers/s390/cio/chsc.h
@@ -157,7 +157,7 @@
 #ifdef CONFIG_SCM_BUS
 int scm_update_information(void);
 #else /* CONFIG_SCM_BUS */
-#define scm_update_information() 0
+static inline int scm_update_information(void) { return 0; }
 #endif /* CONFIG_SCM_BUS */
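
Replacing the !CONFIG_SCM_BUS macro with a static inline keeps the call site type-checked in both configurations and avoids the warnings a bare "0" expression can provoke, while the compiler still optimizes the stub away. The same pattern in isolation (generic example, names are made up):

	/* Generic illustration of the stub pattern used above. */
	#ifdef CONFIG_FOO_BUS
	int foo_update_information(void);
	#else
	static inline int foo_update_information(void) { return 0; }
	#endif

	static int caller(void)
	{
		int rc = foo_update_information();	/* type-checked either way */

		return rc;
	}
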
 
 
diff --git a/drivers/s390/cio/cio.c b/drivers/s390/cio/cio.c
index c8faf62..986ef6a 100644
--- a/drivers/s390/cio/cio.c
+++ b/drivers/s390/cio/cio.c
@@ -962,9 +962,9 @@
 			atomic_inc(&chpid_reset_count);
 	}
 	/* Wait for machine check for all channel paths. */
-	timeout = get_clock() + (RCHP_TIMEOUT << 12);
+	timeout = get_tod_clock() + (RCHP_TIMEOUT << 12);
 	while (atomic_read(&chpid_reset_count) != 0) {
-		if (get_clock() > timeout)
+		if (get_tod_clock() > timeout)
 			break;
 		cpu_relax();
 	}
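
The get_clock()/get_clock_monotonic() callers in this series are renamed to get_tod_clock()/get_tod_clock_monotonic() without changing the surrounding arithmetic. The shifts by 12 here and in qeth_get_micros() further down rely on the s390 TOD clock format: the clock advances by 4096 units per microsecond (bit 51 is the microsecond bit). A minimal sketch of the conversion, as illustrative helpers rather than kernel API:

	/* Illustrative helpers only; the kernel open-codes these shifts. */
	static inline unsigned long long us_to_tod(unsigned long long us)
	{
		return us << 12;	/* microseconds -> TOD clock units */
	}

	static inline unsigned long long tod_to_us(unsigned long long tod)
	{
		return tod >> 12;	/* TOD clock units -> microseconds */
	}
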
diff --git a/drivers/s390/cio/cmf.c b/drivers/s390/cio/cmf.c
index c9fc61c..4495e06 100644
--- a/drivers/s390/cio/cmf.c
+++ b/drivers/s390/cio/cmf.c
@@ -33,7 +33,7 @@
 #include <linux/module.h>
 #include <linux/moduleparam.h>
 #include <linux/slab.h>
-#include <linux/timex.h>	/* get_clock() */
+#include <linux/timex.h>	/* get_tod_clock() */
 
 #include <asm/ccwdev.h>
 #include <asm/cio.h>
@@ -326,7 +326,7 @@
 		memcpy(cmb_data->last_block, hw_block, cmb_data->size);
 		memcpy(reference_buf, hw_block, cmb_data->size);
 	} while (memcmp(cmb_data->last_block, reference_buf, cmb_data->size));
-	cmb_data->last_update = get_clock();
+	cmb_data->last_update = get_tod_clock();
 	kfree(reference_buf);
 	return 0;
 }
@@ -428,7 +428,7 @@
 		memset(cmbops->align(cmb_data->hw_block), 0, cmb_data->size);
 		cmb_data->last_update = 0;
 	}
-	cdev->private->cmb_start_time = get_clock();
+	cdev->private->cmb_start_time = get_tod_clock();
 	spin_unlock_irq(cdev->ccwlock);
 }
 
diff --git a/drivers/s390/cio/css.c b/drivers/s390/cio/css.c
index fd00afd..a239237 100644
--- a/drivers/s390/cio/css.c
+++ b/drivers/s390/cio/css.c
@@ -780,7 +780,7 @@
 	css->cssid = nr;
 	dev_set_name(&css->device, "css%x", nr);
 	css->device.release = channel_subsystem_release;
-	tod_high = (u32) (get_clock() >> 32);
+	tod_high = (u32) (get_tod_clock() >> 32);
 	css_generate_pgid(css, tod_high);
 	return 0;
 }
diff --git a/drivers/s390/cio/device.c b/drivers/s390/cio/device.c
index 7cd5c68..c6767f5 100644
--- a/drivers/s390/cio/device.c
+++ b/drivers/s390/cio/device.c
@@ -632,6 +632,14 @@
 	return count;
 }
 
+static ssize_t vpm_show(struct device *dev, struct device_attribute *attr,
+			char *buf)
+{
+	struct subchannel *sch = to_subchannel(dev);
+
+	return sprintf(buf, "%02x\n", sch->vpm);
+}
+
 static DEVICE_ATTR(chpids, 0444, chpids_show, NULL);
 static DEVICE_ATTR(pimpampom, 0444, pimpampom_show, NULL);
 static DEVICE_ATTR(devtype, 0444, devtype_show, NULL);
@@ -640,11 +648,13 @@
 static DEVICE_ATTR(online, 0644, online_show, online_store);
 static DEVICE_ATTR(availability, 0444, available_show, NULL);
 static DEVICE_ATTR(logging, 0200, NULL, initiate_logging);
+static DEVICE_ATTR(vpm, 0444, vpm_show, NULL);
 
 static struct attribute *io_subchannel_attrs[] = {
 	&dev_attr_chpids.attr,
 	&dev_attr_pimpampom.attr,
 	&dev_attr_logging.attr,
+	&dev_attr_vpm.attr,
 	NULL,
 };
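
The new read-only vpm attribute makes the verified path mask of an I/O subchannel visible in sysfs, which previously required digging through debug traces. A hedged userspace sketch; the subchannel directory and bus id below are invented examples:

	#include <stdio.h>

	int main(void)
	{
		unsigned int vpm;
		/* hypothetical subchannel path; adjust to the real device */
		FILE *f = fopen("/sys/devices/css0/0.0.0008/vpm", "r");

		if (!f)
			return 1;
		if (fscanf(f, "%x", &vpm) == 1)
			printf("verified path mask: 0x%02x\n", vpm);
		fclose(f);
		return 0;
	}
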
 
diff --git a/drivers/s390/cio/device_fsm.c b/drivers/s390/cio/device_fsm.c
index 1bb1d00..c7638c5 100644
--- a/drivers/s390/cio/device_fsm.c
+++ b/drivers/s390/cio/device_fsm.c
@@ -47,7 +47,7 @@
 	cc = stsch_err(sch->schid, &schib);
 
 	printk(KERN_WARNING "cio: ccw device timeout occurred at %llx, "
-	       "device information:\n", get_clock());
+	       "device information:\n", get_tod_clock());
 	printk(KERN_WARNING "cio: orb:\n");
 	print_hex_dump(KERN_WARNING, "cio:  ", DUMP_PREFIX_NONE, 16, 1,
 		       orb, sizeof(*orb), 0);
diff --git a/drivers/s390/cio/device_pgid.c b/drivers/s390/cio/device_pgid.c
index 908d287..37ada05e 100644
--- a/drivers/s390/cio/device_pgid.c
+++ b/drivers/s390/cio/device_pgid.c
@@ -23,6 +23,8 @@
 #define PGID_RETRIES	256
 #define PGID_TIMEOUT	(10 * HZ)
 
+static void verify_start(struct ccw_device *cdev);
+
 /*
  * Process path verification data and report result.
  */
@@ -70,8 +72,8 @@
 	struct subchannel *sch = to_subchannel(cdev->dev.parent);
 	struct ccw_request *req = &cdev->private->req;
 
-	/* Adjust lpm. */
-	req->lpm = lpm_adjust(req->lpm, sch->schib.pmcw.pam & sch->opm);
+	req->lpm = lpm_adjust(req->lpm, sch->schib.pmcw.pam & sch->opm &
+			      ~cdev->private->path_noirq_mask);
 	if (!req->lpm)
 		goto out_nopath;
 	nop_build_cp(cdev);
@@ -102,10 +104,20 @@
 	struct subchannel *sch = to_subchannel(cdev->dev.parent);
 	struct ccw_request *req = &cdev->private->req;
 
-	if (rc == 0)
+	switch (rc) {
+	case 0:
 		sch->vpm |= req->lpm;
-	else if (rc != -EACCES)
+		break;
+	case -ETIME:
+		cdev->private->path_noirq_mask |= req->lpm;
+		break;
+	case -EACCES:
+		cdev->private->path_notoper_mask |= req->lpm;
+		break;
+	default:
 		goto err;
+	}
+	/* Continue on the next path. */
 	req->lpm >>= 1;
 	nop_do(cdev);
 	return;
@@ -132,6 +144,48 @@
 	req->cp		= cp;
 }
 
+static void pgid_wipeout_callback(struct ccw_device *cdev, void *data, int rc)
+{
+	if (rc) {
+		/* We don't know the path groups' state. Abort. */
+		verify_done(cdev, rc);
+		return;
+	}
+	/*
+	 * Path groups have been reset. Restart path verification but
+	 * leave paths in path_noirq_mask out.
+	 */
+	cdev->private->flags.pgid_unknown = 0;
+	verify_start(cdev);
+}
+
+/*
+ * Reset pathgroups and restart path verification, leave unusable paths out.
+ */
+static void pgid_wipeout_start(struct ccw_device *cdev)
+{
+	struct subchannel *sch = to_subchannel(cdev->dev.parent);
+	struct ccw_dev_id *id = &cdev->private->dev_id;
+	struct ccw_request *req = &cdev->private->req;
+	u8 fn;
+
+	CIO_MSG_EVENT(2, "wipe: device 0.%x.%04x: pvm=%02x nim=%02x\n",
+		      id->ssid, id->devno, cdev->private->pgid_valid_mask,
+		      cdev->private->path_noirq_mask);
+
+	/* Initialize request data. */
+	memset(req, 0, sizeof(*req));
+	req->timeout	= PGID_TIMEOUT;
+	req->maxretries	= PGID_RETRIES;
+	req->lpm	= sch->schib.pmcw.pam;
+	req->callback	= pgid_wipeout_callback;
+	fn = SPID_FUNC_DISBAND;
+	if (cdev->private->flags.mpath)
+		fn |= SPID_FUNC_MULTI_PATH;
+	spid_build_cp(cdev, fn);
+	ccw_request_start(cdev);
+}
+
 /*
  * Perform establish/resign SET PGID on a single path.
  */
@@ -157,11 +211,14 @@
 	return;
 
 out_nopath:
+	if (cdev->private->flags.pgid_unknown) {
+		/* At least one SPID could be partially done. */
+		pgid_wipeout_start(cdev);
+		return;
+	}
 	verify_done(cdev, sch->vpm ? 0 : -EACCES);
 }
 
-static void verify_start(struct ccw_device *cdev);
-
 /*
  * Process SET PGID request result for a single path.
  */
@@ -174,7 +231,12 @@
 	case 0:
 		sch->vpm |= req->lpm & sch->opm;
 		break;
+	case -ETIME:
+		cdev->private->flags.pgid_unknown = 1;
+		cdev->private->path_noirq_mask |= req->lpm;
+		break;
 	case -EACCES:
+		cdev->private->path_notoper_mask |= req->lpm;
 		break;
 	case -EOPNOTSUPP:
 		if (cdev->private->flags.mpath) {
@@ -330,8 +392,9 @@
 	else {
 		donepm = pgid_to_donepm(cdev);
 		sch->vpm = donepm & sch->opm;
-		cdev->private->pgid_todo_mask &= ~donepm;
 		cdev->private->pgid_reset_mask |= reset;
+		cdev->private->pgid_todo_mask &=
+			~(donepm | cdev->private->path_noirq_mask);
 		pgid_fill(cdev, pgid);
 	}
 out:
@@ -341,6 +404,10 @@
 		      cdev->private->pgid_todo_mask, mismatch, reserved, reset);
 	switch (rc) {
 	case 0:
+		if (cdev->private->flags.pgid_unknown) {
+			pgid_wipeout_start(cdev);
+			return;
+		}
 		/* Anything left to do? */
 		if (cdev->private->pgid_todo_mask == 0) {
 			verify_done(cdev, sch->vpm == 0 ? -EACCES : 0);
@@ -384,9 +451,10 @@
 {
 	struct subchannel *sch = to_subchannel(cdev->dev.parent);
 	struct ccw_request *req = &cdev->private->req;
+	int ret;
 
-	/* Adjust lpm if paths are not set in pam. */
-	req->lpm = lpm_adjust(req->lpm, sch->schib.pmcw.pam);
+	req->lpm = lpm_adjust(req->lpm, sch->schib.pmcw.pam &
+			      ~cdev->private->path_noirq_mask);
 	if (!req->lpm)
 		goto out_nopath;
 	snid_build_cp(cdev);
@@ -394,7 +462,13 @@
 	return;
 
 out_nopath:
-	snid_done(cdev, cdev->private->pgid_valid_mask ? 0 : -EACCES);
+	if (cdev->private->pgid_valid_mask)
+		ret = 0;
+	else if (cdev->private->path_noirq_mask)
+		ret = -ETIME;
+	else
+		ret = -EACCES;
+	snid_done(cdev, ret);
 }
 
 /*
@@ -404,10 +478,21 @@
 {
 	struct ccw_request *req = &cdev->private->req;
 
-	if (rc == 0)
+	switch (rc) {
+	case 0:
 		cdev->private->pgid_valid_mask |= req->lpm;
-	else if (rc != -EACCES)
+		break;
+	case -ETIME:
+		cdev->private->flags.pgid_unknown = 1;
+		cdev->private->path_noirq_mask |= req->lpm;
+		break;
+	case -EACCES:
+		cdev->private->path_notoper_mask |= req->lpm;
+		break;
+	default:
 		goto err;
+	}
+	/* Continue on the next path. */
 	req->lpm >>= 1;
 	snid_do(cdev);
 	return;
@@ -427,6 +512,13 @@
 
 	sch->vpm = 0;
 	sch->lpm = sch->schib.pmcw.pam;
+
+	/* Initialize PGID data. */
+	memset(cdev->private->pgid, 0, sizeof(cdev->private->pgid));
+	cdev->private->pgid_valid_mask = 0;
+	cdev->private->pgid_todo_mask = sch->schib.pmcw.pam;
+	cdev->private->path_notoper_mask = 0;
+
 	/* Initialize request data. */
 	memset(req, 0, sizeof(*req));
 	req->timeout	= PGID_TIMEOUT;
@@ -459,14 +551,8 @@
  */
 void ccw_device_verify_start(struct ccw_device *cdev)
 {
-	struct subchannel *sch = to_subchannel(cdev->dev.parent);
-
 	CIO_TRACE_EVENT(4, "vrfy");
 	CIO_HEX_EVENT(4, &cdev->private->dev_id, sizeof(cdev->private->dev_id));
-	/* Initialize PGID data. */
-	memset(cdev->private->pgid, 0, sizeof(cdev->private->pgid));
-	cdev->private->pgid_valid_mask = 0;
-	cdev->private->pgid_todo_mask = sch->schib.pmcw.pam;
 	/*
 	 * Initialize pathgroup and multipath state with target values.
 	 * They may change in the course of path verification.
@@ -474,6 +560,7 @@
 	cdev->private->flags.pgroup = cdev->private->options.pgroup;
 	cdev->private->flags.mpath = cdev->private->options.mpath;
 	cdev->private->flags.doverify = 0;
+	cdev->private->path_noirq_mask = 0;
 	verify_start(cdev);
 }
 
diff --git a/drivers/s390/cio/io_sch.h b/drivers/s390/cio/io_sch.h
index 76253df..b108f4a 100644
--- a/drivers/s390/cio/io_sch.h
+++ b/drivers/s390/cio/io_sch.h
@@ -126,6 +126,10 @@
 	u8 pgid_valid_mask;	/* mask of valid PGIDs */
 	u8 pgid_todo_mask;	/* mask of PGIDs to be adjusted */
 	u8 pgid_reset_mask;	/* mask of PGIDs which were reset */
+	u8 path_noirq_mask;	/* mask of paths for which no irq was
+				   received */
+	u8 path_notoper_mask;	/* mask of paths which were found
+				   not operable */
 	u8 path_gone_mask;	/* mask of paths, that became unavailable */
 	u8 path_new_mask;	/* mask of paths, that became available */
 	struct {
@@ -145,6 +149,7 @@
 		unsigned int resuming:1;    /* recognition while resume */
 		unsigned int pgroup:1;	    /* pathgroup is set up */
 		unsigned int mpath:1;	    /* multipathing is set up */
+		unsigned int pgid_unknown:1;/* unknown pgid state */
 		unsigned int initialized:1; /* set if initial reference held */
 	} __attribute__((packed)) flags;
 	unsigned long intparm;	/* user interruption parameter */
diff --git a/drivers/s390/cio/qdio_main.c b/drivers/s390/cio/qdio_main.c
index 1671d34..abc550e 100644
--- a/drivers/s390/cio/qdio_main.c
+++ b/drivers/s390/cio/qdio_main.c
@@ -338,10 +338,10 @@
 		retries++;
 
 		if (!start_time) {
-			start_time = get_clock();
+			start_time = get_tod_clock();
 			goto again;
 		}
-		if ((get_clock() - start_time) < QDIO_BUSY_BIT_PATIENCE)
+		if ((get_tod_clock() - start_time) < QDIO_BUSY_BIT_PATIENCE)
 			goto again;
 	}
 	if (retries) {
@@ -504,7 +504,7 @@
 	int count, stop;
 	unsigned char state = 0;
 
-	q->timestamp = get_clock();
+	q->timestamp = get_tod_clock();
 
 	/*
 	 * Don't check 128 buffers, as otherwise qdio_inbound_q_moved
@@ -563,7 +563,7 @@
 	if (bufnr != q->last_move) {
 		q->last_move = bufnr;
 		if (!is_thinint_irq(q->irq_ptr) && MACHINE_IS_LPAR)
-			q->u.in.timestamp = get_clock();
+			q->u.in.timestamp = get_tod_clock();
 		return 1;
 	} else
 		return 0;
@@ -595,7 +595,7 @@
 	 * At this point we know, that inbound first_to_check
 	 * has (probably) not moved (see qdio_inbound_processing).
 	 */
-	if (get_clock() > q->u.in.timestamp + QDIO_INPUT_THRESHOLD) {
+	if (get_tod_clock() > q->u.in.timestamp + QDIO_INPUT_THRESHOLD) {
 		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "in done:%02x",
 			      q->first_to_check);
 		return 1;
@@ -772,7 +772,7 @@
 	int count, stop;
 	unsigned char state = 0;
 
-	q->timestamp = get_clock();
+	q->timestamp = get_tod_clock();
 
 	if (need_siga_sync(q))
 		if (((queue_type(q) != QDIO_IQDIO_QFMT) &&
diff --git a/drivers/s390/net/qeth_core.h b/drivers/s390/net/qeth_core.h
index d690b33..d87961d 100644
--- a/drivers/s390/net/qeth_core.h
+++ b/drivers/s390/net/qeth_core.h
@@ -818,7 +818,7 @@
 
 static inline int qeth_get_micros(void)
 {
-	return (int) (get_clock() >> 12);
+	return (int) (get_tod_clock() >> 12);
 }
 
 static inline int qeth_get_ip_version(struct sk_buff *skb)
diff --git a/drivers/s390/scsi/zfcp_fsf.c b/drivers/s390/scsi/zfcp_fsf.c
index c96320d..c7e148f 100644
--- a/drivers/s390/scsi/zfcp_fsf.c
+++ b/drivers/s390/scsi/zfcp_fsf.c
@@ -727,7 +727,7 @@
 	zfcp_reqlist_add(adapter->req_list, req);
 
 	req->qdio_req.qdio_outb_usage = atomic_read(&qdio->req_q_free);
-	req->issued = get_clock();
+	req->issued = get_tod_clock();
 	if (zfcp_qdio_send(qdio, &req->qdio_req)) {
 		del_timer(&req->timer);
 		/* lookup request again, list might have changed */
diff --git a/drivers/s390/scsi/zfcp_qdio.c b/drivers/s390/scsi/zfcp_qdio.c
index 50b5615..665e3cf 100644
--- a/drivers/s390/scsi/zfcp_qdio.c
+++ b/drivers/s390/scsi/zfcp_qdio.c
@@ -68,7 +68,7 @@
 	unsigned long long now, span;
 	int used;
 
-	now = get_clock_monotonic();
+	now = get_tod_clock_monotonic();
 	span = (now - qdio->req_q_time) >> 12;
 	used = QDIO_MAX_BUFFERS_PER_Q - atomic_read(&qdio->req_q_free);
 	qdio->req_q_util += used * span;
diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c
index 60e48a1..fd47363 100644
--- a/drivers/tty/tty_io.c
+++ b/drivers/tty/tty_io.c
@@ -2199,6 +2199,7 @@
 	mutex_unlock(&tty->termios_mutex);
 	return 0;
 }
+EXPORT_SYMBOL(tty_do_resize);
 
 /**
  *	tiocswinsz		-	implement window size set ioctl
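
Exporting tty_do_resize() lets a driver (such as the 3270 resize callback added earlier in this series) push a geometry change into the generic tty layer, which updates the window size and signals the foreground process group. A hedged sketch of a driver-side caller; only tty_do_resize() and struct winsize are existing API here, the surrounding function is invented:

	#include <linux/tty.h>

	static int my3270_apply_geometry(struct tty_struct *tty,
					 unsigned short rows,
					 unsigned short cols)
	{
		struct winsize ws = {
			.ws_row = rows,
			.ws_col = cols,
		};

		/* updates tty->winsize and sends SIGWINCH to the fg pgrp */
		return tty_do_resize(tty, &ws);
	}
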
diff --git a/drivers/uio/Kconfig b/drivers/uio/Kconfig
index f56d185..e92eeaf 100644
--- a/drivers/uio/Kconfig
+++ b/drivers/uio/Kconfig
@@ -1,6 +1,5 @@
 menuconfig UIO
 	tristate "Userspace I/O drivers"
-	depends on !S390
 	help
 	  Enable this to allow the userspace driver core code to be
 	  built.  This code allows userspace programs easy access to
diff --git a/include/asm-generic/io.h b/include/asm-generic/io.h
index 33bbbae..8e260cf 100644
--- a/include/asm-generic/io.h
+++ b/include/asm-generic/io.h
@@ -53,8 +53,18 @@
 #endif
 
 #define readb __raw_readb
-#define readw(addr) __le16_to_cpu(__raw_readw(addr))
-#define readl(addr) __le32_to_cpu(__raw_readl(addr))
+
+#define readw readw
+static inline u16 readw(const volatile void __iomem *addr)
+{
+	return __le16_to_cpu(__raw_readw(addr));
+}
+
+#define readl readl
+static inline u32 readl(const volatile void __iomem *addr)
+{
+	return __le32_to_cpu(__raw_readl(addr));
+}
 
 #ifndef __raw_writeb
 static inline void __raw_writeb(u8 b, volatile void __iomem *addr)
@@ -89,7 +99,11 @@
 }
 #endif
 
-#define readq(addr) __le64_to_cpu(__raw_readq(addr))
+#define readq readq
+static inline u64 readq(const volatile void __iomem *addr)
+{
+	return __le64_to_cpu(__raw_readq(addr));
+}
 
 #ifndef __raw_writeq
 static inline void __raw_writeq(u64 b, volatile void __iomem *addr)
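
Turning readw/readl/readq from macros into static inlines gives them real prototypes, so the address argument is type-checked as a const volatile void __iomem pointer and the return type is fixed, while the self-referencing "#define readw readw" leaves a token that other headers can test with #ifdef. One common use of that marker is a guarded definition, sketched here as a generic idiom rather than this header's exact layout:

	/* Illustrative idiom enabled by the self-referencing define. */
	#ifndef readw
	#define readw readw
	static inline u16 readw(const volatile void __iomem *addr)
	{
		return __le16_to_cpu(__raw_readw(addr));
	}
	#endif
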
diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h
index 5cf680a9..bfd8768 100644
--- a/include/asm-generic/pgtable.h
+++ b/include/asm-generic/pgtable.h
@@ -197,16 +197,6 @@
 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
 #endif
 
-#ifndef __HAVE_ARCH_PAGE_TEST_AND_CLEAR_DIRTY
-#define page_test_and_clear_dirty(pfn, mapped)	(0)
-#endif
-
-#ifndef __HAVE_ARCH_PAGE_TEST_AND_CLEAR_DIRTY
-#define pte_maybe_dirty(pte)		pte_dirty(pte)
-#else
-#define pte_maybe_dirty(pte)		(1)
-#endif
-
 #ifndef __HAVE_ARCH_PAGE_TEST_AND_CLEAR_YOUNG
 #define page_test_and_clear_young(pfn) (0)
 #endif
diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h
index 70473da..6d53675 100644
--- a/include/linux/page-flags.h
+++ b/include/linux/page-flags.h
@@ -303,21 +303,13 @@
 
 static inline void SetPageUptodate(struct page *page)
 {
-#ifdef CONFIG_S390
-	if (!test_and_set_bit(PG_uptodate, &page->flags))
-		page_set_storage_key(page_to_phys(page), PAGE_DEFAULT_KEY, 0);
-#else
 	/*
 	 * Memory barrier must be issued before setting the PG_uptodate bit,
 	 * so that all previous stores issued in order to bring the page
 	 * uptodate are actually visible before PageUptodate becomes true.
-	 *
-	 * s390 doesn't need an explicit smp_wmb here because the test and
-	 * set bit already provides full barriers.
 	 */
 	smp_wmb();
 	set_bit(PG_uptodate, &(page)->flags);
-#endif
 }
 
 CLEARPAGEFLAG(Uptodate, uptodate)
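
With the s390 storage-key special case removed, SetPageUptodate() always issues the smp_wmb() before setting the flag; a reader that observes PG_uptodate is expected to pair that with a read barrier so the stores that filled the page are visible. A generic illustration of the pairing, not the verbatim page-flags.h code:

	/* Writer/reader barrier pairing, illustrative only. */
	static inline void publish_uptodate(struct page *page)
	{
		/* all stores filling the page precede this barrier */
		smp_wmb();
		set_bit(PG_uptodate, &page->flags);
	}

	static inline int consume_uptodate(struct page *page)
	{
		int ret = test_bit(PG_uptodate, &page->flags);

		/* pairs with the smp_wmb() in publish_uptodate() */
		smp_rmb();
		return ret;
	}
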
diff --git a/mm/rmap.c b/mm/rmap.c
index 2c78f8c..3d38edf 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -1126,7 +1126,6 @@
  */
 void page_remove_rmap(struct page *page)
 {
-	struct address_space *mapping = page_mapping(page);
 	bool anon = PageAnon(page);
 	bool locked;
 	unsigned long flags;
@@ -1144,29 +1143,6 @@
 		goto out;
 
 	/*
-	 * Now that the last pte has gone, s390 must transfer dirty
-	 * flag from storage key to struct page.  We can usually skip
-	 * this if the page is anon, so about to be freed; but perhaps
-	 * not if it's in swapcache - there might be another pte slot
-	 * containing the swap entry, but page not yet written to swap.
-	 *
-	 * And we can skip it on file pages, so long as the filesystem
-	 * participates in dirty tracking (note that this is not only an
-	 * optimization but also solves problems caused by dirty flag in
-	 * storage key getting set by a write from inside kernel); but need to
-	 * catch shm and tmpfs and ramfs pages which have been modified since
-	 * creation by read fault.
-	 *
-	 * Note that mapping must be decided above, before decrementing
-	 * mapcount (which luckily provides a barrier): once page is unmapped,
-	 * it could be truncated and page->mapping reset to NULL at any moment.
-	 * Note also that we are relying on page_mapping(page) to set mapping
-	 * to &swapper_space when PageSwapCache(page).
-	 */
-	if (mapping && !mapping_cap_account_dirty(mapping) &&
-	    page_test_and_clear_dirty(page_to_pfn(page), 1))
-		set_page_dirty(page);
-	/*
 	 * Hugepages are not counted in NR_ANON_PAGES nor NR_FILE_MAPPED
 	 * and not charged by memcg for now.
 	 */
diff --git a/net/iucv/iucv.c b/net/iucv/iucv.c
index df08250..4fe76ff 100644
--- a/net/iucv/iucv.c
+++ b/net/iucv/iucv.c
@@ -831,8 +831,11 @@
 {
 	int i;
 
+	if (cpumask_empty(&iucv_irq_cpumask))
+		return NOTIFY_DONE;
+
 	get_online_cpus();
-	on_each_cpu(iucv_block_cpu, NULL, 1);
+	on_each_cpu_mask(&iucv_irq_cpumask, iucv_block_cpu, NULL, 1);
 	preempt_disable();
 	for (i = 0; i < iucv_max_pathid; i++) {
 		if (iucv_path_table[i])