Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/roland/infiniband

* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/roland/infiniband:
  IPoIB: Fix world-writable child interface control sysfs attributes
  IB/qib: Clean up properly if qib_init() fails
  IB/qib: Completion queue callback needs to be single threaded
  IB/qib: Update 7322 serdes tables
  IB/qib: Clear 6120 hardware error register
  IB/qib: Clear eager buffer memory for each new process
  IB/qib: Mask hardware error during link reset
  IB/qib: Don't mark VL15 bufs as WC to avoid a rare 7322 chip problem
  RDMA/cxgb4: Derive smac_idx from port viid
  RDMA/cxgb4: Avoid false GTS CIDX_INC overflows
  RDMA/cxgb4: Don't call abort_connection() for active connect failures
  RDMA/cxgb4: Use the DMA state API instead of the pci equivalents
diff --git a/drivers/infiniband/hw/cxgb4/cm.c b/drivers/infiniband/hw/cxgb4/cm.c
index 30ce0a8..855ee44 100644
--- a/drivers/infiniband/hw/cxgb4/cm.c
+++ b/drivers/infiniband/hw/cxgb4/cm.c
@@ -969,7 +969,8 @@
 		goto err;
 	goto out;
 err:
-	abort_connection(ep, skb, GFP_KERNEL);
+	state_set(&ep->com, ABORTING);
+	send_abort(ep, skb, GFP_KERNEL);
 out:
 	connect_reply_upcall(ep, err);
 	return;
@@ -1372,7 +1373,7 @@
 				    pdev, 0);
 		mtu = pdev->mtu;
 		tx_chan = cxgb4_port_chan(pdev);
-		smac_idx = tx_chan << 1;
+		smac_idx = (cxgb4_port_viid(pdev) & 0x7F) << 1;
 		step = dev->rdev.lldi.ntxq / dev->rdev.lldi.nchan;
 		txq_idx = cxgb4_port_idx(pdev) * step;
 		step = dev->rdev.lldi.nrxq / dev->rdev.lldi.nchan;
@@ -1383,7 +1384,7 @@
 					dst->neighbour->dev, 0);
 		mtu = dst_mtu(dst);
 		tx_chan = cxgb4_port_chan(dst->neighbour->dev);
-		smac_idx = tx_chan << 1;
+		smac_idx = (cxgb4_port_viid(dst->neighbour->dev) & 0x7F) << 1;
 		step = dev->rdev.lldi.ntxq / dev->rdev.lldi.nchan;
 		txq_idx = cxgb4_port_idx(dst->neighbour->dev) * step;
 		step = dev->rdev.lldi.nrxq / dev->rdev.lldi.nchan;
@@ -1950,7 +1951,7 @@
 					pdev, 0);
 		ep->mtu = pdev->mtu;
 		ep->tx_chan = cxgb4_port_chan(pdev);
-		ep->smac_idx = ep->tx_chan << 1;
+		ep->smac_idx = (cxgb4_port_viid(pdev) & 0x7F) << 1;
 		step = ep->com.dev->rdev.lldi.ntxq /
 		       ep->com.dev->rdev.lldi.nchan;
 		ep->txq_idx = cxgb4_port_idx(pdev) * step;
@@ -1965,7 +1966,8 @@
 					ep->dst->neighbour->dev, 0);
 		ep->mtu = dst_mtu(ep->dst);
 		ep->tx_chan = cxgb4_port_chan(ep->dst->neighbour->dev);
-		ep->smac_idx = ep->tx_chan << 1;
+		ep->smac_idx = (cxgb4_port_viid(ep->dst->neighbour->dev) &
+				0x7F) << 1;
 		step = ep->com.dev->rdev.lldi.ntxq /
 		       ep->com.dev->rdev.lldi.nchan;
 		ep->txq_idx = cxgb4_port_idx(ep->dst->neighbour->dev) * step;
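
The smac_idx change is the same in all four call sites above: instead of doubling the TX channel number, the driver now keeps the low 7 bits of the port VIID returned by cxgb4_port_viid() and shifts them left by one. A standalone sketch of that derivation (the VIID value below is invented purely for illustration):

#include <stdio.h>

static unsigned int smac_idx_from_viid(unsigned int viid)
{
	/* low 7 bits of the VIID, shifted left by one, replacing the
	 * old tx_chan << 1 computation */
	return (viid & 0x7F) << 1;
}

int main(void)
{
	unsigned int viid = 0x86;	/* hypothetical VIID value */

	printf("smac_idx = %u\n", smac_idx_from_viid(viid));
	return 0;
}
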
diff --git a/drivers/infiniband/hw/cxgb4/cq.c b/drivers/infiniband/hw/cxgb4/cq.c
index 2447f52..fac5c6e 100644
--- a/drivers/infiniband/hw/cxgb4/cq.c
+++ b/drivers/infiniband/hw/cxgb4/cq.c
@@ -77,7 +77,7 @@
 	kfree(cq->sw_queue);
 	dma_free_coherent(&(rdev->lldi.pdev->dev),
 			  cq->memsize, cq->queue,
-			  pci_unmap_addr(cq, mapping));
+			  dma_unmap_addr(cq, mapping));
 	c4iw_put_cqid(rdev, cq->cqid, uctx);
 	return ret;
 }
@@ -112,7 +112,7 @@
 		ret = -ENOMEM;
 		goto err3;
 	}
-	pci_unmap_addr_set(cq, mapping, cq->dma_addr);
+	dma_unmap_addr_set(cq, mapping, cq->dma_addr);
 	memset(cq->queue, 0, cq->memsize);
 
 	/* build fw_ri_res_wr */
@@ -179,7 +179,7 @@
 	return 0;
 err4:
 	dma_free_coherent(&rdev->lldi.pdev->dev, cq->memsize, cq->queue,
-			  pci_unmap_addr(cq, mapping));
+			  dma_unmap_addr(cq, mapping));
 err3:
 	kfree(cq->sw_queue);
 err2:
@@ -764,7 +764,7 @@
 	struct c4iw_create_cq_resp uresp;
 	struct c4iw_ucontext *ucontext = NULL;
 	int ret;
-	size_t memsize;
+	size_t memsize, hwentries;
 	struct c4iw_mm_entry *mm, *mm2;
 
 	PDBG("%s ib_dev %p entries %d\n", __func__, ibdev, entries);
@@ -788,14 +788,29 @@
 	 * entries must be multiple of 16 for HW.
 	 */
 	entries = roundup(entries, 16);
-	memsize = entries * sizeof *chp->cq.queue;
+
+	/*
+	 * Make actual HW queue 2x to avoid cidx_inc overflows.
+	 */
+	hwentries = entries * 2;
+
+	/*
+	 * Make HW queue at least 64 entries so GTS updates aren't too
+	 * frequent.
+	 */
+	if (hwentries < 64)
+		hwentries = 64;
+
+	memsize = hwentries * sizeof *chp->cq.queue;
 
 	/*
 	 * memsize must be a multiple of the page size if it's a user cq.
 	 */
-	if (ucontext)
+	if (ucontext) {
 		memsize = roundup(memsize, PAGE_SIZE);
-	chp->cq.size = entries;
+		hwentries = memsize / sizeof *chp->cq.queue;
+	}
+	chp->cq.size = hwentries;
 	chp->cq.memsize = memsize;
 
 	ret = create_cq(&rhp->rdev, &chp->cq,
@@ -805,7 +820,7 @@
 
 	chp->rhp = rhp;
 	chp->cq.size--;				/* status page */
-	chp->ibcq.cqe = chp->cq.size - 1;
+	chp->ibcq.cqe = entries - 2;
 	spin_lock_init(&chp->lock);
 	atomic_set(&chp->refcnt, 1);
 	init_waitqueue_head(&chp->wait);
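
The create_cq() changes above encode the sizing policy spelled out in the comments: the requested entry count is rounded up to a multiple of 16, the hardware queue is doubled to absorb CIDX_INC updates and clamped to at least 64 entries, a user CQ's memory is then page-rounded with hwentries recomputed from it, and the CQE count reported back through ibcq.cqe is entries - 2. A standalone sketch of that arithmetic (the CQE size and page size are assumptions chosen only for the example, not values taken from the driver):

#include <stdio.h>

#define CQE_SIZE	64UL	/* assumed size of one CQ entry */
#define PAGE_SZ		4096UL	/* assumed page size */

static unsigned long roundup_to(unsigned long x, unsigned long m)
{
	return ((x + m - 1) / m) * m;
}

int main(void)
{
	unsigned long entries = 100;	/* what the ULP asked for */
	unsigned long hwentries, memsize;
	int user_cq = 1;

	entries = roundup_to(entries, 16);	/* HW wants multiples of 16 */
	hwentries = entries * 2;		/* 2x to avoid CIDX_INC overflow */
	if (hwentries < 64)			/* keep GTS updates infrequent */
		hwentries = 64;
	memsize = hwentries * CQE_SIZE;
	if (user_cq) {				/* user CQs are mmap'ed */
		memsize = roundup_to(memsize, PAGE_SZ);
		hwentries = memsize / CQE_SIZE;
	}

	printf("entries=%lu hwentries=%lu memsize=%lu reported cqe=%lu\n",
	       entries, hwentries, memsize, entries - 2);
	return 0;
}
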
diff --git a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
index 277ab58..d33e1a6 100644
--- a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
+++ b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
@@ -261,7 +261,7 @@
 
 struct c4iw_fr_page_list {
 	struct ib_fast_reg_page_list ibpl;
-	DECLARE_PCI_UNMAP_ADDR(mapping);
+	DEFINE_DMA_UNMAP_ADDR(mapping);
 	dma_addr_t dma_addr;
 	struct c4iw_dev *dev;
 	int size;
diff --git a/drivers/infiniband/hw/cxgb4/mem.c b/drivers/infiniband/hw/cxgb4/mem.c
index 7f94da1..82b5703 100644
--- a/drivers/infiniband/hw/cxgb4/mem.c
+++ b/drivers/infiniband/hw/cxgb4/mem.c
@@ -764,7 +764,7 @@
 	if (!c4pl)
 		return ERR_PTR(-ENOMEM);
 
-	pci_unmap_addr_set(c4pl, mapping, dma_addr);
+	dma_unmap_addr_set(c4pl, mapping, dma_addr);
 	c4pl->dma_addr = dma_addr;
 	c4pl->dev = dev;
 	c4pl->size = size;
@@ -779,7 +779,7 @@
 	struct c4iw_fr_page_list *c4pl = to_c4iw_fr_page_list(ibpl);
 
 	dma_free_coherent(&c4pl->dev->rdev.lldi.pdev->dev, c4pl->size,
-			  c4pl, pci_unmap_addr(c4pl, mapping));
+			  c4pl, dma_unmap_addr(c4pl, mapping));
 }
 
 int c4iw_dereg_mr(struct ib_mr *ib_mr)
diff --git a/drivers/infiniband/hw/cxgb4/qp.c b/drivers/infiniband/hw/cxgb4/qp.c
index 0c28ed1..7065cb3 100644
--- a/drivers/infiniband/hw/cxgb4/qp.c
+++ b/drivers/infiniband/hw/cxgb4/qp.c
@@ -40,10 +40,10 @@
 	 */
 	dma_free_coherent(&(rdev->lldi.pdev->dev),
 			  wq->rq.memsize, wq->rq.queue,
-			  pci_unmap_addr(&wq->rq, mapping));
+			  dma_unmap_addr(&wq->rq, mapping));
 	dma_free_coherent(&(rdev->lldi.pdev->dev),
 			  wq->sq.memsize, wq->sq.queue,
-			  pci_unmap_addr(&wq->sq, mapping));
+			  dma_unmap_addr(&wq->sq, mapping));
 	c4iw_rqtpool_free(rdev, wq->rq.rqt_hwaddr, wq->rq.rqt_size);
 	kfree(wq->rq.sw_rq);
 	kfree(wq->sq.sw_sq);
@@ -99,7 +99,7 @@
 	if (!wq->sq.queue)
 		goto err5;
 	memset(wq->sq.queue, 0, wq->sq.memsize);
-	pci_unmap_addr_set(&wq->sq, mapping, wq->sq.dma_addr);
+	dma_unmap_addr_set(&wq->sq, mapping, wq->sq.dma_addr);
 
 	wq->rq.queue = dma_alloc_coherent(&(rdev->lldi.pdev->dev),
 					  wq->rq.memsize, &(wq->rq.dma_addr),
@@ -112,7 +112,7 @@
 		wq->rq.queue,
 		(unsigned long long)virt_to_phys(wq->rq.queue));
 	memset(wq->rq.queue, 0, wq->rq.memsize);
-	pci_unmap_addr_set(&wq->rq, mapping, wq->rq.dma_addr);
+	dma_unmap_addr_set(&wq->rq, mapping, wq->rq.dma_addr);
 
 	wq->db = rdev->lldi.db_reg;
 	wq->gts = rdev->lldi.gts_reg;
@@ -217,11 +217,11 @@
 err7:
 	dma_free_coherent(&(rdev->lldi.pdev->dev),
 			  wq->rq.memsize, wq->rq.queue,
-			  pci_unmap_addr(&wq->rq, mapping));
+			  dma_unmap_addr(&wq->rq, mapping));
 err6:
 	dma_free_coherent(&(rdev->lldi.pdev->dev),
 			  wq->sq.memsize, wq->sq.queue,
-			  pci_unmap_addr(&wq->sq, mapping));
+			  dma_unmap_addr(&wq->sq, mapping));
 err5:
 	c4iw_rqtpool_free(rdev, wq->rq.rqt_hwaddr, wq->rq.rqt_size);
 err4:
diff --git a/drivers/infiniband/hw/cxgb4/t4.h b/drivers/infiniband/hw/cxgb4/t4.h
index 1057cb9..9cf8d85 100644
--- a/drivers/infiniband/hw/cxgb4/t4.h
+++ b/drivers/infiniband/hw/cxgb4/t4.h
@@ -279,7 +279,7 @@
 struct t4_sq {
 	union t4_wr *queue;
 	dma_addr_t dma_addr;
-	DECLARE_PCI_UNMAP_ADDR(mapping);
+	DEFINE_DMA_UNMAP_ADDR(mapping);
 	struct t4_swsqe *sw_sq;
 	struct t4_swsqe *oldest_read;
 	u64 udb;
@@ -298,7 +298,7 @@
 struct t4_rq {
 	union  t4_recv_wr *queue;
 	dma_addr_t dma_addr;
-	DECLARE_PCI_UNMAP_ADDR(mapping);
+	DEFINE_DMA_UNMAP_ADDR(mapping);
 	struct t4_swrqe *sw_rq;
 	u64 udb;
 	size_t memsize;
@@ -429,7 +429,7 @@
 struct t4_cq {
 	struct t4_cqe *queue;
 	dma_addr_t dma_addr;
-	DECLARE_PCI_UNMAP_ADDR(mapping);
+	DEFINE_DMA_UNMAP_ADDR(mapping);
 	struct t4_cqe *sw_queue;
 	void __iomem *gts;
 	struct c4iw_rdev *rdev;
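
The pci_unmap_addr*() to dma_unmap_addr*() conversion seen in cq.c, mem.c, qp.c and the structure declarations above (iw_cxgb4.h and t4.h) is a rename to the generic DMA state API; the usage pattern is unchanged: declare the slot with DEFINE_DMA_UNMAP_ADDR(), record the bus address with dma_unmap_addr_set() after dma_alloc_coherent(), and hand it back via dma_unmap_addr() when freeing. A compile-anywhere illustration of that pattern (the macro definitions below are simplified stand-ins, not the kernel's, which live in <linux/dma-mapping.h> and compile away when the architecture keeps no unmap state):

#include <stdio.h>

typedef unsigned long long dma_addr_t;

#define DEFINE_DMA_UNMAP_ADDR(name)		dma_addr_t name
#define dma_unmap_addr_set(p, name, val)	((p)->name = (val))
#define dma_unmap_addr(p, name)			((p)->name)

struct fake_queue {
	void *queue;				/* CPU address */
	DEFINE_DMA_UNMAP_ADDR(mapping);		/* bus address for unmap */
};

int main(void)
{
	struct fake_queue q = { 0 };

	/* after dma_alloc_coherent() the driver records the bus address */
	dma_unmap_addr_set(&q, mapping, 0x1000ULL);

	/* at teardown the same value is handed back to dma_free_coherent() */
	printf("unmap addr = 0x%llx\n", dma_unmap_addr(&q, mapping));
	return 0;
}
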
diff --git a/drivers/infiniband/hw/qib/qib.h b/drivers/infiniband/hw/qib/qib.h
index 32d9208..3593983 100644
--- a/drivers/infiniband/hw/qib/qib.h
+++ b/drivers/infiniband/hw/qib/qib.h
@@ -686,6 +686,7 @@
 	void __iomem *piobase;
 	/* mem-mapped pointer to base of user chip regs (if using WC PAT) */
 	u64 __iomem *userbase;
+	void __iomem *piovl15base; /* base of VL15 buffers, if not WC */
 	/*
 	 * points to area where PIOavail registers will be DMA'ed.
 	 * Has to be on a page of it's own, because the page will be
diff --git a/drivers/infiniband/hw/qib/qib_7322_regs.h b/drivers/infiniband/hw/qib/qib_7322_regs.h
index a97440b..32dc81f 100644
--- a/drivers/infiniband/hw/qib/qib_7322_regs.h
+++ b/drivers/infiniband/hw/qib/qib_7322_regs.h
@@ -742,15 +742,15 @@
 #define QIB_7322_HwErrMask_IBCBusFromSPCParityErrMask_1_LSB 0xF
 #define QIB_7322_HwErrMask_IBCBusFromSPCParityErrMask_1_MSB 0xF
 #define QIB_7322_HwErrMask_IBCBusFromSPCParityErrMask_1_RMASK 0x1
-#define QIB_7322_HwErrMask_statusValidNoEopMask_1_LSB 0xE
-#define QIB_7322_HwErrMask_statusValidNoEopMask_1_MSB 0xE
-#define QIB_7322_HwErrMask_statusValidNoEopMask_1_RMASK 0x1
+#define QIB_7322_HwErrMask_IBCBusToSPCParityErrMask_1_LSB 0xE
+#define QIB_7322_HwErrMask_IBCBusToSPCParityErrMask_1_MSB 0xE
+#define QIB_7322_HwErrMask_IBCBusToSPCParityErrMask_1_RMASK 0x1
 #define QIB_7322_HwErrMask_IBCBusFromSPCParityErrMask_0_LSB 0xD
 #define QIB_7322_HwErrMask_IBCBusFromSPCParityErrMask_0_MSB 0xD
 #define QIB_7322_HwErrMask_IBCBusFromSPCParityErrMask_0_RMASK 0x1
-#define QIB_7322_HwErrMask_statusValidNoEopMask_0_LSB 0xC
-#define QIB_7322_HwErrMask_statusValidNoEopMask_0_MSB 0xC
-#define QIB_7322_HwErrMask_statusValidNoEopMask_0_RMASK 0x1
+#define QIB_7322_HwErrMask_statusValidNoEopMask_LSB 0xC
+#define QIB_7322_HwErrMask_statusValidNoEopMask_MSB 0xC
+#define QIB_7322_HwErrMask_statusValidNoEopMask_RMASK 0x1
 #define QIB_7322_HwErrMask_LATriggeredMask_LSB 0xB
 #define QIB_7322_HwErrMask_LATriggeredMask_MSB 0xB
 #define QIB_7322_HwErrMask_LATriggeredMask_RMASK 0x1
@@ -796,15 +796,15 @@
 #define QIB_7322_HwErrStatus_IBCBusFromSPCParityErr_1_LSB 0xF
 #define QIB_7322_HwErrStatus_IBCBusFromSPCParityErr_1_MSB 0xF
 #define QIB_7322_HwErrStatus_IBCBusFromSPCParityErr_1_RMASK 0x1
-#define QIB_7322_HwErrStatus_statusValidNoEop_1_LSB 0xE
-#define QIB_7322_HwErrStatus_statusValidNoEop_1_MSB 0xE
-#define QIB_7322_HwErrStatus_statusValidNoEop_1_RMASK 0x1
+#define QIB_7322_HwErrStatus_IBCBusToSPCParityErr_1_LSB 0xE
+#define QIB_7322_HwErrStatus_IBCBusToSPCParityErr_1_MSB 0xE
+#define QIB_7322_HwErrStatus_IBCBusToSPCParityErr_1_RMASK 0x1
 #define QIB_7322_HwErrStatus_IBCBusFromSPCParityErr_0_LSB 0xD
 #define QIB_7322_HwErrStatus_IBCBusFromSPCParityErr_0_MSB 0xD
 #define QIB_7322_HwErrStatus_IBCBusFromSPCParityErr_0_RMASK 0x1
-#define QIB_7322_HwErrStatus_statusValidNoEop_0_LSB 0xC
-#define QIB_7322_HwErrStatus_statusValidNoEop_0_MSB 0xC
-#define QIB_7322_HwErrStatus_statusValidNoEop_0_RMASK 0x1
+#define QIB_7322_HwErrStatus_statusValidNoEop_LSB 0xC
+#define QIB_7322_HwErrStatus_statusValidNoEop_MSB 0xC
+#define QIB_7322_HwErrStatus_statusValidNoEop_RMASK 0x1
 #define QIB_7322_HwErrStatus_LATriggered_LSB 0xB
 #define QIB_7322_HwErrStatus_LATriggered_MSB 0xB
 #define QIB_7322_HwErrStatus_LATriggered_RMASK 0x1
@@ -850,15 +850,15 @@
 #define QIB_7322_HwErrClear_IBCBusFromSPCParityErrClear_1_LSB 0xF
 #define QIB_7322_HwErrClear_IBCBusFromSPCParityErrClear_1_MSB 0xF
 #define QIB_7322_HwErrClear_IBCBusFromSPCParityErrClear_1_RMASK 0x1
-#define QIB_7322_HwErrClear_IBCBusToSPCparityErrClear_1_LSB 0xE
-#define QIB_7322_HwErrClear_IBCBusToSPCparityErrClear_1_MSB 0xE
-#define QIB_7322_HwErrClear_IBCBusToSPCparityErrClear_1_RMASK 0x1
+#define QIB_7322_HwErrClear_IBCBusToSPCParityErrClear_1_LSB 0xE
+#define QIB_7322_HwErrClear_IBCBusToSPCParityErrClear_1_MSB 0xE
+#define QIB_7322_HwErrClear_IBCBusToSPCParityErrClear_1_RMASK 0x1
 #define QIB_7322_HwErrClear_IBCBusFromSPCParityErrClear_0_LSB 0xD
 #define QIB_7322_HwErrClear_IBCBusFromSPCParityErrClear_0_MSB 0xD
 #define QIB_7322_HwErrClear_IBCBusFromSPCParityErrClear_0_RMASK 0x1
-#define QIB_7322_HwErrClear_IBCBusToSPCparityErrClear_0_LSB 0xC
-#define QIB_7322_HwErrClear_IBCBusToSPCparityErrClear_0_MSB 0xC
-#define QIB_7322_HwErrClear_IBCBusToSPCparityErrClear_0_RMASK 0x1
+#define QIB_7322_HwErrClear_statusValidNoEopClear_LSB 0xC
+#define QIB_7322_HwErrClear_statusValidNoEopClear_MSB 0xC
+#define QIB_7322_HwErrClear_statusValidNoEopClear_RMASK 0x1
 #define QIB_7322_HwErrClear_LATriggeredClear_LSB 0xB
 #define QIB_7322_HwErrClear_LATriggeredClear_MSB 0xB
 #define QIB_7322_HwErrClear_LATriggeredClear_RMASK 0x1
@@ -880,15 +880,15 @@
 #define QIB_7322_HwDiagCtrl_ForceIBCBusFromSPCParityErr_1_LSB 0xF
 #define QIB_7322_HwDiagCtrl_ForceIBCBusFromSPCParityErr_1_MSB 0xF
 #define QIB_7322_HwDiagCtrl_ForceIBCBusFromSPCParityErr_1_RMASK 0x1
-#define QIB_7322_HwDiagCtrl_ForcestatusValidNoEop_1_LSB 0xE
-#define QIB_7322_HwDiagCtrl_ForcestatusValidNoEop_1_MSB 0xE
-#define QIB_7322_HwDiagCtrl_ForcestatusValidNoEop_1_RMASK 0x1
+#define QIB_7322_HwDiagCtrl_ForceIBCBusToSPCParityErr_1_LSB 0xE
+#define QIB_7322_HwDiagCtrl_ForceIBCBusToSPCParityErr_1_MSB 0xE
+#define QIB_7322_HwDiagCtrl_ForceIBCBusToSPCParityErr_1_RMASK 0x1
 #define QIB_7322_HwDiagCtrl_ForceIBCBusFromSPCParityErr_0_LSB 0xD
 #define QIB_7322_HwDiagCtrl_ForceIBCBusFromSPCParityErr_0_MSB 0xD
 #define QIB_7322_HwDiagCtrl_ForceIBCBusFromSPCParityErr_0_RMASK 0x1
-#define QIB_7322_HwDiagCtrl_ForcestatusValidNoEop_0_LSB 0xC
-#define QIB_7322_HwDiagCtrl_ForcestatusValidNoEop_0_MSB 0xC
-#define QIB_7322_HwDiagCtrl_ForcestatusValidNoEop_0_RMASK 0x1
+#define QIB_7322_HwDiagCtrl_ForceIBCBusToSPCParityErr_0_LSB 0xC
+#define QIB_7322_HwDiagCtrl_ForceIBCBusToSPCParityErr_0_MSB 0xC
+#define QIB_7322_HwDiagCtrl_ForceIBCBusToSPCParityErr_0_RMASK 0x1
 
 #define QIB_7322_EXTStatus_OFFS 0xC0
 #define QIB_7322_EXTStatus_DEF 0x000000000000X000
diff --git a/drivers/infiniband/hw/qib/qib_diag.c b/drivers/infiniband/hw/qib/qib_diag.c
index ca98dd5..05dcf0d 100644
--- a/drivers/infiniband/hw/qib/qib_diag.c
+++ b/drivers/infiniband/hw/qib/qib_diag.c
@@ -233,6 +233,7 @@
 	u32 __iomem *krb32 = (u32 __iomem *)dd->kregbase;
 	u32 __iomem *map = NULL;
 	u32 cnt = 0;
+	u32 tot4k, offs4k;
 
 	/* First, simplest case, offset is within the first map. */
 	kreglen = (dd->kregend - dd->kregbase) * sizeof(u64);
@@ -250,7 +251,8 @@
 	if (dd->userbase) {
 		/* If user regs mapped, they are after send, so set limit. */
 		u32 ulim = (dd->cfgctxts * dd->ureg_align) + dd->uregbase;
-		snd_lim = dd->uregbase;
+		if (!dd->piovl15base)
+			snd_lim = dd->uregbase;
 		krb32 = (u32 __iomem *)dd->userbase;
 		if (offset >= dd->uregbase && offset < ulim) {
 			map = krb32 + (offset - dd->uregbase) / sizeof(u32);
@@ -277,14 +279,14 @@
 	/* If 4k buffers exist, account for them by bumping
 	 * appropriate limit.
 	 */
+	tot4k = dd->piobcnt4k * dd->align4k;
+	offs4k = dd->piobufbase >> 32;
 	if (dd->piobcnt4k) {
-		u32 tot4k = dd->piobcnt4k * dd->align4k;
-		u32 offs4k = dd->piobufbase >> 32;
 		if (snd_bottom > offs4k)
 			snd_bottom = offs4k;
 		else {
 			/* 4k above 2k. Bump snd_lim, if needed*/
-			if (!dd->userbase)
+			if (!dd->userbase || dd->piovl15base)
 				snd_lim = offs4k + tot4k;
 		}
 	}
@@ -298,6 +300,15 @@
 		cnt = snd_lim - offset;
 	}
 
+	if (!map && offs4k && dd->piovl15base) {
+		snd_lim = offs4k + tot4k + 2 * dd->align4k;
+		if (offset >= (offs4k + tot4k) && offset < snd_lim) {
+			map = (u32 __iomem *)dd->piovl15base +
+				((offset - (offs4k + tot4k)) / sizeof(u32));
+			cnt = snd_lim - offset;
+		}
+	}
+
 mapped:
 	if (cntp)
 		*cntp = cnt;
diff --git a/drivers/infiniband/hw/qib/qib_iba6120.c b/drivers/infiniband/hw/qib/qib_iba6120.c
index 1eadadc..a5e29db 100644
--- a/drivers/infiniband/hw/qib/qib_iba6120.c
+++ b/drivers/infiniband/hw/qib/qib_iba6120.c
@@ -1355,8 +1355,7 @@
 	hwstat = qib_read_kreg64(dd, kr_hwerrstatus);
 	if (hwstat) {
 		/* should just have PLL, clear all set, in any case */
-		if (hwstat & ~QLOGIC_IB_HWE_SERDESPLLFAILED)
-			qib_write_kreg(dd, kr_hwerrclear, hwstat);
+		qib_write_kreg(dd, kr_hwerrclear, hwstat);
 		qib_write_kreg(dd, kr_errclear, ERR_MASK(HardwareErr));
 	}
 
diff --git a/drivers/infiniband/hw/qib/qib_iba7322.c b/drivers/infiniband/hw/qib/qib_iba7322.c
index 503992d..5eedf83 100644
--- a/drivers/infiniband/hw/qib/qib_iba7322.c
+++ b/drivers/infiniband/hw/qib/qib_iba7322.c
@@ -543,7 +543,7 @@
 static void write_tx_serdes_param(struct qib_pportdata *, struct txdds_ent *);
 
 #define TXDDS_TABLE_SZ 16 /* number of entries per speed in onchip table */
-#define TXDDS_EXTRA_SZ 11 /* number of extra tx settings entries */
+#define TXDDS_EXTRA_SZ 13 /* number of extra tx settings entries */
 #define SERDES_CHANS 4 /* yes, it's obvious, but one less magic number */
 
 #define H1_FORCE_VAL 8
@@ -1100,9 +1100,9 @@
 	HWE_AUTO_P(SDmaMemReadErr, 1),
 	HWE_AUTO_P(SDmaMemReadErr, 0),
 	HWE_AUTO_P(IBCBusFromSPCParityErr, 1),
+	HWE_AUTO_P(IBCBusToSPCParityErr, 1),
 	HWE_AUTO_P(IBCBusFromSPCParityErr, 0),
-	HWE_AUTO_P(statusValidNoEop, 1),
-	HWE_AUTO_P(statusValidNoEop, 0),
+	HWE_AUTO(statusValidNoEop),
 	HWE_AUTO(LATriggered),
 	{ .mask = 0 }
 };
@@ -4763,6 +4763,8 @@
 		SYM_MASK(IBPCSConfig_0, tx_rx_reset);
 
 	val = qib_read_kreg_port(ppd, krp_ib_pcsconfig);
+	qib_write_kreg(dd, kr_hwerrmask,
+		       dd->cspec->hwerrmask & ~HWE_MASK(statusValidNoEop));
 	qib_write_kreg_port(ppd, krp_ibcctrl_a,
 			    ppd->cpspec->ibcctrl_a &
 			    ~SYM_MASK(IBCCtrlA_0, IBLinkEn));
@@ -4772,6 +4774,9 @@
 	qib_write_kreg_port(ppd, krp_ib_pcsconfig, val & ~reset_bits);
 	qib_write_kreg_port(ppd, krp_ibcctrl_a, ppd->cpspec->ibcctrl_a);
 	qib_write_kreg(dd, kr_scratch, 0ULL);
+	qib_write_kreg(dd, kr_hwerrclear,
+		       SYM_MASK(HwErrClear, statusValidNoEopClear));
+	qib_write_kreg(dd, kr_hwerrmask, dd->cspec->hwerrmask);
 }
 
 /*
@@ -5624,6 +5629,8 @@
 			if (ppd->port != port || !ppd->link_speed_supported)
 				continue;
 			ppd->cpspec->no_eep = val;
+			if (seth1)
+				ppd->cpspec->h1_val = h1;
 			/* now change the IBC and serdes, overriding generic */
 			init_txdds_table(ppd, 1);
 			any++;
@@ -6064,9 +6071,9 @@
 		 * the "cable info" setup here.  Can be overridden
 		 * in adapter-specific routines.
 		 */
-		if (!(ppd->dd->flags & QIB_HAS_QSFP)) {
-			if (!IS_QMH(ppd->dd) && !IS_QME(ppd->dd))
-				qib_devinfo(ppd->dd->pcidev, "IB%u:%u: "
+		if (!(dd->flags & QIB_HAS_QSFP)) {
+			if (!IS_QMH(dd) && !IS_QME(dd))
+				qib_devinfo(dd->pcidev, "IB%u:%u: "
 					    "Unknown mezzanine card type\n",
 					    dd->unit, ppd->port);
 			cp->h1_val = IS_QMH(dd) ? H1_FORCE_QMH : H1_FORCE_QME;
@@ -6119,9 +6126,25 @@
 	qib_set_ctxtcnt(dd);
 
 	if (qib_wc_pat) {
-		ret = init_chip_wc_pat(dd, NUM_VL15_BUFS * dd->align4k);
+		resource_size_t vl15off;
+		/*
+		 * We do not set WC on the VL15 buffers to avoid
+		 * a rare problem with unaligned writes from
+		 * interrupt-flushed store buffers, so we need
+		 * to map those separately here.  We can't solve
+		 * this for the rarely used mtrr case.
+		 */
+		ret = init_chip_wc_pat(dd, 0);
 		if (ret)
 			goto bail;
+
+		/* vl15 buffers start just after the 4k buffers */
+		vl15off = dd->physaddr + (dd->piobufbase >> 32) +
+			dd->piobcnt4k * dd->align4k;
+		dd->piovl15base	= ioremap_nocache(vl15off,
+						  NUM_VL15_BUFS * dd->align4k);
+		if (!dd->piovl15base)
+			goto bail;
 	}
 	qib_7322_set_baseaddrs(dd); /* set chip access pointers now */
 
@@ -6932,6 +6955,8 @@
 	{  0, 0, 0, 11 },	/* QME7342 backplane settings */
 	{  0, 0, 0, 11 },	/* QME7342 backplane settings */
 	{  0, 0, 0, 11 },	/* QME7342 backplane settings */
+	{  0, 0, 0,  3 },	/* QMH7342 backplane settings */
+	{  0, 0, 0,  4 },	/* QMH7342 backplane settings */
 };
 
 static const struct txdds_ent txdds_extra_ddr[TXDDS_EXTRA_SZ] = {
@@ -6947,6 +6972,8 @@
 	{  0, 0, 0, 13 },	/* QME7342 backplane settings */
 	{  0, 0, 0, 13 },	/* QME7342 backplane settings */
 	{  0, 0, 0, 13 },	/* QME7342 backplane settings */
+	{  0, 0, 0,  9 },	/* QMH7342 backplane settings */
+	{  0, 0, 0, 10 },	/* QMH7342 backplane settings */
 };
 
 static const struct txdds_ent txdds_extra_qdr[TXDDS_EXTRA_SZ] = {
@@ -6962,6 +6989,8 @@
 	{  0, 1, 12,  6 },	/* QME7342 backplane setting */
 	{  0, 1, 12,  7 },	/* QME7342 backplane setting */
 	{  0, 1, 12,  8 },	/* QME7342 backplane setting */
+	{  0, 1,  0, 10 },	/* QMH7342 backplane settings */
+	{  0, 1,  0, 12 },	/* QMH7342 backplane settings */
 };
 
 static const struct txdds_ent *get_atten_table(const struct txdds_ent *txdds,
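
The qib_init_7322_variables() change above maps the VL15 send buffers with ioremap_nocache() rather than letting them share the write-combining mapping, for the reason given in the new comment. The region starts where the 4k buffers end (the 4k buffer offset lives in the upper 32 bits of piobufbase) and covers NUM_VL15_BUFS buffers of align4k bytes each. A standalone sketch of that offset arithmetic (every numeric value below is invented; only the formula mirrors the patch):

#include <stdio.h>

#define NUM_VL15_BUFS 2		/* assumed value for illustration */

int main(void)
{
	unsigned long long physaddr   = 0xd0000000ULL;	/* hypothetical BAR0 */
	unsigned long long piobufbase = (0x00100000ULL << 32) | 0x40000ULL;
	unsigned int piobcnt4k = 32;	/* hypothetical 4k-buffer count */
	unsigned int align4k   = 8192;	/* hypothetical per-buffer stride */

	/* vl15off = physaddr + (piobufbase >> 32) + piobcnt4k * align4k */
	unsigned long long vl15off = physaddr + (piobufbase >> 32) +
				     (unsigned long long)piobcnt4k * align4k;
	unsigned long long vl15len = (unsigned long long)NUM_VL15_BUFS * align4k;

	printf("VL15 region: phys 0x%llx, %llu bytes\n", vl15off, vl15len);
	return 0;
}
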
diff --git a/drivers/infiniband/hw/qib/qib_init.c b/drivers/infiniband/hw/qib/qib_init.c
index 9b40f34..a873dd5 100644
--- a/drivers/infiniband/hw/qib/qib_init.c
+++ b/drivers/infiniband/hw/qib/qib_init.c
@@ -1059,7 +1059,7 @@
 		goto bail_dev;
 	}
 
-	qib_cq_wq = create_workqueue("qib_cq");
+	qib_cq_wq = create_singlethread_workqueue("qib_cq");
 	if (!qib_cq_wq) {
 		ret = -ENOMEM;
 		goto bail_wq;
@@ -1289,8 +1289,18 @@
 
 	if (qib_mini_init || initfail || ret) {
 		qib_stop_timers(dd);
+		flush_scheduled_work();
 		for (pidx = 0; pidx < dd->num_pports; ++pidx)
 			dd->f_quiet_serdes(dd->pport + pidx);
+		if (qib_mini_init)
+			goto bail;
+		if (!j) {
+			(void) qibfs_remove(dd);
+			qib_device_remove(dd);
+		}
+		if (!ret)
+			qib_unregister_ib_device(dd);
+		qib_postinit_cleanup(dd);
 		if (initfail)
 			ret = initfail;
 		goto bail;
@@ -1472,6 +1482,9 @@
 		dma_addr_t pa = rcd->rcvegrbuf_phys[chunk];
 		unsigned i;
 
+		/* clear for security and sanity on each use */
+		memset(rcd->rcvegrbuf[chunk], 0, size);
+
 		for (i = 0; e < egrcnt && i < egrperchunk; e++, i++) {
 			dd->f_put_tid(dd, e + egroff +
 					  (u64 __iomem *)
@@ -1499,6 +1512,12 @@
 	return -ENOMEM;
 }
 
+/*
+ * Note: Changes to this routine should be mirrored
+ * for the diagnostics routine qib_remap_ioaddr32().
+ * There is also related code for VL15 buffers in qib_init_7322_variables().
+ * The teardown code that unmaps is in qib_pcie_ddcleanup()
+ */
 int init_chip_wc_pat(struct qib_devdata *dd, u32 vl15buflen)
 {
 	u64 __iomem *qib_kregbase = NULL;
diff --git a/drivers/infiniband/hw/qib/qib_pcie.c b/drivers/infiniband/hw/qib/qib_pcie.c
index c926bf4..7fa6e55 100644
--- a/drivers/infiniband/hw/qib/qib_pcie.c
+++ b/drivers/infiniband/hw/qib/qib_pcie.c
@@ -179,6 +179,8 @@
 		iounmap(dd->piobase);
 	if (dd->userbase)
 		iounmap(dd->userbase);
+	if (dd->piovl15base)
+		iounmap(dd->piovl15base);
 
 	pci_disable_device(dd->pcidev);
 	pci_release_regions(dd->pcidev);
diff --git a/drivers/infiniband/hw/qib/qib_tx.c b/drivers/infiniband/hw/qib/qib_tx.c
index f7eb1dd..af30232 100644
--- a/drivers/infiniband/hw/qib/qib_tx.c
+++ b/drivers/infiniband/hw/qib/qib_tx.c
@@ -340,9 +340,13 @@
 		if (i < dd->piobcnt2k)
 			buf = (u32 __iomem *)(dd->pio2kbase +
 				i * dd->palign);
-		else
+		else if (i < dd->piobcnt2k + dd->piobcnt4k || !dd->piovl15base)
 			buf = (u32 __iomem *)(dd->pio4kbase +
 				(i - dd->piobcnt2k) * dd->align4k);
+		else
+			buf = (u32 __iomem *)(dd->piovl15base +
+				(i - (dd->piobcnt2k + dd->piobcnt4k)) *
+				dd->align4k);
 		if (pbufnum)
 			*pbufnum = i;
 		dd->upd_pio_shadow = 0;
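
With the separate VL15 mapping in place, the buffer lookup in qib_tx.c above picks one of three regions by index: below piobcnt2k it uses pio2kbase, up to piobcnt2k + piobcnt4k (or for everything beyond the 2k range when no separate VL15 mapping exists) it uses pio4kbase, and the remainder comes from piovl15base. A standalone sketch of that selection (the counts are invented; only the branch structure mirrors the patch):

#include <stdio.h>

static const char *region_for(unsigned int i, unsigned int piobcnt2k,
			      unsigned int piobcnt4k, int have_vl15base)
{
	if (i < piobcnt2k)
		return "pio2kbase";
	else if (i < piobcnt2k + piobcnt4k || !have_vl15base)
		return "pio4kbase";
	else
		return "piovl15base";
}

int main(void)
{
	unsigned int piobcnt2k = 128, piobcnt4k = 32;	/* hypothetical */
	unsigned int idx[] = { 5, 140, 160, 161 };
	unsigned int k;

	for (k = 0; k < sizeof(idx) / sizeof(idx[0]); k++)
		printf("buf %u -> %s\n", idx[k],
		       region_for(idx[k], piobcnt2k, piobcnt4k, 1));
	return 0;
}
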
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c
index df3eb8c..b4b2257 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_main.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c
@@ -1163,7 +1163,7 @@
 
 	return ret ? ret : count;
 }
-static DEVICE_ATTR(create_child, S_IWUGO, NULL, create_child);
+static DEVICE_ATTR(create_child, S_IWUSR, NULL, create_child);
 
 static ssize_t delete_child(struct device *dev,
 			    struct device_attribute *attr,
@@ -1183,7 +1183,7 @@
 	return ret ? ret : count;
 
 }
-static DEVICE_ATTR(delete_child, S_IWUGO, NULL, delete_child);
+static DEVICE_ATTR(delete_child, S_IWUSR, NULL, delete_child);
 
 int ipoib_add_pkey_attr(struct net_device *dev)
 {
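
The IPoIB fix swaps S_IWUGO (writable by user, group and other, mode 0222) for S_IWUSR (owner-only, mode 0200) on the create_child and delete_child attributes, so only root can add or remove child interfaces through sysfs. A standalone illustration of the two mode values (S_IWUGO is defined here exactly as the kernel defines it, since userspace headers may not provide it):

#include <stdio.h>
#include <sys/stat.h>

#define S_IWUGO (S_IWUSR | S_IWGRP | S_IWOTH)	/* as defined by the kernel */

int main(void)
{
	printf("S_IWUGO = %04o (world-writable)\n", S_IWUGO);
	printf("S_IWUSR = %04o (owner-only)\n", S_IWUSR);
	return 0;
}

Compiling and running this prints 0222 for S_IWUGO and 0200 for S_IWUSR, which is exactly the permission tightening the commit makes.
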