Merge branches 'cxgb3', 'cxgb4', 'ehca', 'ipath', 'misc', 'nes', 'qib' and 'srp' into for-next
diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c
index ad63b79..64e0903 100644
--- a/drivers/infiniband/core/cm.c
+++ b/drivers/infiniband/core/cm.c
@@ -2409,10 +2409,12 @@
 		msg_response = CM_MSG_RESPONSE_REP;
 		break;
 	case IB_CM_ESTABLISHED:
-		cm_state = cm_id->state;
-		lap_state = IB_CM_MRA_LAP_SENT;
-		msg_response = CM_MSG_RESPONSE_OTHER;
-		break;
+		if (cm_id->lap_state == IB_CM_LAP_RCVD) {
+			cm_state = cm_id->state;
+			lap_state = IB_CM_MRA_LAP_SENT;
+			msg_response = CM_MSG_RESPONSE_OTHER;
+			break;
+		}
 	default:
 		ret = -EINVAL;
 		goto error1;
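
Note on the cm.c hunk: the IB_CM_ESTABLISHED case now only builds an MRA while a LAP is actually pending (lap_state == IB_CM_LAP_RCVD); any other established-state call falls through to the -EINVAL default. A minimal standalone sketch of that guarded-case fall-through idiom (enum and function names here are hypothetical, not the CM code):

    #include <errno.h>
    #include <stdio.h>

    enum conn_state { CS_IDLE, CS_ESTABLISHED };
    enum lap_state  { LS_IDLE, LS_RCVD };

    /* An MRA is only legal in ESTABLISHED while a LAP is outstanding. */
    static int build_mra(enum conn_state cs, enum lap_state ls)
    {
        switch (cs) {
        case CS_ESTABLISHED:
            if (ls == LS_RCVD)
                return 0;       /* fill in the MRA here */
            /* no pending LAP: fall through to the error path */
        default:
            return -EINVAL;
        }
    }

    int main(void)
    {
        printf("%d %d\n", build_mra(CS_ESTABLISHED, LS_RCVD),   /* 0 */
               build_mra(CS_ESTABLISHED, LS_IDLE));             /* -EINVAL */
        return 0;
    }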
diff --git a/drivers/infiniband/core/user_mad.c b/drivers/infiniband/core/user_mad.c
index 6babb72..5fa8569 100644
--- a/drivers/infiniband/core/user_mad.c
+++ b/drivers/infiniband/core/user_mad.c
@@ -1085,7 +1085,6 @@
 static void ib_umad_kill_port(struct ib_umad_port *port)
 {
 	struct ib_umad_file *file;
-	int already_dead;
 	int id;
 
 	dev_set_drvdata(port->dev,    NULL);
@@ -1103,7 +1102,6 @@
 
 	list_for_each_entry(file, &port->file_list, port_list) {
 		mutex_lock(&file->mutex);
-		already_dead = file->agents_dead;
 		file->agents_dead = 1;
 		mutex_unlock(&file->mutex);
 
diff --git a/drivers/infiniband/core/verbs.c b/drivers/infiniband/core/verbs.c
index a7da9be4..e0fa222 100644
--- a/drivers/infiniband/core/verbs.c
+++ b/drivers/infiniband/core/verbs.c
@@ -310,8 +310,8 @@
 
 static const struct {
 	int			valid;
-	enum ib_qp_attr_mask	req_param[IB_QPT_RAW_ETY + 1];
-	enum ib_qp_attr_mask	opt_param[IB_QPT_RAW_ETY + 1];
+	enum ib_qp_attr_mask	req_param[IB_QPT_RAW_ETHERTYPE + 1];
+	enum ib_qp_attr_mask	opt_param[IB_QPT_RAW_ETHERTYPE + 1];
 } qp_state_table[IB_QPS_ERR + 1][IB_QPS_ERR + 1] = {
 	[IB_QPS_RESET] = {
 		[IB_QPS_RESET] = { .valid = 1 },
diff --git a/drivers/infiniband/hw/cxgb3/iwch_cm.c b/drivers/infiniband/hw/cxgb3/iwch_cm.c
index ebfb117..3cdb535 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_cm.c
+++ b/drivers/infiniband/hw/cxgb3/iwch_cm.c
@@ -137,7 +137,7 @@
 	put_ep(&ep->com);
 }
 
-int iwch_l2t_send(struct t3cdev *tdev, struct sk_buff *skb, struct l2t_entry *l2e)
+static int iwch_l2t_send(struct t3cdev *tdev, struct sk_buff *skb, struct l2t_entry *l2e)
 {
 	int	error = 0;
 	struct cxio_rdev *rdev;
diff --git a/drivers/infiniband/hw/cxgb3/iwch_qp.c b/drivers/infiniband/hw/cxgb3/iwch_qp.c
index ae47bfd..6adc13fc 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_qp.c
+++ b/drivers/infiniband/hw/cxgb3/iwch_qp.c
@@ -371,7 +371,7 @@
 	}
 	num_wrs = Q_FREECNT(qhp->wq.sq_rptr, qhp->wq.sq_wptr,
 		  qhp->wq.sq_size_log2);
-	if (num_wrs <= 0) {
+	if (num_wrs == 0) {
 		spin_unlock_irqrestore(&qhp->lock, flag);
 		err = -ENOMEM;
 		goto out;
@@ -554,7 +554,7 @@
 	}
 	num_wrs = Q_FREECNT(qhp->wq.sq_rptr, qhp->wq.sq_wptr,
 			    qhp->wq.sq_size_log2);
-	if ((num_wrs) <= 0) {
+	if (num_wrs == 0) {
 		spin_unlock_irqrestore(&qhp->lock, flag);
 		return -ENOMEM;
 	}
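
Note on the iwch_qp.c hunks: num_wrs comes from Q_FREECNT() and is unsigned, so "num_wrs <= 0" could never catch a negative count and was just a misleading spelling of "num_wrs == 0". A tiny demonstration of the pitfall:

    #include <stdio.h>

    typedef unsigned int u32;

    int main(void)
    {
        u32 num_wrs = 0;

        /* For an unsigned type these two tests are identical; "<= 0"
         * merely suggests a negative case that cannot exist. */
        printf("%d %d\n", num_wrs <= 0, num_wrs == 0);   /* 1 1 */
        return 0;
    }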
diff --git a/drivers/infiniband/hw/cxgb4/cm.c b/drivers/infiniband/hw/cxgb4/cm.c
index 855ee44..6c40779 100644
--- a/drivers/infiniband/hw/cxgb4/cm.c
+++ b/drivers/infiniband/hw/cxgb4/cm.c
@@ -61,6 +61,10 @@
 	NULL,
 };
 
+static int dack_mode;
+module_param(dack_mode, int, 0644);
+MODULE_PARM_DESC(dack_mode, "Delayed ack mode (default=0)");
+
 int c4iw_max_read_depth = 8;
 module_param(c4iw_max_read_depth, int, 0644);
 MODULE_PARM_DESC(c4iw_max_read_depth, "Per-connection max ORD/IRD (default=8)");
@@ -469,11 +473,12 @@
 		       __func__);
 		return -ENOMEM;
 	}
-	set_wr_txq(skb, CPL_PRIORITY_SETUP, ep->txq_idx);
+	set_wr_txq(skb, CPL_PRIORITY_SETUP, ep->ctrlq_idx);
 
 	cxgb4_best_mtu(ep->com.dev->rdev.lldi.mtus, ep->mtu, &mtu_idx);
 	wscale = compute_wscale(rcv_win);
 	opt0 = KEEP_ALIVE(1) |
+	       DELACK(1) |
 	       WND_SCALE(wscale) |
 	       MSS_IDX(mtu_idx) |
 	       L2T_IDX(ep->l2t->idx) |
@@ -780,11 +785,11 @@
 		event.private_data_len = ep->plen;
 		event.private_data = ep->mpa_pkt + sizeof(struct mpa_message);
 	}
-	if (ep->com.cm_id) {
-		PDBG("%s ep %p tid %u status %d\n", __func__, ep,
-		     ep->hwtid, status);
-		ep->com.cm_id->event_handler(ep->com.cm_id, &event);
-	}
+
+	PDBG("%s ep %p tid %u status %d\n", __func__, ep,
+	     ep->hwtid, status);
+	ep->com.cm_id->event_handler(ep->com.cm_id, &event);
+
 	if (status < 0) {
 		ep->com.cm_id->rem_ref(ep->com.cm_id);
 		ep->com.cm_id = NULL;
@@ -845,8 +850,10 @@
 	INIT_TP_WR(req, ep->hwtid);
 	OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_RX_DATA_ACK,
 						    ep->hwtid));
-	req->credit_dack = cpu_to_be32(credits);
-	set_wr_txq(skb, CPL_PRIORITY_ACK, ep->txq_idx);
+	req->credit_dack = cpu_to_be32(credits | RX_FORCE_ACK(1) |
+				       F_RX_DACK_CHANGE |
+				       V_RX_DACK_MODE(dack_mode));
+	set_wr_txq(skb, CPL_PRIORITY_ACK, ep->ctrlq_idx);
 	c4iw_ofld_send(&ep->com.dev->rdev, skb);
 	return credits;
 }
@@ -1264,6 +1271,7 @@
 	cxgb4_best_mtu(ep->com.dev->rdev.lldi.mtus, ep->mtu, &mtu_idx);
 	wscale = compute_wscale(rcv_win);
 	opt0 = KEEP_ALIVE(1) |
+	       DELACK(1) |
 	       WND_SCALE(wscale) |
 	       MSS_IDX(mtu_idx) |
 	       L2T_IDX(ep->l2t->idx) |
@@ -1287,7 +1295,7 @@
 				      ep->hwtid));
 	rpl->opt0 = cpu_to_be64(opt0);
 	rpl->opt2 = cpu_to_be32(opt2);
-	set_wr_txq(skb, CPL_PRIORITY_SETUP, ep->txq_idx);
+	set_wr_txq(skb, CPL_PRIORITY_SETUP, ep->ctrlq_idx);
 	c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
 
 	return;
@@ -1344,7 +1352,7 @@
 	u16 rss_qid;
 	u32 mtu;
 	int step;
-	int txq_idx;
+	int txq_idx, ctrlq_idx;
 
 	parent_ep = lookup_stid(t, stid);
 	PDBG("%s parent ep %p tid %u\n", __func__, parent_ep, hwtid);
@@ -1376,6 +1384,7 @@
 		smac_idx = (cxgb4_port_viid(pdev) & 0x7F) << 1;
 		step = dev->rdev.lldi.ntxq / dev->rdev.lldi.nchan;
 		txq_idx = cxgb4_port_idx(pdev) * step;
+		ctrlq_idx = cxgb4_port_idx(pdev);
 		step = dev->rdev.lldi.nrxq / dev->rdev.lldi.nchan;
 		rss_qid = dev->rdev.lldi.rxq_ids[cxgb4_port_idx(pdev) * step];
 		dev_put(pdev);
@@ -1387,6 +1396,7 @@
 		smac_idx = (cxgb4_port_viid(dst->neighbour->dev) & 0x7F) << 1;
 		step = dev->rdev.lldi.ntxq / dev->rdev.lldi.nchan;
 		txq_idx = cxgb4_port_idx(dst->neighbour->dev) * step;
+		ctrlq_idx = cxgb4_port_idx(dst->neighbour->dev);
 		step = dev->rdev.lldi.nrxq / dev->rdev.lldi.nchan;
 		rss_qid = dev->rdev.lldi.rxq_ids[
 			  cxgb4_port_idx(dst->neighbour->dev) * step];
@@ -1426,6 +1436,7 @@
 	child_ep->rss_qid = rss_qid;
 	child_ep->mtu = mtu;
 	child_ep->txq_idx = txq_idx;
+	child_ep->ctrlq_idx = ctrlq_idx;
 
 	PDBG("%s tx_chan %u smac_idx %u rss_qid %u\n", __func__,
 	     tx_chan, smac_idx, rss_qid);
@@ -1473,8 +1484,6 @@
 	int closing = 0;
 	struct tid_info *t = dev->rdev.lldi.tids;
 	unsigned int tid = GET_TID(hdr);
-	int start_timer = 0;
-	int stop_timer = 0;
 
 	ep = lookup_tid(t, tid);
 	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
@@ -1511,7 +1520,7 @@
 		wake_up(&ep->com.waitq);
 		break;
 	case FPDU_MODE:
-		start_timer = 1;
+		start_ep_timer(ep);
 		__state_set(&ep->com, CLOSING);
 		closing = 1;
 		peer_close_upcall(ep);
@@ -1524,7 +1533,7 @@
 		disconnect = 0;
 		break;
 	case MORIBUND:
-		stop_timer = 1;
+		stop_ep_timer(ep);
 		if (ep->com.cm_id && ep->com.qp) {
 			attrs.next_state = C4IW_QP_STATE_IDLE;
 			c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp,
@@ -1547,10 +1556,6 @@
 		c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp,
 			       C4IW_QP_ATTR_NEXT_STATE, &attrs, 1);
 	}
-	if (start_timer)
-		start_ep_timer(ep);
-	if (stop_timer)
-		stop_ep_timer(ep);
 	if (disconnect)
 		c4iw_ep_disconnect(ep, 0, GFP_KERNEL);
 	if (release)
@@ -1579,7 +1584,6 @@
 	unsigned long flags;
 	struct tid_info *t = dev->rdev.lldi.tids;
 	unsigned int tid = GET_TID(req);
-	int stop_timer = 0;
 
 	ep = lookup_tid(t, tid);
 	if (is_neg_adv_abort(req->status)) {
@@ -1594,10 +1598,10 @@
 	case CONNECTING:
 		break;
 	case MPA_REQ_WAIT:
-		stop_timer = 1;
+		stop_ep_timer(ep);
 		break;
 	case MPA_REQ_SENT:
-		stop_timer = 1;
+		stop_ep_timer(ep);
 		connect_reply_upcall(ep, -ECONNRESET);
 		break;
 	case MPA_REP_SENT:
@@ -1621,7 +1625,7 @@
 		break;
 	case MORIBUND:
 	case CLOSING:
-		stop_timer = 1;
+		stop_ep_timer(ep);
 		/*FALLTHROUGH*/
 	case FPDU_MODE:
 		if (ep->com.cm_id && ep->com.qp) {
@@ -1667,8 +1671,6 @@
 	rpl->cmd = CPL_ABORT_NO_RST;
 	c4iw_ofld_send(&ep->com.dev->rdev, rpl_skb);
 out:
-	if (stop_timer)
-		stop_ep_timer(ep);
 	if (release)
 		release_ep_resources(ep);
 	return 0;
@@ -1683,7 +1685,6 @@
 	int release = 0;
 	struct tid_info *t = dev->rdev.lldi.tids;
 	unsigned int tid = GET_TID(rpl);
-	int stop_timer = 0;
 
 	ep = lookup_tid(t, tid);
 
@@ -1697,7 +1698,7 @@
 		__state_set(&ep->com, MORIBUND);
 		break;
 	case MORIBUND:
-		stop_timer = 1;
+		stop_ep_timer(ep);
 		if ((ep->com.cm_id) && (ep->com.qp)) {
 			attrs.next_state = C4IW_QP_STATE_IDLE;
 			c4iw_modify_qp(ep->com.qp->rhp,
@@ -1717,8 +1718,6 @@
 		break;
 	}
 	spin_unlock_irqrestore(&ep->com.lock, flags);
-	if (stop_timer)
-		stop_ep_timer(ep);
 	if (release)
 		release_ep_resources(ep);
 	return 0;
@@ -1957,6 +1956,7 @@
 		ep->txq_idx = cxgb4_port_idx(pdev) * step;
 		step = ep->com.dev->rdev.lldi.nrxq /
 		       ep->com.dev->rdev.lldi.nchan;
+		ep->ctrlq_idx = cxgb4_port_idx(pdev);
 		ep->rss_qid = ep->com.dev->rdev.lldi.rxq_ids[
 			      cxgb4_port_idx(pdev) * step];
 		dev_put(pdev);
@@ -1971,6 +1971,7 @@
 		step = ep->com.dev->rdev.lldi.ntxq /
 		       ep->com.dev->rdev.lldi.nchan;
 		ep->txq_idx = cxgb4_port_idx(ep->dst->neighbour->dev) * step;
+		ep->ctrlq_idx = cxgb4_port_idx(ep->dst->neighbour->dev);
 		step = ep->com.dev->rdev.lldi.nrxq /
 		       ep->com.dev->rdev.lldi.nchan;
 		ep->rss_qid = ep->com.dev->rdev.lldi.rxq_ids[
@@ -2049,8 +2050,15 @@
 		goto fail3;
 
 	/* wait for pass_open_rpl */
-	wait_event(ep->com.waitq, ep->com.rpl_done);
-	err = ep->com.rpl_err;
+	wait_event_timeout(ep->com.waitq, ep->com.rpl_done, C4IW_WR_TO);
+	if (ep->com.rpl_done)
+		err = ep->com.rpl_err;
+	else {
+		printk(KERN_ERR MOD "Device %s not responding!\n",
+		       pci_name(ep->com.dev->rdev.lldi.pdev));
+		ep->com.dev->rdev.flags = T4_FATAL_ERROR;
+		err = -EIO;
+	}
 	if (!err) {
 		cm_id->provider_data = ep;
 		goto out;
@@ -2079,10 +2087,17 @@
 	err = listen_stop(ep);
 	if (err)
 		goto done;
-	wait_event(ep->com.waitq, ep->com.rpl_done);
+	wait_event_timeout(ep->com.waitq, ep->com.rpl_done, C4IW_WR_TO);
+	if (ep->com.rpl_done)
+		err = ep->com.rpl_err;
+	else {
+		printk(KERN_ERR MOD "Device %s not responding!\n",
+		       pci_name(ep->com.dev->rdev.lldi.pdev));
+		ep->com.dev->rdev.flags = T4_FATAL_ERROR;
+		err = -EIO;
+	}
 	cxgb4_free_stid(ep->com.dev->rdev.lldi.tids, ep->stid, PF_INET);
 done:
-	err = ep->com.rpl_err;
 	cm_id->rem_ref(cm_id);
 	c4iw_put_ep(&ep->com);
 	return err;
@@ -2095,8 +2110,6 @@
 	int close = 0;
 	int fatal = 0;
 	struct c4iw_rdev *rdev;
-	int start_timer = 0;
-	int stop_timer = 0;
 
 	spin_lock_irqsave(&ep->com.lock, flags);
 
@@ -2120,7 +2133,7 @@
 			ep->com.state = ABORTING;
 		else {
 			ep->com.state = CLOSING;
-			start_timer = 1;
+			start_ep_timer(ep);
 		}
 		set_bit(CLOSE_SENT, &ep->com.flags);
 		break;
@@ -2128,7 +2141,7 @@
 		if (!test_and_set_bit(CLOSE_SENT, &ep->com.flags)) {
 			close = 1;
 			if (abrupt) {
-				stop_timer = 1;
+				stop_ep_timer(ep);
 				ep->com.state = ABORTING;
 			} else
 				ep->com.state = MORIBUND;
@@ -2146,10 +2159,6 @@
 	}
 
 	spin_unlock_irqrestore(&ep->com.lock, flags);
-	if (start_timer)
-		start_ep_timer(ep);
-	if (stop_timer)
-		stop_ep_timer(ep);
 	if (close) {
 		if (abrupt)
 			ret = abort_connection(ep, NULL, gfp);
@@ -2244,7 +2253,7 @@
 {
 	struct sk_buff *skb = NULL;
 	struct c4iw_dev *dev;
-	struct cpl_act_establish *rpl = cplhdr(skb);
+	struct cpl_act_establish *rpl;
 	unsigned int opcode;
 	int ret;
 
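Note on the cm.c listen setup/teardown hunks: the driver no longer blocks forever on the firmware reply. wait_event_timeout() returns after C4IW_WR_TO even if rpl_done was never set, and a timeout now marks the device fatal. The shape of that pattern, as a hedged sketch with hypothetical structure and flag names (the driver's own fields are ep->com.rpl_done, C4IW_WR_TO and T4_FATAL_ERROR):

    /* Sketch only: bound the wait for a firmware reply and fail the
     * device on timeout rather than hanging the caller. */
    static int wait_for_reply(struct my_dev *dev, unsigned long timeout)
    {
        wait_event_timeout(dev->waitq, dev->rpl_done, timeout);
        if (!dev->rpl_done) {
            dev_err(&dev->pdev->dev, "device not responding\n");
            dev->flags |= MY_DEV_FATAL;   /* poison later submissions */
            return -EIO;
        }
        return dev->rpl_err;
    }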
diff --git a/drivers/infiniband/hw/cxgb4/cq.c b/drivers/infiniband/hw/cxgb4/cq.c
index fac5c6e..b3daf39 100644
--- a/drivers/infiniband/hw/cxgb4/cq.c
+++ b/drivers/infiniband/hw/cxgb4/cq.c
@@ -43,7 +43,7 @@
 	int ret;
 
 	wr_len = sizeof *res_wr + sizeof *res;
-	skb = alloc_skb(wr_len, GFP_KERNEL | __GFP_NOFAIL);
+	skb = alloc_skb(wr_len, GFP_KERNEL);
 	if (!skb)
 		return -ENOMEM;
 	set_wr_txq(skb, CPL_PRIORITY_CONTROL, 0);
@@ -118,7 +118,7 @@
 	/* build fw_ri_res_wr */
 	wr_len = sizeof *res_wr + sizeof *res;
 
-	skb = alloc_skb(wr_len, GFP_KERNEL | __GFP_NOFAIL);
+	skb = alloc_skb(wr_len, GFP_KERNEL);
 	if (!skb) {
 		ret = -ENOMEM;
 		goto err4;
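
Note on the __GFP_NOFAIL removals here (and in mem.c and qp.c below): alloc_skb() may now legitimately return NULL, so the previously dead "if (!skb)" branches become the real error path. The obligation this places on each call site, in sketch form (wr_len as in the hunks):

    /* Without __GFP_NOFAIL the allocation can fail; the caller must
     * unwind and return -ENOMEM instead of assuming success. */
    skb = alloc_skb(wr_len, GFP_KERNEL);
    if (!skb)
        return -ENOMEM;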
diff --git a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
index d33e1a6..ed459b8 100644
--- a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
+++ b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
@@ -619,6 +619,7 @@
 	u16 plen;
 	u16 rss_qid;
 	u16 txq_idx;
+	u16 ctrlq_idx;
 	u8 tos;
 };
 
diff --git a/drivers/infiniband/hw/cxgb4/mem.c b/drivers/infiniband/hw/cxgb4/mem.c
index 82b5703..269373a 100644
--- a/drivers/infiniband/hw/cxgb4/mem.c
+++ b/drivers/infiniband/hw/cxgb4/mem.c
@@ -59,7 +59,7 @@
 		wr_len = roundup(sizeof *req + sizeof *sc +
 				 roundup(copy_len, T4_ULPTX_MIN_IO), 16);
 
-		skb = alloc_skb(wr_len, GFP_KERNEL | __GFP_NOFAIL);
+		skb = alloc_skb(wr_len, GFP_KERNEL);
 		if (!skb)
 			return -ENOMEM;
 		set_wr_txq(skb, CPL_PRIORITY_CONTROL, 0);
diff --git a/drivers/infiniband/hw/cxgb4/qp.c b/drivers/infiniband/hw/cxgb4/qp.c
index 7065cb3..c9aaf24 100644
--- a/drivers/infiniband/hw/cxgb4/qp.c
+++ b/drivers/infiniband/hw/cxgb4/qp.c
@@ -130,7 +130,7 @@
 	/* build fw_ri_res_wr */
 	wr_len = sizeof *res_wr + 2 * sizeof *res;
 
-	skb = alloc_skb(wr_len, GFP_KERNEL | __GFP_NOFAIL);
+	skb = alloc_skb(wr_len, GFP_KERNEL);
 	if (!skb) {
 		ret = -ENOMEM;
 		goto err7;
@@ -162,7 +162,7 @@
 	res->u.sqrq.dcaen_to_eqsize = cpu_to_be32(
 		V_FW_RI_RES_WR_DCAEN(0) |
 		V_FW_RI_RES_WR_DCACPU(0) |
-		V_FW_RI_RES_WR_FBMIN(3) |
+		V_FW_RI_RES_WR_FBMIN(2) |
 		V_FW_RI_RES_WR_FBMAX(3) |
 		V_FW_RI_RES_WR_CIDXFTHRESHO(0) |
 		V_FW_RI_RES_WR_CIDXFTHRESH(0) |
@@ -185,7 +185,7 @@
 	res->u.sqrq.dcaen_to_eqsize = cpu_to_be32(
 		V_FW_RI_RES_WR_DCAEN(0) |
 		V_FW_RI_RES_WR_DCACPU(0) |
-		V_FW_RI_RES_WR_FBMIN(3) |
+		V_FW_RI_RES_WR_FBMIN(2) |
 		V_FW_RI_RES_WR_FBMAX(3) |
 		V_FW_RI_RES_WR_CIDXFTHRESHO(0) |
 		V_FW_RI_RES_WR_CIDXFTHRESH(0) |
@@ -235,12 +235,78 @@
 	return -ENOMEM;
 }
 
-static int build_rdma_send(union t4_wr *wqe, struct ib_send_wr *wr, u8 *len16)
+static int build_immd(struct t4_sq *sq, struct fw_ri_immd *immdp,
+		      struct ib_send_wr *wr, int max, u32 *plenp)
+{
+	u8 *dstp, *srcp;
+	u32 plen = 0;
+	int i;
+	int rem, len;
+
+	dstp = (u8 *)immdp->data;
+	for (i = 0; i < wr->num_sge; i++) {
+		if ((plen + wr->sg_list[i].length) > max)
+			return -EMSGSIZE;
+		srcp = (u8 *)(unsigned long)wr->sg_list[i].addr;
+		plen += wr->sg_list[i].length;
+		rem = wr->sg_list[i].length;
+		while (rem) {
+			if (dstp == (u8 *)&sq->queue[sq->size])
+				dstp = (u8 *)sq->queue;
+			if (rem <= (u8 *)&sq->queue[sq->size] - dstp)
+				len = rem;
+			else
+				len = (u8 *)&sq->queue[sq->size] - dstp;
+			memcpy(dstp, srcp, len);
+			dstp += len;
+			srcp += len;
+			rem -= len;
+		}
+	}
+	immdp->op = FW_RI_DATA_IMMD;
+	immdp->r1 = 0;
+	immdp->r2 = 0;
+	immdp->immdlen = cpu_to_be32(plen);
+	*plenp = plen;
+	return 0;
+}
+
+static int build_isgl(__be64 *queue_start, __be64 *queue_end,
+		      struct fw_ri_isgl *isglp, struct ib_sge *sg_list,
+		      int num_sge, u32 *plenp)
+
 {
 	int i;
+	u32 plen = 0;
+	__be64 *flitp = (__be64 *)isglp->sge;
+
+	for (i = 0; i < num_sge; i++) {
+		if ((plen + sg_list[i].length) < plen)
+			return -EMSGSIZE;
+		plen += sg_list[i].length;
+		*flitp = cpu_to_be64(((u64)sg_list[i].lkey << 32) |
+				     sg_list[i].length);
+		if (++flitp == queue_end)
+			flitp = queue_start;
+		*flitp = cpu_to_be64(sg_list[i].addr);
+		if (++flitp == queue_end)
+			flitp = queue_start;
+	}
+	isglp->op = FW_RI_DATA_ISGL;
+	isglp->r1 = 0;
+	isglp->nsge = cpu_to_be16(num_sge);
+	isglp->r2 = 0;
+	if (plenp)
+		*plenp = plen;
+	return 0;
+}
+
+static int build_rdma_send(struct t4_sq *sq, union t4_wr *wqe,
+			   struct ib_send_wr *wr, u8 *len16)
+{
 	u32 plen;
 	int size;
-	u8 *datap;
+	int ret;
 
 	if (wr->num_sge > T4_MAX_SEND_SGE)
 		return -EINVAL;
@@ -267,43 +333,23 @@
 	default:
 		return -EINVAL;
 	}
+
 	plen = 0;
 	if (wr->num_sge) {
 		if (wr->send_flags & IB_SEND_INLINE) {
-			datap = (u8 *)wqe->send.u.immd_src[0].data;
-			for (i = 0; i < wr->num_sge; i++) {
-				if ((plen + wr->sg_list[i].length) >
-				    T4_MAX_SEND_INLINE) {
-					return -EMSGSIZE;
-				}
-				plen += wr->sg_list[i].length;
-				memcpy(datap,
-				     (void *)(unsigned long)wr->sg_list[i].addr,
-				     wr->sg_list[i].length);
-				datap += wr->sg_list[i].length;
-			}
-			wqe->send.u.immd_src[0].op = FW_RI_DATA_IMMD;
-			wqe->send.u.immd_src[0].r1 = 0;
-			wqe->send.u.immd_src[0].r2 = 0;
-			wqe->send.u.immd_src[0].immdlen = cpu_to_be32(plen);
+			ret = build_immd(sq, wqe->send.u.immd_src, wr,
+					 T4_MAX_SEND_INLINE, &plen);
+			if (ret)
+				return ret;
 			size = sizeof wqe->send + sizeof(struct fw_ri_immd) +
 			       plen;
 		} else {
-			for (i = 0; i < wr->num_sge; i++) {
-				if ((plen + wr->sg_list[i].length) < plen)
-					return -EMSGSIZE;
-				plen += wr->sg_list[i].length;
-				wqe->send.u.isgl_src[0].sge[i].stag =
-					cpu_to_be32(wr->sg_list[i].lkey);
-				wqe->send.u.isgl_src[0].sge[i].len =
-					cpu_to_be32(wr->sg_list[i].length);
-				wqe->send.u.isgl_src[0].sge[i].to =
-					cpu_to_be64(wr->sg_list[i].addr);
-			}
-			wqe->send.u.isgl_src[0].op = FW_RI_DATA_ISGL;
-			wqe->send.u.isgl_src[0].r1 = 0;
-			wqe->send.u.isgl_src[0].nsge = cpu_to_be16(wr->num_sge);
-			wqe->send.u.isgl_src[0].r2 = 0;
+			ret = build_isgl((__be64 *)sq->queue,
+					 (__be64 *)&sq->queue[sq->size],
+					 wqe->send.u.isgl_src,
+					 wr->sg_list, wr->num_sge, &plen);
+			if (ret)
+				return ret;
 			size = sizeof wqe->send + sizeof(struct fw_ri_isgl) +
 			       wr->num_sge * sizeof(struct fw_ri_sge);
 		}
@@ -313,62 +359,40 @@
 		wqe->send.u.immd_src[0].r2 = 0;
 		wqe->send.u.immd_src[0].immdlen = 0;
 		size = sizeof wqe->send + sizeof(struct fw_ri_immd);
+		plen = 0;
 	}
 	*len16 = DIV_ROUND_UP(size, 16);
 	wqe->send.plen = cpu_to_be32(plen);
 	return 0;
 }
 
-static int build_rdma_write(union t4_wr *wqe, struct ib_send_wr *wr, u8 *len16)
+static int build_rdma_write(struct t4_sq *sq, union t4_wr *wqe,
+			    struct ib_send_wr *wr, u8 *len16)
 {
-	int i;
 	u32 plen;
 	int size;
-	u8 *datap;
+	int ret;
 
-	if (wr->num_sge > T4_MAX_WRITE_SGE)
+	if (wr->num_sge > T4_MAX_SEND_SGE)
 		return -EINVAL;
 	wqe->write.r2 = 0;
 	wqe->write.stag_sink = cpu_to_be32(wr->wr.rdma.rkey);
 	wqe->write.to_sink = cpu_to_be64(wr->wr.rdma.remote_addr);
-	plen = 0;
 	if (wr->num_sge) {
 		if (wr->send_flags & IB_SEND_INLINE) {
-			datap = (u8 *)wqe->write.u.immd_src[0].data;
-			for (i = 0; i < wr->num_sge; i++) {
-				if ((plen + wr->sg_list[i].length) >
-				    T4_MAX_WRITE_INLINE) {
-					return -EMSGSIZE;
-				}
-				plen += wr->sg_list[i].length;
-				memcpy(datap,
-				     (void *)(unsigned long)wr->sg_list[i].addr,
-				     wr->sg_list[i].length);
-				datap += wr->sg_list[i].length;
-			}
-			wqe->write.u.immd_src[0].op = FW_RI_DATA_IMMD;
-			wqe->write.u.immd_src[0].r1 = 0;
-			wqe->write.u.immd_src[0].r2 = 0;
-			wqe->write.u.immd_src[0].immdlen = cpu_to_be32(plen);
+			ret = build_immd(sq, wqe->write.u.immd_src, wr,
+					 T4_MAX_WRITE_INLINE, &plen);
+			if (ret)
+				return ret;
 			size = sizeof wqe->write + sizeof(struct fw_ri_immd) +
 			       plen;
 		} else {
-			for (i = 0; i < wr->num_sge; i++) {
-				if ((plen + wr->sg_list[i].length) < plen)
-					return -EMSGSIZE;
-				plen += wr->sg_list[i].length;
-				wqe->write.u.isgl_src[0].sge[i].stag =
-					cpu_to_be32(wr->sg_list[i].lkey);
-				wqe->write.u.isgl_src[0].sge[i].len =
-					cpu_to_be32(wr->sg_list[i].length);
-				wqe->write.u.isgl_src[0].sge[i].to =
-					cpu_to_be64(wr->sg_list[i].addr);
-			}
-			wqe->write.u.isgl_src[0].op = FW_RI_DATA_ISGL;
-			wqe->write.u.isgl_src[0].r1 = 0;
-			wqe->write.u.isgl_src[0].nsge =
-						       cpu_to_be16(wr->num_sge);
-			wqe->write.u.isgl_src[0].r2 = 0;
+			ret = build_isgl((__be64 *)sq->queue,
+					 (__be64 *)&sq->queue[sq->size],
+					 wqe->write.u.isgl_src,
+					 wr->sg_list, wr->num_sge, &plen);
+			if (ret)
+				return ret;
 			size = sizeof wqe->write + sizeof(struct fw_ri_isgl) +
 			       wr->num_sge * sizeof(struct fw_ri_sge);
 		}
@@ -378,6 +402,7 @@
 		wqe->write.u.immd_src[0].r2 = 0;
 		wqe->write.u.immd_src[0].immdlen = 0;
 		size = sizeof wqe->write + sizeof(struct fw_ri_immd);
+		plen = 0;
 	}
 	*len16 = DIV_ROUND_UP(size, 16);
 	wqe->write.plen = cpu_to_be32(plen);
@@ -416,29 +441,13 @@
 static int build_rdma_recv(struct c4iw_qp *qhp, union t4_recv_wr *wqe,
 			   struct ib_recv_wr *wr, u8 *len16)
 {
-	int i;
-	int plen = 0;
+	int ret;
 
-	for (i = 0; i < wr->num_sge; i++) {
-		if ((plen + wr->sg_list[i].length) < plen)
-			return -EMSGSIZE;
-		plen += wr->sg_list[i].length;
-		wqe->recv.isgl.sge[i].stag =
-			cpu_to_be32(wr->sg_list[i].lkey);
-		wqe->recv.isgl.sge[i].len =
-			cpu_to_be32(wr->sg_list[i].length);
-		wqe->recv.isgl.sge[i].to =
-			cpu_to_be64(wr->sg_list[i].addr);
-	}
-	for (; i < T4_MAX_RECV_SGE; i++) {
-		wqe->recv.isgl.sge[i].stag = 0;
-		wqe->recv.isgl.sge[i].len = 0;
-		wqe->recv.isgl.sge[i].to = 0;
-	}
-	wqe->recv.isgl.op = FW_RI_DATA_ISGL;
-	wqe->recv.isgl.r1 = 0;
-	wqe->recv.isgl.nsge = cpu_to_be16(wr->num_sge);
-	wqe->recv.isgl.r2 = 0;
+	ret = build_isgl((__be64 *)qhp->wq.rq.queue,
+			 (__be64 *)&qhp->wq.rq.queue[qhp->wq.rq.size],
+			 &wqe->recv.isgl, wr->sg_list, wr->num_sge, NULL);
+	if (ret)
+		return ret;
 	*len16 = DIV_ROUND_UP(sizeof wqe->recv +
 			      wr->num_sge * sizeof(struct fw_ri_sge), 16);
 	return 0;
@@ -547,7 +556,9 @@
 			*bad_wr = wr;
 			break;
 		}
-		wqe = &qhp->wq.sq.queue[qhp->wq.sq.pidx];
+		wqe = (union t4_wr *)((u8 *)qhp->wq.sq.queue +
+		      qhp->wq.sq.wq_pidx * T4_EQ_ENTRY_SIZE);
+
 		fw_flags = 0;
 		if (wr->send_flags & IB_SEND_SOLICITED)
 			fw_flags |= FW_RI_SOLICITED_EVENT_FLAG;
@@ -564,12 +575,12 @@
 				swsqe->opcode = FW_RI_SEND;
 			else
 				swsqe->opcode = FW_RI_SEND_WITH_INV;
-			err = build_rdma_send(wqe, wr, &len16);
+			err = build_rdma_send(&qhp->wq.sq, wqe, wr, &len16);
 			break;
 		case IB_WR_RDMA_WRITE:
 			fw_opcode = FW_RI_RDMA_WRITE_WR;
 			swsqe->opcode = FW_RI_RDMA_WRITE;
-			err = build_rdma_write(wqe, wr, &len16);
+			err = build_rdma_write(&qhp->wq.sq, wqe, wr, &len16);
 			break;
 		case IB_WR_RDMA_READ:
 		case IB_WR_RDMA_READ_WITH_INV:
@@ -619,8 +630,8 @@
 		     swsqe->opcode, swsqe->read_len);
 		wr = wr->next;
 		num_wrs--;
-		t4_sq_produce(&qhp->wq);
-		idx++;
+		t4_sq_produce(&qhp->wq, len16);
+		idx += DIV_ROUND_UP(len16*16, T4_EQ_ENTRY_SIZE);
 	}
 	if (t4_wq_db_enabled(&qhp->wq))
 		t4_ring_sq_db(&qhp->wq, idx);
@@ -656,7 +667,9 @@
 			*bad_wr = wr;
 			break;
 		}
-		wqe = &qhp->wq.rq.queue[qhp->wq.rq.pidx];
+		wqe = (union t4_recv_wr *)((u8 *)qhp->wq.rq.queue +
+					   qhp->wq.rq.wq_pidx *
+					   T4_EQ_ENTRY_SIZE);
 		if (num_wrs)
 			err = build_rdma_recv(qhp, wqe, wr, &len16);
 		else
@@ -675,15 +688,12 @@
 		wqe->recv.r2[1] = 0;
 		wqe->recv.r2[2] = 0;
 		wqe->recv.len16 = len16;
-		if (len16 < 5)
-			wqe->flits[8] = 0;
-
 		PDBG("%s cookie 0x%llx pidx %u\n", __func__,
 		     (unsigned long long) wr->wr_id, qhp->wq.rq.pidx);
-		t4_rq_produce(&qhp->wq);
+		t4_rq_produce(&qhp->wq, len16);
+		idx += DIV_ROUND_UP(len16*16, T4_EQ_ENTRY_SIZE);
 		wr = wr->next;
 		num_wrs--;
-		idx++;
 	}
 	if (t4_wq_db_enabled(&qhp->wq))
 		t4_ring_rq_db(&qhp->wq, idx);
@@ -951,7 +961,8 @@
 	__flush_qp(qhp, rchp, schp, flag);
 }
 
-static int rdma_fini(struct c4iw_dev *rhp, struct c4iw_qp *qhp)
+static int rdma_fini(struct c4iw_dev *rhp, struct c4iw_qp *qhp,
+		     struct c4iw_ep *ep)
 {
 	struct fw_ri_wr *wqe;
 	int ret;
@@ -959,12 +970,12 @@
 	struct sk_buff *skb;
 
 	PDBG("%s qhp %p qid 0x%x tid %u\n", __func__, qhp, qhp->wq.sq.qid,
-	     qhp->ep->hwtid);
+	     ep->hwtid);
 
-	skb = alloc_skb(sizeof *wqe, GFP_KERNEL | __GFP_NOFAIL);
+	skb = alloc_skb(sizeof *wqe, GFP_KERNEL);
 	if (!skb)
 		return -ENOMEM;
-	set_wr_txq(skb, CPL_PRIORITY_DATA, qhp->ep->txq_idx);
+	set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);
 
 	wqe = (struct fw_ri_wr *)__skb_put(skb, sizeof(*wqe));
 	memset(wqe, 0, sizeof *wqe);
@@ -972,7 +983,7 @@
 		FW_WR_OP(FW_RI_INIT_WR) |
 		FW_WR_COMPL(1));
 	wqe->flowid_len16 = cpu_to_be32(
-		FW_WR_FLOWID(qhp->ep->hwtid) |
+		FW_WR_FLOWID(ep->hwtid) |
 		FW_WR_LEN16(DIV_ROUND_UP(sizeof *wqe, 16)));
 	wqe->cookie = (u64)&wr_wait;
 
@@ -1035,7 +1046,7 @@
 	PDBG("%s qhp %p qid 0x%x tid %u\n", __func__, qhp, qhp->wq.sq.qid,
 	     qhp->ep->hwtid);
 
-	skb = alloc_skb(sizeof *wqe, GFP_KERNEL | __GFP_NOFAIL);
+	skb = alloc_skb(sizeof *wqe, GFP_KERNEL);
 	if (!skb)
 		return -ENOMEM;
 	set_wr_txq(skb, CPL_PRIORITY_DATA, qhp->ep->txq_idx);
@@ -1202,17 +1213,16 @@
 		case C4IW_QP_STATE_CLOSING:
 			BUG_ON(atomic_read(&qhp->ep->com.kref.refcount) < 2);
 			qhp->attr.state = C4IW_QP_STATE_CLOSING;
+			ep = qhp->ep;
 			if (!internal) {
 				abort = 0;
 				disconnect = 1;
-				ep = qhp->ep;
 				c4iw_get_ep(&ep->com);
 			}
 			spin_unlock_irqrestore(&qhp->lock, flag);
-			ret = rdma_fini(rhp, qhp);
+			ret = rdma_fini(rhp, qhp, ep);
 			spin_lock_irqsave(&qhp->lock, flag);
 			if (ret) {
-				ep = qhp->ep;
 				c4iw_get_ep(&ep->com);
 				disconnect = abort = 1;
 				goto err;
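
Note on the new build_immd()/build_isgl() helpers in qp.c: they treat the send queue as a byte ring, so a WR may start anywhere and the destination pointer wraps from sq->queue[sq->size] back to sq->queue mid-copy. A runnable userspace model of that split copy (ring size and payload are hypothetical):

    #include <stdio.h>
    #include <string.h>

    #define RING_BYTES 8

    static char ring[RING_BYTES];

    /* Copy len bytes into the ring starting at offset off, wrapping at
     * the end, as build_immd() does with the send queue. */
    static void ring_copy(size_t off, const char *src, size_t len)
    {
        while (len) {
            size_t chunk = RING_BYTES - off;    /* room before the end */

            if (chunk > len)
                chunk = len;
            memcpy(ring + off, src, chunk);
            off = (off + chunk) % RING_BYTES;   /* wrap to the start */
            src += chunk;
            len -= chunk;
        }
    }

    int main(void)
    {
        ring_copy(6, "abcd", 4);        /* "ab" at 6..7, "cd" wraps to 0..1 */
        printf("%.2s %.2s\n", ring, ring + 6);  /* prints: cd ab */
        return 0;
    }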
diff --git a/drivers/infiniband/hw/cxgb4/t4.h b/drivers/infiniband/hw/cxgb4/t4.h
index 9cf8d85..aef55f4 100644
--- a/drivers/infiniband/hw/cxgb4/t4.h
+++ b/drivers/infiniband/hw/cxgb4/t4.h
@@ -65,10 +65,10 @@
 	u8 db_off;
 };
 
-#define T4_EQ_SIZE 64
+#define T4_EQ_ENTRY_SIZE 64
 
 #define T4_SQ_NUM_SLOTS 4
-#define T4_SQ_NUM_BYTES (T4_EQ_SIZE * T4_SQ_NUM_SLOTS)
+#define T4_SQ_NUM_BYTES (T4_EQ_ENTRY_SIZE * T4_SQ_NUM_SLOTS)
 #define T4_MAX_SEND_SGE ((T4_SQ_NUM_BYTES - sizeof(struct fw_ri_send_wr) - \
 			sizeof(struct fw_ri_isgl)) / sizeof(struct fw_ri_sge))
 #define T4_MAX_SEND_INLINE ((T4_SQ_NUM_BYTES - sizeof(struct fw_ri_send_wr) - \
@@ -84,7 +84,7 @@
 #define T4_MAX_FR_DEPTH (T4_MAX_FR_IMMD / sizeof(u64))
 
 #define T4_RQ_NUM_SLOTS 2
-#define T4_RQ_NUM_BYTES (T4_EQ_SIZE * T4_RQ_NUM_SLOTS)
+#define T4_RQ_NUM_BYTES (T4_EQ_ENTRY_SIZE * T4_RQ_NUM_SLOTS)
 #define T4_MAX_RECV_SGE 4
 
 union t4_wr {
@@ -97,20 +97,18 @@
 	struct fw_ri_fr_nsmr_wr fr;
 	struct fw_ri_inv_lstag_wr inv;
 	struct t4_status_page status;
-	__be64 flits[T4_EQ_SIZE / sizeof(__be64) * T4_SQ_NUM_SLOTS];
+	__be64 flits[T4_EQ_ENTRY_SIZE / sizeof(__be64) * T4_SQ_NUM_SLOTS];
 };
 
 union t4_recv_wr {
 	struct fw_ri_recv_wr recv;
 	struct t4_status_page status;
-	__be64 flits[T4_EQ_SIZE / sizeof(__be64) * T4_RQ_NUM_SLOTS];
+	__be64 flits[T4_EQ_ENTRY_SIZE / sizeof(__be64) * T4_RQ_NUM_SLOTS];
 };
 
 static inline void init_wr_hdr(union t4_wr *wqe, u16 wrid,
 			       enum fw_wr_opcodes opcode, u8 flags, u8 len16)
 {
-	int slots_used;
-
 	wqe->send.opcode = (u8)opcode;
 	wqe->send.flags = flags;
 	wqe->send.wrid = wrid;
@@ -118,12 +116,6 @@
 	wqe->send.r1[1] = 0;
 	wqe->send.r1[2] = 0;
 	wqe->send.len16 = len16;
-
-	slots_used = DIV_ROUND_UP(len16*16, T4_EQ_SIZE);
-	while (slots_used < T4_SQ_NUM_SLOTS) {
-		wqe->flits[slots_used * T4_EQ_SIZE / sizeof(__be64)] = 0;
-		slots_used++;
-	}
 }
 
 /* CQE/AE status codes */
@@ -289,6 +281,7 @@
 	u16 size;
 	u16 cidx;
 	u16 pidx;
+	u16 wq_pidx;
 };
 
 struct t4_swrqe {
@@ -310,6 +303,7 @@
 	u16 size;
 	u16 cidx;
 	u16 pidx;
+	u16 wq_pidx;
 };
 
 struct t4_wq {
@@ -340,11 +334,14 @@
 	return wq->rq.size - 1 - wq->rq.in_use;
 }
 
-static inline void t4_rq_produce(struct t4_wq *wq)
+static inline void t4_rq_produce(struct t4_wq *wq, u8 len16)
 {
 	wq->rq.in_use++;
 	if (++wq->rq.pidx == wq->rq.size)
 		wq->rq.pidx = 0;
+	wq->rq.wq_pidx += DIV_ROUND_UP(len16*16, T4_EQ_ENTRY_SIZE);
+	if (wq->rq.wq_pidx >= wq->rq.size * T4_RQ_NUM_SLOTS)
+		wq->rq.wq_pidx %= wq->rq.size * T4_RQ_NUM_SLOTS;
 }
 
 static inline void t4_rq_consume(struct t4_wq *wq)
@@ -370,11 +367,14 @@
 	return wq->sq.size - 1 - wq->sq.in_use;
 }
 
-static inline void t4_sq_produce(struct t4_wq *wq)
+static inline void t4_sq_produce(struct t4_wq *wq, u8 len16)
 {
 	wq->sq.in_use++;
 	if (++wq->sq.pidx == wq->sq.size)
 		wq->sq.pidx = 0;
+	wq->sq.wq_pidx += DIV_ROUND_UP(len16*16, T4_EQ_ENTRY_SIZE);
+	if (wq->sq.wq_pidx >= wq->sq.size * T4_SQ_NUM_SLOTS)
+		wq->sq.wq_pidx %= wq->sq.size * T4_SQ_NUM_SLOTS;
 }
 
 static inline void t4_sq_consume(struct t4_wq *wq)
@@ -386,14 +386,12 @@
 
 static inline void t4_ring_sq_db(struct t4_wq *wq, u16 inc)
 {
-	inc *= T4_SQ_NUM_SLOTS;
 	wmb();
 	writel(QID(wq->sq.qid) | PIDX(inc), wq->db);
 }
 
 static inline void t4_ring_rq_db(struct t4_wq *wq, u16 inc)
 {
-	inc *= T4_RQ_NUM_SLOTS;
 	wmb();
 	writel(QID(wq->rq.qid) | PIDX(inc), wq->db);
 }
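
Note on the t4.h changes: supporting variable-sized WRs splits the producer index in two. sq.pidx/rq.pidx still count work requests (indexing the software queues), while the new wq_pidx counts 64-byte hardware slots, advancing by DIV_ROUND_UP(len16*16, T4_EQ_ENTRY_SIZE) modulo size * NUM_SLOTS; the doorbell inc is likewise a slot count now, so the old "inc *= T4_*_NUM_SLOTS" scaling goes away. The arithmetic, modelled in a few lines (SQ_SIZE hypothetical):

    #include <stdio.h>

    #define T4_EQ_ENTRY_SIZE 64
    #define T4_SQ_NUM_SLOTS  4       /* 64-byte slots per maximum-size WR */
    #define SQ_SIZE          16      /* WR capacity (hypothetical) */

    #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

    int main(void)
    {
        unsigned pidx = 0, wq_pidx = 0;
        unsigned char len16 = 5;     /* one 80-byte WR = 5 * 16 bytes */

        /* As in t4_sq_produce(): one WR consumed, two slots consumed. */
        pidx = (pidx + 1) % SQ_SIZE;
        wq_pidx = (wq_pidx + DIV_ROUND_UP(len16 * 16, T4_EQ_ENTRY_SIZE))
                  % (SQ_SIZE * T4_SQ_NUM_SLOTS);

        printf("pidx=%u wq_pidx=%u\n", pidx, wq_pidx);  /* pidx=1 wq_pidx=2 */
        return 0;
    }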
diff --git a/drivers/infiniband/hw/cxgb4/t4fw_ri_api.h b/drivers/infiniband/hw/cxgb4/t4fw_ri_api.h
index fc706bd..dc193c2 100644
--- a/drivers/infiniband/hw/cxgb4/t4fw_ri_api.h
+++ b/drivers/infiniband/hw/cxgb4/t4fw_ri_api.h
@@ -826,4 +826,14 @@
 #define S_ULPTX_NSGE    0
 #define M_ULPTX_NSGE    0xFFFF
 #define V_ULPTX_NSGE(x) ((x) << S_ULPTX_NSGE)
+
+#define S_RX_DACK_MODE    29
+#define M_RX_DACK_MODE    0x3
+#define V_RX_DACK_MODE(x) ((x) << S_RX_DACK_MODE)
+#define G_RX_DACK_MODE(x) (((x) >> S_RX_DACK_MODE) & M_RX_DACK_MODE)
+
+#define S_RX_DACK_CHANGE    31
+#define V_RX_DACK_CHANGE(x) ((x) << S_RX_DACK_CHANGE)
+#define F_RX_DACK_CHANGE    V_RX_DACK_CHANGE(1U)
+
 #endif /* _T4FW_RI_API_H_ */
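
Note on the added definitions: they follow the Chelsio register-field naming scheme, where S_ is the bit shift, M_ the field mask, V_(x) places a value, G_(x) extracts it, and F_ is V_(1U) for a single-bit flag (the combination credit_dack uses above). A quick round trip with the new macros:

    #include <stdio.h>

    #define S_RX_DACK_MODE    29
    #define M_RX_DACK_MODE    0x3
    #define V_RX_DACK_MODE(x) ((x) << S_RX_DACK_MODE)
    #define G_RX_DACK_MODE(x) (((x) >> S_RX_DACK_MODE) & M_RX_DACK_MODE)

    #define S_RX_DACK_CHANGE    31
    #define V_RX_DACK_CHANGE(x) ((x) << S_RX_DACK_CHANGE)
    #define F_RX_DACK_CHANGE    V_RX_DACK_CHANGE(1U)

    int main(void)
    {
        unsigned int word = F_RX_DACK_CHANGE | V_RX_DACK_MODE(2u);

        /* Extract what was placed: prints "mode=2 change=1". */
        printf("mode=%u change=%u\n", G_RX_DACK_MODE(word),
               (word >> S_RX_DACK_CHANGE) & 1u);
        return 0;
    }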
diff --git a/drivers/infiniband/hw/ehca/ehca_eq.c b/drivers/infiniband/hw/ehca/ehca_eq.c
index 3b87589..d9b1bb4 100644
--- a/drivers/infiniband/hw/ehca/ehca_eq.c
+++ b/drivers/infiniband/hw/ehca/ehca_eq.c
@@ -122,21 +122,21 @@
 
 	/* register interrupt handlers and initialize work queues */
 	if (type == EHCA_EQ) {
+		tasklet_init(&eq->interrupt_task, ehca_tasklet_eq, (long)shca);
+
 		ret = ibmebus_request_irq(eq->ist, ehca_interrupt_eq,
 					  IRQF_DISABLED, "ehca_eq",
 					  (void *)shca);
 		if (ret < 0)
 			ehca_err(ib_dev, "Can't map interrupt handler.");
-
-		tasklet_init(&eq->interrupt_task, ehca_tasklet_eq, (long)shca);
 	} else if (type == EHCA_NEQ) {
+		tasklet_init(&eq->interrupt_task, ehca_tasklet_neq, (long)shca);
+
 		ret = ibmebus_request_irq(eq->ist, ehca_interrupt_neq,
 					  IRQF_DISABLED, "ehca_neq",
 					  (void *)shca);
 		if (ret < 0)
 			ehca_err(ib_dev, "Can't map interrupt handler.");
-
-		tasklet_init(&eq->interrupt_task, ehca_tasklet_neq, (long)shca);
 	}
 
 	eq->is_initialized = 1;
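
Note on the ehca_eq.c reorder: it closes a startup race. An interrupt can arrive, and try to schedule the tasklet, as soon as ibmebus_request_irq() returns, so the tasklet must be initialized first. The general rule in sketch form (generic request_irq() shown; handler and cookie names are hypothetical):

    /* Everything the IRQ handler may touch must exist before
     * request_irq(), because the interrupt can fire immediately. */
    tasklet_init(&eq->interrupt_task, my_tasklet_fn, (unsigned long)shca);

    ret = request_irq(irq, my_irq_handler, 0, "my_eq", shca);
    if (ret)
        pr_err("can't map interrupt handler\n");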
diff --git a/drivers/infiniband/hw/ehca/ehca_main.c b/drivers/infiniband/hw/ehca/ehca_main.c
index ecb51b3..cfc4de7 100644
--- a/drivers/infiniband/hw/ehca/ehca_main.c
+++ b/drivers/infiniband/hw/ehca/ehca_main.c
@@ -360,7 +360,8 @@
 	 * a firmware property, so it's valid across all adapters
 	 */
 	if (ehca_lock_hcalls == -1)
-		ehca_lock_hcalls = !(shca->hca_cap & HCA_CAP_H_ALLOC_RES_SYNC);
+		ehca_lock_hcalls = !EHCA_BMASK_GET(HCA_CAP_H_ALLOC_RES_SYNC,
+					shca->hca_cap);
 
 	/* translate supported MR page sizes; always support 4K */
 	shca->hca_cap_mr_pgsize = EHCA_PAGESIZE;
diff --git a/drivers/infiniband/hw/ehca/ehca_mrmw.c b/drivers/infiniband/hw/ehca/ehca_mrmw.c
index 31a68b9..53f4cd4 100644
--- a/drivers/infiniband/hw/ehca/ehca_mrmw.c
+++ b/drivers/infiniband/hw/ehca/ehca_mrmw.c
@@ -933,11 +933,6 @@
 	/* check all FMR belong to same SHCA, and check internal flag */
 	list_for_each_entry(ib_fmr, fmr_list, list) {
 		prev_shca = shca;
-		if (!ib_fmr) {
-			ehca_gen_err("bad fmr=%p in list", ib_fmr);
-			ret = -EINVAL;
-			goto unmap_fmr_exit0;
-		}
 		shca = container_of(ib_fmr->device, struct ehca_shca,
 				    ib_device);
 		e_fmr = container_of(ib_fmr, struct ehca_mr, ib.ib_fmr);
diff --git a/drivers/infiniband/hw/ehca/ehca_qp.c b/drivers/infiniband/hw/ehca/ehca_qp.c
index 47d388e..32fb342 100644
--- a/drivers/infiniband/hw/ehca/ehca_qp.c
+++ b/drivers/infiniband/hw/ehca/ehca_qp.c
@@ -251,7 +251,7 @@
 		return ST_UD;
 	case IB_QPT_RAW_IPV6:
 		return -EINVAL;
-	case IB_QPT_RAW_ETY:
+	case IB_QPT_RAW_ETHERTYPE:
 		return -EINVAL;
 	default:
 		ehca_gen_err("Invalid ibqptype=%x", ibqptype);
diff --git a/drivers/infiniband/hw/ehca/hcp_if.c b/drivers/infiniband/hw/ehca/hcp_if.c
index 4d5dc33..e6f9cdd 100644
--- a/drivers/infiniband/hw/ehca/hcp_if.c
+++ b/drivers/infiniband/hw/ehca/hcp_if.c
@@ -269,6 +269,7 @@
 			     struct ehca_cq *cq,
 			     struct ehca_alloc_cq_parms *param)
 {
+	int rc;
 	u64 ret;
 	unsigned long outs[PLPAR_HCALL9_BUFSIZE];
 
@@ -283,8 +284,19 @@
 	param->act_nr_of_entries = (u32)outs[3];
 	param->act_pages = (u32)outs[4];
 
-	if (ret == H_SUCCESS)
-		hcp_galpas_ctor(&cq->galpas, 0, outs[5], outs[6]);
+	if (ret == H_SUCCESS) {
+		rc = hcp_galpas_ctor(&cq->galpas, 0, outs[5], outs[6]);
+		if (rc) {
+			ehca_gen_err("Could not establish HW access. rc=%d paddr=%#lx",
+				     rc, outs[5]);
+
+			ehca_plpar_hcall_norets(H_FREE_RESOURCE,
+						adapter_handle.handle,     /* r4 */
+						cq->ipz_cq_handle.handle,  /* r5 */
+						0, 0, 0, 0, 0);
+			ret = H_NO_MEM;
+		}
+	}
 
 	if (ret == H_NOT_ENOUGH_RESOURCES)
 		ehca_gen_err("Not enough resources. ret=%lli", ret);
@@ -295,6 +307,7 @@
 u64 hipz_h_alloc_resource_qp(const struct ipz_adapter_handle adapter_handle,
 			     struct ehca_alloc_qp_parms *parms, int is_user)
 {
+	int rc;
 	u64 ret;
 	u64 allocate_controls, max_r10_reg, r11, r12;
 	unsigned long outs[PLPAR_HCALL9_BUFSIZE];
@@ -358,8 +371,19 @@
 	parms->rqueue.queue_size =
 		(u32)EHCA_BMASK_GET(H_ALL_RES_QP_RQUEUE_SIZE_PAGES, outs[4]);
 
-	if (ret == H_SUCCESS)
-		hcp_galpas_ctor(&parms->galpas, is_user, outs[6], outs[6]);
+	if (ret == H_SUCCESS) {
+		rc = hcp_galpas_ctor(&parms->galpas, is_user, outs[6], outs[6]);
+		if (rc) {
+			ehca_gen_err("Could not establish HW access. rc=%d paddr=%#lx",
+				     rc, outs[6]);
+
+			ehca_plpar_hcall_norets(H_FREE_RESOURCE,
+						adapter_handle.handle,     /* r4 */
+						parms->qp_handle.handle,  /* r5 */
+						0, 0, 0, 0, 0);
+			ret = H_NO_MEM;
+		}
+	}
 
 	if (ret == H_NOT_ENOUGH_RESOURCES)
 		ehca_gen_err("Not enough resources. ret=%lli", ret);
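
Note on the two hcp_if.c hunks: both add the same unwind step. If hcp_galpas_ctor() cannot map the firmware resource that the preceding hcall allocated, the resource is handed back with H_FREE_RESOURCE and the call reports H_NO_MEM instead of leaking it. The pattern in outline (helper names hypothetical; the real calls are the ehca_plpar_hcall variants above):

    /* Allocate, then map; if the map fails, free the allocation so the
     * firmware resource is not leaked. */
    ret = h_alloc_resource(adapter, &handle, outs);
    if (ret == H_SUCCESS) {
        rc = map_resource(&galpas, outs);
        if (rc) {
            pr_err("could not establish HW access, rc=%d\n", rc);
            h_free_resource(adapter, handle);   /* undo the alloc */
            ret = H_NO_MEM;
        }
    }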
diff --git a/drivers/infiniband/hw/ehca/hcp_phyp.c b/drivers/infiniband/hw/ehca/hcp_phyp.c
index b3e0e72..077376f 100644
--- a/drivers/infiniband/hw/ehca/hcp_phyp.c
+++ b/drivers/infiniband/hw/ehca/hcp_phyp.c
@@ -42,10 +42,9 @@
 #include "ehca_classes.h"
 #include "hipz_hw.h"
 
-int hcall_map_page(u64 physaddr, u64 *mapaddr)
+u64 hcall_map_page(u64 physaddr)
 {
-	*mapaddr = (u64)(ioremap(physaddr, EHCA_PAGESIZE));
-	return 0;
+	return (u64)ioremap(physaddr, EHCA_PAGESIZE);
 }
 
 int hcall_unmap_page(u64 mapaddr)
@@ -58,9 +57,9 @@
 		    u64 paddr_kernel, u64 paddr_user)
 {
 	if (!is_user) {
-		int ret = hcall_map_page(paddr_kernel, &galpas->kernel.fw_handle);
-		if (ret)
-			return ret;
+		galpas->kernel.fw_handle = hcall_map_page(paddr_kernel);
+		if (!galpas->kernel.fw_handle)
+			return -ENOMEM;
 	} else
 		galpas->kernel.fw_handle = 0;
 
diff --git a/drivers/infiniband/hw/ehca/hcp_phyp.h b/drivers/infiniband/hw/ehca/hcp_phyp.h
index 204227d..d1b0299 100644
--- a/drivers/infiniband/hw/ehca/hcp_phyp.h
+++ b/drivers/infiniband/hw/ehca/hcp_phyp.h
@@ -83,7 +83,7 @@
 
 int hcp_galpas_dtor(struct h_galpas *galpas);
 
-int hcall_map_page(u64 physaddr, u64 * mapaddr);
+u64 hcall_map_page(u64 physaddr);
 
 int hcall_unmap_page(u64 mapaddr);
 
diff --git a/drivers/infiniband/hw/ipath/ipath_driver.c b/drivers/infiniband/hw/ipath/ipath_driver.c
index 2133746..765f0fc 100644
--- a/drivers/infiniband/hw/ipath/ipath_driver.c
+++ b/drivers/infiniband/hw/ipath/ipath_driver.c
@@ -390,6 +390,8 @@
 	ipath_enable_armlaunch(dd);
 }
 
+static void cleanup_device(struct ipath_devdata *dd);
+
 static int __devinit ipath_init_one(struct pci_dev *pdev,
 				    const struct pci_device_id *ent)
 {
@@ -616,8 +618,13 @@
 	goto bail;
 
 bail_irqsetup:
-	if (pdev->irq)
-		free_irq(pdev->irq, dd);
+	cleanup_device(dd);
+
+	if (dd->ipath_irq)
+		dd->ipath_f_free_irq(dd);
+
+	if (dd->ipath_f_cleanup)
+		dd->ipath_f_cleanup(dd);
 
 bail_iounmap:
 	iounmap((volatile void __iomem *) dd->ipath_kregbase);
@@ -635,7 +642,7 @@
 	return ret;
 }
 
-static void __devexit cleanup_device(struct ipath_devdata *dd)
+static void cleanup_device(struct ipath_devdata *dd)
 {
 	int port;
 	struct ipath_portdata **tmp;
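
Note on the ipath_driver.c change: ipath_init_one() now unwinds through cleanup_device() (hence the forward declaration, and the dropped __devexit so it is callable from the probe path) instead of only freeing the IRQ when init fails late. That is the standard probe goto-ladder; condensed, with hypothetical helpers:

    /* Each failure label undoes exactly what succeeded before it,
     * in reverse order (sketch, not the ipath code). */
    static int my_probe(struct pci_dev *pdev)
    {
        int ret;

        ret = setup_regs(pdev);
        if (ret)
            goto bail;
        ret = setup_irq(pdev);
        if (ret)
            goto bail_regs;
        return 0;

    bail_regs:
        teardown_regs(pdev);    /* undo setup_regs() */
    bail:
        return ret;
    }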
diff --git a/drivers/infiniband/hw/mthca/mthca_cmd.c b/drivers/infiniband/hw/mthca/mthca_cmd.c
index 3603ae8..f4ceecd 100644
--- a/drivers/infiniband/hw/mthca/mthca_cmd.c
+++ b/drivers/infiniband/hw/mthca/mthca_cmd.c
@@ -1817,7 +1817,7 @@
 	case IB_QPT_RAW_IPV6:
 		op_mod = 2;
 		break;
-	case IB_QPT_RAW_ETY:
+	case IB_QPT_RAW_ETHERTYPE:
 		op_mod = 3;
 		break;
 	default:
diff --git a/drivers/infiniband/hw/nes/nes.c b/drivers/infiniband/hw/nes/nes.c
index de7b9d7..0c9f0aa 100644
--- a/drivers/infiniband/hw/nes/nes.c
+++ b/drivers/infiniband/hw/nes/nes.c
@@ -110,8 +110,8 @@
 static unsigned int sysfs_idx_addr;
 
 static struct pci_device_id nes_pci_table[] = {
-	{PCI_VENDOR_ID_NETEFFECT, PCI_DEVICE_ID_NETEFFECT_NE020, PCI_ANY_ID, PCI_ANY_ID},
-	{PCI_VENDOR_ID_NETEFFECT, PCI_DEVICE_ID_NETEFFECT_NE020_KR, PCI_ANY_ID, PCI_ANY_ID},
+	{ PCI_VDEVICE(NETEFFECT, PCI_DEVICE_ID_NETEFFECT_NE020), },
+	{ PCI_VDEVICE(NETEFFECT, PCI_DEVICE_ID_NETEFFECT_NE020_KR), },
 	{0}
 };
 
@@ -259,13 +259,11 @@
 	unsigned long flags;
 	struct nes_qp *nesqp = cqp_request->cqp_callback_pointer;
 	struct nes_adapter *nesadapter = nesdev->nesadapter;
-	u32 qp_id;
 
 	atomic_inc(&qps_destroyed);
 
 	/* Free the control structures */
 
-	qp_id = nesqp->hwqp.qp_id;
 	if (nesqp->pbl_vbase) {
 		pci_free_consistent(nesdev->pcidev, nesqp->qp_mem_size,
 				nesqp->hwqp.q2_vbase, nesqp->hwqp.q2_pbase);
@@ -441,7 +439,6 @@
 	struct net_device *netdev = NULL;
 	struct nes_device *nesdev = NULL;
 	int ret = 0;
-	struct nes_vnic *nesvnic = NULL;
 	void __iomem *mmio_regs = NULL;
 	u8 hw_rev;
 
@@ -664,25 +661,21 @@
 	nes_notifiers_registered++;
 
 	/* Initialize network devices */
-		if ((netdev = nes_netdev_init(nesdev, mmio_regs)) == NULL) {
-			goto bail7;
-		}
+	if ((netdev = nes_netdev_init(nesdev, mmio_regs)) == NULL)
+		goto bail7;
 
-		/* Register network device */
-		ret = register_netdev(netdev);
-		if (ret) {
-			printk(KERN_ERR PFX "Unable to register netdev, ret = %d\n", ret);
-			nes_netdev_destroy(netdev);
-			goto bail7;
-		}
+	/* Register network device */
+	ret = register_netdev(netdev);
+	if (ret) {
+		printk(KERN_ERR PFX "Unable to register netdev, ret = %d\n", ret);
+		nes_netdev_destroy(netdev);
+		goto bail7;
+	}
 
-		nes_print_macaddr(netdev);
-		/* create a CM core for this netdev */
-		nesvnic = netdev_priv(netdev);
+	nes_print_macaddr(netdev);
 
-		nesdev->netdev_count++;
-		nesdev->nesadapter->netdev_count++;
-
+	nesdev->netdev_count++;
+	nesdev->nesadapter->netdev_count++;
 
 	printk(KERN_ERR PFX "%s: NetEffect RNIC driver successfully loaded.\n",
 			pci_name(pcidev));
@@ -1104,7 +1097,7 @@
 		i++;
 	}
 
-	return  snprintf(buf, PAGE_SIZE, "0x%X\n", wqm_quanta);
+	return  snprintf(buf, PAGE_SIZE, "0x%X\n", wqm_quanta_value);
 }
 
 
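Note on the nes_pci_table rewrite: PCI_VDEVICE(vend, dev) from <linux/pci.h> is shorthand for a pci_device_id entry whose subvendor and subdevice are PCI_ANY_ID, so the macro form matches exactly the same devices as the old open-coded initializers:

    /* The macro entry above ... */
    { PCI_VDEVICE(NETEFFECT, PCI_DEVICE_ID_NETEFFECT_NE020), },
    /* ... is equivalent to the old explicit entry it replaces: */
    { PCI_VENDOR_ID_NETEFFECT, PCI_DEVICE_ID_NETEFFECT_NE020,
      PCI_ANY_ID, PCI_ANY_ID },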
diff --git a/drivers/infiniband/hw/nes/nes.h b/drivers/infiniband/hw/nes/nes.h
index cc78fee..b3d145e 100644
--- a/drivers/infiniband/hw/nes/nes.h
+++ b/drivers/infiniband/hw/nes/nes.h
@@ -262,6 +262,7 @@
 	u16                    base_doorbell_index;
 	u16                    currcq_count;
 	u16                    deepcq_count;
+	u8                     iw_status;
 	u8                     msi_enabled;
 	u8                     netdev_count;
 	u8                     napi_isr_ran;
@@ -527,6 +528,7 @@
 int nes_hw_modify_qp(struct nes_device *, struct nes_qp *, u32, u32, u32);
 int nes_modify_qp(struct ib_qp *, struct ib_qp_attr *, int, struct ib_udata *);
 struct nes_ib_device *nes_init_ofa_device(struct net_device *);
+void  nes_port_ibevent(struct nes_vnic *nesvnic);
 void nes_destroy_ofa_device(struct nes_ib_device *);
 int nes_register_ofa_device(struct nes_ib_device *);
 
diff --git a/drivers/infiniband/hw/nes/nes_cm.c b/drivers/infiniband/hw/nes/nes_cm.c
index 986d6f3..8bc11e1 100644
--- a/drivers/infiniband/hw/nes/nes_cm.c
+++ b/drivers/infiniband/hw/nes/nes_cm.c
@@ -1719,8 +1719,6 @@
 {
 	int datasize = 0;
 	u32 inc_sequence;
-	u32 rem_seq_ack;
-	u32 rem_seq;
 	int ret = 0;
 	int optionsize;
 	optionsize = (tcph->doff << 2) - sizeof(struct tcphdr);
@@ -1730,8 +1728,6 @@
 
 	skb_pull(skb, tcph->doff << 2);
 	inc_sequence = ntohl(tcph->seq);
-	rem_seq = ntohl(tcph->seq);
-	rem_seq_ack =  ntohl(tcph->ack_seq);
 	datasize = skb->len;
 	switch (cm_node->state) {
 	case NES_CM_STATE_SYN_RCVD:
@@ -2565,7 +2561,7 @@
 	u16 last_ae;
 	u8 original_hw_tcp_state;
 	u8 original_ibqp_state;
-	enum iw_cm_event_type disconn_status = IW_CM_EVENT_STATUS_OK;
+	enum iw_cm_event_status disconn_status = IW_CM_EVENT_STATUS_OK;
 	int issue_disconn = 0;
 	int issue_close = 0;
 	int issue_flush = 0;
@@ -3128,17 +3124,15 @@
 	struct nes_vnic *nesvnic;
 	struct nes_cm_listener *cm_node;
 	struct nes_cm_info cm_info;
-	struct nes_adapter *adapter;
 	int err;
 
-
 	nes_debug(NES_DBG_CM, "cm_id = %p, local port = 0x%04X.\n",
 			cm_id, ntohs(cm_id->local_addr.sin_port));
 
 	nesvnic = to_nesvnic(cm_id->device);
 	if (!nesvnic)
 		return -EINVAL;
-	adapter = nesvnic->nesdev->nesadapter;
+
 	nes_debug(NES_DBG_CM, "nesvnic=%p, netdev=%p, %s\n",
 			nesvnic, nesvnic->netdev, nesvnic->netdev->name);
 
diff --git a/drivers/infiniband/hw/nes/nes_hw.c b/drivers/infiniband/hw/nes/nes_hw.c
index 57874a1..f8233c8 100644
--- a/drivers/infiniband/hw/nes/nes_hw.c
+++ b/drivers/infiniband/hw/nes/nes_hw.c
@@ -1970,7 +1970,7 @@
 			dev_kfree_skb(
 				nesvnic->nic.tx_skb[nesvnic->nic.sq_tail]);
 
-		nesvnic->nic.sq_tail = (++nesvnic->nic.sq_tail)
+		nesvnic->nic.sq_tail = (nesvnic->nic.sq_tail + 1)
 					& (nesvnic->nic.sq_size - 1);
 	}
 
@@ -2737,9 +2737,9 @@
 				nesnic->sq_tail &= nesnic->sq_size-1;
 				if (sq_cqes > 128) {
 					barrier();
-				/* restart the queue if it had been stopped */
-				if (netif_queue_stopped(nesvnic->netdev))
-					netif_wake_queue(nesvnic->netdev);
+					/* restart the queue if it had been stopped */
+					if (netif_queue_stopped(nesvnic->netdev))
+						netif_wake_queue(nesvnic->netdev);
 					sq_cqes = 0;
 				}
 			} else {
@@ -2999,11 +2999,8 @@
 
 static u8 *locate_mpa(u8 *pkt, u32 aeq_info)
 {
-	u16 pkt_len;
-
 	if (aeq_info & NES_AEQE_Q2_DATA_ETHERNET) {
 		/* skip over ethernet header */
-		pkt_len = be16_to_cpu(*(u16 *)(pkt + ETH_HLEN - 2));
 		pkt += ETH_HLEN;
 
 		/* Skip over IP and TCP headers */
@@ -3283,9 +3280,15 @@
 	else
 		mod_qp_flags |= NES_CQP_QP_TERM_DONT_SEND_TERM_MSG;
 
-	nes_terminate_start_timer(nesqp);
-	nesqp->term_flags |= NES_TERM_SENT;
-	nes_hw_modify_qp(nesdev, nesqp, mod_qp_flags, termlen, 0);
+	if (!nesdev->iw_status)  {
+		nesqp->term_flags = NES_TERM_DONE;
+		nes_hw_modify_qp(nesdev, nesqp, NES_CQP_QP_IWARP_STATE_ERROR, 0, 0);
+		nes_cm_disconn(nesqp);
+	} else {
+		nes_terminate_start_timer(nesqp);
+		nesqp->term_flags |= NES_TERM_SENT;
+		nes_hw_modify_qp(nesdev, nesqp, mod_qp_flags, termlen, 0);
+	}
 }
 
 static void nes_terminate_send_fin(struct nes_device *nesdev,
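
Note on the sq_tail fix in nes_hw.c: "x = (++x) & mask" modifies x twice with no intervening sequence point, which is undefined behavior in C (gcc's -Wsequence-point typically flags it), whereas "x = (x + 1) & mask" computes the same power-of-two ring advance cleanly. A minimal demonstration:

    #include <stdio.h>

    int main(void)
    {
        unsigned sq_tail = 7, sq_size = 8;

        /* Undefined: sq_tail modified twice between sequence points.
         *   sq_tail = (++sq_tail) & (sq_size - 1);
         * Well-defined replacement, same intended ring advance: */
        sq_tail = (sq_tail + 1) & (sq_size - 1);

        printf("%u\n", sq_tail);        /* 0: wrapped around */
        return 0;
    }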
diff --git a/drivers/infiniband/hw/nes/nes_hw.h b/drivers/infiniband/hw/nes/nes_hw.h
index bbbfe9f..aa9183d 100644
--- a/drivers/infiniband/hw/nes/nes_hw.h
+++ b/drivers/infiniband/hw/nes/nes_hw.h
@@ -1100,11 +1100,12 @@
 	u32 wqm_wat;
 	u32 core_clock;
 	u32 firmware_version;
+	u32 eeprom_version;
 
 	u32 nic_rx_eth_route_err;
 
 	u32 et_rx_coalesce_usecs;
-	u32	et_rx_max_coalesced_frames;
+	u32 et_rx_max_coalesced_frames;
 	u32 et_rx_coalesce_usecs_irq;
 	u32 et_rx_max_coalesced_frames_irq;
 	u32 et_pkt_rate_low;
diff --git a/drivers/infiniband/hw/nes/nes_nic.c b/drivers/infiniband/hw/nes/nes_nic.c
index 5cc0a9a..1901ec7 100644
--- a/drivers/infiniband/hw/nes/nes_nic.c
+++ b/drivers/infiniband/hw/nes/nes_nic.c
@@ -232,6 +232,13 @@
 				NES_MAC_INT_TX_UNDERFLOW | NES_MAC_INT_TX_ERROR));
 		first_nesvnic = nesvnic;
 	}
+
+	if (nesvnic->of_device_registered) {
+		nesdev->iw_status = 1;
+		nesdev->nesadapter->send_term_ok = 1;
+		nes_port_ibevent(nesvnic);
+	}
+
 	if (first_nesvnic->linkup) {
 		/* Enable network packets */
 		nesvnic->linkup = 1;
@@ -309,9 +316,9 @@
 
 
 	if (nesvnic->of_device_registered) {
-		nes_destroy_ofa_device(nesvnic->nesibdev);
-		nesvnic->nesibdev = NULL;
-		nesvnic->of_device_registered = 0;
+		nesdev->nesadapter->send_term_ok = 0;
+		nesdev->iw_status = 0;
+		nes_port_ibevent(nesvnic);
 	}
 	nes_destroy_nic_qp(nesvnic);
 
@@ -463,7 +470,6 @@
 	u16 nhoffset;
 	u16 wqes_needed;
 	u16 wqes_available;
-	u32 old_head;
 	u32 wqe_misc;
 
 	/*
@@ -503,7 +509,6 @@
 		if (skb_is_gso(skb)) {
 			nesvnic->segmented_tso_requests++;
 			nesvnic->tso_requests++;
-			old_head = nesnic->sq_head;
 			/* Basically 4 fragments available per WQE with extended fragments */
 			wqes_needed = nr_frags >> 2;
 			wqes_needed += (nr_frags&3)?1:0;
diff --git a/drivers/infiniband/hw/nes/nes_utils.c b/drivers/infiniband/hw/nes/nes_utils.c
index a9f5dd2..f9c417c 100644
--- a/drivers/infiniband/hw/nes/nes_utils.c
+++ b/drivers/infiniband/hw/nes/nes_utils.c
@@ -190,6 +190,11 @@
 		nesadapter->firmware_version = (((u32)(u8)(eeprom_data>>8))  <<  16) +
 				(u32)((u8)eeprom_data);
 
+		eeprom_data = nes_read16_eeprom(nesdev->regs, next_section_address + 10);
+		printk(PFX "EEPROM version %u.%u\n", (u8)(eeprom_data>>8), (u8)eeprom_data);
+		nesadapter->eeprom_version = (((u32)(u8)(eeprom_data>>8)) << 16) +
+				(u32)((u8)eeprom_data);
+
 no_fw_rev:
 		/* eeprom is valid */
 		eeprom_offset = nesadapter->software_eeprom_offset;
diff --git a/drivers/infiniband/hw/nes/nes_verbs.c b/drivers/infiniband/hw/nes/nes_verbs.c
index 9bc2d74..9046e66 100644
--- a/drivers/infiniband/hw/nes/nes_verbs.c
+++ b/drivers/infiniband/hw/nes/nes_verbs.c
@@ -518,7 +518,7 @@
 	memset(props, 0, sizeof(*props));
 	memcpy(&props->sys_image_guid, nesvnic->netdev->dev_addr, 6);
 
-	props->fw_ver = nesdev->nesadapter->fw_ver;
+	props->fw_ver = nesdev->nesadapter->firmware_version;
 	props->device_cap_flags = nesdev->nesadapter->device_cap_flags;
 	props->vendor_id = nesdev->nesadapter->vendor_id;
 	props->vendor_part_id = nesdev->nesadapter->vendor_part_id;
@@ -1941,7 +1941,7 @@
 	u8  use_256_pbls = 0;
 	u8  use_4k_pbls = 0;
 	u16 use_two_level = (pbl_count_4k > 1) ? 1 : 0;
-	struct nes_root_vpbl new_root = {0, 0, 0};
+	struct nes_root_vpbl new_root = { 0, NULL, NULL };
 	u32 opcode = 0;
 	u16 major_code;
 
@@ -2112,13 +2112,12 @@
 	u32 driver_key = 0;
 	u32 root_pbl_index = 0;
 	u32 cur_pbl_index = 0;
-	int err = 0, pbl_depth = 0;
+	int err = 0;
 	int ret = 0;
 	u16 pbl_count = 0;
 	u8 single_page = 1;
 	u8 stag_key = 0;
 
-	pbl_depth = 0;
 	region_length = 0;
 	vpbl.pbl_vbase = NULL;
 	root_vpbl.pbl_vbase = NULL;
@@ -2931,7 +2930,6 @@
 	int ret;
 	u16 original_last_aeq;
 	u8 issue_modify_qp = 0;
-	u8 issue_disconnect = 0;
 	u8 dont_wait = 0;
 
 	nes_debug(NES_DBG_MOD_QP, "QP%u: QP State=%u, cur QP State=%u,"
@@ -3058,6 +3056,7 @@
 						nesqp->hte_added = 0;
 					}
 				if ((nesqp->hw_tcp_state > NES_AEQE_TCP_STATE_CLOSED) &&
+						(nesdev->iw_status) &&
 						(nesqp->hw_tcp_state != NES_AEQE_TCP_STATE_TIME_WAIT)) {
 					next_iwarp_state |= NES_CQP_QP_RESET;
 				} else {
@@ -3082,7 +3081,6 @@
 			nesqp->iwarp_state = next_iwarp_state & NES_CQP_QP_IWARP_STATE_MASK;
 			nes_debug(NES_DBG_MOD_QP, "Change nesqp->iwarp_state=%08x\n",
 					nesqp->iwarp_state);
-			issue_disconnect = 1;
 		} else {
 			nesqp->iwarp_state = next_iwarp_state & NES_CQP_QP_IWARP_STATE_MASK;
 			nes_debug(NES_DBG_MOD_QP, "Change nesqp->iwarp_state=%08x\n",
@@ -3936,6 +3934,17 @@
 	return nesibdev;
 }
 
+void  nes_port_ibevent(struct nes_vnic *nesvnic)
+{
+	struct nes_ib_device *nesibdev = nesvnic->nesibdev;
+	struct nes_device *nesdev = nesvnic->nesdev;
+	struct ib_event event;
+	event.device = &nesibdev->ibdev;
+	event.element.port_num = nesvnic->logical_port + 1;
+	event.event = nesdev->iw_status ? IB_EVENT_PORT_ACTIVE : IB_EVENT_PORT_ERR;
+	ib_dispatch_event(&event);
+}
+
 
 /**
  * nes_destroy_ofa_device
diff --git a/drivers/infiniband/hw/qib/qib.h b/drivers/infiniband/hw/qib/qib.h
index 3593983..61de065 100644
--- a/drivers/infiniband/hw/qib/qib.h
+++ b/drivers/infiniband/hw/qib/qib.h
@@ -45,6 +45,7 @@
 #include <linux/mutex.h>
 #include <linux/list.h>
 #include <linux/scatterlist.h>
+#include <linux/slab.h>
 #include <linux/io.h>
 #include <linux/fs.h>
 #include <linux/completion.h>
@@ -326,6 +327,9 @@
 
 #define QIB_DEFAULT_MTU 4096
 
+/* max number of IB ports supported per HCA */
+#define QIB_MAX_IB_PORTS 2
+
 /*
  * Possible IB config parameters for f_get/set_ib_table()
  */
diff --git a/drivers/infiniband/hw/qib/qib_common.h b/drivers/infiniband/hw/qib/qib_common.h
index b3955ed..145da404 100644
--- a/drivers/infiniband/hw/qib/qib_common.h
+++ b/drivers/infiniband/hw/qib/qib_common.h
@@ -279,7 +279,7 @@
  * may not be implemented; the user code must deal with this if it
  * cares, or it must abort after initialization reports the difference.
  */
-#define QIB_USER_SWMINOR 10
+#define QIB_USER_SWMINOR 11
 
 #define QIB_USER_SWVERSION ((QIB_USER_SWMAJOR << 16) | QIB_USER_SWMINOR)
 
@@ -302,6 +302,18 @@
 #define QIB_KERN_SWVERSION ((QIB_KERN_TYPE << 31) | QIB_USER_SWVERSION)
 
 /*
+ * If the unit is specified via open, HCA choice is fixed.  If port is
+ * specified, it's also fixed.  Otherwise we try to spread contexts
+ * across ports and HCAs, using different algorithms.  WITHIN is
+ * the old default, prior to this mechanism.
+ */
+#define QIB_PORT_ALG_ACROSS 0 /* round robin contexts across HCAs, then
+			       * ports; this is the default */
+#define QIB_PORT_ALG_WITHIN 1 /* use all contexts on an HCA (round robin
+			       * active ports within), then next HCA */
+#define QIB_PORT_ALG_COUNT 2 /* number of algorithm choices */
+
+/*
  * This structure is passed to qib_userinit() to tell the driver where
  * user code buffers are, sizes, etc.   The offsets and sizes of the
  * fields must remain unchanged, for binary compatibility.  It can
@@ -319,7 +331,7 @@
 	/* size of struct base_info to write to */
 	__u32 spu_base_info_size;
 
-	__u32 _spu_unused3;
+	__u32 spu_port_alg; /* which QIB_PORT_ALG_*; unused for user minor < 11 */
 
 	/*
 	 * If two or more processes wish to share a context, each process
diff --git a/drivers/infiniband/hw/qib/qib_driver.c b/drivers/infiniband/hw/qib/qib_driver.c
index f15ce07..9cd1936 100644
--- a/drivers/infiniband/hw/qib/qib_driver.c
+++ b/drivers/infiniband/hw/qib/qib_driver.c
@@ -335,7 +335,7 @@
 		smp_rmb();  /* prevent speculative reads of dma'ed hdrq */
 	}
 
-	for (last = 0, i = 1; !last; i += !last) {
+	for (last = 0, i = 1; !last && i <= 64; i += !last) {
 		hdr = dd->f_get_msgheader(dd, rhf_addr);
 		eflags = qib_hdrget_err_flags(rhf_addr);
 		etype = qib_hdrget_rcv_type(rhf_addr);
diff --git a/drivers/infiniband/hw/qib/qib_file_ops.c b/drivers/infiniband/hw/qib/qib_file_ops.c
index a142a9e..6b11645 100644
--- a/drivers/infiniband/hw/qib/qib_file_ops.c
+++ b/drivers/infiniband/hw/qib/qib_file_ops.c
@@ -1294,128 +1294,130 @@
 	return ret;
 }
 
-static inline int usable(struct qib_pportdata *ppd, int active_only)
+static inline int usable(struct qib_pportdata *ppd)
 {
 	struct qib_devdata *dd = ppd->dd;
-	u32 linkok = active_only ? QIBL_LINKACTIVE :
-		 (QIBL_LINKINIT | QIBL_LINKARMED | QIBL_LINKACTIVE);
 
 	return dd && (dd->flags & QIB_PRESENT) && dd->kregbase && ppd->lid &&
-		(ppd->lflags & linkok);
+		(ppd->lflags & QIBL_LINKACTIVE);
+}
+
+/*
+ * Select a context on the given device, either using a requested port
+ * or the port based on the context number.
+ */
+static int choose_port_ctxt(struct file *fp, struct qib_devdata *dd, u32 port,
+			    const struct qib_user_info *uinfo)
+{
+	struct qib_pportdata *ppd = NULL;
+	int ret, ctxt;
+
+	if (port) {
+		if (!usable(dd->pport + port - 1)) {
+			ret = -ENETDOWN;
+			goto done;
+		} else
+			ppd = dd->pport + port - 1;
+	}
+	for (ctxt = dd->first_user_ctxt; ctxt < dd->cfgctxts && dd->rcd[ctxt];
+	     ctxt++)
+		;
+	if (ctxt == dd->cfgctxts) {
+		ret = -EBUSY;
+		goto done;
+	}
+	if (!ppd) {
+		u32 pidx = ctxt % dd->num_pports;
+		if (usable(dd->pport + pidx))
+			ppd = dd->pport + pidx;
+		else {
+			for (pidx = 0; pidx < dd->num_pports && !ppd;
+			     pidx++)
+				if (usable(dd->pport + pidx))
+					ppd = dd->pport + pidx;
+		}
+	}
+	ret = ppd ? setup_ctxt(ppd, ctxt, fp, uinfo) : -ENETDOWN;
+done:
+	return ret;
 }
 
 static int find_free_ctxt(int unit, struct file *fp,
 			  const struct qib_user_info *uinfo)
 {
 	struct qib_devdata *dd = qib_lookup(unit);
-	struct qib_pportdata *ppd = NULL;
 	int ret;
-	u32 ctxt;
 
-	if (!dd || (uinfo->spu_port && uinfo->spu_port > dd->num_pports)) {
+	if (!dd || (uinfo->spu_port && uinfo->spu_port > dd->num_pports))
 		ret = -ENODEV;
-		goto bail;
-	}
+	else
+		ret = choose_port_ctxt(fp, dd, uinfo->spu_port, uinfo);
 
-	/*
-	 * If users requests specific port, only try that one port, else
-	 * select "best" port below, based on context.
-	 */
-	if (uinfo->spu_port) {
-		ppd = dd->pport + uinfo->spu_port - 1;
-		if (!usable(ppd, 0)) {
-			ret = -ENETDOWN;
-			goto bail;
-		}
-	}
-
-	for (ctxt = dd->first_user_ctxt; ctxt < dd->cfgctxts; ctxt++) {
-		if (dd->rcd[ctxt])
-			continue;
-		/*
-		 * The setting and clearing of user context rcd[x] protected
-		 * by the qib_mutex
-		 */
-		if (!ppd) {
-			/* choose port based on ctxt, if up, else 1st up */
-			ppd = dd->pport + (ctxt % dd->num_pports);
-			if (!usable(ppd, 0)) {
-				int i;
-				for (i = 0; i < dd->num_pports; i++) {
-					ppd = dd->pport + i;
-					if (usable(ppd, 0))
-						break;
-				}
-				if (i == dd->num_pports) {
-					ret = -ENETDOWN;
-					goto bail;
-				}
-			}
-		}
-		ret = setup_ctxt(ppd, ctxt, fp, uinfo);
-		goto bail;
-	}
-	ret = -EBUSY;
-
-bail:
 	return ret;
 }
 
-static int get_a_ctxt(struct file *fp, const struct qib_user_info *uinfo)
+static int get_a_ctxt(struct file *fp, const struct qib_user_info *uinfo,
+		      unsigned alg)
 {
-	struct qib_pportdata *ppd;
-	int ret = 0, devmax;
-	int npresent, nup;
-	int ndev;
+	struct qib_devdata *udd = NULL;
+	int ret = 0, devmax, npresent, nup, ndev, dusable = 0, i;
 	u32 port = uinfo->spu_port, ctxt;
 
 	devmax = qib_count_units(&npresent, &nup);
-
-	for (ndev = 0; ndev < devmax; ndev++) {
-		struct qib_devdata *dd = qib_lookup(ndev);
-
-		/* device portion of usable() */
-		if (!(dd && (dd->flags & QIB_PRESENT) && dd->kregbase))
-			continue;
-		for (ctxt = dd->first_user_ctxt; ctxt < dd->cfgctxts; ctxt++) {
-			if (dd->rcd[ctxt])
-				continue;
-			if (port) {
-				if (port > dd->num_pports)
-					continue;
-				ppd = dd->pport + port - 1;
-				if (!usable(ppd, 0))
-					continue;
-			} else {
-				/*
-				 * choose port based on ctxt, if up, else
-				 * first port that's up for multi-port HCA
-				 */
-				ppd = dd->pport + (ctxt % dd->num_pports);
-				if (!usable(ppd, 0)) {
-					int j;
-
-					ppd = NULL;
-					for (j = 0; j < dd->num_pports &&
-						!ppd; j++)
-						if (usable(dd->pport + j, 0))
-							ppd = dd->pport + j;
-					if (!ppd)
-						continue; /* to next unit */
-				}
-			}
-			ret = setup_ctxt(ppd, ctxt, fp, uinfo);
-			goto done;
-		}
+	if (!npresent) {
+		ret = -ENXIO;
+		goto done;
+	}
+	if (nup == 0) {
+		ret = -ENETDOWN;
+		goto done;
 	}
 
-	if (npresent) {
-		if (nup == 0)
-			ret = -ENETDOWN;
-		else
-			ret = -EBUSY;
-	} else
-		ret = -ENXIO;
+	if (alg == QIB_PORT_ALG_ACROSS) {
+		unsigned inuse = ~0U;
+		/* find device (with ACTIVE ports) with fewest ctxts in use */
+		for (ndev = 0; ndev < devmax; ndev++) {
+			struct qib_devdata *dd = qib_lookup(ndev);
+			unsigned cused = 0, cfree = 0;
+			if (!dd)
+				continue;
+			if (port && port <= dd->num_pports &&
+			    usable(dd->pport + port - 1))
+				dusable = 1;
+			else
+				for (i = 0; i < dd->num_pports; i++)
+					if (usable(dd->pport + i))
+						dusable++;
+			if (!dusable)
+				continue;
+			for (ctxt = dd->first_user_ctxt; ctxt < dd->cfgctxts;
+			     ctxt++)
+				if (dd->rcd[ctxt])
+					cused++;
+				else
+					cfree++;
+			if (cfree && cused < inuse) {
+				udd = dd;
+				inuse = cused;
+			}
+		}
+		if (udd) {
+			ret = choose_port_ctxt(fp, udd, port, uinfo);
+			goto done;
+		}
+	} else {
+		for (ndev = 0; ndev < devmax; ndev++) {
+			struct qib_devdata *dd = qib_lookup(ndev);
+			if (dd) {
+				ret = choose_port_ctxt(fp, dd, port, uinfo);
+				if (!ret)
+					goto done;
+				if (ret == -EBUSY)
+					dusable++;
+			}
+		}
+	}
+	ret = dusable ? -EBUSY : -ENETDOWN;
 
 done:
 	return ret;
@@ -1481,7 +1483,7 @@
 {
 	int ret;
 	int i_minor;
-	unsigned swmajor, swminor;
+	unsigned swmajor, swminor, alg = QIB_PORT_ALG_ACROSS;
 
 	/* Check to be sure we haven't already initialized this file */
 	if (ctxt_fp(fp)) {
@@ -1498,6 +1500,9 @@
 
 	swminor = uinfo->spu_userversion & 0xffff;
 
+	if (swminor >= 11 && uinfo->spu_port_alg < QIB_PORT_ALG_COUNT)
+		alg = uinfo->spu_port_alg;
+
 	mutex_lock(&qib_mutex);
 
 	if (qib_compatible_subctxts(swmajor, swminor) &&
@@ -1514,7 +1519,7 @@
 	if (i_minor)
 		ret = find_free_ctxt(i_minor - 1, fp, uinfo);
 	else
-		ret = get_a_ctxt(fp, uinfo);
+		ret = get_a_ctxt(fp, uinfo, alg);
 
 done_chk_sdma:
 	if (!ret) {
@@ -1862,7 +1867,7 @@
 {
 	int ret = 0;
 
-	if (!usable(rcd->ppd, 1)) {
+	if (!usable(rcd->ppd)) {
 		int i;
 		/*
 		 * if link is down, or otherwise not usable, delay
@@ -1881,7 +1886,7 @@
 				set_bit(_QIB_EVENT_DISARM_BUFS_BIT,
 					&rcd->user_event_mask[i]);
 		}
-		for (i = 0; !usable(rcd->ppd, 1) && i < 300; i++)
+		for (i = 0; !usable(rcd->ppd) && i < 300; i++)
 			msleep(100);
 		ret = -ENETDOWN;
 	}
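
usable() has lost its second argument. For orientation, a hedged sketch of the
one-argument predicate; beyond the QIB_PRESENT/kregbase device checks seen in
the old get_a_ctxt() body above, the lid/lflags tests are assumptions:

	/* Sketch: a port is usable when its device is present and mapped and
	 * its link is ACTIVE with a LID assigned (the last two are assumed). */
	static int usable(struct qib_pportdata *ppd)
	{
		struct qib_devdata *dd = ppd->dd;

		return dd && (dd->flags & QIB_PRESENT) && dd->kregbase &&
			ppd->lid && (ppd->lflags & QIBL_LINKACTIVE);
	}
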
diff --git a/drivers/infiniband/hw/qib/qib_fs.c b/drivers/infiniband/hw/qib/qib_fs.c
index 844954b..9f989c0 100644
--- a/drivers/infiniband/hw/qib/qib_fs.c
+++ b/drivers/infiniband/hw/qib/qib_fs.c
@@ -135,8 +135,8 @@
 }
 
 static const struct file_operations driver_ops[] = {
-	{ .read = driver_stats_read, },
-	{ .read = driver_names_read, },
+	{ .read = driver_stats_read, .llseek = generic_file_llseek, },
+	{ .read = driver_names_read, .llseek = generic_file_llseek, },
 };
 
 /* read the per-device counters */
@@ -164,8 +164,8 @@
 }
 
 static const struct file_operations cntr_ops[] = {
-	{ .read = dev_counters_read, },
-	{ .read = dev_names_read, },
+	{ .read = dev_counters_read, .llseek = generic_file_llseek, },
+	{ .read = dev_names_read, .llseek = generic_file_llseek, },
 };
 
 /*
@@ -210,9 +210,9 @@
 }
 
 static const struct file_operations portcntr_ops[] = {
-	{ .read = portnames_read, },
-	{ .read = portcntrs_1_read, },
-	{ .read = portcntrs_2_read, },
+	{ .read = portnames_read, .llseek = generic_file_llseek, },
+	{ .read = portcntrs_1_read, .llseek = generic_file_llseek, },
+	{ .read = portcntrs_2_read, .llseek = generic_file_llseek, },
 };
 
 /*
@@ -261,8 +261,8 @@
 }
 
 static const struct file_operations qsfp_ops[] = {
-	{ .read = qsfp_1_read, },
-	{ .read = qsfp_2_read, },
+	{ .read = qsfp_1_read, .llseek = generic_file_llseek, },
+	{ .read = qsfp_2_read, .llseek = generic_file_llseek, },
 };
 
 static ssize_t flash_read(struct file *file, char __user *buf,
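
Wiring up .llseek makes the seek behavior of these read-only files explicit
instead of relying on default_llseek. The same pattern applies to any fops
whose read honors *ppos (hypothetical names, not from the patch):

	static ssize_t example_read(struct file *file, char __user *buf,
				    size_t count, loff_t *ppos)
	{
		static const char msg[] = "example\n";

		/* honors *ppos, so generic_file_llseek behaves sensibly */
		return simple_read_from_buffer(buf, count, ppos, msg,
					       sizeof(msg) - 1);
	}

	static const struct file_operations example_ops = {
		.read	= example_read,
		.llseek	= generic_file_llseek,
	};
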
diff --git a/drivers/infiniband/hw/qib/qib_iba7322.c b/drivers/infiniband/hw/qib/qib_iba7322.c
index 5eedf83..584d443 100644
--- a/drivers/infiniband/hw/qib/qib_iba7322.c
+++ b/drivers/infiniband/hw/qib/qib_iba7322.c
@@ -5864,7 +5864,7 @@
 	 * Doesn't clear any of the error bits that might be set.
 	 */
 	val = TIDFLOW_ERRBITS; /* these are W1C */
-	for (i = 0; i < dd->ctxtcnt; i++) {
+	for (i = 0; i < dd->cfgctxts; i++) {
 		int flow;
 		for (flow = 0; flow < NUM_TIDFLOWS_CTXT; flow++)
 			qib_write_ureg(dd, ur_rcvflowtable+flow, val, i);
@@ -7271,6 +7271,8 @@
 	ibsd_wr_allchans(ppd, 20, (4 << 13), BMASK(15, 13)); /* SDR */
 
 	data = qib_read_kreg_port(ppd, krp_serdesctrl);
+	/* Turn off IB latency mode */
+	data &= ~SYM_MASK(IBSerdesCtrl_0, IB_LAT_MODE);
 	qib_write_kreg_port(ppd, krp_serdesctrl, data |
 		SYM_MASK(IBSerdesCtrl_0, RXLOSEN));
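
The serdes update is a read-modify-write: the stale latency-mode bit is
cleared before the LOS-detect enable is OR-ed in, so a value left by firmware
or a previous load cannot survive. The same sequence, shown contiguously:

	data = qib_read_kreg_port(ppd, krp_serdesctrl);
	data &= ~SYM_MASK(IBSerdesCtrl_0, IB_LAT_MODE);	/* latency mode off */
	data |= SYM_MASK(IBSerdesCtrl_0, RXLOSEN);	/* RX LOS detect on */
	qib_write_kreg_port(ppd, krp_serdesctrl, data);
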
 
diff --git a/drivers/infiniband/hw/qib/qib_init.c b/drivers/infiniband/hw/qib/qib_init.c
index a873dd5..f1d16d3 100644
--- a/drivers/infiniband/hw/qib/qib_init.c
+++ b/drivers/infiniband/hw/qib/qib_init.c
@@ -93,7 +93,7 @@
 void qib_set_ctxtcnt(struct qib_devdata *dd)
 {
 	if (!qib_cfgctxts)
-		dd->cfgctxts = dd->ctxtcnt;
+		dd->cfgctxts = dd->first_user_ctxt + num_online_cpus();
 	else if (qib_cfgctxts < dd->num_pports)
 		dd->cfgctxts = dd->ctxtcnt;
 	else if (qib_cfgctxts <= dd->ctxtcnt)
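
With qib_cfgctxts unset, the default context count now scales with the number
of online CPUs instead of enabling every context the chip implements. Spelled
out (the clamp to ctxtcnt is this sketch's assumption, not part of the hunk):

	if (!qib_cfgctxts) {
		/* kernel contexts plus one user context per online CPU */
		dd->cfgctxts = dd->first_user_ctxt + num_online_cpus();
		if (dd->cfgctxts > dd->ctxtcnt)	/* assumed clamp */
			dd->cfgctxts = dd->ctxtcnt;
	}
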
diff --git a/drivers/infiniband/hw/qib/qib_qp.c b/drivers/infiniband/hw/qib/qib_qp.c
index e0f65e3..6c39851 100644
--- a/drivers/infiniband/hw/qib/qib_qp.c
+++ b/drivers/infiniband/hw/qib/qib_qp.c
@@ -450,7 +450,7 @@
  *
  * Flushes both send and receive work queues.
  * Returns true if last WQE event should be generated.
- * The QP s_lock should be held and interrupts disabled.
+ * The QP r_lock and s_lock should be held and interrupts disabled.
  * If we are already in error state, just return.
  */
 int qib_error_qp(struct qib_qp *qp, enum ib_wc_status err)
diff --git a/drivers/infiniband/hw/qib/qib_rc.c b/drivers/infiniband/hw/qib/qib_rc.c
index 40c0a37..a093111 100644
--- a/drivers/infiniband/hw/qib/qib_rc.c
+++ b/drivers/infiniband/hw/qib/qib_rc.c
@@ -868,7 +868,7 @@
 
 /*
  * Back up requester to resend the last un-ACKed request.
- * The QP s_lock should be held and interrupts disabled.
+ * The QP r_lock and s_lock should be held and interrupts disabled.
  */
 static void qib_restart_rc(struct qib_qp *qp, u32 psn, int wait)
 {
@@ -911,7 +911,8 @@
 	struct qib_ibport *ibp;
 	unsigned long flags;
 
-	spin_lock_irqsave(&qp->s_lock, flags);
+	spin_lock_irqsave(&qp->r_lock, flags);
+	spin_lock(&qp->s_lock);
 	if (qp->s_flags & QIB_S_TIMER) {
 		ibp = to_iport(qp->ibqp.device, qp->port_num);
 		ibp->n_rc_timeouts++;
@@ -920,7 +921,8 @@
 		qib_restart_rc(qp, qp->s_last_psn + 1, 1);
 		qib_schedule_send(qp);
 	}
-	spin_unlock_irqrestore(&qp->s_lock, flags);
+	spin_unlock(&qp->s_lock);
+	spin_unlock_irqrestore(&qp->r_lock, flags);
 }
 
 /*
@@ -1414,10 +1416,6 @@
 
 	spin_lock_irqsave(&qp->s_lock, flags);
 
-	/* Double check we can process this now that we hold the s_lock. */
-	if (!(ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK))
-		goto ack_done;
-
 	/* Ignore invalid responses. */
 	if (qib_cmp24(psn, qp->s_next_psn) >= 0)
 		goto ack_done;
@@ -1661,9 +1659,6 @@
 	ibp->n_rc_dupreq++;
 
 	spin_lock_irqsave(&qp->s_lock, flags);
-	/* Double check we can process this now that we hold the s_lock. */
-	if (!(ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK))
-		goto unlock_done;
 
 	for (i = qp->r_head_ack_queue; ; i = prev) {
 		if (i == qp->s_tail_ack_queue)
@@ -1878,9 +1873,6 @@
 	psn = be32_to_cpu(ohdr->bth[2]);
 	opcode >>= 24;
 
-	/* Prevent simultaneous processing after APM on different CPUs */
-	spin_lock(&qp->r_lock);
-
 	/*
 	 * Process responses (ACKs) before anything else.  Note that the
 	 * packet sequence number will be for something in the send work
@@ -1891,14 +1883,14 @@
 	    opcode <= OP(ATOMIC_ACKNOWLEDGE)) {
 		qib_rc_rcv_resp(ibp, ohdr, data, tlen, qp, opcode, psn,
 				hdrsize, pmtu, rcd);
-		goto runlock;
+		return;
 	}
 
 	/* Compute 24 bits worth of difference. */
 	diff = qib_cmp24(psn, qp->r_psn);
 	if (unlikely(diff)) {
 		if (qib_rc_rcv_error(ohdr, data, qp, opcode, psn, diff, rcd))
-			goto runlock;
+			return;
 		goto send_ack;
 	}
 
@@ -2090,9 +2082,6 @@
 		if (next > QIB_MAX_RDMA_ATOMIC)
 			next = 0;
 		spin_lock_irqsave(&qp->s_lock, flags);
-		/* Double check we can process this while holding the s_lock. */
-		if (!(ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK))
-			goto srunlock;
 		if (unlikely(next == qp->s_tail_ack_queue)) {
 			if (!qp->s_ack_queue[next].sent)
 				goto nack_inv_unlck;
@@ -2146,7 +2135,7 @@
 		qp->s_flags |= QIB_S_RESP_PENDING;
 		qib_schedule_send(qp);
 
-		goto srunlock;
+		goto sunlock;
 	}
 
 	case OP(COMPARE_SWAP):
@@ -2165,9 +2154,6 @@
 		if (next > QIB_MAX_RDMA_ATOMIC)
 			next = 0;
 		spin_lock_irqsave(&qp->s_lock, flags);
-		/* Double check we can process this while holding the s_lock. */
-		if (!(ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK))
-			goto srunlock;
 		if (unlikely(next == qp->s_tail_ack_queue)) {
 			if (!qp->s_ack_queue[next].sent)
 				goto nack_inv_unlck;
@@ -2213,7 +2199,7 @@
 		qp->s_flags |= QIB_S_RESP_PENDING;
 		qib_schedule_send(qp);
 
-		goto srunlock;
+		goto sunlock;
 	}
 
 	default:
@@ -2227,7 +2213,7 @@
 	/* Send an ACK if requested or required. */
 	if (psn & (1 << 31))
 		goto send_ack;
-	goto runlock;
+	return;
 
 rnr_nak:
 	qp->r_nak_state = IB_RNR_NAK | qp->r_min_rnr_timer;
@@ -2238,7 +2224,7 @@
 		atomic_inc(&qp->refcount);
 		list_add_tail(&qp->rspwait, &rcd->qp_wait_list);
 	}
-	goto runlock;
+	return;
 
 nack_op_err:
 	qib_rc_error(qp, IB_WC_LOC_QP_OP_ERR);
@@ -2250,7 +2236,7 @@
 		atomic_inc(&qp->refcount);
 		list_add_tail(&qp->rspwait, &rcd->qp_wait_list);
 	}
-	goto runlock;
+	return;
 
 nack_inv_unlck:
 	spin_unlock_irqrestore(&qp->s_lock, flags);
@@ -2264,7 +2250,7 @@
 		atomic_inc(&qp->refcount);
 		list_add_tail(&qp->rspwait, &rcd->qp_wait_list);
 	}
-	goto runlock;
+	return;
 
 nack_acc_unlck:
 	spin_unlock_irqrestore(&qp->s_lock, flags);
@@ -2274,13 +2260,6 @@
 	qp->r_ack_psn = qp->r_psn;
 send_ack:
 	qib_send_rc_ack(qp);
-runlock:
-	spin_unlock(&qp->r_lock);
-	return;
-
-srunlock:
-	spin_unlock_irqrestore(&qp->s_lock, flags);
-	spin_unlock(&qp->r_lock);
 	return;
 
 sunlock:
diff --git a/drivers/infiniband/hw/qib/qib_sdma.c b/drivers/infiniband/hw/qib/qib_sdma.c
index b845688..cad4449 100644
--- a/drivers/infiniband/hw/qib/qib_sdma.c
+++ b/drivers/infiniband/hw/qib/qib_sdma.c
@@ -656,6 +656,7 @@
 	}
 	qp = tx->qp;
 	qib_put_txreq(tx);
+	spin_lock(&qp->r_lock);
 	spin_lock(&qp->s_lock);
 	if (qp->ibqp.qp_type == IB_QPT_RC) {
 		/* XXX what about error sending RDMA read responses? */
@@ -664,6 +665,7 @@
 	} else if (qp->s_wqe)
 		qib_send_complete(qp, qp->s_wqe, IB_WC_GENERAL_ERR);
 	spin_unlock(&qp->s_lock);
+	spin_unlock(&qp->r_lock);
 	/* return zero to process the next send work request */
 	goto unlock;
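
qib_rc.c, qib_sdma.c, and (below) qib_verbs.c now share a single nesting
discipline: r_lock first, s_lock inside it, interrupts disabled at the outer
acquisition, unlocks in reverse order. Any path that needs both locks must
follow this order or it can ABBA-deadlock against rc_timeout(); it is also
what lets the per-opcode "double check" of QIB_PROCESS_RECV_OK go away, since
receive state is validated once under r_lock in qib_ib_rcv():

	spin_lock_irqsave(&qp->r_lock, flags);
	spin_lock(&qp->s_lock);

	/* ... work that touches both receive and send state ... */

	spin_unlock(&qp->s_lock);
	spin_unlock_irqrestore(&qp->r_lock, flags);
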
 
diff --git a/drivers/infiniband/hw/qib/qib_sysfs.c b/drivers/infiniband/hw/qib/qib_sysfs.c
index dab4d9f..d50a33f 100644
--- a/drivers/infiniband/hw/qib/qib_sysfs.c
+++ b/drivers/infiniband/hw/qib/qib_sysfs.c
@@ -347,7 +347,7 @@
 
 #define QIB_DIAGC_ATTR(N) \
 	static struct qib_diagc_attr qib_diagc_attr_##N = { \
-		.attr = { .name = __stringify(N), .mode = 0444 }, \
+		.attr = { .name = __stringify(N), .mode = 0664 }, \
 		.counter = offsetof(struct qib_ibport, n_##N) \
 	}
 
@@ -403,8 +403,27 @@
 	return sprintf(buf, "%u\n", *(u32 *)((char *)qibp + dattr->counter));
 }
 
+static ssize_t diagc_attr_store(struct kobject *kobj, struct attribute *attr,
+				const char *buf, size_t size)
+{
+	struct qib_diagc_attr *dattr =
+		container_of(attr, struct qib_diagc_attr, attr);
+	struct qib_pportdata *ppd =
+		container_of(kobj, struct qib_pportdata, diagc_kobj);
+	struct qib_ibport *qibp = &ppd->ibport_data;
+	char *endp;
+	long val = simple_strtol(buf, &endp, 0);
+
+	if (val < 0 || endp == buf)
+		return -EINVAL;
+
+	*(u32 *)((char *) qibp + dattr->counter) = val;
+	return size;
+}
+
 static const struct sysfs_ops qib_diagc_ops = {
 	.show = diagc_attr_show,
+	.store = diagc_attr_store,
 };
 
 static struct kobj_type qib_diagc_ktype = {
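
simple_strtol() stops at the first non-numeric character, so a write of "3x"
is accepted as 3. A stricter store using kstrtou32() from later kernels (an
alternative sketch, not the committed code):

	static ssize_t diagc_attr_store_strict(struct kobject *kobj,
					       struct attribute *attr,
					       const char *buf, size_t size)
	{
		struct qib_diagc_attr *dattr =
			container_of(attr, struct qib_diagc_attr, attr);
		struct qib_pportdata *ppd =
			container_of(kobj, struct qib_pportdata, diagc_kobj);
		u32 val;
		int ret = kstrtou32(buf, 0, &val);	/* rejects trailing junk */

		if (ret)
			return ret;
		*(u32 *)((char *)&ppd->ibport_data + dattr->counter) = val;
		return size;
	}
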
diff --git a/drivers/infiniband/hw/qib/qib_tx.c b/drivers/infiniband/hw/qib/qib_tx.c
index af30232..7f36454 100644
--- a/drivers/infiniband/hw/qib/qib_tx.c
+++ b/drivers/infiniband/hw/qib/qib_tx.c
@@ -170,7 +170,7 @@
 void qib_disarm_piobufs_set(struct qib_devdata *dd, unsigned long *mask,
 			    unsigned cnt)
 {
-	struct qib_pportdata *ppd, *pppd[dd->num_pports];
+	struct qib_pportdata *ppd, *pppd[QIB_MAX_IB_PORTS];
 	unsigned i;
 	unsigned long flags;
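
Bounding the on-stack array by the compile-time maximum instead of a VLA
keeps the frame size fixed no matter what num_pports is. A defensive form of
the pattern (the bound check is this sketch's addition, not in the patch):

	struct qib_pportdata *pppd[QIB_MAX_IB_PORTS];	/* fixed size, no VLA */

	if (WARN_ON_ONCE(dd->num_pports > QIB_MAX_IB_PORTS))
		return;		/* never index past the fixed array */
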
 
diff --git a/drivers/infiniband/hw/qib/qib_uc.c b/drivers/infiniband/hw/qib/qib_uc.c
index 6c7fe78..b9c8b63 100644
--- a/drivers/infiniband/hw/qib/qib_uc.c
+++ b/drivers/infiniband/hw/qib/qib_uc.c
@@ -272,9 +272,6 @@
 	opcode >>= 24;
 	memset(&wc, 0, sizeof wc);
 
-	/* Prevent simultaneous processing after APM on different CPUs */
-	spin_lock(&qp->r_lock);
-
 	/* Compare the PSN versus the expected PSN. */
 	if (unlikely(qib_cmp24(psn, qp->r_psn) != 0)) {
 		/*
@@ -534,7 +531,6 @@
 	}
 	qp->r_psn++;
 	qp->r_state = opcode;
-	spin_unlock(&qp->r_lock);
 	return;
 
 rewind:
@@ -542,12 +538,10 @@
 	qp->r_sge.num_sge = 0;
 drop:
 	ibp->n_pkt_drops++;
-	spin_unlock(&qp->r_lock);
 	return;
 
 op_err:
 	qib_rc_error(qp, IB_WC_LOC_QP_OP_ERR);
-	spin_unlock(&qp->r_lock);
 	return;
 
 sunlock:
diff --git a/drivers/infiniband/hw/qib/qib_ud.c b/drivers/infiniband/hw/qib/qib_ud.c
index c838cda..e1b3da2 100644
--- a/drivers/infiniband/hw/qib/qib_ud.c
+++ b/drivers/infiniband/hw/qib/qib_ud.c
@@ -535,13 +535,6 @@
 	wc.byte_len = tlen + sizeof(struct ib_grh);
 
 	/*
-	 * We need to serialize getting a receive work queue entry and
-	 * generating a completion for it against QPs sending to this QP
-	 * locally.
-	 */
-	spin_lock(&qp->r_lock);
-
-	/*
 	 * Get the next work request entry to find where to put the data.
 	 */
 	if (qp->r_flags & QIB_R_REUSE_SGE)
@@ -552,19 +545,19 @@
 		ret = qib_get_rwqe(qp, 0);
 		if (ret < 0) {
 			qib_rc_error(qp, IB_WC_LOC_QP_OP_ERR);
-			goto bail_unlock;
+			return;
 		}
 		if (!ret) {
 			if (qp->ibqp.qp_num == 0)
 				ibp->n_vl15_dropped++;
-			goto bail_unlock;
+			return;
 		}
 	}
 	/* Silently drop packets which are too big. */
 	if (unlikely(wc.byte_len > qp->r_len)) {
 		qp->r_flags |= QIB_R_REUSE_SGE;
 		ibp->n_pkt_drops++;
-		goto bail_unlock;
+		return;
 	}
 	if (has_grh) {
 		qib_copy_sge(&qp->r_sge, &hdr->u.l.grh,
@@ -579,7 +572,7 @@
 			qp->r_sge.sge = *qp->r_sge.sg_list++;
 	}
 	if (!test_and_clear_bit(QIB_R_WRID_VALID, &qp->r_aflags))
-		goto bail_unlock;
+		return;
 	wc.wr_id = qp->r_wr_id;
 	wc.status = IB_WC_SUCCESS;
 	wc.opcode = IB_WC_RECV;
@@ -601,7 +594,5 @@
 	qib_cq_enter(to_icq(qp->ibqp.recv_cq), &wc,
 		     (ohdr->bth[0] &
 			cpu_to_be32(IB_BTH_SOLICITED)) != 0);
-bail_unlock:
-	spin_unlock(&qp->r_lock);
 bail:;
 }
diff --git a/drivers/infiniband/hw/qib/qib_verbs.c b/drivers/infiniband/hw/qib/qib_verbs.c
index cda8f41..9fab404 100644
--- a/drivers/infiniband/hw/qib/qib_verbs.c
+++ b/drivers/infiniband/hw/qib/qib_verbs.c
@@ -550,10 +550,12 @@
 {
 	struct qib_ibport *ibp = &rcd->ppd->ibport_data;
 
+	spin_lock(&qp->r_lock);
+
 	/* Check for valid receive state. */
 	if (!(ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK)) {
 		ibp->n_pkt_drops++;
-		return;
+		goto unlock;
 	}
 
 	switch (qp->ibqp.qp_type) {
@@ -577,6 +579,9 @@
 	default:
 		break;
 	}
+
+unlock:
+	spin_unlock(&qp->r_lock);
 }
 
 /**
diff --git a/drivers/infiniband/ulp/iser/iser_initiator.c b/drivers/infiniband/ulp/iser/iser_initiator.c
index 0b9ef07..95a08a8 100644
--- a/drivers/infiniband/ulp/iser/iser_initiator.c
+++ b/drivers/infiniband/ulp/iser/iser_initiator.c
@@ -170,7 +170,7 @@
 }
 
 
-int iser_alloc_rx_descriptors(struct iser_conn *ib_conn)
+static int iser_alloc_rx_descriptors(struct iser_conn *ib_conn)
 {
 	int i, j;
 	u64 dma_addr;
diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h
index f3e8f3c..857b3b9 100644
--- a/include/rdma/ib_verbs.h
+++ b/include/rdma/ib_verbs.h
@@ -555,7 +555,7 @@
 	IB_QPT_UC,
 	IB_QPT_UD,
 	IB_QPT_RAW_IPV6,
-	IB_QPT_RAW_ETY
+	IB_QPT_RAW_ETHERTYPE
 };
 
 enum ib_qp_create_flags {
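
Enumerator renames like this stay safe when every table indexed by QP type is
sized from the last member, so the tables track the enum automatically. The
idiom in miniature (hypothetical names):

	enum example_qp_type {
		EX_QPT_RC,
		EX_QPT_UC,
		EX_QPT_UD,
		EX_QPT_RAW_ETHERTYPE	/* keep last: sizes the tables below */
	};

	static const int ex_valid[EX_QPT_RAW_ETHERTYPE + 1] = {
		[EX_QPT_RC] = 1,
		[EX_QPT_UD] = 1,	/* unlisted types default to 0 */
	};
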