/*
 * cxgb3i_offload.c: Chelsio S3xx iscsi offloaded tcp connection management
 *
 * Copyright (C) 2003-2008 Chelsio Communications. All rights reserved.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the LICENSE file included in this
 * release for licensing terms and conditions.
 *
 * Written by: Dimitris Michailidis (dm@chelsio.com)
 *             Karen Xie (kxie@chelsio.com)
 */

#define pr_fmt(fmt) KBUILD_MODNAME ":%s: " fmt, __func__

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <scsi/scsi_host.h>

#include "common.h"
#include "t3_cpl.h"
#include "t3cdev.h"
#include "cxgb3_defs.h"
#include "cxgb3_ctl_defs.h"
#include "cxgb3_offload.h"
#include "firmware_exports.h"
#include "cxgb3i.h"

static unsigned int dbg_level;
#include "../libcxgbi.h"

#define DRV_MODULE_NAME         "cxgb3i"
#define DRV_MODULE_DESC         "Chelsio T3 iSCSI Driver"
#define DRV_MODULE_VERSION      "2.0.0"
#define DRV_MODULE_RELDATE      "Jun. 2010"

static char version[] =
        DRV_MODULE_DESC " " DRV_MODULE_NAME
        " v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("Chelsio Communications, Inc.");
MODULE_DESCRIPTION(DRV_MODULE_DESC);
MODULE_VERSION(DRV_MODULE_VERSION);
MODULE_LICENSE("GPL");

module_param(dbg_level, uint, 0644);
MODULE_PARM_DESC(dbg_level, "debug flag (default=0)");

static int cxgb3i_rcv_win = 256 * 1024;
module_param(cxgb3i_rcv_win, int, 0644);
MODULE_PARM_DESC(cxgb3i_rcv_win, "TCP receive window in bytes (default=256KB)");

static int cxgb3i_snd_win = 128 * 1024;
module_param(cxgb3i_snd_win, int, 0644);
MODULE_PARM_DESC(cxgb3i_snd_win, "TCP send window in bytes (default=128KB)");

static int cxgb3i_rx_credit_thres = 10 * 1024;
module_param(cxgb3i_rx_credit_thres, int, 0644);
MODULE_PARM_DESC(cxgb3i_rx_credit_thres,
                 "RX credits return threshold in bytes (default=10KB)");

static unsigned int cxgb3i_max_connect = 8 * 1024;
module_param(cxgb3i_max_connect, uint, 0644);
MODULE_PARM_DESC(cxgb3i_max_connect, "Max. # of connections (default=8192)");

static unsigned int cxgb3i_sport_base = 20000;
module_param(cxgb3i_sport_base, uint, 0644);
MODULE_PARM_DESC(cxgb3i_sport_base, "starting port number (default=20000)");

static void cxgb3i_dev_open(struct t3cdev *);
static void cxgb3i_dev_close(struct t3cdev *);
static void cxgb3i_dev_event_handler(struct t3cdev *, u32, u32);

static struct cxgb3_client t3_client = {
        .name = DRV_MODULE_NAME,
        .handlers = cxgb3i_cpl_handlers,
        .add = cxgb3i_dev_open,
        .remove = cxgb3i_dev_close,
        .event_handler = cxgb3i_dev_event_handler,
};

static struct scsi_host_template cxgb3i_host_template = {
        .module         = THIS_MODULE,
        .name           = DRV_MODULE_NAME,
        .proc_name      = DRV_MODULE_NAME,
        .can_queue      = CXGB3I_SCSI_HOST_QDEPTH,
        .queuecommand   = iscsi_queuecommand,
        .change_queue_depth = iscsi_change_queue_depth,
        .sg_tablesize   = SG_ALL,
        .max_sectors    = 0xFFFF,
        .cmd_per_lun    = ISCSI_DEF_CMD_PER_LUN,
        .eh_abort_handler = iscsi_eh_abort,
        .eh_device_reset_handler = iscsi_eh_device_reset,
        .eh_target_reset_handler = iscsi_eh_recover_target,
        .target_alloc   = iscsi_target_alloc,
        .use_clustering = DISABLE_CLUSTERING,
        .this_id        = -1,
};

static struct iscsi_transport cxgb3i_iscsi_transport = {
        .owner          = THIS_MODULE,
        .name           = DRV_MODULE_NAME,
        /* owner and name should be set already */
        .caps           = CAP_RECOVERY_L0 | CAP_MULTI_R2T | CAP_HDRDGST |
                          CAP_DATADGST | CAP_DIGEST_OFFLOAD |
                          CAP_PADDING_OFFLOAD | CAP_TEXT_NEGO,
        .attr_is_visible = cxgbi_attr_is_visible,
        .get_host_param = cxgbi_get_host_param,
        .set_host_param = cxgbi_set_host_param,
        /* session management */
        .create_session = cxgbi_create_session,
        .destroy_session = cxgbi_destroy_session,
        .get_session_param = iscsi_session_get_param,
        /* connection management */
        .create_conn    = cxgbi_create_conn,
        .bind_conn      = cxgbi_bind_conn,
        .destroy_conn   = iscsi_tcp_conn_teardown,
        .start_conn     = iscsi_conn_start,
        .stop_conn      = iscsi_conn_stop,
        .get_conn_param = iscsi_conn_get_param,
        .set_param      = cxgbi_set_conn_param,
        .get_stats      = cxgbi_get_conn_stats,
        /* pdu xmit req from user space */
        .send_pdu       = iscsi_conn_send_pdu,
        /* task */
        .init_task      = iscsi_tcp_task_init,
        .xmit_task      = iscsi_tcp_task_xmit,
        .cleanup_task   = cxgbi_cleanup_task,
        /* pdu */
        .alloc_pdu      = cxgbi_conn_alloc_pdu,
        .init_pdu       = cxgbi_conn_init_pdu,
        .xmit_pdu       = cxgbi_conn_xmit_pdu,
        .parse_pdu_itt  = cxgbi_parse_pdu_itt,
        /* TCP connect/disconnect */
        .get_ep_param   = cxgbi_get_ep_param,
        .ep_connect     = cxgbi_ep_connect,
        .ep_poll        = cxgbi_ep_poll,
        .ep_disconnect  = cxgbi_ep_disconnect,
        /* Error recovery timeout call */
        .session_recovery_timedout = iscsi_session_recovery_timedout,
};

static struct scsi_transport_template *cxgb3i_stt;

/*
 * CPL (Chelsio Protocol Language) defines a message passing interface
 * between the host driver and the Chelsio ASIC.
 * The section below implements the CPLs related to iSCSI TCP connection
 * open/close/abort and data send/receive.
 */
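
/*
 * In the comments below, "host ->" marks CPLs the host sends to the
 * adapter, and "-> host" marks CPLs the adapter delivers to the host.
 */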

static int push_tx_frames(struct cxgbi_sock *csk, int req_completion);

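/*
 * CPL active open request: host ->
 *
 * The receive window is advertised in two parts: a window-scale shift in
 * opt0h (computed from cxgb3i_rcv_win so the window fits the 16-bit TCP
 * window field) and the buffer size in 1KB units (V_RCV_BUFSIZ) in opt0l.
 */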
static void send_act_open_req(struct cxgbi_sock *csk, struct sk_buff *skb,
                              const struct l2t_entry *e)
{
        unsigned int wscale = cxgbi_sock_compute_wscale(cxgb3i_rcv_win);
        struct cpl_act_open_req *req = (struct cpl_act_open_req *)skb->head;

        skb->priority = CPL_PRIORITY_SETUP;

        req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
        OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_ACT_OPEN_REQ, csk->atid));
        req->local_port = csk->saddr.sin_port;
        req->peer_port = csk->daddr.sin_port;
        req->local_ip = csk->saddr.sin_addr.s_addr;
        req->peer_ip = csk->daddr.sin_addr.s_addr;

        req->opt0h = htonl(V_KEEP_ALIVE(1) | F_TCAM_BYPASS |
                        V_WND_SCALE(wscale) | V_MSS_IDX(csk->mss_idx) |
                        V_L2T_IDX(e->idx) | V_TX_CHANNEL(e->smt_idx));
        req->opt0l = htonl(V_ULP_MODE(ULP2_MODE_ISCSI) |
                        V_RCV_BUFSIZ(cxgb3i_rcv_win >> 10));

        log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
                "csk 0x%p,%u,0x%lx,%u, %pI4:%u-%pI4:%u, %u,%u,%u.\n",
                csk, csk->state, csk->flags, csk->atid,
                &req->local_ip, ntohs(req->local_port),
                &req->peer_ip, ntohs(req->peer_port),
                csk->mss_idx, e->idx, e->smt_idx);

        l2t_send(csk->cdev->lldev, skb, csk->l2t);
}

static inline void act_open_arp_failure(struct t3cdev *dev, struct sk_buff *skb)
{
        cxgbi_sock_act_open_req_arp_failure(NULL, skb);
}

/*
 * CPL connection close request: host ->
 *
 * Close a connection by sending a CPL_CLOSE_CON_REQ message and queue it to
 * the write queue (i.e., after any unsent TX data).
 */
static void send_close_req(struct cxgbi_sock *csk)
{
        struct sk_buff *skb = csk->cpl_close;
        struct cpl_close_con_req *req = (struct cpl_close_con_req *)skb->head;
        unsigned int tid = csk->tid;

        log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
                "csk 0x%p,%u,0x%lx,%u.\n",
                csk, csk->state, csk->flags, csk->tid);

        csk->cpl_close = NULL;
        req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_OFLD_CLOSE_CON));
        req->wr.wr_lo = htonl(V_WR_TID(tid));
        OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_CLOSE_CON_REQ, tid));
        req->rsvd = htonl(csk->write_seq);

        cxgbi_sock_skb_entail(csk, skb);
        if (csk->state >= CTP_ESTABLISHED)
                push_tx_frames(csk, 1);
}

/*
 * CPL connection abort request: host ->
 *
 * Send an ABORT_REQ message. Makes sure we do not send multiple ABORT_REQs
 * for the same connection and also that we do not try to send a message
 * after the connection has closed.
 */
static void abort_arp_failure(struct t3cdev *tdev, struct sk_buff *skb)
{
        struct cpl_abort_req *req = cplhdr(skb);

        log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
                "t3dev 0x%p, tid %u, skb 0x%p.\n",
                tdev, GET_TID(req), skb);
        req->cmd = CPL_ABORT_NO_RST;
        cxgb3_ofld_send(tdev, skb);
}

static void send_abort_req(struct cxgbi_sock *csk)
{
        struct sk_buff *skb = csk->cpl_abort_req;
        struct cpl_abort_req *req;

        if (unlikely(csk->state == CTP_ABORTING || !skb))
                return;
        cxgbi_sock_set_state(csk, CTP_ABORTING);
        cxgbi_sock_set_flag(csk, CTPF_ABORT_RPL_PENDING);
        /* Purge the send queue so we don't send anything after an abort. */
        cxgbi_sock_purge_write_queue(csk);

        csk->cpl_abort_req = NULL;
        req = (struct cpl_abort_req *)skb->head;
        skb->priority = CPL_PRIORITY_DATA;
        set_arp_failure_handler(skb, abort_arp_failure);
        req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_OFLD_HOST_ABORT_CON_REQ));
        req->wr.wr_lo = htonl(V_WR_TID(csk->tid));
        OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_ABORT_REQ, csk->tid));
        req->rsvd0 = htonl(csk->snd_nxt);
        req->rsvd1 = !cxgbi_sock_flag(csk, CTPF_TX_DATA_SENT);
        req->cmd = CPL_ABORT_SEND_RST;

        log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
                "csk 0x%p,%u,0x%lx,%u, snd_nxt %u, 0x%x.\n",
                csk, csk->state, csk->flags, csk->tid, csk->snd_nxt,
                req->rsvd1);

        l2t_send(csk->cdev->lldev, skb, csk->l2t);
}

/*
 * CPL connection abort reply: host ->
 *
 * Send an ABORT_RPL message in response to the received ABORT_REQ.
 */
static void send_abort_rpl(struct cxgbi_sock *csk, int rst_status)
{
        struct sk_buff *skb = csk->cpl_abort_rpl;
        struct cpl_abort_rpl *rpl = (struct cpl_abort_rpl *)skb->head;

        log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
                "csk 0x%p,%u,0x%lx,%u, status %d.\n",
                csk, csk->state, csk->flags, csk->tid, rst_status);

        csk->cpl_abort_rpl = NULL;
        skb->priority = CPL_PRIORITY_DATA;
        rpl->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_OFLD_HOST_ABORT_CON_RPL));
        rpl->wr.wr_lo = htonl(V_WR_TID(csk->tid));
        OPCODE_TID(rpl) = htonl(MK_OPCODE_TID(CPL_ABORT_RPL, csk->tid));
        rpl->cmd = rst_status;
        cxgb3_ofld_send(csk->cdev->lldev, skb);
}

/*
 * CPL connection rx data ack: host ->
 * Send RX credits through an RX_DATA_ACK CPL message. Returns the number of
 * credits sent.
 */
static u32 send_rx_credits(struct cxgbi_sock *csk, u32 credits)
{
        struct sk_buff *skb;
        struct cpl_rx_data_ack *req;
        u32 dack = F_RX_DACK_CHANGE | V_RX_DACK_MODE(1);

        log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_PDU_RX,
                "csk 0x%p,%u,0x%lx,%u, credit %u, dack %u.\n",
                csk, csk->state, csk->flags, csk->tid, credits, dack);

        skb = alloc_wr(sizeof(*req), 0, GFP_ATOMIC);
        if (!skb) {
                pr_info("csk 0x%p, credit %u, OOM.\n", csk, credits);
                return 0;
        }
        req = (struct cpl_rx_data_ack *)skb->head;
        req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
        OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_RX_DATA_ACK, csk->tid));
        req->credit_dack = htonl(F_RX_DACK_CHANGE | V_RX_DACK_MODE(1) |
                                 V_RX_CREDITS(credits));
        skb->priority = CPL_PRIORITY_ACK;
        cxgb3_ofld_send(csk->cdev->lldev, skb);
        return credits;
}

/*
 * CPL connection tx data: host ->
 *
 * Send iSCSI PDUs via TX_DATA CPL messages. Returns the number of
 * credits sent.
 * Each TX_DATA consumes work request credits (WRs), so we need to keep
 * track of how many we've used so far and how many are pending (i.e., not
 * yet acked by T3).
 */

static unsigned int wrlen __read_mostly;
static unsigned int skb_wrs[SKB_WR_LIST_SIZE] __read_mostly;

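/*
 * skb_wrs[i] is the number of work requests needed to send an skb whose
 * payload spans i buffers: the SGL costs 3 * i / 2 + (i & 1) flits, plus
 * 3 flits of WR header. A sketch of the arithmetic, with wr_len in flits:
 * a 4-buffer skb needs 6 + 3 = 9 flits, so with an 8-flit WR it takes
 * 1 + (9 - 2) / (8 - 1) = 2 WRs (continuation WRs appear to carry
 * wr_len - 1 flits of payload each, which is what init_wr_tab() encodes).
 */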
static void init_wr_tab(unsigned int wr_len)
{
        int i;

        if (skb_wrs[1])         /* already initialized */
                return;
        for (i = 1; i < SKB_WR_LIST_SIZE; i++) {
                int sgl_len = (3 * i) / 2 + (i & 1);

                sgl_len += 3;
                skb_wrs[i] = (sgl_len <= wr_len
                              ? 1 : 1 + (sgl_len - 2) / (wr_len - 1));
        }
        wrlen = wr_len * 8;
}

static inline void make_tx_data_wr(struct cxgbi_sock *csk, struct sk_buff *skb,
                                   int len, int req_completion)
{
        struct tx_data_wr *req;
        struct l2t_entry *l2t = csk->l2t;

        skb_reset_transport_header(skb);
        req = (struct tx_data_wr *)__skb_push(skb, sizeof(*req));
        req->wr_hi = htonl(V_WR_OP(FW_WROPCODE_OFLD_TX_DATA) |
                        (req_completion ? F_WR_COMPL : 0));
        req->wr_lo = htonl(V_WR_TID(csk->tid));
        /* len includes the length of any HW ULP additions */
        req->len = htonl(len);
        /* V_TX_ULP_SUBMODE sets both the mode and submode */
        req->flags = htonl(V_TX_ULP_SUBMODE(cxgbi_skcb_ulp_mode(skb)) |
                        V_TX_SHOVE((skb_peek(&csk->write_queue) ? 0 : 1)));
        req->sndseq = htonl(csk->snd_nxt);
        req->param = htonl(V_TX_PORT(l2t->smt_idx));

        if (!cxgbi_sock_flag(csk, CTPF_TX_DATA_SENT)) {
                req->flags |= htonl(V_TX_ACK_PAGES(2) | F_TX_INIT |
                                V_TX_CPU_IDX(csk->rss_qid));
                /* sendbuffer is in units of 32KB. */
                req->param |= htonl(V_TX_SNDBUF(cxgb3i_snd_win >> 15));
                cxgbi_sock_set_flag(csk, CTPF_TX_DATA_SENT);
        }
}

static void arp_failure_skb_discard(struct t3cdev *dev, struct sk_buff *skb)
{
        kfree_skb(skb);
}

/**
 * push_tx_frames -- start transmit
 * @csk: the offloaded connection
 * @req_completion: request wr_ack or not
 *
 * Prepends TX_DATA_WR or CPL_CLOSE_CON_REQ headers to buffers waiting in a
 * connection's send queue and sends them on to T3. Must be called with the
 * connection's lock held. Returns the amount of send buffer space that was
 * freed as a result of sending queued data to T3.
 */
static int push_tx_frames(struct cxgbi_sock *csk, int req_completion)
{
        int total_size = 0;
        struct sk_buff *skb;

        if (unlikely(csk->state < CTP_ESTABLISHED ||
                     csk->state == CTP_CLOSE_WAIT_1 ||
                     csk->state >= CTP_ABORTING)) {
                log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_PDU_TX,
                        "csk 0x%p,%u,0x%lx,%u, in closing state.\n",
                        csk, csk->state, csk->flags, csk->tid);
                return 0;
        }

        while (csk->wr_cred && (skb = skb_peek(&csk->write_queue)) != NULL) {
                int len = skb->len;     /* length before skb_push */
                int frags = skb_shinfo(skb)->nr_frags + (len != skb->data_len);
                int wrs_needed = skb_wrs[frags];

                if (wrs_needed > 1 && len + sizeof(struct tx_data_wr) <= wrlen)
                        wrs_needed = 1;

                WARN_ON(frags >= SKB_WR_LIST_SIZE || wrs_needed < 1);

                if (csk->wr_cred < wrs_needed) {
                        log_debug(1 << CXGBI_DBG_PDU_TX,
                                "csk 0x%p, skb len %u/%u, frag %u, wr %d<%u.\n",
                                csk, skb->len, skb->data_len, frags,
                                wrs_needed, csk->wr_cred);
                        break;
                }

                __skb_unlink(skb, &csk->write_queue);
                skb->priority = CPL_PRIORITY_DATA;
                skb->csum = wrs_needed; /* remember this until the WR_ACK */
                csk->wr_cred -= wrs_needed;
                csk->wr_una_cred += wrs_needed;
                cxgbi_sock_enqueue_wr(csk, skb);

                log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_PDU_TX,
                        "csk 0x%p, enqueue, skb len %u/%u, frag %u, wr %d, "
                        "left %u, unack %u.\n",
                        csk, skb->len, skb->data_len, frags, skb->csum,
                        csk->wr_cred, csk->wr_una_cred);

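                /*
                 * Ask the hardware for a completion (WR_ACK) once at least
                 * half of the connection's WR credits are outstanding, so
                 * credits flow back before the pipe runs dry.
                 */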
                if (likely(cxgbi_skcb_test_flag(skb, SKCBF_TX_NEED_HDR))) {
                        if ((req_completion &&
                             csk->wr_una_cred == wrs_needed) ||
                            csk->wr_una_cred >= csk->wr_max_cred / 2) {
                                req_completion = 1;
                                csk->wr_una_cred = 0;
                        }
                        len += cxgbi_ulp_extra_len(cxgbi_skcb_ulp_mode(skb));
                        make_tx_data_wr(csk, skb, len, req_completion);
                        csk->snd_nxt += len;
                        cxgbi_skcb_clear_flag(skb, SKCBF_TX_NEED_HDR);
                }
                total_size += skb->truesize;
                log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_PDU_TX,
                        "csk 0x%p, tid 0x%x, send skb 0x%p.\n",
                        csk, csk->tid, skb);
                set_arp_failure_handler(skb, arp_failure_skb_discard);
                l2t_send(csk->cdev->lldev, skb, csk->l2t);
        }
        return total_size;
}

/*
 * Process a CPL_ACT_ESTABLISH message: -> host
 * Updates connection state from an active establish CPL message. Runs with
 * the connection lock held.
 */

static inline void free_atid(struct cxgbi_sock *csk)
{
        if (cxgbi_sock_flag(csk, CTPF_HAS_ATID)) {
                cxgb3_free_atid(csk->cdev->lldev, csk->atid);
                cxgbi_sock_clear_flag(csk, CTPF_HAS_ATID);
                cxgbi_sock_put(csk);
        }
}

static int do_act_establish(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
{
        struct cxgbi_sock *csk = ctx;
        struct cpl_act_establish *req = cplhdr(skb);
        unsigned int tid = GET_TID(req);
        unsigned int atid = G_PASS_OPEN_TID(ntohl(req->tos_tid));
        u32 rcv_isn = ntohl(req->rcv_isn);      /* real RCV_ISN + 1 */

        log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
                "atid 0x%x,tid 0x%x, csk 0x%p,%u,0x%lx, isn %u.\n",
                atid, tid, csk, csk->state, csk->flags, rcv_isn);

        cxgbi_sock_get(csk);
        cxgbi_sock_set_flag(csk, CTPF_HAS_TID);
        csk->tid = tid;
        cxgb3_insert_tid(csk->cdev->lldev, &t3_client, csk, tid);

        free_atid(csk);

        csk->rss_qid = G_QNUM(ntohs(skb->csum));

        spin_lock_bh(&csk->lock);
        if (csk->retry_timer.function) {
                del_timer(&csk->retry_timer);
                csk->retry_timer.function = NULL;
        }

        if (unlikely(csk->state != CTP_ACTIVE_OPEN))
                pr_info("csk 0x%p,%u,0x%lx,%u, got EST.\n",
                        csk, csk->state, csk->flags, csk->tid);

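        /*
         * The hardware advertises at most M_RCV_BUFSIZ KB of receive window;
         * if the configured window is larger, pre-adjust rcv_wup so the
         * credit bookkeeping matches what was actually advertised.
         */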
        csk->copied_seq = csk->rcv_wup = csk->rcv_nxt = rcv_isn;
        if (cxgb3i_rcv_win > (M_RCV_BUFSIZ << 10))
                csk->rcv_wup -= cxgb3i_rcv_win - (M_RCV_BUFSIZ << 10);

        cxgbi_sock_established(csk, ntohl(req->snd_isn), ntohs(req->tcp_opt));

        if (unlikely(cxgbi_sock_flag(csk, CTPF_ACTIVE_CLOSE_NEEDED)))
                /* upper layer has requested closing */
                send_abort_req(csk);
        else {
                if (skb_queue_len(&csk->write_queue))
                        push_tx_frames(csk, 1);
                cxgbi_conn_tx_open(csk);
        }

        spin_unlock_bh(&csk->lock);
        __kfree_skb(skb);
        return 0;
}

/*
 * Process a CPL_ACT_OPEN_RPL message: -> host
 * Handle active open failures.
 */
static int act_open_rpl_status_to_errno(int status)
{
        switch (status) {
        case CPL_ERR_CONN_RESET:
                return -ECONNREFUSED;
        case CPL_ERR_ARP_MISS:
                return -EHOSTUNREACH;
        case CPL_ERR_CONN_TIMEDOUT:
                return -ETIMEDOUT;
        case CPL_ERR_TCAM_FULL:
                return -ENOMEM;
        case CPL_ERR_CONN_EXIST:
                return -EADDRINUSE;
        default:
                return -EIO;
        }
}

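/*
 * CPL_ERR_CONN_EXIST means the adapter still knows the 4-tuple (presumably
 * a previous instance lingering in a TIME_WAIT-like state); rather than
 * failing the open, do_act_open_rpl() below arms this timer to re-send the
 * ACT_OPEN_REQ after half a second.
 */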
static void act_open_retry_timer(unsigned long data)
{
        struct sk_buff *skb;
        struct cxgbi_sock *csk = (struct cxgbi_sock *)data;

        log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
                "csk 0x%p,%u,0x%lx,%u.\n",
                csk, csk->state, csk->flags, csk->tid);

        cxgbi_sock_get(csk);
        spin_lock_bh(&csk->lock);
        skb = alloc_wr(sizeof(struct cpl_act_open_req), 0, GFP_ATOMIC);
        if (!skb)
                cxgbi_sock_fail_act_open(csk, -ENOMEM);
        else {
                skb->sk = (struct sock *)csk;
                set_arp_failure_handler(skb, act_open_arp_failure);
                send_act_open_req(csk, skb, csk->l2t);
        }
        spin_unlock_bh(&csk->lock);
        cxgbi_sock_put(csk);
}

static int do_act_open_rpl(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
{
        struct cxgbi_sock *csk = ctx;
        struct cpl_act_open_rpl *rpl = cplhdr(skb);

        pr_info("csk 0x%p,%u,0x%lx,%u, status %u, %pI4:%u-%pI4:%u.\n",
                csk, csk->state, csk->flags, csk->atid, rpl->status,
                &csk->saddr.sin_addr.s_addr, ntohs(csk->saddr.sin_port),
                &csk->daddr.sin_addr.s_addr, ntohs(csk->daddr.sin_port));

        if (rpl->status != CPL_ERR_TCAM_FULL &&
            rpl->status != CPL_ERR_CONN_EXIST &&
            rpl->status != CPL_ERR_ARP_MISS)
                cxgb3_queue_tid_release(tdev, GET_TID(rpl));

        cxgbi_sock_get(csk);
        spin_lock_bh(&csk->lock);
        if (rpl->status == CPL_ERR_CONN_EXIST &&
            csk->retry_timer.function != act_open_retry_timer) {
                csk->retry_timer.function = act_open_retry_timer;
                mod_timer(&csk->retry_timer, jiffies + HZ / 2);
        } else
                cxgbi_sock_fail_act_open(csk,
                                act_open_rpl_status_to_errno(rpl->status));

        spin_unlock_bh(&csk->lock);
        cxgbi_sock_put(csk);
        __kfree_skb(skb);
        return 0;
}

/*
 * Process PEER_CLOSE CPL messages: -> host
 * Handle peer FIN.
 */
static int do_peer_close(struct t3cdev *cdev, struct sk_buff *skb, void *ctx)
{
        struct cxgbi_sock *csk = ctx;

        log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
                "csk 0x%p,%u,0x%lx,%u.\n",
                csk, csk->state, csk->flags, csk->tid);

        cxgbi_sock_rcv_peer_close(csk);
        __kfree_skb(skb);
        return 0;
}

/*
 * Process CLOSE_CONN_RPL CPL message: -> host
 * Process a peer ACK to our FIN.
 */
static int do_close_con_rpl(struct t3cdev *cdev, struct sk_buff *skb,
                            void *ctx)
{
        struct cxgbi_sock *csk = ctx;
        struct cpl_close_con_rpl *rpl = cplhdr(skb);

        log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
                "csk 0x%p,%u,0x%lx,%u, snxt %u.\n",
                csk, csk->state, csk->flags, csk->tid, ntohl(rpl->snd_nxt));

        cxgbi_sock_rcv_close_conn_rpl(csk, ntohl(rpl->snd_nxt));
        __kfree_skb(skb);
        return 0;
}

/*
 * Process ABORT_REQ_RSS CPL message: -> host
 * Process abort requests. If we are waiting for an ABORT_RPL we ignore this
 * request except that we need to reply to it.
 */

static int abort_status_to_errno(struct cxgbi_sock *csk, int abort_reason,
                                 int *need_rst)
{
        switch (abort_reason) {
        case CPL_ERR_BAD_SYN: /* fall through */
        case CPL_ERR_CONN_RESET:
                return csk->state > CTP_ESTABLISHED ? -EPIPE : -ECONNRESET;
        case CPL_ERR_XMIT_TIMEDOUT:
        case CPL_ERR_PERSIST_TIMEDOUT:
        case CPL_ERR_FINWAIT2_TIMEDOUT:
        case CPL_ERR_KEEPALIVE_TIMEDOUT:
                return -ETIMEDOUT;
        default:
                return -EIO;
        }
}

static int do_abort_req(struct t3cdev *cdev, struct sk_buff *skb, void *ctx)
{
        const struct cpl_abort_req_rss *req = cplhdr(skb);
        struct cxgbi_sock *csk = ctx;
        int rst_status = CPL_ABORT_NO_RST;

        log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
                "csk 0x%p,%u,0x%lx,%u.\n",
                csk, csk->state, csk->flags, csk->tid);

        if (req->status == CPL_ERR_RTX_NEG_ADVICE ||
            req->status == CPL_ERR_PERSIST_NEG_ADVICE) {
                goto done;
        }

        cxgbi_sock_get(csk);
        spin_lock_bh(&csk->lock);

        if (!cxgbi_sock_flag(csk, CTPF_ABORT_REQ_RCVD)) {
                cxgbi_sock_set_flag(csk, CTPF_ABORT_REQ_RCVD);
                cxgbi_sock_set_state(csk, CTP_ABORTING);
                goto out;
        }

        cxgbi_sock_clear_flag(csk, CTPF_ABORT_REQ_RCVD);
        send_abort_rpl(csk, rst_status);

        if (!cxgbi_sock_flag(csk, CTPF_ABORT_RPL_PENDING)) {
                csk->err = abort_status_to_errno(csk, req->status, &rst_status);
                cxgbi_sock_closed(csk);
        }

out:
        spin_unlock_bh(&csk->lock);
        cxgbi_sock_put(csk);
done:
        __kfree_skb(skb);
        return 0;
}

/*
 * Process ABORT_RPL_RSS CPL message: -> host
 * Process abort replies. We only process these messages if we anticipate
 * them as the coordination between SW and HW in this area is somewhat lacking
 * and sometimes we get ABORT_RPLs after we are done with the connection that
 * originated the ABORT_REQ.
 */
static int do_abort_rpl(struct t3cdev *cdev, struct sk_buff *skb, void *ctx)
{
        struct cpl_abort_rpl_rss *rpl = cplhdr(skb);
        struct cxgbi_sock *csk = ctx;

        log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
                "status 0x%x, csk 0x%p, s %u, 0x%lx.\n",
                rpl->status, csk, csk ? csk->state : 0,
                csk ? csk->flags : 0UL);
        /*
         * Ignore replies to post-close aborts indicating that the abort was
         * requested too late. These connections are terminated when we get
         * PEER_CLOSE or CLOSE_CON_RPL and by the time the abort_rpl_rss
         * arrives the TID is either no longer used or it has been recycled.
         */
        if (rpl->status == CPL_ERR_ABORT_FAILED)
                goto rel_skb;
        /*
         * Sometimes we've already closed the connection, e.g., a post-close
         * abort races with ABORT_REQ_RSS: the latter frees the connection
         * expecting the ABORT_REQ will fail with CPL_ERR_ABORT_FAILED,
         * but FW turns the ABORT_REQ into a regular one and so we get
         * ABORT_RPL_RSS with status 0 and no connection.
         */
        if (csk)
                cxgbi_sock_rcv_abort_rpl(csk);
rel_skb:
        __kfree_skb(skb);
        return 0;
}

/*
 * Process RX_ISCSI_HDR CPL message: -> host
 * Handle received PDUs: the payload may have been DDP'ed; if not, the
 * payload follows the BHS.
 */
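/*
 * A coalesced CPL_ISCSI_HDR message, as parsed below, carries:
 *   cpl_iscsi_hdr + BHS (hdr_len bytes),
 *   then cpl_iscsi_hdr_norss + the PDU payload (only if not DDP'ed),
 *   then cpl_rx_data_ddp_norss with the DDP status at the tail.
 */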
static int do_iscsi_hdr(struct t3cdev *t3dev, struct sk_buff *skb, void *ctx)
{
        struct cxgbi_sock *csk = ctx;
        struct cpl_iscsi_hdr *hdr_cpl = cplhdr(skb);
        struct cpl_iscsi_hdr_norss data_cpl;
        struct cpl_rx_data_ddp_norss ddp_cpl;
        unsigned int hdr_len, data_len, status;
        unsigned int len;
        int err;

        log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_PDU_RX,
                "csk 0x%p,%u,0x%lx,%u, skb 0x%p,%u.\n",
                csk, csk->state, csk->flags, csk->tid, skb, skb->len);

        spin_lock_bh(&csk->lock);

        if (unlikely(csk->state >= CTP_PASSIVE_CLOSE)) {
                log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
                        "csk 0x%p,%u,0x%lx,%u, bad state.\n",
                        csk, csk->state, csk->flags, csk->tid);
                if (csk->state != CTP_ABORTING)
                        goto abort_conn;
                else
                        goto discard;
        }

        cxgbi_skcb_tcp_seq(skb) = ntohl(hdr_cpl->seq);
        cxgbi_skcb_flags(skb) = 0;

        skb_reset_transport_header(skb);
        __skb_pull(skb, sizeof(struct cpl_iscsi_hdr));

        len = hdr_len = ntohs(hdr_cpl->len);
        /* msg coalesce is off or not enough data received */
        if (skb->len <= hdr_len) {
                pr_err("%s: tid %u, CPL_ISCSI_HDR, skb len %u < %u.\n",
                        csk->cdev->ports[csk->port_id]->name, csk->tid,
                        skb->len, hdr_len);
                goto abort_conn;
        }
        cxgbi_skcb_set_flag(skb, SKCBF_RX_COALESCED);

        err = skb_copy_bits(skb, skb->len - sizeof(ddp_cpl), &ddp_cpl,
                            sizeof(ddp_cpl));
        if (err < 0) {
                pr_err("%s: tid %u, copy cpl_ddp %u-%zu failed %d.\n",
                        csk->cdev->ports[csk->port_id]->name, csk->tid,
                        skb->len, sizeof(ddp_cpl), err);
                goto abort_conn;
        }

        cxgbi_skcb_set_flag(skb, SKCBF_RX_STATUS);
        cxgbi_skcb_rx_pdulen(skb) = ntohs(ddp_cpl.len);
        cxgbi_skcb_rx_ddigest(skb) = ntohl(ddp_cpl.ulp_crc);
        status = ntohl(ddp_cpl.ddp_status);

        log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_PDU_RX,
                "csk 0x%p, skb 0x%p,%u, pdulen %u, status 0x%x.\n",
                csk, skb, skb->len, cxgbi_skcb_rx_pdulen(skb), status);

        if (status & (1 << CPL_RX_DDP_STATUS_HCRC_SHIFT))
                cxgbi_skcb_set_flag(skb, SKCBF_RX_HCRC_ERR);
        if (status & (1 << CPL_RX_DDP_STATUS_DCRC_SHIFT))
                cxgbi_skcb_set_flag(skb, SKCBF_RX_DCRC_ERR);
        if (status & (1 << CPL_RX_DDP_STATUS_PAD_SHIFT))
                cxgbi_skcb_set_flag(skb, SKCBF_RX_PAD_ERR);

        if (skb->len > (hdr_len + sizeof(ddp_cpl))) {
                err = skb_copy_bits(skb, hdr_len, &data_cpl, sizeof(data_cpl));
                if (err < 0) {
                        pr_err("%s: tid %u, cp %zu/%u failed %d.\n",
                                csk->cdev->ports[csk->port_id]->name,
                                csk->tid, sizeof(data_cpl), skb->len, err);
                        goto abort_conn;
                }
                data_len = ntohs(data_cpl.len);
                log_debug(1 << CXGBI_DBG_DDP | 1 << CXGBI_DBG_PDU_RX,
                        "skb 0x%p, pdu not ddp'ed %u/%u, status 0x%x.\n",
                        skb, data_len, cxgbi_skcb_rx_pdulen(skb), status);
                len += sizeof(data_cpl) + data_len;
        } else if (status & (1 << CPL_RX_DDP_STATUS_DDP_SHIFT))
                cxgbi_skcb_set_flag(skb, SKCBF_RX_DATA_DDPD);

        csk->rcv_nxt = ntohl(ddp_cpl.seq) + cxgbi_skcb_rx_pdulen(skb);
        __pskb_trim(skb, len);
        __skb_queue_tail(&csk->receive_queue, skb);
        cxgbi_conn_pdu_ready(csk);

        spin_unlock_bh(&csk->lock);
        return 0;

abort_conn:
        send_abort_req(csk);
discard:
        spin_unlock_bh(&csk->lock);
        __kfree_skb(skb);
        return 0;
}

/*
 * Process TX_DATA_ACK CPL messages: -> host
 * Process an acknowledgment of WR completion. Advance snd_una and send the
 * next batch of work requests from the write queue.
 */
static int do_wr_ack(struct t3cdev *cdev, struct sk_buff *skb, void *ctx)
{
        struct cxgbi_sock *csk = ctx;
        struct cpl_wr_ack *hdr = cplhdr(skb);

        log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_PDU_RX,
                "csk 0x%p,%u,0x%lx,%u, cr %u.\n",
                csk, csk->state, csk->flags, csk->tid, ntohs(hdr->credits));

        cxgbi_sock_rcv_wr_ack(csk, ntohs(hdr->credits), ntohl(hdr->snd_una), 1);
        __kfree_skb(skb);
        return 0;
}

/*
 * For each connection, pre-allocate the skbs needed for close/abort
 * requests, so that we can service those requests right away.
 */
static int alloc_cpls(struct cxgbi_sock *csk)
{
        csk->cpl_close = alloc_wr(sizeof(struct cpl_close_con_req), 0,
                                  GFP_KERNEL);
        if (!csk->cpl_close)
                return -ENOMEM;
        csk->cpl_abort_req = alloc_wr(sizeof(struct cpl_abort_req), 0,
                                      GFP_KERNEL);
        if (!csk->cpl_abort_req)
                goto free_cpl_skbs;

        csk->cpl_abort_rpl = alloc_wr(sizeof(struct cpl_abort_rpl), 0,
                                      GFP_KERNEL);
        if (!csk->cpl_abort_rpl)
                goto free_cpl_skbs;

        return 0;

free_cpl_skbs:
        cxgbi_sock_free_cpl_skbs(csk);
        return -ENOMEM;
}

static void l2t_put(struct cxgbi_sock *csk)
{
        struct t3cdev *t3dev = (struct t3cdev *)csk->cdev->lldev;

        if (csk->l2t) {
                l2t_release(t3dev, csk->l2t);
                csk->l2t = NULL;
                cxgbi_sock_put(csk);
        }
}

/**
 * release_offload_resources - release offload resources
 * @csk: the offloaded iscsi tcp connection.
 *
 * Release the resources held by an offload connection (TID, L2T entry, etc.)
 */
static void release_offload_resources(struct cxgbi_sock *csk)
{
        struct t3cdev *t3dev = (struct t3cdev *)csk->cdev->lldev;

        log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
                "csk 0x%p,%u,0x%lx,%u.\n",
                csk, csk->state, csk->flags, csk->tid);

        csk->rss_qid = 0;
        cxgbi_sock_free_cpl_skbs(csk);

        if (csk->wr_cred != csk->wr_max_cred) {
                cxgbi_sock_purge_wr_queue(csk);
                cxgbi_sock_reset_wr_list(csk);
        }
        l2t_put(csk);
        if (cxgbi_sock_flag(csk, CTPF_HAS_ATID))
                free_atid(csk);
        else if (cxgbi_sock_flag(csk, CTPF_HAS_TID)) {
                cxgb3_remove_tid(t3dev, (void *)csk, csk->tid);
                cxgbi_sock_clear_flag(csk, CTPF_HAS_TID);
                cxgbi_sock_put(csk);
        }
        csk->dst = NULL;
        csk->cdev = NULL;
}

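/*
 * Keep the adapter's private IPv4 address in sync with the iSCSI HBA
 * setting: when a VLAN device is in use the address lives on the VLAN
 * device and is cleared on the underlying port, and it is cleared from
 * both when the HBA has no address.
 */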
static void update_address(struct cxgbi_hba *chba)
{
        if (chba->ipv4addr) {
                if (chba->vdev &&
                    chba->ipv4addr != cxgb3i_get_private_ipv4addr(chba->vdev)) {
                        cxgb3i_set_private_ipv4addr(chba->vdev, chba->ipv4addr);
                        cxgb3i_set_private_ipv4addr(chba->ndev, 0);
                        pr_info("%s set %pI4.\n",
                                chba->vdev->name, &chba->ipv4addr);
                } else if (chba->ipv4addr !=
                           cxgb3i_get_private_ipv4addr(chba->ndev)) {
                        cxgb3i_set_private_ipv4addr(chba->ndev, chba->ipv4addr);
                        pr_info("%s set %pI4.\n",
                                chba->ndev->name, &chba->ipv4addr);
                }
        } else if (cxgb3i_get_private_ipv4addr(chba->ndev)) {
                if (chba->vdev)
                        cxgb3i_set_private_ipv4addr(chba->vdev, 0);
                cxgb3i_set_private_ipv4addr(chba->ndev, 0);
        }
}

static int init_act_open(struct cxgbi_sock *csk)
{
        struct dst_entry *dst = csk->dst;
        struct cxgbi_device *cdev = csk->cdev;
        struct t3cdev *t3dev = (struct t3cdev *)cdev->lldev;
        struct net_device *ndev = cdev->ports[csk->port_id];
        struct cxgbi_hba *chba = cdev->hbas[csk->port_id];
        struct sk_buff *skb = NULL;

        log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
                "csk 0x%p,%u,0x%lx.\n", csk, csk->state, csk->flags);

        update_address(chba);
        if (chba->ipv4addr)
                csk->saddr.sin_addr.s_addr = chba->ipv4addr;

        csk->rss_qid = 0;
        csk->l2t = t3_l2t_get(t3dev, dst, ndev);
        if (!csk->l2t) {
                pr_err("NO l2t available.\n");
                return -EINVAL;
        }
        cxgbi_sock_get(csk);

        csk->atid = cxgb3_alloc_atid(t3dev, &t3_client, csk);
        if (csk->atid < 0) {
                pr_err("NO atid available.\n");
                goto rel_resource;
        }
        cxgbi_sock_set_flag(csk, CTPF_HAS_ATID);
        cxgbi_sock_get(csk);

        skb = alloc_wr(sizeof(struct cpl_act_open_req), 0, GFP_KERNEL);
        if (!skb)
                goto rel_resource;
        skb->sk = (struct sock *)csk;
        set_arp_failure_handler(skb, act_open_arp_failure);

        csk->wr_max_cred = csk->wr_cred = T3C_DATA(t3dev)->max_wrs - 1;
        csk->wr_una_cred = 0;
        csk->mss_idx = cxgbi_sock_select_mss(csk, dst_mtu(dst));
        cxgbi_sock_reset_wr_list(csk);
        csk->err = 0;

        log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
                "csk 0x%p,%u,0x%lx, %pI4:%u-%pI4:%u.\n",
                csk, csk->state, csk->flags,
                &csk->saddr.sin_addr.s_addr, ntohs(csk->saddr.sin_port),
                &csk->daddr.sin_addr.s_addr, ntohs(csk->daddr.sin_port));

        cxgbi_sock_set_state(csk, CTP_ACTIVE_OPEN);
        send_act_open_req(csk, skb, csk->l2t);
        return 0;

rel_resource:
        if (skb)
                __kfree_skb(skb);
        return -EINVAL;
}

cxgb3_cpl_handler_func cxgb3i_cpl_handlers[NUM_CPL_CMDS] = {
        [CPL_ACT_ESTABLISH] = do_act_establish,
        [CPL_ACT_OPEN_RPL] = do_act_open_rpl,
        [CPL_PEER_CLOSE] = do_peer_close,
        [CPL_ABORT_REQ_RSS] = do_abort_req,
        [CPL_ABORT_RPL_RSS] = do_abort_rpl,
        [CPL_CLOSE_CON_RPL] = do_close_con_rpl,
        [CPL_TX_DMA_ACK] = do_wr_ack,
        [CPL_ISCSI_HDR] = do_iscsi_hdr,
};

/**
 * cxgb3i_ofld_init - allocate and initialize resources for each adapter found
 * @cdev: cxgbi adapter
 */
int cxgb3i_ofld_init(struct cxgbi_device *cdev)
{
        struct t3cdev *t3dev = (struct t3cdev *)cdev->lldev;
        struct adap_ports port;
        struct ofld_page_info rx_page_info;
        unsigned int wr_len;
        int rc;

        if (t3dev->ctl(t3dev, GET_WR_LEN, &wr_len) < 0 ||
            t3dev->ctl(t3dev, GET_PORTS, &port) < 0 ||
            t3dev->ctl(t3dev, GET_RX_PAGE_INFO, &rx_page_info) < 0) {
                pr_warn("t3 0x%p, offload up, ioctl failed.\n", t3dev);
                return -EINVAL;
        }

        if (cxgb3i_max_connect > CXGBI_MAX_CONN)
                cxgb3i_max_connect = CXGBI_MAX_CONN;

        rc = cxgbi_device_portmap_create(cdev, cxgb3i_sport_base,
                                         cxgb3i_max_connect);
        if (rc < 0)
                return rc;

        init_wr_tab(wr_len);
        cdev->csk_release_offload_resources = release_offload_resources;
        cdev->csk_push_tx_frames = push_tx_frames;
        cdev->csk_send_abort_req = send_abort_req;
        cdev->csk_send_close_req = send_close_req;
        cdev->csk_send_rx_credits = send_rx_credits;
        cdev->csk_alloc_cpls = alloc_cpls;
        cdev->csk_init_act_open = init_act_open;

        pr_info("cdev 0x%p, offload up, added.\n", cdev);
        return 0;
}

/*
 * Functions to program the pagepods in h/w.
 */
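/*
 * Each DDP tag at index idx owns npods page pods of PPOD_SIZE bytes,
 * written into adapter memory at ddp->llimit + idx * PPOD_SIZE via
 * ULP_MEM_WRITE bypass work requests (a sketch of the layout as used by
 * ddp_set_map()/ddp_clear_map() below).
 */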
static inline void ulp_mem_io_set_hdr(struct sk_buff *skb, unsigned int addr)
{
        struct ulp_mem_io *req = (struct ulp_mem_io *)skb->head;

        memset(req, 0, sizeof(*req));

        req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_BYPASS));
        req->cmd_lock_addr = htonl(V_ULP_MEMIO_ADDR(addr >> 5) |
                                   V_ULPTX_CMD(ULP_MEM_WRITE));
        req->len = htonl(V_ULP_MEMIO_DATA_LEN(PPOD_SIZE >> 5) |
                         V_ULPTX_NFLITS((PPOD_SIZE >> 3) + 1));
}

static int ddp_set_map(struct cxgbi_sock *csk, struct cxgbi_pagepod_hdr *hdr,
                       unsigned int idx, unsigned int npods,
                       struct cxgbi_gather_list *gl)
{
        struct cxgbi_device *cdev = csk->cdev;
        struct cxgbi_ddp_info *ddp = cdev->ddp;
        unsigned int pm_addr = (idx << PPOD_SIZE_SHIFT) + ddp->llimit;
        int i;

        log_debug(1 << CXGBI_DBG_DDP,
                "csk 0x%p, idx %u, npods %u, gl 0x%p.\n",
                csk, idx, npods, gl);

        for (i = 0; i < npods; i++, idx++, pm_addr += PPOD_SIZE) {
                struct sk_buff *skb = alloc_wr(sizeof(struct ulp_mem_io) +
                                               PPOD_SIZE, 0, GFP_ATOMIC);

                if (!skb)
                        return -ENOMEM;

                ulp_mem_io_set_hdr(skb, pm_addr);
                cxgbi_ddp_ppod_set((struct cxgbi_pagepod *)(skb->head +
                                        sizeof(struct ulp_mem_io)),
                                   hdr, gl, i * PPOD_PAGES_MAX);
                skb->priority = CPL_PRIORITY_CONTROL;
                cxgb3_ofld_send(cdev->lldev, skb);
        }
        return 0;
}

static void ddp_clear_map(struct cxgbi_hba *chba, unsigned int tag,
                          unsigned int idx, unsigned int npods)
{
        struct cxgbi_device *cdev = chba->cdev;
        struct cxgbi_ddp_info *ddp = cdev->ddp;
        unsigned int pm_addr = (idx << PPOD_SIZE_SHIFT) + ddp->llimit;
        int i;

        log_debug(1 << CXGBI_DBG_DDP,
                "cdev 0x%p, idx %u, npods %u, tag 0x%x.\n",
                cdev, idx, npods, tag);

        for (i = 0; i < npods; i++, idx++, pm_addr += PPOD_SIZE) {
                struct sk_buff *skb = alloc_wr(sizeof(struct ulp_mem_io) +
                                               PPOD_SIZE, 0, GFP_ATOMIC);

                if (!skb) {
                        pr_err("tag 0x%x, 0x%x, %d/%u, skb OOM.\n",
                                tag, idx, i, npods);
                        continue;
                }
                ulp_mem_io_set_hdr(skb, pm_addr);
                skb->priority = CPL_PRIORITY_CONTROL;
                cxgb3_ofld_send(cdev->lldev, skb);
        }
}

static int ddp_setup_conn_pgidx(struct cxgbi_sock *csk,
                                unsigned int tid, int pg_idx, bool reply)
{
        struct sk_buff *skb = alloc_wr(sizeof(struct cpl_set_tcb_field), 0,
                                       GFP_KERNEL);
        struct cpl_set_tcb_field *req;
        u64 val = pg_idx < DDP_PGIDX_MAX ? pg_idx : 0;

        log_debug(1 << CXGBI_DBG_DDP,
                "csk 0x%p, tid %u, pg_idx %d.\n", csk, tid, pg_idx);
        if (!skb)
                return -ENOMEM;

        /* set up ulp submode and page size */
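        /*
         * TCB word 31: judging by the masks used here and in
         * ddp_setup_conn_digest(), bits 31:28 hold the DDP page-size index
         * and bits 27:24 the HCRC/DCRC digest submode.
         */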
        req = (struct cpl_set_tcb_field *)skb->head;
        req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
        OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, tid));
        req->reply = V_NO_REPLY(reply ? 0 : 1);
        req->cpu_idx = 0;
        req->word = htons(31);
        req->mask = cpu_to_be64(0xF0000000);
        req->val = cpu_to_be64(val << 28);
        skb->priority = CPL_PRIORITY_CONTROL;

        cxgb3_ofld_send(csk->cdev->lldev, skb);
        return 0;
}

/**
 * ddp_setup_conn_digest - setup conn. digest setting
 * @csk: cxgb tcp socket
 * @tid: connection id
 * @hcrc: header digest enabled
 * @dcrc: data digest enabled
 * @reply: request reply from h/w
 *
 * Set up the iSCSI digest settings for a connection identified by tid.
 */
static int ddp_setup_conn_digest(struct cxgbi_sock *csk, unsigned int tid,
                                 int hcrc, int dcrc, int reply)
{
        struct sk_buff *skb = alloc_wr(sizeof(struct cpl_set_tcb_field), 0,
                                       GFP_KERNEL);
        struct cpl_set_tcb_field *req;
        u64 val = (hcrc ? 1 : 0) | (dcrc ? 2 : 0);

        log_debug(1 << CXGBI_DBG_DDP,
                "csk 0x%p, tid %u, crc %d,%d.\n", csk, tid, hcrc, dcrc);
        if (!skb)
                return -ENOMEM;

        /* set up ulp submode and page size */
        req = (struct cpl_set_tcb_field *)skb->head;
        req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
        OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, tid));
        req->reply = V_NO_REPLY(reply ? 0 : 1);
        req->cpu_idx = 0;
        req->word = htons(31);
        req->mask = cpu_to_be64(0x0F000000);
        req->val = cpu_to_be64(val << 24);
        skb->priority = CPL_PRIORITY_CONTROL;

        cxgb3_ofld_send(csk->cdev->lldev, skb);
        return 0;
}

/**
 * t3_ddp_cleanup - release the cxgb3 adapter's ddp resources
 * @cdev: cxgb3i adapter
 *
 * Release all the resources held by the ddp pagepod manager for a given
 * adapter, if needed.
 */
static void t3_ddp_cleanup(struct cxgbi_device *cdev)
{
        struct t3cdev *tdev = (struct t3cdev *)cdev->lldev;

        if (cxgbi_ddp_cleanup(cdev)) {
                pr_info("t3dev 0x%p, ulp_iscsi no more user.\n", tdev);
                tdev->ulp_iscsi = NULL;
        }
}

/**
 * cxgb3i_ddp_init - initialize the cxgb3 adapter's ddp resource
 * @cdev: cxgb3i adapter
 *
 * Initialize the ddp pagepod manager for a given adapter.
 */
static int cxgb3i_ddp_init(struct cxgbi_device *cdev)
{
        struct t3cdev *tdev = (struct t3cdev *)cdev->lldev;
        struct cxgbi_ddp_info *ddp = tdev->ulp_iscsi;
        struct ulp_iscsi_info uinfo;
        unsigned int pgsz_factor[4];
        int i, err;

        if (ddp) {
                kref_get(&ddp->refcnt);
                pr_warn("t3dev 0x%p, ddp 0x%p already set up.\n",
                        tdev, tdev->ulp_iscsi);
                cdev->ddp = ddp;
                return -EALREADY;
        }

        err = tdev->ctl(tdev, ULP_ISCSI_GET_PARAMS, &uinfo);
        if (err < 0) {
                pr_err("%s, failed to get iscsi param err=%d.\n",
                        tdev->name, err);
                return err;
        }

        err = cxgbi_ddp_init(cdev, uinfo.llimit, uinfo.ulimit,
                             uinfo.max_txsz, uinfo.max_rxsz);
        if (err < 0)
                return err;

        ddp = cdev->ddp;

        uinfo.tagmask = ddp->idx_mask << PPOD_IDX_SHIFT;
        cxgbi_ddp_page_size_factor(pgsz_factor);
        for (i = 0; i < 4; i++)
                uinfo.pgsz_factor[i] = pgsz_factor[i];
        uinfo.ulimit = uinfo.llimit + (ddp->nppods << PPOD_SIZE_SHIFT);

        err = tdev->ctl(tdev, ULP_ISCSI_SET_PARAMS, &uinfo);
        if (err < 0) {
                pr_warn("%s unable to set iscsi param err=%d, ddp disabled.\n",
                        tdev->name, err);
                cxgbi_ddp_cleanup(cdev);
                return err;
        }
        tdev->ulp_iscsi = ddp;

        cdev->csk_ddp_setup_digest = ddp_setup_conn_digest;
        cdev->csk_ddp_setup_pgidx = ddp_setup_conn_pgidx;
        cdev->csk_ddp_set = ddp_set_map;
        cdev->csk_ddp_clear = ddp_clear_map;

        pr_info("tdev 0x%p, nppods %u, bits %u, mask 0x%x,0x%x pkt %u/%u, "
                "%u/%u.\n",
                tdev, ddp->nppods, ddp->idx_bits, ddp->idx_mask,
                ddp->rsvd_tag_mask, ddp->max_txsz, uinfo.max_txsz,
                ddp->max_rxsz, uinfo.max_rxsz);
        return 0;
}

static void cxgb3i_dev_close(struct t3cdev *t3dev)
{
        struct cxgbi_device *cdev = cxgbi_device_find_by_lldev(t3dev);

        if (!cdev || cdev->flags & CXGBI_FLAG_ADAPTER_RESET) {
                pr_info("0x%p close, f 0x%x.\n", cdev, cdev ? cdev->flags : 0);
                return;
        }

        cxgbi_device_unregister(cdev);
}

/**
 * cxgb3i_dev_open - init a t3 adapter structure and any h/w settings
 * @t3dev: t3cdev adapter
 */
static void cxgb3i_dev_open(struct t3cdev *t3dev)
{
        struct cxgbi_device *cdev = cxgbi_device_find_by_lldev(t3dev);
        struct adapter *adapter = tdev2adap(t3dev);
        int i, err;

        if (cdev) {
                pr_info("0x%p, updating.\n", cdev);
                return;
        }

        cdev = cxgbi_device_register(0, adapter->params.nports);
        if (!cdev) {
                pr_warn("device 0x%p register failed.\n", t3dev);
                return;
        }

        cdev->flags = CXGBI_FLAG_DEV_T3 | CXGBI_FLAG_IPV4_SET;
        cdev->lldev = t3dev;
        cdev->pdev = adapter->pdev;
        cdev->ports = adapter->port;
        cdev->nports = adapter->params.nports;
        cdev->mtus = adapter->params.mtus;
        cdev->nmtus = NMTUS;
        cdev->snd_win = cxgb3i_snd_win;
        cdev->rcv_win = cxgb3i_rcv_win;
        cdev->rx_credit_thres = cxgb3i_rx_credit_thres;
        cdev->skb_tx_rsvd = CXGB3I_TX_HEADER_LEN;
        cdev->skb_rx_extra = sizeof(struct cpl_iscsi_hdr_norss);
        cdev->dev_ddp_cleanup = t3_ddp_cleanup;
        cdev->itp = &cxgb3i_iscsi_transport;

        err = cxgb3i_ddp_init(cdev);
        if (err) {
                pr_info("0x%p ddp init failed\n", cdev);
                goto err_out;
        }

        err = cxgb3i_ofld_init(cdev);
        if (err) {
                pr_info("0x%p offload init failed\n", cdev);
                goto err_out;
        }

        err = cxgbi_hbas_add(cdev, CXGB3I_MAX_LUN, CXGBI_MAX_CONN,
                             &cxgb3i_host_template, cxgb3i_stt);
        if (err)
                goto err_out;

        for (i = 0; i < cdev->nports; i++)
                cdev->hbas[i]->ipv4addr =
                        cxgb3i_get_private_ipv4addr(cdev->ports[i]);

        pr_info("cdev 0x%p, f 0x%x, t3dev 0x%p open, err %d.\n",
                cdev, cdev->flags, t3dev, err);
        return;

err_out:
        cxgbi_device_unregister(cdev);
}

static void cxgb3i_dev_event_handler(struct t3cdev *t3dev, u32 event, u32 port)
{
        struct cxgbi_device *cdev = cxgbi_device_find_by_lldev(t3dev);

        log_debug(1 << CXGBI_DBG_TOE,
                "0x%p, cdev 0x%p, event 0x%x, port 0x%x.\n",
                t3dev, cdev, event, port);
        if (!cdev)
                return;

        switch (event) {
        case OFFLOAD_STATUS_DOWN:
                cdev->flags |= CXGBI_FLAG_ADAPTER_RESET;
                break;
        case OFFLOAD_STATUS_UP:
                cdev->flags &= ~CXGBI_FLAG_ADAPTER_RESET;
                break;
        }
}

/**
 * cxgb3i_init_module - module init entry point
 *
 * Initialize any driver wide global data structures and register the driver
 * as a cxgb3 client.
 */
static int __init cxgb3i_init_module(void)
{
        int rc;

        printk(KERN_INFO "%s", version);

        rc = cxgbi_iscsi_init(&cxgb3i_iscsi_transport, &cxgb3i_stt);
        if (rc < 0)
                return rc;

        cxgb3_register_client(&t3_client);
        return 0;
}

/**
 * cxgb3i_exit_module - module cleanup/exit entry point
 *
 * Go through the driver's hba list and, for each hba, release any resources
 * held; then unregister the iscsi transport and the cxgb3 client.
 */
static void __exit cxgb3i_exit_module(void)
{
        cxgb3_unregister_client(&t3_client);
        cxgbi_device_unregister_all(CXGBI_FLAG_DEV_T3);
        cxgbi_iscsi_cleanup(&cxgb3i_iscsi_transport, &cxgb3i_stt);
}

module_init(cxgb3i_init_module);
module_exit(cxgb3i_exit_module);