/*
 * Copyright (c) 2015 Oracle.  All rights reserved.
 * Copyright (c) 2003-2007 Network Appliance, Inc.  All rights reserved.
 */

/* Lightweight memory registration using Fast Registration Work
 * Requests (FRWR). Also sometimes referred to as FRMR mode.
 *
 * FRWR features ordered asynchronous registration and deregistration
 * of arbitrarily sized memory regions. This is the fastest and safest
 * but most complex memory registration mode.
 */

/* Normal operation
 *
 * A Memory Region is prepared for RDMA READ or WRITE using a FAST_REG
 * Work Request (frwr_op_map). When the RDMA operation is finished, this
 * Memory Region is invalidated using a LOCAL_INV Work Request
 * (frwr_op_unmap).
 *
 * Typically these Work Requests are not signaled, and neither are RDMA
 * SEND Work Requests (with the exception of signaling occasionally to
 * prevent provider work queue overflows). This greatly reduces HCA
 * interrupt workload.
 *
 * As an optimization, frwr_op_unmap marks MRs INVALID before the
 * LOCAL_INV WR is posted. If posting succeeds, the MR is placed on
 * rb_mws immediately so that no work (like managing a linked list
 * under a spinlock) is needed in the completion upcall.
 *
 * But this means that frwr_op_map() can occasionally encounter an MR
 * that is INVALID but the LOCAL_INV WR has not completed. Work Queue
 * ordering prevents a subsequent FAST_REG WR from executing against
 * that MR while it is still being invalidated.
 */
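
/* A rough sketch of the resulting FRMR state transitions, as driven
 * by the functions below:
 *
 *	INVALID --(FAST_REG posted by frwr_op_map)----> VALID
 *	VALID   --(LOCAL_INV posted by frwr_op_unmap)-> INVALID
 *	any state --(WR flushed, frwr_sendcompletion)-> STALE
 */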

/* Transport recovery
 *
 * ->op_map and the transport connect worker cannot run at the same
 * time, but ->op_unmap can fire while the transport connect worker
 * is running. Thus MR recovery is handled in ->op_map, to guarantee
 * that recovered MRs are owned by a sending RPC, and not one where
 * ->op_unmap could fire at the same time transport reconnect is
 * being done.
 *
 * When the underlying transport disconnects, MRs are left in one of
 * three states:
 *
 * INVALID:	The MR was not in use before the QP entered ERROR state.
 *		(Or, the LOCAL_INV WR has not completed or flushed yet).
 *
 * STALE:	The MR was being registered or unregistered when the QP
 *		entered ERROR state, and the pending WR was flushed.
 *
 * VALID:	The MR was registered before the QP entered ERROR state.
 *
 * When frwr_op_map encounters STALE and VALID MRs, they are recovered
 * with ib_dereg_mr and then are re-initialized. Because MR recovery
 * allocates fresh resources, it is deferred to a workqueue, and the
 * recovered MRs are placed back on the rb_mws list when recovery is
 * complete. frwr_op_map allocates another MR for the current RPC while
 * the broken MR is reset.
 *
 * To ensure that frwr_op_map doesn't encounter an MR that is marked
 * INVALID but that is about to be flushed due to a previous transport
 * disconnect, the transport connect worker attempts to drain all
 * pending send queue WRs before the transport is reconnected.
 */
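
/* In code terms: INVALID MRs are taken from the pool and reused
 * directly by frwr_op_map; any other MR it draws is handed to
 * __frwr_queue_recovery for the deferred reset described above.
 */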

#include "xprt_rdma.h"

#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
# define RPCDBG_FACILITY	RPCDBG_TRANS
#endif

static struct workqueue_struct *frwr_recovery_wq;

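/* Recovery runs from process context and allocates fresh MRs, so an
 * unbound workqueue fits. WQ_MEM_RECLAIM is presumably here because
 * RPC/RDMA can sit in the memory-reclaim (writeback) path: it gives the
 * workqueue a rescuer thread so recovery makes forward progress even
 * under memory pressure.
 */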
#define FRWR_RECOVERY_WQ_FLAGS		(WQ_UNBOUND | WQ_MEM_RECLAIM)

int
frwr_alloc_recovery_wq(void)
{
	frwr_recovery_wq = alloc_workqueue("frwr_recovery",
					   FRWR_RECOVERY_WQ_FLAGS, 0);
	return !frwr_recovery_wq ? -ENOMEM : 0;
}

void
frwr_destroy_recovery_wq(void)
{
	struct workqueue_struct *wq;

	if (!frwr_recovery_wq)
		return;

	wq = frwr_recovery_wq;
	frwr_recovery_wq = NULL;
	destroy_workqueue(wq);
}

/* Deferred reset of a single FRMR. Generate a fresh rkey by
 * replacing the MR.
 *
 * There's no recovery if this fails. The FRMR is abandoned, but
 * remains in rb_all. It will be cleaned up when the transport is
 * destroyed.
 */
static void
__frwr_recovery_worker(struct work_struct *work)
{
	struct rpcrdma_mw *r = container_of(work, struct rpcrdma_mw,
					    r.frmr.fr_work);
	struct rpcrdma_xprt *r_xprt = r->r.frmr.fr_xprt;
	unsigned int depth = r_xprt->rx_ia.ri_max_frmr_depth;
	struct ib_pd *pd = r_xprt->rx_ia.ri_pd;

	if (ib_dereg_mr(r->r.frmr.fr_mr))
		goto out_fail;

	r->r.frmr.fr_mr = ib_alloc_mr(pd, IB_MR_TYPE_MEM_REG, depth);
	if (IS_ERR(r->r.frmr.fr_mr))
		goto out_fail;

	dprintk("RPC:       %s: recovered FRMR %p\n", __func__, r);
	r->r.frmr.fr_state = FRMR_IS_INVALID;
	rpcrdma_put_mw(r_xprt, r);
	return;

out_fail:
	pr_warn("RPC:       %s: FRMR %p unrecovered\n",
		__func__, r);
}

/* A broken MR was discovered in a context that can't sleep.
 * Defer recovery to the recovery worker.
 */
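/* (fr_work is embedded in struct rpcrdma_mw, so queueing it requires no
 * memory allocation and cannot fail in atomic context.)
 */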
static void
__frwr_queue_recovery(struct rpcrdma_mw *r)
{
	INIT_WORK(&r->r.frmr.fr_work, __frwr_recovery_worker);
	queue_work(frwr_recovery_wq, &r->r.frmr.fr_work);
}

static int
__frwr_init(struct rpcrdma_mw *r, struct ib_pd *pd, struct ib_device *device,
	    unsigned int depth)
{
	struct rpcrdma_frmr *f = &r->r.frmr;
	int rc;

	f->fr_mr = ib_alloc_mr(pd, IB_MR_TYPE_MEM_REG, depth);
	if (IS_ERR(f->fr_mr))
		goto out_mr_err;
	f->fr_pgl = ib_alloc_fast_reg_page_list(device, depth);
	if (IS_ERR(f->fr_pgl))
		goto out_list_err;
	return 0;

out_mr_err:
	rc = PTR_ERR(f->fr_mr);
	dprintk("RPC:       %s: ib_alloc_mr status %i\n",
		__func__, rc);
	return rc;

out_list_err:
	rc = PTR_ERR(f->fr_pgl);
	dprintk("RPC:       %s: ib_alloc_fast_reg_page_list status %i\n",
		__func__, rc);
	ib_dereg_mr(f->fr_mr);
	return rc;
}

static void
__frwr_release(struct rpcrdma_mw *r)
{
	int rc;

	rc = ib_dereg_mr(r->r.frmr.fr_mr);
	if (rc)
		dprintk("RPC:       %s: ib_dereg_mr status %i\n",
			__func__, rc);
	ib_free_fast_reg_page_list(r->r.frmr.fr_pgl);
}

static int
frwr_op_open(struct rpcrdma_ia *ia, struct rpcrdma_ep *ep,
	     struct rpcrdma_create_data_internal *cdata)
{
	struct ib_device_attr *devattr = &ia->ri_devattr;
	int depth, delta;

	/* Obtain an lkey to use for the regbufs, which are
	 * protected from remote access.
	 */
	ia->ri_dma_lkey = ia->ri_device->local_dma_lkey;

	ia->ri_max_frmr_depth =
			min_t(unsigned int, RPCRDMA_MAX_DATA_SEGS,
			      devattr->max_fast_reg_page_list_len);
	dprintk("RPC:       %s: device's max FR page list len = %u\n",
		__func__, ia->ri_max_frmr_depth);

	/* Add room for frmr register and invalidate WRs.
	 * 1. FRMR reg WR for head
	 * 2. FRMR invalidate WR for head
	 * 3. N FRMR reg WRs for pagelist
	 * 4. N FRMR invalidate WRs for pagelist
	 * 5. FRMR reg WR for tail
	 * 6. FRMR invalidate WR for tail
	 * 7. The RDMA_SEND WR
	 */
	depth = 7;

	/* Calculate N if the device max FRMR depth is smaller than
	 * RPCRDMA_MAX_DATA_SEGS.
	 */
	if (ia->ri_max_frmr_depth < RPCRDMA_MAX_DATA_SEGS) {
		delta = RPCRDMA_MAX_DATA_SEGS - ia->ri_max_frmr_depth;
		do {
			depth += 2;	/* FRMR reg + invalidate */
			delta -= ia->ri_max_frmr_depth;
		} while (delta > 0);
	}

	ep->rep_attr.cap.max_send_wr *= depth;
	if (ep->rep_attr.cap.max_send_wr > devattr->max_qp_wr) {
		cdata->max_requests = devattr->max_qp_wr / depth;
		if (!cdata->max_requests)
			return -EINVAL;
		ep->rep_attr.cap.max_send_wr = cdata->max_requests *
					       depth;
	}

	return 0;
}
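
/* A worked example with hypothetical values: if RPCRDMA_MAX_DATA_SEGS
 * were 256 and the device capped fast_reg page lists at 64 entries,
 * delta would start at 192 and the loop in frwr_op_open would run three
 * times, adding three more reg/invalidate pairs: depth = 7 + 6 = 13.
 * With 32 requests, max_send_wr = 32 * 13 = 416; if the device allowed
 * only 256 QP WRs, max_requests would be scaled back to 256 / 13 = 19
 * and max_send_wr to 19 * 13 = 247.
 */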

/* FRWR mode conveys a list of pages per chunk segment. The
 * maximum length of that list is the FRWR page list depth.
 */
static size_t
frwr_op_maxpages(struct rpcrdma_xprt *r_xprt)
{
	struct rpcrdma_ia *ia = &r_xprt->rx_ia;

	return min_t(unsigned int, RPCRDMA_MAX_DATA_SEGS,
		     rpcrdma_max_segments(r_xprt) * ia->ri_max_frmr_depth);
}

/* If FAST_REG or LOCAL_INV failed, indicate the frmr needs to be reset. */
static void
frwr_sendcompletion(struct ib_wc *wc)
{
	struct rpcrdma_mw *r;

	if (likely(wc->status == IB_WC_SUCCESS))
		return;

	/* WARNING: Only wr_id and status are reliable at this point */
	r = (struct rpcrdma_mw *)(unsigned long)wc->wr_id;
	pr_warn("RPC:       %s: frmr %p flushed, status %s (%d)\n",
		__func__, r, ib_wc_status_msg(wc->status), wc->status);
	r->r.frmr.fr_state = FRMR_IS_STALE;
}
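
/* The wr_id round-trip: frwr_op_map and frwr_op_unmap stash the
 * rpcrdma_mw pointer in wr_id when posting, so a flushed completion
 * can be mapped back to its MR here without any lookup structure.
 */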

static int
frwr_op_init(struct rpcrdma_xprt *r_xprt)
{
	struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
	struct ib_device *device = r_xprt->rx_ia.ri_device;
	unsigned int depth = r_xprt->rx_ia.ri_max_frmr_depth;
	struct ib_pd *pd = r_xprt->rx_ia.ri_pd;
	int i;

	spin_lock_init(&buf->rb_mwlock);
	INIT_LIST_HEAD(&buf->rb_mws);
	INIT_LIST_HEAD(&buf->rb_all);

	i = max_t(int, RPCRDMA_MAX_DATA_SEGS / depth, 1);
	i += 2;				/* head + tail */
	i *= buf->rb_max_requests;	/* one set for each RPC slot */
	dprintk("RPC:       %s: initializing %d FRMRs\n", __func__, i);

	while (i--) {
		struct rpcrdma_mw *r;
		int rc;

		r = kzalloc(sizeof(*r), GFP_KERNEL);
		if (!r)
			return -ENOMEM;

		rc = __frwr_init(r, pd, device, depth);
		if (rc) {
			kfree(r);
			return rc;
		}

		list_add(&r->mw_list, &buf->rb_mws);
		list_add(&r->mw_all, &buf->rb_all);
		r->mw_sendcompletion = frwr_sendcompletion;
		r->r.frmr.fr_xprt = r_xprt;
	}

	return 0;
}
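
/* Sizing sketch, again with hypothetical values: with
 * RPCRDMA_MAX_DATA_SEGS of 256, an FRMR depth of 64, and 32 RPC slots,
 * frwr_op_init would allocate (256/64 + 2) * 32 = 192 MRs up front.
 */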

/* Post a FAST_REG Work Request to register a memory region
 * for remote access via RDMA READ or RDMA WRITE.
 */
static int
frwr_op_map(struct rpcrdma_xprt *r_xprt, struct rpcrdma_mr_seg *seg,
	    int nsegs, bool writing)
{
	struct rpcrdma_ia *ia = &r_xprt->rx_ia;
	struct ib_device *device = ia->ri_device;
	enum dma_data_direction direction = rpcrdma_data_dir(writing);
	struct rpcrdma_mr_seg *seg1 = seg;
	struct rpcrdma_mw *mw;
	struct rpcrdma_frmr *frmr;
	struct ib_mr *mr;
	struct ib_send_wr fastreg_wr, *bad_wr;
	u8 key;
	int len, pageoff;
	int i, rc;
	int seg_len;
	u64 pa;
	int page_no;

	mw = seg1->rl_mw;
	seg1->rl_mw = NULL;
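	/* Take MWs from the pool until one in the INVALID state is
	 * found. Any MW that is not INVALID (its LOCAL_INV was flushed,
	 * or it was never invalidated) is queued for deferred recovery,
	 * per the "Transport recovery" note above.
	 */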
	do {
		if (mw)
			__frwr_queue_recovery(mw);
		mw = rpcrdma_get_mw(r_xprt);
		if (!mw)
			return -ENOMEM;
	} while (mw->r.frmr.fr_state != FRMR_IS_INVALID);
	frmr = &mw->r.frmr;
	frmr->fr_state = FRMR_IS_VALID;

	pageoff = offset_in_page(seg1->mr_offset);
	seg1->mr_offset -= pageoff;	/* start of page */
	seg1->mr_len += pageoff;
	len = -pageoff;
	if (nsegs > ia->ri_max_frmr_depth)
		nsegs = ia->ri_max_frmr_depth;

	for (page_no = i = 0; i < nsegs;) {
		rpcrdma_map_one(device, seg, direction);
		pa = seg->mr_dma;
		for (seg_len = seg->mr_len; seg_len > 0; seg_len -= PAGE_SIZE) {
			frmr->fr_pgl->page_list[page_no++] = pa;
			pa += PAGE_SIZE;
		}
		len += seg->mr_len;
		++seg;
		++i;
		/* Check for holes: a segment that does not end on a page
		 * boundary, or a next segment that does not start on one,
		 * terminates the chunk here.
		 */
		if ((i < nsegs && offset_in_page(seg->mr_offset)) ||
		    offset_in_page((seg-1)->mr_offset + (seg-1)->mr_len))
			break;
	}
	dprintk("RPC:       %s: Using frmr %p to map %d segments (%d bytes)\n",
		__func__, mw, i, len);

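	/* Build the FAST_REG WR. Like other WRs on this send queue it
	 * is typically unsignaled; see "Normal operation" above.
	 */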
	memset(&fastreg_wr, 0, sizeof(fastreg_wr));
	fastreg_wr.wr_id = (unsigned long)(void *)mw;
	fastreg_wr.opcode = IB_WR_FAST_REG_MR;
	fastreg_wr.wr.fast_reg.iova_start = seg1->mr_dma + pageoff;
	fastreg_wr.wr.fast_reg.page_list = frmr->fr_pgl;
	fastreg_wr.wr.fast_reg.page_shift = PAGE_SHIFT;
	fastreg_wr.wr.fast_reg.page_list_len = page_no;
	fastreg_wr.wr.fast_reg.length = len;
	fastreg_wr.wr.fast_reg.access_flags = writing ?
				IB_ACCESS_REMOTE_WRITE | IB_ACCESS_LOCAL_WRITE :
				IB_ACCESS_REMOTE_READ;
	mr = frmr->fr_mr;
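	/* Bump the low-order "key" byte of the rkey so each reuse of
	 * this MR presents a distinct rkey to the peer.
	 */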
	key = (u8)(mr->rkey & 0x000000FF);
	ib_update_fast_reg_key(mr, ++key);
	fastreg_wr.wr.fast_reg.rkey = mr->rkey;

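	/* Account for this (typically unsignaled) WR against the
	 * completion budget; see the signaling note in "Normal
	 * operation" above.
	 */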
	DECR_CQCOUNT(&r_xprt->rx_ep);
	rc = ib_post_send(ia->ri_id->qp, &fastreg_wr, &bad_wr);
	if (rc)
		goto out_senderr;

	seg1->rl_mw = mw;
	seg1->mr_rkey = mr->rkey;
	seg1->mr_base = seg1->mr_dma + pageoff;
	seg1->mr_nsegs = i;
	seg1->mr_len = len;
	return i;

out_senderr:
	dprintk("RPC:       %s: ib_post_send status %i\n", __func__, rc);
	while (i--)
		rpcrdma_unmap_one(device, --seg);
	__frwr_queue_recovery(mw);
	return rc;
}

/* Post a LOCAL_INV Work Request to prevent further remote access
 * via RDMA READ or RDMA WRITE.
 */
static int
frwr_op_unmap(struct rpcrdma_xprt *r_xprt, struct rpcrdma_mr_seg *seg)
{
	struct rpcrdma_mr_seg *seg1 = seg;
	struct rpcrdma_ia *ia = &r_xprt->rx_ia;
	struct rpcrdma_mw *mw = seg1->rl_mw;
	struct ib_send_wr invalidate_wr, *bad_wr;
	int rc, nsegs = seg->mr_nsegs;

	dprintk("RPC:       %s: FRMR %p\n", __func__, mw);

	seg1->rl_mw = NULL;
	mw->r.frmr.fr_state = FRMR_IS_INVALID;

	memset(&invalidate_wr, 0, sizeof(invalidate_wr));
	invalidate_wr.wr_id = (unsigned long)(void *)mw;
	invalidate_wr.opcode = IB_WR_LOCAL_INV;
	invalidate_wr.ex.invalidate_rkey = mw->r.frmr.fr_mr->rkey;
	DECR_CQCOUNT(&r_xprt->rx_ep);

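	/* DMA unmap the segments first, then post. Holding ri_qplock
	 * for read keeps the connect worker from replacing the QP while
	 * the LOCAL_INV WR is being posted.
	 */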
	while (seg1->mr_nsegs--)
		rpcrdma_unmap_one(ia->ri_device, seg++);
	read_lock(&ia->ri_qplock);
	rc = ib_post_send(ia->ri_id->qp, &invalidate_wr, &bad_wr);
	read_unlock(&ia->ri_qplock);
	if (rc)
		goto out_err;

	rpcrdma_put_mw(r_xprt, mw);
	return nsegs;

out_err:
	dprintk("RPC:       %s: ib_post_send status %i\n", __func__, rc);
	__frwr_queue_recovery(mw);
	return nsegs;
}

static void
frwr_op_destroy(struct rpcrdma_buffer *buf)
{
	struct rpcrdma_mw *r;

	/* Ensure stale MWs for "buf" are no longer in flight */
	flush_workqueue(frwr_recovery_wq);

	while (!list_empty(&buf->rb_all)) {
		r = list_entry(buf->rb_all.next, struct rpcrdma_mw, mw_all);
		list_del(&r->mw_all);
		__frwr_release(r);
		kfree(r);
	}
}

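/* Method table plugged into the generic xprtrdma code; presumably
 * selected when the FRWR memory registration mode is in effect
 * (ro_displayname identifies the mode in debugging output).
 */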
const struct rpcrdma_memreg_ops rpcrdma_frwr_memreg_ops = {
	.ro_map				= frwr_op_map,
	.ro_unmap			= frwr_op_unmap,
	.ro_open			= frwr_op_open,
	.ro_maxpages			= frwr_op_maxpages,
	.ro_init			= frwr_op_init,
	.ro_destroy			= frwr_op_destroy,
	.ro_displayname			= "frwr",
};