/*
 * Linux network driver for Brocade Converged Network Adapter.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License (GPL) Version 2 as
 * published by the Free Software Foundation
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 */
#ifndef __BNA_H__
#define __BNA_H__

#include "bfa_wc.h"
#include "bfa_ioc.h"
#include "cna.h"
#include "bfi_ll.h"
#include "bna_types.h"

extern const u32 bna_napi_dim_vector[][BNA_BIAS_T_MAX];

/**
 *
 * Macros and constants
 *
 */

#define BNA_IOC_TIMER_FREQ		200

/* Log string size */
#define BNA_MESSAGE_SIZE		256

/* MBOX API for PORT, TX, RX */
#define bna_mbox_qe_fill(_qe, _cmd, _cmd_len, _cbfn, _cbarg)		\
do {									\
	memcpy(&((_qe)->cmd.msg[0]), (_cmd), (_cmd_len));		\
	(_qe)->cbfn = (_cbfn);						\
	(_qe)->cbarg = (_cbarg);					\
} while (0)
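
/*
 * Illustrative use of bna_mbox_qe_fill() (a minimal sketch, not taken from
 * the driver; 'req', 'my_cbfn' and 'my_cbarg' are hypothetical, and the
 * command can be any bfi_ll request small enough to fit the queue element):
 *
 *	struct bna_mbox_qe qe;
 *
 *	bna_mbox_qe_fill(&qe, &req, sizeof(req), my_cbfn, my_cbarg);
 *	bna_mbox_send(bna, &qe);	(declared under "MBOX" below)
 */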

#define bna_is_small_rxq(rcb) ((rcb)->id == 1)

#define BNA_MAC_IS_EQUAL(_mac1, _mac2)					\
	(!memcmp((_mac1), (_mac2), sizeof(mac_t)))

#define BNA_POWER_OF_2(x) (((x) & ((x) - 1)) == 0)

#define BNA_TO_POWER_OF_2(x)						\
do {									\
	int _shift = 0;							\
	while ((x) && (x) != 1) {					\
		(x) >>= 1;						\
		_shift++;						\
	}								\
	(x) <<= _shift;							\
} while (0)

#define BNA_TO_POWER_OF_2_HIGH(x)					\
do {									\
	int n = 1;							\
	while (n < (x))							\
		n <<= 1;						\
	(x) = n;							\
} while (0)
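
/*
 * Worked example for the power-of-2 helpers above (values are illustrative
 * only; both rounding macros modify their argument in place):
 *
 *	u32 depth = 12;
 *	BNA_TO_POWER_OF_2(depth);	-> depth == 8  (rounded down)
 *
 *	depth = 12;
 *	BNA_TO_POWER_OF_2_HIGH(depth);	-> depth == 16 (rounded up)
 *
 *	BNA_POWER_OF_2(16)		-> true
 *	BNA_POWER_OF_2(12)		-> false
 */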

/*
 * Input:  _addr         - OS DMA address in host endian format
 * Output: _bna_dma_addr - pointer to the h/w DMA address
 */
#define BNA_SET_DMA_ADDR(_addr, _bna_dma_addr)				\
do {									\
	u64 tmp_addr =							\
	cpu_to_be64((u64)(_addr));					\
	(_bna_dma_addr)->msb = ((struct bna_dma_addr *)&tmp_addr)->msb; \
	(_bna_dma_addr)->lsb = ((struct bna_dma_addr *)&tmp_addr)->lsb; \
} while (0)

/*
 * Input:  _bna_dma_addr - pointer to the h/w DMA address
 * Output: _addr         - OS DMA address in host endian format
 */
#define BNA_GET_DMA_ADDR(_bna_dma_addr, _addr)				\
do {									\
	(_addr) = ((((u64)ntohl((_bna_dma_addr)->msb))) << 32)		\
	| ((ntohl((_bna_dma_addr)->lsb) & 0xffffffff));			\
} while (0)
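
/*
 * Round-trip sketch for the two conversion macros (illustrative only; the
 * variable names are hypothetical, and the DMA handle would come from the
 * usual DMA mapping calls in the bnad code):
 *
 *	dma_addr_t dma_handle;		host-endian address from dma mapping
 *	struct bna_dma_addr hw_addr;	big-endian msb/lsb pair for the h/w
 *	u64 back;
 *
 *	BNA_SET_DMA_ADDR(dma_handle, &hw_addr);
 *	BNA_GET_DMA_ADDR(&hw_addr, back);	-> back == dma_handle
 */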

#define containing_rec(addr, type, field)				\
	((type *)((unsigned char *)(addr) -				\
	(unsigned char *)(&((type *)0)->field)))

#define BNA_TXQ_WI_NEEDED(_vectors)	(((_vectors) + 3) >> 2)

/* TxQ element is 64 bytes */
#define BNA_TXQ_PAGE_INDEX_MAX		(PAGE_SIZE >> 6)
#define BNA_TXQ_PAGE_INDEX_MAX_SHIFT	(PAGE_SHIFT - 6)

#define BNA_TXQ_QPGE_PTR_GET(_qe_idx, _qpt_ptr, _qe_ptr, _qe_ptr_range) \
{									\
	unsigned int page_index;	/* index within a page */	\
	void *page_addr;						\
	page_index = (_qe_idx) & (BNA_TXQ_PAGE_INDEX_MAX - 1);		\
	(_qe_ptr_range) = (BNA_TXQ_PAGE_INDEX_MAX - page_index);	\
	page_addr = (_qpt_ptr)[((_qe_idx) >> BNA_TXQ_PAGE_INDEX_MAX_SHIFT)];\
	(_qe_ptr) = &((struct bna_txq_entry *)(page_addr))[page_index]; \
}

/* RxQ element is 8 bytes */
#define BNA_RXQ_PAGE_INDEX_MAX		(PAGE_SIZE >> 3)
#define BNA_RXQ_PAGE_INDEX_MAX_SHIFT	(PAGE_SHIFT - 3)

#define BNA_RXQ_QPGE_PTR_GET(_qe_idx, _qpt_ptr, _qe_ptr, _qe_ptr_range) \
{									\
	unsigned int page_index;	/* index within a page */	\
	void *page_addr;						\
	page_index = (_qe_idx) & (BNA_RXQ_PAGE_INDEX_MAX - 1);		\
	(_qe_ptr_range) = (BNA_RXQ_PAGE_INDEX_MAX - page_index);	\
	page_addr = (_qpt_ptr)[((_qe_idx) >>				\
				BNA_RXQ_PAGE_INDEX_MAX_SHIFT)];		\
	(_qe_ptr) = &((struct bna_rxq_entry *)(page_addr))[page_index]; \
}

/* CQ element is 16 bytes */
#define BNA_CQ_PAGE_INDEX_MAX		(PAGE_SIZE >> 4)
#define BNA_CQ_PAGE_INDEX_MAX_SHIFT	(PAGE_SHIFT - 4)

#define BNA_CQ_QPGE_PTR_GET(_qe_idx, _qpt_ptr, _qe_ptr, _qe_ptr_range)	\
{									\
	unsigned int page_index;	/* index within a page */	\
	void *page_addr;						\
									\
	page_index = (_qe_idx) & (BNA_CQ_PAGE_INDEX_MAX - 1);		\
	(_qe_ptr_range) = (BNA_CQ_PAGE_INDEX_MAX - page_index);	\
	page_addr = (_qpt_ptr)[((_qe_idx) >>				\
				BNA_CQ_PAGE_INDEX_MAX_SHIFT)];		\
	(_qe_ptr) = &((struct bna_cq_entry *)(page_addr))[page_index];	\
}
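
/*
 * Usage sketch for the *_QPGE_PTR_GET() macros (TxQ variant shown; the RxQ
 * and CQ variants work the same way). The names are placeholders: 'sw_qpt'
 * is an array of page kernel-virtual addresses and 'prod_idx' is a queue
 * element index. On return 'qe' points at the entry and 'range' holds how
 * many entries remain in that page before the page boundary:
 *
 *	void **sw_qpt;
 *	struct bna_txq_entry *qe;
 *	int range;
 *
 *	BNA_TXQ_QPGE_PTR_GET(prod_idx, sw_qpt, qe, range);
 */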

#define BNA_QE_INDX_2_PTR(_cast, _qe_idx, _q_base)			\
	(&((_cast *)(_q_base))[(_qe_idx)])

#define BNA_QE_INDX_RANGE(_qe_idx, _q_depth) ((_q_depth) - (_qe_idx))

#define BNA_QE_INDX_ADD(_qe_idx, _qe_num, _q_depth)			\
	((_qe_idx) = ((_qe_idx) + (_qe_num)) & ((_q_depth) - 1))

#define BNA_Q_INDEX_CHANGE(_old_idx, _updated_idx, _q_depth)		\
	(((_updated_idx) - (_old_idx)) & ((_q_depth) - 1))

#define BNA_QE_FREE_CNT(_q_ptr, _q_depth)				\
	(((_q_ptr)->consumer_index - (_q_ptr)->producer_index - 1) &	\
	 ((_q_depth) - 1))

#define BNA_QE_IN_USE_CNT(_q_ptr, _q_depth)				\
	((((_q_ptr)->producer_index - (_q_ptr)->consumer_index)) &	\
	 ((_q_depth) - 1))
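
/*
 * Worked example for the ring-index helpers above, assuming a power-of-2
 * queue depth and a queue object 'q' with the indices shown (the numbers
 * are illustrative only):
 *
 *	q_depth = 64, q->producer_index = 5, q->consumer_index = 60
 *
 *	BNA_QE_FREE_CNT(q, 64)   -> (60 - 5 - 1) & 63 == 54
 *	BNA_QE_IN_USE_CNT(q, 64) -> (5 - 60) & 63     == 9
 *
 * Free + in-use always equals q_depth - 1; one slot is kept empty so a
 * full ring can be distinguished from an empty one.
 */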

#define BNA_Q_GET_CI(_q_ptr)		((_q_ptr)->q.consumer_index)

#define BNA_Q_GET_PI(_q_ptr)		((_q_ptr)->q.producer_index)

#define BNA_Q_PI_ADD(_q_ptr, _num)					\
	(_q_ptr)->q.producer_index =					\
		(((_q_ptr)->q.producer_index + (_num)) &		\
		((_q_ptr)->q.q_depth - 1))

#define BNA_Q_CI_ADD(_q_ptr, _num)					\
	(_q_ptr)->q.consumer_index =					\
		(((_q_ptr)->q.consumer_index + (_num))			\
		& ((_q_ptr)->q.q_depth - 1))

#define BNA_Q_FREE_COUNT(_q_ptr)					\
	(BNA_QE_FREE_CNT(&((_q_ptr)->q), (_q_ptr)->q.q_depth))

#define BNA_Q_IN_USE_COUNT(_q_ptr)					\
	(BNA_QE_IN_USE_CNT(&(_q_ptr)->q, (_q_ptr)->q.q_depth))

/* These macros build the data portion of the TxQ/RxQ doorbell */
#define BNA_DOORBELL_Q_PRD_IDX(_pi)	(0x80000000 | (_pi))
#define BNA_DOORBELL_Q_STOP		(0x40000000)

/* These macros build the data portion of the IB doorbell */
#define BNA_DOORBELL_IB_INT_ACK(_timeout, _events)			\
	(0x80000000 | ((_timeout) << 16) | (_events))
#define BNA_DOORBELL_IB_INT_DISABLE	(0x40000000)

/* Set the coalescing timer for the given IB */
#define bna_ib_coalescing_timer_set(_i_dbell, _cls_timer)		\
	((_i_dbell)->doorbell_ack = BNA_DOORBELL_IB_INT_ACK((_cls_timer), 0))

/* Acknowledge 'events' number of events for a given IB */
#define bna_ib_ack(_i_dbell, _events)					\
	(writel(((_i_dbell)->doorbell_ack | (_events)),			\
		(_i_dbell)->doorbell_addr))

#define bna_txq_prod_indx_doorbell(_tcb)				\
	(writel(BNA_DOORBELL_Q_PRD_IDX((_tcb)->producer_index),		\
		(_tcb)->q_dbell))

#define bna_rxq_prod_indx_doorbell(_rcb)				\
	(writel(BNA_DOORBELL_Q_PRD_IDX((_rcb)->producer_index),		\
		(_rcb)->q_dbell))
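
/*
 * Illustrative doorbell sequence (a sketch only; 'tcb' stands for whatever
 * TxQ control block the caller owns, with the fields dereferenced above):
 *
 *	tcb->producer_index = new_prod;		new work has been queued
 *	bna_txq_prod_indx_doorbell(tcb);	writel(0x80000000 | new_prod,
 *						       tcb->q_dbell)
 */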

#define BNA_LARGE_PKT_SIZE		1000

#define BNA_UPDATE_PKT_CNT(_pkt, _len)					\
do {									\
	if ((_len) > BNA_LARGE_PKT_SIZE) {				\
		(_pkt)->large_pkt_cnt++;				\
	} else {							\
		(_pkt)->small_pkt_cnt++;				\
	}								\
} while (0)

#define call_rxf_stop_cbfn(rxf, status)					\
	if ((rxf)->stop_cbfn) {						\
		(*(rxf)->stop_cbfn)((rxf)->stop_cbarg, (status));	\
		(rxf)->stop_cbfn = NULL;				\
		(rxf)->stop_cbarg = NULL;				\
	}

#define call_rxf_start_cbfn(rxf, status)				\
	if ((rxf)->start_cbfn) {					\
		(*(rxf)->start_cbfn)((rxf)->start_cbarg, (status));	\
		(rxf)->start_cbfn = NULL;				\
		(rxf)->start_cbarg = NULL;				\
	}

#define call_rxf_cam_fltr_cbfn(rxf, status)				\
	if ((rxf)->cam_fltr_cbfn) {					\
		(*(rxf)->cam_fltr_cbfn)((rxf)->cam_fltr_cbarg, rxf->rx,	\
					(status));			\
		(rxf)->cam_fltr_cbfn = NULL;				\
		(rxf)->cam_fltr_cbarg = NULL;				\
	}

#define call_rxf_pause_cbfn(rxf, status)				\
	if ((rxf)->oper_state_cbfn) {					\
		(*(rxf)->oper_state_cbfn)((rxf)->oper_state_cbarg, rxf->rx,\
					(status));			\
		(rxf)->rxf_flags &= ~BNA_RXF_FL_OPERSTATE_CHANGED;	\
		(rxf)->oper_state_cbfn = NULL;				\
		(rxf)->oper_state_cbarg = NULL;				\
	}

#define call_rxf_resume_cbfn(rxf, status) call_rxf_pause_cbfn(rxf, status)

#define is_xxx_enable(mode, bitmask, xxx) ((bitmask & xxx) && (mode & xxx))

#define is_xxx_disable(mode, bitmask, xxx) ((bitmask & xxx) && !(mode & xxx))

#define xxx_enable(mode, bitmask, xxx)					\
do {									\
	bitmask |= xxx;							\
	mode |= xxx;							\
} while (0)

#define xxx_disable(mode, bitmask, xxx)					\
do {									\
	bitmask |= xxx;							\
	mode &= ~xxx;							\
} while (0)

#define xxx_inactive(mode, bitmask, xxx)				\
do {									\
	bitmask &= ~xxx;						\
	mode &= ~xxx;							\
} while (0)

#define is_promisc_enable(mode, bitmask)				\
	is_xxx_enable(mode, bitmask, BNA_RXMODE_PROMISC)

#define is_promisc_disable(mode, bitmask)				\
	is_xxx_disable(mode, bitmask, BNA_RXMODE_PROMISC)

#define promisc_enable(mode, bitmask)					\
	xxx_enable(mode, bitmask, BNA_RXMODE_PROMISC)

#define promisc_disable(mode, bitmask)					\
	xxx_disable(mode, bitmask, BNA_RXMODE_PROMISC)

#define promisc_inactive(mode, bitmask)					\
	xxx_inactive(mode, bitmask, BNA_RXMODE_PROMISC)

#define is_default_enable(mode, bitmask)				\
	is_xxx_enable(mode, bitmask, BNA_RXMODE_DEFAULT)

#define is_default_disable(mode, bitmask)				\
	is_xxx_disable(mode, bitmask, BNA_RXMODE_DEFAULT)

#define default_enable(mode, bitmask)					\
	xxx_enable(mode, bitmask, BNA_RXMODE_DEFAULT)

#define default_disable(mode, bitmask)					\
	xxx_disable(mode, bitmask, BNA_RXMODE_DEFAULT)

#define default_inactive(mode, bitmask)					\
	xxx_inactive(mode, bitmask, BNA_RXMODE_DEFAULT)

#define is_allmulti_enable(mode, bitmask)				\
	is_xxx_enable(mode, bitmask, BNA_RXMODE_ALLMULTI)

#define is_allmulti_disable(mode, bitmask)				\
	is_xxx_disable(mode, bitmask, BNA_RXMODE_ALLMULTI)

#define allmulti_enable(mode, bitmask)					\
	xxx_enable(mode, bitmask, BNA_RXMODE_ALLMULTI)

#define allmulti_disable(mode, bitmask)					\
	xxx_disable(mode, bitmask, BNA_RXMODE_ALLMULTI)

#define allmulti_inactive(mode, bitmask)				\
	xxx_inactive(mode, bitmask, BNA_RXMODE_ALLMULTI)
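
/*
 * Sketch of how the rx-mode helpers above compose (illustrative only, with
 * 'mode' read as the currently active bna_rxmode bits and 'bitmask' as the
 * bits the caller has touched):
 *
 *	enum bna_rxmode mode = 0, bitmask = 0;
 *
 *	promisc_enable(mode, bitmask);
 *	is_promisc_enable(mode, bitmask)	-> true
 *
 *	promisc_disable(mode, bitmask);
 *	is_promisc_disable(mode, bitmask)	-> true
 */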

#define GET_RXQS(rxp, q0, q1)	do {					\
	switch ((rxp)->type) {						\
	case BNA_RXP_SINGLE:						\
		(q0) = rxp->rxq.single.only;				\
		(q1) = NULL;						\
		break;							\
	case BNA_RXP_SLR:						\
		(q0) = rxp->rxq.slr.large;				\
		(q1) = rxp->rxq.slr.small;				\
		break;							\
	case BNA_RXP_HDS:						\
		(q0) = rxp->rxq.hds.data;				\
		(q1) = rxp->rxq.hds.hdr;				\
		break;							\
	}								\
} while (0)
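
/*
 * Illustrative use of GET_RXQS() (a sketch; 'rxp' stands for an rx-path
 * object of one of the three types handled above):
 *
 *	struct bna_rxq *q0 = NULL, *q1 = NULL;
 *
 *	GET_RXQS(rxp, q0, q1);
 *
 * For the handled types q0 is always set; q1 remains NULL only in the
 * BNA_RXP_SINGLE case.
 */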

/**
 *
 * Function prototypes
 *
 */

/**
 * BNA
 */

/* APIs for BNAD */
void bna_res_req(struct bna_res_info *res_info);
void bna_init(struct bna *bna, struct bnad *bnad,
		struct bfa_pcidev *pcidev,
		struct bna_res_info *res_info);
void bna_uninit(struct bna *bna);
void bna_stats_get(struct bna *bna);
void bna_get_perm_mac(struct bna *bna, u8 *mac);

/* APIs for Rx */
int bna_rit_mod_can_satisfy(struct bna_rit_mod *rit_mod, int seg_size);

/* APIs for RxF */
struct bna_mac *bna_ucam_mod_mac_get(struct bna_ucam_mod *ucam_mod);
void bna_ucam_mod_mac_put(struct bna_ucam_mod *ucam_mod,
			  struct bna_mac *mac);
struct bna_mac *bna_mcam_mod_mac_get(struct bna_mcam_mod *mcam_mod);
void bna_mcam_mod_mac_put(struct bna_mcam_mod *mcam_mod,
			  struct bna_mac *mac);
struct bna_rit_segment *
bna_rit_mod_seg_get(struct bna_rit_mod *rit_mod, int seg_size);
void bna_rit_mod_seg_put(struct bna_rit_mod *rit_mod,
			struct bna_rit_segment *seg);

/**
 * DEVICE
 */

/* APIs for BNAD */
void bna_device_enable(struct bna_device *device);
void bna_device_disable(struct bna_device *device,
			enum bna_cleanup_type type);

/**
 * MBOX
 */

/* APIs for PORT, TX, RX */
void bna_mbox_handler(struct bna *bna, u32 intr_status);
void bna_mbox_send(struct bna *bna, struct bna_mbox_qe *mbox_qe);

/**
 * PORT
 */

/* API for RX */
int bna_port_mtu_get(struct bna_port *port);
void bna_llport_rx_started(struct bna_llport *llport);
void bna_llport_rx_stopped(struct bna_llport *llport);

/* API for BNAD */
void bna_port_enable(struct bna_port *port);
void bna_port_disable(struct bna_port *port, enum bna_cleanup_type type,
		      void (*cbfn)(void *, enum bna_cb_status));
void bna_port_pause_config(struct bna_port *port,
			   struct bna_pause_config *pause_config,
			   void (*cbfn)(struct bnad *, enum bna_cb_status));
void bna_port_mtu_set(struct bna_port *port, int mtu,
		      void (*cbfn)(struct bnad *, enum bna_cb_status));
void bna_port_mac_get(struct bna_port *port, mac_t *mac);

/* Callbacks for TX, RX */
void bna_port_cb_tx_stopped(struct bna_port *port,
			    enum bna_cb_status status);
void bna_port_cb_rx_stopped(struct bna_port *port,
			    enum bna_cb_status status);

/**
 * IB
 */

/* APIs for BNA */
void bna_ib_mod_init(struct bna_ib_mod *ib_mod, struct bna *bna,
		     struct bna_res_info *res_info);
void bna_ib_mod_uninit(struct bna_ib_mod *ib_mod);

/**
 * TX MODULE AND TX
 */

/* APIs for BNA */
void bna_tx_mod_init(struct bna_tx_mod *tx_mod, struct bna *bna,
		     struct bna_res_info *res_info);
void bna_tx_mod_uninit(struct bna_tx_mod *tx_mod);
int bna_tx_state_get(struct bna_tx *tx);

/* APIs for PORT */
void bna_tx_mod_start(struct bna_tx_mod *tx_mod, enum bna_tx_type type);
void bna_tx_mod_stop(struct bna_tx_mod *tx_mod, enum bna_tx_type type);
void bna_tx_mod_fail(struct bna_tx_mod *tx_mod);
void bna_tx_mod_prio_changed(struct bna_tx_mod *tx_mod, int prio);
void bna_tx_mod_cee_link_status(struct bna_tx_mod *tx_mod, int cee_link);

/* APIs for BNAD */
void bna_tx_res_req(int num_txq, int txq_depth,
		    struct bna_res_info *res_info);
struct bna_tx *bna_tx_create(struct bna *bna, struct bnad *bnad,
			     struct bna_tx_config *tx_cfg,
			     struct bna_tx_event_cbfn *tx_cbfn,
			     struct bna_res_info *res_info, void *priv);
void bna_tx_destroy(struct bna_tx *tx);
void bna_tx_enable(struct bna_tx *tx);
void bna_tx_disable(struct bna_tx *tx, enum bna_cleanup_type type,
		    void (*cbfn)(void *, struct bna_tx *,
				 enum bna_cb_status));
void bna_tx_coalescing_timeo_set(struct bna_tx *tx, int coalescing_timeo);

/**
 * RX MODULE, RX, RXF
 */

/* Internal APIs */
void rxf_cb_cam_fltr_mbox_cmd(void *arg, int status);
void rxf_cam_mbox_cmd(struct bna_rxf *rxf, u8 cmd,
		      const struct bna_mac *mac_addr);
void __rxf_vlan_filter_set(struct bna_rxf *rxf, enum bna_status status);
void bna_rxf_adv_init(struct bna_rxf *rxf,
		      struct bna_rx *rx,
		      struct bna_rx_config *q_config);
int rxf_process_packet_filter_ucast(struct bna_rxf *rxf);
int rxf_process_packet_filter_promisc(struct bna_rxf *rxf);
int rxf_process_packet_filter_default(struct bna_rxf *rxf);
int rxf_process_packet_filter_allmulti(struct bna_rxf *rxf);
int rxf_clear_packet_filter_ucast(struct bna_rxf *rxf);
int rxf_clear_packet_filter_promisc(struct bna_rxf *rxf);
int rxf_clear_packet_filter_default(struct bna_rxf *rxf);
int rxf_clear_packet_filter_allmulti(struct bna_rxf *rxf);
void rxf_reset_packet_filter_ucast(struct bna_rxf *rxf);
void rxf_reset_packet_filter_promisc(struct bna_rxf *rxf);
void rxf_reset_packet_filter_default(struct bna_rxf *rxf);
void rxf_reset_packet_filter_allmulti(struct bna_rxf *rxf);

/* APIs for BNA */
void bna_rx_mod_init(struct bna_rx_mod *rx_mod, struct bna *bna,
		     struct bna_res_info *res_info);
void bna_rx_mod_uninit(struct bna_rx_mod *rx_mod);
int bna_rx_state_get(struct bna_rx *rx);
int bna_rxf_state_get(struct bna_rxf *rxf);

/* APIs for PORT */
void bna_rx_mod_start(struct bna_rx_mod *rx_mod, enum bna_rx_type type);
void bna_rx_mod_stop(struct bna_rx_mod *rx_mod, enum bna_rx_type type);
void bna_rx_mod_fail(struct bna_rx_mod *rx_mod);

/* APIs for BNAD */
void bna_rx_res_req(struct bna_rx_config *rx_config,
		    struct bna_res_info *res_info);
struct bna_rx *bna_rx_create(struct bna *bna, struct bnad *bnad,
			     struct bna_rx_config *rx_cfg,
			     struct bna_rx_event_cbfn *rx_cbfn,
			     struct bna_res_info *res_info, void *priv);
void bna_rx_destroy(struct bna_rx *rx);
void bna_rx_enable(struct bna_rx *rx);
void bna_rx_disable(struct bna_rx *rx, enum bna_cleanup_type type,
		    void (*cbfn)(void *, struct bna_rx *,
				 enum bna_cb_status));
void bna_rx_coalescing_timeo_set(struct bna_rx *rx, int coalescing_timeo);
void bna_rx_dim_reconfig(struct bna *bna, const u32 vector[][BNA_BIAS_T_MAX]);
void bna_rx_dim_update(struct bna_ccb *ccb);
enum bna_cb_status
bna_rx_ucast_set(struct bna_rx *rx, u8 *ucmac,
		 void (*cbfn)(struct bnad *, struct bna_rx *,
			      enum bna_cb_status));
enum bna_cb_status
bna_rx_mcast_add(struct bna_rx *rx, u8 *mcmac,
		 void (*cbfn)(struct bnad *, struct bna_rx *,
			      enum bna_cb_status));
enum bna_cb_status
bna_rx_mcast_listset(struct bna_rx *rx, int count, u8 *mcmac,
		     void (*cbfn)(struct bnad *, struct bna_rx *,
				  enum bna_cb_status));
enum bna_cb_status
bna_rx_mode_set(struct bna_rx *rx, enum bna_rxmode rxmode,
		enum bna_rxmode bitmask,
		void (*cbfn)(struct bnad *, struct bna_rx *,
			     enum bna_cb_status));
void bna_rx_vlan_add(struct bna_rx *rx, int vlan_id);
void bna_rx_vlan_del(struct bna_rx *rx, int vlan_id);
void bna_rx_vlanfilter_enable(struct bna_rx *rx);
void bna_rx_hds_enable(struct bna_rx *rx, struct bna_rxf_hds *hds_config,
		       void (*cbfn)(struct bnad *, struct bna_rx *,
				    enum bna_cb_status));
void bna_rx_hds_disable(struct bna_rx *rx,
			void (*cbfn)(struct bnad *, struct bna_rx *,
				     enum bna_cb_status));

/**
 * BNAD
 */

/* Callbacks for BNA */
void bnad_cb_stats_get(struct bnad *bnad, enum bna_cb_status status,
		       struct bna_stats *stats);

/* Callbacks for DEVICE */
void bnad_cb_device_enabled(struct bnad *bnad, enum bna_cb_status status);
void bnad_cb_device_disabled(struct bnad *bnad, enum bna_cb_status status);
void bnad_cb_device_enable_mbox_intr(struct bnad *bnad);
void bnad_cb_device_disable_mbox_intr(struct bnad *bnad);

/* Callbacks for port */
void bnad_cb_port_link_status(struct bnad *bnad,
			      enum bna_link_status status);

#endif	/* __BNA_H__ */