Diffstat (limited to 'drivers/staging/lustre/lnet/klnds')
11 files changed, 801 insertions, 869 deletions
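Before the diff body: the bulk of this series is checkpatch-style cleanup of the o2iblnd LND — removing the space between LASSERT()/CLASSERT()/list_for_each() and their opening parenthesis, joining return types with the function name on one line, wrapping over-long CDEBUG strings, and replacing the legacy cpus_weight()/for_each_cpu_mask() helpers with cpumask_weight()/for_each_cpu(). The fragment below is an illustrative before/after sketch of those conversions, modeled on kiblnd_get_completion_vector() in o2iblnd.c; it is not part of the patch, and the helper name demo_pick_vector() is invented for the example.

	/* Illustrative sketch only -- not from the patch. */

	/* before: old declaration style and legacy cpumask helpers */
	static int
	demo_pick_vector (cpumask_t *mask, __u64 nid, int vectors)
	{
		int off;
		int i;

		LASSERT (vectors > 0);

		off = do_div(nid, cpus_weight(*mask));
		for_each_cpu_mask(i, *mask) {
			if (off-- == 0)
				return i % vectors;
		}
		return 1;
	}

	/* after: return type joined with the name, no space before '(',
	 * and the current pointer-based cpumask_weight()/for_each_cpu()
	 */
	static int demo_pick_vector(cpumask_t *mask, __u64 nid, int vectors)
	{
		int off;
		int i;

		LASSERT(vectors > 0);

		off = do_div(nid, cpumask_weight(mask));
		for_each_cpu(i, mask) {
			if (off-- == 0)
				return i % vectors;
		}
		return 1;
	}

None of these conversions change behaviour; the cpumask change only switches to the pointer-taking variants of the same accessors, which is why the diffstat is dominated by one-line whitespace and signature churn.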
diff --git a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.c b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.c index 651016919669..3bad441de8dc 100644 --- a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.c +++ b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.c @@ -53,8 +53,7 @@ static lnd_t the_o2iblnd = { kib_data_t kiblnd_data; -static __u32 -kiblnd_cksum(void *ptr, int nob) +static __u32 kiblnd_cksum(void *ptr, int nob) { char *c = ptr; __u32 sum = 0; @@ -66,8 +65,7 @@ kiblnd_cksum(void *ptr, int nob) return (sum == 0) ? 1 : sum; } -static char * -kiblnd_msgtype2str(int type) +static char *kiblnd_msgtype2str(int type) { switch (type) { case IBLND_MSG_CONNREQ: @@ -105,8 +103,7 @@ kiblnd_msgtype2str(int type) } } -static int -kiblnd_msgtype2size(int type) +static int kiblnd_msgtype2size(int type) { const int hdr_size = offsetof(kib_msg_t, ibm_u); @@ -139,15 +136,14 @@ kiblnd_msgtype2size(int type) } } -static int -kiblnd_unpack_rd(kib_msg_t *msg, int flip) +static int kiblnd_unpack_rd(kib_msg_t *msg, int flip) { kib_rdma_desc_t *rd; int nob; int n; int i; - LASSERT (msg->ibm_type == IBLND_MSG_GET_REQ || + LASSERT(msg->ibm_type == IBLND_MSG_GET_REQ || msg->ibm_type == IBLND_MSG_PUT_ACK); rd = msg->ibm_type == IBLND_MSG_GET_REQ ? @@ -167,7 +163,7 @@ kiblnd_unpack_rd(kib_msg_t *msg, int flip) return 1; } - nob = offsetof (kib_msg_t, ibm_u) + + nob = offsetof(kib_msg_t, ibm_u) + kiblnd_rd_msg_size(rd, msg->ibm_type, n); if (msg->ibm_nob < nob) { @@ -187,9 +183,8 @@ kiblnd_unpack_rd(kib_msg_t *msg, int flip) return 0; } -void -kiblnd_pack_msg (lnet_ni_t *ni, kib_msg_t *msg, int version, - int credits, lnet_nid_t dstnid, __u64 dststamp) +void kiblnd_pack_msg(lnet_ni_t *ni, kib_msg_t *msg, int version, + int credits, lnet_nid_t dstnid, __u64 dststamp) { kib_net_t *net = ni->ni_data; @@ -212,8 +207,7 @@ kiblnd_pack_msg (lnet_ni_t *ni, kib_msg_t *msg, int version, } } -int -kiblnd_unpack_msg(kib_msg_t *msg, int nob) +int kiblnd_unpack_msg(kib_msg_t *msg, int nob) { const int hdr_size = offsetof(kib_msg_t, ibm_u); __u32 msg_cksum; @@ -269,8 +263,8 @@ kiblnd_unpack_msg(kib_msg_t *msg, int nob) if (flip) { /* leave magic unflipped as a clue to peer endianness */ msg->ibm_version = version; - CLASSERT (sizeof(msg->ibm_type) == 1); - CLASSERT (sizeof(msg->ibm_credits) == 1); + CLASSERT(sizeof(msg->ibm_type) == 1); + CLASSERT(sizeof(msg->ibm_credits) == 1); msg->ibm_nob = msg_nob; __swab64s(&msg->ibm_srcnid); __swab64s(&msg->ibm_srcstamp); @@ -324,8 +318,7 @@ kiblnd_unpack_msg(kib_msg_t *msg, int nob) return 0; } -int -kiblnd_create_peer(lnet_ni_t *ni, kib_peer_t **peerp, lnet_nid_t nid) +int kiblnd_create_peer(lnet_ni_t *ni, kib_peer_t **peerp, lnet_nid_t nid) { kib_peer_t *peer; kib_net_t *net = ni->ni_data; @@ -356,7 +349,7 @@ kiblnd_create_peer(lnet_ni_t *ni, kib_peer_t **peerp, lnet_nid_t nid) write_lock_irqsave(&kiblnd_data.kib_global_lock, flags); /* always called with a ref on ni, which prevents ni being shutdown */ - LASSERT (net->ibn_shutdown == 0); + LASSERT(net->ibn_shutdown == 0); /* npeers only grows with the global lock held */ atomic_inc(&net->ibn_npeers); @@ -367,18 +360,17 @@ kiblnd_create_peer(lnet_ni_t *ni, kib_peer_t **peerp, lnet_nid_t nid) return 0; } -void -kiblnd_destroy_peer (kib_peer_t *peer) +void kiblnd_destroy_peer(kib_peer_t *peer) { kib_net_t *net = peer->ibp_ni->ni_data; - LASSERT (net != NULL); - LASSERT (atomic_read(&peer->ibp_refcount) == 0); - LASSERT (!kiblnd_peer_active(peer)); - LASSERT (peer->ibp_connecting == 0); - LASSERT (peer->ibp_accepting == 
0); - LASSERT (list_empty(&peer->ibp_conns)); - LASSERT (list_empty(&peer->ibp_tx_queue)); + LASSERT(net != NULL); + LASSERT(atomic_read(&peer->ibp_refcount) == 0); + LASSERT(!kiblnd_peer_active(peer)); + LASSERT(peer->ibp_connecting == 0); + LASSERT(peer->ibp_accepting == 0); + LASSERT(list_empty(&peer->ibp_conns)); + LASSERT(list_empty(&peer->ibp_tx_queue)); LIBCFS_FREE(peer, sizeof(*peer)); @@ -389,8 +381,7 @@ kiblnd_destroy_peer (kib_peer_t *peer) atomic_dec(&net->ibn_npeers); } -kib_peer_t * -kiblnd_find_peer_locked (lnet_nid_t nid) +kib_peer_t *kiblnd_find_peer_locked(lnet_nid_t nid) { /* the caller is responsible for accounting the additional reference * that this creates */ @@ -398,11 +389,11 @@ kiblnd_find_peer_locked (lnet_nid_t nid) struct list_head *tmp; kib_peer_t *peer; - list_for_each (tmp, peer_list) { + list_for_each(tmp, peer_list) { peer = list_entry(tmp, kib_peer_t, ibp_list); - LASSERT (peer->ibp_connecting > 0 || /* creating conns */ + LASSERT(peer->ibp_connecting > 0 || /* creating conns */ peer->ibp_accepting > 0 || !list_empty(&peer->ibp_conns)); /* active conn */ @@ -418,20 +409,18 @@ kiblnd_find_peer_locked (lnet_nid_t nid) return NULL; } -void -kiblnd_unlink_peer_locked (kib_peer_t *peer) +void kiblnd_unlink_peer_locked(kib_peer_t *peer) { - LASSERT (list_empty(&peer->ibp_conns)); + LASSERT(list_empty(&peer->ibp_conns)); - LASSERT (kiblnd_peer_active(peer)); + LASSERT(kiblnd_peer_active(peer)); list_del_init(&peer->ibp_list); /* lose peerlist's ref */ kiblnd_peer_decref(peer); } -static int -kiblnd_get_peer_info(lnet_ni_t *ni, int index, - lnet_nid_t *nidp, int *count) +static int kiblnd_get_peer_info(lnet_ni_t *ni, int index, + lnet_nid_t *nidp, int *count) { kib_peer_t *peer; struct list_head *ptmp; @@ -442,10 +431,10 @@ kiblnd_get_peer_info(lnet_ni_t *ni, int index, for (i = 0; i < kiblnd_data.kib_peer_hash_size; i++) { - list_for_each (ptmp, &kiblnd_data.kib_peers[i]) { + list_for_each(ptmp, &kiblnd_data.kib_peers[i]) { peer = list_entry(ptmp, kib_peer_t, ibp_list); - LASSERT (peer->ibp_connecting > 0 || + LASSERT(peer->ibp_connecting > 0 || peer->ibp_accepting > 0 || !list_empty(&peer->ibp_conns)); @@ -468,8 +457,7 @@ kiblnd_get_peer_info(lnet_ni_t *ni, int index, return -ENOENT; } -static void -kiblnd_del_peer_locked(kib_peer_t *peer) +static void kiblnd_del_peer_locked(kib_peer_t *peer) { struct list_head *ctmp; struct list_head *cnxt; @@ -478,7 +466,7 @@ kiblnd_del_peer_locked(kib_peer_t *peer) if (list_empty(&peer->ibp_conns)) { kiblnd_unlink_peer_locked(peer); } else { - list_for_each_safe (ctmp, cnxt, &peer->ibp_conns) { + list_for_each_safe(ctmp, cnxt, &peer->ibp_conns) { conn = list_entry(ctmp, kib_conn_t, ibc_list); kiblnd_close_conn_locked(conn, 0); @@ -489,10 +477,9 @@ kiblnd_del_peer_locked(kib_peer_t *peer) * last ref on it. 
*/ } -static int -kiblnd_del_peer(lnet_ni_t *ni, lnet_nid_t nid) +static int kiblnd_del_peer(lnet_ni_t *ni, lnet_nid_t nid) { - LIST_HEAD (zombies); + LIST_HEAD(zombies); struct list_head *ptmp; struct list_head *pnxt; kib_peer_t *peer; @@ -512,9 +499,9 @@ kiblnd_del_peer(lnet_ni_t *ni, lnet_nid_t nid) } for (i = lo; i <= hi; i++) { - list_for_each_safe (ptmp, pnxt, &kiblnd_data.kib_peers[i]) { + list_for_each_safe(ptmp, pnxt, &kiblnd_data.kib_peers[i]) { peer = list_entry(ptmp, kib_peer_t, ibp_list); - LASSERT (peer->ibp_connecting > 0 || + LASSERT(peer->ibp_connecting > 0 || peer->ibp_accepting > 0 || !list_empty(&peer->ibp_conns)); @@ -525,7 +512,7 @@ kiblnd_del_peer(lnet_ni_t *ni, lnet_nid_t nid) continue; if (!list_empty(&peer->ibp_tx_queue)) { - LASSERT (list_empty(&peer->ibp_conns)); + LASSERT(list_empty(&peer->ibp_conns)); list_splice_init(&peer->ibp_tx_queue, &zombies); @@ -543,8 +530,7 @@ kiblnd_del_peer(lnet_ni_t *ni, lnet_nid_t nid) return rc; } -static kib_conn_t * -kiblnd_get_conn_by_idx(lnet_ni_t *ni, int index) +static kib_conn_t *kiblnd_get_conn_by_idx(lnet_ni_t *ni, int index) { kib_peer_t *peer; struct list_head *ptmp; @@ -556,25 +542,26 @@ kiblnd_get_conn_by_idx(lnet_ni_t *ni, int index) read_lock_irqsave(&kiblnd_data.kib_global_lock, flags); for (i = 0; i < kiblnd_data.kib_peer_hash_size; i++) { - list_for_each (ptmp, &kiblnd_data.kib_peers[i]) { + list_for_each(ptmp, &kiblnd_data.kib_peers[i]) { peer = list_entry(ptmp, kib_peer_t, ibp_list); - LASSERT (peer->ibp_connecting > 0 || + LASSERT(peer->ibp_connecting > 0 || peer->ibp_accepting > 0 || !list_empty(&peer->ibp_conns)); if (peer->ibp_ni != ni) continue; - list_for_each (ctmp, &peer->ibp_conns) { + list_for_each(ctmp, &peer->ibp_conns) { if (index-- > 0) continue; conn = list_entry(ctmp, kib_conn_t, ibc_list); kiblnd_conn_addref(conn); - read_unlock_irqrestore(&kiblnd_data.kib_global_lock, - flags); + read_unlock_irqrestore( + &kiblnd_data.kib_global_lock, + flags); return conn; } } @@ -584,8 +571,7 @@ kiblnd_get_conn_by_idx(lnet_ni_t *ni, int index) return NULL; } -int -kiblnd_translate_mtu(int value) +int kiblnd_translate_mtu(int value) { switch (value) { default: @@ -605,8 +591,7 @@ kiblnd_translate_mtu(int value) } } -static void -kiblnd_setup_mtu_locked(struct rdma_cm_id *cmid) +static void kiblnd_setup_mtu_locked(struct rdma_cm_id *cmid) { int mtu; @@ -615,13 +600,12 @@ kiblnd_setup_mtu_locked(struct rdma_cm_id *cmid) return; mtu = kiblnd_translate_mtu(*kiblnd_tunables.kib_ib_mtu); - LASSERT (mtu >= 0); + LASSERT(mtu >= 0); if (mtu != 0) cmid->route.path_rec->mtu = mtu; } -static int -kiblnd_get_completion_vector(kib_conn_t *conn, int cpt) +static int kiblnd_get_completion_vector(kib_conn_t *conn, int cpt) { cpumask_t *mask; int vectors; @@ -638,8 +622,8 @@ kiblnd_get_completion_vector(kib_conn_t *conn, int cpt) return 0; /* hash NID to CPU id in this partition... 
*/ - off = do_div(nid, cpus_weight(*mask)); - for_each_cpu_mask(i, *mask) { + off = do_div(nid, cpumask_weight(mask)); + for_each_cpu(i, mask) { if (off-- == 0) return i % vectors; } @@ -648,9 +632,8 @@ kiblnd_get_completion_vector(kib_conn_t *conn, int cpt) return 1; } -kib_conn_t * -kiblnd_create_conn(kib_peer_t *peer, struct rdma_cm_id *cmid, - int state, int version) +kib_conn_t *kiblnd_create_conn(kib_peer_t *peer, struct rdma_cm_id *cmid, + int state, int version) { /* CAVEAT EMPTOR: * If the new conn is created successfully it takes over the caller's @@ -835,7 +818,7 @@ kiblnd_create_conn(kib_peer_t *peer, struct rdma_cm_id *cmid, } /* Init successful! */ - LASSERT (state == IBLND_CONN_ACTIVE_CONNECT || + LASSERT(state == IBLND_CONN_ACTIVE_CONNECT || state == IBLND_CONN_PASSIVE_WAIT); conn->ibc_state = state; @@ -851,23 +834,22 @@ kiblnd_create_conn(kib_peer_t *peer, struct rdma_cm_id *cmid, return NULL; } -void -kiblnd_destroy_conn (kib_conn_t *conn) +void kiblnd_destroy_conn(kib_conn_t *conn) { struct rdma_cm_id *cmid = conn->ibc_cmid; kib_peer_t *peer = conn->ibc_peer; int rc; - LASSERT (!in_interrupt()); - LASSERT (atomic_read(&conn->ibc_refcount) == 0); - LASSERT (list_empty(&conn->ibc_early_rxs)); - LASSERT (list_empty(&conn->ibc_tx_noops)); - LASSERT (list_empty(&conn->ibc_tx_queue)); - LASSERT (list_empty(&conn->ibc_tx_queue_rsrvd)); - LASSERT (list_empty(&conn->ibc_tx_queue_nocred)); - LASSERT (list_empty(&conn->ibc_active_txs)); - LASSERT (conn->ibc_noops_posted == 0); - LASSERT (conn->ibc_nsends_posted == 0); + LASSERT(!in_interrupt()); + LASSERT(atomic_read(&conn->ibc_refcount) == 0); + LASSERT(list_empty(&conn->ibc_early_rxs)); + LASSERT(list_empty(&conn->ibc_tx_noops)); + LASSERT(list_empty(&conn->ibc_tx_queue)); + LASSERT(list_empty(&conn->ibc_tx_queue_rsrvd)); + LASSERT(list_empty(&conn->ibc_tx_queue_nocred)); + LASSERT(list_empty(&conn->ibc_active_txs)); + LASSERT(conn->ibc_noops_posted == 0); + LASSERT(conn->ibc_nsends_posted == 0); switch (conn->ibc_state) { default: @@ -876,7 +858,7 @@ kiblnd_destroy_conn (kib_conn_t *conn) case IBLND_CONN_DISCONNECTED: /* connvars should have been freed already */ - LASSERT (conn->ibc_connvars == NULL); + LASSERT(conn->ibc_connvars == NULL); break; case IBLND_CONN_INIT: @@ -898,7 +880,8 @@ kiblnd_destroy_conn (kib_conn_t *conn) if (conn->ibc_rxs != NULL) { LIBCFS_FREE(conn->ibc_rxs, - IBLND_RX_MSGS(conn->ibc_version) * sizeof(kib_rx_t)); + IBLND_RX_MSGS(conn->ibc_version) + * sizeof(kib_rx_t)); } if (conn->ibc_connvars != NULL) @@ -919,15 +902,14 @@ kiblnd_destroy_conn (kib_conn_t *conn) LIBCFS_FREE(conn, sizeof(*conn)); } -int -kiblnd_close_peer_conns_locked (kib_peer_t *peer, int why) +int kiblnd_close_peer_conns_locked(kib_peer_t *peer, int why) { kib_conn_t *conn; struct list_head *ctmp; struct list_head *cnxt; int count = 0; - list_for_each_safe (ctmp, cnxt, &peer->ibp_conns) { + list_for_each_safe(ctmp, cnxt, &peer->ibp_conns) { conn = list_entry(ctmp, kib_conn_t, ibc_list); CDEBUG(D_NET, "Closing conn -> %s, version: %x, reason: %d\n", @@ -941,23 +923,23 @@ kiblnd_close_peer_conns_locked (kib_peer_t *peer, int why) return count; } -int -kiblnd_close_stale_conns_locked (kib_peer_t *peer, - int version, __u64 incarnation) +int kiblnd_close_stale_conns_locked(kib_peer_t *peer, + int version, __u64 incarnation) { kib_conn_t *conn; struct list_head *ctmp; struct list_head *cnxt; int count = 0; - list_for_each_safe (ctmp, cnxt, &peer->ibp_conns) { + list_for_each_safe(ctmp, cnxt, &peer->ibp_conns) { conn = list_entry(ctmp, 
kib_conn_t, ibc_list); if (conn->ibc_version == version && conn->ibc_incarnation == incarnation) continue; - CDEBUG(D_NET, "Closing stale conn -> %s version: %x, incarnation:%#llx(%x, %#llx)\n", + CDEBUG(D_NET, + "Closing stale conn -> %s version: %x, incarnation:%#llx(%x, %#llx)\n", libcfs_nid2str(peer->ibp_nid), conn->ibc_version, conn->ibc_incarnation, version, incarnation); @@ -969,8 +951,7 @@ kiblnd_close_stale_conns_locked (kib_peer_t *peer, return count; } -static int -kiblnd_close_matching_conns(lnet_ni_t *ni, lnet_nid_t nid) +static int kiblnd_close_matching_conns(lnet_ni_t *ni, lnet_nid_t nid) { kib_peer_t *peer; struct list_head *ptmp; @@ -991,10 +972,10 @@ kiblnd_close_matching_conns(lnet_ni_t *ni, lnet_nid_t nid) } for (i = lo; i <= hi; i++) { - list_for_each_safe (ptmp, pnxt, &kiblnd_data.kib_peers[i]) { + list_for_each_safe(ptmp, pnxt, &kiblnd_data.kib_peers[i]) { peer = list_entry(ptmp, kib_peer_t, ibp_list); - LASSERT (peer->ibp_connecting > 0 || + LASSERT(peer->ibp_connecting > 0 || peer->ibp_accepting > 0 || !list_empty(&peer->ibp_conns)); @@ -1017,8 +998,7 @@ kiblnd_close_matching_conns(lnet_ni_t *ni, lnet_nid_t nid) return (count == 0) ? -ENOENT : 0; } -int -kiblnd_ctl(lnet_ni_t *ni, unsigned int cmd, void *arg) +int kiblnd_ctl(lnet_ni_t *ni, unsigned int cmd, void *arg) { struct libcfs_ioctl_data *data = arg; int rc = -EINVAL; @@ -1049,7 +1029,7 @@ kiblnd_ctl(lnet_ni_t *ni, unsigned int cmd, void *arg) break; } - LASSERT (conn->ibc_cmid != NULL); + LASSERT(conn->ibc_cmid != NULL); data->ioc_nid = conn->ibc_peer->ibp_nid; if (conn->ibc_cmid->route.path_rec == NULL) data->ioc_u32[0] = 0; /* iWarp has no path MTU */ @@ -1071,8 +1051,7 @@ kiblnd_ctl(lnet_ni_t *ni, unsigned int cmd, void *arg) return rc; } -void -kiblnd_query (lnet_ni_t *ni, lnet_nid_t nid, unsigned long *when) +void kiblnd_query(lnet_ni_t *ni, lnet_nid_t nid, unsigned long *when) { unsigned long last_alive = 0; unsigned long now = cfs_time_current(); @@ -1084,7 +1063,7 @@ kiblnd_query (lnet_ni_t *ni, lnet_nid_t nid, unsigned long *when) peer = kiblnd_find_peer_locked(nid); if (peer != NULL) { - LASSERT (peer->ibp_connecting > 0 || /* creating conns */ + LASSERT(peer->ibp_connecting > 0 || /* creating conns */ peer->ibp_accepting > 0 || !list_empty(&peer->ibp_conns)); /* active conn */ last_alive = peer->ibp_last_alive; @@ -1103,11 +1082,9 @@ kiblnd_query (lnet_ni_t *ni, lnet_nid_t nid, unsigned long *when) CDEBUG(D_NET, "Peer %s %p, alive %ld secs ago\n", libcfs_nid2str(nid), peer, last_alive ? 
cfs_duration_sec(now - last_alive) : -1); - return; } -void -kiblnd_free_pages(kib_pages_t *p) +void kiblnd_free_pages(kib_pages_t *p) { int npages = p->ibp_npages; int i; @@ -1120,8 +1097,7 @@ kiblnd_free_pages(kib_pages_t *p) LIBCFS_FREE(p, offsetof(kib_pages_t, ibp_pages[npages])); } -int -kiblnd_alloc_pages(kib_pages_t **pp, int cpt, int npages) +int kiblnd_alloc_pages(kib_pages_t **pp, int cpt, int npages) { kib_pages_t *p; int i; @@ -1151,19 +1127,18 @@ kiblnd_alloc_pages(kib_pages_t **pp, int cpt, int npages) return 0; } -void -kiblnd_unmap_rx_descs(kib_conn_t *conn) +void kiblnd_unmap_rx_descs(kib_conn_t *conn) { kib_rx_t *rx; int i; - LASSERT (conn->ibc_rxs != NULL); - LASSERT (conn->ibc_hdev != NULL); + LASSERT(conn->ibc_rxs != NULL); + LASSERT(conn->ibc_hdev != NULL); for (i = 0; i < IBLND_RX_MSGS(conn->ibc_version); i++) { rx = &conn->ibc_rxs[i]; - LASSERT (rx->rx_nob >= 0); /* not posted */ + LASSERT(rx->rx_nob >= 0); /* not posted */ kiblnd_dma_unmap_single(conn->ibc_hdev->ibh_ibdev, KIBLND_UNMAP_ADDR(rx, rx_msgunmap, @@ -1176,8 +1151,7 @@ kiblnd_unmap_rx_descs(kib_conn_t *conn) conn->ibc_rx_pages = NULL; } -void -kiblnd_map_rx_descs(kib_conn_t *conn) +void kiblnd_map_rx_descs(kib_conn_t *conn) { kib_rx_t *rx; struct page *pg; @@ -1194,9 +1168,10 @@ kiblnd_map_rx_descs(kib_conn_t *conn) rx->rx_msg = (kib_msg_t *)(((char *)page_address(pg)) + pg_off); rx->rx_msgaddr = kiblnd_dma_map_single(conn->ibc_hdev->ibh_ibdev, - rx->rx_msg, IBLND_MSG_SIZE, + rx->rx_msg, + IBLND_MSG_SIZE, DMA_FROM_DEVICE); - LASSERT (!kiblnd_dma_mapping_error(conn->ibc_hdev->ibh_ibdev, + LASSERT(!kiblnd_dma_mapping_error(conn->ibc_hdev->ibh_ibdev, rx->rx_msgaddr)); KIBLND_UNMAP_ADDR_SET(rx, rx_msgunmap, rx->rx_msgaddr); @@ -1205,24 +1180,23 @@ kiblnd_map_rx_descs(kib_conn_t *conn) lnet_page2phys(pg) + pg_off); pg_off += IBLND_MSG_SIZE; - LASSERT (pg_off <= PAGE_SIZE); + LASSERT(pg_off <= PAGE_SIZE); if (pg_off == PAGE_SIZE) { pg_off = 0; ipg++; - LASSERT (ipg <= IBLND_RX_MSG_PAGES(conn->ibc_version)); + LASSERT(ipg <= IBLND_RX_MSG_PAGES(conn->ibc_version)); } } } -static void -kiblnd_unmap_tx_pool(kib_tx_pool_t *tpo) +static void kiblnd_unmap_tx_pool(kib_tx_pool_t *tpo) { kib_hca_dev_t *hdev = tpo->tpo_hdev; kib_tx_t *tx; int i; - LASSERT (tpo->tpo_pool.po_allocated == 0); + LASSERT(tpo->tpo_pool.po_allocated == 0); if (hdev == NULL) return; @@ -1239,8 +1213,7 @@ kiblnd_unmap_tx_pool(kib_tx_pool_t *tpo) tpo->tpo_hdev = NULL; } -static kib_hca_dev_t * -kiblnd_current_hdev(kib_dev_t *dev) +static kib_hca_dev_t *kiblnd_current_hdev(kib_dev_t *dev) { kib_hca_dev_t *hdev; unsigned long flags; @@ -1265,8 +1238,7 @@ kiblnd_current_hdev(kib_dev_t *dev) return hdev; } -static void -kiblnd_map_tx_pool(kib_tx_pool_t *tpo) +static void kiblnd_map_tx_pool(kib_tx_pool_t *tpo) { kib_pages_t *txpgs = tpo->tpo_tx_pages; kib_pool_t *pool = &tpo->tpo_pool; @@ -1278,15 +1250,15 @@ kiblnd_map_tx_pool(kib_tx_pool_t *tpo) int ipage; int i; - LASSERT (net != NULL); + LASSERT(net != NULL); dev = net->ibn_dev; /* pre-mapped messages are not bigger than 1 page */ - CLASSERT (IBLND_MSG_SIZE <= PAGE_SIZE); + CLASSERT(IBLND_MSG_SIZE <= PAGE_SIZE); /* No fancy arithmetic when we do the buffer calculations */ - CLASSERT (PAGE_SIZE % IBLND_MSG_SIZE == 0); + CLASSERT(PAGE_SIZE % IBLND_MSG_SIZE == 0); tpo->tpo_hdev = kiblnd_current_hdev(dev); @@ -1300,29 +1272,28 @@ kiblnd_map_tx_pool(kib_tx_pool_t *tpo) tx->tx_msgaddr = kiblnd_dma_map_single( tpo->tpo_hdev->ibh_ibdev, tx->tx_msg, IBLND_MSG_SIZE, DMA_TO_DEVICE); - LASSERT 
(!kiblnd_dma_mapping_error(tpo->tpo_hdev->ibh_ibdev, + LASSERT(!kiblnd_dma_mapping_error(tpo->tpo_hdev->ibh_ibdev, tx->tx_msgaddr)); KIBLND_UNMAP_ADDR_SET(tx, tx_msgunmap, tx->tx_msgaddr); list_add(&tx->tx_list, &pool->po_free_list); page_offset += IBLND_MSG_SIZE; - LASSERT (page_offset <= PAGE_SIZE); + LASSERT(page_offset <= PAGE_SIZE); if (page_offset == PAGE_SIZE) { page_offset = 0; ipage++; - LASSERT (ipage <= txpgs->ibp_npages); + LASSERT(ipage <= txpgs->ibp_npages); } } } -struct ib_mr * -kiblnd_find_dma_mr(kib_hca_dev_t *hdev, __u64 addr, __u64 size) +struct ib_mr *kiblnd_find_dma_mr(kib_hca_dev_t *hdev, __u64 addr, __u64 size) { __u64 index; - LASSERT (hdev->ibh_mrs[0] != NULL); + LASSERT(hdev->ibh_mrs[0] != NULL); if (hdev->ibh_nmrs == 1) return hdev->ibh_mrs[0]; @@ -1336,14 +1307,13 @@ kiblnd_find_dma_mr(kib_hca_dev_t *hdev, __u64 addr, __u64 size) return NULL; } -struct ib_mr * -kiblnd_find_rd_dma_mr(kib_hca_dev_t *hdev, kib_rdma_desc_t *rd) +struct ib_mr *kiblnd_find_rd_dma_mr(kib_hca_dev_t *hdev, kib_rdma_desc_t *rd) { struct ib_mr *prev_mr; struct ib_mr *mr; int i; - LASSERT (hdev->ibh_mrs[0] != NULL); + LASSERT(hdev->ibh_mrs[0] != NULL); if (*kiblnd_tunables.kib_map_on_demand > 0 && *kiblnd_tunables.kib_map_on_demand <= rd->rd_nfrags) @@ -1370,10 +1340,9 @@ kiblnd_find_rd_dma_mr(kib_hca_dev_t *hdev, kib_rdma_desc_t *rd) return mr; } -static void -kiblnd_destroy_fmr_pool(kib_fmr_pool_t *pool) +static void kiblnd_destroy_fmr_pool(kib_fmr_pool_t *pool) { - LASSERT (pool->fpo_map_count == 0); + LASSERT(pool->fpo_map_count == 0); if (pool->fpo_fmr_pool != NULL) ib_destroy_fmr_pool(pool->fpo_fmr_pool); @@ -1384,8 +1353,7 @@ kiblnd_destroy_fmr_pool(kib_fmr_pool_t *pool) LIBCFS_FREE(pool, sizeof(kib_fmr_pool_t)); } -static void -kiblnd_destroy_fmr_pool_list(struct list_head *head) +static void kiblnd_destroy_fmr_pool_list(struct list_head *head) { kib_fmr_pool_t *pool; @@ -1410,8 +1378,8 @@ static int kiblnd_fmr_flush_trigger(int ncpts) return max(IBLND_FMR_POOL_FLUSH, size); } -static int -kiblnd_create_fmr_pool(kib_fmr_poolset_t *fps, kib_fmr_pool_t **pp_fpo) +static int kiblnd_create_fmr_pool(kib_fmr_poolset_t *fps, + kib_fmr_pool_t **pp_fpo) { /* FMR pool for RDMA */ kib_dev_t *dev = fps->fps_net->ibn_dev; @@ -1451,8 +1419,8 @@ kiblnd_create_fmr_pool(kib_fmr_poolset_t *fps, kib_fmr_pool_t **pp_fpo) return 0; } -static void -kiblnd_fail_fmr_poolset(kib_fmr_poolset_t *fps, struct list_head *zombies) +static void kiblnd_fail_fmr_poolset(kib_fmr_poolset_t *fps, + struct list_head *zombies) { if (fps->fps_net == NULL) /* intialized? */ return; @@ -1473,8 +1441,7 @@ kiblnd_fail_fmr_poolset(kib_fmr_poolset_t *fps, struct list_head *zombies) spin_unlock(&fps->fps_lock); } -static void -kiblnd_fini_fmr_poolset(kib_fmr_poolset_t *fps) +static void kiblnd_fini_fmr_poolset(kib_fmr_poolset_t *fps) { if (fps->fps_net != NULL) { /* initialized? 
*/ kiblnd_destroy_fmr_pool_list(&fps->fps_failed_pool_list); @@ -1482,9 +1449,9 @@ kiblnd_fini_fmr_poolset(kib_fmr_poolset_t *fps) } } -static int -kiblnd_init_fmr_poolset(kib_fmr_poolset_t *fps, int cpt, kib_net_t *net, - int pool_size, int flush_trigger) +static int kiblnd_init_fmr_poolset(kib_fmr_poolset_t *fps, int cpt, + kib_net_t *net, int pool_size, + int flush_trigger) { kib_fmr_pool_t *fpo; int rc; @@ -1506,8 +1473,7 @@ kiblnd_init_fmr_poolset(kib_fmr_poolset_t *fps, int cpt, kib_net_t *net, return rc; } -static int -kiblnd_fmr_pool_is_idle(kib_fmr_pool_t *fpo, unsigned long now) +static int kiblnd_fmr_pool_is_idle(kib_fmr_pool_t *fpo, unsigned long now) { if (fpo->fpo_map_count != 0) /* still in use */ return 0; @@ -1516,10 +1482,9 @@ kiblnd_fmr_pool_is_idle(kib_fmr_pool_t *fpo, unsigned long now) return cfs_time_aftereq(now, fpo->fpo_deadline); } -void -kiblnd_fmr_pool_unmap(kib_fmr_t *fmr, int status) +void kiblnd_fmr_pool_unmap(kib_fmr_t *fmr, int status) { - LIST_HEAD (zombies); + LIST_HEAD(zombies); kib_fmr_pool_t *fpo = fmr->fmr_pool; kib_fmr_poolset_t *fps = fpo->fpo_owner; unsigned long now = cfs_time_current(); @@ -1527,11 +1492,11 @@ kiblnd_fmr_pool_unmap(kib_fmr_t *fmr, int status) int rc; rc = ib_fmr_pool_unmap(fmr->fmr_pfmr); - LASSERT (rc == 0); + LASSERT(rc == 0); if (status != 0) { rc = ib_flush_fmr_pool(fpo->fpo_fmr_pool); - LASSERT (rc == 0); + LASSERT(rc == 0); } fmr->fmr_pool = NULL; @@ -1556,9 +1521,8 @@ kiblnd_fmr_pool_unmap(kib_fmr_t *fmr, int status) kiblnd_destroy_fmr_pool_list(&zombies); } -int -kiblnd_fmr_pool_map(kib_fmr_poolset_t *fps, __u64 *pages, int npages, - __u64 iov, kib_fmr_t *fmr) +int kiblnd_fmr_pool_map(kib_fmr_poolset_t *fps, __u64 *pages, int npages, + __u64 iov, kib_fmr_t *fmr) { struct ib_pool_fmr *pfmr; kib_fmr_pool_t *fpo; @@ -1597,7 +1561,8 @@ kiblnd_fmr_pool_map(kib_fmr_poolset_t *fps, __u64 *pages, int npages, if (fps->fps_increasing) { spin_unlock(&fps->fps_lock); - CDEBUG(D_NET, "Another thread is allocating new FMR pool, waiting for her to complete\n"); + CDEBUG(D_NET, + "Another thread is allocating new FMR pool, waiting for her to complete\n"); schedule(); goto again; @@ -1627,17 +1592,15 @@ kiblnd_fmr_pool_map(kib_fmr_poolset_t *fps, __u64 *pages, int npages, goto again; } -static void -kiblnd_fini_pool(kib_pool_t *pool) +static void kiblnd_fini_pool(kib_pool_t *pool) { - LASSERT (list_empty(&pool->po_free_list)); - LASSERT (pool->po_allocated == 0); + LASSERT(list_empty(&pool->po_free_list)); + LASSERT(pool->po_allocated == 0); CDEBUG(D_NET, "Finalize %s pool\n", pool->po_owner->ps_name); } -static void -kiblnd_init_pool(kib_poolset_t *ps, kib_pool_t *pool, int size) +static void kiblnd_init_pool(kib_poolset_t *ps, kib_pool_t *pool, int size) { CDEBUG(D_NET, "Initialize %s pool\n", ps->ps_name); @@ -1648,8 +1611,7 @@ kiblnd_init_pool(kib_poolset_t *ps, kib_pool_t *pool, int size) pool->po_size = size; } -static void -kiblnd_destroy_pool_list(struct list_head *head) +static void kiblnd_destroy_pool_list(struct list_head *head) { kib_pool_t *pool; @@ -1657,13 +1619,12 @@ kiblnd_destroy_pool_list(struct list_head *head) pool = list_entry(head->next, kib_pool_t, po_list); list_del(&pool->po_list); - LASSERT (pool->po_owner != NULL); + LASSERT(pool->po_owner != NULL); pool->po_owner->ps_pool_destroy(pool); } } -static void -kiblnd_fail_poolset(kib_poolset_t *ps, struct list_head *zombies) +static void kiblnd_fail_poolset(kib_poolset_t *ps, struct list_head *zombies) { if (ps->ps_net == NULL) /* intialized? 
*/ return; @@ -1682,8 +1643,7 @@ kiblnd_fail_poolset(kib_poolset_t *ps, struct list_head *zombies) spin_unlock(&ps->ps_lock); } -static void -kiblnd_fini_poolset(kib_poolset_t *ps) +static void kiblnd_fini_poolset(kib_poolset_t *ps) { if (ps->ps_net != NULL) { /* initialized? */ kiblnd_destroy_pool_list(&ps->ps_failed_pool_list); @@ -1691,13 +1651,12 @@ kiblnd_fini_poolset(kib_poolset_t *ps) } } -static int -kiblnd_init_poolset(kib_poolset_t *ps, int cpt, - kib_net_t *net, char *name, int size, - kib_ps_pool_create_t po_create, - kib_ps_pool_destroy_t po_destroy, - kib_ps_node_init_t nd_init, - kib_ps_node_fini_t nd_fini) +static int kiblnd_init_poolset(kib_poolset_t *ps, int cpt, + kib_net_t *net, char *name, int size, + kib_ps_pool_create_t po_create, + kib_ps_pool_destroy_t po_destroy, + kib_ps_node_init_t nd_init, + kib_ps_node_fini_t nd_fini) { kib_pool_t *pool; int rc; @@ -1727,8 +1686,7 @@ kiblnd_init_poolset(kib_poolset_t *ps, int cpt, return rc; } -static int -kiblnd_pool_is_idle(kib_pool_t *pool, unsigned long now) +static int kiblnd_pool_is_idle(kib_pool_t *pool, unsigned long now) { if (pool->po_allocated != 0) /* still in use */ return 0; @@ -1737,10 +1695,9 @@ kiblnd_pool_is_idle(kib_pool_t *pool, unsigned long now) return cfs_time_aftereq(now, pool->po_deadline); } -void -kiblnd_pool_free_node(kib_pool_t *pool, struct list_head *node) +void kiblnd_pool_free_node(kib_pool_t *pool, struct list_head *node) { - LIST_HEAD (zombies); + LIST_HEAD(zombies); kib_poolset_t *ps = pool->po_owner; kib_pool_t *tmp; unsigned long now = cfs_time_current(); @@ -1750,7 +1707,7 @@ kiblnd_pool_free_node(kib_pool_t *pool, struct list_head *node) if (ps->ps_node_fini != NULL) ps->ps_node_fini(pool, node); - LASSERT (pool->po_allocated > 0); + LASSERT(pool->po_allocated > 0); list_add(node, &pool->po_free_list); pool->po_allocated--; @@ -1768,8 +1725,7 @@ kiblnd_pool_free_node(kib_pool_t *pool, struct list_head *node) kiblnd_destroy_pool_list(&zombies); } -struct list_head * -kiblnd_pool_alloc_node(kib_poolset_t *ps) +struct list_head *kiblnd_pool_alloc_node(kib_poolset_t *ps) { struct list_head *node; kib_pool_t *pool; @@ -1831,8 +1787,7 @@ kiblnd_pool_alloc_node(kib_poolset_t *ps) goto again; } -void -kiblnd_pmr_pool_unmap(kib_phys_mr_t *pmr) +void kiblnd_pmr_pool_unmap(kib_phys_mr_t *pmr) { kib_pmr_pool_t *ppo = pmr->pmr_pool; struct ib_mr *mr = pmr->pmr_mr; @@ -1843,8 +1798,7 @@ kiblnd_pmr_pool_unmap(kib_phys_mr_t *pmr) ib_dereg_mr(mr); } -int -kiblnd_pmr_pool_map(kib_pmr_poolset_t *pps, kib_hca_dev_t *hdev, +int kiblnd_pmr_pool_map(kib_pmr_poolset_t *pps, kib_hca_dev_t *hdev, kib_rdma_desc_t *rd, __u64 *iova, kib_phys_mr_t **pp_pmr) { kib_phys_mr_t *pmr; @@ -1889,19 +1843,16 @@ kiblnd_pmr_pool_map(kib_pmr_poolset_t *pps, kib_hca_dev_t *hdev, return rc; } -static void -kiblnd_destroy_pmr_pool(kib_pool_t *pool) +static void kiblnd_destroy_pmr_pool(kib_pool_t *pool) { kib_pmr_pool_t *ppo = container_of(pool, kib_pmr_pool_t, ppo_pool); kib_phys_mr_t *pmr; + kib_phys_mr_t *tmp; - LASSERT (pool->po_allocated == 0); - - while (!list_empty(&pool->po_free_list)) { - pmr = list_entry(pool->po_free_list.next, - kib_phys_mr_t, pmr_list); + LASSERT(pool->po_allocated == 0); - LASSERT (pmr->pmr_mr == NULL); + list_for_each_entry_safe(pmr, tmp, &pool->po_free_list, pmr_list) { + LASSERT(pmr->pmr_mr == NULL); list_del(&pmr->pmr_list); if (pmr->pmr_ipb != NULL) { @@ -1927,8 +1878,8 @@ static inline int kiblnd_pmr_pool_size(int ncpts) return max(IBLND_PMR_POOL, size); } -static int 
-kiblnd_create_pmr_pool(kib_poolset_t *ps, int size, kib_pool_t **pp_po) +static int kiblnd_create_pmr_pool(kib_poolset_t *ps, int size, + kib_pool_t **pp_po) { struct kib_pmr_pool *ppo; struct kib_pool *pool; @@ -1970,13 +1921,12 @@ kiblnd_create_pmr_pool(kib_poolset_t *ps, int size, kib_pool_t **pp_po) return 0; } -static void -kiblnd_destroy_tx_pool(kib_pool_t *pool) +static void kiblnd_destroy_tx_pool(kib_pool_t *pool) { kib_tx_pool_t *tpo = container_of(pool, kib_tx_pool_t, tpo_pool); int i; - LASSERT (pool->po_allocated == 0); + LASSERT(pool->po_allocated == 0); if (tpo->tpo_tx_pages != NULL) { kiblnd_unmap_tx_pool(tpo); @@ -2026,8 +1976,8 @@ static int kiblnd_tx_pool_size(int ncpts) return max(IBLND_TX_POOL, ntx); } -static int -kiblnd_create_tx_pool(kib_poolset_t *ps, int size, kib_pool_t **pp_po) +static int kiblnd_create_tx_pool(kib_poolset_t *ps, int size, + kib_pool_t **pp_po) { int i; int npg; @@ -2110,8 +2060,7 @@ kiblnd_create_tx_pool(kib_poolset_t *ps, int size, kib_pool_t **pp_po) return -ENOMEM; } -static void -kiblnd_tx_init(kib_pool_t *pool, struct list_head *node) +static void kiblnd_tx_init(kib_pool_t *pool, struct list_head *node) { kib_tx_poolset_t *tps = container_of(pool->po_owner, kib_tx_poolset_t, tps_poolset); @@ -2120,8 +2069,7 @@ kiblnd_tx_init(kib_pool_t *pool, struct list_head *node) tx->tx_cookie = tps->tps_next_tx_cookie++; } -static void -kiblnd_net_fini_pools(kib_net_t *net) +static void kiblnd_net_fini_pools(kib_net_t *net) { int i; @@ -2162,8 +2110,7 @@ kiblnd_net_fini_pools(kib_net_t *net) } } -static int -kiblnd_net_init_pools(kib_net_t *net, __u32 *cpts, int ncpts) +static int kiblnd_net_init_pools(kib_net_t *net, __u32 *cpts, int ncpts) { unsigned long flags; int cpt; @@ -2291,8 +2238,7 @@ kiblnd_net_init_pools(kib_net_t *net, __u32 *cpts, int ncpts) return rc; } -static int -kiblnd_hdev_get_attr(kib_hca_dev_t *hdev) +static int kiblnd_hdev_get_attr(kib_hca_dev_t *hdev) { struct ib_device_attr *attr; int rc; @@ -2336,8 +2282,7 @@ kiblnd_hdev_get_attr(kib_hca_dev_t *hdev) return -EINVAL; } -static void -kiblnd_hdev_cleanup_mrs(kib_hca_dev_t *hdev) +static void kiblnd_hdev_cleanup_mrs(kib_hca_dev_t *hdev) { int i; @@ -2356,8 +2301,7 @@ kiblnd_hdev_cleanup_mrs(kib_hca_dev_t *hdev) hdev->ibh_nmrs = 0; } -void -kiblnd_hdev_destroy(kib_hca_dev_t *hdev) +void kiblnd_hdev_destroy(kib_hca_dev_t *hdev) { kiblnd_hdev_cleanup_mrs(hdev); @@ -2370,8 +2314,7 @@ kiblnd_hdev_destroy(kib_hca_dev_t *hdev) LIBCFS_FREE(hdev, sizeof(*hdev)); } -static int -kiblnd_hdev_setup_mrs(kib_hca_dev_t *hdev) +static int kiblnd_hdev_setup_mrs(kib_hca_dev_t *hdev) { struct ib_mr *mr; int i; @@ -2442,7 +2385,7 @@ kiblnd_hdev_setup_mrs(kib_hca_dev_t *hdev) return PTR_ERR(mr); } - LASSERT (iova == ipb.addr); + LASSERT(iova == ipb.addr); hdev->ibh_mrs[i] = mr; } @@ -2454,14 +2397,14 @@ out: return 0; } -static int -kiblnd_dummy_callback(struct rdma_cm_id *cmid, struct rdma_cm_event *event) -{ /* DUMMY */ +/* DUMMY */ +static int kiblnd_dummy_callback(struct rdma_cm_id *cmid, + struct rdma_cm_event *event) +{ return 0; } -static int -kiblnd_dev_need_failover(kib_dev_t *dev) +static int kiblnd_dev_need_failover(kib_dev_t *dev) { struct rdma_cm_id *cmid; struct sockaddr_in srcaddr; @@ -2516,12 +2459,11 @@ kiblnd_dev_need_failover(kib_dev_t *dev) return 1; } -int -kiblnd_dev_failover(kib_dev_t *dev) +int kiblnd_dev_failover(kib_dev_t *dev) { - LIST_HEAD (zombie_tpo); - LIST_HEAD (zombie_ppo); - LIST_HEAD (zombie_fpo); + LIST_HEAD(zombie_tpo); + LIST_HEAD(zombie_ppo); + 
LIST_HEAD(zombie_fpo); struct rdma_cm_id *cmid = NULL; kib_hca_dev_t *hdev = NULL; kib_hca_dev_t *old; @@ -2532,7 +2474,7 @@ kiblnd_dev_failover(kib_dev_t *dev) int rc = 0; int i; - LASSERT (*kiblnd_tunables.kib_dev_failover > 1 || + LASSERT(*kiblnd_tunables.kib_dev_failover > 1 || dev->ibd_can_failover || dev->ibd_hdev == NULL); @@ -2655,11 +2597,10 @@ kiblnd_dev_failover(kib_dev_t *dev) return rc; } -void -kiblnd_destroy_dev (kib_dev_t *dev) +void kiblnd_destroy_dev(kib_dev_t *dev) { - LASSERT (dev->ibd_nnets == 0); - LASSERT (list_empty(&dev->ibd_nets)); + LASSERT(dev->ibd_nnets == 0); + LASSERT(list_empty(&dev->ibd_nets)); list_del(&dev->ibd_fail_list); list_del(&dev->ibd_list); @@ -2670,8 +2611,7 @@ kiblnd_destroy_dev (kib_dev_t *dev) LIBCFS_FREE(dev, sizeof(*dev)); } -static kib_dev_t * -kiblnd_create_dev(char *ifname) +static kib_dev_t *kiblnd_create_dev(char *ifname) { struct net_device *netdev; kib_dev_t *dev; @@ -2723,13 +2663,12 @@ kiblnd_create_dev(char *ifname) return dev; } -static void -kiblnd_base_shutdown(void) +static void kiblnd_base_shutdown(void) { struct kib_sched_info *sched; int i; - LASSERT (list_empty(&kiblnd_data.kib_devs)); + LASSERT(list_empty(&kiblnd_data.kib_devs)); CDEBUG(D_MALLOC, "before LND base cleanup: kmem %d\n", atomic_read(&libcfs_kmemory)); @@ -2740,12 +2679,11 @@ kiblnd_base_shutdown(void) case IBLND_INIT_ALL: case IBLND_INIT_DATA: - LASSERT (kiblnd_data.kib_peers != NULL); - for (i = 0; i < kiblnd_data.kib_peer_hash_size; i++) { - LASSERT (list_empty(&kiblnd_data.kib_peers[i])); - } - LASSERT (list_empty(&kiblnd_data.kib_connd_zombies)); - LASSERT (list_empty(&kiblnd_data.kib_connd_conns)); + LASSERT(kiblnd_data.kib_peers != NULL); + for (i = 0; i < kiblnd_data.kib_peer_hash_size; i++) + LASSERT(list_empty(&kiblnd_data.kib_peers[i])); + LASSERT(list_empty(&kiblnd_data.kib_connd_zombies)); + LASSERT(list_empty(&kiblnd_data.kib_connd_conns)); /* flag threads to terminate; wake and wait for them to die */ kiblnd_data.kib_shutdown = 1; @@ -2762,7 +2700,8 @@ kiblnd_base_shutdown(void) i = 2; while (atomic_read(&kiblnd_data.kib_nthreads) != 0) { i++; - CDEBUG(((i & (-i)) == i) ? D_WARNING : D_NET, /* power of 2? */ + /* power of 2 ? */ + CDEBUG(((i & (-i)) == i) ? 
D_WARNING : D_NET, "Waiting for %d threads to terminate\n", atomic_read(&kiblnd_data.kib_nthreads)); set_current_state(TASK_UNINTERRUPTIBLE); @@ -2791,8 +2730,7 @@ kiblnd_base_shutdown(void) module_put(THIS_MODULE); } -void -kiblnd_shutdown (lnet_ni_t *ni) +void kiblnd_shutdown(lnet_ni_t *ni) { kib_net_t *net = ni->ni_data; rwlock_t *g_lock = &kiblnd_data.kib_global_lock; @@ -2842,7 +2780,7 @@ kiblnd_shutdown (lnet_ni_t *ni) /* fall through */ case IBLND_INIT_NOTHING: - LASSERT (atomic_read(&net->ibn_nconns) == 0); + LASSERT(atomic_read(&net->ibn_nconns) == 0); if (net->ibn_dev != NULL && net->ibn_dev->ibd_nnets == 0) @@ -2862,20 +2800,19 @@ kiblnd_shutdown (lnet_ni_t *ni) out: if (list_empty(&kiblnd_data.kib_devs)) kiblnd_base_shutdown(); - return; } -static int -kiblnd_base_startup(void) +static int kiblnd_base_startup(void) { struct kib_sched_info *sched; int rc; int i; - LASSERT (kiblnd_data.kib_init == IBLND_INIT_NOTHING); + LASSERT(kiblnd_data.kib_init == IBLND_INIT_NOTHING); try_module_get(THIS_MODULE); - memset(&kiblnd_data, 0, sizeof(kiblnd_data)); /* zero pointers, flags etc */ + /* zero pointers, flags etc */ + memset(&kiblnd_data, 0, sizeof(kiblnd_data)); rwlock_init(&kiblnd_data.kib_global_lock); @@ -2886,9 +2823,8 @@ kiblnd_base_startup(void) LIBCFS_ALLOC(kiblnd_data.kib_peers, sizeof(struct list_head) * kiblnd_data.kib_peer_hash_size); - if (kiblnd_data.kib_peers == NULL) { + if (kiblnd_data.kib_peers == NULL) goto failed; - } for (i = 0; i < kiblnd_data.kib_peer_hash_size; i++) INIT_LIST_HEAD(&kiblnd_data.kib_peers[i]); @@ -2955,8 +2891,7 @@ kiblnd_base_startup(void) return -ENETDOWN; } -static int -kiblnd_start_schedulers(struct kib_sched_info *sched) +static int kiblnd_start_schedulers(struct kib_sched_info *sched) { int rc = 0; int nthrs; @@ -2974,12 +2909,13 @@ kiblnd_start_schedulers(struct kib_sched_info *sched) } else { LASSERT(sched->ibs_nthreads <= sched->ibs_nthreads_max); /* increase one thread if there is new interface */ - nthrs = (sched->ibs_nthreads < sched->ibs_nthreads_max); + nthrs = sched->ibs_nthreads < sched->ibs_nthreads_max; } for (i = 0; i < nthrs; i++) { long id; char name[20]; + id = KIB_THREAD_ID(sched->ibs_cpt, sched->ibs_nthreads + i); snprintf(name, sizeof(name), "kiblnd_sd_%02ld_%02ld", KIB_THREAD_CPT(id), KIB_THREAD_TID(id)); @@ -2996,8 +2932,8 @@ kiblnd_start_schedulers(struct kib_sched_info *sched) return rc; } -static int -kiblnd_dev_start_threads(kib_dev_t *dev, int newdev, __u32 *cpts, int ncpts) +static int kiblnd_dev_start_threads(kib_dev_t *dev, int newdev, __u32 *cpts, + int ncpts) { int cpt; int rc; @@ -3022,8 +2958,7 @@ kiblnd_dev_start_threads(kib_dev_t *dev, int newdev, __u32 *cpts, int ncpts) return 0; } -static kib_dev_t * -kiblnd_dev_search(char *ifname) +static kib_dev_t *kiblnd_dev_search(char *ifname) { kib_dev_t *alias = NULL; kib_dev_t *dev; @@ -3055,8 +2990,7 @@ kiblnd_dev_search(char *ifname) return alias; } -int -kiblnd_startup (lnet_ni_t *ni) +int kiblnd_startup(lnet_ni_t *ni) { char *ifname; kib_dev_t *ibdev = NULL; @@ -3066,7 +3000,7 @@ kiblnd_startup (lnet_ni_t *ni) int rc; int newdev; - LASSERT (ni->ni_lnd == &the_o2iblnd); + LASSERT(ni->ni_lnd == &the_o2iblnd); if (kiblnd_data.kib_init == IBLND_INIT_NOTHING) { rc = kiblnd_base_startup(); @@ -3090,7 +3024,7 @@ kiblnd_startup (lnet_ni_t *ni) if (ni->ni_interfaces[0] != NULL) { /* Use the IPoIB interface specified in 'networks=' */ - CLASSERT (LNET_MAX_INTERFACES > 1); + CLASSERT(LNET_MAX_INTERFACES > 1); if (ni->ni_interfaces[1] != NULL) { CERROR("Multiple 
interfaces not supported\n"); goto failed; @@ -3150,22 +3084,22 @@ net_failed: return -ENETDOWN; } -static void __exit -kiblnd_module_fini (void) +static void __exit kiblnd_module_fini(void) { lnet_unregister_lnd(&the_o2iblnd); } -static int __init -kiblnd_module_init (void) +static int __init kiblnd_module_init(void) { int rc; - CLASSERT (sizeof(kib_msg_t) <= IBLND_MSG_SIZE); - CLASSERT (offsetof(kib_msg_t, ibm_u.get.ibgm_rd.rd_frags[IBLND_MAX_RDMA_FRAGS]) - <= IBLND_MSG_SIZE); - CLASSERT (offsetof(kib_msg_t, ibm_u.putack.ibpam_rd.rd_frags[IBLND_MAX_RDMA_FRAGS]) - <= IBLND_MSG_SIZE); + CLASSERT(sizeof(kib_msg_t) <= IBLND_MSG_SIZE); + CLASSERT(offsetof(kib_msg_t, + ibm_u.get.ibgm_rd.rd_frags[IBLND_MAX_RDMA_FRAGS]) + <= IBLND_MSG_SIZE); + CLASSERT(offsetof(kib_msg_t, + ibm_u.putack.ibpam_rd.rd_frags[IBLND_MAX_RDMA_FRAGS]) + <= IBLND_MSG_SIZE); rc = kiblnd_tunables_init(); if (rc != 0) diff --git a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.h b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.h index ab128dee9483..cd664d025f41 100644 --- a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.h +++ b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.h @@ -46,8 +46,8 @@ #include <linux/errno.h> #include <linux/unistd.h> #include <linux/uio.h> +#include <linux/uaccess.h> -#include <asm/uaccess.h> #include <asm/io.h> #include <linux/fs.h> diff --git a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_cb.c b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_cb.c index 48d885dc51d9..dbf3749831f9 100644 --- a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_cb.c +++ b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_cb.c @@ -41,19 +41,19 @@ #include "o2iblnd.h" static void -kiblnd_tx_done (lnet_ni_t *ni, kib_tx_t *tx) +kiblnd_tx_done(lnet_ni_t *ni, kib_tx_t *tx) { lnet_msg_t *lntmsg[2]; kib_net_t *net = ni->ni_data; int rc; int i; - LASSERT (net != NULL); - LASSERT (!in_interrupt()); - LASSERT (!tx->tx_queued); /* mustn't be queued for sending */ - LASSERT (tx->tx_sending == 0); /* mustn't be awaiting sent callback */ - LASSERT (!tx->tx_waiting); /* mustn't be awaiting peer response */ - LASSERT (tx->tx_pool != NULL); + LASSERT(net != NULL); + LASSERT(!in_interrupt()); + LASSERT(!tx->tx_queued); /* mustn't be queued for sending */ + LASSERT(tx->tx_sending == 0); /* mustn't be awaiting sent callback */ + LASSERT(!tx->tx_waiting); /* mustn't be awaiting peer response */ + LASSERT(tx->tx_pool != NULL); kiblnd_unmap_tx(ni, tx); @@ -63,7 +63,7 @@ kiblnd_tx_done (lnet_ni_t *ni, kib_tx_t *tx) rc = tx->tx_status; if (tx->tx_conn != NULL) { - LASSERT (ni == tx->tx_conn->ibc_peer->ibp_ni); + LASSERT(ni == tx->tx_conn->ibc_peer->ibp_ni); kiblnd_conn_decref(tx->tx_conn); tx->tx_conn = NULL; @@ -84,12 +84,12 @@ kiblnd_tx_done (lnet_ni_t *ni, kib_tx_t *tx) } void -kiblnd_txlist_done (lnet_ni_t *ni, struct list_head *txlist, int status) +kiblnd_txlist_done(lnet_ni_t *ni, struct list_head *txlist, int status) { kib_tx_t *tx; - while (!list_empty (txlist)) { - tx = list_entry (txlist->next, kib_tx_t, tx_list); + while (!list_empty(txlist)) { + tx = list_entry(txlist->next, kib_tx_t, tx_list); list_del(&tx->tx_list); /* complete now */ @@ -113,16 +113,16 @@ kiblnd_get_idle_tx(lnet_ni_t *ni, lnet_nid_t target) return NULL; tx = container_of(node, kib_tx_t, tx_list); - LASSERT (tx->tx_nwrq == 0); - LASSERT (!tx->tx_queued); - LASSERT (tx->tx_sending == 0); - LASSERT (!tx->tx_waiting); - LASSERT (tx->tx_status == 0); - LASSERT (tx->tx_conn == NULL); - LASSERT (tx->tx_lntmsg[0] == NULL); - LASSERT 
(tx->tx_lntmsg[1] == NULL); - LASSERT (tx->tx_u.pmr == NULL); - LASSERT (tx->tx_nfrags == 0); + LASSERT(tx->tx_nwrq == 0); + LASSERT(!tx->tx_queued); + LASSERT(tx->tx_sending == 0); + LASSERT(!tx->tx_waiting); + LASSERT(tx->tx_status == 0); + LASSERT(tx->tx_conn == NULL); + LASSERT(tx->tx_lntmsg[0] == NULL); + LASSERT(tx->tx_lntmsg[1] == NULL); + LASSERT(tx->tx_u.pmr == NULL); + LASSERT(tx->tx_nfrags == 0); return tx; } @@ -143,7 +143,7 @@ kiblnd_drop_rx(kib_rx_t *rx) } int -kiblnd_post_rx (kib_rx_t *rx, int credit) +kiblnd_post_rx(kib_rx_t *rx, int credit) { kib_conn_t *conn = rx->rx_conn; kib_net_t *net = conn->ibc_peer->ibp_ni->ni_data; @@ -151,14 +151,14 @@ kiblnd_post_rx (kib_rx_t *rx, int credit) struct ib_mr *mr; int rc; - LASSERT (net != NULL); - LASSERT (!in_interrupt()); - LASSERT (credit == IBLND_POSTRX_NO_CREDIT || - credit == IBLND_POSTRX_PEER_CREDIT || - credit == IBLND_POSTRX_RSRVD_CREDIT); + LASSERT(net != NULL); + LASSERT(!in_interrupt()); + LASSERT(credit == IBLND_POSTRX_NO_CREDIT || + credit == IBLND_POSTRX_PEER_CREDIT || + credit == IBLND_POSTRX_RSRVD_CREDIT); mr = kiblnd_find_dma_mr(conn->ibc_hdev, rx->rx_msgaddr, IBLND_MSG_SIZE); - LASSERT (mr != NULL); + LASSERT(mr != NULL); rx->rx_sge.lkey = mr->lkey; rx->rx_sge.addr = rx->rx_msgaddr; @@ -169,8 +169,8 @@ kiblnd_post_rx (kib_rx_t *rx, int credit) rx->rx_wrq.num_sge = 1; rx->rx_wrq.wr_id = kiblnd_ptr2wreqid(rx, IBLND_WID_RX); - LASSERT (conn->ibc_state >= IBLND_CONN_INIT); - LASSERT (rx->rx_nob >= 0); /* not posted */ + LASSERT(conn->ibc_state >= IBLND_CONN_INIT); + LASSERT(rx->rx_nob >= 0); /* not posted */ if (conn->ibc_state > IBLND_CONN_ESTABLISHED) { kiblnd_drop_rx(rx); /* No more posts for this rx */ @@ -217,8 +217,8 @@ kiblnd_find_waiting_tx_locked(kib_conn_t *conn, int txtype, __u64 cookie) list_for_each(tmp, &conn->ibc_active_txs) { kib_tx_t *tx = list_entry(tmp, kib_tx_t, tx_list); - LASSERT (!tx->tx_queued); - LASSERT (tx->tx_sending != 0 || tx->tx_waiting); + LASSERT(!tx->tx_queued); + LASSERT(tx->tx_sending != 0 || tx->tx_waiting); if (tx->tx_cookie != cookie) continue; @@ -293,7 +293,7 @@ kiblnd_send_completion(kib_conn_t *conn, int type, int status, __u64 cookie) } static void -kiblnd_handle_rx (kib_rx_t *rx) +kiblnd_handle_rx(kib_rx_t *rx) { kib_msg_t *msg = rx->rx_msg; kib_conn_t *conn = rx->rx_conn; @@ -304,11 +304,11 @@ kiblnd_handle_rx (kib_rx_t *rx) int rc2; int post_credit; - LASSERT (conn->ibc_state >= IBLND_CONN_ESTABLISHED); + LASSERT(conn->ibc_state >= IBLND_CONN_ESTABLISHED); - CDEBUG (D_NET, "Received %x[%d] from %s\n", - msg->ibm_type, credits, - libcfs_nid2str(conn->ibc_peer->ibp_nid)); + CDEBUG(D_NET, "Received %x[%d] from %s\n", + msg->ibm_type, credits, + libcfs_nid2str(conn->ibc_peer->ibp_nid)); if (credits != 0) { /* Have I received credits that will let me send? */ @@ -377,8 +377,8 @@ kiblnd_handle_rx (kib_rx_t *rx) break; case IBLND_MSG_PUT_NAK: - CWARN ("PUT_NACK from %s\n", - libcfs_nid2str(conn->ibc_peer->ibp_nid)); + CWARN("PUT_NACK from %s\n", + libcfs_nid2str(conn->ibc_peer->ibp_nid)); post_credit = IBLND_POSTRX_RSRVD_CREDIT; kiblnd_handle_completion(conn, IBLND_MSG_PUT_REQ, msg->ibm_u.completion.ibcm_status, @@ -402,7 +402,7 @@ kiblnd_handle_rx (kib_rx_t *rx) break; } - LASSERT (tx->tx_waiting); + LASSERT(tx->tx_waiting); /* CAVEAT EMPTOR: I could be racing with tx_complete, but... * (a) I can overwrite tx_msg since my peer has received it! * (b) tx_waiting set tells tx_complete() it's not done. 
*/ @@ -454,7 +454,7 @@ kiblnd_handle_rx (kib_rx_t *rx) } static void -kiblnd_rx_complete (kib_rx_t *rx, int status, int nob) +kiblnd_rx_complete(kib_rx_t *rx, int status, int nob) { kib_msg_t *msg = rx->rx_msg; kib_conn_t *conn = rx->rx_conn; @@ -463,8 +463,8 @@ kiblnd_rx_complete (kib_rx_t *rx, int status, int nob) int rc; int err = -EIO; - LASSERT (net != NULL); - LASSERT (rx->rx_nob < 0); /* was posted */ + LASSERT(net != NULL); + LASSERT(rx->rx_nob < 0); /* was posted */ rx->rx_nob = 0; /* isn't now */ if (conn->ibc_state > IBLND_CONN_ESTABLISHED) @@ -476,12 +476,12 @@ kiblnd_rx_complete (kib_rx_t *rx, int status, int nob) goto failed; } - LASSERT (nob >= 0); + LASSERT(nob >= 0); rx->rx_nob = nob; rc = kiblnd_unpack_msg(msg, rx->rx_nob); if (rc != 0) { - CERROR ("Error %d unpacking rx from %s\n", + CERROR("Error %d unpacking rx from %s\n", rc, libcfs_nid2str(conn->ibc_peer->ibp_nid)); goto failed; } @@ -490,7 +490,7 @@ kiblnd_rx_complete (kib_rx_t *rx, int status, int nob) msg->ibm_dstnid != ni->ni_nid || msg->ibm_srcstamp != conn->ibc_incarnation || msg->ibm_dststamp != net->ibn_incarnation) { - CERROR ("Stale rx from %s\n", + CERROR("Stale rx from %s\n", libcfs_nid2str(conn->ibc_peer->ibp_nid)); err = -ESTALE; goto failed; @@ -525,13 +525,13 @@ kiblnd_rx_complete (kib_rx_t *rx, int status, int nob) } static struct page * -kiblnd_kvaddr_to_page (unsigned long vaddr) +kiblnd_kvaddr_to_page(unsigned long vaddr) { struct page *page; if (is_vmalloc_addr((void *)vaddr)) { - page = vmalloc_to_page ((void *)vaddr); - LASSERT (page != NULL); + page = vmalloc_to_page((void *)vaddr); + LASSERT(page != NULL); return page; } #ifdef CONFIG_HIGHMEM @@ -542,8 +542,8 @@ kiblnd_kvaddr_to_page (unsigned long vaddr) LBUG(); } #endif - page = virt_to_page (vaddr); - LASSERT (page != NULL); + page = virt_to_page(vaddr); + LASSERT(page != NULL); return page; } @@ -567,7 +567,7 @@ kiblnd_fmr_map_tx(kib_net_t *net, kib_tx_t *tx, kib_rdma_desc_t *rd, int nob) for (i = 0, npages = 0; i < rd->rd_nfrags; i++) { for (size = 0; size < rd->rd_frags[i].rf_nob; size += hdev->ibh_page_size) { - pages[npages ++] = (rd->rd_frags[i].rf_addr & + pages[npages++] = (rd->rd_frags[i].rf_addr & hdev->ibh_page_mask) + size; } } @@ -577,7 +577,7 @@ kiblnd_fmr_map_tx(kib_net_t *net, kib_tx_t *tx, kib_rdma_desc_t *rd, int nob) fps = net->ibn_fmr_ps[cpt]; rc = kiblnd_fmr_pool_map(fps, pages, npages, 0, &tx->tx_u.fmr); if (rc != 0) { - CERROR ("Can't map %d pages: %d\n", npages, rc); + CERROR("Can't map %d pages: %d\n", npages, rc); return rc; } @@ -706,26 +706,26 @@ kiblnd_setup_rd_iov(lnet_ni_t *ni, kib_tx_t *tx, kib_rdma_desc_t *rd, int fragnob; int page_offset; - LASSERT (nob > 0); - LASSERT (niov > 0); - LASSERT (net != NULL); + LASSERT(nob > 0); + LASSERT(niov > 0); + LASSERT(net != NULL); while (offset >= iov->iov_len) { offset -= iov->iov_len; niov--; iov++; - LASSERT (niov > 0); + LASSERT(niov > 0); } sg = tx->tx_frags; do { - LASSERT (niov > 0); + LASSERT(niov > 0); vaddr = ((unsigned long)iov->iov_base) + offset; page_offset = vaddr & (PAGE_SIZE - 1); page = kiblnd_kvaddr_to_page(vaddr); if (page == NULL) { - CERROR ("Can't find page\n"); + CERROR("Can't find page\n"); return -EFAULT; } @@ -749,7 +749,7 @@ kiblnd_setup_rd_iov(lnet_ni_t *ni, kib_tx_t *tx, kib_rdma_desc_t *rd, } static int -kiblnd_setup_rd_kiov (lnet_ni_t *ni, kib_tx_t *tx, kib_rdma_desc_t *rd, +kiblnd_setup_rd_kiov(lnet_ni_t *ni, kib_tx_t *tx, kib_rdma_desc_t *rd, int nkiov, lnet_kiov_t *kiov, int offset, int nob) { kib_net_t *net = ni->ni_data; @@ -758,20 
+758,20 @@ kiblnd_setup_rd_kiov (lnet_ni_t *ni, kib_tx_t *tx, kib_rdma_desc_t *rd, CDEBUG(D_NET, "niov %d offset %d nob %d\n", nkiov, offset, nob); - LASSERT (nob > 0); - LASSERT (nkiov > 0); - LASSERT (net != NULL); + LASSERT(nob > 0); + LASSERT(nkiov > 0); + LASSERT(net != NULL); while (offset >= kiov->kiov_len) { offset -= kiov->kiov_len; nkiov--; kiov++; - LASSERT (nkiov > 0); + LASSERT(nkiov > 0); } sg = tx->tx_frags; do { - LASSERT (nkiov > 0); + LASSERT(nkiov > 0); fragnob = min((int)(kiov->kiov_len - offset), nob); @@ -789,7 +789,7 @@ kiblnd_setup_rd_kiov (lnet_ni_t *ni, kib_tx_t *tx, kib_rdma_desc_t *rd, } static int -kiblnd_post_tx_locked (kib_conn_t *conn, kib_tx_t *tx, int credit) +kiblnd_post_tx_locked(kib_conn_t *conn, kib_tx_t *tx, int credit) __releases(conn->ibc_lock) __acquires(conn->ibc_lock) { @@ -800,16 +800,16 @@ kiblnd_post_tx_locked (kib_conn_t *conn, kib_tx_t *tx, int credit) int done; struct ib_send_wr *bad_wrq; - LASSERT (tx->tx_queued); + LASSERT(tx->tx_queued); /* We rely on this for QP sizing */ - LASSERT (tx->tx_nwrq > 0); - LASSERT (tx->tx_nwrq <= 1 + IBLND_RDMA_FRAGS(ver)); + LASSERT(tx->tx_nwrq > 0); + LASSERT(tx->tx_nwrq <= 1 + IBLND_RDMA_FRAGS(ver)); - LASSERT (credit == 0 || credit == 1); - LASSERT (conn->ibc_outstanding_credits >= 0); - LASSERT (conn->ibc_outstanding_credits <= IBLND_MSG_QUEUE_SIZE(ver)); - LASSERT (conn->ibc_credits >= 0); - LASSERT (conn->ibc_credits <= IBLND_MSG_QUEUE_SIZE(ver)); + LASSERT(credit == 0 || credit == 1); + LASSERT(conn->ibc_outstanding_credits >= 0); + LASSERT(conn->ibc_outstanding_credits <= IBLND_MSG_QUEUE_SIZE(ver)); + LASSERT(conn->ibc_credits >= 0); + LASSERT(conn->ibc_credits <= IBLND_MSG_QUEUE_SIZE(ver)); if (conn->ibc_nsends_posted == IBLND_CONCURRENT_SENDS(ver)) { /* tx completions outstanding... 
*/ @@ -923,7 +923,7 @@ kiblnd_post_tx_locked (kib_conn_t *conn, kib_tx_t *tx, int credit) } void -kiblnd_check_sends (kib_conn_t *conn) +kiblnd_check_sends(kib_conn_t *conn) { int ver = conn->ibc_version; lnet_ni_t *ni = conn->ibc_peer->ibp_ni; @@ -938,10 +938,10 @@ kiblnd_check_sends (kib_conn_t *conn) spin_lock(&conn->ibc_lock); - LASSERT (conn->ibc_nsends_posted <= IBLND_CONCURRENT_SENDS(ver)); - LASSERT (!IBLND_OOB_CAPABLE(ver) || + LASSERT(conn->ibc_nsends_posted <= IBLND_CONCURRENT_SENDS(ver)); + LASSERT(!IBLND_OOB_CAPABLE(ver) || conn->ibc_noops_posted <= IBLND_OOB_MSGS(ver)); - LASSERT (conn->ibc_reserved_credits >= 0); + LASSERT(conn->ibc_reserved_credits >= 0); while (conn->ibc_reserved_credits > 0 && !list_empty(&conn->ibc_tx_queue_rsrvd)) { @@ -974,7 +974,7 @@ kiblnd_check_sends (kib_conn_t *conn) tx = list_entry(conn->ibc_tx_queue_nocred.next, kib_tx_t, tx_list); } else if (!list_empty(&conn->ibc_tx_noops)) { - LASSERT (!IBLND_OOB_CAPABLE(ver)); + LASSERT(!IBLND_OOB_CAPABLE(ver)); credit = 1; tx = list_entry(conn->ibc_tx_noops.next, kib_tx_t, tx_list); @@ -995,13 +995,13 @@ kiblnd_check_sends (kib_conn_t *conn) } static void -kiblnd_tx_complete (kib_tx_t *tx, int status) +kiblnd_tx_complete(kib_tx_t *tx, int status) { int failed = (status != IB_WC_SUCCESS); kib_conn_t *conn = tx->tx_conn; int idle; - LASSERT (tx->tx_sending > 0); + LASSERT(tx->tx_sending > 0); if (failed) { if (conn->ibc_state == IBLND_CONN_ESTABLISHED) @@ -1049,22 +1049,22 @@ kiblnd_tx_complete (kib_tx_t *tx, int status) } void -kiblnd_init_tx_msg (lnet_ni_t *ni, kib_tx_t *tx, int type, int body_nob) +kiblnd_init_tx_msg(lnet_ni_t *ni, kib_tx_t *tx, int type, int body_nob) { kib_hca_dev_t *hdev = tx->tx_pool->tpo_hdev; struct ib_sge *sge = &tx->tx_sge[tx->tx_nwrq]; struct ib_send_wr *wrq = &tx->tx_wrq[tx->tx_nwrq]; - int nob = offsetof (kib_msg_t, ibm_u) + body_nob; + int nob = offsetof(kib_msg_t, ibm_u) + body_nob; struct ib_mr *mr; - LASSERT (tx->tx_nwrq >= 0); - LASSERT (tx->tx_nwrq < IBLND_MAX_RDMA_FRAGS + 1); - LASSERT (nob <= IBLND_MSG_SIZE); + LASSERT(tx->tx_nwrq >= 0); + LASSERT(tx->tx_nwrq < IBLND_MAX_RDMA_FRAGS + 1); + LASSERT(nob <= IBLND_MSG_SIZE); kiblnd_init_msg(tx->tx_msg, type, body_nob); mr = kiblnd_find_dma_mr(hdev, tx->tx_msgaddr, nob); - LASSERT (mr != NULL); + LASSERT(mr != NULL); sge->lkey = mr->lkey; sge->addr = tx->tx_msgaddr; @@ -1083,7 +1083,7 @@ kiblnd_init_tx_msg (lnet_ni_t *ni, kib_tx_t *tx, int type, int body_nob) } int -kiblnd_init_rdma (kib_conn_t *conn, kib_tx_t *tx, int type, +kiblnd_init_rdma(kib_conn_t *conn, kib_tx_t *tx, int type, int resid, kib_rdma_desc_t *dstrd, __u64 dstcookie) { kib_msg_t *ibmsg = tx->tx_msg; @@ -1095,9 +1095,9 @@ kiblnd_init_rdma (kib_conn_t *conn, kib_tx_t *tx, int type, int dstidx; int wrknob; - LASSERT (!in_interrupt()); - LASSERT (tx->tx_nwrq == 0); - LASSERT (type == IBLND_MSG_GET_DONE || + LASSERT(!in_interrupt()); + LASSERT(tx->tx_nwrq == 0); + LASSERT(type == IBLND_MSG_GET_DONE || type == IBLND_MSG_PUT_DONE); srcidx = dstidx = 0; @@ -1162,19 +1162,19 @@ kiblnd_init_rdma (kib_conn_t *conn, kib_tx_t *tx, int type, ibmsg->ibm_u.completion.ibcm_status = rc; ibmsg->ibm_u.completion.ibcm_cookie = dstcookie; kiblnd_init_tx_msg(conn->ibc_peer->ibp_ni, tx, - type, sizeof (kib_completion_msg_t)); + type, sizeof(kib_completion_msg_t)); return rc; } void -kiblnd_queue_tx_locked (kib_tx_t *tx, kib_conn_t *conn) +kiblnd_queue_tx_locked(kib_tx_t *tx, kib_conn_t *conn) { struct list_head *q; - LASSERT (tx->tx_nwrq > 0); /* work items set up */ - LASSERT 
(!tx->tx_queued); /* not queued for sending already */ - LASSERT (conn->ibc_state >= IBLND_CONN_ESTABLISHED); + LASSERT(tx->tx_nwrq > 0); /* work items set up */ + LASSERT(!tx->tx_queued); /* not queued for sending already */ + LASSERT(conn->ibc_state >= IBLND_CONN_ESTABLISHED); tx->tx_queued = 1; tx->tx_deadline = jiffies + (*kiblnd_tunables.kib_timeout * HZ); @@ -1182,11 +1182,11 @@ kiblnd_queue_tx_locked (kib_tx_t *tx, kib_conn_t *conn) if (tx->tx_conn == NULL) { kiblnd_conn_addref(conn); tx->tx_conn = conn; - LASSERT (tx->tx_msg->ibm_type != IBLND_MSG_PUT_DONE); + LASSERT(tx->tx_msg->ibm_type != IBLND_MSG_PUT_DONE); } else { /* PUT_DONE first attached to conn as a PUT_REQ */ - LASSERT (tx->tx_conn == conn); - LASSERT (tx->tx_msg->ibm_type == IBLND_MSG_PUT_DONE); + LASSERT(tx->tx_conn == conn); + LASSERT(tx->tx_msg->ibm_type == IBLND_MSG_PUT_DONE); } switch (tx->tx_msg->ibm_type) { @@ -1221,7 +1221,7 @@ kiblnd_queue_tx_locked (kib_tx_t *tx, kib_conn_t *conn) } void -kiblnd_queue_tx (kib_tx_t *tx, kib_conn_t *conn) +kiblnd_queue_tx(kib_tx_t *tx, kib_conn_t *conn) { spin_lock(&conn->ibc_lock); kiblnd_queue_tx_locked(tx, conn); @@ -1268,7 +1268,7 @@ static int kiblnd_resolve_addr(struct rdma_cm_id *cmid, } static void -kiblnd_connect_peer (kib_peer_t *peer) +kiblnd_connect_peer(kib_peer_t *peer) { struct rdma_cm_id *cmid; kib_dev_t *dev; @@ -1277,8 +1277,8 @@ kiblnd_connect_peer (kib_peer_t *peer) struct sockaddr_in dstaddr; int rc; - LASSERT (net != NULL); - LASSERT (peer->ibp_connecting > 0); + LASSERT(net != NULL); + LASSERT(peer->ibp_connecting > 0); cmid = kiblnd_rdma_create_id(kiblnd_cm_callback, peer, RDMA_PS_TCP, IB_QPT_RC); @@ -1318,7 +1318,7 @@ kiblnd_connect_peer (kib_peer_t *peer) goto failed2; } - LASSERT (cmid->device != NULL); + LASSERT(cmid->device != NULL); CDEBUG(D_NET, "%s: connection bound to %s:%pI4h:%s\n", libcfs_nid2str(peer->ibp_nid), dev->ibd_ifname, &dev->ibd_ifip, cmid->device->name); @@ -1333,7 +1333,7 @@ kiblnd_connect_peer (kib_peer_t *peer) } void -kiblnd_launch_tx (lnet_ni_t *ni, kib_tx_t *tx, lnet_nid_t nid) +kiblnd_launch_tx(lnet_ni_t *ni, kib_tx_t *tx, lnet_nid_t nid) { kib_peer_t *peer; kib_peer_t *peer2; @@ -1345,8 +1345,8 @@ kiblnd_launch_tx (lnet_ni_t *ni, kib_tx_t *tx, lnet_nid_t nid) /* If I get here, I've committed to send, so I complete the tx with * failure on any problems */ - LASSERT (tx == NULL || tx->tx_conn == NULL); /* only set when assigned a conn */ - LASSERT (tx == NULL || tx->tx_nwrq > 0); /* work items have been set up */ + LASSERT(tx == NULL || tx->tx_conn == NULL); /* only set when assigned a conn */ + LASSERT(tx == NULL || tx->tx_nwrq > 0); /* work items have been set up */ /* First time, just use a read lock since I expect to find my peer * connected */ @@ -1374,7 +1374,7 @@ kiblnd_launch_tx (lnet_ni_t *ni, kib_tx_t *tx, lnet_nid_t nid) if (peer != NULL) { if (list_empty(&peer->ibp_conns)) { /* found a peer, but it's still connecting... */ - LASSERT (peer->ibp_connecting != 0 || + LASSERT(peer->ibp_connecting != 0 || peer->ibp_accepting != 0); if (tx != NULL) list_add_tail(&tx->tx_list, @@ -1413,7 +1413,7 @@ kiblnd_launch_tx (lnet_ni_t *ni, kib_tx_t *tx, lnet_nid_t nid) if (peer2 != NULL) { if (list_empty(&peer2->ibp_conns)) { /* found a peer, but it's still connecting... 
*/ - LASSERT (peer2->ibp_connecting != 0 || + LASSERT(peer2->ibp_connecting != 0 || peer2->ibp_accepting != 0); if (tx != NULL) list_add_tail(&tx->tx_list, @@ -1435,11 +1435,11 @@ kiblnd_launch_tx (lnet_ni_t *ni, kib_tx_t *tx, lnet_nid_t nid) } /* Brand new peer */ - LASSERT (peer->ibp_connecting == 0); + LASSERT(peer->ibp_connecting == 0); peer->ibp_connecting = 1; /* always called with a ref on ni, which prevents ni being shutdown */ - LASSERT (((kib_net_t *)ni->ni_data)->ibn_shutdown == 0); + LASSERT(((kib_net_t *)ni->ni_data)->ibn_shutdown == 0); if (tx != NULL) list_add_tail(&tx->tx_list, &peer->ibp_tx_queue); @@ -1454,7 +1454,7 @@ kiblnd_launch_tx (lnet_ni_t *ni, kib_tx_t *tx, lnet_nid_t nid) } int -kiblnd_send (lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg) +kiblnd_send(lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg) { lnet_hdr_t *hdr = &lntmsg->msg_hdr; int type = lntmsg->msg_type; @@ -1476,13 +1476,13 @@ kiblnd_send (lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg) CDEBUG(D_NET, "sending %d bytes in %d frags to %s\n", payload_nob, payload_niov, libcfs_id2str(target)); - LASSERT (payload_nob == 0 || payload_niov > 0); - LASSERT (payload_niov <= LNET_MAX_IOV); + LASSERT(payload_nob == 0 || payload_niov > 0); + LASSERT(payload_niov <= LNET_MAX_IOV); /* Thread context */ - LASSERT (!in_interrupt()); + LASSERT(!in_interrupt()); /* payload is either all vaddrs or all pages */ - LASSERT (!(payload_kiov != NULL && payload_iov != NULL)); + LASSERT(!(payload_kiov != NULL && payload_iov != NULL)); switch (type) { default: @@ -1490,7 +1490,7 @@ kiblnd_send (lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg) return -EIO; case LNET_MSG_ACK: - LASSERT (payload_nob == 0); + LASSERT(payload_nob == 0); break; case LNET_MSG_GET: @@ -1592,12 +1592,12 @@ kiblnd_send (lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg) /* send IMMEDIATE */ - LASSERT (offsetof(kib_msg_t, ibm_u.immediate.ibim_payload[payload_nob]) + LASSERT(offsetof(kib_msg_t, ibm_u.immediate.ibim_payload[payload_nob]) <= IBLND_MSG_SIZE); tx = kiblnd_get_idle_tx(ni, target.nid); if (tx == NULL) { - CERROR ("Can't send %d to %s: tx descs exhausted\n", + CERROR("Can't send %d to %s: tx descs exhausted\n", type, libcfs_nid2str(target.nid)); return -ENOMEM; } @@ -1625,7 +1625,7 @@ kiblnd_send (lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg) } static void -kiblnd_reply (lnet_ni_t *ni, kib_rx_t *rx, lnet_msg_t *lntmsg) +kiblnd_reply(lnet_ni_t *ni, kib_rx_t *rx, lnet_msg_t *lntmsg) { lnet_process_id_t target = lntmsg->msg_target; unsigned int niov = lntmsg->msg_niov; @@ -1687,7 +1687,7 @@ kiblnd_reply (lnet_ni_t *ni, kib_rx_t *rx, lnet_msg_t *lntmsg) } int -kiblnd_recv (lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg, int delayed, +kiblnd_recv(lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg, int delayed, unsigned int niov, struct kvec *iov, lnet_kiov_t *kiov, unsigned int offset, unsigned int mlen, unsigned int rlen) { @@ -1700,10 +1700,10 @@ kiblnd_recv (lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg, int delayed, int post_credit = IBLND_POSTRX_PEER_CREDIT; int rc = 0; - LASSERT (mlen <= rlen); - LASSERT (!in_interrupt()); + LASSERT(mlen <= rlen); + LASSERT(!in_interrupt()); /* Either all pages or all vaddrs */ - LASSERT (!(kiov != NULL && iov != NULL)); + LASSERT(!(kiov != NULL && iov != NULL)); switch (rxmsg->ibm_type) { default: @@ -1712,7 +1712,7 @@ kiblnd_recv (lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg, int delayed, case IBLND_MSG_IMMEDIATE: nob = offsetof(kib_msg_t, ibm_u.immediate.ibim_payload[rlen]); if (nob > 
rx->rx_nob) { - CERROR ("Immediate message from %s too big: %d(%d)\n", + CERROR("Immediate message from %s too big: %d(%d)\n", libcfs_nid2str(rxmsg->ibm_u.immediate.ibim_hdr.src_nid), nob, rx->rx_nob); rc = -EPROTO; @@ -1729,7 +1729,7 @@ kiblnd_recv (lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg, int delayed, IBLND_MSG_SIZE, rxmsg, offsetof(kib_msg_t, ibm_u.immediate.ibim_payload), mlen); - lnet_finalize (ni, lntmsg, 0); + lnet_finalize(ni, lntmsg, 0); break; case IBLND_MSG_PUT_REQ: @@ -1812,13 +1812,13 @@ kiblnd_thread_start(int (*fn)(void *arg), void *arg, char *name) } static void -kiblnd_thread_fini (void) +kiblnd_thread_fini(void) { - atomic_dec (&kiblnd_data.kib_nthreads); + atomic_dec(&kiblnd_data.kib_nthreads); } void -kiblnd_peer_alive (kib_peer_t *peer) +kiblnd_peer_alive(kib_peer_t *peer) { /* This is racy, but everyone's only writing cfs_time_current() */ peer->ibp_last_alive = cfs_time_current(); @@ -1826,7 +1826,7 @@ kiblnd_peer_alive (kib_peer_t *peer) } static void -kiblnd_peer_notify (kib_peer_t *peer) +kiblnd_peer_notify(kib_peer_t *peer) { int error = 0; unsigned long last_alive = 0; @@ -1852,7 +1852,7 @@ kiblnd_peer_notify (kib_peer_t *peer) } void -kiblnd_close_conn_locked (kib_conn_t *conn, int error) +kiblnd_close_conn_locked(kib_conn_t *conn, int error) { /* This just does the immediate housekeeping. 'error' is zero for a * normal shutdown which can happen only after the connection has been @@ -1864,7 +1864,7 @@ kiblnd_close_conn_locked (kib_conn_t *conn, int error) kib_dev_t *dev; unsigned long flags; - LASSERT (error != 0 || conn->ibc_state >= IBLND_CONN_ESTABLISHED); + LASSERT(error != 0 || conn->ibc_state >= IBLND_CONN_ESTABLISHED); if (error != 0 && conn->ibc_comms_error == 0) conn->ibc_comms_error = error; @@ -1894,7 +1894,7 @@ kiblnd_close_conn_locked (kib_conn_t *conn, int error) list_del(&conn->ibc_list); /* connd (see below) takes over ibc_list's ref */ - if (list_empty (&peer->ibp_conns) && /* no more conns */ + if (list_empty(&peer->ibp_conns) && /* no more conns */ kiblnd_peer_active(peer)) { /* still in peer table */ kiblnd_unlink_peer_locked(peer); @@ -1936,14 +1936,13 @@ kiblnd_handle_early_rxs(kib_conn_t *conn) { unsigned long flags; kib_rx_t *rx; + kib_rx_t *tmp; LASSERT(!in_interrupt()); LASSERT(conn->ibc_state >= IBLND_CONN_ESTABLISHED); write_lock_irqsave(&kiblnd_data.kib_global_lock, flags); - while (!list_empty(&conn->ibc_early_rxs)) { - rx = list_entry(conn->ibc_early_rxs.next, - kib_rx_t, rx_list); + list_for_each_entry_safe(rx, tmp, &conn->ibc_early_rxs, rx_list) { list_del(&rx->rx_list); write_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags); @@ -1957,22 +1956,22 @@ kiblnd_handle_early_rxs(kib_conn_t *conn) static void kiblnd_abort_txs(kib_conn_t *conn, struct list_head *txs) { - LIST_HEAD (zombies); + LIST_HEAD(zombies); struct list_head *tmp; struct list_head *nxt; kib_tx_t *tx; spin_lock(&conn->ibc_lock); - list_for_each_safe (tmp, nxt, txs) { - tx = list_entry (tmp, kib_tx_t, tx_list); + list_for_each_safe(tmp, nxt, txs) { + tx = list_entry(tmp, kib_tx_t, tx_list); if (txs == &conn->ibc_active_txs) { - LASSERT (!tx->tx_queued); - LASSERT (tx->tx_waiting || + LASSERT(!tx->tx_queued); + LASSERT(tx->tx_waiting || tx->tx_sending != 0); } else { - LASSERT (tx->tx_queued); + LASSERT(tx->tx_queued); } tx->tx_status = -ECONNABORTED; @@ -1980,8 +1979,8 @@ kiblnd_abort_txs(kib_conn_t *conn, struct list_head *txs) if (tx->tx_sending == 0) { tx->tx_queued = 0; - list_del (&tx->tx_list); - list_add (&tx->tx_list, &zombies); + 
list_del(&tx->tx_list); + list_add(&tx->tx_list, &zombies); } } @@ -1991,10 +1990,10 @@ kiblnd_abort_txs(kib_conn_t *conn, struct list_head *txs) } static void -kiblnd_finalise_conn (kib_conn_t *conn) +kiblnd_finalise_conn(kib_conn_t *conn) { - LASSERT (!in_interrupt()); - LASSERT (conn->ibc_state > IBLND_CONN_INIT); + LASSERT(!in_interrupt()); + LASSERT(conn->ibc_state > IBLND_CONN_INIT); kiblnd_set_conn_state(conn, IBLND_CONN_DISCONNECTED); @@ -2016,21 +2015,21 @@ kiblnd_finalise_conn (kib_conn_t *conn) } void -kiblnd_peer_connect_failed (kib_peer_t *peer, int active, int error) +kiblnd_peer_connect_failed(kib_peer_t *peer, int active, int error) { - LIST_HEAD (zombies); + LIST_HEAD(zombies); unsigned long flags; - LASSERT (error != 0); - LASSERT (!in_interrupt()); + LASSERT(error != 0); + LASSERT(!in_interrupt()); write_lock_irqsave(&kiblnd_data.kib_global_lock, flags); if (active) { - LASSERT (peer->ibp_connecting > 0); + LASSERT(peer->ibp_connecting > 0); peer->ibp_connecting--; } else { - LASSERT (peer->ibp_accepting > 0); + LASSERT(peer->ibp_accepting > 0); peer->ibp_accepting--; } @@ -2053,14 +2052,14 @@ kiblnd_peer_connect_failed (kib_peer_t *peer, int active, int error) peer->ibp_error = error; } else { /* Can't have blocked transmits if there are connections */ - LASSERT (list_empty(&peer->ibp_tx_queue)); + LASSERT(list_empty(&peer->ibp_tx_queue)); } write_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags); kiblnd_peer_notify(peer); - if (list_empty (&zombies)) + if (list_empty(&zombies)) return; CNETERR("Deleting messages for %s: connection failed\n", @@ -2074,6 +2073,7 @@ kiblnd_connreq_done(kib_conn_t *conn, int status) { kib_peer_t *peer = conn->ibc_peer; kib_tx_t *tx; + kib_tx_t *tmp; struct list_head txs; unsigned long flags; int active; @@ -2084,8 +2084,8 @@ kiblnd_connreq_done(kib_conn_t *conn, int status) libcfs_nid2str(peer->ibp_nid), active, conn->ibc_version, status); - LASSERT (!in_interrupt()); - LASSERT ((conn->ibc_state == IBLND_CONN_ACTIVE_CONNECT && + LASSERT(!in_interrupt()); + LASSERT((conn->ibc_state == IBLND_CONN_ACTIVE_CONNECT && peer->ibp_connecting > 0) || (conn->ibc_state == IBLND_CONN_PASSIVE_WAIT && peer->ibp_accepting > 0)); @@ -2150,8 +2150,7 @@ kiblnd_connreq_done(kib_conn_t *conn, int status) /* Schedule blocked txs */ spin_lock(&conn->ibc_lock); - while (!list_empty(&txs)) { - tx = list_entry(txs.next, kib_tx_t, tx_list); + list_for_each_entry_safe(tx, tmp, &txs, tx_list) { list_del(&tx->tx_list); kiblnd_queue_tx_locked(tx, conn); @@ -2176,7 +2175,7 @@ kiblnd_reject(struct rdma_cm_id *cmid, kib_rej_t *rej) } static int -kiblnd_passive_connect (struct rdma_cm_id *cmid, void *priv, int priv_nob) +kiblnd_passive_connect(struct rdma_cm_id *cmid, void *priv, int priv_nob) { rwlock_t *g_lock = &kiblnd_data.kib_global_lock; kib_msg_t *reqmsg = priv; @@ -2194,11 +2193,11 @@ kiblnd_passive_connect (struct rdma_cm_id *cmid, void *priv, int priv_nob) unsigned long flags; int rc; struct sockaddr_in *peer_addr; - LASSERT (!in_interrupt()); + LASSERT(!in_interrupt()); /* cmid inherits 'context' from the corresponding listener id */ ibdev = (kib_dev_t *)cmid->context; - LASSERT (ibdev != NULL); + LASSERT(ibdev != NULL); memset(&rej, 0, sizeof(rej)); rej.ibr_magic = IBLND_MSG_MAGIC; @@ -2366,8 +2365,8 @@ kiblnd_passive_connect (struct rdma_cm_id *cmid, void *priv, int priv_nob) peer = peer2; } else { /* Brand new peer */ - LASSERT (peer->ibp_accepting == 0); - LASSERT (peer->ibp_version == 0 && + LASSERT(peer->ibp_accepting == 0); + LASSERT(peer->ibp_version 
== 0 && peer->ibp_incarnation == 0); peer->ibp_accepting = 1; @@ -2375,7 +2374,7 @@ kiblnd_passive_connect (struct rdma_cm_id *cmid, void *priv, int priv_nob) peer->ibp_incarnation = reqmsg->ibm_srcstamp; /* I have a ref on ni that prevents it being shutdown */ - LASSERT (net->ibn_shutdown == 0); + LASSERT(net->ibn_shutdown == 0); kiblnd_peer_addref(peer); list_add_tail(&peer->ibp_list, kiblnd_nid2peerlist(nid)); @@ -2397,7 +2396,7 @@ kiblnd_passive_connect (struct rdma_cm_id *cmid, void *priv, int priv_nob) conn->ibc_incarnation = reqmsg->ibm_srcstamp; conn->ibc_credits = IBLND_MSG_QUEUE_SIZE(version); conn->ibc_reserved_credits = IBLND_MSG_QUEUE_SIZE(version); - LASSERT (conn->ibc_credits + conn->ibc_reserved_credits + IBLND_OOB_MSGS(version) + LASSERT(conn->ibc_credits + conn->ibc_reserved_credits + IBLND_OOB_MSGS(version) <= IBLND_RX_MSGS(version)); ackmsg = &conn->ibc_connvars->cv_msg; @@ -2449,7 +2448,7 @@ kiblnd_passive_connect (struct rdma_cm_id *cmid, void *priv, int priv_nob) } static void -kiblnd_reconnect (kib_conn_t *conn, int version, +kiblnd_reconnect(kib_conn_t *conn, int version, __u64 incarnation, int why, kib_connparams_t *cp) { kib_peer_t *peer = conn->ibc_peer; @@ -2457,8 +2456,8 @@ kiblnd_reconnect (kib_conn_t *conn, int version, int retry = 0; unsigned long flags; - LASSERT (conn->ibc_state == IBLND_CONN_ACTIVE_CONNECT); - LASSERT (peer->ibp_connecting > 0); /* 'conn' at least */ + LASSERT(conn->ibc_state == IBLND_CONN_ACTIVE_CONNECT); + LASSERT(peer->ibp_connecting > 0); /* 'conn' at least */ write_lock_irqsave(&kiblnd_data.kib_global_lock, flags); @@ -2504,20 +2503,20 @@ kiblnd_reconnect (kib_conn_t *conn, int version, CNETERR("%s: retrying (%s), %x, %x, queue_dep: %d, max_frag: %d, msg_size: %d\n", libcfs_nid2str(peer->ibp_nid), reason, IBLND_MSG_VERSION, version, - cp != NULL? cp->ibcp_queue_depth :IBLND_MSG_QUEUE_SIZE(version), - cp != NULL? cp->ibcp_max_frags : IBLND_RDMA_FRAGS(version), - cp != NULL? cp->ibcp_max_msg_size: IBLND_MSG_SIZE); + cp != NULL ? cp->ibcp_queue_depth : IBLND_MSG_QUEUE_SIZE(version), + cp != NULL ? cp->ibcp_max_frags : IBLND_RDMA_FRAGS(version), + cp != NULL ? 
cp->ibcp_max_msg_size : IBLND_MSG_SIZE); kiblnd_connect_peer(peer); } static void -kiblnd_rejected (kib_conn_t *conn, int reason, void *priv, int priv_nob) +kiblnd_rejected(kib_conn_t *conn, int reason, void *priv, int priv_nob) { kib_peer_t *peer = conn->ibc_peer; - LASSERT (!in_interrupt()); - LASSERT (conn->ibc_state == IBLND_CONN_ACTIVE_CONNECT); + LASSERT(!in_interrupt()); + LASSERT(conn->ibc_state == IBLND_CONN_ACTIVE_CONNECT); switch (reason) { case IB_CM_REJ_STALE_CONN: @@ -2651,7 +2650,7 @@ kiblnd_rejected (kib_conn_t *conn, int reason, void *priv, int priv_nob) } static void -kiblnd_check_connreply (kib_conn_t *conn, void *priv, int priv_nob) +kiblnd_check_connreply(kib_conn_t *conn, void *priv, int priv_nob) { kib_peer_t *peer = conn->ibc_peer; lnet_ni_t *ni = peer->ibp_ni; @@ -2661,7 +2660,7 @@ kiblnd_check_connreply (kib_conn_t *conn, void *priv, int priv_nob) int rc = kiblnd_unpack_msg(msg, priv_nob); unsigned long flags; - LASSERT (net != NULL); + LASSERT(net != NULL); if (rc != 0) { CERROR("Can't unpack connack from %s: %d\n", @@ -2730,7 +2729,7 @@ kiblnd_check_connreply (kib_conn_t *conn, void *priv, int priv_nob) conn->ibc_incarnation = msg->ibm_srcstamp; conn->ibc_credits = conn->ibc_reserved_credits = IBLND_MSG_QUEUE_SIZE(ver); - LASSERT (conn->ibc_credits + conn->ibc_reserved_credits + IBLND_OOB_MSGS(ver) + LASSERT(conn->ibc_credits + conn->ibc_reserved_credits + IBLND_OOB_MSGS(ver) <= IBLND_RX_MSGS(ver)); kiblnd_connreq_done(conn, 0); @@ -2742,13 +2741,13 @@ kiblnd_check_connreply (kib_conn_t *conn, void *priv, int priv_nob) * kiblnd_connreq_done(0) moves the conn state to ESTABLISHED, but then * immediately tears it down. */ - LASSERT (rc != 0); + LASSERT(rc != 0); conn->ibc_comms_error = rc; kiblnd_connreq_done(conn, 0); } static int -kiblnd_active_connect (struct rdma_cm_id *cmid) +kiblnd_active_connect(struct rdma_cm_id *cmid) { kib_peer_t *peer = (kib_peer_t *)cmid->context; kib_conn_t *conn; @@ -2913,7 +2912,7 @@ kiblnd_cm_callback(struct rdma_cm_id *cmid, struct rdma_cm_event *event) LBUG(); case IBLND_CONN_PASSIVE_WAIT: - CERROR ("%s: REJECTED %d\n", + CERROR("%s: REJECTED %d\n", libcfs_nid2str(conn->ibc_peer->ibp_nid), event->status); kiblnd_connreq_done(conn, -ECONNRESET); @@ -2987,17 +2986,17 @@ kiblnd_check_txs_locked(kib_conn_t *conn, struct list_head *txs) kib_tx_t *tx; struct list_head *ttmp; - list_for_each (ttmp, txs) { - tx = list_entry (ttmp, kib_tx_t, tx_list); + list_for_each(ttmp, txs) { + tx = list_entry(ttmp, kib_tx_t, tx_list); if (txs != &conn->ibc_active_txs) { - LASSERT (tx->tx_queued); + LASSERT(tx->tx_queued); } else { - LASSERT (!tx->tx_queued); - LASSERT (tx->tx_waiting || tx->tx_sending != 0); + LASSERT(!tx->tx_queued); + LASSERT(tx->tx_waiting || tx->tx_sending != 0); } - if (cfs_time_aftereq (jiffies, tx->tx_deadline)) { + if (cfs_time_aftereq(jiffies, tx->tx_deadline)) { CERROR("Timed out tx: %s, %lu seconds\n", kiblnd_queue2str(conn, txs), cfs_duration_sec(jiffies - tx->tx_deadline)); @@ -3019,14 +3018,15 @@ kiblnd_conn_timed_out_locked(kib_conn_t *conn) } static void -kiblnd_check_conns (int idx) +kiblnd_check_conns(int idx) { - LIST_HEAD (closes); - LIST_HEAD (checksends); + LIST_HEAD(closes); + LIST_HEAD(checksends); struct list_head *peers = &kiblnd_data.kib_peers[idx]; struct list_head *ptmp; kib_peer_t *peer; kib_conn_t *conn; + kib_conn_t *tmp; struct list_head *ctmp; unsigned long flags; @@ -3035,16 +3035,16 @@ kiblnd_check_conns (int idx) * take a look... 
*/ read_lock_irqsave(&kiblnd_data.kib_global_lock, flags); - list_for_each (ptmp, peers) { - peer = list_entry (ptmp, kib_peer_t, ibp_list); + list_for_each(ptmp, peers) { + peer = list_entry(ptmp, kib_peer_t, ibp_list); - list_for_each (ctmp, &peer->ibp_conns) { + list_for_each(ctmp, &peer->ibp_conns) { int timedout; int sendnoop; conn = list_entry(ctmp, kib_conn_t, ibc_list); - LASSERT (conn->ibc_state == IBLND_CONN_ESTABLISHED); + LASSERT(conn->ibc_state == IBLND_CONN_ESTABLISHED); spin_lock(&conn->ibc_lock); @@ -3080,9 +3080,7 @@ kiblnd_check_conns (int idx) /* Handle timeout by closing the whole * connection. We can only be sure RDMA activity * has ceased once the QP has been modified. */ - while (!list_empty(&closes)) { - conn = list_entry(closes.next, - kib_conn_t, ibc_connd_list); + list_for_each_entry_safe(conn, tmp, &closes, ibc_connd_list) { list_del(&conn->ibc_connd_list); kiblnd_close_conn(conn, -ETIMEDOUT); kiblnd_conn_decref(conn); @@ -3101,11 +3099,11 @@ kiblnd_check_conns (int idx) } static void -kiblnd_disconnect_conn (kib_conn_t *conn) +kiblnd_disconnect_conn(kib_conn_t *conn) { - LASSERT (!in_interrupt()); - LASSERT (current == kiblnd_data.kib_connd); - LASSERT (conn->ibc_state == IBLND_CONN_CLOSING); + LASSERT(!in_interrupt()); + LASSERT(current == kiblnd_data.kib_connd); + LASSERT(conn->ibc_state == IBLND_CONN_CLOSING); rdma_disconnect(conn->ibc_cmid); kiblnd_finalise_conn(conn); @@ -3114,7 +3112,7 @@ kiblnd_disconnect_conn (kib_conn_t *conn) } int -kiblnd_connd (void *arg) +kiblnd_connd(void *arg) { wait_queue_t wait; unsigned long flags; @@ -3125,7 +3123,7 @@ kiblnd_connd (void *arg) int peer_index = 0; unsigned long deadline = jiffies; - cfs_block_allsigs (); + cfs_block_allsigs(); init_waitqueue_entry(&wait, current); kiblnd_data.kib_connd = current; @@ -3136,7 +3134,7 @@ kiblnd_connd (void *arg) dropped_lock = 0; - if (!list_empty (&kiblnd_data.kib_connd_zombies)) { + if (!list_empty(&kiblnd_data.kib_connd_zombies)) { conn = list_entry(kiblnd_data. 
\ kib_connd_zombies.next, kib_conn_t, ibc_list); @@ -3239,7 +3237,7 @@ kiblnd_qp_event(struct ib_event *event, void *arg) } static void -kiblnd_complete (struct ib_wc *wc) +kiblnd_complete(struct ib_wc *wc) { switch (kiblnd_wreqid2type(wc->wr_id)) { default: @@ -3440,9 +3438,9 @@ kiblnd_failover_thread(void *arg) unsigned long flags; int rc; - LASSERT (*kiblnd_tunables.kib_dev_failover != 0); + LASSERT(*kiblnd_tunables.kib_dev_failover != 0); - cfs_block_allsigs (); + cfs_block_allsigs(); init_waitqueue_entry(&wait, current); write_lock_irqsave(glock, flags); @@ -3469,7 +3467,7 @@ kiblnd_failover_thread(void *arg) write_lock_irqsave(glock, flags); - LASSERT (dev->ibd_failover); + LASSERT(dev->ibd_failover); dev->ibd_failover = 0; if (rc >= 0) { /* Device is OK or failover succeed */ dev->ibd_next_failover = cfs_time_shift(3); diff --git a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_modparams.c b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_modparams.c index 8b4a8e9a29b4..eedf01afd57f 100644 --- a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_modparams.c +++ b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_modparams.c @@ -44,7 +44,7 @@ static int service = 987; module_param(service, int, 0444); MODULE_PARM_DESC(service, "service number (within RDMA_PS_TCP)"); -static int cksum = 0; +static int cksum; module_param(cksum, int, 0644); MODULE_PARM_DESC(cksum, "set non-zero to enable message (not RDMA) checksums"); @@ -72,11 +72,11 @@ static int peer_credits = 8; module_param(peer_credits, int, 0444); MODULE_PARM_DESC(peer_credits, "# concurrent sends to 1 peer"); -static int peer_credits_hiw = 0; +static int peer_credits_hiw; module_param(peer_credits_hiw, int, 0444); MODULE_PARM_DESC(peer_credits_hiw, "when eagerly to return credits"); -static int peer_buffer_credits = 0; +static int peer_buffer_credits; module_param(peer_buffer_credits, int, 0444); MODULE_PARM_DESC(peer_buffer_credits, "# per-peer router buffer credits"); @@ -100,15 +100,15 @@ static int keepalive = 100; module_param(keepalive, int, 0644); MODULE_PARM_DESC(keepalive, "Idle time in seconds before sending a keepalive"); -static int ib_mtu = 0; +static int ib_mtu; module_param(ib_mtu, int, 0444); MODULE_PARM_DESC(ib_mtu, "IB MTU 256/512/1024/2048/4096"); -static int concurrent_sends = 0; +static int concurrent_sends; module_param(concurrent_sends, int, 0444); MODULE_PARM_DESC(concurrent_sends, "send work-queue sizing"); -static int map_on_demand = 0; +static int map_on_demand; module_param(map_on_demand, int, 0444); MODULE_PARM_DESC(map_on_demand, "map on demand"); @@ -136,12 +136,12 @@ MODULE_PARM_DESC(pmr_pool_size, "size of MR cache pmr pool on each CPT"); * 1: enable failover if necessary * 2: force to failover (for debug) */ -static int dev_failover = 0; +static int dev_failover; module_param(dev_failover, int, 0444); MODULE_PARM_DESC(dev_failover, "HCA failover for bonding (0 off, 1 on, other values reserved)"); -static int require_privileged_port = 0; +static int require_privileged_port; module_param(require_privileged_port, int, 0644); MODULE_PARM_DESC(require_privileged_port, "require privileged port when accepting connection"); @@ -177,7 +177,7 @@ kib_tunables_t kiblnd_tunables = { }; int -kiblnd_tunables_init (void) +kiblnd_tunables_init(void) { if (kiblnd_translate_mtu(*kiblnd_tunables.kib_ib_mtu) < 0) { CERROR("Invalid ib_mtu %d, expected 256/512/1024/2048/4096\n", diff --git a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd.c b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd.c index 
5956dbac5d04..7586b7e4040b 100644 --- a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd.c +++ b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd.c @@ -65,15 +65,15 @@ ksocknal_ip2iface(lnet_ni_t *ni, __u32 ip) } static ksock_route_t * -ksocknal_create_route (__u32 ipaddr, int port) +ksocknal_create_route(__u32 ipaddr, int port) { ksock_route_t *route; - LIBCFS_ALLOC (route, sizeof (*route)); + LIBCFS_ALLOC(route, sizeof(*route)); if (route == NULL) return NULL; - atomic_set (&route->ksnr_refcount, 1); + atomic_set(&route->ksnr_refcount, 1); route->ksnr_peer = NULL; route->ksnr_retry_interval = 0; /* OK to connect at any time */ route->ksnr_ipaddr = ipaddr; @@ -89,43 +89,43 @@ ksocknal_create_route (__u32 ipaddr, int port) } void -ksocknal_destroy_route (ksock_route_t *route) +ksocknal_destroy_route(ksock_route_t *route) { - LASSERT (atomic_read(&route->ksnr_refcount) == 0); + LASSERT(atomic_read(&route->ksnr_refcount) == 0); if (route->ksnr_peer != NULL) ksocknal_peer_decref(route->ksnr_peer); - LIBCFS_FREE (route, sizeof (*route)); + LIBCFS_FREE(route, sizeof(*route)); } static int -ksocknal_create_peer (ksock_peer_t **peerp, lnet_ni_t *ni, lnet_process_id_t id) +ksocknal_create_peer(ksock_peer_t **peerp, lnet_ni_t *ni, lnet_process_id_t id) { ksock_net_t *net = ni->ni_data; ksock_peer_t *peer; - LASSERT (id.nid != LNET_NID_ANY); - LASSERT (id.pid != LNET_PID_ANY); - LASSERT (!in_interrupt()); + LASSERT(id.nid != LNET_NID_ANY); + LASSERT(id.pid != LNET_PID_ANY); + LASSERT(!in_interrupt()); - LIBCFS_ALLOC (peer, sizeof (*peer)); + LIBCFS_ALLOC(peer, sizeof(*peer)); if (peer == NULL) return -ENOMEM; peer->ksnp_ni = ni; peer->ksnp_id = id; - atomic_set (&peer->ksnp_refcount, 1); /* 1 ref for caller */ + atomic_set(&peer->ksnp_refcount, 1); /* 1 ref for caller */ peer->ksnp_closing = 0; peer->ksnp_accepting = 0; peer->ksnp_proto = NULL; peer->ksnp_last_alive = 0; peer->ksnp_zc_next_cookie = SOCKNAL_KEEPALIVE_PING + 1; - INIT_LIST_HEAD (&peer->ksnp_conns); - INIT_LIST_HEAD (&peer->ksnp_routes); - INIT_LIST_HEAD (&peer->ksnp_tx_queue); - INIT_LIST_HEAD (&peer->ksnp_zc_req_list); + INIT_LIST_HEAD(&peer->ksnp_conns); + INIT_LIST_HEAD(&peer->ksnp_routes); + INIT_LIST_HEAD(&peer->ksnp_tx_queue); + INIT_LIST_HEAD(&peer->ksnp_zc_req_list); spin_lock_init(&peer->ksnp_lock); spin_lock_bh(&net->ksnn_lock); @@ -147,21 +147,21 @@ ksocknal_create_peer (ksock_peer_t **peerp, lnet_ni_t *ni, lnet_process_id_t id) } void -ksocknal_destroy_peer (ksock_peer_t *peer) +ksocknal_destroy_peer(ksock_peer_t *peer) { ksock_net_t *net = peer->ksnp_ni->ni_data; - CDEBUG (D_NET, "peer %s %p deleted\n", + CDEBUG(D_NET, "peer %s %p deleted\n", libcfs_id2str(peer->ksnp_id), peer); - LASSERT (atomic_read (&peer->ksnp_refcount) == 0); - LASSERT (peer->ksnp_accepting == 0); - LASSERT (list_empty (&peer->ksnp_conns)); - LASSERT (list_empty (&peer->ksnp_routes)); - LASSERT (list_empty (&peer->ksnp_tx_queue)); - LASSERT (list_empty (&peer->ksnp_zc_req_list)); + LASSERT(atomic_read(&peer->ksnp_refcount) == 0); + LASSERT(peer->ksnp_accepting == 0); + LASSERT(list_empty(&peer->ksnp_conns)); + LASSERT(list_empty(&peer->ksnp_routes)); + LASSERT(list_empty(&peer->ksnp_tx_queue)); + LASSERT(list_empty(&peer->ksnp_zc_req_list)); - LIBCFS_FREE (peer, sizeof (*peer)); + LIBCFS_FREE(peer, sizeof(*peer)); /* NB a peer's connections and routes keep a reference on their peer * until they are destroyed, so we can be assured that _all_ state to @@ -173,17 +173,17 @@ ksocknal_destroy_peer (ksock_peer_t *peer) } ksock_peer_t * 
-ksocknal_find_peer_locked (lnet_ni_t *ni, lnet_process_id_t id) +ksocknal_find_peer_locked(lnet_ni_t *ni, lnet_process_id_t id) { struct list_head *peer_list = ksocknal_nid2peerlist(id.nid); struct list_head *tmp; ksock_peer_t *peer; - list_for_each (tmp, peer_list) { + list_for_each(tmp, peer_list) { - peer = list_entry (tmp, ksock_peer_t, ksnp_list); + peer = list_entry(tmp, ksock_peer_t, ksnp_list); - LASSERT (!peer->ksnp_closing); + LASSERT(!peer->ksnp_closing); if (peer->ksnp_ni != ni) continue; @@ -201,7 +201,7 @@ ksocknal_find_peer_locked (lnet_ni_t *ni, lnet_process_id_t id) } ksock_peer_t * -ksocknal_find_peer (lnet_ni_t *ni, lnet_process_id_t id) +ksocknal_find_peer(lnet_ni_t *ni, lnet_process_id_t id) { ksock_peer_t *peer; @@ -215,37 +215,37 @@ ksocknal_find_peer (lnet_ni_t *ni, lnet_process_id_t id) } static void -ksocknal_unlink_peer_locked (ksock_peer_t *peer) +ksocknal_unlink_peer_locked(ksock_peer_t *peer) { int i; __u32 ip; ksock_interface_t *iface; for (i = 0; i < peer->ksnp_n_passive_ips; i++) { - LASSERT (i < LNET_MAX_INTERFACES); + LASSERT(i < LNET_MAX_INTERFACES); ip = peer->ksnp_passive_ips[i]; iface = ksocknal_ip2iface(peer->ksnp_ni, ip); /* All IPs in peer->ksnp_passive_ips[] come from the * interface list, therefore the call must succeed. */ - LASSERT (iface != NULL); + LASSERT(iface != NULL); CDEBUG(D_NET, "peer=%p iface=%p ksni_nroutes=%d\n", peer, iface, iface->ksni_nroutes); iface->ksni_npeers--; } - LASSERT (list_empty(&peer->ksnp_conns)); - LASSERT (list_empty(&peer->ksnp_routes)); - LASSERT (!peer->ksnp_closing); + LASSERT(list_empty(&peer->ksnp_conns)); + LASSERT(list_empty(&peer->ksnp_routes)); + LASSERT(!peer->ksnp_closing); peer->ksnp_closing = 1; - list_del (&peer->ksnp_list); + list_del(&peer->ksnp_list); /* lose peerlist's ref */ ksocknal_peer_decref(peer); } static int -ksocknal_get_peer_info (lnet_ni_t *ni, int index, +ksocknal_get_peer_info(lnet_ni_t *ni, int index, lnet_process_id_t *id, __u32 *myip, __u32 *peer_ip, int *port, int *conn_count, int *share_count) { @@ -261,8 +261,8 @@ ksocknal_get_peer_info (lnet_ni_t *ni, int index, for (i = 0; i < ksocknal_data.ksnd_peer_hash_size; i++) { - list_for_each (ptmp, &ksocknal_data.ksnd_peers[i]) { - peer = list_entry (ptmp, ksock_peer_t, ksnp_list); + list_for_each(ptmp, &ksocknal_data.ksnd_peers[i]) { + peer = list_entry(ptmp, ksock_peer_t, ksnp_list); if (peer->ksnp_ni != ni) continue; @@ -296,7 +296,7 @@ ksocknal_get_peer_info (lnet_ni_t *ni, int index, goto out; } - list_for_each (rtmp, &peer->ksnp_routes) { + list_for_each(rtmp, &peer->ksnp_routes) { if (index-- > 0) continue; @@ -364,17 +364,17 @@ ksocknal_associate_route_conn_locked(ksock_route_t *route, ksock_conn_t *conn) } static void -ksocknal_add_route_locked (ksock_peer_t *peer, ksock_route_t *route) +ksocknal_add_route_locked(ksock_peer_t *peer, ksock_route_t *route) { struct list_head *tmp; ksock_conn_t *conn; ksock_route_t *route2; - LASSERT (!peer->ksnp_closing); - LASSERT (route->ksnr_peer == NULL); - LASSERT (!route->ksnr_scheduled); - LASSERT (!route->ksnr_connecting); - LASSERT (route->ksnr_connected == 0); + LASSERT(!peer->ksnp_closing); + LASSERT(route->ksnr_peer == NULL); + LASSERT(!route->ksnr_scheduled); + LASSERT(!route->ksnr_connecting); + LASSERT(route->ksnr_connected == 0); /* LASSERT(unique) */ list_for_each(tmp, &peer->ksnp_routes) { @@ -405,7 +405,7 @@ ksocknal_add_route_locked (ksock_peer_t *peer, ksock_route_t *route) } static void -ksocknal_del_route_locked (ksock_route_t *route) 
+ksocknal_del_route_locked(ksock_route_t *route) { ksock_peer_t *peer = route->ksnr_peer; ksock_interface_t *iface; @@ -413,16 +413,16 @@ ksocknal_del_route_locked (ksock_route_t *route) struct list_head *ctmp; struct list_head *cnxt; - LASSERT (!route->ksnr_deleted); + LASSERT(!route->ksnr_deleted); /* Close associated conns */ - list_for_each_safe (ctmp, cnxt, &peer->ksnp_conns) { + list_for_each_safe(ctmp, cnxt, &peer->ksnp_conns) { conn = list_entry(ctmp, ksock_conn_t, ksnc_list); if (conn->ksnc_route != route) continue; - ksocknal_close_conn_locked (conn, 0); + ksocknal_close_conn_locked(conn, 0); } if (route->ksnr_myipaddr != 0) { @@ -433,19 +433,19 @@ ksocknal_del_route_locked (ksock_route_t *route) } route->ksnr_deleted = 1; - list_del (&route->ksnr_list); + list_del(&route->ksnr_list); ksocknal_route_decref(route); /* drop peer's ref */ - if (list_empty (&peer->ksnp_routes) && - list_empty (&peer->ksnp_conns)) { + if (list_empty(&peer->ksnp_routes) && + list_empty(&peer->ksnp_conns)) { /* I've just removed the last route to a peer with no active * connections */ - ksocknal_unlink_peer_locked (peer); + ksocknal_unlink_peer_locked(peer); } } int -ksocknal_add_peer (lnet_ni_t *ni, lnet_process_id_t id, __u32 ipaddr, int port) +ksocknal_add_peer(lnet_ni_t *ni, lnet_process_id_t id, __u32 ipaddr, int port) { struct list_head *tmp; ksock_peer_t *peer; @@ -463,7 +463,7 @@ ksocknal_add_peer (lnet_ni_t *ni, lnet_process_id_t id, __u32 ipaddr, int port) if (rc != 0) return rc; - route = ksocknal_create_route (ipaddr, port); + route = ksocknal_create_route(ipaddr, port); if (route == NULL) { ksocknal_peer_decref(peer); return -ENOMEM; @@ -472,20 +472,20 @@ ksocknal_add_peer (lnet_ni_t *ni, lnet_process_id_t id, __u32 ipaddr, int port) write_lock_bh(&ksocknal_data.ksnd_global_lock); /* always called with a ref on ni, so shutdown can't have started */ - LASSERT (((ksock_net_t *) ni->ni_data)->ksnn_shutdown == 0); + LASSERT(((ksock_net_t *) ni->ni_data)->ksnn_shutdown == 0); - peer2 = ksocknal_find_peer_locked (ni, id); + peer2 = ksocknal_find_peer_locked(ni, id); if (peer2 != NULL) { ksocknal_peer_decref(peer); peer = peer2; } else { /* peer table takes my ref on peer */ - list_add_tail (&peer->ksnp_list, - ksocknal_nid2peerlist (id.nid)); + list_add_tail(&peer->ksnp_list, + ksocknal_nid2peerlist(id.nid)); } route2 = NULL; - list_for_each (tmp, &peer->ksnp_routes) { + list_for_each(tmp, &peer->ksnp_routes) { route2 = list_entry(tmp, ksock_route_t, ksnr_list); if (route2->ksnr_ipaddr == ipaddr) @@ -507,7 +507,7 @@ ksocknal_add_peer (lnet_ni_t *ni, lnet_process_id_t id, __u32 ipaddr, int port) } static void -ksocknal_del_peer_locked (ksock_peer_t *peer, __u32 ip) +ksocknal_del_peer_locked(ksock_peer_t *peer, __u32 ip) { ksock_conn_t *conn; ksock_route_t *route; @@ -515,12 +515,12 @@ ksocknal_del_peer_locked (ksock_peer_t *peer, __u32 ip) struct list_head *nxt; int nshared; - LASSERT (!peer->ksnp_closing); + LASSERT(!peer->ksnp_closing); /* Extra ref prevents peer disappearing until I'm done with it */ ksocknal_peer_addref(peer); - list_for_each_safe (tmp, nxt, &peer->ksnp_routes) { + list_for_each_safe(tmp, nxt, &peer->ksnp_routes) { route = list_entry(tmp, ksock_route_t, ksnr_list); /* no match */ @@ -529,11 +529,11 @@ ksocknal_del_peer_locked (ksock_peer_t *peer, __u32 ip) route->ksnr_share_count = 0; /* This deletes associated conns too */ - ksocknal_del_route_locked (route); + ksocknal_del_route_locked(route); } nshared = 0; - list_for_each_safe (tmp, nxt, &peer->ksnp_routes) { + 
list_for_each_safe(tmp, nxt, &peer->ksnp_routes) { route = list_entry(tmp, ksock_route_t, ksnr_list); nshared += route->ksnr_share_count; } @@ -542,15 +542,15 @@ ksocknal_del_peer_locked (ksock_peer_t *peer, __u32 ip) /* remove everything else if there are no explicit entries * left */ - list_for_each_safe (tmp, nxt, &peer->ksnp_routes) { + list_for_each_safe(tmp, nxt, &peer->ksnp_routes) { route = list_entry(tmp, ksock_route_t, ksnr_list); /* we should only be removing auto-entries */ LASSERT(route->ksnr_share_count == 0); - ksocknal_del_route_locked (route); + ksocknal_del_route_locked(route); } - list_for_each_safe (tmp, nxt, &peer->ksnp_conns) { + list_for_each_safe(tmp, nxt, &peer->ksnp_conns) { conn = list_entry(tmp, ksock_conn_t, ksnc_list); ksocknal_close_conn_locked(conn, 0); @@ -562,9 +562,9 @@ ksocknal_del_peer_locked (ksock_peer_t *peer, __u32 ip) } static int -ksocknal_del_peer (lnet_ni_t *ni, lnet_process_id_t id, __u32 ip) +ksocknal_del_peer(lnet_ni_t *ni, lnet_process_id_t id, __u32 ip) { - LIST_HEAD (zombies); + LIST_HEAD(zombies); struct list_head *ptmp; struct list_head *pnxt; ksock_peer_t *peer; @@ -583,9 +583,9 @@ ksocknal_del_peer (lnet_ni_t *ni, lnet_process_id_t id, __u32 ip) } for (i = lo; i <= hi; i++) { - list_for_each_safe (ptmp, pnxt, + list_for_each_safe(ptmp, pnxt, &ksocknal_data.ksnd_peers[i]) { - peer = list_entry (ptmp, ksock_peer_t, ksnp_list); + peer = list_entry(ptmp, ksock_peer_t, ksnp_list); if (peer->ksnp_ni != ni) continue; @@ -596,12 +596,12 @@ ksocknal_del_peer (lnet_ni_t *ni, lnet_process_id_t id, __u32 ip) ksocknal_peer_addref(peer); /* a ref for me... */ - ksocknal_del_peer_locked (peer, ip); + ksocknal_del_peer_locked(peer, ip); if (peer->ksnp_closing && !list_empty(&peer->ksnp_tx_queue)) { - LASSERT (list_empty(&peer->ksnp_conns)); - LASSERT (list_empty(&peer->ksnp_routes)); + LASSERT(list_empty(&peer->ksnp_conns)); + LASSERT(list_empty(&peer->ksnp_routes)); list_splice_init(&peer->ksnp_tx_queue, &zombies); @@ -621,7 +621,7 @@ ksocknal_del_peer (lnet_ni_t *ni, lnet_process_id_t id, __u32 ip) } static ksock_conn_t * -ksocknal_get_conn_by_idx (lnet_ni_t *ni, int index) +ksocknal_get_conn_by_idx(lnet_ni_t *ni, int index) { ksock_peer_t *peer; struct list_head *ptmp; @@ -632,19 +632,19 @@ ksocknal_get_conn_by_idx (lnet_ni_t *ni, int index) read_lock(&ksocknal_data.ksnd_global_lock); for (i = 0; i < ksocknal_data.ksnd_peer_hash_size; i++) { - list_for_each (ptmp, &ksocknal_data.ksnd_peers[i]) { - peer = list_entry (ptmp, ksock_peer_t, ksnp_list); + list_for_each(ptmp, &ksocknal_data.ksnd_peers[i]) { + peer = list_entry(ptmp, ksock_peer_t, ksnp_list); - LASSERT (!peer->ksnp_closing); + LASSERT(!peer->ksnp_closing); if (peer->ksnp_ni != ni) continue; - list_for_each (ctmp, &peer->ksnp_conns) { + list_for_each(ctmp, &peer->ksnp_conns) { if (index-- > 0) continue; - conn = list_entry (ctmp, ksock_conn_t, + conn = list_entry(ctmp, ksock_conn_t, ksnc_list); ksocknal_conn_addref(conn); read_unlock(&ksocknal_data.ksnd_global_lock); @@ -681,7 +681,7 @@ ksocknal_choose_scheduler_locked(unsigned int cpt) } static int -ksocknal_local_ipvec (lnet_ni_t *ni, __u32 *ipaddrs) +ksocknal_local_ipvec(lnet_ni_t *ni, __u32 *ipaddrs) { ksock_net_t *net = ni->ni_data; int i; @@ -690,7 +690,7 @@ ksocknal_local_ipvec (lnet_ni_t *ni, __u32 *ipaddrs) read_lock(&ksocknal_data.ksnd_global_lock); nip = net->ksnn_ninterfaces; - LASSERT (nip <= LNET_MAX_INTERFACES); + LASSERT(nip <= LNET_MAX_INTERFACES); /* Only offer interfaces for additional connections if I have * more than 
one. */ @@ -701,7 +701,7 @@ ksocknal_local_ipvec (lnet_ni_t *ni, __u32 *ipaddrs) for (i = 0; i < nip; i++) { ipaddrs[i] = net->ksnn_interfaces[i].ksni_ipaddr; - LASSERT (ipaddrs[i] != 0); + LASSERT(ipaddrs[i] != 0); } read_unlock(&ksocknal_data.ksnd_global_lock); @@ -709,7 +709,7 @@ ksocknal_local_ipvec (lnet_ni_t *ni, __u32 *ipaddrs) } static int -ksocknal_match_peerip (ksock_interface_t *iface, __u32 *ips, int nips) +ksocknal_match_peerip(ksock_interface_t *iface, __u32 *ips, int nips) { int best_netmatch = 0; int best_xor = 0; @@ -722,7 +722,7 @@ ksocknal_match_peerip (ksock_interface_t *iface, __u32 *ips, int nips) if (ips[i] == 0) continue; - this_xor = (ips[i] ^ iface->ksni_ipaddr); + this_xor = ips[i] ^ iface->ksni_ipaddr; this_netmatch = ((this_xor & iface->ksni_netmask) == 0) ? 1 : 0; if (!(best < 0 || @@ -736,7 +736,7 @@ ksocknal_match_peerip (ksock_interface_t *iface, __u32 *ips, int nips) best_xor = this_xor; } - LASSERT (best >= 0); + LASSERT(best >= 0); return best; } @@ -767,8 +767,8 @@ ksocknal_select_ips(ksock_peer_t *peer, __u32 *peerips, int n_peerips) write_lock_bh(global_lock); - LASSERT (n_peerips <= LNET_MAX_INTERFACES); - LASSERT (net->ksnn_ninterfaces <= LNET_MAX_INTERFACES); + LASSERT(n_peerips <= LNET_MAX_INTERFACES); + LASSERT(net->ksnn_ninterfaces <= LNET_MAX_INTERFACES); /* Only match interfaces for additional connections * if I have > 1 interface */ @@ -791,7 +791,7 @@ ksocknal_select_ips(ksock_peer_t *peer, __u32 *peerips, int n_peerips) } else { /* choose a new interface */ - LASSERT (i == peer->ksnp_n_passive_ips); + LASSERT(i == peer->ksnp_n_passive_ips); best_iface = NULL; best_netmatch = 0; @@ -809,7 +809,7 @@ ksocknal_select_ips(ksock_peer_t *peer, __u32 *peerips, int n_peerips) continue; k = ksocknal_match_peerip(iface, peerips, n_peerips); - xor = (ip ^ peerips[k]); + xor = ip ^ peerips[k]; this_netmatch = ((xor & iface->ksni_netmask) == 0) ? 
1 : 0; if (!(best_iface == NULL || @@ -874,7 +874,7 @@ ksocknal_create_routes(ksock_peer_t *peer, int port, return; } - LASSERT (npeer_ipaddrs <= LNET_MAX_INTERFACES); + LASSERT(npeer_ipaddrs <= LNET_MAX_INTERFACES); for (i = 0; i < npeer_ipaddrs; i++) { if (newroute != NULL) { @@ -911,7 +911,7 @@ ksocknal_create_routes(ksock_peer_t *peer, int port, best_nroutes = 0; best_netmatch = 0; - LASSERT (net->ksnn_ninterfaces <= LNET_MAX_INTERFACES); + LASSERT(net->ksnn_ninterfaces <= LNET_MAX_INTERFACES); /* Select interface to connect from */ for (j = 0; j < net->ksnn_ninterfaces; j++) { @@ -961,7 +961,7 @@ ksocknal_create_routes(ksock_peer_t *peer, int port, } int -ksocknal_accept (lnet_ni_t *ni, struct socket *sock) +ksocknal_accept(lnet_ni_t *ni, struct socket *sock) { ksock_connreq_t *cr; int rc; @@ -969,7 +969,7 @@ ksocknal_accept (lnet_ni_t *ni, struct socket *sock) int peer_port; rc = libcfs_sock_getaddr(sock, 1, &peer_ip, &peer_port); - LASSERT (rc == 0); /* we succeeded before */ + LASSERT(rc == 0); /* we succeeded before */ LIBCFS_ALLOC(cr, sizeof(*cr)); if (cr == NULL) { @@ -992,11 +992,11 @@ ksocknal_accept (lnet_ni_t *ni, struct socket *sock) } static int -ksocknal_connecting (ksock_peer_t *peer, __u32 ipaddr) +ksocknal_connecting(ksock_peer_t *peer, __u32 ipaddr) { ksock_route_t *route; - list_for_each_entry (route, &peer->ksnp_routes, ksnr_list) { + list_for_each_entry(route, &peer->ksnp_routes, ksnr_list) { if (route->ksnr_ipaddr == ipaddr) return route->ksnr_connecting; @@ -1005,11 +1005,11 @@ ksocknal_connecting (ksock_peer_t *peer, __u32 ipaddr) } int -ksocknal_create_conn (lnet_ni_t *ni, ksock_route_t *route, +ksocknal_create_conn(lnet_ni_t *ni, ksock_route_t *route, struct socket *sock, int type) { rwlock_t *global_lock = &ksocknal_data.ksnd_global_lock; - LIST_HEAD (zombies); + LIST_HEAD(zombies); lnet_process_id_t peerid; struct list_head *tmp; __u64 incarnation; @@ -1028,7 +1028,7 @@ ksocknal_create_conn (lnet_ni_t *ni, ksock_route_t *route, active = (route != NULL); - LASSERT (active == (type != SOCKLND_CONN_NONE)); + LASSERT(active == (type != SOCKLND_CONN_NONE)); LIBCFS_ALLOC(conn, sizeof(*conn)); if (conn == NULL) { @@ -1041,19 +1041,19 @@ ksocknal_create_conn (lnet_ni_t *ni, ksock_route_t *route, conn->ksnc_sock = sock; /* 2 ref, 1 for conn, another extra ref prevents socket * being closed before establishment of connection */ - atomic_set (&conn->ksnc_sock_refcount, 2); + atomic_set(&conn->ksnc_sock_refcount, 2); conn->ksnc_type = type; ksocknal_lib_save_callback(sock, conn); - atomic_set (&conn->ksnc_conn_refcount, 1); /* 1 ref for me */ + atomic_set(&conn->ksnc_conn_refcount, 1); /* 1 ref for me */ conn->ksnc_rx_ready = 0; conn->ksnc_rx_scheduled = 0; - INIT_LIST_HEAD (&conn->ksnc_tx_queue); + INIT_LIST_HEAD(&conn->ksnc_tx_queue); conn->ksnc_tx_ready = 0; conn->ksnc_tx_scheduled = 0; conn->ksnc_tx_carrier = NULL; - atomic_set (&conn->ksnc_tx_nob, 0); + atomic_set(&conn->ksnc_tx_nob, 0); LIBCFS_ALLOC(hello, offsetof(ksock_hello_msg_t, kshm_ips[LNET_MAX_INTERFACES])); @@ -1063,7 +1063,7 @@ ksocknal_create_conn (lnet_ni_t *ni, ksock_route_t *route, } /* stash conn's local and remote addrs */ - rc = ksocknal_lib_get_conn_addrs (conn); + rc = ksocknal_lib_get_conn_addrs(conn); if (rc != 0) goto failed_1; @@ -1094,7 +1094,7 @@ ksocknal_create_conn (lnet_ni_t *ni, ksock_route_t *route, #endif } - rc = ksocknal_send_hello (ni, conn, peerid.nid, hello); + rc = ksocknal_send_hello(ni, conn, peerid.nid, hello); if (rc != 0) goto failed_1; } else { @@ -1105,13 +1105,13 @@ 
ksocknal_create_conn (lnet_ni_t *ni, ksock_route_t *route, conn->ksnc_proto = NULL; } - rc = ksocknal_recv_hello (ni, conn, hello, &peerid, &incarnation); + rc = ksocknal_recv_hello(ni, conn, hello, &peerid, &incarnation); if (rc < 0) goto failed_1; - LASSERT (rc == 0 || active); - LASSERT (conn->ksnc_proto != NULL); - LASSERT (peerid.nid != LNET_NID_ANY); + LASSERT(rc == 0 || active); + LASSERT(conn->ksnc_proto != NULL); + LASSERT(peerid.nid != LNET_NID_ANY); cpt = lnet_cpt_of_nid(peerid.nid); @@ -1126,7 +1126,7 @@ ksocknal_create_conn (lnet_ni_t *ni, ksock_route_t *route, write_lock_bh(global_lock); /* called with a ref on ni, so shutdown can't have started */ - LASSERT (((ksock_net_t *) ni->ni_data)->ksnn_shutdown == 0); + LASSERT(((ksock_net_t *) ni->ni_data)->ksnn_shutdown == 0); peer2 = ksocknal_find_peer_locked(ni, peerid); if (peer2 == NULL) { @@ -1166,7 +1166,7 @@ ksocknal_create_conn (lnet_ni_t *ni, ksock_route_t *route, * NB recv_hello may have returned EPROTO to signal my peer * wants a different protocol than the one I asked for. */ - LASSERT (list_empty(&peer->ksnp_conns)); + LASSERT(list_empty(&peer->ksnp_conns)); peer->ksnp_proto = conn->ksnc_proto; peer->ksnp_incarnation = incarnation; @@ -1211,7 +1211,7 @@ ksocknal_create_conn (lnet_ni_t *ni, ksock_route_t *route, /* Reply on a passive connection attempt so the peer * realises we're connected. */ - LASSERT (rc == 0); + LASSERT(rc == 0); if (!active) rc = EALREADY; @@ -1235,7 +1235,7 @@ ksocknal_create_conn (lnet_ni_t *ni, ksock_route_t *route, * create an association. This allows incoming connections created * by routes in my peer to match my own route entries so I don't * continually create duplicate routes. */ - list_for_each (tmp, &peer->ksnp_routes) { + list_for_each(tmp, &peer->ksnp_routes) { route = list_entry(tmp, ksock_route_t, ksnr_list); if (route->ksnr_ipaddr != conn->ksnc_ipaddr) @@ -1260,7 +1260,7 @@ ksocknal_create_conn (lnet_ni_t *ni, ksock_route_t *route, conn->ksnc_tx_deadline = cfs_time_shift(*ksocknal_tunables.ksnd_timeout); mb(); /* order with adding to peer's conn list */ - list_add (&conn->ksnc_list, &peer->ksnp_conns); + list_add(&conn->ksnc_list, &peer->ksnp_conns); ksocknal_conn_addref(conn); ksocknal_new_packet(conn, 0); @@ -1272,8 +1272,8 @@ ksocknal_create_conn (lnet_ni_t *ni, ksock_route_t *route, if (conn->ksnc_proto->pro_match_tx(conn, tx, tx->tx_nonblk) == SOCKNAL_MATCH_NO) continue; - list_del (&tx->tx_list); - ksocknal_queue_tx_locked (tx, conn); + list_del(&tx->tx_list); + ksocknal_queue_tx_locked(tx, conn); } write_unlock_bh(global_lock); @@ -1343,8 +1343,8 @@ ksocknal_create_conn (lnet_ni_t *ni, ksock_route_t *route, failed_2: if (!peer->ksnp_closing && - list_empty (&peer->ksnp_conns) && - list_empty (&peer->ksnp_routes)) { + list_empty(&peer->ksnp_conns) && + list_empty(&peer->ksnp_routes)) { list_add(&zombies, &peer->ksnp_tx_queue); list_del_init(&peer->ksnp_tx_queue); ksocknal_unlink_peer_locked(peer); @@ -1383,7 +1383,7 @@ ksocknal_create_conn (lnet_ni_t *ni, ksock_route_t *route, LIBCFS_FREE(hello, offsetof(ksock_hello_msg_t, kshm_ips[LNET_MAX_INTERFACES])); - LIBCFS_FREE (conn, sizeof(*conn)); + LIBCFS_FREE(conn, sizeof(*conn)); failed_0: libcfs_sock_release(sock); @@ -1391,7 +1391,7 @@ ksocknal_create_conn (lnet_ni_t *ni, ksock_route_t *route, } void -ksocknal_close_conn_locked (ksock_conn_t *conn, int error) +ksocknal_close_conn_locked(ksock_conn_t *conn, int error) { /* This just does the immmediate housekeeping, and queues the * connection for the reaper to terminate. 
@@ -1401,18 +1401,18 @@ ksocknal_close_conn_locked (ksock_conn_t *conn, int error) ksock_conn_t *conn2; struct list_head *tmp; - LASSERT (peer->ksnp_error == 0); - LASSERT (!conn->ksnc_closing); + LASSERT(peer->ksnp_error == 0); + LASSERT(!conn->ksnc_closing); conn->ksnc_closing = 1; /* ksnd_deathrow_conns takes over peer's ref */ - list_del (&conn->ksnc_list); + list_del(&conn->ksnc_list); route = conn->ksnc_route; if (route != NULL) { /* dissociate conn from route... */ - LASSERT (!route->ksnr_deleted); - LASSERT ((route->ksnr_connected & (1 << conn->ksnc_type)) != 0); + LASSERT(!route->ksnr_deleted); + LASSERT((route->ksnr_connected & (1 << conn->ksnc_type)) != 0); conn2 = NULL; list_for_each(tmp, &peer->ksnp_conns) { @@ -1431,19 +1431,19 @@ ksocknal_close_conn_locked (ksock_conn_t *conn, int error) #if 0 /* irrelevant with only eager routes */ /* make route least favourite */ - list_del (&route->ksnr_list); - list_add_tail (&route->ksnr_list, &peer->ksnp_routes); + list_del(&route->ksnr_list); + list_add_tail(&route->ksnr_list, &peer->ksnp_routes); #endif ksocknal_route_decref(route); /* drop conn's ref on route */ } - if (list_empty (&peer->ksnp_conns)) { + if (list_empty(&peer->ksnp_conns)) { /* No more connections to this peer */ if (!list_empty(&peer->ksnp_tx_queue)) { ksock_tx_t *tx; - LASSERT (conn->ksnc_proto == &ksocknal_protocol_v3x); + LASSERT(conn->ksnc_proto == &ksocknal_protocol_v3x); /* throw them to the last connection..., * these TXs will be send to /dev/null by scheduler */ @@ -1460,10 +1460,10 @@ ksocknal_close_conn_locked (ksock_conn_t *conn, int error) peer->ksnp_proto = NULL; /* renegotiate protocol version */ peer->ksnp_error = error; /* stash last conn close reason */ - if (list_empty (&peer->ksnp_routes)) { + if (list_empty(&peer->ksnp_routes)) { /* I've just closed last conn belonging to a * peer with no routes to it */ - ksocknal_unlink_peer_locked (peer); + ksocknal_unlink_peer_locked(peer); } } @@ -1477,7 +1477,7 @@ ksocknal_close_conn_locked (ksock_conn_t *conn, int error) } void -ksocknal_peer_failed (ksock_peer_t *peer) +ksocknal_peer_failed(ksock_peer_t *peer) { int notify = 0; unsigned long last_alive = 0; @@ -1499,7 +1499,7 @@ ksocknal_peer_failed (ksock_peer_t *peer) read_unlock(&ksocknal_data.ksnd_global_lock); if (notify) - lnet_notify (peer->ksnp_ni, peer->ksnp_id.nid, 0, + lnet_notify(peer->ksnp_ni, peer->ksnp_id.nid, 0, last_alive); } @@ -1509,11 +1509,11 @@ ksocknal_finalize_zcreq(ksock_conn_t *conn) ksock_peer_t *peer = conn->ksnc_peer; ksock_tx_t *tx; ksock_tx_t *tmp; - LIST_HEAD (zlist); + LIST_HEAD(zlist); /* NB safe to finalize TXs because closing of socket will * abort all buffered data */ - LASSERT (conn->ksnc_sock == NULL); + LASSERT(conn->ksnc_sock == NULL); spin_lock(&peer->ksnp_lock); @@ -1521,7 +1521,7 @@ ksocknal_finalize_zcreq(ksock_conn_t *conn) if (tx->tx_conn != conn) continue; - LASSERT (tx->tx_msg.ksm_zc_cookies[0] != 0); + LASSERT(tx->tx_msg.ksm_zc_cookies[0] != 0); tx->tx_msg.ksm_zc_cookies[0] = 0; tx->tx_zc_aborted = 1; /* mark it as not-acked */ @@ -1540,7 +1540,7 @@ ksocknal_finalize_zcreq(ksock_conn_t *conn) } void -ksocknal_terminate_conn (ksock_conn_t *conn) +ksocknal_terminate_conn(ksock_conn_t *conn) { /* This gets called by the reaper (guaranteed thread context) to * disengage the socket from its callbacks and close it. 
@@ -1560,13 +1560,13 @@ ksocknal_terminate_conn (ksock_conn_t *conn) if (!conn->ksnc_tx_scheduled && !list_empty(&conn->ksnc_tx_queue)) { - list_add_tail (&conn->ksnc_tx_list, + list_add_tail(&conn->ksnc_tx_list, &sched->kss_tx_conns); conn->ksnc_tx_scheduled = 1; /* extra ref for scheduler */ ksocknal_conn_addref(conn); - wake_up (&sched->kss_waitq); + wake_up(&sched->kss_waitq); } spin_unlock_bh(&sched->kss_lock); @@ -1582,7 +1582,7 @@ ksocknal_terminate_conn (ksock_conn_t *conn) if (peer->ksnp_error != 0) { /* peer's last conn closed in error */ - LASSERT (list_empty (&peer->ksnp_conns)); + LASSERT(list_empty(&peer->ksnp_conns)); failed = 1; peer->ksnp_error = 0; /* avoid multiple notifications */ } @@ -1601,7 +1601,7 @@ ksocknal_terminate_conn (ksock_conn_t *conn) } void -ksocknal_queue_zombie_conn (ksock_conn_t *conn) +ksocknal_queue_zombie_conn(ksock_conn_t *conn) { /* Queue the conn for the reaper to destroy */ @@ -1615,20 +1615,20 @@ ksocknal_queue_zombie_conn (ksock_conn_t *conn) } void -ksocknal_destroy_conn (ksock_conn_t *conn) +ksocknal_destroy_conn(ksock_conn_t *conn) { unsigned long last_rcv; /* Final coup-de-grace of the reaper */ - CDEBUG (D_NET, "connection %p\n", conn); + CDEBUG(D_NET, "connection %p\n", conn); - LASSERT (atomic_read (&conn->ksnc_conn_refcount) == 0); - LASSERT (atomic_read (&conn->ksnc_sock_refcount) == 0); - LASSERT (conn->ksnc_sock == NULL); - LASSERT (conn->ksnc_route == NULL); - LASSERT (!conn->ksnc_tx_scheduled); - LASSERT (!conn->ksnc_rx_scheduled); - LASSERT (list_empty(&conn->ksnc_tx_queue)); + LASSERT(atomic_read(&conn->ksnc_conn_refcount) == 0); + LASSERT(atomic_read(&conn->ksnc_sock_refcount) == 0); + LASSERT(conn->ksnc_sock == NULL); + LASSERT(conn->ksnc_route == NULL); + LASSERT(!conn->ksnc_tx_scheduled); + LASSERT(!conn->ksnc_rx_scheduled); + LASSERT(list_empty(&conn->ksnc_tx_queue)); /* complete current receive if any */ switch (conn->ksnc_rx_state) { @@ -1641,7 +1641,7 @@ ksocknal_destroy_conn (ksock_conn_t *conn) conn->ksnc_rx_nob_wanted, conn->ksnc_rx_nob_left, cfs_duration_sec(cfs_time_sub(cfs_time_current(), last_rcv))); - lnet_finalize (conn->ksnc_peer->ksnp_ni, + lnet_finalize(conn->ksnc_peer->ksnp_ni, conn->ksnc_cookie, -EIO); break; case SOCKNAL_RX_LNET_HEADER: @@ -1665,30 +1665,30 @@ ksocknal_destroy_conn (ksock_conn_t *conn) &conn->ksnc_ipaddr, conn->ksnc_port); break; default: - LBUG (); + LBUG(); break; } ksocknal_peer_decref(conn->ksnc_peer); - LIBCFS_FREE (conn, sizeof (*conn)); + LIBCFS_FREE(conn, sizeof(*conn)); } int -ksocknal_close_peer_conns_locked (ksock_peer_t *peer, __u32 ipaddr, int why) +ksocknal_close_peer_conns_locked(ksock_peer_t *peer, __u32 ipaddr, int why) { ksock_conn_t *conn; struct list_head *ctmp; struct list_head *cnxt; int count = 0; - list_for_each_safe (ctmp, cnxt, &peer->ksnp_conns) { - conn = list_entry (ctmp, ksock_conn_t, ksnc_list); + list_for_each_safe(ctmp, cnxt, &peer->ksnp_conns) { + conn = list_entry(ctmp, ksock_conn_t, ksnc_list); if (ipaddr == 0 || conn->ksnc_ipaddr == ipaddr) { count++; - ksocknal_close_conn_locked (conn, why); + ksocknal_close_conn_locked(conn, why); } } @@ -1696,7 +1696,7 @@ ksocknal_close_peer_conns_locked (ksock_peer_t *peer, __u32 ipaddr, int why) } int -ksocknal_close_conn_and_siblings (ksock_conn_t *conn, int why) +ksocknal_close_conn_and_siblings(ksock_conn_t *conn, int why) { ksock_peer_t *peer = conn->ksnc_peer; __u32 ipaddr = conn->ksnc_ipaddr; @@ -1704,7 +1704,7 @@ ksocknal_close_conn_and_siblings (ksock_conn_t *conn, int why) 
write_lock_bh(&ksocknal_data.ksnd_global_lock); - count = ksocknal_close_peer_conns_locked (peer, ipaddr, why); + count = ksocknal_close_peer_conns_locked(peer, ipaddr, why); write_unlock_bh(&ksocknal_data.ksnd_global_lock); @@ -1712,7 +1712,7 @@ ksocknal_close_conn_and_siblings (ksock_conn_t *conn, int why) } int -ksocknal_close_matching_conns (lnet_process_id_t id, __u32 ipaddr) +ksocknal_close_matching_conns(lnet_process_id_t id, __u32 ipaddr) { ksock_peer_t *peer; struct list_head *ptmp; @@ -1732,16 +1732,16 @@ ksocknal_close_matching_conns (lnet_process_id_t id, __u32 ipaddr) } for (i = lo; i <= hi; i++) { - list_for_each_safe (ptmp, pnxt, + list_for_each_safe(ptmp, pnxt, &ksocknal_data.ksnd_peers[i]) { - peer = list_entry (ptmp, ksock_peer_t, ksnp_list); + peer = list_entry(ptmp, ksock_peer_t, ksnp_list); if (!((id.nid == LNET_NID_ANY || id.nid == peer->ksnp_id.nid) && (id.pid == LNET_PID_ANY || id.pid == peer->ksnp_id.pid))) continue; - count += ksocknal_close_peer_conns_locked (peer, ipaddr, 0); + count += ksocknal_close_peer_conns_locked(peer, ipaddr, 0); } } @@ -1758,7 +1758,7 @@ ksocknal_close_matching_conns (lnet_process_id_t id, __u32 ipaddr) } void -ksocknal_notify (lnet_ni_t *ni, lnet_nid_t gw_nid, int alive) +ksocknal_notify(lnet_ni_t *ni, lnet_nid_t gw_nid, int alive) { /* The router is telling me she's been notified of a change in * gateway state.... */ @@ -1767,12 +1767,12 @@ ksocknal_notify (lnet_ni_t *ni, lnet_nid_t gw_nid, int alive) id.nid = gw_nid; id.pid = LNET_PID_ANY; - CDEBUG (D_NET, "gw %s %s\n", libcfs_nid2str(gw_nid), + CDEBUG(D_NET, "gw %s %s\n", libcfs_nid2str(gw_nid), alive ? "up" : "down"); if (!alive) { /* If the gateway crashed, close all open connections... */ - ksocknal_close_matching_conns (id, 0); + ksocknal_close_matching_conns(id, 0); return; } @@ -1781,7 +1781,7 @@ ksocknal_notify (lnet_ni_t *ni, lnet_nid_t gw_nid, int alive) } void -ksocknal_query (lnet_ni_t *ni, lnet_nid_t nid, unsigned long *when) +ksocknal_query(lnet_ni_t *ni, lnet_nid_t nid, unsigned long *when) { int connect = 1; unsigned long last_alive = 0; @@ -1798,7 +1798,7 @@ ksocknal_query (lnet_ni_t *ni, lnet_nid_t nid, unsigned long *when) ksock_conn_t *conn; int bufnob; - list_for_each (tmp, &peer->ksnp_conns) { + list_for_each(tmp, &peer->ksnp_conns) { conn = list_entry(tmp, ksock_conn_t, ksnc_list); bufnob = conn->ksnc_sock->sk->sk_wmem_queued; @@ -1842,7 +1842,7 @@ ksocknal_query (lnet_ni_t *ni, lnet_nid_t nid, unsigned long *when) } static void -ksocknal_push_peer (ksock_peer_t *peer) +ksocknal_push_peer(ksock_peer_t *peer) { int index; int i; @@ -1855,9 +1855,9 @@ ksocknal_push_peer (ksock_peer_t *peer) i = 0; conn = NULL; - list_for_each (tmp, &peer->ksnp_conns) { + list_for_each(tmp, &peer->ksnp_conns) { if (i++ == index) { - conn = list_entry (tmp, ksock_conn_t, + conn = list_entry(tmp, ksock_conn_t, ksnc_list); ksocknal_conn_addref(conn); break; @@ -1869,13 +1869,13 @@ ksocknal_push_peer (ksock_peer_t *peer) if (conn == NULL) break; - ksocknal_lib_push_conn (conn); + ksocknal_lib_push_conn(conn); ksocknal_conn_decref(conn); } } static int -ksocknal_push (lnet_ni_t *ni, lnet_process_id_t id) +ksocknal_push(lnet_ni_t *ni, lnet_process_id_t id) { ksock_peer_t *peer; struct list_head *tmp; @@ -1891,7 +1891,7 @@ ksocknal_push (lnet_ni_t *ni, lnet_process_id_t id) index = 0; peer = NULL; - list_for_each (tmp, &ksocknal_data.ksnd_peers[i]) { + list_for_each(tmp, &ksocknal_data.ksnd_peers[i]) { peer = list_entry(tmp, ksock_peer_t, ksnp_list); @@ -1913,7 +1913,7 @@ ksocknal_push 
(lnet_ni_t *ni, lnet_process_id_t id) if (peer != NULL) { rc = 0; - ksocknal_push_peer (peer); + ksocknal_push_peer(peer); ksocknal_peer_decref(peer); } } @@ -2005,7 +2005,7 @@ ksocknal_peer_del_interface_locked(ksock_peer_t *peer, __u32 ipaddr) } list_for_each_safe(tmp, nxt, &peer->ksnp_routes) { - route = list_entry (tmp, ksock_route_t, ksnr_list); + route = list_entry(tmp, ksock_route_t, ksnr_list); if (route->ksnr_myipaddr != ipaddr) continue; @@ -2022,7 +2022,7 @@ ksocknal_peer_del_interface_locked(ksock_peer_t *peer, __u32 ipaddr) conn = list_entry(tmp, ksock_conn_t, ksnc_list); if (conn->ksnc_myipaddr == ipaddr) - ksocknal_close_conn_locked (conn, 0); + ksocknal_close_conn_locked(conn, 0); } } @@ -2139,21 +2139,21 @@ ksocknal_ctl(lnet_ni_t *ni, unsigned int cmd, void *arg) case IOC_LIBCFS_ADD_PEER: id.nid = data->ioc_nid; id.pid = LUSTRE_SRV_LNET_PID; - return ksocknal_add_peer (ni, id, + return ksocknal_add_peer(ni, id, data->ioc_u32[0], /* IP */ data->ioc_u32[1]); /* port */ case IOC_LIBCFS_DEL_PEER: id.nid = data->ioc_nid; id.pid = LNET_PID_ANY; - return ksocknal_del_peer (ni, id, + return ksocknal_del_peer(ni, id, data->ioc_u32[0]); /* IP */ case IOC_LIBCFS_GET_CONN: { int txmem; int rxmem; int nagle; - ksock_conn_t *conn = ksocknal_get_conn_by_idx (ni, data->ioc_count); + ksock_conn_t *conn = ksocknal_get_conn_by_idx(ni, data->ioc_count); if (conn == NULL) return -ENOENT; @@ -2177,7 +2177,7 @@ ksocknal_ctl(lnet_ni_t *ni, unsigned int cmd, void *arg) case IOC_LIBCFS_CLOSE_CONNECTION: id.nid = data->ioc_nid; id.pid = LNET_PID_ANY; - return ksocknal_close_matching_conns (id, + return ksocknal_close_matching_conns(id, data->ioc_u32[0]); case IOC_LIBCFS_REGISTER_MYNID: @@ -2202,9 +2202,9 @@ ksocknal_ctl(lnet_ni_t *ni, unsigned int cmd, void *arg) } static void -ksocknal_free_buffers (void) +ksocknal_free_buffers(void) { - LASSERT (atomic_read(&ksocknal_data.ksnd_nactive_txs) == 0); + LASSERT(atomic_read(&ksocknal_data.ksnd_nactive_txs) == 0); if (ksocknal_data.ksnd_sched_info != NULL) { struct ksock_sched_info *info; @@ -2220,8 +2220,8 @@ ksocknal_free_buffers (void) cfs_percpt_free(ksocknal_data.ksnd_sched_info); } - LIBCFS_FREE (ksocknal_data.ksnd_peers, - sizeof (struct list_head) * + LIBCFS_FREE(ksocknal_data.ksnd_peers, + sizeof(struct list_head) * ksocknal_data.ksnd_peer_hash_size); spin_lock(&ksocknal_data.ksnd_tx_lock); @@ -2253,25 +2253,25 @@ ksocknal_base_shutdown(void) int j; CDEBUG(D_MALLOC, "before NAL cleanup: kmem %d\n", - atomic_read (&libcfs_kmemory)); - LASSERT (ksocknal_data.ksnd_nnets == 0); + atomic_read(&libcfs_kmemory)); + LASSERT(ksocknal_data.ksnd_nnets == 0); switch (ksocknal_data.ksnd_init) { default: - LASSERT (0); + LASSERT(0); case SOCKNAL_INIT_ALL: case SOCKNAL_INIT_DATA: - LASSERT (ksocknal_data.ksnd_peers != NULL); + LASSERT(ksocknal_data.ksnd_peers != NULL); for (i = 0; i < ksocknal_data.ksnd_peer_hash_size; i++) { - LASSERT (list_empty (&ksocknal_data.ksnd_peers[i])); + LASSERT(list_empty(&ksocknal_data.ksnd_peers[i])); } LASSERT(list_empty(&ksocknal_data.ksnd_nets)); - LASSERT (list_empty (&ksocknal_data.ksnd_enomem_conns)); - LASSERT (list_empty (&ksocknal_data.ksnd_zombie_conns)); - LASSERT (list_empty (&ksocknal_data.ksnd_connd_connreqs)); - LASSERT (list_empty (&ksocknal_data.ksnd_connd_routes)); + LASSERT(list_empty(&ksocknal_data.ksnd_enomem_conns)); + LASSERT(list_empty(&ksocknal_data.ksnd_zombie_conns)); + LASSERT(list_empty(&ksocknal_data.ksnd_connd_connreqs)); + LASSERT(list_empty(&ksocknal_data.ksnd_connd_routes)); if 
(ksocknal_data.ksnd_sched_info != NULL) { cfs_percpt_for_each(info, i, @@ -2332,13 +2332,13 @@ ksocknal_base_shutdown(void) } CDEBUG(D_MALLOC, "after NAL cleanup: kmem %d\n", - atomic_read (&libcfs_kmemory)); + atomic_read(&libcfs_kmemory)); module_put(THIS_MODULE); } static __u64 -ksocknal_new_incarnation (void) +ksocknal_new_incarnation(void) { /* The incarnation number is the time this module loaded and it @@ -2354,14 +2354,14 @@ ksocknal_base_startup(void) int rc; int i; - LASSERT (ksocknal_data.ksnd_init == SOCKNAL_INIT_NOTHING); - LASSERT (ksocknal_data.ksnd_nnets == 0); + LASSERT(ksocknal_data.ksnd_init == SOCKNAL_INIT_NOTHING); + LASSERT(ksocknal_data.ksnd_nnets == 0); - memset (&ksocknal_data, 0, sizeof (ksocknal_data)); /* zero pointers */ + memset(&ksocknal_data, 0, sizeof(ksocknal_data)); /* zero pointers */ ksocknal_data.ksnd_peer_hash_size = SOCKNAL_PEER_HASH_SIZE; - LIBCFS_ALLOC (ksocknal_data.ksnd_peers, - sizeof (struct list_head) * + LIBCFS_ALLOC(ksocknal_data.ksnd_peers, + sizeof(struct list_head) * ksocknal_data.ksnd_peer_hash_size); if (ksocknal_data.ksnd_peers == NULL) return -ENOMEM; @@ -2373,18 +2373,18 @@ ksocknal_base_startup(void) INIT_LIST_HEAD(&ksocknal_data.ksnd_nets); spin_lock_init(&ksocknal_data.ksnd_reaper_lock); - INIT_LIST_HEAD (&ksocknal_data.ksnd_enomem_conns); - INIT_LIST_HEAD (&ksocknal_data.ksnd_zombie_conns); - INIT_LIST_HEAD (&ksocknal_data.ksnd_deathrow_conns); + INIT_LIST_HEAD(&ksocknal_data.ksnd_enomem_conns); + INIT_LIST_HEAD(&ksocknal_data.ksnd_zombie_conns); + INIT_LIST_HEAD(&ksocknal_data.ksnd_deathrow_conns); init_waitqueue_head(&ksocknal_data.ksnd_reaper_waitq); spin_lock_init(&ksocknal_data.ksnd_connd_lock); - INIT_LIST_HEAD (&ksocknal_data.ksnd_connd_connreqs); - INIT_LIST_HEAD (&ksocknal_data.ksnd_connd_routes); + INIT_LIST_HEAD(&ksocknal_data.ksnd_connd_connreqs); + INIT_LIST_HEAD(&ksocknal_data.ksnd_connd_routes); init_waitqueue_head(&ksocknal_data.ksnd_connd_waitq); spin_lock_init(&ksocknal_data.ksnd_tx_lock); - INIT_LIST_HEAD (&ksocknal_data.ksnd_idle_noop_txs); + INIT_LIST_HEAD(&ksocknal_data.ksnd_idle_noop_txs); /* NB memset above zeros whole of ksocknal_data */ @@ -2465,7 +2465,7 @@ ksocknal_base_startup(void) rc = ksocknal_thread_start(ksocknal_reaper, NULL, "socknal_reaper"); if (rc != 0) { - CERROR ("Can't spawn socknal reaper: %d\n", rc); + CERROR("Can't spawn socknal reaper: %d\n", rc); goto failed; } @@ -2480,7 +2480,7 @@ ksocknal_base_startup(void) } static void -ksocknal_debug_peerhash (lnet_ni_t *ni) +ksocknal_debug_peerhash(lnet_ni_t *ni) { ksock_peer_t *peer = NULL; struct list_head *tmp; @@ -2489,8 +2489,8 @@ ksocknal_debug_peerhash (lnet_ni_t *ni) read_lock(&ksocknal_data.ksnd_global_lock); for (i = 0; i < ksocknal_data.ksnd_peer_hash_size; i++) { - list_for_each (tmp, &ksocknal_data.ksnd_peers[i]) { - peer = list_entry (tmp, ksock_peer_t, ksnp_list); + list_for_each(tmp, &ksocknal_data.ksnd_peers[i]) { + peer = list_entry(tmp, ksock_peer_t, ksnp_list); if (peer->ksnp_ni == ni) break; @@ -2512,7 +2512,7 @@ ksocknal_debug_peerhash (lnet_ni_t *ni) !list_empty(&peer->ksnp_tx_queue), !list_empty(&peer->ksnp_zc_req_list)); - list_for_each (tmp, &peer->ksnp_routes) { + list_for_each(tmp, &peer->ksnp_routes) { route = list_entry(tmp, ksock_route_t, ksnr_list); CWARN("Route: ref %d, schd %d, conn %d, cnted %d, del %d\n", atomic_read(&route->ksnr_refcount), @@ -2520,9 +2520,9 @@ ksocknal_debug_peerhash (lnet_ni_t *ni) route->ksnr_connected, route->ksnr_deleted); } - list_for_each (tmp, &peer->ksnp_conns) { + 
list_for_each(tmp, &peer->ksnp_conns) { conn = list_entry(tmp, ksock_conn_t, ksnc_list); - CWARN ("Conn: ref %d, sref %d, t %d, c %d\n", + CWARN("Conn: ref %d, sref %d, t %d, c %d\n", atomic_read(&conn->ksnc_conn_refcount), atomic_read(&conn->ksnc_sock_refcount), conn->ksnc_type, conn->ksnc_closing); @@ -2534,7 +2534,7 @@ ksocknal_debug_peerhash (lnet_ni_t *ni) } void -ksocknal_shutdown (lnet_ni_t *ni) +ksocknal_shutdown(lnet_ni_t *ni) { ksock_net_t *net = ni->ni_data; int i; @@ -2573,8 +2573,8 @@ ksocknal_shutdown (lnet_ni_t *ni) spin_unlock_bh(&net->ksnn_lock); for (i = 0; i < net->ksnn_ninterfaces; i++) { - LASSERT (net->ksnn_interfaces[i].ksni_npeers == 0); - LASSERT (net->ksnn_interfaces[i].ksni_nroutes == 0); + LASSERT(net->ksnn_interfaces[i].ksni_npeers == 0); + LASSERT(net->ksnn_interfaces[i].ksni_nroutes == 0); } list_del(&net->ksnn_list); @@ -2757,13 +2757,13 @@ ksocknal_net_start_threads(ksock_net_t *net, __u32 *cpts, int ncpts) } int -ksocknal_startup (lnet_ni_t *ni) +ksocknal_startup(lnet_ni_t *ni) { ksock_net_t *net; int rc; int i; - LASSERT (ni->ni_lnd == &the_ksocklnd); + LASSERT(ni->ni_lnd == &the_ksocklnd); if (ksocknal_data.ksnd_init == SOCKNAL_INIT_NOTHING) { rc = ksocknal_base_startup(); @@ -2843,19 +2843,19 @@ ksocknal_startup (lnet_ni_t *ni) static void __exit -ksocknal_module_fini (void) +ksocknal_module_fini(void) { lnet_unregister_lnd(&the_ksocklnd); } static int __init -ksocknal_module_init (void) +ksocknal_module_init(void) { int rc; /* check ksnr_connected/connecting field large enough */ - CLASSERT (SOCKLND_CONN_NTYPES <= 4); - CLASSERT (SOCKLND_CONN_ACK == SOCKLND_CONN_BULK_IN); + CLASSERT(SOCKLND_CONN_NTYPES <= 4); + CLASSERT(SOCKLND_CONN_ACK == SOCKLND_CONN_BULK_IN); /* initialize the_ksocklnd */ the_ksocklnd.lnd_type = SOCKLND; diff --git a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd.h b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd.h index 03488d289c74..c54c9955164e 100644 --- a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd.h +++ b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd.h @@ -406,7 +406,7 @@ ksocknal_route_mask(void) } static inline struct list_head * -ksocknal_nid2peerlist (lnet_nid_t nid) +ksocknal_nid2peerlist(lnet_nid_t nid) { unsigned int hash = ((unsigned int)nid) % ksocknal_data.ksnd_peer_hash_size; @@ -414,25 +414,25 @@ ksocknal_nid2peerlist (lnet_nid_t nid) } static inline void -ksocknal_conn_addref (ksock_conn_t *conn) +ksocknal_conn_addref(ksock_conn_t *conn) { - LASSERT (atomic_read(&conn->ksnc_conn_refcount) > 0); + LASSERT(atomic_read(&conn->ksnc_conn_refcount) > 0); atomic_inc(&conn->ksnc_conn_refcount); } -extern void ksocknal_queue_zombie_conn (ksock_conn_t *conn); +extern void ksocknal_queue_zombie_conn(ksock_conn_t *conn); extern void ksocknal_finalize_zcreq(ksock_conn_t *conn); static inline void -ksocknal_conn_decref (ksock_conn_t *conn) +ksocknal_conn_decref(ksock_conn_t *conn) { - LASSERT (atomic_read(&conn->ksnc_conn_refcount) > 0); + LASSERT(atomic_read(&conn->ksnc_conn_refcount) > 0); if (atomic_dec_and_test(&conn->ksnc_conn_refcount)) ksocknal_queue_zombie_conn(conn); } static inline int -ksocknal_connsock_addref (ksock_conn_t *conn) +ksocknal_connsock_addref(ksock_conn_t *conn) { int rc = -ESHUTDOWN; @@ -448,11 +448,11 @@ ksocknal_connsock_addref (ksock_conn_t *conn) } static inline void -ksocknal_connsock_decref (ksock_conn_t *conn) +ksocknal_connsock_decref(ksock_conn_t *conn) { - LASSERT (atomic_read(&conn->ksnc_sock_refcount) > 0); + LASSERT(atomic_read(&conn->ksnc_sock_refcount) > 0); if 
(atomic_dec_and_test(&conn->ksnc_sock_refcount)) { - LASSERT (conn->ksnc_closing); + LASSERT(conn->ksnc_closing); libcfs_sock_release(conn->ksnc_sock); conn->ksnc_sock = NULL; ksocknal_finalize_zcreq(conn); @@ -460,61 +460,61 @@ ksocknal_connsock_decref (ksock_conn_t *conn) } static inline void -ksocknal_tx_addref (ksock_tx_t *tx) +ksocknal_tx_addref(ksock_tx_t *tx) { - LASSERT (atomic_read(&tx->tx_refcount) > 0); + LASSERT(atomic_read(&tx->tx_refcount) > 0); atomic_inc(&tx->tx_refcount); } -extern void ksocknal_tx_prep (ksock_conn_t *, ksock_tx_t *tx); -extern void ksocknal_tx_done (lnet_ni_t *ni, ksock_tx_t *tx); +extern void ksocknal_tx_prep(ksock_conn_t *, ksock_tx_t *tx); +extern void ksocknal_tx_done(lnet_ni_t *ni, ksock_tx_t *tx); static inline void -ksocknal_tx_decref (ksock_tx_t *tx) +ksocknal_tx_decref(ksock_tx_t *tx) { - LASSERT (atomic_read(&tx->tx_refcount) > 0); + LASSERT(atomic_read(&tx->tx_refcount) > 0); if (atomic_dec_and_test(&tx->tx_refcount)) ksocknal_tx_done(NULL, tx); } static inline void -ksocknal_route_addref (ksock_route_t *route) +ksocknal_route_addref(ksock_route_t *route) { - LASSERT (atomic_read(&route->ksnr_refcount) > 0); + LASSERT(atomic_read(&route->ksnr_refcount) > 0); atomic_inc(&route->ksnr_refcount); } -extern void ksocknal_destroy_route (ksock_route_t *route); +extern void ksocknal_destroy_route(ksock_route_t *route); static inline void -ksocknal_route_decref (ksock_route_t *route) +ksocknal_route_decref(ksock_route_t *route) { - LASSERT (atomic_read (&route->ksnr_refcount) > 0); + LASSERT(atomic_read(&route->ksnr_refcount) > 0); if (atomic_dec_and_test(&route->ksnr_refcount)) - ksocknal_destroy_route (route); + ksocknal_destroy_route(route); } static inline void -ksocknal_peer_addref (ksock_peer_t *peer) +ksocknal_peer_addref(ksock_peer_t *peer) { - LASSERT (atomic_read (&peer->ksnp_refcount) > 0); + LASSERT(atomic_read(&peer->ksnp_refcount) > 0); atomic_inc(&peer->ksnp_refcount); } -extern void ksocknal_destroy_peer (ksock_peer_t *peer); +extern void ksocknal_destroy_peer(ksock_peer_t *peer); static inline void -ksocknal_peer_decref (ksock_peer_t *peer) +ksocknal_peer_decref(ksock_peer_t *peer) { - LASSERT (atomic_read (&peer->ksnp_refcount) > 0); + LASSERT(atomic_read(&peer->ksnp_refcount) > 0); if (atomic_dec_and_test(&peer->ksnp_refcount)) - ksocknal_destroy_peer (peer); + ksocknal_destroy_peer(peer); } -int ksocknal_startup (lnet_ni_t *ni); -void ksocknal_shutdown (lnet_ni_t *ni); +int ksocknal_startup(lnet_ni_t *ni); +void ksocknal_shutdown(lnet_ni_t *ni); int ksocknal_ctl(lnet_ni_t *ni, unsigned int cmd, void *arg); -int ksocknal_send (lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg); +int ksocknal_send(lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg); int ksocknal_recv(lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg, int delayed, unsigned int niov, struct kvec *iov, lnet_kiov_t *kiov, @@ -522,44 +522,44 @@ int ksocknal_recv(lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg, int ksocknal_accept(lnet_ni_t *ni, struct socket *sock); extern int ksocknal_add_peer(lnet_ni_t *ni, lnet_process_id_t id, __u32 ip, int port); -extern ksock_peer_t *ksocknal_find_peer_locked (lnet_ni_t *ni, lnet_process_id_t id); -extern ksock_peer_t *ksocknal_find_peer (lnet_ni_t *ni, lnet_process_id_t id); -extern void ksocknal_peer_failed (ksock_peer_t *peer); -extern int ksocknal_create_conn (lnet_ni_t *ni, ksock_route_t *route, +extern ksock_peer_t *ksocknal_find_peer_locked(lnet_ni_t *ni, lnet_process_id_t id); +extern ksock_peer_t *ksocknal_find_peer(lnet_ni_t 
*ni, lnet_process_id_t id); +extern void ksocknal_peer_failed(ksock_peer_t *peer); +extern int ksocknal_create_conn(lnet_ni_t *ni, ksock_route_t *route, struct socket *sock, int type); -extern void ksocknal_close_conn_locked (ksock_conn_t *conn, int why); -extern void ksocknal_terminate_conn (ksock_conn_t *conn); -extern void ksocknal_destroy_conn (ksock_conn_t *conn); -extern int ksocknal_close_peer_conns_locked (ksock_peer_t *peer, +extern void ksocknal_close_conn_locked(ksock_conn_t *conn, int why); +extern void ksocknal_terminate_conn(ksock_conn_t *conn); +extern void ksocknal_destroy_conn(ksock_conn_t *conn); +extern int ksocknal_close_peer_conns_locked(ksock_peer_t *peer, __u32 ipaddr, int why); -extern int ksocknal_close_conn_and_siblings (ksock_conn_t *conn, int why); -extern int ksocknal_close_matching_conns (lnet_process_id_t id, __u32 ipaddr); +extern int ksocknal_close_conn_and_siblings(ksock_conn_t *conn, int why); +extern int ksocknal_close_matching_conns(lnet_process_id_t id, __u32 ipaddr); extern ksock_conn_t *ksocknal_find_conn_locked(ksock_peer_t *peer, ksock_tx_t *tx, int nonblk); extern int ksocknal_launch_packet(lnet_ni_t *ni, ksock_tx_t *tx, lnet_process_id_t id); extern ksock_tx_t *ksocknal_alloc_tx(int type, int size); -extern void ksocknal_free_tx (ksock_tx_t *tx); +extern void ksocknal_free_tx(ksock_tx_t *tx); extern ksock_tx_t *ksocknal_alloc_tx_noop(__u64 cookie, int nonblk); extern void ksocknal_next_tx_carrier(ksock_conn_t *conn); -extern void ksocknal_queue_tx_locked (ksock_tx_t *tx, ksock_conn_t *conn); -extern void ksocknal_txlist_done (lnet_ni_t *ni, struct list_head *txlist, +extern void ksocknal_queue_tx_locked(ksock_tx_t *tx, ksock_conn_t *conn); +extern void ksocknal_txlist_done(lnet_ni_t *ni, struct list_head *txlist, int error); -extern void ksocknal_notify (lnet_ni_t *ni, lnet_nid_t gw_nid, int alive); -extern void ksocknal_query (struct lnet_ni *ni, lnet_nid_t nid, unsigned long *when); +extern void ksocknal_notify(lnet_ni_t *ni, lnet_nid_t gw_nid, int alive); +extern void ksocknal_query(struct lnet_ni *ni, lnet_nid_t nid, unsigned long *when); extern int ksocknal_thread_start(int (*fn)(void *arg), void *arg, char *name); -extern void ksocknal_thread_fini (void); -extern void ksocknal_launch_all_connections_locked (ksock_peer_t *peer); -extern ksock_route_t *ksocknal_find_connectable_route_locked (ksock_peer_t *peer); -extern ksock_route_t *ksocknal_find_connecting_route_locked (ksock_peer_t *peer); -extern int ksocknal_new_packet (ksock_conn_t *conn, int skip); -extern int ksocknal_scheduler (void *arg); -extern int ksocknal_connd (void *arg); -extern int ksocknal_reaper (void *arg); -extern int ksocknal_send_hello (lnet_ni_t *ni, ksock_conn_t *conn, +extern void ksocknal_thread_fini(void); +extern void ksocknal_launch_all_connections_locked(ksock_peer_t *peer); +extern ksock_route_t *ksocknal_find_connectable_route_locked(ksock_peer_t *peer); +extern ksock_route_t *ksocknal_find_connecting_route_locked(ksock_peer_t *peer); +extern int ksocknal_new_packet(ksock_conn_t *conn, int skip); +extern int ksocknal_scheduler(void *arg); +extern int ksocknal_connd(void *arg); +extern int ksocknal_reaper(void *arg); +extern int ksocknal_send_hello(lnet_ni_t *ni, ksock_conn_t *conn, lnet_nid_t peer_nid, ksock_hello_msg_t *hello); -extern int ksocknal_recv_hello (lnet_ni_t *ni, ksock_conn_t *conn, +extern int ksocknal_recv_hello(lnet_ni_t *ni, ksock_conn_t *conn, ksock_hello_msg_t *hello, lnet_process_id_t *id, __u64 *incarnation); extern void 
ksocknal_read_callback(ksock_conn_t *conn); @@ -569,15 +569,15 @@ extern int ksocknal_lib_zc_capable(ksock_conn_t *conn); extern void ksocknal_lib_save_callback(struct socket *sock, ksock_conn_t *conn); extern void ksocknal_lib_set_callback(struct socket *sock, ksock_conn_t *conn); extern void ksocknal_lib_reset_callback(struct socket *sock, ksock_conn_t *conn); -extern void ksocknal_lib_push_conn (ksock_conn_t *conn); -extern int ksocknal_lib_get_conn_addrs (ksock_conn_t *conn); -extern int ksocknal_lib_setup_sock (struct socket *so); -extern int ksocknal_lib_send_iov (ksock_conn_t *conn, ksock_tx_t *tx); -extern int ksocknal_lib_send_kiov (ksock_conn_t *conn, ksock_tx_t *tx); -extern void ksocknal_lib_eager_ack (ksock_conn_t *conn); -extern int ksocknal_lib_recv_iov (ksock_conn_t *conn); -extern int ksocknal_lib_recv_kiov (ksock_conn_t *conn); -extern int ksocknal_lib_get_conn_tunables (ksock_conn_t *conn, int *txmem, +extern void ksocknal_lib_push_conn(ksock_conn_t *conn); +extern int ksocknal_lib_get_conn_addrs(ksock_conn_t *conn); +extern int ksocknal_lib_setup_sock(struct socket *so); +extern int ksocknal_lib_send_iov(ksock_conn_t *conn, ksock_tx_t *tx); +extern int ksocknal_lib_send_kiov(ksock_conn_t *conn, ksock_tx_t *tx); +extern void ksocknal_lib_eager_ack(ksock_conn_t *conn); +extern int ksocknal_lib_recv_iov(ksock_conn_t *conn); +extern int ksocknal_lib_recv_kiov(ksock_conn_t *conn); +extern int ksocknal_lib_get_conn_tunables(ksock_conn_t *conn, int *txmem, int *rxmem, int *nagle); extern int ksocknal_tunables_init(void); diff --git a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_cb.c b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_cb.c index 92760fe94184..fa7ad883bda9 100644 --- a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_cb.c +++ b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_cb.c @@ -1374,9 +1374,9 @@ ksocknal_sched_cansleep(ksock_sched_t *sched) spin_lock_bh(&sched->kss_lock); - rc = (!ksocknal_data.ksnd_shuttingdown && + rc = !ksocknal_data.ksnd_shuttingdown && list_empty(&sched->kss_rx_conns) && - list_empty(&sched->kss_tx_conns)); + list_empty(&sched->kss_tx_conns); spin_unlock_bh(&sched->kss_lock); return rc; diff --git a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_lib-linux.c b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_lib-linux.c index 66cc509295e5..f5e8ab06070c 100644 --- a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_lib-linux.c +++ b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_lib-linux.c @@ -37,24 +37,24 @@ #include "socklnd.h" int -ksocknal_lib_get_conn_addrs (ksock_conn_t *conn) +ksocknal_lib_get_conn_addrs(ksock_conn_t *conn) { int rc = libcfs_sock_getaddr(conn->ksnc_sock, 1, &conn->ksnc_ipaddr, &conn->ksnc_port); /* Didn't need the {get,put}connsock dance to deref ksnc_sock... 
*/ - LASSERT (!conn->ksnc_closing); + LASSERT(!conn->ksnc_closing); if (rc != 0) { - CERROR ("Error %d getting sock peer IP\n", rc); + CERROR("Error %d getting sock peer IP\n", rc); return rc; } rc = libcfs_sock_getaddr(conn->ksnc_sock, 0, &conn->ksnc_myipaddr, NULL); if (rc != 0) { - CERROR ("Error %d getting sock local IP\n", rc); + CERROR("Error %d getting sock local IP\n", rc); return rc; } @@ -75,7 +75,7 @@ ksocknal_lib_zc_capable(ksock_conn_t *conn) } int -ksocknal_lib_send_iov (ksock_conn_t *conn, ksock_tx_t *tx) +ksocknal_lib_send_iov(ksock_conn_t *conn, ksock_tx_t *tx) { struct socket *sock = conn->ksnc_sock; int nob; @@ -117,7 +117,7 @@ ksocknal_lib_send_iov (ksock_conn_t *conn, ksock_tx_t *tx) } int -ksocknal_lib_send_kiov (ksock_conn_t *conn, ksock_tx_t *tx) +ksocknal_lib_send_kiov(ksock_conn_t *conn, ksock_tx_t *tx) { struct socket *sock = conn->ksnc_sock; lnet_kiov_t *kiov = tx->tx_kiov; @@ -125,7 +125,7 @@ ksocknal_lib_send_kiov (ksock_conn_t *conn, ksock_tx_t *tx) int nob; /* Not NOOP message */ - LASSERT (tx->tx_lnetmsg != NULL); + LASSERT(tx->tx_lnetmsg != NULL); /* NB we can't trust socket ops to either consume our iovs * or leave them alone. */ @@ -185,7 +185,7 @@ ksocknal_lib_send_kiov (ksock_conn_t *conn, ksock_tx_t *tx) } void -ksocknal_lib_eager_ack (ksock_conn_t *conn) +ksocknal_lib_eager_ack(ksock_conn_t *conn) { int opt = 1; struct socket *sock = conn->ksnc_sock; @@ -196,11 +196,11 @@ ksocknal_lib_eager_ack (ksock_conn_t *conn) * peer. */ kernel_setsockopt(sock, SOL_TCP, TCP_QUICKACK, - (char *)&opt, sizeof (opt)); + (char *)&opt, sizeof(opt)); } int -ksocknal_lib_recv_iov (ksock_conn_t *conn) +ksocknal_lib_recv_iov(ksock_conn_t *conn) { #if SOCKNAL_SINGLE_FRAG_RX struct kvec scratch; @@ -223,13 +223,13 @@ ksocknal_lib_recv_iov (ksock_conn_t *conn) /* NB we can't trust socket ops to either consume our iovs * or leave them alone. */ - LASSERT (niov > 0); + LASSERT(niov > 0); for (nob = i = 0; i < niov; i++) { scratchiov[i] = iov[i]; nob += scratchiov[i].iov_len; } - LASSERT (nob <= conn->ksnc_rx_nob_wanted); + LASSERT(nob <= conn->ksnc_rx_nob_wanted); rc = kernel_recvmsg(conn->ksnc_sock, &msg, scratchiov, niov, nob, MSG_DONTWAIT); @@ -243,7 +243,7 @@ ksocknal_lib_recv_iov (ksock_conn_t *conn) if (saved_csum != 0) { /* accumulate checksum */ for (i = 0, sum = rc; sum > 0; i++, sum -= fragnob) { - LASSERT (i < niov); + LASSERT(i < niov); fragnob = iov[i].iov_len; if (fragnob > sum) @@ -278,7 +278,7 @@ ksocknal_lib_kiov_vmap(lnet_kiov_t *kiov, int niov, if (!*ksocknal_tunables.ksnd_zc_recv || pages == NULL) return NULL; - LASSERT (niov <= LNET_MAX_IOV); + LASSERT(niov <= LNET_MAX_IOV); if (niov < 2 || niov < *ksocknal_tunables.ksnd_zc_recv_min_nfrags) @@ -304,7 +304,7 @@ ksocknal_lib_kiov_vmap(lnet_kiov_t *kiov, int niov, } int -ksocknal_lib_recv_kiov (ksock_conn_t *conn) +ksocknal_lib_recv_kiov(ksock_conn_t *conn) { #if SOCKNAL_SINGLE_FRAG_RX || !SOCKNAL_RISK_KMAP_DEADLOCK struct kvec scratch; @@ -348,14 +348,14 @@ ksocknal_lib_recv_kiov (ksock_conn_t *conn) n = niov; } - LASSERT (nob <= conn->ksnc_rx_nob_wanted); + LASSERT(nob <= conn->ksnc_rx_nob_wanted); rc = kernel_recvmsg(conn->ksnc_sock, &msg, (struct kvec *)scratchiov, n, nob, MSG_DONTWAIT); if (conn->ksnc_msg.ksm_csum != 0) { for (i = 0, sum = rc; sum > 0; i++, sum -= fragnob) { - LASSERT (i < niov); + LASSERT(i < niov); /* Dang! have to kmap again because I have nowhere to stash the * mapped address. 
But by doing it while the page is still @@ -423,7 +423,7 @@ ksocknal_lib_csum_tx(ksock_tx_t *tx) } int -ksocknal_lib_get_conn_tunables (ksock_conn_t *conn, int *txmem, int *rxmem, int *nagle) +ksocknal_lib_get_conn_tunables(ksock_conn_t *conn, int *txmem, int *rxmem, int *nagle) { struct socket *sock = conn->ksnc_sock; int len; @@ -431,7 +431,7 @@ ksocknal_lib_get_conn_tunables (ksock_conn_t *conn, int *txmem, int *rxmem, int rc = ksocknal_connsock_addref(conn); if (rc != 0) { - LASSERT (conn->ksnc_closing); + LASSERT(conn->ksnc_closing); *txmem = *rxmem = *nagle = 0; return -ESHUTDOWN; } @@ -454,7 +454,7 @@ ksocknal_lib_get_conn_tunables (ksock_conn_t *conn, int *txmem, int *rxmem, int } int -ksocknal_lib_setup_sock (struct socket *sock) +ksocknal_lib_setup_sock(struct socket *sock) { int rc; int option; @@ -473,17 +473,17 @@ ksocknal_lib_setup_sock (struct socket *sock) linger.l_linger = 0; rc = kernel_setsockopt(sock, SOL_SOCKET, SO_LINGER, - (char *)&linger, sizeof (linger)); + (char *)&linger, sizeof(linger)); if (rc != 0) { - CERROR ("Can't set SO_LINGER: %d\n", rc); + CERROR("Can't set SO_LINGER: %d\n", rc); return rc; } option = -1; rc = kernel_setsockopt(sock, SOL_TCP, TCP_LINGER2, - (char *)&option, sizeof (option)); + (char *)&option, sizeof(option)); if (rc != 0) { - CERROR ("Can't set SO_LINGER2: %d\n", rc); + CERROR("Can't set SO_LINGER2: %d\n", rc); return rc; } @@ -491,9 +491,9 @@ ksocknal_lib_setup_sock (struct socket *sock) option = 1; rc = kernel_setsockopt(sock, SOL_TCP, TCP_NODELAY, - (char *)&option, sizeof (option)); + (char *)&option, sizeof(option)); if (rc != 0) { - CERROR ("Can't disable nagle: %d\n", rc); + CERROR("Can't disable nagle: %d\n", rc); return rc; } } @@ -502,7 +502,7 @@ ksocknal_lib_setup_sock (struct socket *sock) *ksocknal_tunables.ksnd_tx_buffer_size, *ksocknal_tunables.ksnd_rx_buffer_size); if (rc != 0) { - CERROR ("Can't set buffer tx %d, rx %d buffers: %d\n", + CERROR("Can't set buffer tx %d, rx %d buffers: %d\n", *ksocknal_tunables.ksnd_tx_buffer_size, *ksocknal_tunables.ksnd_rx_buffer_size, rc); return rc; @@ -519,9 +519,9 @@ ksocknal_lib_setup_sock (struct socket *sock) option = (do_keepalive ? 
1 : 0); rc = kernel_setsockopt(sock, SOL_SOCKET, SO_KEEPALIVE, - (char *)&option, sizeof (option)); + (char *)&option, sizeof(option)); if (rc != 0) { - CERROR ("Can't set SO_KEEPALIVE: %d\n", rc); + CERROR("Can't set SO_KEEPALIVE: %d\n", rc); return rc; } @@ -529,23 +529,23 @@ ksocknal_lib_setup_sock (struct socket *sock) return 0; rc = kernel_setsockopt(sock, SOL_TCP, TCP_KEEPIDLE, - (char *)&keep_idle, sizeof (keep_idle)); + (char *)&keep_idle, sizeof(keep_idle)); if (rc != 0) { - CERROR ("Can't set TCP_KEEPIDLE: %d\n", rc); + CERROR("Can't set TCP_KEEPIDLE: %d\n", rc); return rc; } rc = kernel_setsockopt(sock, SOL_TCP, TCP_KEEPINTVL, - (char *)&keep_intvl, sizeof (keep_intvl)); + (char *)&keep_intvl, sizeof(keep_intvl)); if (rc != 0) { - CERROR ("Can't set TCP_KEEPINTVL: %d\n", rc); + CERROR("Can't set TCP_KEEPINTVL: %d\n", rc); return rc; } rc = kernel_setsockopt(sock, SOL_TCP, TCP_KEEPCNT, - (char *)&keep_count, sizeof (keep_count)); + (char *)&keep_count, sizeof(keep_count)); if (rc != 0) { - CERROR ("Can't set TCP_KEEPCNT: %d\n", rc); + CERROR("Can't set TCP_KEEPCNT: %d\n", rc); return rc; } @@ -553,7 +553,7 @@ ksocknal_lib_setup_sock (struct socket *sock) } void -ksocknal_lib_push_conn (ksock_conn_t *conn) +ksocknal_lib_push_conn(ksock_conn_t *conn) { struct sock *sk; struct tcp_sock *tp; @@ -568,29 +568,29 @@ ksocknal_lib_push_conn (ksock_conn_t *conn) sk = conn->ksnc_sock->sk; tp = tcp_sk(sk); - lock_sock (sk); + lock_sock(sk); nonagle = tp->nonagle; tp->nonagle = 1; - release_sock (sk); + release_sock(sk); rc = kernel_setsockopt(conn->ksnc_sock, SOL_TCP, TCP_NODELAY, - (char *)&val, sizeof (val)); - LASSERT (rc == 0); + (char *)&val, sizeof(val)); + LASSERT(rc == 0); - lock_sock (sk); + lock_sock(sk); tp->nonagle = nonagle; - release_sock (sk); + release_sock(sk); ksocknal_connsock_decref(conn); } -extern void ksocknal_read_callback (ksock_conn_t *conn); -extern void ksocknal_write_callback (ksock_conn_t *conn); +extern void ksocknal_read_callback(ksock_conn_t *conn); +extern void ksocknal_write_callback(ksock_conn_t *conn); /* * socket call back in Linux */ static void -ksocknal_data_ready (struct sock *sk) +ksocknal_data_ready(struct sock *sk) { ksock_conn_t *conn; @@ -600,8 +600,8 @@ ksocknal_data_ready (struct sock *sk) conn = sk->sk_user_data; if (conn == NULL) { /* raced with ksocknal_terminate_conn */ - LASSERT (sk->sk_data_ready != &ksocknal_data_ready); - sk->sk_data_ready (sk); + LASSERT(sk->sk_data_ready != &ksocknal_data_ready); + sk->sk_data_ready(sk); } else ksocknal_read_callback(conn); @@ -609,7 +609,7 @@ ksocknal_data_ready (struct sock *sk) } static void -ksocknal_write_space (struct sock *sk) +ksocknal_write_space(struct sock *sk) { ksock_conn_t *conn; int wspace; @@ -629,12 +629,12 @@ ksocknal_write_space (struct sock *sk) " ready" : " blocked"), (conn == NULL) ? "" : (conn->ksnc_tx_scheduled ? " scheduled" : " idle"), - (conn == NULL) ? "" : (list_empty (&conn->ksnc_tx_queue) ? + (conn == NULL) ? "" : (list_empty(&conn->ksnc_tx_queue) ? " empty" : " queued")); if (conn == NULL) { /* raced with ksocknal_terminate_conn */ - LASSERT (sk->sk_write_space != &ksocknal_write_space); - sk->sk_write_space (sk); + LASSERT(sk->sk_write_space != &ksocknal_write_space); + sk->sk_write_space(sk); read_unlock(&ksocknal_data.ksnd_global_lock); return; @@ -647,7 +647,7 @@ ksocknal_write_space (struct sock *sk) * ENOMEM check in ksocknal_transmit is race-free (think about * it). 
*/ - clear_bit (SOCK_NOSPACE, &sk->sk_socket->flags); + clear_bit(SOCK_NOSPACE, &sk->sk_socket->flags); } read_unlock(&ksocknal_data.ksnd_global_lock); diff --git a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_lib-linux.h b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_lib-linux.h index 7a793d2d3582..f5563881b25c 100644 --- a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_lib-linux.h +++ b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_lib-linux.h @@ -50,8 +50,8 @@ #include <net/tcp.h> #include <linux/uio.h> #include <linux/if.h> +#include <linux/uaccess.h> -#include <asm/uaccess.h> #include <asm/irq.h> #include <linux/fs.h> diff --git a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_modparams.c b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_modparams.c index 66d78c9be650..86b88db1cf20 100644 --- a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_modparams.c +++ b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_modparams.c @@ -72,7 +72,7 @@ static int typed_conns = 1; module_param(typed_conns, int, 0444); MODULE_PARM_DESC(typed_conns, "use different sockets for bulk"); -static int min_bulk = (1<<10); +static int min_bulk = 1<<10; module_param(min_bulk, int, 0644); MODULE_PARM_DESC(min_bulk, "smallest 'large' message"); @@ -122,7 +122,7 @@ static int nonblk_zcack = 1; module_param(nonblk_zcack, int, 0644); MODULE_PARM_DESC(nonblk_zcack, "always send ZC-ACK on non-blocking connection"); -static unsigned int zc_min_payload = (16 << 10); +static unsigned int zc_min_payload = 16 << 10; module_param(zc_min_payload, int, 0644); MODULE_PARM_DESC(zc_min_payload, "minimum payload size to zero copy"); @@ -182,7 +182,7 @@ int ksocknal_tunables_init(void) #endif if (*ksocknal_tunables.ksnd_zc_min_payload < (2 << 10)) - *ksocknal_tunables.ksnd_zc_min_payload = (2 << 10); + *ksocknal_tunables.ksnd_zc_min_payload = 2 << 10; return 0; }; diff --git a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_proto.c b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_proto.c index b2f88eb47bba..8596581f54ff 100644 --- a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_proto.c +++ b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_proto.c @@ -55,8 +55,8 @@ ksocknal_next_tx_carrier(ksock_conn_t *conn) ksock_tx_t *tx = conn->ksnc_tx_carrier; /* Called holding BH lock: conn->ksnc_scheduler->kss_lock */ - LASSERT (!list_empty(&conn->ksnc_tx_queue)); - LASSERT (tx != NULL); + LASSERT(!list_empty(&conn->ksnc_tx_queue)); + LASSERT(tx != NULL); /* Next TX that can carry ZC-ACK or LNet message */ if (tx->tx_list.next == &conn->ksnc_tx_queue) { @@ -65,7 +65,7 @@ ksocknal_next_tx_carrier(ksock_conn_t *conn) } else { conn->ksnc_tx_carrier = list_entry(tx->tx_list.next, ksock_tx_t, tx_list); - LASSERT (conn->ksnc_tx_carrier->tx_msg.ksm_type == tx->tx_msg.ksm_type); + LASSERT(conn->ksnc_tx_carrier->tx_msg.ksm_type == tx->tx_msg.ksm_type); } } @@ -75,7 +75,7 @@ ksocknal_queue_tx_zcack_v2(ksock_conn_t *conn, { ksock_tx_t *tx = conn->ksnc_tx_carrier; - LASSERT (tx_ack == NULL || + LASSERT(tx_ack == NULL || tx_ack->tx_msg.ksm_type == KSOCK_MSG_NOOP); /* @@ -139,7 +139,7 @@ ksocknal_queue_tx_msg_v2(ksock_conn_t *conn, ksock_tx_t *tx_msg) return NULL; } - LASSERT (tx->tx_msg.ksm_type == KSOCK_MSG_NOOP); + LASSERT(tx->tx_msg.ksm_type == KSOCK_MSG_NOOP); /* There is a noop zc-ack can be piggybacked */ tx_msg->tx_msg.ksm_zc_cookies[1] = tx->tx_msg.ksm_zc_cookies[1]; @@ -162,7 +162,7 @@ ksocknal_queue_tx_zcack_v3(ksock_conn_t *conn, return ksocknal_queue_tx_zcack_v2(conn, tx_ack, cookie); /* non-blocking ZC-ACK 
(to router) */ - LASSERT (tx_ack == NULL || + LASSERT(tx_ack == NULL || tx_ack->tx_msg.ksm_type == KSOCK_MSG_NOOP); tx = conn->ksnc_tx_carrier; @@ -185,7 +185,7 @@ ksocknal_queue_tx_zcack_v3(ksock_conn_t *conn, if (tx->tx_msg.ksm_zc_cookies[1] == SOCKNAL_KEEPALIVE_PING) { /* replace the keepalive PING with a real ACK */ - LASSERT (tx->tx_msg.ksm_zc_cookies[0] == 0); + LASSERT(tx->tx_msg.ksm_zc_cookies[0] == 0); tx->tx_msg.ksm_zc_cookies[1] = cookie; return 1; } @@ -220,7 +220,7 @@ ksocknal_queue_tx_zcack_v3(ksock_conn_t *conn, __u64 tmp = 0; /* two separated cookies: (a+2, a) or (a+1, a) */ - LASSERT (tx->tx_msg.ksm_zc_cookies[0] - + LASSERT(tx->tx_msg.ksm_zc_cookies[0] - tx->tx_msg.ksm_zc_cookies[1] <= 2); if (tx->tx_msg.ksm_zc_cookies[0] - @@ -408,7 +408,7 @@ ksocknal_handle_zcack(ksock_conn_t *conn, __u64 cookie1, __u64 cookie2) ksock_peer_t *peer = conn->ksnc_peer; ksock_tx_t *tx; ksock_tx_t *tmp; - LIST_HEAD (zlist); + LIST_HEAD(zlist); int count; if (cookie1 == 0) @@ -450,7 +450,7 @@ ksocknal_handle_zcack(ksock_conn_t *conn, __u64 cookie1, __u64 cookie2) } static int -ksocknal_send_hello_v1 (ksock_conn_t *conn, ksock_hello_msg_t *hello) +ksocknal_send_hello_v1(ksock_conn_t *conn, ksock_hello_msg_t *hello) { struct socket *sock = conn->ksnc_sock; lnet_hdr_t *hdr; @@ -526,7 +526,7 @@ out: } static int -ksocknal_send_hello_v2 (ksock_conn_t *conn, ksock_hello_msg_t *hello) +ksocknal_send_hello_v2(ksock_conn_t *conn, ksock_hello_msg_t *hello) { struct socket *sock = conn->ksnc_sock; int rc; @@ -584,12 +584,12 @@ ksocknal_recv_hello_v1(ksock_conn_t *conn, ksock_hello_msg_t *hello, } rc = libcfs_sock_read(sock, &hdr->src_nid, - sizeof (*hdr) - offsetof (lnet_hdr_t, src_nid), + sizeof(*hdr) - offsetof(lnet_hdr_t, src_nid), timeout); if (rc != 0) { CERROR("Error %d reading rest of HELLO hdr from %pI4h\n", rc, &conn->ksnc_ipaddr); - LASSERT (rc < 0 && rc != -EALREADY); + LASSERT(rc < 0 && rc != -EALREADY); goto out; } @@ -602,12 +602,12 @@ ksocknal_recv_hello_v1(ksock_conn_t *conn, ksock_hello_msg_t *hello, goto out; } - hello->kshm_src_nid = le64_to_cpu (hdr->src_nid); - hello->kshm_src_pid = le32_to_cpu (hdr->src_pid); - hello->kshm_src_incarnation = le64_to_cpu (hdr->msg.hello.incarnation); - hello->kshm_ctype = le32_to_cpu (hdr->msg.hello.type); - hello->kshm_nips = le32_to_cpu (hdr->payload_length) / - sizeof (__u32); + hello->kshm_src_nid = le64_to_cpu(hdr->src_nid); + hello->kshm_src_pid = le32_to_cpu(hdr->src_pid); + hello->kshm_src_incarnation = le64_to_cpu(hdr->msg.hello.incarnation); + hello->kshm_ctype = le32_to_cpu(hdr->msg.hello.type); + hello->kshm_nips = le32_to_cpu(hdr->payload_length) / + sizeof(__u32); if (hello->kshm_nips > LNET_MAX_INTERFACES) { CERROR("Bad nips %d from ip %pI4h\n", @@ -645,7 +645,7 @@ out: } static int -ksocknal_recv_hello_v2 (ksock_conn_t *conn, ksock_hello_msg_t *hello, int timeout) +ksocknal_recv_hello_v2(ksock_conn_t *conn, ksock_hello_msg_t *hello, int timeout) { struct socket *sock = conn->ksnc_sock; int rc; |
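The socklnd.h hunks above also touch ksocknal_nid2peerlist(), which selects a peer hash bucket by truncating the NID and taking it modulo the hash-table size. Below is a minimal user-space sketch of that bucket selection, with a toy chained hash table standing in for the kernel's array of struct list_head; the names, the table size, and the peer type here are hypothetical, not part of the patch.

/* Illustrative sketch only: modulo bucket selection in the style of
 * ksocknal_nid2peerlist(), over a toy chained hash table. */
#include <stdint.h>
#include <stdio.h>

#define PEER_HASH_SIZE 251              /* stands in for SOCKNAL_PEER_HASH_SIZE */

struct peer {
        uint64_t nid;                   /* stands in for lnet_nid_t */
        struct peer *next;
};

static struct peer *peer_hash[PEER_HASH_SIZE];

static unsigned int nid2bucket(uint64_t nid)
{
        /* same idea as ksocknal_nid2peerlist(): truncate, then modulus */
        return (unsigned int)nid % PEER_HASH_SIZE;
}

static void peer_add(struct peer *p)
{
        unsigned int b = nid2bucket(p->nid);

        p->next = peer_hash[b];
        peer_hash[b] = p;
}

static struct peer *peer_find(uint64_t nid)
{
        struct peer *p;

        for (p = peer_hash[nid2bucket(nid)]; p != NULL; p = p->next)
                if (p->nid == nid)
                        return p;
        return NULL;
}

int main(void)
{
        struct peer p1 = { .nid = 0x12345 }, p2 = { .nid = 0x54321 };

        peer_add(&p1);
        peer_add(&p2);
        printf("nid 0x54321 hashes to bucket %u, found: %s\n",
               nid2bucket(0x54321), peer_find(0x54321) ? "yes" : "no");
        return 0;
}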
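The inline helpers reformatted in socklnd.h (ksocknal_conn_addref/decref, ksocknal_tx_addref/decref, ksocknal_route_addref/decref, ksocknal_peer_addref/decref) all follow the same take-a-reference / destroy-on-last-put pattern, asserting the count is still positive before touching it. A minimal user-space sketch of that pattern with C11 atomics follows; the "thing" object and its functions are hypothetical stand-ins for the driver's types, not code from the patch.

/* Illustrative sketch only: the addref/decref-with-destroy pattern used by
 * the ksocknal_*_addref()/_decref() helpers, redone with C11 atomics. */
#include <assert.h>
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct thing {
        atomic_int refcount;            /* starts at 1 for the creator */
};

static struct thing *thing_create(void)
{
        struct thing *t = malloc(sizeof(*t));

        if (t != NULL)
                atomic_init(&t->refcount, 1);
        return t;
}

static void thing_addref(struct thing *t)
{
        /* like LASSERT(atomic_read(&...) > 0): never revive a dead object */
        assert(atomic_load(&t->refcount) > 0);
        atomic_fetch_add(&t->refcount, 1);
}

static void thing_decref(struct thing *t)
{
        assert(atomic_load(&t->refcount) > 0);
        /* last put destroys, as ksocknal_*_decref() calls ksocknal_destroy_*() */
        if (atomic_fetch_sub(&t->refcount, 1) == 1) {
                printf("last reference dropped, destroying\n");
                free(t);
        }
}

int main(void)
{
        struct thing *t = thing_create();

        if (t == NULL)
                return 1;
        thing_addref(t);                /* a second user takes a reference */
        thing_decref(t);                /* ... and drops it */
        thing_decref(t);                /* creator's reference: destroyed here */
        return 0;
}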
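ksocknal_lib_recv_iov(), touched above, accumulates a checksum only over the bytes a partial kernel_recvmsg() actually delivered, walking the iovec and clamping the final fragment to what is left of the byte count. A user-space sketch of that fold over a struct iovec follows; csum_add() is a stand-in rotate-and-add hash, not the driver's checksum routine.

/* Illustrative sketch only: checksum the bytes a partial read actually
 * filled, in the style of the post-recv loop in ksocknal_lib_recv_iov(). */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <sys/uio.h>

static uint32_t csum_add(uint32_t csum, const void *buf, size_t len)
{
        const unsigned char *p = buf;

        while (len--)
                csum = ((csum << 1) | (csum >> 31)) + *p++;     /* rotate-and-add */
        return csum;
}

static uint32_t csum_received(const struct iovec *iov, int niov, size_t received)
{
        uint32_t csum = 0;
        int i;

        /* walk the fragments, clamping the last one to what was received */
        for (i = 0; i < niov && received > 0; i++) {
                size_t fragnob = iov[i].iov_len;

                if (fragnob > received)
                        fragnob = received;
                csum = csum_add(csum, iov[i].iov_base, fragnob);
                received -= fragnob;
        }
        return csum;
}

int main(void)
{
        char a[] = "hello ", b[] = "world";
        struct iovec iov[2] = {
                { .iov_base = a, .iov_len = sizeof(a) - 1 },
                { .iov_base = b, .iov_len = sizeof(b) - 1 },
        };

        /* pretend recvmsg() returned only 8 of the 11 queued bytes */
        printf("csum over 8 bytes: %u\n", (unsigned)csum_received(iov, 2, 8));
        return 0;
}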
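ksocknal_lib_setup_sock() and ksocknal_lib_eager_ack(), also reformatted above, configure each connection with kernel_setsockopt(): SO_LINGER is disabled, Nagle is optionally turned off, keepalive probes are armed, and TCP_QUICKACK is toggled to force an eager ACK. A user-space sketch applying the same options with setsockopt() follows; the numeric values are placeholders, not the socklnd module tunables.

/* Illustrative sketch only: the main TCP options the socklnd sets via
 * kernel_setsockopt(), applied to a user-space socket. */
#include <netinet/in.h>
#include <netinet/tcp.h>
#include <stdio.h>
#include <sys/socket.h>
#include <unistd.h>

static int tune_tcp_socket(int fd)
{
        struct linger lg = { .l_onoff = 0, .l_linger = 0 };
        int nodelay = 1;                /* disable Nagle, as the nagle tunable does */
        int keepalive = 1;
        int keep_idle = 30;             /* seconds before the first probe */
        int keep_intvl = 5;             /* seconds between probes */
        int keep_count = 5;             /* probes before declaring the peer dead */
        int quickack = 1;               /* one-shot hint: ACK eagerly now */

        if (setsockopt(fd, SOL_SOCKET, SO_LINGER, &lg, sizeof(lg)) ||
            setsockopt(fd, IPPROTO_TCP, TCP_NODELAY, &nodelay, sizeof(nodelay)) ||
            setsockopt(fd, SOL_SOCKET, SO_KEEPALIVE, &keepalive, sizeof(keepalive)) ||
            setsockopt(fd, IPPROTO_TCP, TCP_KEEPIDLE, &keep_idle, sizeof(keep_idle)) ||
            setsockopt(fd, IPPROTO_TCP, TCP_KEEPINTVL, &keep_intvl, sizeof(keep_intvl)) ||
            setsockopt(fd, IPPROTO_TCP, TCP_KEEPCNT, &keep_count, sizeof(keep_count))) {
                perror("setsockopt");
                return -1;
        }

        /* TCP_QUICKACK is what ksocknal_lib_eager_ack() toggles; it is not
         * sticky, so a long-lived connection re-arms it when needed. */
        return setsockopt(fd, IPPROTO_TCP, TCP_QUICKACK, &quickack, sizeof(quickack));
}

int main(void)
{
        int fd = socket(AF_INET, SOCK_STREAM, 0);

        if (fd < 0 || tune_tcp_socket(fd) < 0)
                return 1;
        close(fd);
        return 0;
}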