Diffstat (limited to 'drivers/staging/lustre/lnet')
57 files changed, 2784 insertions(+), 2649 deletions(-)
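Two themes run through the o2iblnd portion of this diff: the `kib_*_t` typedefs become plain `struct` tags, and memory registration gains a FastReg path alongside the old FMR pools, selected per HCA. As a minimal standalone sketch of the capability probe that the new `kiblnd_create_fmr_pool()` below performs (the helper name `choose_reg_method` is hypothetical; the function-pointer checks assume the staging-era `struct ib_device` layout, where the FMR verbs live directly on the device struct):

	#include <linux/errno.h>
	#include <rdma/ib_verbs.h>

	/*
	 * Sketch only, mirroring the probe in kiblnd_create_fmr_pool()
	 * below: returns 1 if FMR pools are usable, 0 if the HCA must
	 * fall back to FastReg (IB_WR_REG_MR), -ENOSYS if neither works.
	 */
	static int choose_reg_method(struct ib_device *ibdev)
	{
		if (ibdev->alloc_fmr && ibdev->dealloc_fmr &&
		    ibdev->map_phys_fmr && ibdev->unmap_fmr)
			return 1;	/* all four FMR verbs present */

		if (ibdev->attrs.device_cap_flags & IB_DEVICE_MEM_MGT_EXTENSIONS)
			return 0;	/* FastReg work requests supported */

		return -ENOSYS;		/* cannot register memory at all */
	}

The result is cached in the new `fpo_is_fmr` flag, so map/unmap paths branch once per pool rather than per operation.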
diff --git a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.c b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.c index 0d32e6541a3f..4f5978b3767b 100644 --- a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.c +++ b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.c @@ -15,11 +15,7 @@ * * You should have received a copy of the GNU General Public License * version 2 along with this program; If not, see - * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf - * - * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, - * CA 95054 USA or visit www.sun.com if you need additional information or - * have any questions. + * http://www.gnu.org/licenses/gpl-2.0.html * * GPL HEADER END */ @@ -44,7 +40,7 @@ static lnd_t the_o2iblnd; -kib_data_t kiblnd_data; +struct kib_data kiblnd_data; static __u32 kiblnd_cksum(void *ptr, int nob) { @@ -98,40 +94,40 @@ static char *kiblnd_msgtype2str(int type) static int kiblnd_msgtype2size(int type) { - const int hdr_size = offsetof(kib_msg_t, ibm_u); + const int hdr_size = offsetof(struct kib_msg, ibm_u); switch (type) { case IBLND_MSG_CONNREQ: case IBLND_MSG_CONNACK: - return hdr_size + sizeof(kib_connparams_t); + return hdr_size + sizeof(struct kib_connparams); case IBLND_MSG_NOOP: return hdr_size; case IBLND_MSG_IMMEDIATE: - return offsetof(kib_msg_t, ibm_u.immediate.ibim_payload[0]); + return offsetof(struct kib_msg, ibm_u.immediate.ibim_payload[0]); case IBLND_MSG_PUT_REQ: - return hdr_size + sizeof(kib_putreq_msg_t); + return hdr_size + sizeof(struct kib_putreq_msg); case IBLND_MSG_PUT_ACK: - return hdr_size + sizeof(kib_putack_msg_t); + return hdr_size + sizeof(struct kib_putack_msg); case IBLND_MSG_GET_REQ: - return hdr_size + sizeof(kib_get_msg_t); + return hdr_size + sizeof(struct kib_get_msg); case IBLND_MSG_PUT_NAK: case IBLND_MSG_PUT_DONE: case IBLND_MSG_GET_DONE: - return hdr_size + sizeof(kib_completion_msg_t); + return hdr_size + sizeof(struct kib_completion_msg); default: return -1; } } -static int kiblnd_unpack_rd(kib_msg_t *msg, int flip) +static int kiblnd_unpack_rd(struct kib_msg *msg, int flip) { - kib_rdma_desc_t *rd; + struct kib_rdma_desc *rd; int nob; int n; int i; @@ -156,7 +152,7 @@ static int kiblnd_unpack_rd(kib_msg_t *msg, int flip) return 1; } - nob = offsetof(kib_msg_t, ibm_u) + + nob = offsetof(struct kib_msg, ibm_u) + kiblnd_rd_msg_size(rd, msg->ibm_type, n); if (msg->ibm_nob < nob) { @@ -176,10 +172,10 @@ static int kiblnd_unpack_rd(kib_msg_t *msg, int flip) return 0; } -void kiblnd_pack_msg(lnet_ni_t *ni, kib_msg_t *msg, int version, +void kiblnd_pack_msg(lnet_ni_t *ni, struct kib_msg *msg, int version, int credits, lnet_nid_t dstnid, __u64 dststamp) { - kib_net_t *net = ni->ni_data; + struct kib_net *net = ni->ni_data; /* * CAVEAT EMPTOR! 
all message fields not set here should have been @@ -202,9 +198,9 @@ void kiblnd_pack_msg(lnet_ni_t *ni, kib_msg_t *msg, int version, } } -int kiblnd_unpack_msg(kib_msg_t *msg, int nob) +int kiblnd_unpack_msg(struct kib_msg *msg, int nob) { - const int hdr_size = offsetof(kib_msg_t, ibm_u); + const int hdr_size = offsetof(struct kib_msg, ibm_u); __u32 msg_cksum; __u16 version; int msg_nob; @@ -315,10 +311,10 @@ int kiblnd_unpack_msg(kib_msg_t *msg, int nob) return 0; } -int kiblnd_create_peer(lnet_ni_t *ni, kib_peer_t **peerp, lnet_nid_t nid) +int kiblnd_create_peer(lnet_ni_t *ni, struct kib_peer **peerp, lnet_nid_t nid) { - kib_peer_t *peer; - kib_net_t *net = ni->ni_data; + struct kib_peer *peer; + struct kib_net *net = ni->ni_data; int cpt = lnet_cpt_of_nid(nid); unsigned long flags; @@ -335,8 +331,8 @@ int kiblnd_create_peer(lnet_ni_t *ni, kib_peer_t **peerp, lnet_nid_t nid) peer->ibp_nid = nid; peer->ibp_error = 0; peer->ibp_last_alive = 0; - peer->ibp_max_frags = IBLND_CFG_RDMA_FRAGS; - peer->ibp_queue_depth = *kiblnd_tunables.kib_peertxcredits; + peer->ibp_max_frags = kiblnd_cfg_rdma_frags(peer->ibp_ni); + peer->ibp_queue_depth = ni->ni_peertxcredits; atomic_set(&peer->ibp_refcount, 1); /* 1 ref for caller */ INIT_LIST_HEAD(&peer->ibp_list); /* not in the peer table yet */ @@ -357,9 +353,9 @@ int kiblnd_create_peer(lnet_ni_t *ni, kib_peer_t **peerp, lnet_nid_t nid) return 0; } -void kiblnd_destroy_peer(kib_peer_t *peer) +void kiblnd_destroy_peer(struct kib_peer *peer) { - kib_net_t *net = peer->ibp_ni->ni_data; + struct kib_net *net = peer->ibp_ni->ni_data; LASSERT(net); LASSERT(!atomic_read(&peer->ibp_refcount)); @@ -378,7 +374,7 @@ void kiblnd_destroy_peer(kib_peer_t *peer) atomic_dec(&net->ibn_npeers); } -kib_peer_t *kiblnd_find_peer_locked(lnet_nid_t nid) +struct kib_peer *kiblnd_find_peer_locked(lnet_nid_t nid) { /* * the caller is responsible for accounting the additional reference @@ -386,10 +382,10 @@ kib_peer_t *kiblnd_find_peer_locked(lnet_nid_t nid) */ struct list_head *peer_list = kiblnd_nid2peerlist(nid); struct list_head *tmp; - kib_peer_t *peer; + struct kib_peer *peer; list_for_each(tmp, peer_list) { - peer = list_entry(tmp, kib_peer_t, ibp_list); + peer = list_entry(tmp, struct kib_peer, ibp_list); LASSERT(!kiblnd_peer_idle(peer)); if (peer->ibp_nid != nid) @@ -404,7 +400,7 @@ kib_peer_t *kiblnd_find_peer_locked(lnet_nid_t nid) return NULL; } -void kiblnd_unlink_peer_locked(kib_peer_t *peer) +void kiblnd_unlink_peer_locked(struct kib_peer *peer) { LASSERT(list_empty(&peer->ibp_conns)); @@ -417,7 +413,7 @@ void kiblnd_unlink_peer_locked(kib_peer_t *peer) static int kiblnd_get_peer_info(lnet_ni_t *ni, int index, lnet_nid_t *nidp, int *count) { - kib_peer_t *peer; + struct kib_peer *peer; struct list_head *ptmp; int i; unsigned long flags; @@ -426,7 +422,7 @@ static int kiblnd_get_peer_info(lnet_ni_t *ni, int index, for (i = 0; i < kiblnd_data.kib_peer_hash_size; i++) { list_for_each(ptmp, &kiblnd_data.kib_peers[i]) { - peer = list_entry(ptmp, kib_peer_t, ibp_list); + peer = list_entry(ptmp, struct kib_peer, ibp_list); LASSERT(!kiblnd_peer_idle(peer)); if (peer->ibp_ni != ni) @@ -448,17 +444,17 @@ static int kiblnd_get_peer_info(lnet_ni_t *ni, int index, return -ENOENT; } -static void kiblnd_del_peer_locked(kib_peer_t *peer) +static void kiblnd_del_peer_locked(struct kib_peer *peer) { struct list_head *ctmp; struct list_head *cnxt; - kib_conn_t *conn; + struct kib_conn *conn; if (list_empty(&peer->ibp_conns)) { kiblnd_unlink_peer_locked(peer); } else { 
list_for_each_safe(ctmp, cnxt, &peer->ibp_conns) { - conn = list_entry(ctmp, kib_conn_t, ibc_list); + conn = list_entry(ctmp, struct kib_conn, ibc_list); kiblnd_close_conn_locked(conn, 0); } @@ -475,7 +471,7 @@ static int kiblnd_del_peer(lnet_ni_t *ni, lnet_nid_t nid) LIST_HEAD(zombies); struct list_head *ptmp; struct list_head *pnxt; - kib_peer_t *peer; + struct kib_peer *peer; int lo; int hi; int i; @@ -494,7 +490,7 @@ static int kiblnd_del_peer(lnet_ni_t *ni, lnet_nid_t nid) for (i = lo; i <= hi; i++) { list_for_each_safe(ptmp, pnxt, &kiblnd_data.kib_peers[i]) { - peer = list_entry(ptmp, kib_peer_t, ibp_list); + peer = list_entry(ptmp, struct kib_peer, ibp_list); LASSERT(!kiblnd_peer_idle(peer)); if (peer->ibp_ni != ni) @@ -522,11 +518,11 @@ static int kiblnd_del_peer(lnet_ni_t *ni, lnet_nid_t nid) return rc; } -static kib_conn_t *kiblnd_get_conn_by_idx(lnet_ni_t *ni, int index) +static struct kib_conn *kiblnd_get_conn_by_idx(lnet_ni_t *ni, int index) { - kib_peer_t *peer; + struct kib_peer *peer; struct list_head *ptmp; - kib_conn_t *conn; + struct kib_conn *conn; struct list_head *ctmp; int i; unsigned long flags; @@ -535,7 +531,7 @@ static kib_conn_t *kiblnd_get_conn_by_idx(lnet_ni_t *ni, int index) for (i = 0; i < kiblnd_data.kib_peer_hash_size; i++) { list_for_each(ptmp, &kiblnd_data.kib_peers[i]) { - peer = list_entry(ptmp, kib_peer_t, ibp_list); + peer = list_entry(ptmp, struct kib_peer, ibp_list); LASSERT(!kiblnd_peer_idle(peer)); if (peer->ibp_ni != ni) @@ -545,7 +541,7 @@ static kib_conn_t *kiblnd_get_conn_by_idx(lnet_ni_t *ni, int index) if (index-- > 0) continue; - conn = list_entry(ctmp, kib_conn_t, + conn = list_entry(ctmp, struct kib_conn, ibc_list); kiblnd_conn_addref(conn); read_unlock_irqrestore( @@ -594,7 +590,7 @@ static void kiblnd_setup_mtu_locked(struct rdma_cm_id *cmid) cmid->route.path_rec->mtu = mtu; } -static int kiblnd_get_completion_vector(kib_conn_t *conn, int cpt) +static int kiblnd_get_completion_vector(struct kib_conn *conn, int cpt) { cpumask_t *mask; int vectors; @@ -621,7 +617,7 @@ static int kiblnd_get_completion_vector(kib_conn_t *conn, int cpt) return 1; } -kib_conn_t *kiblnd_create_conn(kib_peer_t *peer, struct rdma_cm_id *cmid, +struct kib_conn *kiblnd_create_conn(struct kib_peer *peer, struct rdma_cm_id *cmid, int state, int version) { /* @@ -634,12 +630,12 @@ kib_conn_t *kiblnd_create_conn(kib_peer_t *peer, struct rdma_cm_id *cmid, * its ref on 'cmid'). 
*/ rwlock_t *glock = &kiblnd_data.kib_global_lock; - kib_net_t *net = peer->ibp_ni->ni_data; - kib_dev_t *dev; + struct kib_net *net = peer->ibp_ni->ni_data; + struct kib_dev *dev; struct ib_qp_init_attr *init_qp_attr; struct kib_sched_info *sched; struct ib_cq_init_attr cq_attr = {}; - kib_conn_t *conn; + struct kib_conn *conn; struct ib_cq *cq; unsigned long flags; int cpt; @@ -723,7 +719,7 @@ kib_conn_t *kiblnd_create_conn(kib_peer_t *peer, struct rdma_cm_id *cmid, write_unlock_irqrestore(glock, flags); LIBCFS_CPT_ALLOC(conn->ibc_rxs, lnet_cpt_table(), cpt, - IBLND_RX_MSGS(conn) * sizeof(kib_rx_t)); + IBLND_RX_MSGS(conn) * sizeof(struct kib_rx)); if (!conn->ibc_rxs) { CERROR("Cannot allocate RX buffers\n"); goto failed_2; @@ -833,10 +829,10 @@ kib_conn_t *kiblnd_create_conn(kib_peer_t *peer, struct rdma_cm_id *cmid, return NULL; } -void kiblnd_destroy_conn(kib_conn_t *conn, bool free_conn) +void kiblnd_destroy_conn(struct kib_conn *conn, bool free_conn) { struct rdma_cm_id *cmid = conn->ibc_cmid; - kib_peer_t *peer = conn->ibc_peer; + struct kib_peer *peer = conn->ibc_peer; int rc; LASSERT(!in_interrupt()); @@ -879,7 +875,7 @@ void kiblnd_destroy_conn(kib_conn_t *conn, bool free_conn) if (conn->ibc_rxs) { LIBCFS_FREE(conn->ibc_rxs, - IBLND_RX_MSGS(conn) * sizeof(kib_rx_t)); + IBLND_RX_MSGS(conn) * sizeof(struct kib_rx)); } if (conn->ibc_connvars) @@ -890,7 +886,7 @@ void kiblnd_destroy_conn(kib_conn_t *conn, bool free_conn) /* See CAVEAT EMPTOR above in kiblnd_create_conn */ if (conn->ibc_state != IBLND_CONN_INIT) { - kib_net_t *net = peer->ibp_ni->ni_data; + struct kib_net *net = peer->ibp_ni->ni_data; kiblnd_peer_decref(peer); rdma_destroy_id(cmid); @@ -900,15 +896,15 @@ void kiblnd_destroy_conn(kib_conn_t *conn, bool free_conn) LIBCFS_FREE(conn, sizeof(*conn)); } -int kiblnd_close_peer_conns_locked(kib_peer_t *peer, int why) +int kiblnd_close_peer_conns_locked(struct kib_peer *peer, int why) { - kib_conn_t *conn; + struct kib_conn *conn; struct list_head *ctmp; struct list_head *cnxt; int count = 0; list_for_each_safe(ctmp, cnxt, &peer->ibp_conns) { - conn = list_entry(ctmp, kib_conn_t, ibc_list); + conn = list_entry(ctmp, struct kib_conn, ibc_list); CDEBUG(D_NET, "Closing conn -> %s, version: %x, reason: %d\n", libcfs_nid2str(peer->ibp_nid), @@ -921,16 +917,16 @@ int kiblnd_close_peer_conns_locked(kib_peer_t *peer, int why) return count; } -int kiblnd_close_stale_conns_locked(kib_peer_t *peer, +int kiblnd_close_stale_conns_locked(struct kib_peer *peer, int version, __u64 incarnation) { - kib_conn_t *conn; + struct kib_conn *conn; struct list_head *ctmp; struct list_head *cnxt; int count = 0; list_for_each_safe(ctmp, cnxt, &peer->ibp_conns) { - conn = list_entry(ctmp, kib_conn_t, ibc_list); + conn = list_entry(ctmp, struct kib_conn, ibc_list); if (conn->ibc_version == version && conn->ibc_incarnation == incarnation) @@ -951,7 +947,7 @@ int kiblnd_close_stale_conns_locked(kib_peer_t *peer, static int kiblnd_close_matching_conns(lnet_ni_t *ni, lnet_nid_t nid) { - kib_peer_t *peer; + struct kib_peer *peer; struct list_head *ptmp; struct list_head *pnxt; int lo; @@ -972,7 +968,7 @@ static int kiblnd_close_matching_conns(lnet_ni_t *ni, lnet_nid_t nid) for (i = lo; i <= hi; i++) { list_for_each_safe(ptmp, pnxt, &kiblnd_data.kib_peers[i]) { - peer = list_entry(ptmp, kib_peer_t, ibp_list); + peer = list_entry(ptmp, struct kib_peer, ibp_list); LASSERT(!kiblnd_peer_idle(peer)); if (peer->ibp_ni != ni) @@ -1016,7 +1012,7 @@ static int kiblnd_ctl(lnet_ni_t *ni, unsigned int cmd, void *arg) 
break; } case IOC_LIBCFS_GET_CONN: { - kib_conn_t *conn; + struct kib_conn *conn; rc = 0; conn = kiblnd_get_conn_by_idx(ni, data->ioc_count); @@ -1052,7 +1048,7 @@ static void kiblnd_query(lnet_ni_t *ni, lnet_nid_t nid, unsigned long *when) unsigned long last_alive = 0; unsigned long now = cfs_time_current(); rwlock_t *glock = &kiblnd_data.kib_global_lock; - kib_peer_t *peer; + struct kib_peer *peer; unsigned long flags; read_lock_irqsave(glock, flags); @@ -1078,7 +1074,7 @@ static void kiblnd_query(lnet_ni_t *ni, lnet_nid_t nid, unsigned long *when) last_alive ? cfs_duration_sec(now - last_alive) : -1); } -static void kiblnd_free_pages(kib_pages_t *p) +static void kiblnd_free_pages(struct kib_pages *p) { int npages = p->ibp_npages; int i; @@ -1088,22 +1084,22 @@ static void kiblnd_free_pages(kib_pages_t *p) __free_page(p->ibp_pages[i]); } - LIBCFS_FREE(p, offsetof(kib_pages_t, ibp_pages[npages])); + LIBCFS_FREE(p, offsetof(struct kib_pages, ibp_pages[npages])); } -int kiblnd_alloc_pages(kib_pages_t **pp, int cpt, int npages) +int kiblnd_alloc_pages(struct kib_pages **pp, int cpt, int npages) { - kib_pages_t *p; + struct kib_pages *p; int i; LIBCFS_CPT_ALLOC(p, lnet_cpt_table(), cpt, - offsetof(kib_pages_t, ibp_pages[npages])); + offsetof(struct kib_pages, ibp_pages[npages])); if (!p) { CERROR("Can't allocate descriptor for %d pages\n", npages); return -ENOMEM; } - memset(p, 0, offsetof(kib_pages_t, ibp_pages[npages])); + memset(p, 0, offsetof(struct kib_pages, ibp_pages[npages])); p->ibp_npages = npages; for (i = 0; i < npages; i++) { @@ -1121,9 +1117,9 @@ int kiblnd_alloc_pages(kib_pages_t **pp, int cpt, int npages) return 0; } -void kiblnd_unmap_rx_descs(kib_conn_t *conn) +void kiblnd_unmap_rx_descs(struct kib_conn *conn) { - kib_rx_t *rx; + struct kib_rx *rx; int i; LASSERT(conn->ibc_rxs); @@ -1145,9 +1141,9 @@ void kiblnd_unmap_rx_descs(kib_conn_t *conn) conn->ibc_rx_pages = NULL; } -void kiblnd_map_rx_descs(kib_conn_t *conn) +void kiblnd_map_rx_descs(struct kib_conn *conn) { - kib_rx_t *rx; + struct kib_rx *rx; struct page *pg; int pg_off; int ipg; @@ -1158,7 +1154,7 @@ void kiblnd_map_rx_descs(kib_conn_t *conn) rx = &conn->ibc_rxs[i]; rx->rx_conn = conn; - rx->rx_msg = (kib_msg_t *)(((char *)page_address(pg)) + pg_off); + rx->rx_msg = (struct kib_msg *)(((char *)page_address(pg)) + pg_off); rx->rx_msgaddr = kiblnd_dma_map_single(conn->ibc_hdev->ibh_ibdev, rx->rx_msg, @@ -1183,10 +1179,10 @@ void kiblnd_map_rx_descs(kib_conn_t *conn) } } -static void kiblnd_unmap_tx_pool(kib_tx_pool_t *tpo) +static void kiblnd_unmap_tx_pool(struct kib_tx_pool *tpo) { - kib_hca_dev_t *hdev = tpo->tpo_hdev; - kib_tx_t *tx; + struct kib_hca_dev *hdev = tpo->tpo_hdev; + struct kib_tx *tx; int i; LASSERT(!tpo->tpo_pool.po_allocated); @@ -1206,9 +1202,9 @@ static void kiblnd_unmap_tx_pool(kib_tx_pool_t *tpo) tpo->tpo_hdev = NULL; } -static kib_hca_dev_t *kiblnd_current_hdev(kib_dev_t *dev) +static struct kib_hca_dev *kiblnd_current_hdev(struct kib_dev *dev) { - kib_hca_dev_t *hdev; + struct kib_hca_dev *hdev; unsigned long flags; int i = 0; @@ -1232,14 +1228,14 @@ static kib_hca_dev_t *kiblnd_current_hdev(kib_dev_t *dev) return hdev; } -static void kiblnd_map_tx_pool(kib_tx_pool_t *tpo) +static void kiblnd_map_tx_pool(struct kib_tx_pool *tpo) { - kib_pages_t *txpgs = tpo->tpo_tx_pages; - kib_pool_t *pool = &tpo->tpo_pool; - kib_net_t *net = pool->po_owner->ps_net; - kib_dev_t *dev; + struct kib_pages *txpgs = tpo->tpo_tx_pages; + struct kib_pool *pool = &tpo->tpo_pool; + struct kib_net *net = 
pool->po_owner->ps_net; + struct kib_dev *dev; struct page *page; - kib_tx_t *tx; + struct kib_tx *tx; int page_offset; int ipage; int i; @@ -1260,7 +1256,7 @@ static void kiblnd_map_tx_pool(kib_tx_pool_t *tpo) page = txpgs->ibp_pages[ipage]; tx = &tpo->tpo_tx_descs[i]; - tx->tx_msg = (kib_msg_t *)(((char *)page_address(page)) + + tx->tx_msg = (struct kib_msg *)(((char *)page_address(page)) + page_offset); tx->tx_msgaddr = kiblnd_dma_map_single( @@ -1283,65 +1279,86 @@ static void kiblnd_map_tx_pool(kib_tx_pool_t *tpo) } } -struct ib_mr *kiblnd_find_rd_dma_mr(kib_hca_dev_t *hdev, kib_rdma_desc_t *rd, +struct ib_mr *kiblnd_find_rd_dma_mr(struct lnet_ni *ni, struct kib_rdma_desc *rd, int negotiated_nfrags) { - __u16 nfrags = (negotiated_nfrags != -1) ? - negotiated_nfrags : *kiblnd_tunables.kib_map_on_demand; + struct kib_net *net = ni->ni_data; + struct kib_hca_dev *hdev = net->ibn_dev->ibd_hdev; + struct lnet_ioctl_config_o2iblnd_tunables *tunables; + __u16 nfrags; + int mod; + + tunables = &ni->ni_lnd_tunables->lt_tun_u.lt_o2ib; + mod = tunables->lnd_map_on_demand; + nfrags = (negotiated_nfrags != -1) ? negotiated_nfrags : mod; LASSERT(hdev->ibh_mrs); - if (*kiblnd_tunables.kib_map_on_demand > 0 && - nfrags <= rd->rd_nfrags) + if (mod > 0 && nfrags <= rd->rd_nfrags) return NULL; return hdev->ibh_mrs; } -static void kiblnd_destroy_fmr_pool(kib_fmr_pool_t *pool) +static void kiblnd_destroy_fmr_pool(struct kib_fmr_pool *fpo) { - LASSERT(!pool->fpo_map_count); + LASSERT(!fpo->fpo_map_count); - if (pool->fpo_fmr_pool) - ib_destroy_fmr_pool(pool->fpo_fmr_pool); + if (fpo->fpo_is_fmr) { + if (fpo->fmr.fpo_fmr_pool) + ib_destroy_fmr_pool(fpo->fmr.fpo_fmr_pool); + } else { + struct kib_fast_reg_descriptor *frd, *tmp; + int i = 0; + + list_for_each_entry_safe(frd, tmp, &fpo->fast_reg.fpo_pool_list, + frd_list) { + list_del(&frd->frd_list); + ib_dereg_mr(frd->frd_mr); + LIBCFS_FREE(frd, sizeof(*frd)); + i++; + } + if (i < fpo->fast_reg.fpo_pool_size) + CERROR("FastReg pool still has %d regions registered\n", + fpo->fast_reg.fpo_pool_size - i); + } - if (pool->fpo_hdev) - kiblnd_hdev_decref(pool->fpo_hdev); + if (fpo->fpo_hdev) + kiblnd_hdev_decref(fpo->fpo_hdev); - LIBCFS_FREE(pool, sizeof(*pool)); + LIBCFS_FREE(fpo, sizeof(*fpo)); } static void kiblnd_destroy_fmr_pool_list(struct list_head *head) { - kib_fmr_pool_t *pool; + struct kib_fmr_pool *fpo, *tmp; - while (!list_empty(head)) { - pool = list_entry(head->next, kib_fmr_pool_t, fpo_list); - list_del(&pool->fpo_list); - kiblnd_destroy_fmr_pool(pool); + list_for_each_entry_safe(fpo, tmp, head, fpo_list) { + list_del(&fpo->fpo_list); + kiblnd_destroy_fmr_pool(fpo); } } -static int kiblnd_fmr_pool_size(int ncpts) +static int +kiblnd_fmr_pool_size(struct lnet_ioctl_config_o2iblnd_tunables *tunables, + int ncpts) { - int size = *kiblnd_tunables.kib_fmr_pool_size / ncpts; + int size = tunables->lnd_fmr_pool_size / ncpts; return max(IBLND_FMR_POOL, size); } -static int kiblnd_fmr_flush_trigger(int ncpts) +static int +kiblnd_fmr_flush_trigger(struct lnet_ioctl_config_o2iblnd_tunables *tunables, + int ncpts) { - int size = *kiblnd_tunables.kib_fmr_flush_trigger / ncpts; + int size = tunables->lnd_fmr_flush_trigger / ncpts; return max(IBLND_FMR_POOL_FLUSH, size); } -static int kiblnd_create_fmr_pool(kib_fmr_poolset_t *fps, - kib_fmr_pool_t **pp_fpo) +static int kiblnd_alloc_fmr_pool(struct kib_fmr_poolset *fps, struct kib_fmr_pool *fpo) { - /* FMR pool for RDMA */ - kib_dev_t *dev = fps->fps_net->ibn_dev; - kib_fmr_pool_t *fpo; struct ib_fmr_pool_param 
param = { .max_pages_per_fmr = LNET_MAX_PAYLOAD / PAGE_SIZE, .page_shift = PAGE_SHIFT, @@ -1351,7 +1368,78 @@ static int kiblnd_create_fmr_pool(kib_fmr_poolset_t *fps, .dirty_watermark = fps->fps_flush_trigger, .flush_function = NULL, .flush_arg = NULL, - .cache = !!*kiblnd_tunables.kib_fmr_cache}; + .cache = !!fps->fps_cache }; + int rc = 0; + + fpo->fmr.fpo_fmr_pool = ib_create_fmr_pool(fpo->fpo_hdev->ibh_pd, + ¶m); + if (IS_ERR(fpo->fmr.fpo_fmr_pool)) { + rc = PTR_ERR(fpo->fmr.fpo_fmr_pool); + if (rc != -ENOSYS) + CERROR("Failed to create FMR pool: %d\n", rc); + else + CERROR("FMRs are not supported\n"); + } + + return rc; +} + +static int kiblnd_alloc_freg_pool(struct kib_fmr_poolset *fps, struct kib_fmr_pool *fpo) +{ + struct kib_fast_reg_descriptor *frd, *tmp; + int i, rc; + + INIT_LIST_HEAD(&fpo->fast_reg.fpo_pool_list); + fpo->fast_reg.fpo_pool_size = 0; + for (i = 0; i < fps->fps_pool_size; i++) { + LIBCFS_CPT_ALLOC(frd, lnet_cpt_table(), fps->fps_cpt, + sizeof(*frd)); + if (!frd) { + CERROR("Failed to allocate a new fast_reg descriptor\n"); + rc = -ENOMEM; + goto out; + } + + frd->frd_mr = ib_alloc_mr(fpo->fpo_hdev->ibh_pd, + IB_MR_TYPE_MEM_REG, + LNET_MAX_PAYLOAD / PAGE_SIZE); + if (IS_ERR(frd->frd_mr)) { + rc = PTR_ERR(frd->frd_mr); + CERROR("Failed to allocate ib_alloc_mr: %d\n", rc); + frd->frd_mr = NULL; + goto out_middle; + } + + frd->frd_valid = true; + + list_add_tail(&frd->frd_list, &fpo->fast_reg.fpo_pool_list); + fpo->fast_reg.fpo_pool_size++; + } + + return 0; + +out_middle: + if (frd->frd_mr) + ib_dereg_mr(frd->frd_mr); + LIBCFS_FREE(frd, sizeof(*frd)); + +out: + list_for_each_entry_safe(frd, tmp, &fpo->fast_reg.fpo_pool_list, + frd_list) { + list_del(&frd->frd_list); + ib_dereg_mr(frd->frd_mr); + LIBCFS_FREE(frd, sizeof(*frd)); + } + + return rc; +} + +static int kiblnd_create_fmr_pool(struct kib_fmr_poolset *fps, + struct kib_fmr_pool **pp_fpo) +{ + struct kib_dev *dev = fps->fps_net->ibn_dev; + struct ib_device_attr *dev_attr; + struct kib_fmr_pool *fpo; int rc; LIBCFS_CPT_ALLOC(fpo, lnet_cpt_table(), fps->fps_cpt, sizeof(*fpo)); @@ -1359,25 +1447,44 @@ static int kiblnd_create_fmr_pool(kib_fmr_poolset_t *fps, return -ENOMEM; fpo->fpo_hdev = kiblnd_current_hdev(dev); - - fpo->fpo_fmr_pool = ib_create_fmr_pool(fpo->fpo_hdev->ibh_pd, ¶m); - if (IS_ERR(fpo->fpo_fmr_pool)) { - rc = PTR_ERR(fpo->fpo_fmr_pool); - CERROR("Failed to create FMR pool: %d\n", rc); - - kiblnd_hdev_decref(fpo->fpo_hdev); - LIBCFS_FREE(fpo, sizeof(*fpo)); - return rc; + dev_attr = &fpo->fpo_hdev->ibh_ibdev->attrs; + + /* Check for FMR or FastReg support */ + fpo->fpo_is_fmr = 0; + if (fpo->fpo_hdev->ibh_ibdev->alloc_fmr && + fpo->fpo_hdev->ibh_ibdev->dealloc_fmr && + fpo->fpo_hdev->ibh_ibdev->map_phys_fmr && + fpo->fpo_hdev->ibh_ibdev->unmap_fmr) { + LCONSOLE_INFO("Using FMR for registration\n"); + fpo->fpo_is_fmr = 1; + } else if (dev_attr->device_cap_flags & IB_DEVICE_MEM_MGT_EXTENSIONS) { + LCONSOLE_INFO("Using FastReg for registration\n"); + } else { + rc = -ENOSYS; + LCONSOLE_ERROR_MSG(rc, "IB device does not support FMRs nor FastRegs, can't register memory\n"); + goto out_fpo; } + if (fpo->fpo_is_fmr) + rc = kiblnd_alloc_fmr_pool(fps, fpo); + else + rc = kiblnd_alloc_freg_pool(fps, fpo); + if (rc) + goto out_fpo; + fpo->fpo_deadline = cfs_time_shift(IBLND_POOL_DEADLINE); - fpo->fpo_owner = fps; + fpo->fpo_owner = fps; *pp_fpo = fpo; return 0; + +out_fpo: + kiblnd_hdev_decref(fpo->fpo_hdev); + LIBCFS_FREE(fpo, sizeof(*fpo)); + return rc; } -static void 
kiblnd_fail_fmr_poolset(kib_fmr_poolset_t *fps, +static void kiblnd_fail_fmr_poolset(struct kib_fmr_poolset *fps, struct list_head *zombies) { if (!fps->fps_net) /* intialized? */ @@ -1386,8 +1493,8 @@ static void kiblnd_fail_fmr_poolset(kib_fmr_poolset_t *fps, spin_lock(&fps->fps_lock); while (!list_empty(&fps->fps_pool_list)) { - kib_fmr_pool_t *fpo = list_entry(fps->fps_pool_list.next, - kib_fmr_pool_t, fpo_list); + struct kib_fmr_pool *fpo = list_entry(fps->fps_pool_list.next, + struct kib_fmr_pool, fpo_list); fpo->fpo_failed = 1; list_del(&fpo->fpo_list); if (!fpo->fpo_map_count) @@ -1399,7 +1506,7 @@ static void kiblnd_fail_fmr_poolset(kib_fmr_poolset_t *fps, spin_unlock(&fps->fps_lock); } -static void kiblnd_fini_fmr_poolset(kib_fmr_poolset_t *fps) +static void kiblnd_fini_fmr_poolset(struct kib_fmr_poolset *fps) { if (fps->fps_net) { /* initialized? */ kiblnd_destroy_fmr_pool_list(&fps->fps_failed_pool_list); @@ -1407,19 +1514,23 @@ static void kiblnd_fini_fmr_poolset(kib_fmr_poolset_t *fps) } } -static int kiblnd_init_fmr_poolset(kib_fmr_poolset_t *fps, int cpt, - kib_net_t *net, int pool_size, - int flush_trigger) +static int +kiblnd_init_fmr_poolset(struct kib_fmr_poolset *fps, int cpt, int ncpts, + struct kib_net *net, + struct lnet_ioctl_config_o2iblnd_tunables *tunables) { - kib_fmr_pool_t *fpo; + struct kib_fmr_pool *fpo; int rc; memset(fps, 0, sizeof(*fps)); fps->fps_net = net; fps->fps_cpt = cpt; - fps->fps_pool_size = pool_size; - fps->fps_flush_trigger = flush_trigger; + + fps->fps_pool_size = kiblnd_fmr_pool_size(tunables, ncpts); + fps->fps_flush_trigger = kiblnd_fmr_flush_trigger(tunables, ncpts); + fps->fps_cache = tunables->lnd_fmr_cache; + spin_lock_init(&fps->fps_lock); INIT_LIST_HEAD(&fps->fps_pool_list); INIT_LIST_HEAD(&fps->fps_failed_pool_list); @@ -1431,7 +1542,7 @@ static int kiblnd_init_fmr_poolset(kib_fmr_poolset_t *fps, int cpt, return rc; } -static int kiblnd_fmr_pool_is_idle(kib_fmr_pool_t *fpo, unsigned long now) +static int kiblnd_fmr_pool_is_idle(struct kib_fmr_pool *fpo, unsigned long now) { if (fpo->fpo_map_count) /* still in use */ return 0; @@ -1440,25 +1551,64 @@ static int kiblnd_fmr_pool_is_idle(kib_fmr_pool_t *fpo, unsigned long now) return cfs_time_aftereq(now, fpo->fpo_deadline); } -void kiblnd_fmr_pool_unmap(kib_fmr_t *fmr, int status) +static int +kiblnd_map_tx_pages(struct kib_tx *tx, struct kib_rdma_desc *rd) +{ + __u64 *pages = tx->tx_pages; + struct kib_hca_dev *hdev; + int npages; + int size; + int i; + + hdev = tx->tx_pool->tpo_hdev; + + for (i = 0, npages = 0; i < rd->rd_nfrags; i++) { + for (size = 0; size < rd->rd_frags[i].rf_nob; + size += hdev->ibh_page_size) { + pages[npages++] = (rd->rd_frags[i].rf_addr & + hdev->ibh_page_mask) + size; + } + } + + return npages; +} + +void kiblnd_fmr_pool_unmap(struct kib_fmr *fmr, int status) { LIST_HEAD(zombies); - kib_fmr_pool_t *fpo = fmr->fmr_pool; - kib_fmr_poolset_t *fps = fpo->fpo_owner; + struct kib_fmr_pool *fpo = fmr->fmr_pool; + struct kib_fmr_poolset *fps; unsigned long now = cfs_time_current(); - kib_fmr_pool_t *tmp; + struct kib_fmr_pool *tmp; int rc; - rc = ib_fmr_pool_unmap(fmr->fmr_pfmr); - LASSERT(!rc); + if (!fpo) + return; - if (status) { - rc = ib_flush_fmr_pool(fpo->fpo_fmr_pool); - LASSERT(!rc); - } + fps = fpo->fpo_owner; + if (fpo->fpo_is_fmr) { + if (fmr->fmr_pfmr) { + rc = ib_fmr_pool_unmap(fmr->fmr_pfmr); + LASSERT(!rc); + fmr->fmr_pfmr = NULL; + } + + if (status) { + rc = ib_flush_fmr_pool(fpo->fmr.fpo_fmr_pool); + LASSERT(!rc); + } + } else { + struct 
kib_fast_reg_descriptor *frd = fmr->fmr_frd; + if (frd) { + frd->frd_valid = false; + spin_lock(&fps->fps_lock); + list_add_tail(&frd->frd_list, &fpo->fast_reg.fpo_pool_list); + spin_unlock(&fps->fps_lock); + fmr->fmr_frd = NULL; + } + } fmr->fmr_pool = NULL; - fmr->fmr_pfmr = NULL; spin_lock(&fps->fps_lock); fpo->fpo_map_count--; /* decref the pool */ @@ -1479,11 +1629,15 @@ void kiblnd_fmr_pool_unmap(kib_fmr_t *fmr, int status) kiblnd_destroy_fmr_pool_list(&zombies); } -int kiblnd_fmr_pool_map(kib_fmr_poolset_t *fps, __u64 *pages, int npages, - __u64 iov, kib_fmr_t *fmr) +int kiblnd_fmr_pool_map(struct kib_fmr_poolset *fps, struct kib_tx *tx, + struct kib_rdma_desc *rd, __u32 nob, __u64 iov, + struct kib_fmr *fmr) { - struct ib_pool_fmr *pfmr; - kib_fmr_pool_t *fpo; + __u64 *pages = tx->tx_pages; + bool is_rx = (rd != tx->tx_rd); + bool tx_pages_mapped = 0; + struct kib_fmr_pool *fpo; + int npages = 0; __u64 version; int rc; @@ -1493,21 +1647,95 @@ int kiblnd_fmr_pool_map(kib_fmr_poolset_t *fps, __u64 *pages, int npages, list_for_each_entry(fpo, &fps->fps_pool_list, fpo_list) { fpo->fpo_deadline = cfs_time_shift(IBLND_POOL_DEADLINE); fpo->fpo_map_count++; - spin_unlock(&fps->fps_lock); - pfmr = ib_fmr_pool_map_phys(fpo->fpo_fmr_pool, - pages, npages, iov); - if (likely(!IS_ERR(pfmr))) { - fmr->fmr_pool = fpo; - fmr->fmr_pfmr = pfmr; - return 0; + if (fpo->fpo_is_fmr) { + struct ib_pool_fmr *pfmr; + + spin_unlock(&fps->fps_lock); + + if (!tx_pages_mapped) { + npages = kiblnd_map_tx_pages(tx, rd); + tx_pages_mapped = 1; + } + + pfmr = ib_fmr_pool_map_phys(fpo->fmr.fpo_fmr_pool, + pages, npages, iov); + if (likely(!IS_ERR(pfmr))) { + fmr->fmr_key = is_rx ? pfmr->fmr->rkey : + pfmr->fmr->lkey; + fmr->fmr_frd = NULL; + fmr->fmr_pfmr = pfmr; + fmr->fmr_pool = fpo; + return 0; + } + rc = PTR_ERR(pfmr); + } else { + if (!list_empty(&fpo->fast_reg.fpo_pool_list)) { + struct kib_fast_reg_descriptor *frd; + struct ib_reg_wr *wr; + struct ib_mr *mr; + int n; + + frd = list_first_entry(&fpo->fast_reg.fpo_pool_list, + struct kib_fast_reg_descriptor, + frd_list); + list_del(&frd->frd_list); + spin_unlock(&fps->fps_lock); + + mr = frd->frd_mr; + + if (!frd->frd_valid) { + __u32 key = is_rx ? mr->rkey : mr->lkey; + struct ib_send_wr *inv_wr; + + inv_wr = &frd->frd_inv_wr; + memset(inv_wr, 0, sizeof(*inv_wr)); + inv_wr->opcode = IB_WR_LOCAL_INV; + inv_wr->wr_id = IBLND_WID_MR; + inv_wr->ex.invalidate_rkey = key; + + /* Bump the key */ + key = ib_inc_rkey(key); + ib_update_fast_reg_key(mr, key); + } + + n = ib_map_mr_sg(mr, tx->tx_frags, + tx->tx_nfrags, NULL, PAGE_SIZE); + if (unlikely(n != tx->tx_nfrags)) { + CERROR("Failed to map mr %d/%d elements\n", + n, tx->tx_nfrags); + return n < 0 ? n : -EINVAL; + } + + mr->iova = iov; + + /* Prepare FastReg WR */ + wr = &frd->frd_fastreg_wr; + memset(wr, 0, sizeof(*wr)); + wr->wr.opcode = IB_WR_REG_MR; + wr->wr.wr_id = IBLND_WID_MR; + wr->wr.num_sge = 0; + wr->wr.send_flags = 0; + wr->mr = mr; + wr->key = is_rx ? mr->rkey : mr->lkey; + wr->access = (IB_ACCESS_LOCAL_WRITE | + IB_ACCESS_REMOTE_WRITE); + + fmr->fmr_key = is_rx ? mr->rkey : mr->lkey; + fmr->fmr_frd = frd; + fmr->fmr_pfmr = NULL; + fmr->fmr_pool = fpo; + return 0; + } + spin_unlock(&fps->fps_lock); + rc = -EBUSY; } spin_lock(&fps->fps_lock); fpo->fpo_map_count--; - if (PTR_ERR(pfmr) != -EAGAIN) { + if (rc != -EAGAIN) { spin_unlock(&fps->fps_lock); - return PTR_ERR(pfmr); + return rc; } /* EAGAIN and ... 
*/ @@ -1548,7 +1776,7 @@ int kiblnd_fmr_pool_map(kib_fmr_poolset_t *fps, __u64 *pages, int npages, goto again; } -static void kiblnd_fini_pool(kib_pool_t *pool) +static void kiblnd_fini_pool(struct kib_pool *pool) { LASSERT(list_empty(&pool->po_free_list)); LASSERT(!pool->po_allocated); @@ -1556,7 +1784,7 @@ static void kiblnd_fini_pool(kib_pool_t *pool) CDEBUG(D_NET, "Finalize %s pool\n", pool->po_owner->ps_name); } -static void kiblnd_init_pool(kib_poolset_t *ps, kib_pool_t *pool, int size) +static void kiblnd_init_pool(struct kib_poolset *ps, struct kib_pool *pool, int size) { CDEBUG(D_NET, "Initialize %s pool\n", ps->ps_name); @@ -1569,10 +1797,10 @@ static void kiblnd_init_pool(kib_poolset_t *ps, kib_pool_t *pool, int size) static void kiblnd_destroy_pool_list(struct list_head *head) { - kib_pool_t *pool; + struct kib_pool *pool; while (!list_empty(head)) { - pool = list_entry(head->next, kib_pool_t, po_list); + pool = list_entry(head->next, struct kib_pool, po_list); list_del(&pool->po_list); LASSERT(pool->po_owner); @@ -1580,15 +1808,15 @@ static void kiblnd_destroy_pool_list(struct list_head *head) } } -static void kiblnd_fail_poolset(kib_poolset_t *ps, struct list_head *zombies) +static void kiblnd_fail_poolset(struct kib_poolset *ps, struct list_head *zombies) { if (!ps->ps_net) /* intialized? */ return; spin_lock(&ps->ps_lock); while (!list_empty(&ps->ps_pool_list)) { - kib_pool_t *po = list_entry(ps->ps_pool_list.next, - kib_pool_t, po_list); + struct kib_pool *po = list_entry(ps->ps_pool_list.next, + struct kib_pool, po_list); po->po_failed = 1; list_del(&po->po_list); if (!po->po_allocated) @@ -1599,7 +1827,7 @@ static void kiblnd_fail_poolset(kib_poolset_t *ps, struct list_head *zombies) spin_unlock(&ps->ps_lock); } -static void kiblnd_fini_poolset(kib_poolset_t *ps) +static void kiblnd_fini_poolset(struct kib_poolset *ps) { if (ps->ps_net) { /* initialized? 
*/ kiblnd_destroy_pool_list(&ps->ps_failed_pool_list); @@ -1607,14 +1835,14 @@ static void kiblnd_fini_poolset(kib_poolset_t *ps) } } -static int kiblnd_init_poolset(kib_poolset_t *ps, int cpt, - kib_net_t *net, char *name, int size, +static int kiblnd_init_poolset(struct kib_poolset *ps, int cpt, + struct kib_net *net, char *name, int size, kib_ps_pool_create_t po_create, kib_ps_pool_destroy_t po_destroy, kib_ps_node_init_t nd_init, kib_ps_node_fini_t nd_fini) { - kib_pool_t *pool; + struct kib_pool *pool; int rc; memset(ps, 0, sizeof(*ps)); @@ -1642,7 +1870,7 @@ static int kiblnd_init_poolset(kib_poolset_t *ps, int cpt, return rc; } -static int kiblnd_pool_is_idle(kib_pool_t *pool, unsigned long now) +static int kiblnd_pool_is_idle(struct kib_pool *pool, unsigned long now) { if (pool->po_allocated) /* still in use */ return 0; @@ -1651,11 +1879,11 @@ static int kiblnd_pool_is_idle(kib_pool_t *pool, unsigned long now) return cfs_time_aftereq(now, pool->po_deadline); } -void kiblnd_pool_free_node(kib_pool_t *pool, struct list_head *node) +void kiblnd_pool_free_node(struct kib_pool *pool, struct list_head *node) { LIST_HEAD(zombies); - kib_poolset_t *ps = pool->po_owner; - kib_pool_t *tmp; + struct kib_poolset *ps = pool->po_owner; + struct kib_pool *tmp; unsigned long now = cfs_time_current(); spin_lock(&ps->ps_lock); @@ -1681,10 +1909,10 @@ void kiblnd_pool_free_node(kib_pool_t *pool, struct list_head *node) kiblnd_destroy_pool_list(&zombies); } -struct list_head *kiblnd_pool_alloc_node(kib_poolset_t *ps) +struct list_head *kiblnd_pool_alloc_node(struct kib_poolset *ps) { struct list_head *node; - kib_pool_t *pool; + struct kib_pool *pool; unsigned int interval = 1; unsigned long time_before; unsigned int trips = 0; @@ -1754,9 +1982,9 @@ struct list_head *kiblnd_pool_alloc_node(kib_poolset_t *ps) goto again; } -static void kiblnd_destroy_tx_pool(kib_pool_t *pool) +static void kiblnd_destroy_tx_pool(struct kib_pool *pool) { - kib_tx_pool_t *tpo = container_of(pool, kib_tx_pool_t, tpo_pool); + struct kib_tx_pool *tpo = container_of(pool, struct kib_tx_pool, tpo_pool); int i; LASSERT(!pool->po_allocated); @@ -1770,7 +1998,7 @@ static void kiblnd_destroy_tx_pool(kib_pool_t *pool) goto out; for (i = 0; i < pool->po_size; i++) { - kib_tx_t *tx = &tpo->tpo_tx_descs[i]; + struct kib_tx *tx = &tpo->tpo_tx_descs[i]; list_del(&tx->tx_list); if (tx->tx_pages) @@ -1779,8 +2007,8 @@ static void kiblnd_destroy_tx_pool(kib_pool_t *pool) sizeof(*tx->tx_pages)); if (tx->tx_frags) LIBCFS_FREE(tx->tx_frags, - IBLND_MAX_RDMA_FRAGS * - sizeof(*tx->tx_frags)); + (1 + IBLND_MAX_RDMA_FRAGS) * + sizeof(*tx->tx_frags)); if (tx->tx_wrq) LIBCFS_FREE(tx->tx_wrq, (1 + IBLND_MAX_RDMA_FRAGS) * @@ -1791,12 +2019,12 @@ static void kiblnd_destroy_tx_pool(kib_pool_t *pool) sizeof(*tx->tx_sge)); if (tx->tx_rd) LIBCFS_FREE(tx->tx_rd, - offsetof(kib_rdma_desc_t, + offsetof(struct kib_rdma_desc, rd_frags[IBLND_MAX_RDMA_FRAGS])); } LIBCFS_FREE(tpo->tpo_tx_descs, - pool->po_size * sizeof(kib_tx_t)); + pool->po_size * sizeof(struct kib_tx)); out: kiblnd_fini_pool(pool); LIBCFS_FREE(tpo, sizeof(*tpo)); @@ -1809,13 +2037,13 @@ static int kiblnd_tx_pool_size(int ncpts) return max(IBLND_TX_POOL, ntx); } -static int kiblnd_create_tx_pool(kib_poolset_t *ps, int size, - kib_pool_t **pp_po) +static int kiblnd_create_tx_pool(struct kib_poolset *ps, int size, + struct kib_pool **pp_po) { int i; int npg; - kib_pool_t *pool; - kib_tx_pool_t *tpo; + struct kib_pool *pool; + struct kib_tx_pool *tpo; LIBCFS_CPT_ALLOC(tpo, lnet_cpt_table(), 
ps->ps_cpt, sizeof(*tpo)); if (!tpo) { @@ -1836,17 +2064,17 @@ static int kiblnd_create_tx_pool(kib_poolset_t *ps, int size, } LIBCFS_CPT_ALLOC(tpo->tpo_tx_descs, lnet_cpt_table(), ps->ps_cpt, - size * sizeof(kib_tx_t)); + size * sizeof(struct kib_tx)); if (!tpo->tpo_tx_descs) { CERROR("Can't allocate %d tx descriptors\n", size); ps->ps_pool_destroy(pool); return -ENOMEM; } - memset(tpo->tpo_tx_descs, 0, size * sizeof(kib_tx_t)); + memset(tpo->tpo_tx_descs, 0, size * sizeof(struct kib_tx)); for (i = 0; i < size; i++) { - kib_tx_t *tx = &tpo->tpo_tx_descs[i]; + struct kib_tx *tx = &tpo->tpo_tx_descs[i]; tx->tx_pool = tpo; if (ps->ps_net->ibn_fmr_ps) { @@ -1858,11 +2086,12 @@ static int kiblnd_create_tx_pool(kib_poolset_t *ps, int size, } LIBCFS_CPT_ALLOC(tx->tx_frags, lnet_cpt_table(), ps->ps_cpt, - IBLND_MAX_RDMA_FRAGS * sizeof(*tx->tx_frags)); + (1 + IBLND_MAX_RDMA_FRAGS) * + sizeof(*tx->tx_frags)); if (!tx->tx_frags) break; - sg_init_table(tx->tx_frags, IBLND_MAX_RDMA_FRAGS); + sg_init_table(tx->tx_frags, IBLND_MAX_RDMA_FRAGS + 1); LIBCFS_CPT_ALLOC(tx->tx_wrq, lnet_cpt_table(), ps->ps_cpt, (1 + IBLND_MAX_RDMA_FRAGS) * @@ -1877,7 +2106,7 @@ static int kiblnd_create_tx_pool(kib_poolset_t *ps, int size, break; LIBCFS_CPT_ALLOC(tx->tx_rd, lnet_cpt_table(), ps->ps_cpt, - offsetof(kib_rdma_desc_t, + offsetof(struct kib_rdma_desc, rd_frags[IBLND_MAX_RDMA_FRAGS])); if (!tx->tx_rd) break; @@ -1893,22 +2122,23 @@ static int kiblnd_create_tx_pool(kib_poolset_t *ps, int size, return -ENOMEM; } -static void kiblnd_tx_init(kib_pool_t *pool, struct list_head *node) +static void kiblnd_tx_init(struct kib_pool *pool, struct list_head *node) { - kib_tx_poolset_t *tps = container_of(pool->po_owner, kib_tx_poolset_t, - tps_poolset); - kib_tx_t *tx = list_entry(node, kib_tx_t, tx_list); + struct kib_tx_poolset *tps = container_of(pool->po_owner, + struct kib_tx_poolset, + tps_poolset); + struct kib_tx *tx = list_entry(node, struct kib_tx, tx_list); tx->tx_cookie = tps->tps_next_tx_cookie++; } -static void kiblnd_net_fini_pools(kib_net_t *net) +static void kiblnd_net_fini_pools(struct kib_net *net) { int i; cfs_cpt_for_each(i, lnet_cpt_table()) { - kib_tx_poolset_t *tps; - kib_fmr_poolset_t *fps; + struct kib_tx_poolset *tps; + struct kib_fmr_poolset *fps; if (net->ibn_tx_ps) { tps = net->ibn_tx_ps[i]; @@ -1932,25 +2162,28 @@ static void kiblnd_net_fini_pools(kib_net_t *net) } } -static int kiblnd_net_init_pools(kib_net_t *net, __u32 *cpts, int ncpts) +static int kiblnd_net_init_pools(struct kib_net *net, lnet_ni_t *ni, __u32 *cpts, + int ncpts) { + struct lnet_ioctl_config_o2iblnd_tunables *tunables; unsigned long flags; int cpt; - int rc = 0; + int rc; int i; + tunables = &ni->ni_lnd_tunables->lt_tun_u.lt_o2ib; + read_lock_irqsave(&kiblnd_data.kib_global_lock, flags); - if (!*kiblnd_tunables.kib_map_on_demand) { + if (!tunables->lnd_map_on_demand) { read_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags); goto create_tx_pool; } read_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags); - if (*kiblnd_tunables.kib_fmr_pool_size < - *kiblnd_tunables.kib_ntx / 4) { + if (tunables->lnd_fmr_pool_size < *kiblnd_tunables.kib_ntx / 4) { CERROR("Can't set fmr pool size (%d) < ntx / 4(%d)\n", - *kiblnd_tunables.kib_fmr_pool_size, + tunables->lnd_fmr_pool_size, *kiblnd_tunables.kib_ntx / 4); rc = -EINVAL; goto failed; @@ -1965,10 +2198,13 @@ static int kiblnd_net_init_pools(kib_net_t *net, __u32 *cpts, int ncpts) /* * premapping can fail if ibd_nmr > 1, so we always create * FMR pool and map-on-demand if 
premapping failed + * + * cfs_precpt_alloc is creating an array of struct kib_fmr_poolset + * The number of struct kib_fmr_poolsets create is equal to the + * number of CPTs that exist, i.e net->ibn_fmr_ps[cpt]. */ - net->ibn_fmr_ps = cfs_percpt_alloc(lnet_cpt_table(), - sizeof(kib_fmr_poolset_t)); + sizeof(struct kib_fmr_poolset)); if (!net->ibn_fmr_ps) { CERROR("Failed to allocate FMR pool array\n"); rc = -ENOMEM; @@ -1977,9 +2213,8 @@ static int kiblnd_net_init_pools(kib_net_t *net, __u32 *cpts, int ncpts) for (i = 0; i < ncpts; i++) { cpt = !cpts ? i : cpts[i]; - rc = kiblnd_init_fmr_poolset(net->ibn_fmr_ps[cpt], cpt, net, - kiblnd_fmr_pool_size(ncpts), - kiblnd_fmr_flush_trigger(ncpts)); + rc = kiblnd_init_fmr_poolset(net->ibn_fmr_ps[cpt], cpt, ncpts, + net, tunables); if (rc) { CERROR("Can't initialize FMR pool for CPT %d: %d\n", cpt, rc); @@ -1991,8 +2226,13 @@ static int kiblnd_net_init_pools(kib_net_t *net, __u32 *cpts, int ncpts) LASSERT(i == ncpts); create_tx_pool: + /* + * cfs_precpt_alloc is creating an array of struct kib_tx_poolset + * The number of struct kib_tx_poolsets create is equal to the + * number of CPTs that exist, i.e net->ibn_tx_ps[cpt]. + */ net->ibn_tx_ps = cfs_percpt_alloc(lnet_cpt_table(), - sizeof(kib_tx_poolset_t)); + sizeof(struct kib_tx_poolset)); if (!net->ibn_tx_ps) { CERROR("Failed to allocate tx pool array\n"); rc = -ENOMEM; @@ -2021,7 +2261,7 @@ static int kiblnd_net_init_pools(kib_net_t *net, __u32 *cpts, int ncpts) return rc; } -static int kiblnd_hdev_get_attr(kib_hca_dev_t *hdev) +static int kiblnd_hdev_get_attr(struct kib_hca_dev *hdev) { /* * It's safe to assume a HCA can handle a page size @@ -2041,7 +2281,7 @@ static int kiblnd_hdev_get_attr(kib_hca_dev_t *hdev) return -EINVAL; } -static void kiblnd_hdev_cleanup_mrs(kib_hca_dev_t *hdev) +static void kiblnd_hdev_cleanup_mrs(struct kib_hca_dev *hdev) { if (!hdev->ibh_mrs) return; @@ -2051,7 +2291,7 @@ static void kiblnd_hdev_cleanup_mrs(kib_hca_dev_t *hdev) hdev->ibh_mrs = NULL; } -void kiblnd_hdev_destroy(kib_hca_dev_t *hdev) +void kiblnd_hdev_destroy(struct kib_hca_dev *hdev) { kiblnd_hdev_cleanup_mrs(hdev); @@ -2064,7 +2304,7 @@ void kiblnd_hdev_destroy(kib_hca_dev_t *hdev) LIBCFS_FREE(hdev, sizeof(*hdev)); } -static int kiblnd_hdev_setup_mrs(kib_hca_dev_t *hdev) +static int kiblnd_hdev_setup_mrs(struct kib_hca_dev *hdev) { struct ib_mr *mr; int rc; @@ -2093,7 +2333,7 @@ static int kiblnd_dummy_callback(struct rdma_cm_id *cmid, return 0; } -static int kiblnd_dev_need_failover(kib_dev_t *dev) +static int kiblnd_dev_need_failover(struct kib_dev *dev) { struct rdma_cm_id *cmid; struct sockaddr_in srcaddr; @@ -2147,15 +2387,15 @@ static int kiblnd_dev_need_failover(kib_dev_t *dev) return rc; } -int kiblnd_dev_failover(kib_dev_t *dev) +int kiblnd_dev_failover(struct kib_dev *dev) { LIST_HEAD(zombie_tpo); LIST_HEAD(zombie_ppo); LIST_HEAD(zombie_fpo); struct rdma_cm_id *cmid = NULL; - kib_hca_dev_t *hdev = NULL; + struct kib_hca_dev *hdev = NULL; struct ib_pd *pd; - kib_net_t *net; + struct kib_net *net; struct sockaddr_in addr; unsigned long flags; int rc = 0; @@ -2280,7 +2520,7 @@ int kiblnd_dev_failover(kib_dev_t *dev) return rc; } -void kiblnd_destroy_dev(kib_dev_t *dev) +void kiblnd_destroy_dev(struct kib_dev *dev) { LASSERT(!dev->ibd_nnets); LASSERT(list_empty(&dev->ibd_nets)); @@ -2294,10 +2534,10 @@ void kiblnd_destroy_dev(kib_dev_t *dev) LIBCFS_FREE(dev, sizeof(*dev)); } -static kib_dev_t *kiblnd_create_dev(char *ifname) +static struct kib_dev *kiblnd_create_dev(char *ifname) { struct 
net_device *netdev; - kib_dev_t *dev; + struct kib_dev *dev; __u32 netmask; __u32 ip; int up; @@ -2412,7 +2652,7 @@ static void kiblnd_base_shutdown(void) static void kiblnd_shutdown(lnet_ni_t *ni) { - kib_net_t *net = ni->ni_data; + struct kib_net *net = ni->ni_data; rwlock_t *g_lock = &kiblnd_data.kib_global_lock; int i; unsigned long flags; @@ -2609,7 +2849,7 @@ static int kiblnd_start_schedulers(struct kib_sched_info *sched) return rc; } -static int kiblnd_dev_start_threads(kib_dev_t *dev, int newdev, __u32 *cpts, +static int kiblnd_dev_start_threads(struct kib_dev *dev, int newdev, __u32 *cpts, int ncpts) { int cpt; @@ -2635,10 +2875,10 @@ static int kiblnd_dev_start_threads(kib_dev_t *dev, int newdev, __u32 *cpts, return 0; } -static kib_dev_t *kiblnd_dev_search(char *ifname) +static struct kib_dev *kiblnd_dev_search(char *ifname) { - kib_dev_t *alias = NULL; - kib_dev_t *dev; + struct kib_dev *alias = NULL; + struct kib_dev *dev; char *colon; char *colon2; @@ -2670,8 +2910,8 @@ static kib_dev_t *kiblnd_dev_search(char *ifname) static int kiblnd_startup(lnet_ni_t *ni) { char *ifname; - kib_dev_t *ibdev = NULL; - kib_net_t *net; + struct kib_dev *ibdev = NULL; + struct kib_net *net; struct timespec64 tv; unsigned long flags; int rc; @@ -2694,10 +2934,9 @@ static int kiblnd_startup(lnet_ni_t *ni) net->ibn_incarnation = tv.tv_sec * USEC_PER_SEC + tv.tv_nsec / NSEC_PER_USEC; - ni->ni_peertimeout = *kiblnd_tunables.kib_peertimeout; - ni->ni_maxtxcredits = *kiblnd_tunables.kib_credits; - ni->ni_peertxcredits = *kiblnd_tunables.kib_peertxcredits; - ni->ni_peerrtrcredits = *kiblnd_tunables.kib_peerrtrcredits; + rc = kiblnd_tunables_setup(ni); + if (rc) + goto net_failed; if (ni->ni_interfaces[0]) { /* Use the IPoIB interface specified in 'networks=' */ @@ -2736,7 +2975,7 @@ static int kiblnd_startup(lnet_ni_t *ni) if (rc) goto failed; - rc = kiblnd_net_init_pools(net, ni->ni_cpts, ni->ni_ncpts); + rc = kiblnd_net_init_pools(net, ni, ni->ni_cpts, ni->ni_ncpts); if (rc) { CERROR("Failed to initialize NI pools: %d\n", rc); goto failed; @@ -2779,19 +3018,15 @@ static void __exit ko2iblnd_exit(void) static int __init ko2iblnd_init(void) { - int rc; - - CLASSERT(sizeof(kib_msg_t) <= IBLND_MSG_SIZE); - CLASSERT(offsetof(kib_msg_t, + CLASSERT(sizeof(struct kib_msg) <= IBLND_MSG_SIZE); + CLASSERT(offsetof(struct kib_msg, ibm_u.get.ibgm_rd.rd_frags[IBLND_MAX_RDMA_FRAGS]) <= IBLND_MSG_SIZE); - CLASSERT(offsetof(kib_msg_t, + CLASSERT(offsetof(struct kib_msg, ibm_u.putack.ibpam_rd.rd_frags[IBLND_MAX_RDMA_FRAGS]) <= IBLND_MSG_SIZE); - rc = kiblnd_tunables_init(); - if (rc) - return rc; + kiblnd_tunables_init(); lnet_register_lnd(&the_o2iblnd); diff --git a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.h b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.h index bfcbdd167da7..078a0c3e8845 100644 --- a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.h +++ b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.h @@ -15,11 +15,7 @@ * * You should have received a copy of the GNU General Public License * version 2 along with this program; If not, see - * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf - * - * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, - * CA 95054 USA or visit www.sun.com if you need additional information or - * have any questions. 
+ * http://www.gnu.org/licenses/gpl-2.0.html * * GPL HEADER END */ @@ -78,81 +74,47 @@ #define IBLND_N_SCHED 2 #define IBLND_N_SCHED_HIGH 4 -typedef struct { +struct kib_tunables { int *kib_dev_failover; /* HCA failover */ unsigned int *kib_service; /* IB service number */ int *kib_min_reconnect_interval; /* first failed connection retry... */ int *kib_max_reconnect_interval; /* exponentially increasing to this */ - int *kib_cksum; /* checksum kib_msg_t? */ + int *kib_cksum; /* checksum struct kib_msg? */ int *kib_timeout; /* comms timeout (seconds) */ int *kib_keepalive; /* keepalive timeout (seconds) */ int *kib_ntx; /* # tx descs */ - int *kib_credits; /* # concurrent sends */ - int *kib_peertxcredits; /* # concurrent sends to 1 peer */ - int *kib_peerrtrcredits; /* # per-peer router buffer credits */ - int *kib_peercredits_hiw; /* # when eagerly to return credits */ - int *kib_peertimeout; /* seconds to consider peer dead */ char **kib_default_ipif; /* default IPoIB interface */ int *kib_retry_count; int *kib_rnr_retry_count; - int *kib_concurrent_sends; /* send work queue sizing */ int *kib_ib_mtu; /* IB MTU */ - int *kib_map_on_demand; /* map-on-demand if RD has more */ - /* fragments than this value, 0 */ - /* disable map-on-demand */ - int *kib_fmr_pool_size; /* # FMRs in pool */ - int *kib_fmr_flush_trigger; /* When to trigger FMR flush */ - int *kib_fmr_cache; /* enable FMR pool cache? */ int *kib_require_priv_port; /* accept only privileged ports */ int *kib_use_priv_port; /* use privileged port for active connect */ int *kib_nscheds; /* # threads on each CPT */ -} kib_tunables_t; +}; -extern kib_tunables_t kiblnd_tunables; +extern struct kib_tunables kiblnd_tunables; #define IBLND_MSG_QUEUE_SIZE_V1 8 /* V1 only : # messages/RDMAs in-flight */ #define IBLND_CREDIT_HIGHWATER_V1 7 /* V1 only : when eagerly to return credits */ #define IBLND_CREDITS_DEFAULT 8 /* default # of peer credits */ -#define IBLND_CREDITS_MAX ((typeof(((kib_msg_t *) 0)->ibm_credits)) - 1) /* Max # of peer credits */ +#define IBLND_CREDITS_MAX ((typeof(((struct kib_msg *)0)->ibm_credits)) - 1) /* Max # of peer credits */ -#define IBLND_MSG_QUEUE_SIZE(v) ((v) == IBLND_MSG_VERSION_1 ? \ - IBLND_MSG_QUEUE_SIZE_V1 : \ - *kiblnd_tunables.kib_peertxcredits) /* # messages/RDMAs in-flight */ -#define IBLND_CREDITS_HIGHWATER(v) ((v) == IBLND_MSG_VERSION_1 ? \ - IBLND_CREDIT_HIGHWATER_V1 : \ - *kiblnd_tunables.kib_peercredits_hiw) /* when eagerly to return credits */ +/* when eagerly to return credits */ +#define IBLND_CREDITS_HIGHWATER(t, v) ((v) == IBLND_MSG_VERSION_1 ? \ + IBLND_CREDIT_HIGHWATER_V1 : \ + t->lnd_peercredits_hiw) -#define kiblnd_rdma_create_id(cb, dev, ps, qpt) rdma_create_id(&init_net, \ +#define kiblnd_rdma_create_id(cb, dev, ps, qpt) rdma_create_id(current->nsproxy->net_ns, \ cb, dev, \ ps, qpt) -static inline int -kiblnd_concurrent_sends_v1(void) -{ - if (*kiblnd_tunables.kib_concurrent_sends > IBLND_MSG_QUEUE_SIZE_V1 * 2) - return IBLND_MSG_QUEUE_SIZE_V1 * 2; - - if (*kiblnd_tunables.kib_concurrent_sends < IBLND_MSG_QUEUE_SIZE_V1 / 2) - return IBLND_MSG_QUEUE_SIZE_V1 / 2; - - return *kiblnd_tunables.kib_concurrent_sends; -} - -#define IBLND_CONCURRENT_SENDS(v) ((v) == IBLND_MSG_VERSION_1 ? \ - kiblnd_concurrent_sends_v1() : \ - *kiblnd_tunables.kib_concurrent_sends) /* 2 OOB shall suffice for 1 keepalive and 1 returning credits */ #define IBLND_OOB_CAPABLE(v) ((v) != IBLND_MSG_VERSION_1) #define IBLND_OOB_MSGS(v) (IBLND_OOB_CAPABLE(v) ? 
2 : 0) #define IBLND_MSG_SIZE (4 << 10) /* max size of queued messages (inc hdr) */ #define IBLND_MAX_RDMA_FRAGS LNET_MAX_IOV /* max # of fragments supported */ -#define IBLND_CFG_RDMA_FRAGS (*kiblnd_tunables.kib_map_on_demand ? \ - *kiblnd_tunables.kib_map_on_demand : \ - IBLND_MAX_RDMA_FRAGS) /* max # of fragments configured by user */ -#define IBLND_RDMA_FRAGS(v) ((v) == IBLND_MSG_VERSION_1 ? \ - IBLND_MAX_RDMA_FRAGS : IBLND_CFG_RDMA_FRAGS) /************************/ /* derived constants... */ @@ -171,7 +133,8 @@ kiblnd_concurrent_sends_v1(void) /* WRs and CQEs (per connection) */ #define IBLND_RECV_WRS(c) IBLND_RX_MSGS(c) #define IBLND_SEND_WRS(c) \ - ((c->ibc_max_frags + 1) * IBLND_CONCURRENT_SENDS(c->ibc_version)) + ((c->ibc_max_frags + 1) * kiblnd_concurrent_sends(c->ibc_version, \ + c->ibc_peer->ibp_ni)) #define IBLND_CQ_ENTRIES(c) (IBLND_RECV_WRS(c) + IBLND_SEND_WRS(c)) struct kib_hca_dev; @@ -183,7 +146,7 @@ struct kib_hca_dev; #define KIB_IFNAME_SIZE 256 #endif -typedef struct { +struct kib_dev { struct list_head ibd_list; /* chain on kib_devs */ struct list_head ibd_fail_list; /* chain on kib_failed_devs */ __u32 ibd_ifip; /* IPoIB interface IP */ @@ -198,9 +161,9 @@ typedef struct { unsigned int ibd_can_failover; /* IPoIB interface is a bonding master */ struct list_head ibd_nets; struct kib_hca_dev *ibd_hdev; -} kib_dev_t; +}; -typedef struct kib_hca_dev { +struct kib_hca_dev { struct rdma_cm_id *ibh_cmid; /* listener cmid */ struct ib_device *ibh_ibdev; /* IB device */ int ibh_page_shift; /* page shift of current HCA */ @@ -210,19 +173,19 @@ typedef struct kib_hca_dev { __u64 ibh_mr_size; /* size of MR */ struct ib_mr *ibh_mrs; /* global MR */ struct ib_pd *ibh_pd; /* PD */ - kib_dev_t *ibh_dev; /* owner */ + struct kib_dev *ibh_dev; /* owner */ atomic_t ibh_ref; /* refcount */ -} kib_hca_dev_t; +}; /** # of seconds to keep pool alive */ #define IBLND_POOL_DEADLINE 300 /** # of seconds to retry if allocation failed */ #define IBLND_POOL_RETRY 1 -typedef struct { +struct kib_pages { int ibp_npages; /* # pages */ struct page *ibp_pages[0]; /* page array */ -} kib_pages_t; +}; struct kib_pool; struct kib_poolset; @@ -237,7 +200,7 @@ struct kib_net; #define IBLND_POOL_NAME_LEN 32 -typedef struct kib_poolset { +struct kib_poolset { spinlock_t ps_lock; /* serialize */ struct kib_net *ps_net; /* network it belongs to */ char ps_name[IBLND_POOL_NAME_LEN]; /* pool set name */ @@ -253,31 +216,31 @@ typedef struct kib_poolset { kib_ps_pool_destroy_t ps_pool_destroy; /* destroy a pool */ kib_ps_node_init_t ps_node_init; /* initialize new allocated node */ kib_ps_node_fini_t ps_node_fini; /* finalize node */ -} kib_poolset_t; +}; -typedef struct kib_pool { +struct kib_pool { struct list_head po_list; /* chain on pool list */ struct list_head po_free_list; /* pre-allocated node */ - kib_poolset_t *po_owner; /* pool_set of this pool */ + struct kib_poolset *po_owner; /* pool_set of this pool */ unsigned long po_deadline; /* deadline of this pool */ int po_allocated; /* # of elements in use */ int po_failed; /* pool is created on failed HCA */ int po_size; /* # of pre-allocated elements */ -} kib_pool_t; +}; -typedef struct { - kib_poolset_t tps_poolset; /* pool-set */ +struct kib_tx_poolset { + struct kib_poolset tps_poolset; /* pool-set */ __u64 tps_next_tx_cookie; /* cookie of TX */ -} kib_tx_poolset_t; +}; -typedef struct { - kib_pool_t tpo_pool; /* pool */ - struct kib_hca_dev *tpo_hdev; /* device for this pool */ - struct kib_tx *tpo_tx_descs; /* all the tx descriptors */ - 
kib_pages_t *tpo_tx_pages; /* premapped tx msg pages */ -} kib_tx_pool_t; +struct kib_tx_pool { + struct kib_pool tpo_pool; /* pool */ + struct kib_hca_dev *tpo_hdev; /* device for this pool */ + struct kib_tx *tpo_tx_descs; /* all the tx descriptors */ + struct kib_pages *tpo_tx_pages; /* premapped tx msg pages */ +}; -typedef struct { +struct kib_fmr_poolset { spinlock_t fps_lock; /* serialize */ struct kib_net *fps_net; /* IB network */ struct list_head fps_pool_list; /* FMR pool list */ @@ -286,28 +249,48 @@ typedef struct { int fps_cpt; /* CPT id */ int fps_pool_size; int fps_flush_trigger; + int fps_cache; int fps_increasing; /* is allocating new pool */ unsigned long fps_next_retry; /* time stamp for retry if*/ /* failed to allocate */ -} kib_fmr_poolset_t; +}; -typedef struct { - struct list_head fpo_list; /* chain on pool list */ - struct kib_hca_dev *fpo_hdev; /* device for this pool */ - kib_fmr_poolset_t *fpo_owner; /* owner of this pool */ - struct ib_fmr_pool *fpo_fmr_pool; /* IB FMR pool */ +struct kib_fast_reg_descriptor { /* For fast registration */ + struct list_head frd_list; + struct ib_send_wr frd_inv_wr; + struct ib_reg_wr frd_fastreg_wr; + struct ib_mr *frd_mr; + bool frd_valid; +}; + +struct kib_fmr_pool { + struct list_head fpo_list; /* chain on pool list */ + struct kib_hca_dev *fpo_hdev; /* device for this pool */ + struct kib_fmr_poolset *fpo_owner; /* owner of this pool */ + union { + struct { + struct ib_fmr_pool *fpo_fmr_pool; /* IB FMR pool */ + } fmr; + struct { /* For fast registration */ + struct list_head fpo_pool_list; + int fpo_pool_size; + } fast_reg; + }; unsigned long fpo_deadline; /* deadline of this pool */ int fpo_failed; /* fmr pool is failed */ int fpo_map_count; /* # of mapped FMR */ -} kib_fmr_pool_t; + int fpo_is_fmr; +}; -typedef struct { - struct ib_pool_fmr *fmr_pfmr; /* IB pool fmr */ - kib_fmr_pool_t *fmr_pool; /* pool of FMR */ -} kib_fmr_t; +struct kib_fmr { + struct kib_fmr_pool *fmr_pool; /* pool of FMR */ + struct ib_pool_fmr *fmr_pfmr; /* IB pool fmr */ + struct kib_fast_reg_descriptor *fmr_frd; + u32 fmr_key; +}; -typedef struct kib_net { - struct list_head ibn_list; /* chain on kib_dev_t::ibd_nets */ +struct kib_net { + struct list_head ibn_list; /* chain on struct kib_dev::ibd_nets */ __u64 ibn_incarnation;/* my epoch */ int ibn_init; /* initialisation state */ int ibn_shutdown; /* shutting down? */ @@ -315,11 +298,11 @@ typedef struct kib_net { atomic_t ibn_npeers; /* # peers extant */ atomic_t ibn_nconns; /* # connections extant */ - kib_tx_poolset_t **ibn_tx_ps; /* tx pool-set */ - kib_fmr_poolset_t **ibn_fmr_ps; /* fmr pool-set */ + struct kib_tx_poolset **ibn_tx_ps; /* tx pool-set */ + struct kib_fmr_poolset **ibn_fmr_ps; /* fmr pool-set */ - kib_dev_t *ibn_dev; /* underlying IB device */ -} kib_net_t; + struct kib_dev *ibn_dev; /* underlying IB device */ +}; #define KIB_THREAD_SHIFT 16 #define KIB_THREAD_ID(cpt, tid) ((cpt) << KIB_THREAD_SHIFT | (tid)) @@ -335,7 +318,7 @@ struct kib_sched_info { int ibs_cpt; /* CPT id */ }; -typedef struct { +struct kib_data { int kib_init; /* initialisation state */ int kib_shutdown; /* shut down? 
*/ struct list_head kib_devs; /* IB devices extant */ @@ -362,7 +345,7 @@ typedef struct { spinlock_t kib_connd_lock; /* serialise */ struct ib_qp_attr kib_error_qpa; /* QP->ERROR */ struct kib_sched_info **kib_scheds; /* percpt data for schedulers */ -} kib_data_t; +}; #define IBLND_INIT_NOTHING 0 #define IBLND_INIT_DATA 1 @@ -373,51 +356,51 @@ typedef struct { * These are sent in sender's byte order (i.e. receiver flips). */ -typedef struct kib_connparams { +struct kib_connparams { __u16 ibcp_queue_depth; __u16 ibcp_max_frags; __u32 ibcp_max_msg_size; -} WIRE_ATTR kib_connparams_t; +} WIRE_ATTR; -typedef struct { +struct kib_immediate_msg { lnet_hdr_t ibim_hdr; /* portals header */ char ibim_payload[0]; /* piggy-backed payload */ -} WIRE_ATTR kib_immediate_msg_t; +} WIRE_ATTR; -typedef struct { +struct kib_rdma_frag { __u32 rf_nob; /* # bytes this frag */ __u64 rf_addr; /* CAVEAT EMPTOR: misaligned!! */ -} WIRE_ATTR kib_rdma_frag_t; +} WIRE_ATTR; -typedef struct { +struct kib_rdma_desc { __u32 rd_key; /* local/remote key */ __u32 rd_nfrags; /* # fragments */ - kib_rdma_frag_t rd_frags[0]; /* buffer frags */ -} WIRE_ATTR kib_rdma_desc_t; + struct kib_rdma_frag rd_frags[0]; /* buffer frags */ +} WIRE_ATTR; -typedef struct { +struct kib_putreq_msg { lnet_hdr_t ibprm_hdr; /* portals header */ __u64 ibprm_cookie; /* opaque completion cookie */ -} WIRE_ATTR kib_putreq_msg_t; +} WIRE_ATTR; -typedef struct { +struct kib_putack_msg { __u64 ibpam_src_cookie; /* reflected completion cookie */ __u64 ibpam_dst_cookie; /* opaque completion cookie */ - kib_rdma_desc_t ibpam_rd; /* sender's sink buffer */ -} WIRE_ATTR kib_putack_msg_t; + struct kib_rdma_desc ibpam_rd; /* sender's sink buffer */ +} WIRE_ATTR; -typedef struct { +struct kib_get_msg { lnet_hdr_t ibgm_hdr; /* portals header */ __u64 ibgm_cookie; /* opaque completion cookie */ - kib_rdma_desc_t ibgm_rd; /* rdma descriptor */ -} WIRE_ATTR kib_get_msg_t; + struct kib_rdma_desc ibgm_rd; /* rdma descriptor */ +} WIRE_ATTR; -typedef struct { +struct kib_completion_msg { __u64 ibcm_cookie; /* opaque completion cookie */ __s32 ibcm_status; /* < 0 failure: >= 0 length */ -} WIRE_ATTR kib_completion_msg_t; +} WIRE_ATTR; -typedef struct { +struct kib_msg { /* First 2 fields fixed FOR ALL TIME */ __u32 ibm_magic; /* I'm an ibnal message */ __u16 ibm_version; /* this is my version number */ @@ -432,14 +415,14 @@ typedef struct { __u64 ibm_dststamp; /* destination's incarnation */ union { - kib_connparams_t connparams; - kib_immediate_msg_t immediate; - kib_putreq_msg_t putreq; - kib_putack_msg_t putack; - kib_get_msg_t get; - kib_completion_msg_t completion; + struct kib_connparams connparams; + struct kib_immediate_msg immediate; + struct kib_putreq_msg putreq; + struct kib_putack_msg putack; + struct kib_get_msg get; + struct kib_completion_msg completion; } WIRE_ATTR ibm_u; -} WIRE_ATTR kib_msg_t; +} WIRE_ATTR; #define IBLND_MSG_MAGIC LNET_PROTO_IB_MAGIC /* unique magic */ @@ -458,14 +441,14 @@ typedef struct { #define IBLND_MSG_GET_REQ 0xd6 /* getreq (sink->src) */ #define IBLND_MSG_GET_DONE 0xd7 /* completion (src->sink: all OK) */ -typedef struct { +struct kib_rej { __u32 ibr_magic; /* sender's magic */ __u16 ibr_version; /* sender's version */ __u8 ibr_why; /* reject reason */ __u8 ibr_padding; /* padding */ __u64 ibr_incarnation; /* incarnation of peer */ - kib_connparams_t ibr_cp; /* connection parameters */ -} WIRE_ATTR kib_rej_t; + struct kib_connparams ibr_cp; /* connection parameters */ +} WIRE_ATTR; /* connection rejection reasons */ 
#define IBLND_REJECT_CONN_RACE 1 /* You lost connection race */ @@ -480,28 +463,26 @@ typedef struct { /***********************************************************************/ -typedef struct kib_rx /* receive message */ -{ +struct kib_rx { /* receive message */ struct list_head rx_list; /* queue for attention */ struct kib_conn *rx_conn; /* owning conn */ int rx_nob; /* # bytes received (-1 while posted) */ enum ib_wc_status rx_status; /* completion status */ - kib_msg_t *rx_msg; /* message buffer (host vaddr) */ + struct kib_msg *rx_msg; /* message buffer (host vaddr) */ __u64 rx_msgaddr; /* message buffer (I/O addr) */ DECLARE_PCI_UNMAP_ADDR(rx_msgunmap); /* for dma_unmap_single() */ struct ib_recv_wr rx_wrq; /* receive work item... */ struct ib_sge rx_sge; /* ...and its memory */ -} kib_rx_t; +}; #define IBLND_POSTRX_DONT_POST 0 /* don't post */ #define IBLND_POSTRX_NO_CREDIT 1 /* post: no credits */ #define IBLND_POSTRX_PEER_CREDIT 2 /* post: give peer back 1 credit */ #define IBLND_POSTRX_RSRVD_CREDIT 3 /* post: give self back 1 reserved credit */ -typedef struct kib_tx /* transmit message */ -{ +struct kib_tx { /* transmit message */ struct list_head tx_list; /* queue on idle_txs ibc_tx_queue etc. */ - kib_tx_pool_t *tx_pool; /* pool I'm from */ + struct kib_tx_pool *tx_pool; /* pool I'm from */ struct kib_conn *tx_conn; /* owning conn */ short tx_sending; /* # tx callbacks outstanding */ short tx_queued; /* queued for sending */ @@ -510,28 +491,28 @@ typedef struct kib_tx /* transmit message */ unsigned long tx_deadline; /* completion deadline */ __u64 tx_cookie; /* completion cookie */ lnet_msg_t *tx_lntmsg[2]; /* lnet msgs to finalize on completion */ - kib_msg_t *tx_msg; /* message buffer (host vaddr) */ + struct kib_msg *tx_msg; /* message buffer (host vaddr) */ __u64 tx_msgaddr; /* message buffer (I/O addr) */ DECLARE_PCI_UNMAP_ADDR(tx_msgunmap); /* for dma_unmap_single() */ int tx_nwrq; /* # send work items */ struct ib_rdma_wr *tx_wrq; /* send work items... */ struct ib_sge *tx_sge; /* ...and their memory */ - kib_rdma_desc_t *tx_rd; /* rdma descriptor */ + struct kib_rdma_desc *tx_rd; /* rdma descriptor */ int tx_nfrags; /* # entries in... 
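 * tx_frags below, as returned by dma_map_sg()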
*/ struct scatterlist *tx_frags; /* dma_map_sg descriptor */ __u64 *tx_pages; /* rdma phys page addrs */ - kib_fmr_t fmr; /* FMR */ + struct kib_fmr fmr; /* FMR */ int tx_dmadir; /* dma direction */ -} kib_tx_t; +}; -typedef struct kib_connvars { - kib_msg_t cv_msg; /* connection-in-progress variables */ -} kib_connvars_t; +struct kib_connvars { + struct kib_msg cv_msg; /* connection-in-progress variables */ +}; -typedef struct kib_conn { +struct kib_conn { struct kib_sched_info *ibc_sched; /* scheduler information */ struct kib_peer *ibc_peer; /* owning peer */ - kib_hca_dev_t *ibc_hdev; /* HCA bound on */ + struct kib_hca_dev *ibc_hdev; /* HCA bound on */ struct list_head ibc_list; /* stash on peer's conn list */ struct list_head ibc_sched_list; /* schedule for attention */ __u16 ibc_version; /* version of connection */ @@ -566,14 +547,14 @@ typedef struct kib_conn { /* reserve an ACK/DONE msg */ struct list_head ibc_active_txs; /* active tx awaiting completion */ spinlock_t ibc_lock; /* serialise */ - kib_rx_t *ibc_rxs; /* the rx descs */ - kib_pages_t *ibc_rx_pages; /* premapped rx msg pages */ + struct kib_rx *ibc_rxs; /* the rx descs */ + struct kib_pages *ibc_rx_pages; /* premapped rx msg pages */ struct rdma_cm_id *ibc_cmid; /* CM id */ struct ib_cq *ibc_cq; /* completion queue */ - kib_connvars_t *ibc_connvars; /* in-progress connection state */ -} kib_conn_t; + struct kib_connvars *ibc_connvars; /* in-progress connection state */ +}; #define IBLND_CONN_INIT 0 /* being initialised */ #define IBLND_CONN_ACTIVE_CONNECT 1 /* active sending req */ @@ -582,7 +563,7 @@ typedef struct kib_conn { #define IBLND_CONN_CLOSING 4 /* being closed */ #define IBLND_CONN_DISCONNECTED 5 /* disconnected */ -typedef struct kib_peer { +struct kib_peer { struct list_head ibp_list; /* stash on global peer list */ lnet_nid_t ibp_nid; /* who's on the other end(s) */ lnet_ni_t *ibp_ni; /* LNet interface */ @@ -609,21 +590,63 @@ typedef struct kib_peer { __u16 ibp_max_frags; /* max_peer_credits */ __u16 ibp_queue_depth; -} kib_peer_t; +}; + +extern struct kib_data kiblnd_data; -extern kib_data_t kiblnd_data; +void kiblnd_hdev_destroy(struct kib_hca_dev *hdev); -void kiblnd_hdev_destroy(kib_hca_dev_t *hdev); +int kiblnd_msg_queue_size(int version, struct lnet_ni *ni); + +/* max # of fragments configured by user */ +static inline int +kiblnd_cfg_rdma_frags(struct lnet_ni *ni) +{ + struct lnet_ioctl_config_o2iblnd_tunables *tunables; + int mod; + + tunables = &ni->ni_lnd_tunables->lt_tun_u.lt_o2ib; + mod = tunables->lnd_map_on_demand; + return mod ? mod : IBLND_MAX_RDMA_FRAGS; +} + +static inline int +kiblnd_rdma_frags(int version, struct lnet_ni *ni) +{ + return version == IBLND_MSG_VERSION_1 ? 
+ IBLND_MAX_RDMA_FRAGS : + kiblnd_cfg_rdma_frags(ni); +} + +static inline int +kiblnd_concurrent_sends(int version, struct lnet_ni *ni) +{ + struct lnet_ioctl_config_o2iblnd_tunables *tunables; + int concurrent_sends; + + tunables = &ni->ni_lnd_tunables->lt_tun_u.lt_o2ib; + concurrent_sends = tunables->lnd_concurrent_sends; + + if (version == IBLND_MSG_VERSION_1) { + if (concurrent_sends > IBLND_MSG_QUEUE_SIZE_V1 * 2) + return IBLND_MSG_QUEUE_SIZE_V1 * 2; + + if (concurrent_sends < IBLND_MSG_QUEUE_SIZE_V1 / 2) + return IBLND_MSG_QUEUE_SIZE_V1 / 2; + } + + return concurrent_sends; +} static inline void -kiblnd_hdev_addref_locked(kib_hca_dev_t *hdev) +kiblnd_hdev_addref_locked(struct kib_hca_dev *hdev) { LASSERT(atomic_read(&hdev->ibh_ref) > 0); atomic_inc(&hdev->ibh_ref); } static inline void -kiblnd_hdev_decref(kib_hca_dev_t *hdev) +kiblnd_hdev_decref(struct kib_hca_dev *hdev) { LASSERT(atomic_read(&hdev->ibh_ref) > 0); if (atomic_dec_and_test(&hdev->ibh_ref)) @@ -631,7 +654,7 @@ kiblnd_hdev_decref(kib_hca_dev_t *hdev) } static inline int -kiblnd_dev_can_failover(kib_dev_t *dev) +kiblnd_dev_can_failover(struct kib_dev *dev) { if (!list_empty(&dev->ibd_fail_list)) /* already scheduled */ return 0; @@ -687,7 +710,7 @@ do { \ } while (0) static inline bool -kiblnd_peer_connecting(kib_peer_t *peer) +kiblnd_peer_connecting(struct kib_peer *peer) { return peer->ibp_connecting || peer->ibp_reconnecting || @@ -695,7 +718,7 @@ kiblnd_peer_connecting(kib_peer_t *peer) } static inline bool -kiblnd_peer_idle(kib_peer_t *peer) +kiblnd_peer_idle(struct kib_peer *peer) { return !kiblnd_peer_connecting(peer) && list_empty(&peer->ibp_conns); } @@ -710,23 +733,23 @@ kiblnd_nid2peerlist(lnet_nid_t nid) } static inline int -kiblnd_peer_active(kib_peer_t *peer) +kiblnd_peer_active(struct kib_peer *peer) { /* Am I in the peer hash table? 
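 * (kiblnd_unlink_peer_locked() does a list_del_init() on ibp_list, so
 *  an unlinked peer's ibp_list is empty)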
*/ return !list_empty(&peer->ibp_list); } -static inline kib_conn_t * -kiblnd_get_conn_locked(kib_peer_t *peer) +static inline struct kib_conn * +kiblnd_get_conn_locked(struct kib_peer *peer) { LASSERT(!list_empty(&peer->ibp_conns)); /* just return the first connection */ - return list_entry(peer->ibp_conns.next, kib_conn_t, ibc_list); + return list_entry(peer->ibp_conns.next, struct kib_conn, ibc_list); } static inline int -kiblnd_send_keepalive(kib_conn_t *conn) +kiblnd_send_keepalive(struct kib_conn *conn) { return (*kiblnd_tunables.kib_keepalive > 0) && cfs_time_after(jiffies, conn->ibc_last_send + @@ -735,12 +758,16 @@ kiblnd_send_keepalive(kib_conn_t *conn) } static inline int -kiblnd_need_noop(kib_conn_t *conn) +kiblnd_need_noop(struct kib_conn *conn) { + struct lnet_ioctl_config_o2iblnd_tunables *tunables; + lnet_ni_t *ni = conn->ibc_peer->ibp_ni; + LASSERT(conn->ibc_state >= IBLND_CONN_ESTABLISHED); + tunables = &ni->ni_lnd_tunables->lt_tun_u.lt_o2ib; if (conn->ibc_outstanding_credits < - IBLND_CREDITS_HIGHWATER(conn->ibc_version) && + IBLND_CREDITS_HIGHWATER(tunables, conn->ibc_version) && !kiblnd_send_keepalive(conn)) return 0; /* No need to send NOOP */ @@ -767,14 +794,14 @@ kiblnd_need_noop(kib_conn_t *conn) } static inline void -kiblnd_abort_receives(kib_conn_t *conn) +kiblnd_abort_receives(struct kib_conn *conn) { ib_modify_qp(conn->ibc_cmid->qp, &kiblnd_data.kib_error_qpa, IB_QP_STATE); } static inline const char * -kiblnd_queue2str(kib_conn_t *conn, struct list_head *q) +kiblnd_queue2str(struct kib_conn *conn, struct list_head *q) { if (q == &conn->ibc_tx_queue) return "tx_queue"; @@ -799,7 +826,8 @@ kiblnd_queue2str(kib_conn_t *conn, struct list_head *q) #define IBLND_WID_TX 1 #define IBLND_WID_RX 2 #define IBLND_WID_RDMA 3 -#define IBLND_WID_MASK 3UL +#define IBLND_WID_MR 4 +#define IBLND_WID_MASK 7UL static inline __u64 kiblnd_ptr2wreqid(void *ptr, int type) @@ -824,21 +852,21 @@ kiblnd_wreqid2type(__u64 wreqid) } static inline void -kiblnd_set_conn_state(kib_conn_t *conn, int state) +kiblnd_set_conn_state(struct kib_conn *conn, int state) { conn->ibc_state = state; mb(); } static inline void -kiblnd_init_msg(kib_msg_t *msg, int type, int body_nob) +kiblnd_init_msg(struct kib_msg *msg, int type, int body_nob) { msg->ibm_type = type; - msg->ibm_nob = offsetof(kib_msg_t, ibm_u) + body_nob; + msg->ibm_nob = offsetof(struct kib_msg, ibm_u) + body_nob; } static inline int -kiblnd_rd_size(kib_rdma_desc_t *rd) +kiblnd_rd_size(struct kib_rdma_desc *rd) { int i; int size; @@ -850,25 +878,25 @@ kiblnd_rd_size(kib_rdma_desc_t *rd) } static inline __u64 -kiblnd_rd_frag_addr(kib_rdma_desc_t *rd, int index) +kiblnd_rd_frag_addr(struct kib_rdma_desc *rd, int index) { return rd->rd_frags[index].rf_addr; } static inline __u32 -kiblnd_rd_frag_size(kib_rdma_desc_t *rd, int index) +kiblnd_rd_frag_size(struct kib_rdma_desc *rd, int index) { return rd->rd_frags[index].rf_nob; } static inline __u32 -kiblnd_rd_frag_key(kib_rdma_desc_t *rd, int index) +kiblnd_rd_frag_key(struct kib_rdma_desc *rd, int index) { return rd->rd_key; } static inline int -kiblnd_rd_consume_frag(kib_rdma_desc_t *rd, int index, __u32 nob) +kiblnd_rd_consume_frag(struct kib_rdma_desc *rd, int index, __u32 nob) { if (nob < rd->rd_frags[index].rf_nob) { rd->rd_frags[index].rf_addr += nob; @@ -881,14 +909,14 @@ kiblnd_rd_consume_frag(kib_rdma_desc_t *rd, int index, __u32 nob) } static inline int -kiblnd_rd_msg_size(kib_rdma_desc_t *rd, int msgtype, int n) +kiblnd_rd_msg_size(struct kib_rdma_desc *rd, int msgtype, int n) { 
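	/*
	 * e.g. a GET request whose sink buffer has 3 fragments carries
	 * offsetof(struct kib_get_msg, ibgm_rd.rd_frags[3]) bytes of body;
	 * illustrative only, callers pass the descriptor's rd_nfrags as n.
	 */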
LASSERT(msgtype == IBLND_MSG_GET_REQ || msgtype == IBLND_MSG_PUT_ACK); return msgtype == IBLND_MSG_GET_REQ ? - offsetof(kib_get_msg_t, ibgm_rd.rd_frags[n]) : - offsetof(kib_putack_msg_t, ibpam_rd.rd_frags[n]); + offsetof(struct kib_get_msg, ibgm_rd.rd_frags[n]) : + offsetof(struct kib_putack_msg, ibpam_rd.rd_frags[n]); } static inline __u64 @@ -947,50 +975,51 @@ static inline unsigned int kiblnd_sg_dma_len(struct ib_device *dev, #define KIBLND_CONN_PARAM(e) ((e)->param.conn.private_data) #define KIBLND_CONN_PARAM_LEN(e) ((e)->param.conn.private_data_len) -struct ib_mr *kiblnd_find_rd_dma_mr(kib_hca_dev_t *hdev, - kib_rdma_desc_t *rd, +struct ib_mr *kiblnd_find_rd_dma_mr(struct lnet_ni *ni, struct kib_rdma_desc *rd, int negotiated_nfrags); -void kiblnd_map_rx_descs(kib_conn_t *conn); -void kiblnd_unmap_rx_descs(kib_conn_t *conn); -void kiblnd_pool_free_node(kib_pool_t *pool, struct list_head *node); -struct list_head *kiblnd_pool_alloc_node(kib_poolset_t *ps); +void kiblnd_map_rx_descs(struct kib_conn *conn); +void kiblnd_unmap_rx_descs(struct kib_conn *conn); +void kiblnd_pool_free_node(struct kib_pool *pool, struct list_head *node); +struct list_head *kiblnd_pool_alloc_node(struct kib_poolset *ps); -int kiblnd_fmr_pool_map(kib_fmr_poolset_t *fps, __u64 *pages, - int npages, __u64 iov, kib_fmr_t *fmr); -void kiblnd_fmr_pool_unmap(kib_fmr_t *fmr, int status); +int kiblnd_fmr_pool_map(struct kib_fmr_poolset *fps, struct kib_tx *tx, + struct kib_rdma_desc *rd, __u32 nob, __u64 iov, + struct kib_fmr *fmr); +void kiblnd_fmr_pool_unmap(struct kib_fmr *fmr, int status); -int kiblnd_tunables_init(void); -void kiblnd_tunables_fini(void); +int kiblnd_tunables_setup(struct lnet_ni *ni); +void kiblnd_tunables_init(void); int kiblnd_connd(void *arg); int kiblnd_scheduler(void *arg); int kiblnd_thread_start(int (*fn)(void *arg), void *arg, char *name); int kiblnd_failover_thread(void *arg); -int kiblnd_alloc_pages(kib_pages_t **pp, int cpt, int npages); +int kiblnd_alloc_pages(struct kib_pages **pp, int cpt, int npages); int kiblnd_cm_callback(struct rdma_cm_id *cmid, struct rdma_cm_event *event); int kiblnd_translate_mtu(int value); -int kiblnd_dev_failover(kib_dev_t *dev); -int kiblnd_create_peer(lnet_ni_t *ni, kib_peer_t **peerp, lnet_nid_t nid); -void kiblnd_destroy_peer(kib_peer_t *peer); -bool kiblnd_reconnect_peer(kib_peer_t *peer); -void kiblnd_destroy_dev(kib_dev_t *dev); -void kiblnd_unlink_peer_locked(kib_peer_t *peer); -kib_peer_t *kiblnd_find_peer_locked(lnet_nid_t nid); -int kiblnd_close_stale_conns_locked(kib_peer_t *peer, +int kiblnd_dev_failover(struct kib_dev *dev); +int kiblnd_create_peer(lnet_ni_t *ni, struct kib_peer **peerp, lnet_nid_t nid); +void kiblnd_destroy_peer(struct kib_peer *peer); +bool kiblnd_reconnect_peer(struct kib_peer *peer); +void kiblnd_destroy_dev(struct kib_dev *dev); +void kiblnd_unlink_peer_locked(struct kib_peer *peer); +struct kib_peer *kiblnd_find_peer_locked(lnet_nid_t nid); +int kiblnd_close_stale_conns_locked(struct kib_peer *peer, int version, __u64 incarnation); -int kiblnd_close_peer_conns_locked(kib_peer_t *peer, int why); +int kiblnd_close_peer_conns_locked(struct kib_peer *peer, int why); -kib_conn_t *kiblnd_create_conn(kib_peer_t *peer, struct rdma_cm_id *cmid, - int state, int version); -void kiblnd_destroy_conn(kib_conn_t *conn, bool free_conn); -void kiblnd_close_conn(kib_conn_t *conn, int error); -void kiblnd_close_conn_locked(kib_conn_t *conn, int error); +struct kib_conn *kiblnd_create_conn(struct kib_peer *peer, + struct rdma_cm_id *cmid, + 
int state, int version); +void kiblnd_destroy_conn(struct kib_conn *conn, bool free_conn); +void kiblnd_close_conn(struct kib_conn *conn, int error); +void kiblnd_close_conn_locked(struct kib_conn *conn, int error); -void kiblnd_launch_tx(lnet_ni_t *ni, kib_tx_t *tx, lnet_nid_t nid); +void kiblnd_launch_tx(lnet_ni_t *ni, struct kib_tx *tx, lnet_nid_t nid); void kiblnd_txlist_done(lnet_ni_t *ni, struct list_head *txlist, int status); @@ -998,10 +1027,10 @@ void kiblnd_qp_event(struct ib_event *event, void *arg); void kiblnd_cq_event(struct ib_event *event, void *arg); void kiblnd_cq_completion(struct ib_cq *cq, void *arg); -void kiblnd_pack_msg(lnet_ni_t *ni, kib_msg_t *msg, int version, +void kiblnd_pack_msg(lnet_ni_t *ni, struct kib_msg *msg, int version, int credits, lnet_nid_t dstnid, __u64 dststamp); -int kiblnd_unpack_msg(kib_msg_t *msg, int nob); -int kiblnd_post_rx(kib_rx_t *rx, int credit); +int kiblnd_unpack_msg(struct kib_msg *msg, int nob); +int kiblnd_post_rx(struct kib_rx *rx, int credit); int kiblnd_send(lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg); int kiblnd_recv(lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg, int delayed, diff --git a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_cb.c b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_cb.c index 2323e8d3a318..596a697b9d39 100644 --- a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_cb.c +++ b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_cb.c @@ -15,11 +15,7 @@ * * You should have received a copy of the GNU General Public License * version 2 along with this program; If not, see - * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf - * - * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, - * CA 95054 USA or visit www.sun.com if you need additional information or - * have any questions. 
+ * http://www.gnu.org/licenses/gpl-2.0.html * * GPL HEADER END */ @@ -40,22 +36,22 @@ #include "o2iblnd.h" -static void kiblnd_peer_alive(kib_peer_t *peer); -static void kiblnd_peer_connect_failed(kib_peer_t *peer, int active, int error); -static void kiblnd_check_sends(kib_conn_t *conn); -static void kiblnd_init_tx_msg(lnet_ni_t *ni, kib_tx_t *tx, +static void kiblnd_peer_alive(struct kib_peer *peer); +static void kiblnd_peer_connect_failed(struct kib_peer *peer, int active, int error); +static void kiblnd_check_sends(struct kib_conn *conn); +static void kiblnd_init_tx_msg(lnet_ni_t *ni, struct kib_tx *tx, int type, int body_nob); -static int kiblnd_init_rdma(kib_conn_t *conn, kib_tx_t *tx, int type, - int resid, kib_rdma_desc_t *dstrd, __u64 dstcookie); -static void kiblnd_queue_tx_locked(kib_tx_t *tx, kib_conn_t *conn); -static void kiblnd_queue_tx(kib_tx_t *tx, kib_conn_t *conn); -static void kiblnd_unmap_tx(lnet_ni_t *ni, kib_tx_t *tx); +static int kiblnd_init_rdma(struct kib_conn *conn, struct kib_tx *tx, int type, + int resid, struct kib_rdma_desc *dstrd, __u64 dstcookie); +static void kiblnd_queue_tx_locked(struct kib_tx *tx, struct kib_conn *conn); +static void kiblnd_queue_tx(struct kib_tx *tx, struct kib_conn *conn); +static void kiblnd_unmap_tx(lnet_ni_t *ni, struct kib_tx *tx); static void -kiblnd_tx_done(lnet_ni_t *ni, kib_tx_t *tx) +kiblnd_tx_done(lnet_ni_t *ni, struct kib_tx *tx) { lnet_msg_t *lntmsg[2]; - kib_net_t *net = ni->ni_data; + struct kib_net *net = ni->ni_data; int rc; int i; @@ -97,10 +93,10 @@ kiblnd_tx_done(lnet_ni_t *ni, kib_tx_t *tx) void kiblnd_txlist_done(lnet_ni_t *ni, struct list_head *txlist, int status) { - kib_tx_t *tx; + struct kib_tx *tx; while (!list_empty(txlist)) { - tx = list_entry(txlist->next, kib_tx_t, tx_list); + tx = list_entry(txlist->next, struct kib_tx, tx_list); list_del(&tx->tx_list); /* complete now */ @@ -110,19 +106,19 @@ kiblnd_txlist_done(lnet_ni_t *ni, struct list_head *txlist, int status) } } -static kib_tx_t * +static struct kib_tx * kiblnd_get_idle_tx(lnet_ni_t *ni, lnet_nid_t target) { - kib_net_t *net = (kib_net_t *)ni->ni_data; + struct kib_net *net = (struct kib_net *)ni->ni_data; struct list_head *node; - kib_tx_t *tx; - kib_tx_poolset_t *tps; + struct kib_tx *tx; + struct kib_tx_poolset *tps; tps = net->ibn_tx_ps[lnet_cpt_of_nid(target)]; node = kiblnd_pool_alloc_node(&tps->tps_poolset); if (!node) return NULL; - tx = list_entry(node, kib_tx_t, tx_list); + tx = list_entry(node, struct kib_tx, tx_list); LASSERT(!tx->tx_nwrq); LASSERT(!tx->tx_queued); @@ -138,9 +134,9 @@ kiblnd_get_idle_tx(lnet_ni_t *ni, lnet_nid_t target) } static void -kiblnd_drop_rx(kib_rx_t *rx) +kiblnd_drop_rx(struct kib_rx *rx) { - kib_conn_t *conn = rx->rx_conn; + struct kib_conn *conn = rx->rx_conn; struct kib_sched_info *sched = conn->ibc_sched; unsigned long flags; @@ -153,10 +149,10 @@ kiblnd_drop_rx(kib_rx_t *rx) } int -kiblnd_post_rx(kib_rx_t *rx, int credit) +kiblnd_post_rx(struct kib_rx *rx, int credit) { - kib_conn_t *conn = rx->rx_conn; - kib_net_t *net = conn->ibc_peer->ibp_ni->ni_data; + struct kib_conn *conn = rx->rx_conn; + struct kib_net *net = conn->ibc_peer->ibp_ni->ni_data; struct ib_recv_wr *bad_wrq = NULL; struct ib_mr *mr = conn->ibc_hdev->ibh_mrs; int rc; @@ -223,13 +219,13 @@ out: return rc; } -static kib_tx_t * -kiblnd_find_waiting_tx_locked(kib_conn_t *conn, int txtype, __u64 cookie) +static struct kib_tx * +kiblnd_find_waiting_tx_locked(struct kib_conn *conn, int txtype, __u64 cookie) { struct list_head *tmp; 
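	/*
	 * Completion messages (PUT_ACK/PUT_NAK/PUT_DONE/GET_DONE) echo the
	 * cookie the originator stashed in tx->tx_cookie; match on that to
	 * find the tx still parked on ibc_active_txs.
	 */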
list_for_each(tmp, &conn->ibc_active_txs) { - kib_tx_t *tx = list_entry(tmp, kib_tx_t, tx_list); + struct kib_tx *tx = list_entry(tmp, struct kib_tx, tx_list); LASSERT(!tx->tx_queued); LASSERT(tx->tx_sending || tx->tx_waiting); @@ -249,9 +245,9 @@ kiblnd_find_waiting_tx_locked(kib_conn_t *conn, int txtype, __u64 cookie) } static void -kiblnd_handle_completion(kib_conn_t *conn, int txtype, int status, __u64 cookie) +kiblnd_handle_completion(struct kib_conn *conn, int txtype, int status, __u64 cookie) { - kib_tx_t *tx; + struct kib_tx *tx; lnet_ni_t *ni = conn->ibc_peer->ibp_ni; int idle; @@ -287,10 +283,10 @@ kiblnd_handle_completion(kib_conn_t *conn, int txtype, int status, __u64 cookie) } static void -kiblnd_send_completion(kib_conn_t *conn, int type, int status, __u64 cookie) +kiblnd_send_completion(struct kib_conn *conn, int type, int status, __u64 cookie) { lnet_ni_t *ni = conn->ibc_peer->ibp_ni; - kib_tx_t *tx = kiblnd_get_idle_tx(ni, conn->ibc_peer->ibp_nid); + struct kib_tx *tx = kiblnd_get_idle_tx(ni, conn->ibc_peer->ibp_nid); if (!tx) { CERROR("Can't get tx for completion %x for %s\n", @@ -300,19 +296,19 @@ kiblnd_send_completion(kib_conn_t *conn, int type, int status, __u64 cookie) tx->tx_msg->ibm_u.completion.ibcm_status = status; tx->tx_msg->ibm_u.completion.ibcm_cookie = cookie; - kiblnd_init_tx_msg(ni, tx, type, sizeof(kib_completion_msg_t)); + kiblnd_init_tx_msg(ni, tx, type, sizeof(struct kib_completion_msg)); kiblnd_queue_tx(tx, conn); } static void -kiblnd_handle_rx(kib_rx_t *rx) +kiblnd_handle_rx(struct kib_rx *rx) { - kib_msg_t *msg = rx->rx_msg; - kib_conn_t *conn = rx->rx_conn; + struct kib_msg *msg = rx->rx_msg; + struct kib_conn *conn = rx->rx_conn; lnet_ni_t *ni = conn->ibc_peer->ibp_ni; int credits = msg->ibm_credits; - kib_tx_t *tx; + struct kib_tx *tx; int rc = 0; int rc2; int post_credit; @@ -467,12 +463,12 @@ kiblnd_handle_rx(kib_rx_t *rx) } static void -kiblnd_rx_complete(kib_rx_t *rx, int status, int nob) +kiblnd_rx_complete(struct kib_rx *rx, int status, int nob) { - kib_msg_t *msg = rx->rx_msg; - kib_conn_t *conn = rx->rx_conn; + struct kib_msg *msg = rx->rx_msg; + struct kib_conn *conn = rx->rx_conn; lnet_ni_t *ni = conn->ibc_peer->ibp_ni; - kib_net_t *net = ni->ni_data; + struct kib_net *net = ni->ni_data; int rc; int err = -EIO; @@ -561,36 +557,23 @@ kiblnd_kvaddr_to_page(unsigned long vaddr) } static int -kiblnd_fmr_map_tx(kib_net_t *net, kib_tx_t *tx, kib_rdma_desc_t *rd, int nob) +kiblnd_fmr_map_tx(struct kib_net *net, struct kib_tx *tx, struct kib_rdma_desc *rd, __u32 nob) { - kib_hca_dev_t *hdev; - __u64 *pages = tx->tx_pages; - kib_fmr_poolset_t *fps; - int npages; - int size; + struct kib_hca_dev *hdev; + struct kib_fmr_poolset *fps; int cpt; int rc; - int i; LASSERT(tx->tx_pool); LASSERT(tx->tx_pool->tpo_pool.po_owner); hdev = tx->tx_pool->tpo_hdev; - - for (i = 0, npages = 0; i < rd->rd_nfrags; i++) { - for (size = 0; size < rd->rd_frags[i].rf_nob; - size += hdev->ibh_page_size) { - pages[npages++] = (rd->rd_frags[i].rf_addr & - hdev->ibh_page_mask) + size; - } - } - cpt = tx->tx_pool->tpo_pool.po_owner->ps_cpt; fps = net->ibn_fmr_ps[cpt]; - rc = kiblnd_fmr_pool_map(fps, pages, npages, 0, &tx->fmr); + rc = kiblnd_fmr_pool_map(fps, tx, rd, nob, 0, &tx->fmr); if (rc) { - CERROR("Can't map %d pages: %d\n", npages, rc); + CERROR("Can't map %u bytes: %d\n", nob, rc); return rc; } @@ -598,8 +581,7 @@ kiblnd_fmr_map_tx(kib_net_t *net, kib_tx_t *tx, kib_rdma_desc_t *rd, int nob) * If rd is not tx_rd, it's going to get sent to a peer, who will need * 
the rkey */ - rd->rd_key = (rd != tx->tx_rd) ? tx->fmr.fmr_pfmr->fmr->rkey : - tx->fmr.fmr_pfmr->fmr->lkey; + rd->rd_key = tx->fmr.fmr_key; rd->rd_frags[0].rf_addr &= ~hdev->ibh_page_mask; rd->rd_frags[0].rf_nob = nob; rd->rd_nfrags = 1; @@ -607,16 +589,14 @@ kiblnd_fmr_map_tx(kib_net_t *net, kib_tx_t *tx, kib_rdma_desc_t *rd, int nob) return 0; } -static void kiblnd_unmap_tx(lnet_ni_t *ni, kib_tx_t *tx) +static void kiblnd_unmap_tx(lnet_ni_t *ni, struct kib_tx *tx) { - kib_net_t *net = ni->ni_data; + struct kib_net *net = ni->ni_data; LASSERT(net); - if (net->ibn_fmr_ps && tx->fmr.fmr_pfmr) { + if (net->ibn_fmr_ps) kiblnd_fmr_pool_unmap(&tx->fmr, tx->tx_status); - tx->fmr.fmr_pfmr = NULL; - } if (tx->tx_nfrags) { kiblnd_dma_unmap_sg(tx->tx_pool->tpo_hdev->ibh_ibdev, @@ -625,11 +605,11 @@ static void kiblnd_unmap_tx(lnet_ni_t *ni, kib_tx_t *tx) } } -static int kiblnd_map_tx(lnet_ni_t *ni, kib_tx_t *tx, kib_rdma_desc_t *rd, +static int kiblnd_map_tx(lnet_ni_t *ni, struct kib_tx *tx, struct kib_rdma_desc *rd, int nfrags) { - kib_hca_dev_t *hdev = tx->tx_pool->tpo_hdev; - kib_net_t *net = ni->ni_data; + struct kib_net *net = ni->ni_data; + struct kib_hca_dev *hdev = net->ibn_dev->ibd_hdev; struct ib_mr *mr = NULL; __u32 nob; int i; @@ -652,7 +632,7 @@ static int kiblnd_map_tx(lnet_ni_t *ni, kib_tx_t *tx, kib_rdma_desc_t *rd, nob += rd->rd_frags[i].rf_nob; } - mr = kiblnd_find_rd_dma_mr(hdev, rd, tx->tx_conn ? + mr = kiblnd_find_rd_dma_mr(ni, rd, tx->tx_conn ? tx->tx_conn->ibc_max_frags : -1); if (mr) { /* found pre-mapping MR */ @@ -667,10 +647,10 @@ static int kiblnd_map_tx(lnet_ni_t *ni, kib_tx_t *tx, kib_rdma_desc_t *rd, } static int -kiblnd_setup_rd_iov(lnet_ni_t *ni, kib_tx_t *tx, kib_rdma_desc_t *rd, +kiblnd_setup_rd_iov(lnet_ni_t *ni, struct kib_tx *tx, struct kib_rdma_desc *rd, unsigned int niov, struct kvec *iov, int offset, int nob) { - kib_net_t *net = ni->ni_data; + struct kib_net *net = ni->ni_data; struct page *page; struct scatterlist *sg; unsigned long vaddr; @@ -704,7 +684,11 @@ kiblnd_setup_rd_iov(lnet_ni_t *ni, kib_tx_t *tx, kib_rdma_desc_t *rd, fragnob = min(fragnob, (int)PAGE_SIZE - page_offset); sg_set_page(sg, page, fragnob, page_offset); - sg++; + sg = sg_next(sg); + if (!sg) { + CERROR("lacking enough sg entries to map tx\n"); + return -EFAULT; + } if (offset + fragnob < iov->iov_len) { offset += fragnob; @@ -720,10 +704,10 @@ kiblnd_setup_rd_iov(lnet_ni_t *ni, kib_tx_t *tx, kib_rdma_desc_t *rd, } static int -kiblnd_setup_rd_kiov(lnet_ni_t *ni, kib_tx_t *tx, kib_rdma_desc_t *rd, +kiblnd_setup_rd_kiov(lnet_ni_t *ni, struct kib_tx *tx, struct kib_rdma_desc *rd, int nkiov, lnet_kiov_t *kiov, int offset, int nob) { - kib_net_t *net = ni->ni_data; + struct kib_net *net = ni->ni_data; struct scatterlist *sg; int fragnob; @@ -748,7 +732,11 @@ kiblnd_setup_rd_kiov(lnet_ni_t *ni, kib_tx_t *tx, kib_rdma_desc_t *rd, sg_set_page(sg, kiov->kiov_page, fragnob, kiov->kiov_offset + offset); - sg++; + sg = sg_next(sg); + if (!sg) { + CERROR("lacking enough sg entries to map tx\n"); + return -EFAULT; + } offset = 0; kiov++; @@ -760,11 +748,12 @@ kiblnd_setup_rd_kiov(lnet_ni_t *ni, kib_tx_t *tx, kib_rdma_desc_t *rd, } static int -kiblnd_post_tx_locked(kib_conn_t *conn, kib_tx_t *tx, int credit) +kiblnd_post_tx_locked(struct kib_conn *conn, struct kib_tx *tx, int credit) __must_hold(&conn->ibc_lock) { - kib_msg_t *msg = tx->tx_msg; - kib_peer_t *peer = conn->ibc_peer; + struct kib_msg *msg = tx->tx_msg; + struct kib_peer *peer = conn->ibc_peer; + struct lnet_ni *ni = peer->ibp_ni; int 
ver = conn->ibc_version; int rc; int done; @@ -780,7 +769,7 @@ kiblnd_post_tx_locked(kib_conn_t *conn, kib_tx_t *tx, int credit) LASSERT(conn->ibc_credits >= 0); LASSERT(conn->ibc_credits <= conn->ibc_queue_depth); - if (conn->ibc_nsends_posted == IBLND_CONCURRENT_SENDS(ver)) { + if (conn->ibc_nsends_posted == kiblnd_concurrent_sends(ver, ni)) { /* tx completions outstanding... */ CDEBUG(D_NET, "%s: posted enough\n", libcfs_nid2str(peer->ibp_nid)); @@ -851,14 +840,26 @@ kiblnd_post_tx_locked(kib_conn_t *conn, kib_tx_t *tx, int credit) /* close_conn will launch failover */ rc = -ENETDOWN; } else { - struct ib_send_wr *wrq = &tx->tx_wrq[tx->tx_nwrq - 1].wr; + struct kib_fast_reg_descriptor *frd = tx->fmr.fmr_frd; + struct ib_send_wr *bad = &tx->tx_wrq[tx->tx_nwrq - 1].wr; + struct ib_send_wr *wrq = &tx->tx_wrq[0].wr; + + if (frd) { + if (!frd->frd_valid) { + wrq = &frd->frd_inv_wr; + wrq->next = &frd->frd_fastreg_wr.wr; + } else { + wrq = &frd->frd_fastreg_wr.wr; + } + frd->frd_fastreg_wr.wr.next = &tx->tx_wrq[0].wr; + } - LASSERTF(wrq->wr_id == kiblnd_ptr2wreqid(tx, IBLND_WID_TX), + LASSERTF(bad->wr_id == kiblnd_ptr2wreqid(tx, IBLND_WID_TX), "bad wr_id %llx, opc %d, flags %d, peer: %s\n", - wrq->wr_id, wrq->opcode, wrq->send_flags, - libcfs_nid2str(conn->ibc_peer->ibp_nid)); - wrq = NULL; - rc = ib_post_send(conn->ibc_cmid->qp, &tx->tx_wrq->wr, &wrq); + bad->wr_id, bad->opcode, bad->send_flags, + libcfs_nid2str(conn->ibc_peer->ibp_nid)); + bad = NULL; + rc = ib_post_send(conn->ibc_cmid->qp, wrq, &bad); } conn->ibc_last_send = jiffies; @@ -904,11 +905,11 @@ kiblnd_post_tx_locked(kib_conn_t *conn, kib_tx_t *tx, int credit) } static void -kiblnd_check_sends(kib_conn_t *conn) +kiblnd_check_sends(struct kib_conn *conn) { int ver = conn->ibc_version; lnet_ni_t *ni = conn->ibc_peer->ibp_ni; - kib_tx_t *tx; + struct kib_tx *tx; /* Don't send anything until after the connection is established */ if (conn->ibc_state < IBLND_CONN_ESTABLISHED) { @@ -919,7 +920,7 @@ kiblnd_check_sends(kib_conn_t *conn) spin_lock(&conn->ibc_lock); - LASSERT(conn->ibc_nsends_posted <= IBLND_CONCURRENT_SENDS(ver)); + LASSERT(conn->ibc_nsends_posted <= kiblnd_concurrent_sends(ver, ni)); LASSERT(!IBLND_OOB_CAPABLE(ver) || conn->ibc_noops_posted <= IBLND_OOB_MSGS(ver)); LASSERT(conn->ibc_reserved_credits >= 0); @@ -927,7 +928,7 @@ kiblnd_check_sends(kib_conn_t *conn) while (conn->ibc_reserved_credits > 0 && !list_empty(&conn->ibc_tx_queue_rsrvd)) { tx = list_entry(conn->ibc_tx_queue_rsrvd.next, - kib_tx_t, tx_list); + struct kib_tx, tx_list); list_del(&tx->tx_list); list_add_tail(&tx->tx_list, &conn->ibc_tx_queue); conn->ibc_reserved_credits--; @@ -951,16 +952,16 @@ kiblnd_check_sends(kib_conn_t *conn) if (!list_empty(&conn->ibc_tx_queue_nocred)) { credit = 0; tx = list_entry(conn->ibc_tx_queue_nocred.next, - kib_tx_t, tx_list); + struct kib_tx, tx_list); } else if (!list_empty(&conn->ibc_tx_noops)) { LASSERT(!IBLND_OOB_CAPABLE(ver)); credit = 1; tx = list_entry(conn->ibc_tx_noops.next, - kib_tx_t, tx_list); + struct kib_tx, tx_list); } else if (!list_empty(&conn->ibc_tx_queue)) { credit = 1; tx = list_entry(conn->ibc_tx_queue.next, - kib_tx_t, tx_list); + struct kib_tx, tx_list); } else { break; } @@ -973,10 +974,10 @@ kiblnd_check_sends(kib_conn_t *conn) } static void -kiblnd_tx_complete(kib_tx_t *tx, int status) +kiblnd_tx_complete(struct kib_tx *tx, int status) { int failed = (status != IB_WC_SUCCESS); - kib_conn_t *conn = tx->tx_conn; + struct kib_conn *conn = tx->tx_conn; int idle; LASSERT(tx->tx_sending > 0); @@ 
-1028,12 +1029,12 @@ kiblnd_tx_complete(kib_tx_t *tx, int status) } static void -kiblnd_init_tx_msg(lnet_ni_t *ni, kib_tx_t *tx, int type, int body_nob) +kiblnd_init_tx_msg(lnet_ni_t *ni, struct kib_tx *tx, int type, int body_nob) { - kib_hca_dev_t *hdev = tx->tx_pool->tpo_hdev; + struct kib_hca_dev *hdev = tx->tx_pool->tpo_hdev; struct ib_sge *sge = &tx->tx_sge[tx->tx_nwrq]; struct ib_rdma_wr *wrq = &tx->tx_wrq[tx->tx_nwrq]; - int nob = offsetof(kib_msg_t, ibm_u) + body_nob; + int nob = offsetof(struct kib_msg, ibm_u) + body_nob; struct ib_mr *mr = hdev->ibh_mrs; LASSERT(tx->tx_nwrq >= 0); @@ -1060,13 +1061,13 @@ kiblnd_init_tx_msg(lnet_ni_t *ni, kib_tx_t *tx, int type, int body_nob) } static int -kiblnd_init_rdma(kib_conn_t *conn, kib_tx_t *tx, int type, - int resid, kib_rdma_desc_t *dstrd, __u64 dstcookie) +kiblnd_init_rdma(struct kib_conn *conn, struct kib_tx *tx, int type, + int resid, struct kib_rdma_desc *dstrd, __u64 dstcookie) { - kib_msg_t *ibmsg = tx->tx_msg; - kib_rdma_desc_t *srcrd = tx->tx_rd; + struct kib_msg *ibmsg = tx->tx_msg; + struct kib_rdma_desc *srcrd = tx->tx_rd; struct ib_sge *sge = &tx->tx_sge[0]; - struct ib_rdma_wr *wrq = &tx->tx_wrq[0], *next; + struct ib_rdma_wr *wrq, *next; int rc = resid; int srcidx = 0; int dstidx = 0; @@ -1102,7 +1103,7 @@ kiblnd_init_rdma(kib_conn_t *conn, kib_tx_t *tx, int type, wrknob = min(min(kiblnd_rd_frag_size(srcrd, srcidx), kiblnd_rd_frag_size(dstrd, dstidx)), - (__u32) resid); + (__u32)resid); sge = &tx->tx_sge[tx->tx_nwrq]; sge->addr = kiblnd_rd_frag_addr(srcrd, srcidx); @@ -1138,13 +1139,13 @@ kiblnd_init_rdma(kib_conn_t *conn, kib_tx_t *tx, int type, ibmsg->ibm_u.completion.ibcm_status = rc; ibmsg->ibm_u.completion.ibcm_cookie = dstcookie; kiblnd_init_tx_msg(conn->ibc_peer->ibp_ni, tx, - type, sizeof(kib_completion_msg_t)); + type, sizeof(struct kib_completion_msg)); return rc; } static void -kiblnd_queue_tx_locked(kib_tx_t *tx, kib_conn_t *conn) +kiblnd_queue_tx_locked(struct kib_tx *tx, struct kib_conn *conn) { struct list_head *q; @@ -1199,7 +1200,7 @@ kiblnd_queue_tx_locked(kib_tx_t *tx, kib_conn_t *conn) } static void -kiblnd_queue_tx(kib_tx_t *tx, kib_conn_t *conn) +kiblnd_queue_tx(struct kib_tx *tx, struct kib_conn *conn) { spin_lock(&conn->ibc_lock); kiblnd_queue_tx_locked(tx, conn); @@ -1246,11 +1247,11 @@ static int kiblnd_resolve_addr(struct rdma_cm_id *cmid, } static void -kiblnd_connect_peer(kib_peer_t *peer) +kiblnd_connect_peer(struct kib_peer *peer) { struct rdma_cm_id *cmid; - kib_dev_t *dev; - kib_net_t *net = peer->ibp_ni->ni_data; + struct kib_dev *dev; + struct kib_net *net = peer->ibp_ni->ni_data; struct sockaddr_in srcaddr; struct sockaddr_in dstaddr; int rc; @@ -1314,7 +1315,7 @@ kiblnd_connect_peer(kib_peer_t *peer) } bool -kiblnd_reconnect_peer(kib_peer_t *peer) +kiblnd_reconnect_peer(struct kib_peer *peer) { rwlock_t *glock = &kiblnd_data.kib_global_lock; char *reason = NULL; @@ -1364,11 +1365,11 @@ no_reconnect: } void -kiblnd_launch_tx(lnet_ni_t *ni, kib_tx_t *tx, lnet_nid_t nid) +kiblnd_launch_tx(lnet_ni_t *ni, struct kib_tx *tx, lnet_nid_t nid) { - kib_peer_t *peer; - kib_peer_t *peer2; - kib_conn_t *conn; + struct kib_peer *peer; + struct kib_peer *peer2; + struct kib_conn *conn; rwlock_t *g_lock = &kiblnd_data.kib_global_lock; unsigned long flags; int rc; @@ -1471,7 +1472,7 @@ kiblnd_launch_tx(lnet_ni_t *ni, kib_tx_t *tx, lnet_nid_t nid) peer->ibp_connecting = 1; /* always called with a ref on ni, which prevents ni being shutdown */ - LASSERT(!((kib_net_t *)ni->ni_data)->ibn_shutdown); + 
LASSERT(!((struct kib_net *)ni->ni_data)->ibn_shutdown); if (tx) list_add_tail(&tx->tx_list, &peer->ibp_tx_queue); @@ -1498,9 +1499,9 @@ kiblnd_send(lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg) lnet_kiov_t *payload_kiov = lntmsg->msg_kiov; unsigned int payload_offset = lntmsg->msg_offset; unsigned int payload_nob = lntmsg->msg_len; - kib_msg_t *ibmsg; - kib_rdma_desc_t *rd; - kib_tx_t *tx; + struct kib_msg *ibmsg; + struct kib_rdma_desc *rd; + struct kib_tx *tx; int nob; int rc; @@ -1531,7 +1532,7 @@ kiblnd_send(lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg) break; /* send IMMEDIATE */ /* is the REPLY message too small for RDMA? */ - nob = offsetof(kib_msg_t, ibm_u.immediate.ibim_payload[lntmsg->msg_md->md_length]); + nob = offsetof(struct kib_msg, ibm_u.immediate.ibim_payload[lntmsg->msg_md->md_length]); if (nob <= IBLND_MSG_SIZE) break; /* send IMMEDIATE */ @@ -1561,7 +1562,7 @@ kiblnd_send(lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg) return -EIO; } - nob = offsetof(kib_get_msg_t, ibgm_rd.rd_frags[rd->rd_nfrags]); + nob = offsetof(struct kib_get_msg, ibgm_rd.rd_frags[rd->rd_nfrags]); ibmsg->ibm_u.get.ibgm_cookie = tx->tx_cookie; ibmsg->ibm_u.get.ibgm_hdr = *hdr; @@ -1583,7 +1584,7 @@ kiblnd_send(lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg) case LNET_MSG_REPLY: case LNET_MSG_PUT: /* Is the payload small enough not to need RDMA? */ - nob = offsetof(kib_msg_t, ibm_u.immediate.ibim_payload[payload_nob]); + nob = offsetof(struct kib_msg, ibm_u.immediate.ibim_payload[payload_nob]); if (nob <= IBLND_MSG_SIZE) break; /* send IMMEDIATE */ @@ -1613,7 +1614,7 @@ kiblnd_send(lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg) ibmsg = tx->tx_msg; ibmsg->ibm_u.putreq.ibprm_hdr = *hdr; ibmsg->ibm_u.putreq.ibprm_cookie = tx->tx_cookie; - kiblnd_init_tx_msg(ni, tx, IBLND_MSG_PUT_REQ, sizeof(kib_putreq_msg_t)); + kiblnd_init_tx_msg(ni, tx, IBLND_MSG_PUT_REQ, sizeof(struct kib_putreq_msg)); tx->tx_lntmsg[0] = lntmsg; /* finalise lntmsg on completion */ tx->tx_waiting = 1; /* waiting for PUT_{ACK,NAK} */ @@ -1623,7 +1624,7 @@ kiblnd_send(lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg) /* send IMMEDIATE */ - LASSERT(offsetof(kib_msg_t, ibm_u.immediate.ibim_payload[payload_nob]) + LASSERT(offsetof(struct kib_msg, ibm_u.immediate.ibim_payload[payload_nob]) <= IBLND_MSG_SIZE); tx = kiblnd_get_idle_tx(ni, target.nid); @@ -1638,16 +1639,16 @@ kiblnd_send(lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg) if (payload_kiov) lnet_copy_kiov2flat(IBLND_MSG_SIZE, ibmsg, - offsetof(kib_msg_t, ibm_u.immediate.ibim_payload), + offsetof(struct kib_msg, ibm_u.immediate.ibim_payload), payload_niov, payload_kiov, payload_offset, payload_nob); else lnet_copy_iov2flat(IBLND_MSG_SIZE, ibmsg, - offsetof(kib_msg_t, ibm_u.immediate.ibim_payload), + offsetof(struct kib_msg, ibm_u.immediate.ibim_payload), payload_niov, payload_iov, payload_offset, payload_nob); - nob = offsetof(kib_immediate_msg_t, ibim_payload[payload_nob]); + nob = offsetof(struct kib_immediate_msg, ibim_payload[payload_nob]); kiblnd_init_tx_msg(ni, tx, IBLND_MSG_IMMEDIATE, nob); tx->tx_lntmsg[0] = lntmsg; /* finalise lntmsg on completion */ @@ -1656,7 +1657,7 @@ kiblnd_send(lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg) } static void -kiblnd_reply(lnet_ni_t *ni, kib_rx_t *rx, lnet_msg_t *lntmsg) +kiblnd_reply(lnet_ni_t *ni, struct kib_rx *rx, lnet_msg_t *lntmsg) { lnet_process_id_t target = lntmsg->msg_target; unsigned int niov = lntmsg->msg_niov; @@ -1664,7 +1665,7 @@ kiblnd_reply(lnet_ni_t *ni, kib_rx_t *rx, lnet_msg_t *lntmsg) lnet_kiov_t 
*kiov = lntmsg->msg_kiov; unsigned int offset = lntmsg->msg_offset; unsigned int nob = lntmsg->msg_len; - kib_tx_t *tx; + struct kib_tx *tx; int rc; tx = kiblnd_get_idle_tx(ni, rx->rx_conn->ibc_peer->ibp_nid); @@ -1721,10 +1722,10 @@ kiblnd_recv(lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg, int delayed, unsigned int niov, struct kvec *iov, lnet_kiov_t *kiov, unsigned int offset, unsigned int mlen, unsigned int rlen) { - kib_rx_t *rx = private; - kib_msg_t *rxmsg = rx->rx_msg; - kib_conn_t *conn = rx->rx_conn; - kib_tx_t *tx; + struct kib_rx *rx = private; + struct kib_msg *rxmsg = rx->rx_msg; + struct kib_conn *conn = rx->rx_conn; + struct kib_tx *tx; int nob; int post_credit = IBLND_POSTRX_PEER_CREDIT; int rc = 0; @@ -1739,7 +1740,7 @@ kiblnd_recv(lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg, int delayed, LBUG(); case IBLND_MSG_IMMEDIATE: - nob = offsetof(kib_msg_t, ibm_u.immediate.ibim_payload[rlen]); + nob = offsetof(struct kib_msg, ibm_u.immediate.ibim_payload[rlen]); if (nob > rx->rx_nob) { CERROR("Immediate message from %s too big: %d(%d)\n", libcfs_nid2str(rxmsg->ibm_u.immediate.ibim_hdr.src_nid), @@ -1751,19 +1752,19 @@ kiblnd_recv(lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg, int delayed, if (kiov) lnet_copy_flat2kiov(niov, kiov, offset, IBLND_MSG_SIZE, rxmsg, - offsetof(kib_msg_t, ibm_u.immediate.ibim_payload), + offsetof(struct kib_msg, ibm_u.immediate.ibim_payload), mlen); else lnet_copy_flat2iov(niov, iov, offset, IBLND_MSG_SIZE, rxmsg, - offsetof(kib_msg_t, ibm_u.immediate.ibim_payload), + offsetof(struct kib_msg, ibm_u.immediate.ibim_payload), mlen); lnet_finalize(ni, lntmsg, 0); break; case IBLND_MSG_PUT_REQ: { - kib_msg_t *txmsg; - kib_rdma_desc_t *rd; + struct kib_msg *txmsg; + struct kib_rdma_desc *rd; if (!mlen) { lnet_finalize(ni, lntmsg, 0); @@ -1799,7 +1800,7 @@ kiblnd_recv(lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg, int delayed, break; } - nob = offsetof(kib_putack_msg_t, ibpam_rd.rd_frags[rd->rd_nfrags]); + nob = offsetof(struct kib_putack_msg, ibpam_rd.rd_frags[rd->rd_nfrags]); txmsg->ibm_u.putack.ibpam_src_cookie = rxmsg->ibm_u.putreq.ibprm_cookie; txmsg->ibm_u.putack.ibpam_dst_cookie = tx->tx_cookie; @@ -1850,7 +1851,7 @@ kiblnd_thread_fini(void) } static void -kiblnd_peer_alive(kib_peer_t *peer) +kiblnd_peer_alive(struct kib_peer *peer) { /* This is racy, but everyone's only writing cfs_time_current() */ peer->ibp_last_alive = cfs_time_current(); @@ -1858,7 +1859,7 @@ kiblnd_peer_alive(kib_peer_t *peer) } static void -kiblnd_peer_notify(kib_peer_t *peer) +kiblnd_peer_notify(struct kib_peer *peer) { int error = 0; unsigned long last_alive = 0; @@ -1881,7 +1882,7 @@ kiblnd_peer_notify(kib_peer_t *peer) } void -kiblnd_close_conn_locked(kib_conn_t *conn, int error) +kiblnd_close_conn_locked(struct kib_conn *conn, int error) { /* * This just does the immediate housekeeping. 'error' is zero for a @@ -1891,8 +1892,8 @@ kiblnd_close_conn_locked(kib_conn_t *conn, int error) * already dealing with it (either to set it up or tear it down). * Caller holds kib_global_lock exclusively in irq context */ - kib_peer_t *peer = conn->ibc_peer; - kib_dev_t *dev; + struct kib_peer *peer = conn->ibc_peer; + struct kib_dev *dev; unsigned long flags; LASSERT(error || conn->ibc_state >= IBLND_CONN_ESTABLISHED); @@ -1921,7 +1922,7 @@ kiblnd_close_conn_locked(kib_conn_t *conn, int error) list_empty(&conn->ibc_active_txs) ? 
"" : "(waiting)"); } - dev = ((kib_net_t *)peer->ibp_ni->ni_data)->ibn_dev; + dev = ((struct kib_net *)peer->ibp_ni->ni_data)->ibn_dev; list_del(&conn->ibc_list); /* connd (see below) takes over ibc_list's ref */ @@ -1951,7 +1952,7 @@ kiblnd_close_conn_locked(kib_conn_t *conn, int error) } void -kiblnd_close_conn(kib_conn_t *conn, int error) +kiblnd_close_conn(struct kib_conn *conn, int error) { unsigned long flags; @@ -1963,11 +1964,11 @@ kiblnd_close_conn(kib_conn_t *conn, int error) } static void -kiblnd_handle_early_rxs(kib_conn_t *conn) +kiblnd_handle_early_rxs(struct kib_conn *conn) { unsigned long flags; - kib_rx_t *rx; - kib_rx_t *tmp; + struct kib_rx *rx; + struct kib_rx *tmp; LASSERT(!in_interrupt()); LASSERT(conn->ibc_state >= IBLND_CONN_ESTABLISHED); @@ -1985,17 +1986,17 @@ kiblnd_handle_early_rxs(kib_conn_t *conn) } static void -kiblnd_abort_txs(kib_conn_t *conn, struct list_head *txs) +kiblnd_abort_txs(struct kib_conn *conn, struct list_head *txs) { LIST_HEAD(zombies); struct list_head *tmp; struct list_head *nxt; - kib_tx_t *tx; + struct kib_tx *tx; spin_lock(&conn->ibc_lock); list_for_each_safe(tmp, nxt, txs) { - tx = list_entry(tmp, kib_tx_t, tx_list); + tx = list_entry(tmp, struct kib_tx, tx_list); if (txs == &conn->ibc_active_txs) { LASSERT(!tx->tx_queued); @@ -2020,7 +2021,7 @@ kiblnd_abort_txs(kib_conn_t *conn, struct list_head *txs) } static void -kiblnd_finalise_conn(kib_conn_t *conn) +kiblnd_finalise_conn(struct kib_conn *conn) { LASSERT(!in_interrupt()); LASSERT(conn->ibc_state > IBLND_CONN_INIT); @@ -2048,7 +2049,7 @@ kiblnd_finalise_conn(kib_conn_t *conn) } static void -kiblnd_peer_connect_failed(kib_peer_t *peer, int active, int error) +kiblnd_peer_connect_failed(struct kib_peer *peer, int active, int error) { LIST_HEAD(zombies); unsigned long flags; @@ -2102,11 +2103,11 @@ kiblnd_peer_connect_failed(kib_peer_t *peer, int active, int error) } static void -kiblnd_connreq_done(kib_conn_t *conn, int status) +kiblnd_connreq_done(struct kib_conn *conn, int status) { - kib_peer_t *peer = conn->ibc_peer; - kib_tx_t *tx; - kib_tx_t *tmp; + struct kib_peer *peer = conn->ibc_peer; + struct kib_tx *tx; + struct kib_tx *tmp; struct list_head txs; unsigned long flags; int active; @@ -2212,7 +2213,7 @@ kiblnd_connreq_done(kib_conn_t *conn, int status) } static void -kiblnd_reject(struct rdma_cm_id *cmid, kib_rej_t *rej) +kiblnd_reject(struct rdma_cm_id *cmid, struct kib_rej *rej) { int rc; @@ -2226,17 +2227,17 @@ static int kiblnd_passive_connect(struct rdma_cm_id *cmid, void *priv, int priv_nob) { rwlock_t *g_lock = &kiblnd_data.kib_global_lock; - kib_msg_t *reqmsg = priv; - kib_msg_t *ackmsg; - kib_dev_t *ibdev; - kib_peer_t *peer; - kib_peer_t *peer2; - kib_conn_t *conn; + struct kib_msg *reqmsg = priv; + struct kib_msg *ackmsg; + struct kib_dev *ibdev; + struct kib_peer *peer; + struct kib_peer *peer2; + struct kib_conn *conn; lnet_ni_t *ni = NULL; - kib_net_t *net = NULL; + struct kib_net *net = NULL; lnet_nid_t nid; struct rdma_conn_param cp; - kib_rej_t rej; + struct kib_rej rej; int version = IBLND_MSG_VERSION; unsigned long flags; int rc; @@ -2245,7 +2246,7 @@ kiblnd_passive_connect(struct rdma_cm_id *cmid, void *priv, int priv_nob) LASSERT(!in_interrupt()); /* cmid inherits 'context' from the corresponding listener id */ - ibdev = (kib_dev_t *)cmid->context; + ibdev = (struct kib_dev *)cmid->context; LASSERT(ibdev); memset(&rej, 0, sizeof(rej)); @@ -2263,7 +2264,7 @@ kiblnd_passive_connect(struct rdma_cm_id *cmid, void *priv, int priv_nob) goto failed; } - if 
(priv_nob < offsetof(kib_msg_t, ibm_type)) { + if (priv_nob < offsetof(struct kib_msg, ibm_type)) { CERROR("Short connection request\n"); goto failed; } @@ -2298,7 +2299,7 @@ kiblnd_passive_connect(struct rdma_cm_id *cmid, void *priv, int priv_nob) ni = lnet_net2ni(LNET_NIDNET(reqmsg->ibm_dstnid)); if (ni) { - net = (kib_net_t *)ni->ni_data; + net = (struct kib_net *)ni->ni_data; rej.ibr_incarnation = net->ibn_incarnation; } @@ -2333,11 +2334,11 @@ kiblnd_passive_connect(struct rdma_cm_id *cmid, void *priv, int priv_nob) } if (reqmsg->ibm_u.connparams.ibcp_queue_depth > - IBLND_MSG_QUEUE_SIZE(version)) { + kiblnd_msg_queue_size(version, ni)) { CERROR("Can't accept conn from %s, queue depth too large: %d (<=%d wanted)\n", libcfs_nid2str(nid), reqmsg->ibm_u.connparams.ibcp_queue_depth, - IBLND_MSG_QUEUE_SIZE(version)); + kiblnd_msg_queue_size(version, ni)); if (version == IBLND_MSG_VERSION) rej.ibr_why = IBLND_REJECT_MSG_QUEUE_SIZE; @@ -2346,24 +2347,24 @@ kiblnd_passive_connect(struct rdma_cm_id *cmid, void *priv, int priv_nob) } if (reqmsg->ibm_u.connparams.ibcp_max_frags > - IBLND_RDMA_FRAGS(version)) { + kiblnd_rdma_frags(version, ni)) { CWARN("Can't accept conn from %s (version %x): max_frags %d too large (%d wanted)\n", libcfs_nid2str(nid), version, reqmsg->ibm_u.connparams.ibcp_max_frags, - IBLND_RDMA_FRAGS(version)); + kiblnd_rdma_frags(version, ni)); if (version >= IBLND_MSG_VERSION) rej.ibr_why = IBLND_REJECT_RDMA_FRAGS; goto failed; } else if (reqmsg->ibm_u.connparams.ibcp_max_frags < - IBLND_RDMA_FRAGS(version) && !net->ibn_fmr_ps) { + kiblnd_rdma_frags(version, ni) && !net->ibn_fmr_ps) { CWARN("Can't accept conn from %s (version %x): max_frags %d incompatible without FMR pool (%d wanted)\n", libcfs_nid2str(nid), version, reqmsg->ibm_u.connparams.ibcp_max_frags, - IBLND_RDMA_FRAGS(version)); + kiblnd_rdma_frags(version, ni)); - if (version >= IBLND_MSG_VERSION) + if (version == IBLND_MSG_VERSION) rej.ibr_why = IBLND_REJECT_RDMA_FRAGS; goto failed; @@ -2524,23 +2525,24 @@ kiblnd_passive_connect(struct rdma_cm_id *cmid, void *priv, int priv_nob) return 0; failed: - if (ni) + if (ni) { lnet_ni_decref(ni); + rej.ibr_cp.ibcp_queue_depth = kiblnd_msg_queue_size(version, ni); + rej.ibr_cp.ibcp_max_frags = kiblnd_rdma_frags(version, ni); + } rej.ibr_version = version; - rej.ibr_cp.ibcp_queue_depth = IBLND_MSG_QUEUE_SIZE(version); - rej.ibr_cp.ibcp_max_frags = IBLND_RDMA_FRAGS(version); kiblnd_reject(cmid, &rej); return -ECONNREFUSED; } static void -kiblnd_check_reconnect(kib_conn_t *conn, int version, - __u64 incarnation, int why, kib_connparams_t *cp) +kiblnd_check_reconnect(struct kib_conn *conn, int version, + __u64 incarnation, int why, struct kib_connparams *cp) { rwlock_t *glock = &kiblnd_data.kib_global_lock; - kib_peer_t *peer = conn->ibc_peer; + struct kib_peer *peer = conn->ibc_peer; char *reason; int msg_size = IBLND_MSG_SIZE; int frag_num = -1; @@ -2580,12 +2582,15 @@ kiblnd_check_reconnect(kib_conn_t *conn, int version, reason = "Unknown"; break; - case IBLND_REJECT_RDMA_FRAGS: + case IBLND_REJECT_RDMA_FRAGS: { + struct lnet_ioctl_config_lnd_tunables *tunables; + if (!cp) { reason = "can't negotiate max frags"; goto out; } - if (!*kiblnd_tunables.kib_map_on_demand) { + tunables = peer->ibp_ni->ni_lnd_tunables; + if (!tunables->lt_tun_u.lt_o2ib.lnd_map_on_demand) { reason = "map_on_demand must be enabled"; goto out; } @@ -2597,7 +2602,7 @@ kiblnd_check_reconnect(kib_conn_t *conn, int version, peer->ibp_max_frags = frag_num; reason = "rdma fragments"; break; - + } case 
IBLND_REJECT_MSG_QUEUE_SIZE: if (!cp) { reason = "can't negotiate queue depth"; @@ -2646,9 +2651,9 @@ out: } static void -kiblnd_rejected(kib_conn_t *conn, int reason, void *priv, int priv_nob) +kiblnd_rejected(struct kib_conn *conn, int reason, void *priv, int priv_nob) { - kib_peer_t *peer = conn->ibc_peer; + struct kib_peer *peer = conn->ibc_peer; LASSERT(!in_interrupt()); LASSERT(conn->ibc_state == IBLND_CONN_ACTIVE_CONNECT); @@ -2666,9 +2671,9 @@ kiblnd_rejected(kib_conn_t *conn, int reason, void *priv, int priv_nob) break; case IB_CM_REJ_CONSUMER_DEFINED: - if (priv_nob >= offsetof(kib_rej_t, ibr_padding)) { - kib_rej_t *rej = priv; - kib_connparams_t *cp = NULL; + if (priv_nob >= offsetof(struct kib_rej, ibr_padding)) { + struct kib_rej *rej = priv; + struct kib_connparams *cp = NULL; int flip = 0; __u64 incarnation = -1; @@ -2691,7 +2696,7 @@ kiblnd_rejected(kib_conn_t *conn, int reason, void *priv, int priv_nob) flip = 1; } - if (priv_nob >= sizeof(kib_rej_t) && + if (priv_nob >= sizeof(struct kib_rej) && rej->ibr_version > IBLND_MSG_VERSION_1) { /* * priv_nob is always 148 in current version @@ -2774,12 +2779,12 @@ kiblnd_rejected(kib_conn_t *conn, int reason, void *priv, int priv_nob) } static void -kiblnd_check_connreply(kib_conn_t *conn, void *priv, int priv_nob) +kiblnd_check_connreply(struct kib_conn *conn, void *priv, int priv_nob) { - kib_peer_t *peer = conn->ibc_peer; + struct kib_peer *peer = conn->ibc_peer; lnet_ni_t *ni = peer->ibp_ni; - kib_net_t *net = ni->ni_data; - kib_msg_t *msg = priv; + struct kib_net *net = ni->ni_data; + struct kib_msg *msg = priv; int ver = conn->ibc_version; int rc = kiblnd_unpack_msg(msg, priv_nob); unsigned long flags; @@ -2876,9 +2881,9 @@ kiblnd_check_connreply(kib_conn_t *conn, void *priv, int priv_nob) static int kiblnd_active_connect(struct rdma_cm_id *cmid) { - kib_peer_t *peer = (kib_peer_t *)cmid->context; - kib_conn_t *conn; - kib_msg_t *msg; + struct kib_peer *peer = (struct kib_peer *)cmid->context; + struct kib_conn *conn; + struct kib_msg *msg; struct rdma_conn_param cp; int version; __u64 incarnation; @@ -2943,8 +2948,8 @@ kiblnd_active_connect(struct rdma_cm_id *cmid) int kiblnd_cm_callback(struct rdma_cm_id *cmid, struct rdma_cm_event *event) { - kib_peer_t *peer; - kib_conn_t *conn; + struct kib_peer *peer; + struct kib_conn *conn; int rc; switch (event->event) { @@ -2962,7 +2967,7 @@ kiblnd_cm_callback(struct rdma_cm_id *cmid, struct rdma_cm_event *event) return rc; case RDMA_CM_EVENT_ADDR_ERROR: - peer = (kib_peer_t *)cmid->context; + peer = (struct kib_peer *)cmid->context; CNETERR("%s: ADDR ERROR %d\n", libcfs_nid2str(peer->ibp_nid), event->status); kiblnd_peer_connect_failed(peer, 1, -EHOSTUNREACH); @@ -2970,7 +2975,7 @@ kiblnd_cm_callback(struct rdma_cm_id *cmid, struct rdma_cm_event *event) return -EHOSTUNREACH; /* rc destroys cmid */ case RDMA_CM_EVENT_ADDR_RESOLVED: - peer = (kib_peer_t *)cmid->context; + peer = (struct kib_peer *)cmid->context; CDEBUG(D_NET, "%s Addr resolved: %d\n", libcfs_nid2str(peer->ibp_nid), event->status); @@ -2993,7 +2998,7 @@ kiblnd_cm_callback(struct rdma_cm_id *cmid, struct rdma_cm_event *event) return rc; /* rc destroys cmid */ case RDMA_CM_EVENT_ROUTE_ERROR: - peer = (kib_peer_t *)cmid->context; + peer = (struct kib_peer *)cmid->context; CNETERR("%s: ROUTE ERROR %d\n", libcfs_nid2str(peer->ibp_nid), event->status); kiblnd_peer_connect_failed(peer, 1, -EHOSTUNREACH); @@ -3001,7 +3006,7 @@ kiblnd_cm_callback(struct rdma_cm_id *cmid, struct rdma_cm_event *event) return -EHOSTUNREACH; /* 
rc destroys cmid */ case RDMA_CM_EVENT_ROUTE_RESOLVED: - peer = (kib_peer_t *)cmid->context; + peer = (struct kib_peer *)cmid->context; CDEBUG(D_NET, "%s Route resolved: %d\n", libcfs_nid2str(peer->ibp_nid), event->status); @@ -3015,7 +3020,7 @@ kiblnd_cm_callback(struct rdma_cm_id *cmid, struct rdma_cm_event *event) return event->status; /* rc destroys cmid */ case RDMA_CM_EVENT_UNREACHABLE: - conn = (kib_conn_t *)cmid->context; + conn = (struct kib_conn *)cmid->context; LASSERT(conn->ibc_state == IBLND_CONN_ACTIVE_CONNECT || conn->ibc_state == IBLND_CONN_PASSIVE_WAIT); CNETERR("%s: UNREACHABLE %d\n", @@ -3025,7 +3030,7 @@ kiblnd_cm_callback(struct rdma_cm_id *cmid, struct rdma_cm_event *event) return 0; case RDMA_CM_EVENT_CONNECT_ERROR: - conn = (kib_conn_t *)cmid->context; + conn = (struct kib_conn *)cmid->context; LASSERT(conn->ibc_state == IBLND_CONN_ACTIVE_CONNECT || conn->ibc_state == IBLND_CONN_PASSIVE_WAIT); CNETERR("%s: CONNECT ERROR %d\n", @@ -3035,7 +3040,7 @@ kiblnd_cm_callback(struct rdma_cm_id *cmid, struct rdma_cm_event *event) return 0; case RDMA_CM_EVENT_REJECTED: - conn = (kib_conn_t *)cmid->context; + conn = (struct kib_conn *)cmid->context; switch (conn->ibc_state) { default: LBUG(); @@ -3057,7 +3062,7 @@ kiblnd_cm_callback(struct rdma_cm_id *cmid, struct rdma_cm_event *event) return 0; case RDMA_CM_EVENT_ESTABLISHED: - conn = (kib_conn_t *)cmid->context; + conn = (struct kib_conn *)cmid->context; switch (conn->ibc_state) { default: LBUG(); @@ -3083,7 +3088,7 @@ kiblnd_cm_callback(struct rdma_cm_id *cmid, struct rdma_cm_event *event) CDEBUG(D_NET, "Ignore TIMEWAIT_EXIT event\n"); return 0; case RDMA_CM_EVENT_DISCONNECTED: - conn = (kib_conn_t *)cmid->context; + conn = (struct kib_conn *)cmid->context; if (conn->ibc_state < IBLND_CONN_ESTABLISHED) { CERROR("%s DISCONNECTED\n", libcfs_nid2str(conn->ibc_peer->ibp_nid)); @@ -3112,13 +3117,13 @@ kiblnd_cm_callback(struct rdma_cm_id *cmid, struct rdma_cm_event *event) } static int -kiblnd_check_txs_locked(kib_conn_t *conn, struct list_head *txs) +kiblnd_check_txs_locked(struct kib_conn *conn, struct list_head *txs) { - kib_tx_t *tx; + struct kib_tx *tx; struct list_head *ttmp; list_for_each(ttmp, txs) { - tx = list_entry(ttmp, kib_tx_t, tx_list); + tx = list_entry(ttmp, struct kib_tx, tx_list); if (txs != &conn->ibc_active_txs) { LASSERT(tx->tx_queued); @@ -3139,7 +3144,7 @@ kiblnd_check_txs_locked(kib_conn_t *conn, struct list_head *txs) } static int -kiblnd_conn_timed_out_locked(kib_conn_t *conn) +kiblnd_conn_timed_out_locked(struct kib_conn *conn) { return kiblnd_check_txs_locked(conn, &conn->ibc_tx_queue) || kiblnd_check_txs_locked(conn, &conn->ibc_tx_noops) || @@ -3155,10 +3160,10 @@ kiblnd_check_conns(int idx) LIST_HEAD(checksends); struct list_head *peers = &kiblnd_data.kib_peers[idx]; struct list_head *ptmp; - kib_peer_t *peer; - kib_conn_t *conn; - kib_conn_t *temp; - kib_conn_t *tmp; + struct kib_peer *peer; + struct kib_conn *conn; + struct kib_conn *temp; + struct kib_conn *tmp; struct list_head *ctmp; unsigned long flags; @@ -3170,13 +3175,13 @@ kiblnd_check_conns(int idx) read_lock_irqsave(&kiblnd_data.kib_global_lock, flags); list_for_each(ptmp, peers) { - peer = list_entry(ptmp, kib_peer_t, ibp_list); + peer = list_entry(ptmp, struct kib_peer, ibp_list); list_for_each(ctmp, &peer->ibp_conns) { int timedout; int sendnoop; - conn = list_entry(ctmp, kib_conn_t, ibc_list); + conn = list_entry(ctmp, struct kib_conn, ibc_list); LASSERT(conn->ibc_state == IBLND_CONN_ESTABLISHED); @@ -3234,7 +3239,7 @@ 
kiblnd_check_conns(int idx) } static void -kiblnd_disconnect_conn(kib_conn_t *conn) +kiblnd_disconnect_conn(struct kib_conn *conn) { LASSERT(!in_interrupt()); LASSERT(current == kiblnd_data.kib_connd); @@ -3263,7 +3268,7 @@ kiblnd_connd(void *arg) spinlock_t *lock= &kiblnd_data.kib_connd_lock; wait_queue_t wait; unsigned long flags; - kib_conn_t *conn; + struct kib_conn *conn; int timeout; int i; int dropped_lock; @@ -3283,10 +3288,10 @@ kiblnd_connd(void *arg) dropped_lock = 0; if (!list_empty(&kiblnd_data.kib_connd_zombies)) { - kib_peer_t *peer = NULL; + struct kib_peer *peer = NULL; conn = list_entry(kiblnd_data.kib_connd_zombies.next, - kib_conn_t, ibc_list); + struct kib_conn, ibc_list); list_del(&conn->ibc_list); if (conn->ibc_reconnect) { peer = conn->ibc_peer; @@ -3313,7 +3318,7 @@ kiblnd_connd(void *arg) if (!list_empty(&kiblnd_data.kib_connd_conns)) { conn = list_entry(kiblnd_data.kib_connd_conns.next, - kib_conn_t, ibc_list); + struct kib_conn, ibc_list); list_del(&conn->ibc_list); spin_unlock_irqrestore(lock, flags); @@ -3337,7 +3342,7 @@ kiblnd_connd(void *arg) break; conn = list_entry(kiblnd_data.kib_reconn_list.next, - kib_conn_t, ibc_list); + struct kib_conn, ibc_list); list_del(&conn->ibc_list); spin_unlock_irqrestore(lock, flags); @@ -3408,7 +3413,7 @@ kiblnd_connd(void *arg) void kiblnd_qp_event(struct ib_event *event, void *arg) { - kib_conn_t *conn = arg; + struct kib_conn *conn = arg; switch (event->event) { case IB_EVENT_COMM_EST: @@ -3430,6 +3435,12 @@ kiblnd_complete(struct ib_wc *wc) default: LBUG(); + case IBLND_WID_MR: + if (wc->status != IB_WC_SUCCESS && + wc->status != IB_WC_WR_FLUSH_ERR) + CNETERR("FastReg failed: %d\n", wc->status); + break; + case IBLND_WID_RDMA: /* * We only get RDMA completion notification if it fails. All @@ -3464,7 +3475,7 @@ kiblnd_cq_completion(struct ib_cq *cq, void *arg) * occurred. But in this case, !ibc_nrx && !ibc_nsends_posted * and this CQ is about to be destroyed so I NOOP. */ - kib_conn_t *conn = arg; + struct kib_conn *conn = arg; struct kib_sched_info *sched = conn->ibc_sched; unsigned long flags; @@ -3491,7 +3502,7 @@ kiblnd_cq_completion(struct ib_cq *cq, void *arg) void kiblnd_cq_event(struct ib_event *event, void *arg) { - kib_conn_t *conn = arg; + struct kib_conn *conn = arg; CERROR("%s: async CQ event type %d\n", libcfs_nid2str(conn->ibc_peer->ibp_nid), event->event); @@ -3502,7 +3513,7 @@ kiblnd_scheduler(void *arg) { long id = (long)arg; struct kib_sched_info *sched; - kib_conn_t *conn; + struct kib_conn *conn; wait_queue_t wait; unsigned long flags; struct ib_wc wc; @@ -3537,7 +3548,7 @@ kiblnd_scheduler(void *arg) did_something = 0; if (!list_empty(&sched->ibs_conns)) { - conn = list_entry(sched->ibs_conns.next, kib_conn_t, + conn = list_entry(sched->ibs_conns.next, struct kib_conn, ibc_sched_list); /* take over kib_sched_conns' ref on conn... 
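	 * the scheduler then drains one completion per pass, roughly
	 * (sketch of the loop that follows, error handling omitted):
	 *	rc = ib_poll_cq(conn->ibc_cq, 1, &wc);
	 *	if (rc == 1)
	 *		kiblnd_complete(&wc);
	 * and re-arms the CQ with ib_req_notify_cq() when it runs dry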
*/ LASSERT(conn->ibc_scheduled); @@ -3637,7 +3648,7 @@ int kiblnd_failover_thread(void *arg) { rwlock_t *glock = &kiblnd_data.kib_global_lock; - kib_dev_t *dev; + struct kib_dev *dev; wait_queue_t wait; unsigned long flags; int rc; diff --git a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_modparams.c b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_modparams.c index b4607dad3712..44e960f60833 100644 --- a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_modparams.c +++ b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_modparams.c @@ -15,11 +15,7 @@ * * You should have received a copy of the GNU General Public License * version 2 along with this program; If not, see - http://www.sun.com/software/products/lustre/docs/GPLv2.pdf - * - * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, - * CA 95054 USA or visit www.sun.com if you need additional information or - * have any questions. + * http://www.gnu.org/licenses/gpl-2.0.html * * GPL HEADER END */ @@ -145,81 +141,142 @@ static int use_privileged_port = 1; module_param(use_privileged_port, int, 0644); MODULE_PARM_DESC(use_privileged_port, "use privileged port when initiating connection"); -kib_tunables_t kiblnd_tunables = { +struct kib_tunables kiblnd_tunables = { .kib_dev_failover = &dev_failover, .kib_service = &service, .kib_cksum = &cksum, .kib_timeout = &timeout, .kib_keepalive = &keepalive, .kib_ntx = &ntx, - .kib_credits = &credits, - .kib_peertxcredits = &peer_credits, - .kib_peercredits_hiw = &peer_credits_hiw, - .kib_peerrtrcredits = &peer_buffer_credits, - .kib_peertimeout = &peer_timeout, .kib_default_ipif = &ipif_name, .kib_retry_count = &retry_count, .kib_rnr_retry_count = &rnr_retry_count, - .kib_concurrent_sends = &concurrent_sends, .kib_ib_mtu = &ib_mtu, - .kib_map_on_demand = &map_on_demand, - .kib_fmr_pool_size = &fmr_pool_size, - .kib_fmr_flush_trigger = &fmr_flush_trigger, - .kib_fmr_cache = &fmr_cache, .kib_require_priv_port = &require_privileged_port, .kib_use_priv_port = &use_privileged_port, .kib_nscheds = &nscheds }; -int -kiblnd_tunables_init(void) +static struct lnet_ioctl_config_o2iblnd_tunables default_tunables; + +/* # messages/RDMAs in-flight */ +int kiblnd_msg_queue_size(int version, lnet_ni_t *ni) +{ + if (version == IBLND_MSG_VERSION_1) + return IBLND_MSG_QUEUE_SIZE_V1; + else if (ni) + return ni->ni_peertxcredits; + else + return peer_credits; +} + +int kiblnd_tunables_setup(struct lnet_ni *ni) { + struct lnet_ioctl_config_o2iblnd_tunables *tunables; + + /* + * if no tunables were specified, set up the defaults + */ + if (!ni->ni_lnd_tunables) { + LIBCFS_ALLOC(ni->ni_lnd_tunables, + sizeof(*ni->ni_lnd_tunables)); + if (!ni->ni_lnd_tunables) + return -ENOMEM; + + memcpy(&ni->ni_lnd_tunables->lt_tun_u.lt_o2ib, + &default_tunables, sizeof(*tunables)); + } + tunables = &ni->ni_lnd_tunables->lt_tun_u.lt_o2ib; + + /* Current API version */ + tunables->lnd_version = 0; + if (kiblnd_translate_mtu(*kiblnd_tunables.kib_ib_mtu) < 0) { CERROR("Invalid ib_mtu %d, expected 256/512/1024/2048/4096\n", *kiblnd_tunables.kib_ib_mtu); return -EINVAL; } - if (*kiblnd_tunables.kib_peertxcredits < IBLND_CREDITS_DEFAULT) - *kiblnd_tunables.kib_peertxcredits = IBLND_CREDITS_DEFAULT; + if (!ni->ni_peertimeout) + ni->ni_peertimeout = peer_timeout; + + if (!ni->ni_maxtxcredits) + ni->ni_maxtxcredits = credits; + + if (!ni->ni_peertxcredits) + ni->ni_peertxcredits = peer_credits; - if (*kiblnd_tunables.kib_peertxcredits > IBLND_CREDITS_MAX) - *kiblnd_tunables.kib_peertxcredits = IBLND_CREDITS_MAX;
+ if (!ni->ni_peerrtrcredits) + ni->ni_peerrtrcredits = peer_buffer_credits; - if (*kiblnd_tunables.kib_peertxcredits > *kiblnd_tunables.kib_credits) - *kiblnd_tunables.kib_peertxcredits = *kiblnd_tunables.kib_credits; + if (ni->ni_peertxcredits < IBLND_CREDITS_DEFAULT) + ni->ni_peertxcredits = IBLND_CREDITS_DEFAULT; - if (*kiblnd_tunables.kib_peercredits_hiw < *kiblnd_tunables.kib_peertxcredits / 2) - *kiblnd_tunables.kib_peercredits_hiw = *kiblnd_tunables.kib_peertxcredits / 2; + if (ni->ni_peertxcredits > IBLND_CREDITS_MAX) + ni->ni_peertxcredits = IBLND_CREDITS_MAX; - if (*kiblnd_tunables.kib_peercredits_hiw >= *kiblnd_tunables.kib_peertxcredits) - *kiblnd_tunables.kib_peercredits_hiw = *kiblnd_tunables.kib_peertxcredits - 1; + if (ni->ni_peertxcredits > credits) + ni->ni_peertxcredits = credits; - if (*kiblnd_tunables.kib_map_on_demand < 0 || - *kiblnd_tunables.kib_map_on_demand > IBLND_MAX_RDMA_FRAGS) - *kiblnd_tunables.kib_map_on_demand = 0; /* disable map-on-demand */ + if (!tunables->lnd_peercredits_hiw) + tunables->lnd_peercredits_hiw = peer_credits_hiw; - if (*kiblnd_tunables.kib_map_on_demand == 1) - *kiblnd_tunables.kib_map_on_demand = 2; /* don't make sense to create map if only one fragment */ + if (tunables->lnd_peercredits_hiw < ni->ni_peertxcredits / 2) + tunables->lnd_peercredits_hiw = ni->ni_peertxcredits / 2; - if (!*kiblnd_tunables.kib_concurrent_sends) { - if (*kiblnd_tunables.kib_map_on_demand > 0 && - *kiblnd_tunables.kib_map_on_demand <= IBLND_MAX_RDMA_FRAGS / 8) - *kiblnd_tunables.kib_concurrent_sends = (*kiblnd_tunables.kib_peertxcredits) * 2; - else - *kiblnd_tunables.kib_concurrent_sends = (*kiblnd_tunables.kib_peertxcredits); + if (tunables->lnd_peercredits_hiw >= ni->ni_peertxcredits) + tunables->lnd_peercredits_hiw = ni->ni_peertxcredits - 1; + + if (tunables->lnd_map_on_demand < 0 || + tunables->lnd_map_on_demand > IBLND_MAX_RDMA_FRAGS) { + /* disable map-on-demand */ + tunables->lnd_map_on_demand = 0; + } + + if (tunables->lnd_map_on_demand == 1) { + /* doesn't make sense to create a map for only one fragment */ + tunables->lnd_map_on_demand = 2; + } + + if (!tunables->lnd_concurrent_sends) { + if (tunables->lnd_map_on_demand > 0 && + tunables->lnd_map_on_demand <= IBLND_MAX_RDMA_FRAGS / 8) { + tunables->lnd_concurrent_sends = + ni->ni_peertxcredits * 2; + } else { + tunables->lnd_concurrent_sends = ni->ni_peertxcredits; + } } - if (*kiblnd_tunables.kib_concurrent_sends > *kiblnd_tunables.kib_peertxcredits * 2) - *kiblnd_tunables.kib_concurrent_sends = *kiblnd_tunables.kib_peertxcredits * 2; + if (tunables->lnd_concurrent_sends > ni->ni_peertxcredits * 2) + tunables->lnd_concurrent_sends = ni->ni_peertxcredits * 2; - if (*kiblnd_tunables.kib_concurrent_sends < *kiblnd_tunables.kib_peertxcredits / 2) - *kiblnd_tunables.kib_concurrent_sends = *kiblnd_tunables.kib_peertxcredits / 2; - if (*kiblnd_tunables.kib_concurrent_sends < *kiblnd_tunables.kib_peertxcredits) { + if (tunables->lnd_concurrent_sends < ni->ni_peertxcredits) { CWARN("Concurrent sends %d is lower than message queue size: %d, performance may drop slightly.\n", - *kiblnd_tunables.kib_concurrent_sends, *kiblnd_tunables.kib_peertxcredits); + tunables->lnd_concurrent_sends, ni->ni_peertxcredits); } + if (!tunables->lnd_fmr_pool_size) + tunables->lnd_fmr_pool_size = fmr_pool_size; + if (!tunables->lnd_fmr_flush_trigger) + tunables->lnd_fmr_flush_trigger = fmr_flush_trigger;
+ if (!tunables->lnd_fmr_cache) + tunables->lnd_fmr_cache = fmr_cache; + return 0; } + +void kiblnd_tunables_init(void) +{ + default_tunables.lnd_version = 0; + default_tunables.lnd_peercredits_hiw = peer_credits_hiw; + default_tunables.lnd_map_on_demand = map_on_demand; + default_tunables.lnd_concurrent_sends = concurrent_sends; + default_tunables.lnd_fmr_pool_size = fmr_pool_size; + default_tunables.lnd_fmr_flush_trigger = fmr_flush_trigger; + default_tunables.lnd_fmr_cache = fmr_cache; +} diff --git a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd.c b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd.c index cca7b2f7f1a7..07ec540946cd 100644 --- a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd.c +++ b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd.c @@ -15,11 +15,7 @@ * * You should have received a copy of the GNU General Public License * version 2 along with this program; If not, see - http://www.sun.com/software/products/lustre/docs/GPLv2.pdf - * - * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, - * CA 95054 USA or visit www.sun.com if you need additional information or - * have any questions. + * http://www.gnu.org/licenses/gpl-2.0.html * * GPL HEADER END */ @@ -44,14 +40,14 @@ #include "socklnd.h" static lnd_t the_ksocklnd; -ksock_nal_data_t ksocknal_data; +struct ksock_nal_data ksocknal_data; -static ksock_interface_t * +static struct ksock_interface * ksocknal_ip2iface(lnet_ni_t *ni, __u32 ip) { - ksock_net_t *net = ni->ni_data; + struct ksock_net *net = ni->ni_data; int i; - ksock_interface_t *iface; + struct ksock_interface *iface; for (i = 0; i < net->ksnn_ninterfaces; i++) { LASSERT(i < LNET_MAX_INTERFACES); @@ -64,10 +60,10 @@ ksocknal_ip2iface(lnet_ni_t *ni, __u32 ip) return NULL; } -static ksock_route_t * +static struct ksock_route * ksocknal_create_route(__u32 ipaddr, int port) { - ksock_route_t *route; + struct ksock_route *route; LIBCFS_ALLOC(route, sizeof(*route)); if (!route) @@ -89,7 +85,7 @@ ksocknal_create_route(__u32 ipaddr, int port) } void -ksocknal_destroy_route(ksock_route_t *route) +ksocknal_destroy_route(struct ksock_route *route) { LASSERT(!atomic_read(&route->ksnr_refcount)); @@ -100,11 +96,11 @@ ksocknal_destroy_route(ksock_route_t *route) } static int -ksocknal_create_peer(ksock_peer_t **peerp, lnet_ni_t *ni, lnet_process_id_t id) +ksocknal_create_peer(struct ksock_peer **peerp, lnet_ni_t *ni, lnet_process_id_t id) { int cpt = lnet_cpt_of_nid(id.nid); - ksock_net_t *net = ni->ni_data; - ksock_peer_t *peer; + struct ksock_net *net = ni->ni_data; + struct ksock_peer *peer; LASSERT(id.nid != LNET_NID_ANY); LASSERT(id.pid != LNET_PID_ANY); @@ -148,9 +144,9 @@ ksocknal_create_peer(ksock_peer_t **peerp, lnet_ni_t *ni, lnet_process_id_t id) } void -ksocknal_destroy_peer(ksock_peer_t *peer) +ksocknal_destroy_peer(struct ksock_peer *peer) { - ksock_net_t *net = peer->ksnp_ni->ni_data; + struct ksock_net *net = peer->ksnp_ni->ni_data; CDEBUG(D_NET, "peer %s %p deleted\n", libcfs_id2str(peer->ksnp_id), peer); @@ -175,15 +171,15 @@ ksocknal_destroy_peer(ksock_peer_t *peer) spin_unlock_bh(&net->ksnn_lock); } -ksock_peer_t * +struct ksock_peer * ksocknal_find_peer_locked(lnet_ni_t *ni, lnet_process_id_t id) { struct list_head *peer_list = ksocknal_nid2peerlist(id.nid); struct list_head *tmp; - ksock_peer_t *peer; + struct ksock_peer *peer; list_for_each(tmp, peer_list) { - peer = list_entry(tmp, ksock_peer_t, ksnp_list); + peer = list_entry(tmp, struct ksock_peer, ksnp_list); LASSERT(!peer->ksnp_closing); @@
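The modparams rework above splits responsibility: the new kiblnd_tunables_init() snapshots the module parameters into default_tunables once at module load, and kiblnd_tunables_setup() then copies and validates them per network interface. Every field follows the same default-then-clamp shape; a condensed sketch of that shape (helper names illustrative, not from the driver):

    /* clamp v into [lo, hi] */
    static int clamp_int(int v, int lo, int hi)
    {
        return v < lo ? lo : (v > hi ? hi : v);
    }

    /*
     * A value of 0 means "not configured": fall back to the module
     * parameter, then bound the result, as the peer-credit logic above does.
     */
    static void setup_tunable(int *field, int module_default, int lo, int hi)
    {
        if (!*field)
            *field = module_default;
        *field = clamp_int(*field, lo, hi);
    }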
-202,10 +198,10 @@ ksocknal_find_peer_locked(lnet_ni_t *ni, lnet_process_id_t id) return NULL; } -ksock_peer_t * +struct ksock_peer * ksocknal_find_peer(lnet_ni_t *ni, lnet_process_id_t id) { - ksock_peer_t *peer; + struct ksock_peer *peer; read_lock(&ksocknal_data.ksnd_global_lock); peer = ksocknal_find_peer_locked(ni, id); @@ -217,11 +213,11 @@ ksocknal_find_peer(lnet_ni_t *ni, lnet_process_id_t id) } static void -ksocknal_unlink_peer_locked(ksock_peer_t *peer) +ksocknal_unlink_peer_locked(struct ksock_peer *peer) { int i; __u32 ip; - ksock_interface_t *iface; + struct ksock_interface *iface; for (i = 0; i < peer->ksnp_n_passive_ips; i++) { LASSERT(i < LNET_MAX_INTERFACES); @@ -253,9 +249,9 @@ ksocknal_get_peer_info(lnet_ni_t *ni, int index, lnet_process_id_t *id, __u32 *myip, __u32 *peer_ip, int *port, int *conn_count, int *share_count) { - ksock_peer_t *peer; + struct ksock_peer *peer; struct list_head *ptmp; - ksock_route_t *route; + struct ksock_route *route; struct list_head *rtmp; int i; int j; @@ -265,7 +261,7 @@ ksocknal_get_peer_info(lnet_ni_t *ni, int index, for (i = 0; i < ksocknal_data.ksnd_peer_hash_size; i++) { list_for_each(ptmp, &ksocknal_data.ksnd_peers[i]) { - peer = list_entry(ptmp, ksock_peer_t, ksnp_list); + peer = list_entry(ptmp, struct ksock_peer, ksnp_list); if (peer->ksnp_ni != ni) continue; @@ -303,7 +299,7 @@ ksocknal_get_peer_info(lnet_ni_t *ni, int index, if (index-- > 0) continue; - route = list_entry(rtmp, ksock_route_t, + route = list_entry(rtmp, struct ksock_route, ksnr_list); *id = peer->ksnp_id; @@ -323,11 +319,11 @@ ksocknal_get_peer_info(lnet_ni_t *ni, int index, } static void -ksocknal_associate_route_conn_locked(ksock_route_t *route, ksock_conn_t *conn) +ksocknal_associate_route_conn_locked(struct ksock_route *route, struct ksock_conn *conn) { - ksock_peer_t *peer = route->ksnr_peer; + struct ksock_peer *peer = route->ksnr_peer; int type = conn->ksnc_type; - ksock_interface_t *iface; + struct ksock_interface *iface; conn->ksnc_route = route; ksocknal_route_addref(route); @@ -369,11 +365,11 @@ ksocknal_associate_route_conn_locked(ksock_route_t *route, ksock_conn_t *conn) } static void -ksocknal_add_route_locked(ksock_peer_t *peer, ksock_route_t *route) +ksocknal_add_route_locked(struct ksock_peer *peer, struct ksock_route *route) { struct list_head *tmp; - ksock_conn_t *conn; - ksock_route_t *route2; + struct ksock_conn *conn; + struct ksock_route *route2; LASSERT(!peer->ksnp_closing); LASSERT(!route->ksnr_peer); @@ -383,7 +379,7 @@ ksocknal_add_route_locked(ksock_peer_t *peer, ksock_route_t *route) /* LASSERT(unique) */ list_for_each(tmp, &peer->ksnp_routes) { - route2 = list_entry(tmp, ksock_route_t, ksnr_list); + route2 = list_entry(tmp, struct ksock_route, ksnr_list); if (route2->ksnr_ipaddr == route->ksnr_ipaddr) { CERROR("Duplicate route %s %pI4h\n", @@ -399,7 +395,7 @@ ksocknal_add_route_locked(ksock_peer_t *peer, ksock_route_t *route) list_add_tail(&route->ksnr_list, &peer->ksnp_routes); list_for_each(tmp, &peer->ksnp_conns) { - conn = list_entry(tmp, ksock_conn_t, ksnc_list); + conn = list_entry(tmp, struct ksock_conn, ksnc_list); if (conn->ksnc_ipaddr != route->ksnr_ipaddr) continue; @@ -410,11 +406,11 @@ ksocknal_add_route_locked(ksock_peer_t *peer, ksock_route_t *route) } static void -ksocknal_del_route_locked(ksock_route_t *route) +ksocknal_del_route_locked(struct ksock_route *route) { - ksock_peer_t *peer = route->ksnr_peer; - ksock_interface_t *iface; - ksock_conn_t *conn; + struct ksock_peer *peer = route->ksnr_peer; + struct 
ksock_interface *iface; + struct ksock_conn *conn; struct list_head *ctmp; struct list_head *cnxt; @@ -422,7 +418,7 @@ ksocknal_del_route_locked(ksock_route_t *route) /* Close associated conns */ list_for_each_safe(ctmp, cnxt, &peer->ksnp_conns) { - conn = list_entry(ctmp, ksock_conn_t, ksnc_list); + conn = list_entry(ctmp, struct ksock_conn, ksnc_list); if (conn->ksnc_route != route) continue; @@ -455,10 +451,10 @@ int ksocknal_add_peer(lnet_ni_t *ni, lnet_process_id_t id, __u32 ipaddr, int port) { struct list_head *tmp; - ksock_peer_t *peer; - ksock_peer_t *peer2; - ksock_route_t *route; - ksock_route_t *route2; + struct ksock_peer *peer; + struct ksock_peer *peer2; + struct ksock_route *route; + struct ksock_route *route2; int rc; if (id.nid == LNET_NID_ANY || @@ -479,7 +475,7 @@ ksocknal_add_peer(lnet_ni_t *ni, lnet_process_id_t id, __u32 ipaddr, int port) write_lock_bh(&ksocknal_data.ksnd_global_lock); /* always called with a ref on ni, so shutdown can't have started */ - LASSERT(!((ksock_net_t *) ni->ni_data)->ksnn_shutdown); + LASSERT(!((struct ksock_net *)ni->ni_data)->ksnn_shutdown); peer2 = ksocknal_find_peer_locked(ni, id); if (peer2) { @@ -493,7 +489,7 @@ ksocknal_add_peer(lnet_ni_t *ni, lnet_process_id_t id, __u32 ipaddr, int port) route2 = NULL; list_for_each(tmp, &peer->ksnp_routes) { - route2 = list_entry(tmp, ksock_route_t, ksnr_list); + route2 = list_entry(tmp, struct ksock_route, ksnr_list); if (route2->ksnr_ipaddr == ipaddr) break; @@ -514,10 +510,10 @@ ksocknal_add_peer(lnet_ni_t *ni, lnet_process_id_t id, __u32 ipaddr, int port) } static void -ksocknal_del_peer_locked(ksock_peer_t *peer, __u32 ip) +ksocknal_del_peer_locked(struct ksock_peer *peer, __u32 ip) { - ksock_conn_t *conn; - ksock_route_t *route; + struct ksock_conn *conn; + struct ksock_route *route; struct list_head *tmp; struct list_head *nxt; int nshared; @@ -528,7 +524,7 @@ ksocknal_del_peer_locked(ksock_peer_t *peer, __u32 ip) ksocknal_peer_addref(peer); list_for_each_safe(tmp, nxt, &peer->ksnp_routes) { - route = list_entry(tmp, ksock_route_t, ksnr_list); + route = list_entry(tmp, struct ksock_route, ksnr_list); /* no match */ if (!(!ip || route->ksnr_ipaddr == ip)) @@ -541,7 +537,7 @@ ksocknal_del_peer_locked(ksock_peer_t *peer, __u32 ip) nshared = 0; list_for_each_safe(tmp, nxt, &peer->ksnp_routes) { - route = list_entry(tmp, ksock_route_t, ksnr_list); + route = list_entry(tmp, struct ksock_route, ksnr_list); nshared += route->ksnr_share_count; } @@ -551,7 +547,7 @@ ksocknal_del_peer_locked(ksock_peer_t *peer, __u32 ip) * left */ list_for_each_safe(tmp, nxt, &peer->ksnp_routes) { - route = list_entry(tmp, ksock_route_t, ksnr_list); + route = list_entry(tmp, struct ksock_route, ksnr_list); /* we should only be removing auto-entries */ LASSERT(!route->ksnr_share_count); @@ -559,7 +555,7 @@ ksocknal_del_peer_locked(ksock_peer_t *peer, __u32 ip) } list_for_each_safe(tmp, nxt, &peer->ksnp_conns) { - conn = list_entry(tmp, ksock_conn_t, ksnc_list); + conn = list_entry(tmp, struct ksock_conn, ksnc_list); ksocknal_close_conn_locked(conn, 0); } @@ -575,7 +571,7 @@ ksocknal_del_peer(lnet_ni_t *ni, lnet_process_id_t id, __u32 ip) LIST_HEAD(zombies); struct list_head *ptmp; struct list_head *pnxt; - ksock_peer_t *peer; + struct ksock_peer *peer; int lo; int hi; int i; @@ -593,7 +589,7 @@ ksocknal_del_peer(lnet_ni_t *ni, lnet_process_id_t id, __u32 ip) for (i = lo; i <= hi; i++) { list_for_each_safe(ptmp, pnxt, &ksocknal_data.ksnd_peers[i]) { - peer = list_entry(ptmp, ksock_peer_t, ksnp_list); + peer = 
list_entry(ptmp, struct ksock_peer, ksnp_list); if (peer->ksnp_ni != ni) continue; @@ -628,12 +624,12 @@ ksocknal_del_peer(lnet_ni_t *ni, lnet_process_id_t id, __u32 ip) return rc; } -static ksock_conn_t * +static struct ksock_conn * ksocknal_get_conn_by_idx(lnet_ni_t *ni, int index) { - ksock_peer_t *peer; + struct ksock_peer *peer; struct list_head *ptmp; - ksock_conn_t *conn; + struct ksock_conn *conn; struct list_head *ctmp; int i; @@ -641,7 +637,7 @@ ksocknal_get_conn_by_idx(lnet_ni_t *ni, int index) for (i = 0; i < ksocknal_data.ksnd_peer_hash_size; i++) { list_for_each(ptmp, &ksocknal_data.ksnd_peers[i]) { - peer = list_entry(ptmp, ksock_peer_t, ksnp_list); + peer = list_entry(ptmp, struct ksock_peer, ksnp_list); LASSERT(!peer->ksnp_closing); @@ -652,7 +648,7 @@ ksocknal_get_conn_by_idx(lnet_ni_t *ni, int index) if (index-- > 0) continue; - conn = list_entry(ctmp, ksock_conn_t, + conn = list_entry(ctmp, struct ksock_conn, ksnc_list); ksocknal_conn_addref(conn); read_unlock(&ksocknal_data.ksnd_global_lock); @@ -665,11 +661,11 @@ ksocknal_get_conn_by_idx(lnet_ni_t *ni, int index) return NULL; } -static ksock_sched_t * +static struct ksock_sched * ksocknal_choose_scheduler_locked(unsigned int cpt) { struct ksock_sched_info *info = ksocknal_data.ksnd_sched_info[cpt]; - ksock_sched_t *sched; + struct ksock_sched *sched; int i; LASSERT(info->ksi_nthreads > 0); @@ -691,7 +687,7 @@ ksocknal_choose_scheduler_locked(unsigned int cpt) static int ksocknal_local_ipvec(lnet_ni_t *ni, __u32 *ipaddrs) { - ksock_net_t *net = ni->ni_data; + struct ksock_net *net = ni->ni_data; int i; int nip; @@ -719,7 +715,7 @@ ksocknal_local_ipvec(lnet_ni_t *ni, __u32 *ipaddrs) } static int -ksocknal_match_peerip(ksock_interface_t *iface, __u32 *ips, int nips) +ksocknal_match_peerip(struct ksock_interface *iface, __u32 *ips, int nips) { int best_netmatch = 0; int best_xor = 0; @@ -751,12 +747,12 @@ ksocknal_match_peerip(ksock_interface_t *iface, __u32 *ips, int nips) } static int -ksocknal_select_ips(ksock_peer_t *peer, __u32 *peerips, int n_peerips) +ksocknal_select_ips(struct ksock_peer *peer, __u32 *peerips, int n_peerips) { rwlock_t *global_lock = &ksocknal_data.ksnd_global_lock; - ksock_net_t *net = peer->ksnp_ni->ni_data; - ksock_interface_t *iface; - ksock_interface_t *best_iface; + struct ksock_net *net = peer->ksnp_ni->ni_data; + struct ksock_interface *iface; + struct ksock_interface *best_iface; int n_ips; int i; int j; @@ -862,17 +858,17 @@ ksocknal_select_ips(ksock_peer_t *peer, __u32 *peerips, int n_peerips) } static void -ksocknal_create_routes(ksock_peer_t *peer, int port, +ksocknal_create_routes(struct ksock_peer *peer, int port, __u32 *peer_ipaddrs, int npeer_ipaddrs) { - ksock_route_t *newroute = NULL; + struct ksock_route *newroute = NULL; rwlock_t *global_lock = &ksocknal_data.ksnd_global_lock; lnet_ni_t *ni = peer->ksnp_ni; - ksock_net_t *net = ni->ni_data; + struct ksock_net *net = ni->ni_data; struct list_head *rtmp; - ksock_route_t *route; - ksock_interface_t *iface; - ksock_interface_t *best_iface; + struct ksock_route *route; + struct ksock_interface *iface; + struct ksock_interface *best_iface; int best_netmatch; int this_netmatch; int best_nroutes; @@ -919,7 +915,7 @@ ksocknal_create_routes(ksock_peer_t *peer, int port, /* Already got a route? 
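ksocknal_get_peer_info() and ksocknal_get_conn_by_idx() above use a common LNet enumeration idiom: walk every hash bucket in order and count the caller-supplied index down until it hits zero, so userspace can iterate a kernel table one ioctl at a time. Stripped of locking and the kernel list types, the idiom reduces to (hypothetical types):

    struct entry { struct entry *next; };
    struct bucket { struct entry *head; };

    /* return the index-th entry across all buckets, or NULL past the end */
    static struct entry *
    find_by_index(struct bucket *tbl, int nbuckets, int index)
    {
        int i;

        for (i = 0; i < nbuckets; i++) {
            struct entry *e;

            for (e = tbl[i].head; e; e = e->next) {
                if (index-- == 0)
                    return e; /* the real code takes a ref before unlocking */
            }
        }
        return NULL;
    }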
*/ route = NULL; list_for_each(rtmp, &peer->ksnp_routes) { - route = list_entry(rtmp, ksock_route_t, ksnr_list); + route = list_entry(rtmp, struct ksock_route, ksnr_list); if (route->ksnr_ipaddr == newroute->ksnr_ipaddr) break; @@ -941,7 +937,7 @@ ksocknal_create_routes(ksock_peer_t *peer, int port, /* Using this interface already? */ list_for_each(rtmp, &peer->ksnp_routes) { - route = list_entry(rtmp, ksock_route_t, + route = list_entry(rtmp, struct ksock_route, ksnr_list); if (route->ksnr_myipaddr == iface->ksni_ipaddr) @@ -985,7 +981,7 @@ ksocknal_create_routes(ksock_peer_t *peer, int port, int ksocknal_accept(lnet_ni_t *ni, struct socket *sock) { - ksock_connreq_t *cr; + struct ksock_connreq *cr; int rc; __u32 peer_ip; int peer_port; @@ -1014,9 +1010,9 @@ ksocknal_accept(lnet_ni_t *ni, struct socket *sock) } static int -ksocknal_connecting(ksock_peer_t *peer, __u32 ipaddr) +ksocknal_connecting(struct ksock_peer *peer, __u32 ipaddr) { - ksock_route_t *route; + struct ksock_route *route; list_for_each_entry(route, &peer->ksnp_routes, ksnr_list) { if (route->ksnr_ipaddr == ipaddr) @@ -1026,7 +1022,7 @@ ksocknal_connecting(ksock_peer_t *peer, __u32 ipaddr) } int -ksocknal_create_conn(lnet_ni_t *ni, ksock_route_t *route, +ksocknal_create_conn(lnet_ni_t *ni, struct ksock_route *route, struct socket *sock, int type) { rwlock_t *global_lock = &ksocknal_data.ksnd_global_lock; @@ -1034,15 +1030,15 @@ ksocknal_create_conn(lnet_ni_t *ni, ksock_route_t *route, lnet_process_id_t peerid; struct list_head *tmp; __u64 incarnation; - ksock_conn_t *conn; - ksock_conn_t *conn2; - ksock_peer_t *peer = NULL; - ksock_peer_t *peer2; - ksock_sched_t *sched; + struct ksock_conn *conn; + struct ksock_conn *conn2; + struct ksock_peer *peer = NULL; + struct ksock_peer *peer2; + struct ksock_sched *sched; ksock_hello_msg_t *hello; int cpt; - ksock_tx_t *tx; - ksock_tx_t *txtmp; + struct ksock_tx *tx; + struct ksock_tx *txtmp; int rc; int active; char *warn = NULL; @@ -1150,7 +1146,7 @@ ksocknal_create_conn(lnet_ni_t *ni, ksock_route_t *route, write_lock_bh(global_lock); /* called with a ref on ni, so shutdown can't have started */ - LASSERT(!((ksock_net_t *) ni->ni_data)->ksnn_shutdown); + LASSERT(!((struct ksock_net *)ni->ni_data)->ksnn_shutdown); peer2 = ksocknal_find_peer_locked(ni, peerid); if (!peer2) { @@ -1233,7 +1229,7 @@ ksocknal_create_conn(lnet_ni_t *ni, ksock_route_t *route, */ if (conn->ksnc_ipaddr != conn->ksnc_myipaddr) { list_for_each(tmp, &peer->ksnp_conns) { - conn2 = list_entry(tmp, ksock_conn_t, ksnc_list); + conn2 = list_entry(tmp, struct ksock_conn, ksnc_list); if (conn2->ksnc_ipaddr != conn->ksnc_ipaddr || conn2->ksnc_myipaddr != conn->ksnc_myipaddr || @@ -1273,7 +1269,7 @@ ksocknal_create_conn(lnet_ni_t *ni, ksock_route_t *route, * continually create duplicate routes. */ list_for_each(tmp, &peer->ksnp_routes) { - route = list_entry(tmp, ksock_route_t, ksnr_list); + route = list_entry(tmp, struct ksock_route, ksnr_list); if (route->ksnr_ipaddr != conn->ksnc_ipaddr) continue; @@ -1432,16 +1428,16 @@ failed_0: } void -ksocknal_close_conn_locked(ksock_conn_t *conn, int error) +ksocknal_close_conn_locked(struct ksock_conn *conn, int error) { /* * This just does the immediate housekeeping, and queues the * connection for the reaper to terminate.
* Caller holds ksnd_global_lock exclusively in irq context */ - ksock_peer_t *peer = conn->ksnc_peer; - ksock_route_t *route; - ksock_conn_t *conn2; + struct ksock_peer *peer = conn->ksnc_peer; + struct ksock_route *route; + struct ksock_conn *conn2; struct list_head *tmp; LASSERT(!peer->ksnp_error); @@ -1459,7 +1455,7 @@ ksocknal_close_conn_locked(ksock_conn_t *conn, int error) conn2 = NULL; list_for_each(tmp, &peer->ksnp_conns) { - conn2 = list_entry(tmp, ksock_conn_t, ksnc_list); + conn2 = list_entry(tmp, struct ksock_conn, ksnc_list); if (conn2->ksnc_route == route && conn2->ksnc_type == conn->ksnc_type) @@ -1484,7 +1480,7 @@ ksocknal_close_conn_locked(ksock_conn_t *conn, int error) /* No more connections to this peer */ if (!list_empty(&peer->ksnp_tx_queue)) { - ksock_tx_t *tx; + struct ksock_tx *tx; LASSERT(conn->ksnc_proto == &ksocknal_protocol_v3x); @@ -1524,7 +1520,7 @@ ksocknal_close_conn_locked(ksock_conn_t *conn, int error) } void -ksocknal_peer_failed(ksock_peer_t *peer) +ksocknal_peer_failed(struct ksock_peer *peer) { int notify = 0; unsigned long last_alive = 0; @@ -1552,12 +1548,12 @@ ksocknal_peer_failed(ksock_peer_t *peer) } void -ksocknal_finalize_zcreq(ksock_conn_t *conn) +ksocknal_finalize_zcreq(struct ksock_conn *conn) { - ksock_peer_t *peer = conn->ksnc_peer; - ksock_tx_t *tx; - ksock_tx_t *temp; - ksock_tx_t *tmp; + struct ksock_peer *peer = conn->ksnc_peer; + struct ksock_tx *tx; + struct ksock_tx *temp; + struct ksock_tx *tmp; LIST_HEAD(zlist); /* @@ -1589,7 +1585,7 @@ ksocknal_finalize_zcreq(ksock_conn_t *conn) } void -ksocknal_terminate_conn(ksock_conn_t *conn) +ksocknal_terminate_conn(struct ksock_conn *conn) { /* * This gets called by the reaper (guaranteed thread context) to @@ -1597,8 +1593,8 @@ ksocknal_terminate_conn(ksock_conn_t *conn) * ksnc_refcount will eventually hit zero, and then the reaper will * destroy it. 
*/ - ksock_peer_t *peer = conn->ksnc_peer; - ksock_sched_t *sched = conn->ksnc_scheduler; + struct ksock_peer *peer = conn->ksnc_peer; + struct ksock_sched *sched = conn->ksnc_scheduler; int failed = 0; LASSERT(conn->ksnc_closing); @@ -1656,7 +1652,7 @@ ksocknal_terminate_conn(ksock_conn_t *conn) } void -ksocknal_queue_zombie_conn(ksock_conn_t *conn) +ksocknal_queue_zombie_conn(struct ksock_conn *conn) { /* Queue the conn for the reaper to destroy */ @@ -1670,7 +1666,7 @@ ksocknal_queue_zombie_conn(ksock_conn_t *conn) } void -ksocknal_destroy_conn(ksock_conn_t *conn) +ksocknal_destroy_conn(struct ksock_conn *conn) { unsigned long last_rcv; @@ -1730,15 +1726,15 @@ ksocknal_destroy_conn(ksock_conn_t *conn) } int -ksocknal_close_peer_conns_locked(ksock_peer_t *peer, __u32 ipaddr, int why) +ksocknal_close_peer_conns_locked(struct ksock_peer *peer, __u32 ipaddr, int why) { - ksock_conn_t *conn; + struct ksock_conn *conn; struct list_head *ctmp; struct list_head *cnxt; int count = 0; list_for_each_safe(ctmp, cnxt, &peer->ksnp_conns) { - conn = list_entry(ctmp, ksock_conn_t, ksnc_list); + conn = list_entry(ctmp, struct ksock_conn, ksnc_list); if (!ipaddr || conn->ksnc_ipaddr == ipaddr) { count++; @@ -1750,9 +1746,9 @@ ksocknal_close_peer_conns_locked(ksock_peer_t *peer, __u32 ipaddr, int why) } int -ksocknal_close_conn_and_siblings(ksock_conn_t *conn, int why) +ksocknal_close_conn_and_siblings(struct ksock_conn *conn, int why) { - ksock_peer_t *peer = conn->ksnc_peer; + struct ksock_peer *peer = conn->ksnc_peer; __u32 ipaddr = conn->ksnc_ipaddr; int count; @@ -1768,7 +1764,7 @@ ksocknal_close_conn_and_siblings(ksock_conn_t *conn, int why) int ksocknal_close_matching_conns(lnet_process_id_t id, __u32 ipaddr) { - ksock_peer_t *peer; + struct ksock_peer *peer; struct list_head *ptmp; struct list_head *pnxt; int lo; @@ -1789,7 +1785,7 @@ ksocknal_close_matching_conns(lnet_process_id_t id, __u32 ipaddr) for (i = lo; i <= hi; i++) { list_for_each_safe(ptmp, pnxt, &ksocknal_data.ksnd_peers[i]) { - peer = list_entry(ptmp, ksock_peer_t, ksnp_list); + peer = list_entry(ptmp, struct ksock_peer, ksnp_list); if (!((id.nid == LNET_NID_ANY || id.nid == peer->ksnp_id.nid) && (id.pid == LNET_PID_ANY || id.pid == peer->ksnp_id.pid))) @@ -1844,7 +1840,7 @@ ksocknal_query(lnet_ni_t *ni, lnet_nid_t nid, unsigned long *when) int connect = 1; unsigned long last_alive = 0; unsigned long now = cfs_time_current(); - ksock_peer_t *peer = NULL; + struct ksock_peer *peer = NULL; rwlock_t *glock = &ksocknal_data.ksnd_global_lock; lnet_process_id_t id = { .nid = nid, @@ -1856,11 +1852,11 @@ ksocknal_query(lnet_ni_t *ni, lnet_nid_t nid, unsigned long *when) peer = ksocknal_find_peer_locked(ni, id); if (peer) { struct list_head *tmp; - ksock_conn_t *conn; + struct ksock_conn *conn; int bufnob; list_for_each(tmp, &peer->ksnp_conns) { - conn = list_entry(tmp, ksock_conn_t, ksnc_list); + conn = list_entry(tmp, struct ksock_conn, ksnc_list); bufnob = conn->ksnc_sock->sk->sk_wmem_queued; if (bufnob < conn->ksnc_tx_bufnob) { @@ -1902,12 +1898,12 @@ ksocknal_query(lnet_ni_t *ni, lnet_nid_t nid, unsigned long *when) } static void -ksocknal_push_peer(ksock_peer_t *peer) +ksocknal_push_peer(struct ksock_peer *peer) { int index; int i; struct list_head *tmp; - ksock_conn_t *conn; + struct ksock_conn *conn; for (index = 0; ; index++) { read_lock(&ksocknal_data.ksnd_global_lock); @@ -1917,7 +1913,7 @@ ksocknal_push_peer(ksock_peer_t *peer) list_for_each(tmp, &peer->ksnp_conns) { if (i++ == index) { - conn = list_entry(tmp, ksock_conn_t, + 
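ksocknal_queue_zombie_conn() above is the producer half of the reaper pattern: teardown that must not run in the caller's (possibly atomic) context is deferred to the reaper thread, so callers only enqueue and wake. A userspace approximation with pthreads, standing in for the kernel's spinlock and wait queue:

    #include <pthread.h>

    struct zombie { struct zombie *next; };

    static struct zombie *zombie_list;
    static pthread_mutex_t zombie_lock = PTHREAD_MUTEX_INITIALIZER;
    static pthread_cond_t zombie_cond = PTHREAD_COND_INITIALIZER;

    /* producer: queue the dead object and poke the reaper */
    static void queue_zombie(struct zombie *z)
    {
        pthread_mutex_lock(&zombie_lock);
        z->next = zombie_list;
        zombie_list = z;
        pthread_cond_signal(&zombie_cond);
        pthread_mutex_unlock(&zombie_lock);
    }

    /* consumer: the reaper dequeues in its own thread context */
    static struct zombie *reap_one(void)
    {
        struct zombie *z;

        pthread_mutex_lock(&zombie_lock);
        while (!zombie_list)
            pthread_cond_wait(&zombie_cond, &zombie_lock);
        z = zombie_list;
        zombie_list = z->next;
        pthread_mutex_unlock(&zombie_lock);
        return z; /* destroy with no locks held */
    }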
conn = list_entry(tmp, struct ksock_conn, ksnc_list); ksocknal_conn_addref(conn); break; @@ -1954,7 +1950,7 @@ static int ksocknal_push(lnet_ni_t *ni, lnet_process_id_t id) int peer_off; /* searching offset in peer hash table */ for (peer_off = 0; ; peer_off++) { - ksock_peer_t *peer; + struct ksock_peer *peer; int i = 0; read_lock(&ksocknal_data.ksnd_global_lock); @@ -1986,15 +1982,15 @@ static int ksocknal_push(lnet_ni_t *ni, lnet_process_id_t id) static int ksocknal_add_interface(lnet_ni_t *ni, __u32 ipaddress, __u32 netmask) { - ksock_net_t *net = ni->ni_data; - ksock_interface_t *iface; + struct ksock_net *net = ni->ni_data; + struct ksock_interface *iface; int rc; int i; int j; struct list_head *ptmp; - ksock_peer_t *peer; + struct ksock_peer *peer; struct list_head *rtmp; - ksock_route_t *route; + struct ksock_route *route; if (!ipaddress || !netmask) return -EINVAL; @@ -2017,7 +2013,7 @@ ksocknal_add_interface(lnet_ni_t *ni, __u32 ipaddress, __u32 netmask) for (i = 0; i < ksocknal_data.ksnd_peer_hash_size; i++) { list_for_each(ptmp, &ksocknal_data.ksnd_peers[i]) { - peer = list_entry(ptmp, ksock_peer_t, + peer = list_entry(ptmp, struct ksock_peer, ksnp_list); for (j = 0; j < peer->ksnp_n_passive_ips; j++) @@ -2025,7 +2021,7 @@ ksocknal_add_interface(lnet_ni_t *ni, __u32 ipaddress, __u32 netmask) iface->ksni_npeers++; list_for_each(rtmp, &peer->ksnp_routes) { - route = list_entry(rtmp, ksock_route_t, + route = list_entry(rtmp, struct ksock_route, ksnr_list); if (route->ksnr_myipaddr == ipaddress) @@ -2044,12 +2040,12 @@ ksocknal_add_interface(lnet_ni_t *ni, __u32 ipaddress, __u32 netmask) } static void -ksocknal_peer_del_interface_locked(ksock_peer_t *peer, __u32 ipaddr) +ksocknal_peer_del_interface_locked(struct ksock_peer *peer, __u32 ipaddr) { struct list_head *tmp; struct list_head *nxt; - ksock_route_t *route; - ksock_conn_t *conn; + struct ksock_route *route; + struct ksock_conn *conn; int i; int j; @@ -2063,7 +2059,7 @@ ksocknal_peer_del_interface_locked(ksock_peer_t *peer, __u32 ipaddr) } list_for_each_safe(tmp, nxt, &peer->ksnp_routes) { - route = list_entry(tmp, ksock_route_t, ksnr_list); + route = list_entry(tmp, struct ksock_route, ksnr_list); if (route->ksnr_myipaddr != ipaddr) continue; @@ -2077,7 +2073,7 @@ ksocknal_peer_del_interface_locked(ksock_peer_t *peer, __u32 ipaddr) } list_for_each_safe(tmp, nxt, &peer->ksnp_conns) { - conn = list_entry(tmp, ksock_conn_t, ksnc_list); + conn = list_entry(tmp, struct ksock_conn, ksnc_list); if (conn->ksnc_myipaddr == ipaddr) ksocknal_close_conn_locked(conn, 0); @@ -2087,11 +2083,11 @@ ksocknal_peer_del_interface_locked(ksock_peer_t *peer, __u32 ipaddr) static int ksocknal_del_interface(lnet_ni_t *ni, __u32 ipaddress) { - ksock_net_t *net = ni->ni_data; + struct ksock_net *net = ni->ni_data; int rc = -ENOENT; struct list_head *tmp; struct list_head *nxt; - ksock_peer_t *peer; + struct ksock_peer *peer; __u32 this_ip; int i; int j; @@ -2115,7 +2111,7 @@ ksocknal_del_interface(lnet_ni_t *ni, __u32 ipaddress) for (j = 0; j < ksocknal_data.ksnd_peer_hash_size; j++) { list_for_each_safe(tmp, nxt, &ksocknal_data.ksnd_peers[j]) { - peer = list_entry(tmp, ksock_peer_t, ksnp_list); + peer = list_entry(tmp, struct ksock_peer, ksnp_list); if (peer->ksnp_ni != ni) continue; @@ -2139,8 +2135,8 @@ ksocknal_ctl(lnet_ni_t *ni, unsigned int cmd, void *arg) switch (cmd) { case IOC_LIBCFS_GET_INTERFACE: { - ksock_net_t *net = ni->ni_data; - ksock_interface_t *iface; + struct ksock_net *net = ni->ni_data; + struct ksock_interface *iface; 
read_lock(&ksocknal_data.ksnd_global_lock); @@ -2209,7 +2205,7 @@ ksocknal_ctl(lnet_ni_t *ni, unsigned int cmd, void *arg) int txmem; int rxmem; int nagle; - ksock_conn_t *conn = ksocknal_get_conn_by_idx(ni, data->ioc_count); + struct ksock_conn *conn = ksocknal_get_conn_by_idx(ni, data->ioc_count); if (!conn) return -ENOENT; @@ -2284,8 +2280,8 @@ ksocknal_free_buffers(void) if (!list_empty(&ksocknal_data.ksnd_idle_noop_txs)) { struct list_head zlist; - ksock_tx_t *tx; - ksock_tx_t *temp; + struct ksock_tx *tx; + struct ksock_tx *temp; list_add(&zlist, &ksocknal_data.ksnd_idle_noop_txs); list_del_init(&ksocknal_data.ksnd_idle_noop_txs); @@ -2304,7 +2300,7 @@ static void ksocknal_base_shutdown(void) { struct ksock_sched_info *info; - ksock_sched_t *sched; + struct ksock_sched *sched; int i; int j; @@ -2446,7 +2442,7 @@ ksocknal_base_startup(void) goto failed; cfs_percpt_for_each(info, i, ksocknal_data.ksnd_sched_info) { - ksock_sched_t *sched; + struct ksock_sched *sched; int nthrs; nthrs = cfs_cpt_weight(lnet_cpt_table(), i); @@ -2534,7 +2530,7 @@ ksocknal_base_startup(void) static void ksocknal_debug_peerhash(lnet_ni_t *ni) { - ksock_peer_t *peer = NULL; + struct ksock_peer *peer = NULL; struct list_head *tmp; int i; @@ -2542,7 +2538,7 @@ ksocknal_debug_peerhash(lnet_ni_t *ni) for (i = 0; i < ksocknal_data.ksnd_peer_hash_size; i++) { list_for_each(tmp, &ksocknal_data.ksnd_peers[i]) { - peer = list_entry(tmp, ksock_peer_t, ksnp_list); + peer = list_entry(tmp, struct ksock_peer, ksnp_list); if (peer->ksnp_ni == ni) break; @@ -2552,8 +2548,8 @@ ksocknal_debug_peerhash(lnet_ni_t *ni) } if (peer) { - ksock_route_t *route; - ksock_conn_t *conn; + struct ksock_route *route; + struct ksock_conn *conn; CWARN("Active peer on shutdown: %s, ref %d, scnt %d, closing %d, accepting %d, err %d, zcookie %llu, txq %d, zc_req %d\n", libcfs_id2str(peer->ksnp_id), @@ -2565,7 +2561,7 @@ ksocknal_debug_peerhash(lnet_ni_t *ni) !list_empty(&peer->ksnp_zc_req_list)); list_for_each(tmp, &peer->ksnp_routes) { - route = list_entry(tmp, ksock_route_t, ksnr_list); + route = list_entry(tmp, struct ksock_route, ksnr_list); CWARN("Route: ref %d, schd %d, conn %d, cnted %d, del %d\n", atomic_read(&route->ksnr_refcount), route->ksnr_scheduled, route->ksnr_connecting, @@ -2573,7 +2569,7 @@ ksocknal_debug_peerhash(lnet_ni_t *ni) } list_for_each(tmp, &peer->ksnp_conns) { - conn = list_entry(tmp, ksock_conn_t, ksnc_list); + conn = list_entry(tmp, struct ksock_conn, ksnc_list); CWARN("Conn: ref %d, sref %d, t %d, c %d\n", atomic_read(&conn->ksnc_conn_refcount), atomic_read(&conn->ksnc_sock_refcount), @@ -2582,13 +2578,12 @@ ksocknal_debug_peerhash(lnet_ni_t *ni) } read_unlock(&ksocknal_data.ksnd_global_lock); - return; } void ksocknal_shutdown(lnet_ni_t *ni) { - ksock_net_t *net = ni->ni_data; + struct ksock_net *net = ni->ni_data; int i; lnet_process_id_t anyid = {0}; @@ -2638,7 +2633,7 @@ ksocknal_shutdown(lnet_ni_t *ni) } static int -ksocknal_enumerate_interfaces(ksock_net_t *net) +ksocknal_enumerate_interfaces(struct ksock_net *net) { char **names; int i; @@ -2695,7 +2690,7 @@ ksocknal_enumerate_interfaces(ksock_net_t *net) } static int -ksocknal_search_new_ipif(ksock_net_t *net) +ksocknal_search_new_ipif(struct ksock_net *net) { int new_ipif = 0; int i; @@ -2704,7 +2699,7 @@ ksocknal_search_new_ipif(ksock_net_t *net) char *ifnam = &net->ksnn_interfaces[i].ksni_name[0]; char *colon = strchr(ifnam, ':'); int found = 0; - ksock_net_t *tmp; + struct ksock_net *tmp; int j; if (colon) /* ignore alias device */ @@ -2761,7 +2756,7 
@@ ksocknal_start_schedulers(struct ksock_sched_info *info) for (i = 0; i < nthrs; i++) { long id; char name[20]; - ksock_sched_t *sched; + struct ksock_sched *sched; id = KSOCK_THREAD_ID(info->ksi_cpt, info->ksi_nthreads + i); sched = &info->ksi_scheds[KSOCK_THREAD_SID(id)]; @@ -2783,7 +2778,7 @@ ksocknal_start_schedulers(struct ksock_sched_info *info) } static int -ksocknal_net_start_threads(ksock_net_t *net, __u32 *cpts, int ncpts) +ksocknal_net_start_threads(struct ksock_net *net, __u32 *cpts, int ncpts) { int newif = ksocknal_search_new_ipif(net); int rc; @@ -2811,7 +2806,7 @@ ksocknal_net_start_threads(ksock_net_t *net, __u32 *cpts, int ncpts) int ksocknal_startup(lnet_ni_t *ni) { - ksock_net_t *net; + struct ksock_net *net; int rc; int i; diff --git a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd.h b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd.h index a60d72f9432f..a56632b4ee37 100644 --- a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd.h +++ b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd.h @@ -77,8 +77,7 @@ struct ksock_sched_info; -typedef struct /* per scheduler state */ -{ +struct ksock_sched { /* per scheduler state */ spinlock_t kss_lock; /* serialise */ struct list_head kss_rx_conns; /* conn waiting to be read */ struct list_head kss_tx_conns; /* conn waiting to be written */ @@ -89,13 +88,13 @@ typedef struct /* per scheduler state */ struct ksock_sched_info *kss_info; /* owner of it */ struct page *kss_rx_scratch_pgs[LNET_MAX_IOV]; struct kvec kss_scratch_iov[LNET_MAX_IOV]; -} ksock_sched_t; +}; struct ksock_sched_info { int ksi_nthreads_max; /* max allowed threads */ int ksi_nthreads; /* number of threads */ int ksi_cpt; /* CPT id */ - ksock_sched_t *ksi_scheds; /* array of schedulers */ + struct ksock_sched *ksi_scheds; /* array of schedulers */ }; #define KSOCK_CPT_SHIFT 16 @@ -103,16 +102,15 @@ struct ksock_sched_info { #define KSOCK_THREAD_CPT(id) ((id) >> KSOCK_CPT_SHIFT) #define KSOCK_THREAD_SID(id) ((id) & ((1UL << KSOCK_CPT_SHIFT) - 1)) -typedef struct /* in-use interface */ -{ +struct ksock_interface { /* in-use interface */ __u32 ksni_ipaddr; /* interface's IP address */ __u32 ksni_netmask; /* interface's network mask */ int ksni_nroutes; /* # routes using (active) */ int ksni_npeers; /* # peers using (passive) */ char ksni_name[IFNAMSIZ]; /* interface name */ -} ksock_interface_t; +}; -typedef struct { +struct ksock_tunables { int *ksnd_timeout; /* "stuck" socket timeout * (seconds) */ int *ksnd_nscheds; /* # scheduler threads in each @@ -155,24 +153,24 @@ typedef struct { * Chelsio TOE) */ int *ksnd_zc_recv_min_nfrags; /* minimum # of fragments to * enable ZC receive */ -} ksock_tunables_t; +}; -typedef struct { +struct ksock_net { __u64 ksnn_incarnation; /* my epoch */ spinlock_t ksnn_lock; /* serialise */ struct list_head ksnn_list; /* chain on global list */ int ksnn_npeers; /* # peers */ int ksnn_shutdown; /* shutting down? 
*/ int ksnn_ninterfaces; /* IP interfaces */ - ksock_interface_t ksnn_interfaces[LNET_MAX_INTERFACES]; -} ksock_net_t; + struct ksock_interface ksnn_interfaces[LNET_MAX_INTERFACES]; +}; /** connd timeout */ #define SOCKNAL_CONND_TIMEOUT 120 /** reserved thread for accepting & creating new connd */ #define SOCKNAL_CONND_RESV 1 -typedef struct { +struct ksock_nal_data { int ksnd_init; /* initialisation state */ int ksnd_nnets; /* # networks set up */ @@ -229,7 +227,7 @@ typedef struct { spinlock_t ksnd_tx_lock; /* serialise, g_lock * unsafe */ -} ksock_nal_data_t; +}; #define SOCKNAL_INIT_NOTHING 0 #define SOCKNAL_INIT_DATA 1 @@ -250,8 +248,7 @@ struct ksock_peer; /* forward ref */ struct ksock_route; /* forward ref */ struct ksock_proto; /* forward ref */ -typedef struct /* transmit packet */ -{ +struct ksock_tx { /* transmit packet */ struct list_head tx_list; /* queue on conn for transmission etc */ struct list_head tx_zc_list; /* queue on peer for ZC request */ @@ -281,20 +278,20 @@ typedef struct /* transmit packet */ struct kvec iov[1]; /* virt hdr + payload */ } virt; } tx_frags; -} ksock_tx_t; +}; -#define KSOCK_NOOP_TX_SIZE (offsetof(ksock_tx_t, tx_frags.paged.kiov[0])) +#define KSOCK_NOOP_TX_SIZE (offsetof(struct ksock_tx, tx_frags.paged.kiov[0])) -/* network zero copy callback descriptor embedded in ksock_tx_t */ +/* network zero copy callback descriptor embedded in struct ksock_tx */ /* * space for the rx frag descriptors; we either read a single contiguous * header, or up to LNET_MAX_IOV frags of payload of either type. */ -typedef union { +union ksock_rxiovspace { struct kvec iov[LNET_MAX_IOV]; lnet_kiov_t kiov[LNET_MAX_IOV]; -} ksock_rxiovspace_t; +}; #define SOCKNAL_RX_KSM_HEADER 1 /* reading ksock message header */ #define SOCKNAL_RX_LNET_HEADER 2 /* reading lnet message header */ @@ -303,7 +300,7 @@ typedef union { #define SOCKNAL_RX_LNET_PAYLOAD 5 /* reading lnet payload (to deliver here) */ #define SOCKNAL_RX_SLOP 6 /* skipping body */ -typedef struct ksock_conn { +struct ksock_conn { struct ksock_peer *ksnc_peer; /* owning peer */ struct ksock_route *ksnc_route; /* owning route */ struct list_head ksnc_list; /* stash on peer's conn list */ @@ -314,8 +311,8 @@ typedef struct ksock_conn { * write_space() callback */ atomic_t ksnc_conn_refcount;/* conn refcount */ atomic_t ksnc_sock_refcount;/* sock refcount */ - ksock_sched_t *ksnc_scheduler; /* who schedules this connection - */ + struct ksock_sched *ksnc_scheduler; /* who schedules this connection + */ __u32 ksnc_myipaddr; /* my IP */ __u32 ksnc_ipaddr; /* peer's IP */ int ksnc_port; /* peer's port */ @@ -341,7 +338,7 @@ typedef struct ksock_conn { struct kvec *ksnc_rx_iov; /* the iovec frags */ int ksnc_rx_nkiov; /* # page frags */ lnet_kiov_t *ksnc_rx_kiov; /* the page frags */ - ksock_rxiovspace_t ksnc_rx_iov_space; /* space for frag descriptors */ + union ksock_rxiovspace ksnc_rx_iov_space; /* space for frag descriptors */ __u32 ksnc_rx_csum; /* partial checksum for incoming * data */ void *ksnc_cookie; /* rx lnet_finalize passthru arg @@ -357,7 +354,7 @@ typedef struct ksock_conn { struct list_head ksnc_tx_list; /* where I enq waiting for output * space */ struct list_head ksnc_tx_queue; /* packets waiting to be sent */ - ksock_tx_t *ksnc_tx_carrier; /* next TX that can carry a LNet + struct ksock_tx *ksnc_tx_carrier; /* next TX that can carry a LNet * message or ZC-ACK */ unsigned long ksnc_tx_deadline; /* when (in jiffies) tx times out */ @@ -367,9 +364,9 @@ typedef struct ksock_conn { int ksnc_tx_scheduled; /* 
being progressed */ unsigned long ksnc_tx_last_post; /* time stamp of the last posted * TX */ -} ksock_conn_t; +}; -typedef struct ksock_route { +struct ksock_route { struct list_head ksnr_list; /* chain on peer route list */ struct list_head ksnr_connd_list; /* chain on ksnr_connd_routes */ struct ksock_peer *ksnr_peer; /* owning peer */ @@ -389,11 +386,11 @@ typedef struct ksock_route { unsigned int ksnr_share_count; /* created explicitly? */ int ksnr_conn_count; /* # conns established by this * route */ -} ksock_route_t; +}; #define SOCKNAL_KEEPALIVE_PING 1 /* cookie for keepalive ping */ -typedef struct ksock_peer { +struct ksock_peer { struct list_head ksnp_list; /* stash on global peer list */ unsigned long ksnp_last_alive; /* when (in jiffies) I was last * alive */ @@ -420,49 +417,49 @@ typedef struct ksock_peer { /* preferred local interfaces */ __u32 ksnp_passive_ips[LNET_MAX_INTERFACES]; -} ksock_peer_t; +}; -typedef struct ksock_connreq { +struct ksock_connreq { struct list_head ksncr_list; /* stash on ksnd_connd_connreqs */ lnet_ni_t *ksncr_ni; /* chosen NI */ struct socket *ksncr_sock; /* accepted socket */ -} ksock_connreq_t; +}; -extern ksock_nal_data_t ksocknal_data; -extern ksock_tunables_t ksocknal_tunables; +extern struct ksock_nal_data ksocknal_data; +extern struct ksock_tunables ksocknal_tunables; #define SOCKNAL_MATCH_NO 0 /* TX can't match type of connection */ #define SOCKNAL_MATCH_YES 1 /* TX matches type of connection */ #define SOCKNAL_MATCH_MAY 2 /* TX can be sent on the connection, but not * preferred */ -typedef struct ksock_proto { +struct ksock_proto { /* version number of protocol */ int pro_version; /* handshake function */ - int (*pro_send_hello)(ksock_conn_t *, ksock_hello_msg_t *); + int (*pro_send_hello)(struct ksock_conn *, ksock_hello_msg_t *); /* handshake function */ - int (*pro_recv_hello)(ksock_conn_t *, ksock_hello_msg_t *, int); + int (*pro_recv_hello)(struct ksock_conn *, ksock_hello_msg_t *, int); /* message pack */ - void (*pro_pack)(ksock_tx_t *); + void (*pro_pack)(struct ksock_tx *); /* message unpack */ void (*pro_unpack)(ksock_msg_t *); /* queue tx on the connection */ - ksock_tx_t *(*pro_queue_tx_msg)(ksock_conn_t *, ksock_tx_t *); + struct ksock_tx *(*pro_queue_tx_msg)(struct ksock_conn *, struct ksock_tx *); /* queue ZC ack on the connection */ - int (*pro_queue_tx_zcack)(ksock_conn_t *, ksock_tx_t *, __u64); + int (*pro_queue_tx_zcack)(struct ksock_conn *, struct ksock_tx *, __u64); /* handle ZC request */ - int (*pro_handle_zcreq)(ksock_conn_t *, __u64, int); + int (*pro_handle_zcreq)(struct ksock_conn *, __u64, int); /* handle ZC ACK */ - int (*pro_handle_zcack)(ksock_conn_t *, __u64, __u64); + int (*pro_handle_zcack)(struct ksock_conn *, __u64, __u64); /* * msg type matches the connection type: @@ -471,12 +468,12 @@ typedef struct ksock_proto { * return MATCH_YES : matching type * return MATCH_MAY : can be backup */ - int (*pro_match_tx)(ksock_conn_t *, ksock_tx_t *, int); -} ksock_proto_t; + int (*pro_match_tx)(struct ksock_conn *, struct ksock_tx *, int); +}; -extern ksock_proto_t ksocknal_protocol_v1x; -extern ksock_proto_t ksocknal_protocol_v2x; -extern ksock_proto_t ksocknal_protocol_v3x; +extern struct ksock_proto ksocknal_protocol_v1x; +extern struct ksock_proto ksocknal_protocol_v2x; +extern struct ksock_proto ksocknal_protocol_v3x; #define KSOCK_PROTO_V1_MAJOR LNET_PROTO_TCP_VERSION_MAJOR #define KSOCK_PROTO_V1_MINOR LNET_PROTO_TCP_VERSION_MINOR @@ -517,17 +514,17 @@ ksocknal_nid2peerlist(lnet_nid_t nid) } static inline 
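socklnd.h above completes the conversion this patch is about: kernel coding style discourages typedefs for plain aggregates (checkpatch.pl flags new ones with "do not add new typedefs"), because 'struct foo' at the use site says more than an opaque foo_t. The transformation is purely syntactic; object layout and code generation are unchanged. A side-by-side sketch with made-up names:

    /* old style: the alias hides the struct keyword at every use site */
    typedef struct ksock_example_old {
        int field;
    } ksock_example_old_t;

    /* new style: callers spell out the struct tag; layout is identical */
    struct ksock_example_new {
        int field;
    };

    static ksock_example_old_t a;      /* how the old code declared objects */
    static struct ksock_example_new b; /* how the converted code declares them */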
void -ksocknal_conn_addref(ksock_conn_t *conn) +ksocknal_conn_addref(struct ksock_conn *conn) { LASSERT(atomic_read(&conn->ksnc_conn_refcount) > 0); atomic_inc(&conn->ksnc_conn_refcount); } -void ksocknal_queue_zombie_conn(ksock_conn_t *conn); -void ksocknal_finalize_zcreq(ksock_conn_t *conn); +void ksocknal_queue_zombie_conn(struct ksock_conn *conn); +void ksocknal_finalize_zcreq(struct ksock_conn *conn); static inline void -ksocknal_conn_decref(ksock_conn_t *conn) +ksocknal_conn_decref(struct ksock_conn *conn) { LASSERT(atomic_read(&conn->ksnc_conn_refcount) > 0); if (atomic_dec_and_test(&conn->ksnc_conn_refcount)) @@ -535,7 +532,7 @@ ksocknal_conn_decref(ksock_conn_t *conn) } static inline int -ksocknal_connsock_addref(ksock_conn_t *conn) +ksocknal_connsock_addref(struct ksock_conn *conn) { int rc = -ESHUTDOWN; @@ -551,7 +548,7 @@ ksocknal_connsock_addref(ksock_conn_t *conn) } static inline void -ksocknal_connsock_decref(ksock_conn_t *conn) +ksocknal_connsock_decref(struct ksock_conn *conn) { LASSERT(atomic_read(&conn->ksnc_sock_refcount) > 0); if (atomic_dec_and_test(&conn->ksnc_sock_refcount)) { @@ -563,17 +560,17 @@ ksocknal_connsock_decref(ksock_conn_t *conn) } static inline void -ksocknal_tx_addref(ksock_tx_t *tx) +ksocknal_tx_addref(struct ksock_tx *tx) { LASSERT(atomic_read(&tx->tx_refcount) > 0); atomic_inc(&tx->tx_refcount); } -void ksocknal_tx_prep(ksock_conn_t *, ksock_tx_t *tx); -void ksocknal_tx_done(lnet_ni_t *ni, ksock_tx_t *tx); +void ksocknal_tx_prep(struct ksock_conn *, struct ksock_tx *tx); +void ksocknal_tx_done(lnet_ni_t *ni, struct ksock_tx *tx); static inline void -ksocknal_tx_decref(ksock_tx_t *tx) +ksocknal_tx_decref(struct ksock_tx *tx) { LASSERT(atomic_read(&tx->tx_refcount) > 0); if (atomic_dec_and_test(&tx->tx_refcount)) @@ -581,16 +578,16 @@ ksocknal_tx_decref(ksock_tx_t *tx) } static inline void -ksocknal_route_addref(ksock_route_t *route) +ksocknal_route_addref(struct ksock_route *route) { LASSERT(atomic_read(&route->ksnr_refcount) > 0); atomic_inc(&route->ksnr_refcount); } -void ksocknal_destroy_route(ksock_route_t *route); +void ksocknal_destroy_route(struct ksock_route *route); static inline void -ksocknal_route_decref(ksock_route_t *route) +ksocknal_route_decref(struct ksock_route *route) { LASSERT(atomic_read(&route->ksnr_refcount) > 0); if (atomic_dec_and_test(&route->ksnr_refcount)) @@ -598,16 +595,16 @@ ksocknal_route_decref(ksock_route_t *route) } static inline void -ksocknal_peer_addref(ksock_peer_t *peer) +ksocknal_peer_addref(struct ksock_peer *peer) { LASSERT(atomic_read(&peer->ksnp_refcount) > 0); atomic_inc(&peer->ksnp_refcount); } -void ksocknal_destroy_peer(ksock_peer_t *peer); +void ksocknal_destroy_peer(struct ksock_peer *peer); static inline void -ksocknal_peer_decref(ksock_peer_t *peer) +ksocknal_peer_decref(struct ksock_peer *peer) { LASSERT(atomic_read(&peer->ksnp_refcount) > 0); if (atomic_dec_and_test(&peer->ksnp_refcount)) @@ -625,71 +622,71 @@ int ksocknal_recv(lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg, int ksocknal_accept(lnet_ni_t *ni, struct socket *sock); int ksocknal_add_peer(lnet_ni_t *ni, lnet_process_id_t id, __u32 ip, int port); -ksock_peer_t *ksocknal_find_peer_locked(lnet_ni_t *ni, lnet_process_id_t id); -ksock_peer_t *ksocknal_find_peer(lnet_ni_t *ni, lnet_process_id_t id); -void ksocknal_peer_failed(ksock_peer_t *peer); -int ksocknal_create_conn(lnet_ni_t *ni, ksock_route_t *route, +struct ksock_peer *ksocknal_find_peer_locked(lnet_ni_t *ni, lnet_process_id_t id); +struct ksock_peer 
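The addref/decref inlines above encode the usual kernel refcount contract: a new reference may only be taken by someone who already holds one (hence the LASSERT that the count is positive), and whoever drops the count to zero performs the destruction. A userspace sketch with C11 atomics standing in for atomic_t:

    #include <stdatomic.h>
    #include <stdlib.h>
    #include <assert.h>

    struct obj {
        atomic_int refcount; /* starts at 1, owned by the creator */
    };

    static void obj_addref(struct obj *o)
    {
        /* only legal while the caller already holds a reference */
        assert(atomic_load(&o->refcount) > 0);
        atomic_fetch_add(&o->refcount, 1);
    }

    static void obj_decref(struct obj *o)
    {
        /* fetch_sub returns the old value; 1 means this was the last ref */
        if (atomic_fetch_sub(&o->refcount, 1) == 1)
            free(o);
    }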
*ksocknal_find_peer(lnet_ni_t *ni, lnet_process_id_t id); +void ksocknal_peer_failed(struct ksock_peer *peer); +int ksocknal_create_conn(lnet_ni_t *ni, struct ksock_route *route, struct socket *sock, int type); -void ksocknal_close_conn_locked(ksock_conn_t *conn, int why); -void ksocknal_terminate_conn(ksock_conn_t *conn); -void ksocknal_destroy_conn(ksock_conn_t *conn); -int ksocknal_close_peer_conns_locked(ksock_peer_t *peer, +void ksocknal_close_conn_locked(struct ksock_conn *conn, int why); +void ksocknal_terminate_conn(struct ksock_conn *conn); +void ksocknal_destroy_conn(struct ksock_conn *conn); +int ksocknal_close_peer_conns_locked(struct ksock_peer *peer, __u32 ipaddr, int why); -int ksocknal_close_conn_and_siblings(ksock_conn_t *conn, int why); +int ksocknal_close_conn_and_siblings(struct ksock_conn *conn, int why); int ksocknal_close_matching_conns(lnet_process_id_t id, __u32 ipaddr); -ksock_conn_t *ksocknal_find_conn_locked(ksock_peer_t *peer, - ksock_tx_t *tx, int nonblk); +struct ksock_conn *ksocknal_find_conn_locked(struct ksock_peer *peer, + struct ksock_tx *tx, int nonblk); -int ksocknal_launch_packet(lnet_ni_t *ni, ksock_tx_t *tx, +int ksocknal_launch_packet(lnet_ni_t *ni, struct ksock_tx *tx, lnet_process_id_t id); -ksock_tx_t *ksocknal_alloc_tx(int type, int size); -void ksocknal_free_tx(ksock_tx_t *tx); -ksock_tx_t *ksocknal_alloc_tx_noop(__u64 cookie, int nonblk); -void ksocknal_next_tx_carrier(ksock_conn_t *conn); -void ksocknal_queue_tx_locked(ksock_tx_t *tx, ksock_conn_t *conn); +struct ksock_tx *ksocknal_alloc_tx(int type, int size); +void ksocknal_free_tx(struct ksock_tx *tx); +struct ksock_tx *ksocknal_alloc_tx_noop(__u64 cookie, int nonblk); +void ksocknal_next_tx_carrier(struct ksock_conn *conn); +void ksocknal_queue_tx_locked(struct ksock_tx *tx, struct ksock_conn *conn); void ksocknal_txlist_done(lnet_ni_t *ni, struct list_head *txlist, int error); void ksocknal_notify(lnet_ni_t *ni, lnet_nid_t gw_nid, int alive); void ksocknal_query(struct lnet_ni *ni, lnet_nid_t nid, unsigned long *when); int ksocknal_thread_start(int (*fn)(void *arg), void *arg, char *name); void ksocknal_thread_fini(void); -void ksocknal_launch_all_connections_locked(ksock_peer_t *peer); -ksock_route_t *ksocknal_find_connectable_route_locked(ksock_peer_t *peer); -ksock_route_t *ksocknal_find_connecting_route_locked(ksock_peer_t *peer); -int ksocknal_new_packet(ksock_conn_t *conn, int skip); +void ksocknal_launch_all_connections_locked(struct ksock_peer *peer); +struct ksock_route *ksocknal_find_connectable_route_locked(struct ksock_peer *peer); +struct ksock_route *ksocknal_find_connecting_route_locked(struct ksock_peer *peer); +int ksocknal_new_packet(struct ksock_conn *conn, int skip); int ksocknal_scheduler(void *arg); int ksocknal_connd(void *arg); int ksocknal_reaper(void *arg); -int ksocknal_send_hello(lnet_ni_t *ni, ksock_conn_t *conn, +int ksocknal_send_hello(lnet_ni_t *ni, struct ksock_conn *conn, lnet_nid_t peer_nid, ksock_hello_msg_t *hello); -int ksocknal_recv_hello(lnet_ni_t *ni, ksock_conn_t *conn, +int ksocknal_recv_hello(lnet_ni_t *ni, struct ksock_conn *conn, ksock_hello_msg_t *hello, lnet_process_id_t *id, __u64 *incarnation); -void ksocknal_read_callback(ksock_conn_t *conn); -void ksocknal_write_callback(ksock_conn_t *conn); - -int ksocknal_lib_zc_capable(ksock_conn_t *conn); -void ksocknal_lib_save_callback(struct socket *sock, ksock_conn_t *conn); -void ksocknal_lib_set_callback(struct socket *sock, ksock_conn_t *conn); -void ksocknal_lib_reset_callback(struct socket 
*sock, ksock_conn_t *conn); -void ksocknal_lib_push_conn(ksock_conn_t *conn); -int ksocknal_lib_get_conn_addrs(ksock_conn_t *conn); +void ksocknal_read_callback(struct ksock_conn *conn); +void ksocknal_write_callback(struct ksock_conn *conn); + +int ksocknal_lib_zc_capable(struct ksock_conn *conn); +void ksocknal_lib_save_callback(struct socket *sock, struct ksock_conn *conn); +void ksocknal_lib_set_callback(struct socket *sock, struct ksock_conn *conn); +void ksocknal_lib_reset_callback(struct socket *sock, struct ksock_conn *conn); +void ksocknal_lib_push_conn(struct ksock_conn *conn); +int ksocknal_lib_get_conn_addrs(struct ksock_conn *conn); int ksocknal_lib_setup_sock(struct socket *so); -int ksocknal_lib_send_iov(ksock_conn_t *conn, ksock_tx_t *tx); -int ksocknal_lib_send_kiov(ksock_conn_t *conn, ksock_tx_t *tx); -void ksocknal_lib_eager_ack(ksock_conn_t *conn); -int ksocknal_lib_recv_iov(ksock_conn_t *conn); -int ksocknal_lib_recv_kiov(ksock_conn_t *conn); -int ksocknal_lib_get_conn_tunables(ksock_conn_t *conn, int *txmem, +int ksocknal_lib_send_iov(struct ksock_conn *conn, struct ksock_tx *tx); +int ksocknal_lib_send_kiov(struct ksock_conn *conn, struct ksock_tx *tx); +void ksocknal_lib_eager_ack(struct ksock_conn *conn); +int ksocknal_lib_recv_iov(struct ksock_conn *conn); +int ksocknal_lib_recv_kiov(struct ksock_conn *conn); +int ksocknal_lib_get_conn_tunables(struct ksock_conn *conn, int *txmem, int *rxmem, int *nagle); -void ksocknal_read_callback(ksock_conn_t *conn); -void ksocknal_write_callback(ksock_conn_t *conn); +void ksocknal_read_callback(struct ksock_conn *conn); +void ksocknal_write_callback(struct ksock_conn *conn); int ksocknal_tunables_init(void); -void ksocknal_lib_csum_tx(ksock_tx_t *tx); +void ksocknal_lib_csum_tx(struct ksock_tx *tx); -int ksocknal_lib_memory_pressure(ksock_conn_t *conn); +int ksocknal_lib_memory_pressure(struct ksock_conn *conn); int ksocknal_lib_bind_thread_to_cpu(int id); #endif /* _SOCKLND_SOCKLND_H_ */ diff --git a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_cb.c b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_cb.c index 976fd78926e0..303576d815c6 100644 --- a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_cb.c +++ b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_cb.c @@ -23,10 +23,10 @@ #include "socklnd.h" -ksock_tx_t * +struct ksock_tx * ksocknal_alloc_tx(int type, int size) { - ksock_tx_t *tx = NULL; + struct ksock_tx *tx = NULL; if (type == KSOCK_MSG_NOOP) { LASSERT(size == KSOCK_NOOP_TX_SIZE); @@ -36,7 +36,7 @@ ksocknal_alloc_tx(int type, int size) if (!list_empty(&ksocknal_data.ksnd_idle_noop_txs)) { tx = list_entry(ksocknal_data.ksnd_idle_noop_txs. 
\ - next, ksock_tx_t, tx_list); + next, struct ksock_tx, tx_list); LASSERT(tx->tx_desc_size == size); list_del(&tx->tx_list); } @@ -61,10 +61,10 @@ ksocknal_alloc_tx(int type, int size) return tx; } -ksock_tx_t * +struct ksock_tx * ksocknal_alloc_tx_noop(__u64 cookie, int nonblk) { - ksock_tx_t *tx; + struct ksock_tx *tx; tx = ksocknal_alloc_tx(KSOCK_MSG_NOOP, KSOCK_NOOP_TX_SIZE); if (!tx) { @@ -87,7 +87,7 @@ ksocknal_alloc_tx_noop(__u64 cookie, int nonblk) } void -ksocknal_free_tx(ksock_tx_t *tx) +ksocknal_free_tx(struct ksock_tx *tx) { atomic_dec(&ksocknal_data.ksnd_nactive_txs); @@ -104,7 +104,7 @@ ksocknal_free_tx(ksock_tx_t *tx) } static int -ksocknal_send_iov(ksock_conn_t *conn, ksock_tx_t *tx) +ksocknal_send_iov(struct ksock_conn *conn, struct ksock_tx *tx) { struct kvec *iov = tx->tx_iov; int nob; @@ -126,7 +126,7 @@ ksocknal_send_iov(ksock_conn_t *conn, ksock_tx_t *tx) do { LASSERT(tx->tx_niov > 0); - if (nob < (int) iov->iov_len) { + if (nob < (int)iov->iov_len) { iov->iov_base = (void *)((char *)iov->iov_base + nob); iov->iov_len -= nob; return rc; @@ -141,7 +141,7 @@ ksocknal_send_iov(ksock_conn_t *conn, ksock_tx_t *tx) } static int -ksocknal_send_kiov(ksock_conn_t *conn, ksock_tx_t *tx) +ksocknal_send_kiov(struct ksock_conn *conn, struct ksock_tx *tx) { lnet_kiov_t *kiov = tx->tx_kiov; int nob; @@ -179,7 +179,7 @@ ksocknal_send_kiov(ksock_conn_t *conn, ksock_tx_t *tx) } static int -ksocknal_transmit(ksock_conn_t *conn, ksock_tx_t *tx) +ksocknal_transmit(struct ksock_conn *conn, struct ksock_tx *tx) { int rc; int bufnob; @@ -247,7 +247,7 @@ ksocknal_transmit(ksock_conn_t *conn, ksock_tx_t *tx) } static int -ksocknal_recv_iov(ksock_conn_t *conn) +ksocknal_recv_iov(struct ksock_conn *conn) { struct kvec *iov = conn->ksnc_rx_iov; int nob; @@ -294,7 +294,7 @@ ksocknal_recv_iov(ksock_conn_t *conn) } static int -ksocknal_recv_kiov(ksock_conn_t *conn) +ksocknal_recv_kiov(struct ksock_conn *conn) { lnet_kiov_t *kiov = conn->ksnc_rx_kiov; int nob; @@ -326,7 +326,7 @@ ksocknal_recv_kiov(ksock_conn_t *conn) do { LASSERT(conn->ksnc_rx_nkiov > 0); - if (nob < (int) kiov->kiov_len) { + if (nob < (int)kiov->kiov_len) { kiov->kiov_offset += nob; kiov->kiov_len -= nob; return -EAGAIN; @@ -341,7 +341,7 @@ ksocknal_recv_kiov(ksock_conn_t *conn) } static int -ksocknal_receive(ksock_conn_t *conn) +ksocknal_receive(struct ksock_conn *conn) { /* * Return 1 on success, 0 on EOF, < 0 on error. @@ -391,7 +391,7 @@ ksocknal_receive(ksock_conn_t *conn) } void -ksocknal_tx_done(lnet_ni_t *ni, ksock_tx_t *tx) +ksocknal_tx_done(lnet_ni_t *ni, struct ksock_tx *tx) { lnet_msg_t *lnetmsg = tx->tx_lnetmsg; int rc = (!tx->tx_resid && !tx->tx_zc_aborted) ? 
0 : -EIO; @@ -412,10 +412,10 @@ ksocknal_tx_done(lnet_ni_t *ni, ksock_tx_t *tx) void ksocknal_txlist_done(lnet_ni_t *ni, struct list_head *txlist, int error) { - ksock_tx_t *tx; + struct ksock_tx *tx; while (!list_empty(txlist)) { - tx = list_entry(txlist->next, ksock_tx_t, tx_list); + tx = list_entry(txlist->next, struct ksock_tx, tx_list); if (error && tx->tx_lnetmsg) { CNETERR("Deleting packet type %d len %d %s->%s\n", @@ -435,10 +435,10 @@ ksocknal_txlist_done(lnet_ni_t *ni, struct list_head *txlist, int error) } static void -ksocknal_check_zc_req(ksock_tx_t *tx) +ksocknal_check_zc_req(struct ksock_tx *tx) { - ksock_conn_t *conn = tx->tx_conn; - ksock_peer_t *peer = conn->ksnc_peer; + struct ksock_conn *conn = tx->tx_conn; + struct ksock_peer *peer = conn->ksnc_peer; /* * Set tx_msg.ksm_zc_cookies[0] to a unique non-zero cookie and add tx @@ -482,9 +482,9 @@ ksocknal_check_zc_req(ksock_tx_t *tx) } static void -ksocknal_uncheck_zc_req(ksock_tx_t *tx) +ksocknal_uncheck_zc_req(struct ksock_tx *tx) { - ksock_peer_t *peer = tx->tx_conn->ksnc_peer; + struct ksock_peer *peer = tx->tx_conn->ksnc_peer; LASSERT(tx->tx_msg.ksm_type != KSOCK_MSG_NOOP); LASSERT(tx->tx_zc_capable); @@ -508,7 +508,7 @@ ksocknal_uncheck_zc_req(ksock_tx_t *tx) } static int -ksocknal_process_transmit(ksock_conn_t *conn, ksock_tx_t *tx) +ksocknal_process_transmit(struct ksock_conn *conn, struct ksock_tx *tx) { int rc; @@ -583,7 +583,7 @@ ksocknal_process_transmit(ksock_conn_t *conn, ksock_tx_t *tx) } static void -ksocknal_launch_connection_locked(ksock_route_t *route) +ksocknal_launch_connection_locked(struct ksock_route *route) { /* called holding write lock on ksnd_global_lock */ @@ -604,9 +604,9 @@ ksocknal_launch_connection_locked(ksock_route_t *route) } void -ksocknal_launch_all_connections_locked(ksock_peer_t *peer) +ksocknal_launch_all_connections_locked(struct ksock_peer *peer) { - ksock_route_t *route; + struct ksock_route *route; /* called holding write lock on ksnd_global_lock */ for (;;) { @@ -619,18 +619,18 @@ ksocknal_launch_all_connections_locked(ksock_peer_t *peer) } } -ksock_conn_t * -ksocknal_find_conn_locked(ksock_peer_t *peer, ksock_tx_t *tx, int nonblk) +struct ksock_conn * +ksocknal_find_conn_locked(struct ksock_peer *peer, struct ksock_tx *tx, int nonblk) { struct list_head *tmp; - ksock_conn_t *conn; - ksock_conn_t *typed = NULL; - ksock_conn_t *fallback = NULL; + struct ksock_conn *conn; + struct ksock_conn *typed = NULL; + struct ksock_conn *fallback = NULL; int tnob = 0; int fnob = 0; list_for_each(tmp, &peer->ksnp_conns) { - ksock_conn_t *c = list_entry(tmp, ksock_conn_t, ksnc_list); + struct ksock_conn *c = list_entry(tmp, struct ksock_conn, ksnc_list); int nob = atomic_read(&c->ksnc_tx_nob) + c->ksnc_sock->sk->sk_wmem_queued; int rc; @@ -677,7 +677,7 @@ ksocknal_find_conn_locked(ksock_peer_t *peer, ksock_tx_t *tx, int nonblk) } void -ksocknal_tx_prep(ksock_conn_t *conn, ksock_tx_t *tx) +ksocknal_tx_prep(struct ksock_conn *conn, struct ksock_tx *tx) { conn->ksnc_proto->pro_pack(tx); @@ -687,11 +687,11 @@ ksocknal_tx_prep(ksock_conn_t *conn, ksock_tx_t *tx) } void -ksocknal_queue_tx_locked(ksock_tx_t *tx, ksock_conn_t *conn) +ksocknal_queue_tx_locked(struct ksock_tx *tx, struct ksock_conn *conn) { - ksock_sched_t *sched = conn->ksnc_scheduler; + struct ksock_sched *sched = conn->ksnc_scheduler; ksock_msg_t *msg = &tx->tx_msg; - ksock_tx_t *ztx = NULL; + struct ksock_tx *ztx = NULL; int bufnob = 0; /* @@ -784,15 +784,15 @@ ksocknal_queue_tx_locked(ksock_tx_t *tx, ksock_conn_t *conn) 
spin_unlock_bh(&sched->kss_lock); } -ksock_route_t * -ksocknal_find_connectable_route_locked(ksock_peer_t *peer) +struct ksock_route * +ksocknal_find_connectable_route_locked(struct ksock_peer *peer) { unsigned long now = cfs_time_current(); struct list_head *tmp; - ksock_route_t *route; + struct ksock_route *route; list_for_each(tmp, &peer->ksnp_routes) { - route = list_entry(tmp, ksock_route_t, ksnr_list); + route = list_entry(tmp, struct ksock_route, ksnr_list); LASSERT(!route->ksnr_connecting || route->ksnr_scheduled); @@ -820,14 +820,14 @@ ksocknal_find_connectable_route_locked(ksock_peer_t *peer) return NULL; } -ksock_route_t * -ksocknal_find_connecting_route_locked(ksock_peer_t *peer) +struct ksock_route * +ksocknal_find_connecting_route_locked(struct ksock_peer *peer) { struct list_head *tmp; - ksock_route_t *route; + struct ksock_route *route; list_for_each(tmp, &peer->ksnp_routes) { - route = list_entry(tmp, ksock_route_t, ksnr_list); + route = list_entry(tmp, struct ksock_route, ksnr_list); LASSERT(!route->ksnr_connecting || route->ksnr_scheduled); @@ -839,10 +839,10 @@ ksocknal_find_connecting_route_locked(ksock_peer_t *peer) } int -ksocknal_launch_packet(lnet_ni_t *ni, ksock_tx_t *tx, lnet_process_id_t id) +ksocknal_launch_packet(lnet_ni_t *ni, struct ksock_tx *tx, lnet_process_id_t id) { - ksock_peer_t *peer; - ksock_conn_t *conn; + struct ksock_peer *peer; + struct ksock_conn *conn; rwlock_t *g_lock; int retry; int rc; @@ -942,7 +942,7 @@ ksocknal_send(lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg) lnet_kiov_t *payload_kiov = lntmsg->msg_kiov; unsigned int payload_offset = lntmsg->msg_offset; unsigned int payload_nob = lntmsg->msg_len; - ksock_tx_t *tx; + struct ksock_tx *tx; int desc_size; int rc; @@ -960,10 +960,10 @@ ksocknal_send(lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg) LASSERT(!in_interrupt()); if (payload_iov) - desc_size = offsetof(ksock_tx_t, + desc_size = offsetof(struct ksock_tx, tx_frags.virt.iov[1 + payload_niov]); else - desc_size = offsetof(ksock_tx_t, + desc_size = offsetof(struct ksock_tx, tx_frags.paged.kiov[payload_niov]); if (lntmsg->msg_vmflush) @@ -1037,7 +1037,7 @@ ksocknal_thread_fini(void) } int -ksocknal_new_packet(ksock_conn_t *conn, int nob_to_skip) +ksocknal_new_packet(struct ksock_conn *conn, int nob_to_skip) { static char ksocknal_slop_buffer[4096]; @@ -1120,7 +1120,7 @@ ksocknal_new_packet(ksock_conn_t *conn, int nob_to_skip) } static int -ksocknal_process_receive(ksock_conn_t *conn) +ksocknal_process_receive(struct ksock_conn *conn) { lnet_hdr_t *lhdr; lnet_process_id_t *id; @@ -1328,8 +1328,8 @@ ksocknal_recv(lnet_ni_t *ni, void *private, lnet_msg_t *msg, int delayed, unsigned int niov, struct kvec *iov, lnet_kiov_t *kiov, unsigned int offset, unsigned int mlen, unsigned int rlen) { - ksock_conn_t *conn = private; - ksock_sched_t *sched = conn->ksnc_scheduler; + struct ksock_conn *conn = private; + struct ksock_sched *sched = conn->ksnc_scheduler; LASSERT(mlen <= rlen); LASSERT(niov <= LNET_MAX_IOV); @@ -1382,7 +1382,7 @@ ksocknal_recv(lnet_ni_t *ni, void *private, lnet_msg_t *msg, int delayed, } static inline int -ksocknal_sched_cansleep(ksock_sched_t *sched) +ksocknal_sched_cansleep(struct ksock_sched *sched) { int rc; @@ -1399,9 +1399,9 @@ ksocknal_sched_cansleep(ksock_sched_t *sched) int ksocknal_scheduler(void *arg) { struct ksock_sched_info *info; - ksock_sched_t *sched; - ksock_conn_t *conn; - ksock_tx_t *tx; + struct ksock_sched *sched; + struct ksock_conn *conn; + struct ksock_tx *tx; int rc; int nloops = 0; long id = 
(long)arg; @@ -1426,7 +1426,7 @@ int ksocknal_scheduler(void *arg) if (!list_empty(&sched->kss_rx_conns)) { conn = list_entry(sched->kss_rx_conns.next, - ksock_conn_t, ksnc_rx_list); + struct ksock_conn, ksnc_rx_list); list_del(&conn->ksnc_rx_list); LASSERT(conn->ksnc_rx_scheduled); @@ -1481,7 +1481,7 @@ int ksocknal_scheduler(void *arg) } conn = list_entry(sched->kss_tx_conns.next, - ksock_conn_t, ksnc_tx_list); + struct ksock_conn, ksnc_tx_list); list_del(&conn->ksnc_tx_list); LASSERT(conn->ksnc_tx_scheduled); @@ -1489,7 +1489,7 @@ int ksocknal_scheduler(void *arg) LASSERT(!list_empty(&conn->ksnc_tx_queue)); tx = list_entry(conn->ksnc_tx_queue.next, - ksock_tx_t, tx_list); + struct ksock_tx, tx_list); if (conn->ksnc_tx_carrier == tx) ksocknal_next_tx_carrier(conn); @@ -1575,9 +1575,9 @@ int ksocknal_scheduler(void *arg) * Add connection to kss_rx_conns of scheduler * and wakeup the scheduler. */ -void ksocknal_read_callback(ksock_conn_t *conn) +void ksocknal_read_callback(struct ksock_conn *conn) { - ksock_sched_t *sched; + struct ksock_sched *sched; sched = conn->ksnc_scheduler; @@ -1600,9 +1600,9 @@ void ksocknal_read_callback(ksock_conn_t *conn) * Add connection to kss_tx_conns of scheduler * and wakeup the scheduler. */ -void ksocknal_write_callback(ksock_conn_t *conn) +void ksocknal_write_callback(struct ksock_conn *conn) { - ksock_sched_t *sched; + struct ksock_sched *sched; sched = conn->ksnc_scheduler; @@ -1623,7 +1623,7 @@ void ksocknal_write_callback(ksock_conn_t *conn) spin_unlock_bh(&sched->kss_lock); } -static ksock_proto_t * +static struct ksock_proto * ksocknal_parse_proto_version(ksock_hello_msg_t *hello) { __u32 version = 0; @@ -1666,11 +1666,11 @@ ksocknal_parse_proto_version(ksock_hello_msg_t *hello) } int -ksocknal_send_hello(lnet_ni_t *ni, ksock_conn_t *conn, +ksocknal_send_hello(lnet_ni_t *ni, struct ksock_conn *conn, lnet_nid_t peer_nid, ksock_hello_msg_t *hello) { /* CAVEAT EMPTOR: this byte flips 'ipaddrs' */ - ksock_net_t *net = (ksock_net_t *)ni->ni_data; + struct ksock_net *net = (struct ksock_net *)ni->ni_data; LASSERT(hello->kshm_nips <= LNET_MAX_INTERFACES); @@ -1704,7 +1704,7 @@ ksocknal_invert_type(int type) } int -ksocknal_recv_hello(lnet_ni_t *ni, ksock_conn_t *conn, +ksocknal_recv_hello(lnet_ni_t *ni, struct ksock_conn *conn, ksock_hello_msg_t *hello, lnet_process_id_t *peerid, __u64 *incarnation) { @@ -1718,7 +1718,7 @@ ksocknal_recv_hello(lnet_ni_t *ni, ksock_conn_t *conn, int timeout; int proto_match; int rc; - ksock_proto_t *proto; + struct ksock_proto *proto; lnet_process_id_t recv_id; /* socket type set on active connections - not set on passive */ @@ -1847,10 +1847,10 @@ ksocknal_recv_hello(lnet_ni_t *ni, ksock_conn_t *conn, } static int -ksocknal_connect(ksock_route_t *route) +ksocknal_connect(struct ksock_route *route) { LIST_HEAD(zombies); - ksock_peer_t *peer = route->ksnr_peer; + struct ksock_peer *peer = route->ksnr_peer; int type; int wanted; struct socket *sock; @@ -1989,7 +1989,7 @@ ksocknal_connect(ksock_route_t *route) if (!list_empty(&peer->ksnp_tx_queue) && !peer->ksnp_accepting && !ksocknal_find_connecting_route_locked(peer)) { - ksock_conn_t *conn; + struct ksock_conn *conn; /* * ksnp_tx_queue is queued on a conn on successful @@ -1997,7 +1997,7 @@ ksocknal_connect(ksock_route_t *route) */ if (!list_empty(&peer->ksnp_conns)) { conn = list_entry(peer->ksnp_conns.next, - ksock_conn_t, ksnc_list); + struct ksock_conn, ksnc_list); LASSERT(conn->ksnc_proto == &ksocknal_protocol_v3x); } @@ -2131,10 +2131,10 @@ 
ksocknal_connd_check_stop(time64_t sec, long *timeout) * Go through connd_routes queue looking for a route that we can process * right now, @timeout_p can be updated if we need to come back later */ -static ksock_route_t * +static struct ksock_route * ksocknal_connd_get_route_locked(signed long *timeout_p) { - ksock_route_t *route; + struct ksock_route *route; unsigned long now; now = cfs_time_current(); @@ -2158,7 +2158,7 @@ int ksocknal_connd(void *arg) { spinlock_t *connd_lock = &ksocknal_data.ksnd_connd_lock; - ksock_connreq_t *cr; + struct ksock_connreq *cr; wait_queue_t wait; int nloops = 0; int cons_retry = 0; @@ -2174,7 +2174,7 @@ ksocknal_connd(void *arg) ksocknal_data.ksnd_connd_running++; while (!ksocknal_data.ksnd_shuttingdown) { - ksock_route_t *route = NULL; + struct ksock_route *route = NULL; time64_t sec = ktime_get_real_seconds(); long timeout = MAX_SCHEDULE_TIMEOUT; int dropped_lock = 0; @@ -2192,8 +2192,8 @@ ksocknal_connd(void *arg) if (!list_empty(&ksocknal_data.ksnd_connd_connreqs)) { /* Connection accepted by the listener */ - cr = list_entry(ksocknal_data.ksnd_connd_connreqs. \ - next, ksock_connreq_t, ksncr_list); + cr = list_entry(ksocknal_data.ksnd_connd_connreqs.next, + struct ksock_connreq, ksncr_list); list_del(&cr->ksncr_list); spin_unlock_bh(connd_lock); @@ -2267,17 +2267,17 @@ ksocknal_connd(void *arg) return 0; } -static ksock_conn_t * -ksocknal_find_timed_out_conn(ksock_peer_t *peer) +static struct ksock_conn * +ksocknal_find_timed_out_conn(struct ksock_peer *peer) { /* We're called with a shared lock on ksnd_global_lock */ - ksock_conn_t *conn; + struct ksock_conn *conn; struct list_head *ctmp; list_for_each(ctmp, &peer->ksnp_conns) { int error; - conn = list_entry(ctmp, ksock_conn_t, ksnc_list); + conn = list_entry(ctmp, struct ksock_conn, ksnc_list); /* Don't need the {get,put}connsock dance to deref ksnc_sock */ LASSERT(!conn->ksnc_closing); @@ -2351,10 +2351,10 @@ ksocknal_find_timed_out_conn(ksock_peer_t *peer) } static inline void -ksocknal_flush_stale_txs(ksock_peer_t *peer) +ksocknal_flush_stale_txs(struct ksock_peer *peer) { - ksock_tx_t *tx; - ksock_tx_t *tmp; + struct ksock_tx *tx; + struct ksock_tx *tmp; LIST_HEAD(stale_txs); write_lock_bh(&ksocknal_data.ksnd_global_lock); @@ -2374,12 +2374,12 @@ ksocknal_flush_stale_txs(ksock_peer_t *peer) } static int -ksocknal_send_keepalive_locked(ksock_peer_t *peer) +ksocknal_send_keepalive_locked(struct ksock_peer *peer) __must_hold(&ksocknal_data.ksnd_global_lock) { - ksock_sched_t *sched; - ksock_conn_t *conn; - ksock_tx_t *tx; + struct ksock_sched *sched; + struct ksock_conn *conn; + struct ksock_tx *tx; if (list_empty(&peer->ksnp_conns)) /* last_alive will be updated by create_conn */ return 0; @@ -2440,9 +2440,9 @@ static void ksocknal_check_peer_timeouts(int idx) { struct list_head *peers = &ksocknal_data.ksnd_peers[idx]; - ksock_peer_t *peer; - ksock_conn_t *conn; - ksock_tx_t *tx; + struct ksock_peer *peer; + struct ksock_conn *conn; + struct ksock_tx *tx; again: /* @@ -2483,8 +2483,8 @@ ksocknal_check_peer_timeouts(int idx) * holding only shared lock */ if (!list_empty(&peer->ksnp_tx_queue)) { - ksock_tx_t *tx = list_entry(peer->ksnp_tx_queue.next, - ksock_tx_t, tx_list); + struct ksock_tx *tx = list_entry(peer->ksnp_tx_queue.next, + struct ksock_tx, tx_list); if (cfs_time_aftereq(cfs_time_current(), tx->tx_deadline)) { @@ -2518,7 +2518,7 @@ ksocknal_check_peer_timeouts(int idx) } tx = list_entry(peer->ksnp_zc_req_list.next, - ksock_tx_t, tx_zc_list); + struct ksock_tx, tx_zc_list); deadline = 
tx->tx_deadline; resid = tx->tx_resid; conn = tx->tx_conn; @@ -2544,8 +2544,8 @@ int ksocknal_reaper(void *arg) { wait_queue_t wait; - ksock_conn_t *conn; - ksock_sched_t *sched; + struct ksock_conn *conn; + struct ksock_sched *sched; struct list_head enomem_conns; int nenomem_conns; long timeout; @@ -2563,7 +2563,7 @@ ksocknal_reaper(void *arg) while (!ksocknal_data.ksnd_shuttingdown) { if (!list_empty(&ksocknal_data.ksnd_deathrow_conns)) { conn = list_entry(ksocknal_data.ksnd_deathrow_conns.next, - ksock_conn_t, ksnc_list); + struct ksock_conn, ksnc_list); list_del(&conn->ksnc_list); spin_unlock_bh(&ksocknal_data.ksnd_reaper_lock); @@ -2577,7 +2577,7 @@ ksocknal_reaper(void *arg) if (!list_empty(&ksocknal_data.ksnd_zombie_conns)) { conn = list_entry(ksocknal_data.ksnd_zombie_conns.next, - ksock_conn_t, ksnc_list); + struct ksock_conn, ksnc_list); list_del(&conn->ksnc_list); spin_unlock_bh(&ksocknal_data.ksnd_reaper_lock); @@ -2599,7 +2599,7 @@ ksocknal_reaper(void *arg) /* reschedule all the connections that stalled with ENOMEM... */ nenomem_conns = 0; while (!list_empty(&enomem_conns)) { - conn = list_entry(enomem_conns.next, ksock_conn_t, + conn = list_entry(enomem_conns.next, struct ksock_conn, ksnc_tx_list); list_del(&conn->ksnc_tx_list); diff --git a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_lib.c b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_lib.c index d4ce06d0aeeb..6a17757fce1e 100644 --- a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_lib.c +++ b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_lib.c @@ -15,11 +15,7 @@ * * You should have received a copy of the GNU General Public License * version 2 along with this program; If not, see - * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf - * - * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, - * CA 95054 USA or visit www.sun.com if you need additional information or - * have any questions. 
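The socklnd_cb.c hunks above are dominated by one mechanical conversion: the socklnd's private typedefs (ksock_conn_t, ksock_tx_t, ksock_peer_t, ksock_route_t, ksock_sched_t, ...) become explicit struct tags, per kernel coding style, which reserves typedefs for opaque types. Most call sites change only where list_entry() or offsetof() names the type. A minimal userspace sketch of that idiom, with illustrative names rather than the driver's own:

#include <stddef.h>

/* simplified container_of()/list_entry(), as the kernel defines them */
struct list_head { struct list_head *next, *prev; };

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))
#define list_entry(ptr, type, member) container_of(ptr, type, member)

/* stand-in for struct ksock_tx: the struct tag is now spelled out */
struct demo_tx {
	int tx_desc_size;
	struct list_head tx_list;
};

int main(void)
{
	struct demo_tx tx = { .tx_desc_size = 64 };
	/* with the typedef gone, every caller writes the tag explicitly */
	struct demo_tx *p = list_entry(&tx.tx_list, struct demo_tx, tx_list);

	return p->tx_desc_size == 64 ? 0 : 1;
}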
+ * http://www.gnu.org/licenses/gpl-2.0.html * * GPL HEADER END */ @@ -37,7 +33,7 @@ #include "socklnd.h" int -ksocknal_lib_get_conn_addrs(ksock_conn_t *conn) +ksocknal_lib_get_conn_addrs(struct ksock_conn *conn) { int rc = lnet_sock_getaddr(conn->ksnc_sock, 1, &conn->ksnc_ipaddr, &conn->ksnc_port); @@ -60,7 +56,7 @@ ksocknal_lib_get_conn_addrs(ksock_conn_t *conn) } int -ksocknal_lib_zc_capable(ksock_conn_t *conn) +ksocknal_lib_zc_capable(struct ksock_conn *conn) { int caps = conn->ksnc_sock->sk->sk_route_caps; @@ -75,7 +71,7 @@ ksocknal_lib_zc_capable(ksock_conn_t *conn) } int -ksocknal_lib_send_iov(ksock_conn_t *conn, ksock_tx_t *tx) +ksocknal_lib_send_iov(struct ksock_conn *conn, struct ksock_tx *tx) { struct socket *sock = conn->ksnc_sock; int nob; @@ -118,7 +114,7 @@ ksocknal_lib_send_iov(ksock_conn_t *conn, ksock_tx_t *tx) } int -ksocknal_lib_send_kiov(ksock_conn_t *conn, ksock_tx_t *tx) +ksocknal_lib_send_kiov(struct ksock_conn *conn, struct ksock_tx *tx) { struct socket *sock = conn->ksnc_sock; lnet_kiov_t *kiov = tx->tx_kiov; @@ -187,7 +183,7 @@ ksocknal_lib_send_kiov(ksock_conn_t *conn, ksock_tx_t *tx) } void -ksocknal_lib_eager_ack(ksock_conn_t *conn) +ksocknal_lib_eager_ack(struct ksock_conn *conn) { int opt = 1; struct socket *sock = conn->ksnc_sock; @@ -203,7 +199,7 @@ ksocknal_lib_eager_ack(ksock_conn_t *conn) } int -ksocknal_lib_recv_iov(ksock_conn_t *conn) +ksocknal_lib_recv_iov(struct ksock_conn *conn) { #if SOCKNAL_SINGLE_FRAG_RX struct kvec scratch; @@ -309,7 +305,7 @@ ksocknal_lib_kiov_vmap(lnet_kiov_t *kiov, int niov, } int -ksocknal_lib_recv_kiov(ksock_conn_t *conn) +ksocknal_lib_recv_kiov(struct ksock_conn *conn) { #if SOCKNAL_SINGLE_FRAG_RX || !SOCKNAL_RISK_KMAP_DEADLOCK struct kvec scratch; @@ -393,7 +389,7 @@ ksocknal_lib_recv_kiov(ksock_conn_t *conn) } void -ksocknal_lib_csum_tx(ksock_tx_t *tx) +ksocknal_lib_csum_tx(struct ksock_tx *tx) { int i; __u32 csum; @@ -432,7 +428,7 @@ ksocknal_lib_csum_tx(ksock_tx_t *tx) } int -ksocknal_lib_get_conn_tunables(ksock_conn_t *conn, int *txmem, int *rxmem, int *nagle) +ksocknal_lib_get_conn_tunables(struct ksock_conn *conn, int *txmem, int *rxmem, int *nagle) { struct socket *sock = conn->ksnc_sock; int len; @@ -562,7 +558,7 @@ ksocknal_lib_setup_sock(struct socket *sock) } void -ksocknal_lib_push_conn(ksock_conn_t *conn) +ksocknal_lib_push_conn(struct ksock_conn *conn) { struct sock *sk; struct tcp_sock *tp; @@ -599,7 +595,7 @@ ksocknal_lib_push_conn(ksock_conn_t *conn) static void ksocknal_data_ready(struct sock *sk) { - ksock_conn_t *conn; + struct ksock_conn *conn; /* interleave correctly with closing sockets... 
*/ LASSERT(!in_irq()); @@ -619,7 +615,7 @@ ksocknal_data_ready(struct sock *sk) static void ksocknal_write_space(struct sock *sk) { - ksock_conn_t *conn; + struct ksock_conn *conn; int wspace; int min_wpace; @@ -663,23 +659,22 @@ ksocknal_write_space(struct sock *sk) } void -ksocknal_lib_save_callback(struct socket *sock, ksock_conn_t *conn) +ksocknal_lib_save_callback(struct socket *sock, struct ksock_conn *conn) { conn->ksnc_saved_data_ready = sock->sk->sk_data_ready; conn->ksnc_saved_write_space = sock->sk->sk_write_space; } void -ksocknal_lib_set_callback(struct socket *sock, ksock_conn_t *conn) +ksocknal_lib_set_callback(struct socket *sock, struct ksock_conn *conn) { sock->sk->sk_user_data = conn; sock->sk->sk_data_ready = ksocknal_data_ready; sock->sk->sk_write_space = ksocknal_write_space; - return; } void -ksocknal_lib_reset_callback(struct socket *sock, ksock_conn_t *conn) +ksocknal_lib_reset_callback(struct socket *sock, struct ksock_conn *conn) { /* * Remove conn's network callbacks. @@ -695,15 +690,13 @@ ksocknal_lib_reset_callback(struct socket *sock, ksock_conn_t *conn) * sk_user_data is NULL. */ sock->sk->sk_user_data = NULL; - - return ; } int -ksocknal_lib_memory_pressure(ksock_conn_t *conn) +ksocknal_lib_memory_pressure(struct ksock_conn *conn) { int rc = 0; - ksock_sched_t *sched; + struct ksock_sched *sched; sched = conn->ksnc_scheduler; spin_lock_bh(&sched->kss_lock); diff --git a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_modparams.c b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_modparams.c index 6329cbe66573..fc7eec83ac07 100644 --- a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_modparams.c +++ b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_modparams.c @@ -139,7 +139,7 @@ module_param(protocol, int, 0644); MODULE_PARM_DESC(protocol, "protocol version"); #endif -ksock_tunables_t ksocknal_tunables; +struct ksock_tunables ksocknal_tunables; int ksocknal_tunables_init(void) { diff --git a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_proto.c b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_proto.c index 32cc31e4cc29..82e174f6d9fe 100644 --- a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_proto.c +++ b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_proto.c @@ -38,8 +38,8 @@ * pro_match_tx() : Called holding glock */ -static ksock_tx_t * -ksocknal_queue_tx_msg_v1(ksock_conn_t *conn, ksock_tx_t *tx_msg) +static struct ksock_tx * +ksocknal_queue_tx_msg_v1(struct ksock_conn *conn, struct ksock_tx *tx_msg) { /* V1.x, just enqueue it */ list_add_tail(&tx_msg->tx_list, &conn->ksnc_tx_queue); @@ -47,9 +47,9 @@ ksocknal_queue_tx_msg_v1(ksock_conn_t *conn, ksock_tx_t *tx_msg) } void -ksocknal_next_tx_carrier(ksock_conn_t *conn) +ksocknal_next_tx_carrier(struct ksock_conn *conn) { - ksock_tx_t *tx = conn->ksnc_tx_carrier; + struct ksock_tx *tx = conn->ksnc_tx_carrier; /* Called holding BH lock: conn->ksnc_scheduler->kss_lock */ LASSERT(!list_empty(&conn->ksnc_tx_queue)); @@ -66,10 +66,10 @@ ksocknal_next_tx_carrier(ksock_conn_t *conn) } static int -ksocknal_queue_tx_zcack_v2(ksock_conn_t *conn, - ksock_tx_t *tx_ack, __u64 cookie) +ksocknal_queue_tx_zcack_v2(struct ksock_conn *conn, + struct ksock_tx *tx_ack, __u64 cookie) { - ksock_tx_t *tx = conn->ksnc_tx_carrier; + struct ksock_tx *tx = conn->ksnc_tx_carrier; LASSERT(!tx_ack || tx_ack->tx_msg.ksm_type == KSOCK_MSG_NOOP); @@ -112,10 +112,10 @@ ksocknal_queue_tx_zcack_v2(ksock_conn_t *conn, return 1; } -static ksock_tx_t * -ksocknal_queue_tx_msg_v2(ksock_conn_t *conn, ksock_tx_t *tx_msg) +static 
struct ksock_tx * +ksocknal_queue_tx_msg_v2(struct ksock_conn *conn, struct ksock_tx *tx_msg) { - ksock_tx_t *tx = conn->ksnc_tx_carrier; + struct ksock_tx *tx = conn->ksnc_tx_carrier; /* * Enqueue tx_msg: @@ -149,10 +149,10 @@ ksocknal_queue_tx_msg_v2(ksock_conn_t *conn, ksock_tx_t *tx_msg) } static int -ksocknal_queue_tx_zcack_v3(ksock_conn_t *conn, - ksock_tx_t *tx_ack, __u64 cookie) +ksocknal_queue_tx_zcack_v3(struct ksock_conn *conn, + struct ksock_tx *tx_ack, __u64 cookie) { - ksock_tx_t *tx; + struct ksock_tx *tx; if (conn->ksnc_type != SOCKLND_CONN_ACK) return ksocknal_queue_tx_zcack_v2(conn, tx_ack, cookie); @@ -267,7 +267,7 @@ ksocknal_queue_tx_zcack_v3(ksock_conn_t *conn, } static int -ksocknal_match_tx(ksock_conn_t *conn, ksock_tx_t *tx, int nonblk) +ksocknal_match_tx(struct ksock_conn *conn, struct ksock_tx *tx, int nonblk) { int nob; @@ -311,7 +311,7 @@ ksocknal_match_tx(ksock_conn_t *conn, ksock_tx_t *tx, int nonblk) } static int -ksocknal_match_tx_v3(ksock_conn_t *conn, ksock_tx_t *tx, int nonblk) +ksocknal_match_tx_v3(struct ksock_conn *conn, struct ksock_tx *tx, int nonblk) { int nob; @@ -355,18 +355,18 @@ ksocknal_match_tx_v3(ksock_conn_t *conn, ksock_tx_t *tx, int nonblk) /* (Sink) handle incoming ZC request from sender */ static int -ksocknal_handle_zcreq(ksock_conn_t *c, __u64 cookie, int remote) +ksocknal_handle_zcreq(struct ksock_conn *c, __u64 cookie, int remote) { - ksock_peer_t *peer = c->ksnc_peer; - ksock_conn_t *conn; - ksock_tx_t *tx; + struct ksock_peer *peer = c->ksnc_peer; + struct ksock_conn *conn; + struct ksock_tx *tx; int rc; read_lock(&ksocknal_data.ksnd_global_lock); conn = ksocknal_find_conn_locked(peer, NULL, !!remote); if (conn) { - ksock_sched_t *sched = conn->ksnc_scheduler; + struct ksock_sched *sched = conn->ksnc_scheduler; LASSERT(conn->ksnc_proto->pro_queue_tx_zcack); @@ -399,12 +399,12 @@ ksocknal_handle_zcreq(ksock_conn_t *c, __u64 cookie, int remote) /* (Sender) handle ZC_ACK from sink */ static int -ksocknal_handle_zcack(ksock_conn_t *conn, __u64 cookie1, __u64 cookie2) +ksocknal_handle_zcack(struct ksock_conn *conn, __u64 cookie1, __u64 cookie2) { - ksock_peer_t *peer = conn->ksnc_peer; - ksock_tx_t *tx; - ksock_tx_t *temp; - ksock_tx_t *tmp; + struct ksock_peer *peer = conn->ksnc_peer; + struct ksock_tx *tx; + struct ksock_tx *temp; + struct ksock_tx *tmp; LIST_HEAD(zlist); int count; @@ -446,7 +446,7 @@ ksocknal_handle_zcack(ksock_conn_t *conn, __u64 cookie1, __u64 cookie2) } static int -ksocknal_send_hello_v1(ksock_conn_t *conn, ksock_hello_msg_t *hello) +ksocknal_send_hello_v1(struct ksock_conn *conn, ksock_hello_msg_t *hello) { struct socket *sock = conn->ksnc_sock; lnet_hdr_t *hdr; @@ -503,7 +503,7 @@ ksocknal_send_hello_v1(ksock_conn_t *conn, ksock_hello_msg_t *hello) if (!hello->kshm_nips) goto out; - for (i = 0; i < (int) hello->kshm_nips; i++) + for (i = 0; i < (int)hello->kshm_nips; i++) hello->kshm_ips[i] = __cpu_to_le32(hello->kshm_ips[i]); rc = lnet_sock_write(sock, hello->kshm_ips, @@ -521,7 +521,7 @@ out: } static int -ksocknal_send_hello_v2(ksock_conn_t *conn, ksock_hello_msg_t *hello) +ksocknal_send_hello_v2(struct ksock_conn *conn, ksock_hello_msg_t *hello) { struct socket *sock = conn->ksnc_sock; int rc; @@ -563,7 +563,7 @@ ksocknal_send_hello_v2(ksock_conn_t *conn, ksock_hello_msg_t *hello) } static int -ksocknal_recv_hello_v1(ksock_conn_t *conn, ksock_hello_msg_t *hello, +ksocknal_recv_hello_v1(struct ksock_conn *conn, ksock_hello_msg_t *hello, int timeout) { struct socket *sock = conn->ksnc_sock; @@ -622,7 +622,7 
@@ ksocknal_recv_hello_v1(ksock_conn_t *conn, ksock_hello_msg_t *hello, goto out; } - for (i = 0; i < (int) hello->kshm_nips; i++) { + for (i = 0; i < (int)hello->kshm_nips; i++) { hello->kshm_ips[i] = __le32_to_cpu(hello->kshm_ips[i]); if (!hello->kshm_ips[i]) { @@ -639,7 +639,7 @@ out: } static int -ksocknal_recv_hello_v2(ksock_conn_t *conn, ksock_hello_msg_t *hello, int timeout) +ksocknal_recv_hello_v2(struct ksock_conn *conn, ksock_hello_msg_t *hello, int timeout) { struct socket *sock = conn->ksnc_sock; int rc; @@ -690,7 +690,7 @@ ksocknal_recv_hello_v2(ksock_conn_t *conn, ksock_hello_msg_t *hello, int timeout return rc; } - for (i = 0; i < (int) hello->kshm_nips; i++) { + for (i = 0; i < (int)hello->kshm_nips; i++) { if (conn->ksnc_flip) __swab32s(&hello->kshm_ips[i]); @@ -705,7 +705,7 @@ ksocknal_recv_hello_v2(ksock_conn_t *conn, ksock_hello_msg_t *hello, int timeout } static void -ksocknal_pack_msg_v1(ksock_tx_t *tx) +ksocknal_pack_msg_v1(struct ksock_tx *tx) { /* V1.x has no KSOCK_MSG_NOOP */ LASSERT(tx->tx_msg.ksm_type != KSOCK_MSG_NOOP); @@ -719,7 +719,7 @@ ksocknal_pack_msg_v1(ksock_tx_t *tx) } static void -ksocknal_pack_msg_v2(ksock_tx_t *tx) +ksocknal_pack_msg_v2(struct ksock_tx *tx) { tx->tx_iov[0].iov_base = &tx->tx_msg; @@ -755,7 +755,7 @@ ksocknal_unpack_msg_v2(ksock_msg_t *msg) return; /* Do nothing */ } -ksock_proto_t ksocknal_protocol_v1x = { +struct ksock_proto ksocknal_protocol_v1x = { .pro_version = KSOCK_PROTO_V1, .pro_send_hello = ksocknal_send_hello_v1, .pro_recv_hello = ksocknal_recv_hello_v1, @@ -768,7 +768,7 @@ ksock_proto_t ksocknal_protocol_v1x = { .pro_match_tx = ksocknal_match_tx }; -ksock_proto_t ksocknal_protocol_v2x = { +struct ksock_proto ksocknal_protocol_v2x = { .pro_version = KSOCK_PROTO_V2, .pro_send_hello = ksocknal_send_hello_v2, .pro_recv_hello = ksocknal_recv_hello_v2, @@ -781,7 +781,7 @@ ksock_proto_t ksocknal_protocol_v2x = { .pro_match_tx = ksocknal_match_tx }; -ksock_proto_t ksocknal_protocol_v3x = { +struct ksock_proto ksocknal_protocol_v3x = { .pro_version = KSOCK_PROTO_V3, .pro_send_hello = ksocknal_send_hello_v2, .pro_recv_hello = ksocknal_recv_hello_v2, diff --git a/drivers/staging/lustre/lnet/libcfs/debug.c b/drivers/staging/lustre/lnet/libcfs/debug.c index c3d628bac5b8..42b15a769183 100644 --- a/drivers/staging/lustre/lnet/libcfs/debug.c +++ b/drivers/staging/lustre/lnet/libcfs/debug.c @@ -15,11 +15,7 @@ * * You should have received a copy of the GNU General Public License * version 2 along with this program; If not, see - * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf - * - * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, - * CA 95054 USA or visit www.sun.com if you need additional information or - * have any questions. 
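The ksocknal_protocol_v1x/v2x/v3x tables above are the socklnd's wire-version dispatch: the hello handshake selects a struct ksock_proto, and the rest of the driver calls through its function pointers (conn->ksnc_proto->pro_pack(tx) and friends) without testing the version again. Note that v3x reuses the v2 hello handlers and differs mainly in queueing and matching. A trimmed, hedged sketch of the pattern — these are not the driver's real signatures:

struct demo_proto {
	int  pro_version;
	int  (*pro_send_hello)(void *conn, void *hello);
	void (*pro_pack)(void *tx);
};

static int  demo_send_hello_v2(void *conn, void *hello) { return 0; }
static void demo_pack_v2(void *tx) { }

static struct demo_proto demo_protocol_v2x = {
	.pro_version    = 2,
	.pro_send_hello = demo_send_hello_v2,
	.pro_pack       = demo_pack_v2,
};

/* a connection stores a pointer to one table; generic code dispatches
 * through it, so adding a wire version means adding a table, not ifs */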
+ * http://www.gnu.org/licenses/gpl-2.0.html * * GPL HEADER END */ @@ -232,130 +228,24 @@ int libcfs_panic_in_progress; static const char * libcfs_debug_subsys2str(int subsys) { - switch (1 << subsys) { - default: + static const char *libcfs_debug_subsystems[] = LIBCFS_DEBUG_SUBSYS_NAMES; + + if (subsys >= ARRAY_SIZE(libcfs_debug_subsystems)) return NULL; - case S_UNDEFINED: - return "undefined"; - case S_MDC: - return "mdc"; - case S_MDS: - return "mds"; - case S_OSC: - return "osc"; - case S_OST: - return "ost"; - case S_CLASS: - return "class"; - case S_LOG: - return "log"; - case S_LLITE: - return "llite"; - case S_RPC: - return "rpc"; - case S_LNET: - return "lnet"; - case S_LND: - return "lnd"; - case S_PINGER: - return "pinger"; - case S_FILTER: - return "filter"; - case S_ECHO: - return "echo"; - case S_LDLM: - return "ldlm"; - case S_LOV: - return "lov"; - case S_LQUOTA: - return "lquota"; - case S_OSD: - return "osd"; - case S_LFSCK: - return "lfsck"; - case S_LMV: - return "lmv"; - case S_SEC: - return "sec"; - case S_GSS: - return "gss"; - case S_MGC: - return "mgc"; - case S_MGS: - return "mgs"; - case S_FID: - return "fid"; - case S_FLD: - return "fld"; - } + + return libcfs_debug_subsystems[subsys]; } /* libcfs_debug_token2mask() expects the returned string in lower-case */ static const char * libcfs_debug_dbg2str(int debug) { - switch (1 << debug) { - default: + static const char *libcfs_debug_masks[] = LIBCFS_DEBUG_MASKS_NAMES; + + if (debug >= ARRAY_SIZE(libcfs_debug_masks)) return NULL; - case D_TRACE: - return "trace"; - case D_INODE: - return "inode"; - case D_SUPER: - return "super"; - case D_EXT2: - return "ext2"; - case D_MALLOC: - return "malloc"; - case D_CACHE: - return "cache"; - case D_INFO: - return "info"; - case D_IOCTL: - return "ioctl"; - case D_NETERROR: - return "neterror"; - case D_NET: - return "net"; - case D_WARNING: - return "warning"; - case D_BUFFS: - return "buffs"; - case D_OTHER: - return "other"; - case D_DENTRY: - return "dentry"; - case D_NETTRACE: - return "nettrace"; - case D_PAGE: - return "page"; - case D_DLMTRACE: - return "dlmtrace"; - case D_ERROR: - return "error"; - case D_EMERG: - return "emerg"; - case D_HA: - return "ha"; - case D_RPCTRACE: - return "rpctrace"; - case D_VFSTRACE: - return "vfstrace"; - case D_READA: - return "reada"; - case D_MMAP: - return "mmap"; - case D_CONFIG: - return "config"; - case D_CONSOLE: - return "console"; - case D_QUOTA: - return "quota"; - case D_SEC: - return "sec"; - case D_LFSCK: - return "lfsck"; - } + + return libcfs_debug_masks[debug]; } int @@ -472,12 +362,12 @@ void libcfs_debug_dumplog(void) * get to schedule() */ init_waitqueue_entry(&wait, current); - set_current_state(TASK_INTERRUPTIBLE); add_wait_queue(&debug_ctlwq, &wait); dumper = kthread_run(libcfs_debug_dumplog_thread, (void *)(long)current_pid(), "libcfs_debug_dumper"); + set_current_state(TASK_INTERRUPTIBLE); if (IS_ERR(dumper)) pr_err("LustreError: cannot start log dump thread: %ld\n", PTR_ERR(dumper)); diff --git a/drivers/staging/lustre/lnet/libcfs/fail.c b/drivers/staging/lustre/lnet/libcfs/fail.c index dadaf7685cbd..9288ee08d1f7 100644 --- a/drivers/staging/lustre/lnet/libcfs/fail.c +++ b/drivers/staging/lustre/lnet/libcfs/fail.c @@ -16,10 +16,6 @@ * You should have received a copy of the GNU General Public License * version 2 along with this program; If not, see http://www.gnu.org/licenses * - * Please contact Oracle Corporation, Inc., 500 Oracle Parkway, Redwood Shores, - * CA 94065 USA or visit www.oracle.com if you 
need additional information or - * have any questions. - * * GPL HEADER END */ /* @@ -41,6 +37,9 @@ EXPORT_SYMBOL(cfs_fail_loc); unsigned int cfs_fail_val; EXPORT_SYMBOL(cfs_fail_val); +int cfs_fail_err; +EXPORT_SYMBOL(cfs_fail_err); + DECLARE_WAIT_QUEUE_HEAD(cfs_race_waitq); EXPORT_SYMBOL(cfs_race_waitq); diff --git a/drivers/staging/lustre/lnet/libcfs/hash.c b/drivers/staging/lustre/lnet/libcfs/hash.c index f60feb3a3dc7..23283b6e09ab 100644 --- a/drivers/staging/lustre/lnet/libcfs/hash.c +++ b/drivers/staging/lustre/lnet/libcfs/hash.c @@ -15,11 +15,7 @@ * * You should have received a copy of the GNU General Public License * version 2 along with this program; If not, see - * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf - * - * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, - * CA 95054 USA or visit www.sun.com if you need additional information or - * have any questions. + * http://www.gnu.org/licenses/gpl-2.0.html * * GPL HEADER END */ @@ -942,10 +938,10 @@ cfs_hash_buckets_realloc(struct cfs_hash *hs, struct cfs_hash_bucket **old_bkts, * @flags - CFS_HASH_REHASH enable synamic hash resizing * - CFS_HASH_SORT enable chained hash sort */ -static int cfs_hash_rehash_worker(cfs_workitem_t *wi); +static int cfs_hash_rehash_worker(struct cfs_workitem *wi); #if CFS_HASH_DEBUG_LEVEL >= CFS_HASH_DEBUG_1 -static int cfs_hash_dep_print(cfs_workitem_t *wi) +static int cfs_hash_dep_print(struct cfs_workitem *wi) { struct cfs_hash *hs = container_of(wi, struct cfs_hash, hs_dep_wi); int dep; @@ -1847,7 +1843,7 @@ cfs_hash_rehash_bd(struct cfs_hash *hs, struct cfs_hash_bd *old) } static int -cfs_hash_rehash_worker(cfs_workitem_t *wi) +cfs_hash_rehash_worker(struct cfs_workitem *wi) { struct cfs_hash *hs = container_of(wi, struct cfs_hash, hs_rehash_wi); struct cfs_hash_bucket **bkts; diff --git a/drivers/staging/lustre/lnet/libcfs/libcfs_lock.c b/drivers/staging/lustre/lnet/libcfs/libcfs_lock.c index 2de9eeae0232..83543f928279 100644 --- a/drivers/staging/lustre/lnet/libcfs/libcfs_lock.c +++ b/drivers/staging/lustre/lnet/libcfs/libcfs_lock.c @@ -49,7 +49,8 @@ EXPORT_SYMBOL(cfs_percpt_lock_free); * reason we always allocate cacheline-aligned memory block. 
*/ struct cfs_percpt_lock * -cfs_percpt_lock_alloc(struct cfs_cpt_table *cptab) +cfs_percpt_lock_create(struct cfs_cpt_table *cptab, + struct lock_class_key *keys) { struct cfs_percpt_lock *pcl; spinlock_t *lock; @@ -67,12 +68,18 @@ cfs_percpt_lock_alloc(struct cfs_cpt_table *cptab) return NULL; } - cfs_percpt_for_each(lock, i, pcl->pcl_locks) + if (!keys) + CWARN("Cannot setup class key for percpt lock, you may see recursive locking warnings which are actually fake.\n"); + + cfs_percpt_for_each(lock, i, pcl->pcl_locks) { spin_lock_init(lock); + if (keys != NULL) + lockdep_set_class(lock, &keys[i]); + } return pcl; } -EXPORT_SYMBOL(cfs_percpt_lock_alloc); +EXPORT_SYMBOL(cfs_percpt_lock_create); /** * lock a CPU partition @@ -142,44 +149,3 @@ cfs_percpt_unlock(struct cfs_percpt_lock *pcl, int index) } } EXPORT_SYMBOL(cfs_percpt_unlock); - -/** free cpu-partition refcount */ -void -cfs_percpt_atomic_free(atomic_t **refs) -{ - cfs_percpt_free(refs); -} -EXPORT_SYMBOL(cfs_percpt_atomic_free); - -/** allocate cpu-partition refcount with initial value @init_val */ -atomic_t ** -cfs_percpt_atomic_alloc(struct cfs_cpt_table *cptab, int init_val) -{ - atomic_t **refs; - atomic_t *ref; - int i; - - refs = cfs_percpt_alloc(cptab, sizeof(*ref)); - if (!refs) - return NULL; - - cfs_percpt_for_each(ref, i, refs) - atomic_set(ref, init_val); - return refs; -} -EXPORT_SYMBOL(cfs_percpt_atomic_alloc); - -/** return sum of cpu-partition refs */ -int -cfs_percpt_atomic_summary(atomic_t **refs) -{ - atomic_t *ref; - int i; - int val = 0; - - cfs_percpt_for_each(ref, i, refs) - val += atomic_read(ref); - - return val; -} -EXPORT_SYMBOL(cfs_percpt_atomic_summary); diff --git a/drivers/staging/lustre/lnet/libcfs/libcfs_mem.c b/drivers/staging/lustre/lnet/libcfs/libcfs_mem.c index c5a6951516ed..d0e81bb41cdc 100644 --- a/drivers/staging/lustre/lnet/libcfs/libcfs_mem.c +++ b/drivers/staging/lustre/lnet/libcfs/libcfs_mem.c @@ -115,34 +115,6 @@ cfs_percpt_number(void *vars) EXPORT_SYMBOL(cfs_percpt_number); /* - * return memory block shadowed from current CPU - */ -void * -cfs_percpt_current(void *vars) -{ - struct cfs_var_array *arr; - int cpt; - - arr = container_of(vars, struct cfs_var_array, va_ptrs[0]); - cpt = cfs_cpt_current(arr->va_cptab, 0); - if (cpt < 0) - return NULL; - - return arr->va_ptrs[cpt]; -} - -void * -cfs_percpt_index(void *vars, int idx) -{ - struct cfs_var_array *arr; - - arr = container_of(vars, struct cfs_var_array, va_ptrs[0]); - - LASSERT(idx >= 0 && idx < arr->va_count); - return arr->va_ptrs[idx]; -} - -/* * free variable array, see more detail in cfs_array_alloc */ void diff --git a/drivers/staging/lustre/lnet/libcfs/libcfs_string.c b/drivers/staging/lustre/lnet/libcfs/libcfs_string.c index 50ac1536db4b..fc697cdfcdaf 100644 --- a/drivers/staging/lustre/lnet/libcfs/libcfs_string.c +++ b/drivers/staging/lustre/lnet/libcfs/libcfs_string.c @@ -15,11 +15,7 @@ * * You should have received a copy of the GNU General Public License * version 2 along with this program; If not, see - * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf - * - * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, - * CA 95054 USA or visit www.sun.com if you need additional information or - * have any questions. 
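cfs_percpt_lock_alloc() becomes cfs_percpt_lock_create() above and grows a struct lock_class_key array parameter. The reason: when every sub-lock of a lock array is initialized from the same spin_lock_init() site, lockdep puts them all in one class, so legitimately nesting two of them looks like recursive locking — the false positive the new CWARN message mentions. A minimal sketch of the idea, with a fixed-size array standing in for the cpt allocator and illustrative names:

#include <linux/spinlock.h>
#include <linux/lockdep.h>

#define DEMO_NLOCKS 4

struct demo_percpt_lock {
	spinlock_t pcl_locks[DEMO_NLOCKS];
};

static void demo_percpt_lock_init(struct demo_percpt_lock *pcl,
				  struct lock_class_key *keys)
{
	int i;

	for (i = 0; i < DEMO_NLOCKS; i++) {
		spin_lock_init(&pcl->pcl_locks[i]);
		if (keys)	/* one distinct lockdep class per sub-lock */
			lockdep_set_class(&pcl->pcl_locks[i], &keys[i]);
	}
}

Callers would typically keep the key array static, so each call site forms its own lockdep class, which is what passing keys into the create function enables here.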
+ * http://www.gnu.org/licenses/gpl-2.0.html * * GPL HEADER END */ diff --git a/drivers/staging/lustre/lnet/libcfs/linux/linux-cpu.c b/drivers/staging/lustre/lnet/libcfs/linux/linux-cpu.c index 389fb9eeea75..b52518c54efe 100644 --- a/drivers/staging/lustre/lnet/libcfs/linux/linux-cpu.c +++ b/drivers/staging/lustre/lnet/libcfs/linux/linux-cpu.c @@ -755,8 +755,13 @@ cfs_cpt_table_create(int ncpt) struct cfs_cpu_partition *part; int n; - if (cpt >= ncpt) - goto failed; + /* + * Each emulated NUMA node has all allowed CPUs in + * the mask. + * End loop when all partitions have assigned CPUs. + */ + if (cpt == ncpt) + break; part = &cptab->ctb_parts[cpt]; diff --git a/drivers/staging/lustre/lnet/libcfs/linux/linux-crypto.c b/drivers/staging/lustre/lnet/libcfs/linux/linux-crypto.c index 8c9377ed850c..5c0116ade909 100644 --- a/drivers/staging/lustre/lnet/libcfs/linux/linux-crypto.c +++ b/drivers/staging/lustre/lnet/libcfs/linux/linux-crypto.c @@ -30,13 +30,34 @@ #include <crypto/hash.h> #include <linux/scatterlist.h> #include "../../../include/linux/libcfs/libcfs.h" +#include "../../../include/linux/libcfs/libcfs_crypto.h" #include "linux-crypto.h" + /** - * Array of hash algorithm speed in MByte per second + * Array of hash algorithm speed in MByte per second */ static int cfs_crypto_hash_speeds[CFS_HASH_ALG_MAX]; -static int cfs_crypto_hash_alloc(unsigned char alg_id, +/** + * Initialize the state descriptor for the specified hash algorithm. + * + * An internal routine to allocate the hash-specific state in \a hdesc for + * use with cfs_crypto_hash_digest() to compute the hash of a single message, + * though possibly in multiple chunks. The descriptor internal state should + * be freed with cfs_crypto_hash_final(). + * + * \param[in] hash_alg hash algorithm id (CFS_HASH_ALG_*) + * \param[out] type pointer to the hash description in hash_types[] + * array + * \param[in,out] hdesc hash state descriptor to be initialized + * \param[in] key initial hash value/state, NULL to use default + * value + * \param[in] key_len length of \a key + * + * \retval 0 on success + * \retval negative errno on failure + */ +static int cfs_crypto_hash_alloc(enum cfs_crypto_hash_alg hash_alg, const struct cfs_crypto_hash_type **type, struct ahash_request **req, unsigned char *key, @@ -45,11 +66,11 @@ static int cfs_crypto_hash_alloc(unsigned char alg_id, struct crypto_ahash *tfm; int err = 0; - *type = cfs_crypto_hash_type(alg_id); + *type = cfs_crypto_hash_type(hash_alg); if (!*type) { CWARN("Unsupported hash algorithm id = %d, max id is %d\n", - alg_id, CFS_HASH_ALG_MAX); + hash_alg, CFS_HASH_ALG_MAX); return -EINVAL; } tfm = crypto_alloc_ahash((*type)->cht_name, 0, CRYPTO_ALG_ASYNC); @@ -70,12 +91,6 @@ static int cfs_crypto_hash_alloc(unsigned char alg_id, ahash_request_set_callback(*req, 0, NULL, NULL); - /** Shash have different logic for initialization then digest - * shash: crypto_hash_setkey, crypto_hash_init - * digest: crypto_digest_init, crypto_digest_setkey - * Skip this function for digest, because we use shash logic at - * cfs_crypto_hash_alloc. 
- */ if (key) err = crypto_ahash_setkey(tfm, key, key_len); else if ((*type)->cht_key != 0) @@ -84,13 +99,14 @@ static int cfs_crypto_hash_alloc(unsigned char alg_id, (*type)->cht_size); if (err != 0) { + ahash_request_free(*req); crypto_free_ahash(tfm); return err; } CDEBUG(D_INFO, "Using crypto hash: %s (%s) speed %d MB/s\n", crypto_ahash_alg_name(tfm), crypto_ahash_driver_name(tfm), - cfs_crypto_hash_speeds[alg_id]); + cfs_crypto_hash_speeds[hash_alg]); err = crypto_ahash_init(*req); if (err) { @@ -100,7 +116,33 @@ static int cfs_crypto_hash_alloc(unsigned char alg_id, return err; } -int cfs_crypto_hash_digest(unsigned char alg_id, +/** + * Calculate hash digest for the passed buffer. + * + * This should be used when computing the hash on a single contiguous buffer. + * It combines the hash initialization, computation, and cleanup. + * + * \param[in] hash_alg id of hash algorithm (CFS_HASH_ALG_*) + * \param[in] buf data buffer on which to compute hash + * \param[in] buf_len length of \a buf in bytes + * \param[in] key initial value/state for algorithm, + * if \a key = NULL use default initial value + * \param[in] key_len length of \a key in bytes + * \param[out] hash pointer to computed hash value, + * if \a hash = NULL then \a hash_len is to digest + * size in bytes, retval -ENOSPC + * \param[in,out] hash_len size of \a hash buffer + * + * \retval -EINVAL \a buf, \a buf_len, \a hash_len, + * \a hash_alg invalid + * \retval -ENOENT \a hash_alg is unsupported + * \retval -ENOSPC \a hash is NULL, or \a hash_len less than + * digest size + * \retval 0 for success + * \retval negative errno for other errors from lower + * layers. + */ +int cfs_crypto_hash_digest(enum cfs_crypto_hash_alg hash_alg, const void *buf, unsigned int buf_len, unsigned char *key, unsigned int key_len, unsigned char *hash, unsigned int *hash_len) @@ -113,7 +155,7 @@ int cfs_crypto_hash_digest(unsigned char alg_id, if (!buf || buf_len == 0 || !hash_len) return -EINVAL; - err = cfs_crypto_hash_alloc(alg_id, &type, &req, key, key_len); + err = cfs_crypto_hash_alloc(hash_alg, &type, &req, key, key_len); if (err != 0) return err; @@ -134,15 +176,32 @@ int cfs_crypto_hash_digest(unsigned char alg_id, } EXPORT_SYMBOL(cfs_crypto_hash_digest); +/** + * Allocate and initialize desriptor for hash algorithm. + * + * This should be used to initialize a hash descriptor for multiple calls + * to a single hash function when computing the hash across multiple + * separate buffers or pages using cfs_crypto_hash_update{,_page}(). + * + * The hash descriptor should be freed with cfs_crypto_hash_final(). 
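For orientation, the cfs_crypto_hash_alloc() hunks above sit on the kernel ahash API: allocate a transform, bind a request to it, optionally set a key, then drive init/update/final. A hedged sketch of the simplest one-shot digest over a flat buffer — "crc32c" is only an example algorithm name, and error handling is trimmed to the essentials:

#include <crypto/hash.h>
#include <linux/scatterlist.h>
#include <linux/err.h>

static int demo_hash_buf(const void *buf, unsigned int len, u8 *out)
{
	struct crypto_ahash *tfm;
	struct ahash_request *req;
	struct scatterlist sl;
	int rc;

	tfm = crypto_alloc_ahash("crc32c", 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	req = ahash_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		crypto_free_ahash(tfm);
		return -ENOMEM;
	}
	ahash_request_set_callback(req, 0, NULL, NULL);

	sg_init_one(&sl, buf, len);
	ahash_request_set_crypt(req, &sl, out, len);	/* out >= digestsize */
	rc = crypto_ahash_digest(req);			/* init+update+final */

	ahash_request_free(req);
	crypto_free_ahash(tfm);
	return rc;
}

The hunk above also fixes a leak on the error path: when crypto_ahash_setkey() fails, the request is now freed with ahash_request_free() before the transform is released.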
+ * + * \param[in] hash_alg algorithm id (CFS_HASH_ALG_*) + * \param[in] key initial value/state for algorithm, if \a key = NULL + * use default initial value + * \param[in] key_len length of \a key in bytes + * + * \retval pointer to descriptor of hash instance + * \retval ERR_PTR(errno) in case of error + */ struct cfs_crypto_hash_desc * - cfs_crypto_hash_init(unsigned char alg_id, - unsigned char *key, unsigned int key_len) +cfs_crypto_hash_init(enum cfs_crypto_hash_alg hash_alg, + unsigned char *key, unsigned int key_len) { struct ahash_request *req; int err; const struct cfs_crypto_hash_type *type; - err = cfs_crypto_hash_alloc(alg_id, &type, &req, key, key_len); + err = cfs_crypto_hash_alloc(hash_alg, &type, &req, key, key_len); if (err) return ERR_PTR(err); @@ -150,6 +209,17 @@ struct cfs_crypto_hash_desc * } EXPORT_SYMBOL(cfs_crypto_hash_init); +/** + * Update hash digest computed on data within the given \a page + * + * \param[in] hdesc hash state descriptor + * \param[in] page data page on which to compute the hash + * \param[in] offset offset within \a page at which to start hash + * \param[in] len length of data on which to compute hash + * + * \retval 0 for success + * \retval negative errno on failure + */ int cfs_crypto_hash_update_page(struct cfs_crypto_hash_desc *hdesc, struct page *page, unsigned int offset, unsigned int len) @@ -158,13 +228,23 @@ int cfs_crypto_hash_update_page(struct cfs_crypto_hash_desc *hdesc, struct scatterlist sl; sg_init_table(&sl, 1); - sg_set_page(&sl, page, len, offset & ~CFS_PAGE_MASK); + sg_set_page(&sl, page, len, offset & ~PAGE_MASK); ahash_request_set_crypt(req, &sl, NULL, sl.length); return crypto_ahash_update(req); } EXPORT_SYMBOL(cfs_crypto_hash_update_page); +/** + * Update hash digest computed on the specified data + * + * \param[in] hdesc hash state descriptor + * \param[in] buf data buffer on which to compute the hash + * \param[in] buf_len length of \buf on which to compute hash + * + * \retval 0 for success + * \retval negative errno on failure + */ int cfs_crypto_hash_update(struct cfs_crypto_hash_desc *hdesc, const void *buf, unsigned int buf_len) { @@ -178,7 +258,18 @@ int cfs_crypto_hash_update(struct cfs_crypto_hash_desc *hdesc, } EXPORT_SYMBOL(cfs_crypto_hash_update); -/* If hash_len pointer is NULL - destroy descriptor. 
*/ +/** + * Finish hash calculation, copy hash digest to buffer, clean up hash descriptor + * + * \param[in] hdesc hash descriptor + * \param[out] hash pointer to hash buffer to store hash digest + * \param[in,out] hash_len pointer to hash buffer size, if \a hdesc = NULL + * only free \a hdesc instead of computing the hash + * + * \retval 0 for success + * \retval -EOVERFLOW if hash_len is too small for the hash digest + * \retval negative errno for other errors from lower layers + */ int cfs_crypto_hash_final(struct cfs_crypto_hash_desc *hdesc, unsigned char *hash, unsigned int *hash_len) { @@ -186,99 +277,153 @@ int cfs_crypto_hash_final(struct cfs_crypto_hash_desc *hdesc, struct ahash_request *req = (void *)hdesc; int size = crypto_ahash_digestsize(crypto_ahash_reqtfm(req)); - if (!hash_len) { - crypto_free_ahash(crypto_ahash_reqtfm(req)); - ahash_request_free(req); - return 0; + if (!hash || !hash_len) { + err = 0; + goto free_ahash; } - if (!hash || *hash_len < size) { - *hash_len = size; - return -ENOSPC; + if (*hash_len < size) { + err = -EOVERFLOW; + goto free_ahash; } + ahash_request_set_crypt(req, NULL, hash, 0); err = crypto_ahash_final(req); - - if (err < 0) { - /* May be caller can fix error */ - return err; - } + if (!err) + *hash_len = size; +free_ahash: crypto_free_ahash(crypto_ahash_reqtfm(req)); ahash_request_free(req); return err; } EXPORT_SYMBOL(cfs_crypto_hash_final); -static void cfs_crypto_performance_test(unsigned char alg_id, - const unsigned char *buf, - unsigned int buf_len) +/** + * Compute the speed of specified hash function + * + * Run a speed test on the given hash algorithm on buffer of the given size. + * The speed is stored internally in the cfs_crypto_hash_speeds[] array, and + * is available through the cfs_crypto_hash_speed() function. 
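Pieced together, the reworked cfs_crypto_hash_final() above funnels every exit through one cleanup label so the request and transform are always freed, reports -EOVERFLOW (rather than -ENOSPC) for an undersized buffer, and treats a NULL hash or hash_len as a plain teardown request. In essence, the function now reads:

int cfs_crypto_hash_final(struct cfs_crypto_hash_desc *hdesc,
			  unsigned char *hash, unsigned int *hash_len)
{
	struct ahash_request *req = (void *)hdesc;
	int size = crypto_ahash_digestsize(crypto_ahash_reqtfm(req));
	int err;

	if (!hash || !hash_len) {	/* caller only wants teardown */
		err = 0;
		goto free_ahash;
	}
	if (*hash_len < size) {
		err = -EOVERFLOW;	/* was -ENOSPC */
		goto free_ahash;
	}

	ahash_request_set_crypt(req, NULL, hash, 0);
	err = crypto_ahash_final(req);
	if (!err)
		*hash_len = size;
free_ahash:
	crypto_free_ahash(crypto_ahash_reqtfm(req));
	ahash_request_free(req);
	return err;
}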
+ * + * \param[in] hash_alg hash algorithm id (CFS_HASH_ALG_*) + * \param[in] buf data buffer on which to compute the hash + * \param[in] buf_len length of \buf on which to compute hash + */ +static void cfs_crypto_performance_test(enum cfs_crypto_hash_alg hash_alg) { + int buf_len = max(PAGE_SIZE, 1048576UL); + void *buf; unsigned long start, end; int bcount, err = 0; - int sec = 1; /* do test only 1 sec */ - unsigned char hash[64]; - unsigned int hash_len = 64; - - for (start = jiffies, end = start + sec * HZ, bcount = 0; - time_before(jiffies, end); bcount++) { - err = cfs_crypto_hash_digest(alg_id, buf, buf_len, NULL, 0, - hash, &hash_len); + struct page *page; + unsigned char hash[CFS_CRYPTO_HASH_DIGESTSIZE_MAX]; + unsigned int hash_len = sizeof(hash); + + page = alloc_page(GFP_KERNEL); + if (!page) { + err = -ENOMEM; + goto out_err; + } + + buf = kmap(page); + memset(buf, 0xAD, PAGE_SIZE); + kunmap(page); + + for (start = jiffies, end = start + msecs_to_jiffies(MSEC_PER_SEC), + bcount = 0; time_before(jiffies, end); bcount++) { + struct cfs_crypto_hash_desc *hdesc; + int i; + + hdesc = cfs_crypto_hash_init(hash_alg, NULL, 0); + if (IS_ERR(hdesc)) { + err = PTR_ERR(hdesc); + break; + } + + for (i = 0; i < buf_len / PAGE_SIZE; i++) { + err = cfs_crypto_hash_update_page(hdesc, page, 0, + PAGE_SIZE); + if (err) + break; + } + + err = cfs_crypto_hash_final(hdesc, hash, &hash_len); if (err) break; } end = jiffies; - + __free_page(page); +out_err: if (err) { - cfs_crypto_hash_speeds[alg_id] = -1; - CDEBUG(D_INFO, "Crypto hash algorithm %s, err = %d\n", - cfs_crypto_hash_name(alg_id), err); + cfs_crypto_hash_speeds[hash_alg] = err; + CDEBUG(D_INFO, "Crypto hash algorithm %s test error: rc = %d\n", + cfs_crypto_hash_name(hash_alg), err); } else { unsigned long tmp; tmp = ((bcount * buf_len / jiffies_to_msecs(end - start)) * 1000) / (1024 * 1024); - cfs_crypto_hash_speeds[alg_id] = (int)tmp; + cfs_crypto_hash_speeds[hash_alg] = (int)tmp; + CDEBUG(D_CONFIG, "Crypto hash algorithm %s speed = %d MB/s\n", + cfs_crypto_hash_name(hash_alg), + cfs_crypto_hash_speeds[hash_alg]); } - CDEBUG(D_INFO, "Crypto hash algorithm %s speed = %d MB/s\n", - cfs_crypto_hash_name(alg_id), cfs_crypto_hash_speeds[alg_id]); } -int cfs_crypto_hash_speed(unsigned char hash_alg) +/** + * hash speed in Mbytes per second for valid hash algorithm + * + * Return the performance of the specified \a hash_alg that was previously + * computed using cfs_crypto_performance_test(). + * + * \param[in] hash_alg hash algorithm id (CFS_HASH_ALG_*) + * + * \retval positive speed of the hash function in MB/s + * \retval -ENOENT if \a hash_alg is unsupported + * \retval negative errno if \a hash_alg speed is unavailable + */ +int cfs_crypto_hash_speed(enum cfs_crypto_hash_alg hash_alg) { if (hash_alg < CFS_HASH_ALG_MAX) return cfs_crypto_hash_speeds[hash_alg]; - return -1; + return -ENOENT; } EXPORT_SYMBOL(cfs_crypto_hash_speed); /** - * Do performance test for all hash algorithms. + * Run the performance test for all hash algorithms. + * + * Run the cfs_crypto_performance_test() benchmark for all of the available + * hash functions using a 1MB buffer size. This is a reasonable buffer size + * for Lustre RPCs, even if the actual RPC size is larger or smaller. 
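A worked instance of the MB/s conversion above, assuming the 1 MiB benchmark buffer is hashed 1000 times in exactly 1000 ms worth of jiffies:

	bcount = 1000, buf_len = 1048576, elapsed = 1000 ms
	((1000 * 1048576 / 1000) * 1000) / (1024 * 1024) = 1000 MB/s

The arithmetic is integer throughout, so the stored figure truncates; jiffies granularity limits its precision, and for very fast engines the int product bcount * buf_len can approach overflow, so the number is an estimate for comparing algorithms rather than a precise measurement.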
+ * + * Since the setup cost and computation speed of various hash algorithms is + * a function of the buffer size (and possibly internal contention of offload + * engines), this speed only represents an estimate of the actual speed under + * actual usage, but is reasonable for comparing available algorithms. + * + * The actual speeds are available via cfs_crypto_hash_speed() for later + * comparison. + * + * \retval 0 on success + * \retval -ENOMEM if no memory is available for test buffer */ static int cfs_crypto_test_hashes(void) { - unsigned char i; - unsigned char *data; - unsigned int j; - /* Data block size for testing hash. Maximum - * kmalloc size for 2.6.18 kernel is 128K - */ - unsigned int data_len = 1 * 128 * 1024; - - data = kmalloc(data_len, 0); - if (!data) - return -ENOMEM; + enum cfs_crypto_hash_alg hash_alg; - for (j = 0; j < data_len; j++) - data[j] = j & 0xff; + for (hash_alg = 0; hash_alg < CFS_HASH_ALG_MAX; hash_alg++) + cfs_crypto_performance_test(hash_alg); - for (i = 0; i < CFS_HASH_ALG_MAX; i++) - cfs_crypto_performance_test(i, data, data_len); - - kfree(data); return 0; } static int adler32; +/** + * Register available hash functions + * + * \retval 0 + */ int cfs_crypto_register(void) { request_module("crc32c"); @@ -290,6 +435,9 @@ int cfs_crypto_register(void) return 0; } +/** + * Unregister previously registered hash functions + */ void cfs_crypto_unregister(void) { if (adler32 == 0) diff --git a/drivers/staging/lustre/lnet/libcfs/linux/linux-curproc.c b/drivers/staging/lustre/lnet/libcfs/linux/linux-curproc.c index 13d31e8a931d..3e22cad18a8b 100644 --- a/drivers/staging/lustre/lnet/libcfs/linux/linux-curproc.c +++ b/drivers/staging/lustre/lnet/libcfs/linux/linux-curproc.c @@ -15,11 +15,7 @@ * * You should have received a copy of the GNU General Public License * version 2 along with this program; If not, see - * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf - * - * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, - * CA 95054 USA or visit www.sun.com if you need additional information or - * have any questions. + * http://www.gnu.org/licenses/gpl-2.0.html * * GPL HEADER END */ diff --git a/drivers/staging/lustre/lnet/libcfs/linux/linux-debug.c b/drivers/staging/lustre/lnet/libcfs/linux/linux-debug.c index 638e4b33d3a9..435b784c52f8 100644 --- a/drivers/staging/lustre/lnet/libcfs/linux/linux-debug.c +++ b/drivers/staging/lustre/lnet/libcfs/linux/linux-debug.c @@ -15,11 +15,7 @@ * * You should have received a copy of the GNU General Public License * version 2 along with this program; If not, see - * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf - * - * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, - * CA 95054 USA or visit www.sun.com if you need additional information or - * have any questions. 
+ * http://www.gnu.org/licenses/gpl-2.0.html * * GPL HEADER END */ diff --git a/drivers/staging/lustre/lnet/libcfs/linux/linux-mem.c b/drivers/staging/lustre/lnet/libcfs/linux/linux-mem.c index 86f32ffc5d04..a6a76a681ea9 100644 --- a/drivers/staging/lustre/lnet/libcfs/linux/linux-mem.c +++ b/drivers/staging/lustre/lnet/libcfs/linux/linux-mem.c @@ -11,7 +11,7 @@ * * You should have received a copy of the GNU General Public License * version 2 along with this program; If not, see - * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf + * http://www.gnu.org/licenses/gpl-2.0.html * */ /* diff --git a/drivers/staging/lustre/lnet/libcfs/linux/linux-module.c b/drivers/staging/lustre/lnet/libcfs/linux/linux-module.c index ebc60ac9bb7a..38308f8b6aae 100644 --- a/drivers/staging/lustre/lnet/libcfs/linux/linux-module.c +++ b/drivers/staging/lustre/lnet/libcfs/linux/linux-module.c @@ -15,11 +15,7 @@ * * You should have received a copy of the GNU General Public License * version 2 along with this program; If not, see - * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf - * - * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, - * CA 95054 USA or visit www.sun.com if you need additional information or - * have any questions. + * http://www.gnu.org/licenses/gpl-2.0.html * * GPL HEADER END */ @@ -40,10 +36,75 @@ #define LNET_MINOR 240 +static inline size_t libcfs_ioctl_packlen(struct libcfs_ioctl_data *data) +{ + size_t len = sizeof(*data); + + len += cfs_size_round(data->ioc_inllen1); + len += cfs_size_round(data->ioc_inllen2); + return len; +} + +static inline bool libcfs_ioctl_is_invalid(struct libcfs_ioctl_data *data) +{ + if (data->ioc_hdr.ioc_len > BIT(30)) { + CERROR("LIBCFS ioctl: ioc_len larger than 1<<30\n"); + return true; + } + if (data->ioc_inllen1 > BIT(30)) { + CERROR("LIBCFS ioctl: ioc_inllen1 larger than 1<<30\n"); + return true; + } + if (data->ioc_inllen2 > BIT(30)) { + CERROR("LIBCFS ioctl: ioc_inllen2 larger than 1<<30\n"); + return true; + } + if (data->ioc_inlbuf1 && !data->ioc_inllen1) { + CERROR("LIBCFS ioctl: inlbuf1 pointer but 0 length\n"); + return true; + } + if (data->ioc_inlbuf2 && !data->ioc_inllen2) { + CERROR("LIBCFS ioctl: inlbuf2 pointer but 0 length\n"); + return true; + } + if (data->ioc_pbuf1 && !data->ioc_plen1) { + CERROR("LIBCFS ioctl: pbuf1 pointer but 0 length\n"); + return true; + } + if (data->ioc_pbuf2 && !data->ioc_plen2) { + CERROR("LIBCFS ioctl: pbuf2 pointer but 0 length\n"); + return true; + } + if (data->ioc_plen1 && !data->ioc_pbuf1) { + CERROR("LIBCFS ioctl: plen1 nonzero but no pbuf1 pointer\n"); + return true; + } + if (data->ioc_plen2 && !data->ioc_pbuf2) { + CERROR("LIBCFS ioctl: plen2 nonzero but no pbuf2 pointer\n"); + return true; + } + if ((__u32)libcfs_ioctl_packlen(data) != data->ioc_hdr.ioc_len) { + CERROR("LIBCFS ioctl: packlen != ioc_len\n"); + return true; + } + if (data->ioc_inllen1 && + data->ioc_bulk[data->ioc_inllen1 - 1] != '\0') { + CERROR("LIBCFS ioctl: inlbuf1 not 0 terminated\n"); + return true; + } + if (data->ioc_inllen2 && + data->ioc_bulk[cfs_size_round(data->ioc_inllen1) + + data->ioc_inllen2 - 1] != '\0') { + CERROR("LIBCFS ioctl: inlbuf2 not 0 terminated\n"); + return true; + } + return false; +} + int libcfs_ioctl_data_adjust(struct libcfs_ioctl_data *data) { if (libcfs_ioctl_is_invalid(data)) { - CERROR("LNET: ioctl not correctly formatted\n"); + CERROR("libcfs ioctl: parameter not correctly formatted\n"); return -EINVAL; } @@ -57,68 +118,47 @@ int 
libcfs_ioctl_data_adjust(struct libcfs_ioctl_data *data) return 0; } -int libcfs_ioctl_getdata_len(const struct libcfs_ioctl_hdr __user *arg, - __u32 *len) +int libcfs_ioctl_getdata(struct libcfs_ioctl_hdr **hdr_pp, + const struct libcfs_ioctl_hdr __user *uhdr) { struct libcfs_ioctl_hdr hdr; + int err = 0; - if (copy_from_user(&hdr, arg, sizeof(hdr))) + if (copy_from_user(&hdr, uhdr, sizeof(hdr))) return -EFAULT; if (hdr.ioc_version != LIBCFS_IOCTL_VERSION && hdr.ioc_version != LIBCFS_IOCTL_VERSION2) { - CERROR("LNET: version mismatch expected %#x, got %#x\n", + CERROR("libcfs ioctl: version mismatch expected %#x, got %#x\n", LIBCFS_IOCTL_VERSION, hdr.ioc_version); return -EINVAL; } - *len = hdr.ioc_len; - - return 0; -} - -int libcfs_ioctl_popdata(void __user *arg, void *data, int size) -{ - if (copy_to_user(arg, data, size)) - return -EFAULT; - return 0; -} - -static int -libcfs_psdev_open(struct inode *inode, struct file *file) -{ - int rc = 0; + if (hdr.ioc_len < sizeof(struct libcfs_ioctl_data)) { + CERROR("libcfs ioctl: user buffer too small for ioctl\n"); + return -EINVAL; + } - if (!inode) + if (hdr.ioc_len > LIBCFS_IOC_DATA_MAX) { + CERROR("libcfs ioctl: user buffer is too large %d/%d\n", + hdr.ioc_len, LIBCFS_IOC_DATA_MAX); return -EINVAL; - if (libcfs_psdev_ops.p_open) - rc = libcfs_psdev_ops.p_open(0, NULL); - else - return -EPERM; - return rc; -} + } -/* called when closing /dev/device */ -static int -libcfs_psdev_release(struct inode *inode, struct file *file) -{ - int rc = 0; + LIBCFS_ALLOC(*hdr_pp, hdr.ioc_len); + if (!*hdr_pp) + return -ENOMEM; - if (!inode) - return -EINVAL; - if (libcfs_psdev_ops.p_close) - rc = libcfs_psdev_ops.p_close(0, NULL); - else - rc = -EPERM; - return rc; + if (copy_from_user(*hdr_pp, uhdr, hdr.ioc_len)) { + LIBCFS_FREE(*hdr_pp, hdr.ioc_len); + err = -EFAULT; + } + return err; } -static long libcfs_ioctl(struct file *file, - unsigned int cmd, unsigned long arg) +static long +libcfs_psdev_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { - struct cfs_psdev_file pfile; - int rc = 0; - if (!capable(CAP_SYS_ADMIN)) return -EACCES; @@ -130,26 +170,12 @@ static long libcfs_ioctl(struct file *file, return -EINVAL; } - /* Handle platform-dependent IOC requests */ - switch (cmd) { - case IOC_LIBCFS_PANIC: - if (!capable(CFS_CAP_SYS_BOOT)) - return -EPERM; - panic("debugctl-invoked panic"); - return 0; - } - - if (libcfs_psdev_ops.p_ioctl) - rc = libcfs_psdev_ops.p_ioctl(&pfile, cmd, (void __user *)arg); - else - rc = -EPERM; - return rc; + return libcfs_ioctl(cmd, (void __user *)arg); } static const struct file_operations libcfs_fops = { - .unlocked_ioctl = libcfs_ioctl, - .open = libcfs_psdev_open, - .release = libcfs_psdev_release, + .owner = THIS_MODULE, + .unlocked_ioctl = libcfs_psdev_ioctl, }; struct miscdevice libcfs_dev = { diff --git a/drivers/staging/lustre/lnet/libcfs/linux/linux-prim.c b/drivers/staging/lustre/lnet/libcfs/linux/linux-prim.c index 89084460231a..291d286eab48 100644 --- a/drivers/staging/lustre/lnet/libcfs/linux/linux-prim.c +++ b/drivers/staging/lustre/lnet/libcfs/linux/linux-prim.c @@ -15,11 +15,7 @@ * * You should have received a copy of the GNU General Public License * version 2 along with this program; If not, see - * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf - * - * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, - * CA 95054 USA or visit www.sun.com if you need additional information or - * have any questions. 
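/*
 * libcfs_ioctl_getdata() above is the standard two-step ioctl copy-in:
 * fetch the fixed header, bound the advertised length, then allocate and
 * copy the whole buffer. A condensed sketch with plain kmalloc() in
 * place of LIBCFS_ALLOC() and hypothetical min/max bounds; note that the
 * length used everywhere afterwards is the one from the first fetch:
 */
#include <linux/slab.h>
#include <linux/uaccess.h>

struct example_hdr {
	u32 ioc_len;
	u32 ioc_version;
};

static int example_getdata(struct example_hdr **out,
			   const struct example_hdr __user *uhdr,
			   u32 min_len, u32 max_len)
{
	struct example_hdr hdr;

	if (copy_from_user(&hdr, uhdr, sizeof(hdr)))
		return -EFAULT;
	if (hdr.ioc_len < min_len || hdr.ioc_len > max_len)
		return -EINVAL;		/* bounds the allocation (DoS guard) */

	*out = kmalloc(hdr.ioc_len, GFP_KERNEL);
	if (!*out)
		return -ENOMEM;
	if (copy_from_user(*out, uhdr, hdr.ioc_len)) {
		kfree(*out);
		return -EFAULT;
	}
	return 0;
}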
+ * http://www.gnu.org/licenses/gpl-2.0.html * * GPL HEADER END */ @@ -46,30 +42,6 @@ #include <linux/kgdb.h> #endif -/** - * wait_queue_t of Linux (version < 2.6.34) is a FIFO list for exclusively - * waiting threads, which is not always desirable because all threads will - * be waken up again and again, even user only needs a few of them to be - * active most time. This is not good for performance because cache can - * be polluted by different threads. - * - * LIFO list can resolve this problem because we always wakeup the most - * recent active thread by default. - * - * NB: please don't call non-exclusive & exclusive wait on the same - * waitq if add_wait_queue_exclusive_head is used. - */ -void -add_wait_queue_exclusive_head(wait_queue_head_t *waitq, wait_queue_t *link) -{ - unsigned long flags; - - spin_lock_irqsave(&waitq->lock, flags); - __add_wait_queue_exclusive(waitq, link); - spin_unlock_irqrestore(&waitq->lock, flags); -} -EXPORT_SYMBOL(add_wait_queue_exclusive_head); - sigset_t cfs_block_allsigs(void) { @@ -128,13 +100,6 @@ cfs_restore_sigs(sigset_t old) } EXPORT_SYMBOL(cfs_restore_sigs); -int -cfs_signal_pending(void) -{ - return signal_pending(current); -} -EXPORT_SYMBOL(cfs_signal_pending); - void cfs_clear_sigpending(void) { diff --git a/drivers/staging/lustre/lnet/libcfs/linux/linux-tracefile.c b/drivers/staging/lustre/lnet/libcfs/linux/linux-tracefile.c index 91c2ae8f9d67..8b551d2708ba 100644 --- a/drivers/staging/lustre/lnet/libcfs/linux/linux-tracefile.c +++ b/drivers/staging/lustre/lnet/libcfs/linux/linux-tracefile.c @@ -15,11 +15,7 @@ * * You should have received a copy of the GNU General Public License * version 2 along with this program; If not, see - * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf - * - * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, - * CA 95054 USA or visit www.sun.com if you need additional information or - * have any questions. + * http://www.gnu.org/licenses/gpl-2.0.html * * GPL HEADER END */ diff --git a/drivers/staging/lustre/lnet/libcfs/module.c b/drivers/staging/lustre/lnet/libcfs/module.c index cdc640bfdba8..86b4d25cad46 100644 --- a/drivers/staging/lustre/lnet/libcfs/module.c +++ b/drivers/staging/lustre/lnet/libcfs/module.c @@ -15,11 +15,7 @@ * * You should have received a copy of the GNU General Public License * version 2 along with this program; If not, see - * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf - * - * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, - * CA 95054 USA or visit www.sun.com if you need additional information or - * have any questions. 
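/*
 * The deleted add_wait_queue_exclusive_head() above existed purely for
 * its LIFO wakeup behaviour: linking an exclusive waiter at the head of
 * the queue means __wake_up() picks the most recently idle thread, whose
 * caches are still warm. The core of the removed helper, restated for
 * the wait_queue_t API of this kernel era:
 */
#include <linux/wait.h>

static void example_add_exclusive_head(wait_queue_head_t *waitq,
				       wait_queue_t *link)
{
	unsigned long flags;

	spin_lock_irqsave(&waitq->lock, flags);
	/* flags the entry exclusive but adds at the head, not the tail */
	__add_wait_queue_exclusive(waitq, link);
	spin_unlock_irqrestore(&waitq->lock, flags);
}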
+ * http://www.gnu.org/licenses/gpl-2.0.html * * GPL HEADER END */ @@ -54,9 +50,6 @@ # define DEBUG_SUBSYSTEM S_LNET -#define LNET_MAX_IOCTL_BUF_LEN (sizeof(struct lnet_ioctl_net_config) + \ - sizeof(struct lnet_ioctl_config_data)) - #include "../../include/linux/libcfs/libcfs.h" #include <asm/div64.h> @@ -68,20 +61,6 @@ static struct dentry *lnet_debugfs_root; -/* called when opening /dev/device */ -static int libcfs_psdev_open(unsigned long flags, void *args) -{ - try_module_get(THIS_MODULE); - return 0; -} - -/* called when closing /dev/device */ -static int libcfs_psdev_release(unsigned long flags, void *args) -{ - module_put(THIS_MODULE); - return 0; -} - static DECLARE_RWSEM(ioctl_list_sem); static LIST_HEAD(ioctl_list); @@ -115,39 +94,47 @@ int libcfs_deregister_ioctl(struct libcfs_ioctl_handler *hand) } EXPORT_SYMBOL(libcfs_deregister_ioctl); -static int libcfs_ioctl_handle(struct cfs_psdev_file *pfile, unsigned long cmd, - void __user *arg, struct libcfs_ioctl_hdr *hdr) +int libcfs_ioctl(unsigned long cmd, void __user *uparam) { struct libcfs_ioctl_data *data = NULL; - int err = -EINVAL; + struct libcfs_ioctl_hdr *hdr; + int err; + + /* 'cmd' and permissions get checked in our arch-specific caller */ + err = libcfs_ioctl_getdata(&hdr, uparam); + if (err) { + CDEBUG_LIMIT(D_ERROR, + "libcfs ioctl: data header error %d\n", err); + return err; + } - /* - * The libcfs_ioctl_data_adjust() function performs adjustment - * operations on the libcfs_ioctl_data structure to make - * it usable by the code. This doesn't need to be called - * for new data structures added. - */ if (hdr->ioc_version == LIBCFS_IOCTL_VERSION) { + /* + * The libcfs_ioctl_data_adjust() function performs adjustment + * operations on the libcfs_ioctl_data structure to make + * it usable by the code. This doesn't need to be called + * for new data structures added. + */ data = container_of(hdr, struct libcfs_ioctl_data, ioc_hdr); err = libcfs_ioctl_data_adjust(data); if (err) - return err; + goto out; } + CDEBUG(D_IOCTL, "libcfs ioctl cmd %lu\n", cmd); switch (cmd) { case IOC_LIBCFS_CLEAR_DEBUG: libcfs_debug_clear_buffer(); - return 0; - /* - * case IOC_LIBCFS_PANIC: - * Handled in arch/cfs_module.c - */ + break; + case IOC_LIBCFS_MARK_DEBUG: - if (!data->ioc_inlbuf1 || - data->ioc_inlbuf1[data->ioc_inllen1 - 1] != '\0') - return -EINVAL; + if (!data || !data->ioc_inlbuf1 || + data->ioc_inlbuf1[data->ioc_inllen1 - 1] != '\0') { + err = -EINVAL; + goto out; + } libcfs_debug_mark_buffer(data->ioc_inlbuf1); - return 0; + break; default: { struct libcfs_ioctl_handler *hand; @@ -156,67 +143,23 @@ static int libcfs_ioctl_handle(struct cfs_psdev_file *pfile, unsigned long cmd, down_read(&ioctl_list_sem); list_for_each_entry(hand, &ioctl_list, item) { err = hand->handle_ioctl(cmd, hdr); - if (err != -EINVAL) { - if (err == 0) - err = libcfs_ioctl_popdata(arg, - hdr, hdr->ioc_len); - break; + if (err == -EINVAL) + continue; + + if (!err) { + if (copy_to_user(uparam, hdr, hdr->ioc_len)) + err = -EFAULT; } + break; } up_read(&ioctl_list_sem); - break; - } - } - - return err; -} - -static int libcfs_ioctl(struct cfs_psdev_file *pfile, unsigned long cmd, - void __user *arg) -{ - struct libcfs_ioctl_hdr *hdr; - int err = 0; - __u32 buf_len; - - err = libcfs_ioctl_getdata_len(arg, &buf_len); - if (err) - return err; - - /* - * do a check here to restrict the size of the memory - * to allocate to guard against DoS attacks. 
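/*
 * The dispatcher above tries each registered handler in turn under a
 * read lock; -EINVAL is the agreed "not my command" return, and anything
 * else ends the walk. The same pattern in isolation, with illustrative
 * types:
 */
#include <linux/list.h>
#include <linux/rwsem.h>

struct example_ioctl_handler {
	struct list_head item;
	int (*handle_ioctl)(unsigned long cmd, void *hdr);
};

static DECLARE_RWSEM(example_list_sem);
static LIST_HEAD(example_handlers);

static int example_dispatch(unsigned long cmd, void *hdr)
{
	struct example_ioctl_handler *hand;
	int err = -EINVAL;

	down_read(&example_list_sem);
	list_for_each_entry(hand, &example_handlers, item) {
		err = hand->handle_ioctl(cmd, hdr);
		if (err != -EINVAL)	/* claimed: success or a real error */
			break;
	}
	up_read(&example_list_sem);
	return err;			/* still -EINVAL if nobody claimed it */
}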
- */ - if (buf_len > LNET_MAX_IOCTL_BUF_LEN) { - CERROR("LNET: user buffer exceeds kernel buffer\n"); - return -EINVAL; - } - - LIBCFS_ALLOC_GFP(hdr, buf_len, GFP_KERNEL); - if (!hdr) - return -ENOMEM; - - /* 'cmd' and permissions get checked in our arch-specific caller */ - if (copy_from_user(hdr, arg, buf_len)) { - CERROR("LNET ioctl: data error\n"); - err = -EFAULT; - goto out; + break; } } - - err = libcfs_ioctl_handle(pfile, cmd, arg, hdr); - out: - LIBCFS_FREE(hdr, buf_len); + LIBCFS_FREE(hdr, hdr->ioc_len); return err; } -struct cfs_psdev_ops libcfs_psdev_ops = { - libcfs_psdev_open, - libcfs_psdev_release, - NULL, - NULL, - libcfs_ioctl -}; - int lprocfs_call_handler(void *data, int write, loff_t *ppos, void __user *buffer, size_t *lenp, int (*handler)(void *data, int write, loff_t pos, @@ -478,6 +421,13 @@ static struct ctl_table lnet_table[] = { .proc_handler = &proc_dointvec }, { + .procname = "fail_err", + .data = &cfs_fail_err, + .maxlen = sizeof(cfs_fail_err), + .mode = 0644, + .proc_handler = &proc_dointvec, + }, + { } }; diff --git a/drivers/staging/lustre/lnet/libcfs/prng.c b/drivers/staging/lustre/lnet/libcfs/prng.c index c75ae9a68e76..a9bdb284fd15 100644 --- a/drivers/staging/lustre/lnet/libcfs/prng.c +++ b/drivers/staging/lustre/lnet/libcfs/prng.c @@ -15,11 +15,7 @@ * * You should have received a copy of the GNU General Public License * version 2 along with this program; If not, see - * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf - * - * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, - * CA 95054 USA or visit www.sun.com if you need additional information or - * have any questions. + * http://www.gnu.org/licenses/gpl-2.0.html * * GPL HEADER END */ diff --git a/drivers/staging/lustre/lnet/libcfs/tracefile.c b/drivers/staging/lustre/lnet/libcfs/tracefile.c index 244eb89eef68..1c7efdfaffcf 100644 --- a/drivers/staging/lustre/lnet/libcfs/tracefile.c +++ b/drivers/staging/lustre/lnet/libcfs/tracefile.c @@ -15,11 +15,7 @@ * * You should have received a copy of the GNU General Public License * version 2 along with this program; If not, see - * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf - * - * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, - * CA 95054 USA or visit www.sun.com if you need additional information or - * have any questions. + * http://www.gnu.org/licenses/gpl-2.0.html * * GPL HEADER END */ @@ -707,10 +703,9 @@ int cfs_tracefile_dump_all_pages(char *filename) struct cfs_trace_page *tage; struct cfs_trace_page *tmp; char *buf; + mm_segment_t __oldfs; int rc; - DECL_MMSPACE; - cfs_tracefile_write_lock(); filp = filp_open(filename, O_CREAT | O_EXCL | O_WRONLY | O_LARGEFILE, @@ -729,11 +724,12 @@ int cfs_tracefile_dump_all_pages(char *filename) rc = 0; goto close; } + __oldfs = get_fs(); + set_fs(get_ds()); /* ok, for now, just write the pages. 
in the future we'll be building * iobufs with the pages and calling generic_direct_IO */ - MMSPACE_OPEN; list_for_each_entry_safe(tage, tmp, &pc.pc_pages, linkage) { __LASSERT_TAGE_INVARIANT(tage); @@ -752,7 +748,7 @@ int cfs_tracefile_dump_all_pages(char *filename) list_del(&tage->linkage); cfs_tage_free(tage); } - MMSPACE_CLOSE; + set_fs(__oldfs); rc = vfs_fsync(filp, 1); if (rc) pr_err("sync returns %d\n", rc); @@ -986,13 +982,12 @@ static int tracefiled(void *arg) struct tracefiled_ctl *tctl = arg; struct cfs_trace_page *tage; struct cfs_trace_page *tmp; + mm_segment_t __oldfs; struct file *filp; char *buf; int last_loop = 0; int rc; - DECL_MMSPACE; - /* we're started late enough that we pick up init's fs context */ /* this is so broken in uml? what on earth is going on? */ @@ -1025,8 +1020,8 @@ static int tracefiled(void *arg) __LASSERT(list_empty(&pc.pc_pages)); goto end_loop; } - - MMSPACE_OPEN; + __oldfs = get_fs(); + set_fs(get_ds()); list_for_each_entry_safe(tage, tmp, &pc.pc_pages, linkage) { static loff_t f_pos; @@ -1051,7 +1046,7 @@ static int tracefiled(void *arg) break; } } - MMSPACE_CLOSE; + set_fs(__oldfs); filp_close(filp, NULL); put_pages_on_daemon_list(&pc); diff --git a/drivers/staging/lustre/lnet/libcfs/tracefile.h b/drivers/staging/lustre/lnet/libcfs/tracefile.h index ac84e7f4c859..d878676bc375 100644 --- a/drivers/staging/lustre/lnet/libcfs/tracefile.h +++ b/drivers/staging/lustre/lnet/libcfs/tracefile.h @@ -15,11 +15,7 @@ * * You should have received a copy of the GNU General Public License * version 2 along with this program; If not, see - * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf - * - * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, - * CA 95054 USA or visit www.sun.com if you need additional information or - * have any questions. + * http://www.gnu.org/licenses/gpl-2.0.html * * GPL HEADER END */ diff --git a/drivers/staging/lustre/lnet/libcfs/workitem.c b/drivers/staging/lustre/lnet/libcfs/workitem.c index c72fe00dce8d..e98c818a14fb 100644 --- a/drivers/staging/lustre/lnet/libcfs/workitem.c +++ b/drivers/staging/lustre/lnet/libcfs/workitem.c @@ -15,11 +15,7 @@ * * You should have received a copy of the GNU General Public License * version 2 along with this program; If not, see - * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf - * - * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, - * CA 95054 USA or visit www.sun.com if you need additional information or - * have any questions. + * http://www.gnu.org/licenses/gpl-2.0.html * * GPL HEADER END */ @@ -111,7 +107,7 @@ cfs_wi_sched_cansleep(struct cfs_wi_sched *sched) * 1. when it returns no one shall try to schedule the workitem. */ void -cfs_wi_exit(struct cfs_wi_sched *sched, cfs_workitem_t *wi) +cfs_wi_exit(struct cfs_wi_sched *sched, struct cfs_workitem *wi) { LASSERT(!in_interrupt()); /* because we use plain spinlock */ LASSERT(!sched->ws_stopping); @@ -138,7 +134,7 @@ EXPORT_SYMBOL(cfs_wi_exit); * cancel schedule request of workitem \a wi */ int -cfs_wi_deschedule(struct cfs_wi_sched *sched, cfs_workitem_t *wi) +cfs_wi_deschedule(struct cfs_wi_sched *sched, struct cfs_workitem *wi) { int rc; @@ -179,7 +175,7 @@ EXPORT_SYMBOL(cfs_wi_deschedule); * be added, and even dynamic creation of serialised queues might be supported. 
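/*
 * Both tracefile hunks above open-code what the old DECL_MMSPACE /
 * MMSPACE_OPEN / MMSPACE_CLOSE macros hid: widening the address limit so
 * vfs_write() accepts kernel buffers. The idiom, as it worked before
 * set_fs() was removed from the kernel:
 */
#include <linux/fs.h>
#include <linux/uaccess.h>

static ssize_t example_kernel_write(struct file *filp, const void *buf,
				    size_t len, loff_t *pos)
{
	mm_segment_t oldfs = get_fs();	/* remember the caller's limit */
	ssize_t rc;

	set_fs(get_ds());	/* permit kernel addresses in vfs_write() */
	rc = vfs_write(filp, (const char __user *)buf, len, pos);
	set_fs(oldfs);		/* always restore before returning */
	return rc;
}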
*/ void -cfs_wi_schedule(struct cfs_wi_sched *sched, cfs_workitem_t *wi) +cfs_wi_schedule(struct cfs_wi_sched *sched, struct cfs_workitem *wi) { LASSERT(!in_interrupt()); /* because we use plain spinlock */ LASSERT(!sched->ws_stopping); @@ -229,12 +225,12 @@ static int cfs_wi_scheduler(void *arg) while (!sched->ws_stopping) { int nloops = 0; int rc; - cfs_workitem_t *wi; + struct cfs_workitem *wi; while (!list_empty(&sched->ws_runq) && nloops < CFS_WI_RESCHED) { - wi = list_entry(sched->ws_runq.next, cfs_workitem_t, - wi_list); + wi = list_entry(sched->ws_runq.next, + struct cfs_workitem, wi_list); LASSERT(wi->wi_scheduled && !wi->wi_running); list_del_init(&wi->wi_list); diff --git a/drivers/staging/lustre/lnet/lnet/acceptor.c b/drivers/staging/lustre/lnet/lnet/acceptor.c index 1452bb3ad9eb..8c50c99d82d5 100644 --- a/drivers/staging/lustre/lnet/lnet/acceptor.c +++ b/drivers/staging/lustre/lnet/lnet/acceptor.c @@ -15,11 +15,7 @@ * * You should have received a copy of the GNU General Public License * version 2 along with this program; If not, see - * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf - * - * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, - * CA 95054 USA or visit www.sun.com if you need additional information or - * have any questions. + * http://www.gnu.org/licenses/gpl-2.0.html * * GPL HEADER END */ diff --git a/drivers/staging/lustre/lnet/lnet/api-ni.c b/drivers/staging/lustre/lnet/lnet/api-ni.c index 8764755544c9..346db892f275 100644 --- a/drivers/staging/lustre/lnet/lnet/api-ni.c +++ b/drivers/staging/lustre/lnet/lnet/api-ni.c @@ -15,11 +15,7 @@ * * You should have received a copy of the GNU General Public License * version 2 along with this program; If not, see - * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf - * - * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, - * CA 95054 USA or visit www.sun.com if you need additional information or - * have any questions. + * http://www.gnu.org/licenses/gpl-2.0.html * * GPL HEADER END */ @@ -1215,9 +1211,9 @@ lnet_shutdown_lndni(struct lnet_ni *ni) } static int -lnet_startup_lndni(struct lnet_ni *ni, __s32 peer_timeout, - __s32 peer_cr, __s32 peer_buf_cr, __s32 credits) +lnet_startup_lndni(struct lnet_ni *ni, struct lnet_ioctl_config_data *conf) { + struct lnet_ioctl_config_lnd_tunables *lnd_tunables = NULL; int rc = -EINVAL; int lnd_type; lnd_t *lnd; @@ -1275,6 +1271,21 @@ lnet_startup_lndni(struct lnet_ni *ni, __s32 peer_timeout, ni->ni_lnd = lnd; + if (conf && conf->cfg_hdr.ioc_len > sizeof(*conf)) + lnd_tunables = (struct lnet_ioctl_config_lnd_tunables *)conf->cfg_bulk; + + if (lnd_tunables) { + LIBCFS_ALLOC(ni->ni_lnd_tunables, + sizeof(*ni->ni_lnd_tunables)); + if (!ni->ni_lnd_tunables) { + mutex_unlock(&the_lnet.ln_lnd_mutex); + rc = -ENOMEM; + goto failed0; + } + memcpy(ni->ni_lnd_tunables, lnd_tunables, + sizeof(*ni->ni_lnd_tunables)); + } + rc = lnd->lnd_startup(ni); mutex_unlock(&the_lnet.ln_lnd_mutex); @@ -1292,20 +1303,28 @@ lnet_startup_lndni(struct lnet_ni *ni, __s32 peer_timeout, * If given some LND tunable parameters, parse those now to * override the values in the NI structure. 
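/*
 * The cfs_wi_scheduler() loop above drains at most CFS_WI_RESCHED items
 * before letting the thread reschedule, so one busy run queue cannot
 * starve everything else. A stripped-down sketch of that bounded drain
 * (the wi_running/wi_scheduled bookkeeping and locking are omitted):
 */
#include <linux/list.h>

#define EXAMPLE_RESCHED 128

struct example_wi {
	struct list_head wi_list;
	int (*wi_action)(struct example_wi *wi);
};

static void example_drain(struct list_head *runq)
{
	int nloops = 0;

	while (!list_empty(runq) && nloops < EXAMPLE_RESCHED) {
		struct example_wi *wi = list_entry(runq->next,
						   struct example_wi, wi_list);

		list_del_init(&wi->wi_list);
		wi->wi_action(wi);	/* the action may requeue the item */
		nloops++;
	}
	/* caller yields (cond_resched() or schedule()) before the next pass */
}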
*/ - if (peer_buf_cr >= 0) - ni->ni_peerrtrcredits = peer_buf_cr; - if (peer_timeout >= 0) - ni->ni_peertimeout = peer_timeout; + if (conf && conf->cfg_config_u.cfg_net.net_peer_rtr_credits >= 0) { + ni->ni_peerrtrcredits = + conf->cfg_config_u.cfg_net.net_peer_rtr_credits; + } + if (conf && conf->cfg_config_u.cfg_net.net_peer_timeout >= 0) { + ni->ni_peertimeout = + conf->cfg_config_u.cfg_net.net_peer_timeout; + } /* * TODO * Note: For now, don't allow the user to change * peertxcredits as this number is used in the * IB LND to control queue depth. - * if (peer_cr != -1) - * ni->ni_peertxcredits = peer_cr; + * + * if (conf && conf->cfg_config_u.cfg_net.net_peer_tx_credits != -1) + * ni->ni_peertxcredits = + * conf->cfg_config_u.cfg_net.net_peer_tx_credits; */ - if (credits >= 0) - ni->ni_maxtxcredits = credits; + if (conf && conf->cfg_config_u.cfg_net.net_max_tx_credits >= 0) { + ni->ni_maxtxcredits = + conf->cfg_config_u.cfg_net.net_max_tx_credits; + } LASSERT(ni->ni_peertimeout <= 0 || lnd->lnd_query); @@ -1367,7 +1386,7 @@ lnet_startup_lndnis(struct list_head *nilist) while (!list_empty(nilist)) { ni = list_entry(nilist->next, lnet_ni_t, ni_list); list_del(&ni->ni_list); - rc = lnet_startup_lndni(ni, -1, -1, -1, -1); + rc = lnet_startup_lndni(ni, NULL); if (rc < 0) goto failed; @@ -1641,25 +1660,20 @@ EXPORT_SYMBOL(LNetNIFini); * parameters * * \param[in] ni network interface structure - * \param[out] cpt_count the number of cpts the ni is on - * \param[out] nid Network Interface ID - * \param[out] peer_timeout NI peer timeout - * \param[out] peer_tx_crdits NI peer transmit credits - * \param[out] peer_rtr_credits NI peer router credits - * \param[out] max_tx_credits NI max transmit credit - * \param[out] net_config Network configuration + * \param[out] config NI configuration */ static void -lnet_fill_ni_info(struct lnet_ni *ni, __u32 *cpt_count, __u64 *nid, - int *peer_timeout, int *peer_tx_credits, - int *peer_rtr_credits, int *max_tx_credits, - struct lnet_ioctl_net_config *net_config) +lnet_fill_ni_info(struct lnet_ni *ni, struct lnet_ioctl_config_data *config) { + struct lnet_ioctl_config_lnd_tunables *lnd_cfg = NULL; + struct lnet_ioctl_net_config *net_config; + size_t min_size, tunable_size = 0; int i; - if (!ni) + if (!ni || !config) return; + net_config = (struct lnet_ioctl_net_config *)config->cfg_bulk; if (!net_config) return; @@ -1675,11 +1689,11 @@ lnet_fill_ni_info(struct lnet_ni *ni, __u32 *cpt_count, __u64 *nid, sizeof(net_config->ni_interfaces[i])); } - *nid = ni->ni_nid; - *peer_timeout = ni->ni_peertimeout; - *peer_tx_credits = ni->ni_peertxcredits; - *peer_rtr_credits = ni->ni_peerrtrcredits; - *max_tx_credits = ni->ni_maxtxcredits; + config->cfg_nid = ni->ni_nid; + config->cfg_config_u.cfg_net.net_peer_timeout = ni->ni_peertimeout; + config->cfg_config_u.cfg_net.net_max_tx_credits = ni->ni_maxtxcredits; + config->cfg_config_u.cfg_net.net_peer_tx_credits = ni->ni_peertxcredits; + config->cfg_config_u.cfg_net.net_peer_rtr_credits = ni->ni_peerrtrcredits; net_config->ni_status = ni->ni_status->ns_status; @@ -1689,18 +1703,40 @@ lnet_fill_ni_info(struct lnet_ni *ni, __u32 *cpt_count, __u64 *nid, for (i = 0; i < num_cpts; i++) net_config->ni_cpts[i] = ni->ni_cpts[i]; - *cpt_count = num_cpts; + config->cfg_ncpts = num_cpts; + } + + /* + * See if user land tools sent in a newer and larger version + * of struct lnet_tunables than what the kernel uses. 
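/*
 * lnet_startup_lndni() above now receives the whole ioctl config record,
 * and per-LND tunables travel as an optional payload after the fixed
 * struct (conf->cfg_bulk), detected by ioc_len exceeding the struct
 * size. A sketch of that trailing-payload convention with hypothetical
 * names; the explicit bounds check is an assumption added for clarity:
 */
#include <linux/slab.h>
#include <linux/string.h>

struct example_conf {
	u32 ioc_len;		/* total length, fixed part included */
	/* ... fixed configuration fields ... */
	char cfg_bulk[0];	/* optional trailing tunables */
};

static void *example_get_tunables(const struct example_conf *conf,
				  size_t tun_size)
{
	void *tun;

	if (conf->ioc_len < sizeof(*conf) + tun_size)
		return NULL;	/* caller sent no (or too little) payload */

	tun = kmalloc(tun_size, GFP_KERNEL);
	if (tun)
		memcpy(tun, conf->cfg_bulk, tun_size);	/* keep a private copy */
	return tun;
}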
+ */ + min_size = sizeof(*config) + sizeof(*net_config); + + if (config->cfg_hdr.ioc_len > min_size) + tunable_size = config->cfg_hdr.ioc_len - min_size; + + /* Don't copy to much data to user space */ + min_size = min(tunable_size, sizeof(*ni->ni_lnd_tunables)); + lnd_cfg = (struct lnet_ioctl_config_lnd_tunables *)net_config->cfg_bulk; + + if (ni->ni_lnd_tunables && lnd_cfg && min_size) { + memcpy(lnd_cfg, ni->ni_lnd_tunables, min_size); + config->cfg_config_u.cfg_net.net_interface_count = 1; + + /* Tell user land that kernel side has less data */ + if (tunable_size > sizeof(*ni->ni_lnd_tunables)) { + min_size = tunable_size - sizeof(ni->ni_lnd_tunables); + config->cfg_hdr.ioc_len -= min_size; + } } } -int -lnet_get_net_config(int idx, __u32 *cpt_count, __u64 *nid, int *peer_timeout, - int *peer_tx_credits, int *peer_rtr_credits, - int *max_tx_credits, - struct lnet_ioctl_net_config *net_config) +static int +lnet_get_net_config(struct lnet_ioctl_config_data *config) { struct lnet_ni *ni; struct list_head *tmp; + int idx = config->cfg_count; int cpt, i = 0; int rc = -ENOENT; @@ -1712,9 +1748,7 @@ lnet_get_net_config(int idx, __u32 *cpt_count, __u64 *nid, int *peer_timeout, ni = list_entry(tmp, lnet_ni_t, ni_list); lnet_ni_lock(ni); - lnet_fill_ni_info(ni, cpt_count, nid, peer_timeout, - peer_tx_credits, peer_rtr_credits, - max_tx_credits, net_config); + lnet_fill_ni_info(ni, config); lnet_ni_unlock(ni); rc = 0; break; @@ -1725,10 +1759,9 @@ lnet_get_net_config(int idx, __u32 *cpt_count, __u64 *nid, int *peer_timeout, } int -lnet_dyn_add_ni(lnet_pid_t requested_pid, char *nets, - __s32 peer_timeout, __s32 peer_cr, __s32 peer_buf_cr, - __s32 credits) +lnet_dyn_add_ni(lnet_pid_t requested_pid, struct lnet_ioctl_config_data *conf) { + char *nets = conf->cfg_config_u.cfg_net.net_intf; lnet_ping_info_t *pinfo; lnet_handle_md_t md_handle; struct lnet_ni *ni; @@ -1773,8 +1806,7 @@ lnet_dyn_add_ni(lnet_pid_t requested_pid, char *nets, list_del_init(&ni->ni_list); - rc = lnet_startup_lndni(ni, peer_timeout, peer_cr, - peer_buf_cr, credits); + rc = lnet_startup_lndni(ni, conf); if (rc) goto failed1; @@ -1864,6 +1896,10 @@ LNetCtl(unsigned int cmd, void *arg) int rc; unsigned long secs_passed; + BUILD_BUG_ON(LIBCFS_IOC_DATA_MAX < + sizeof(struct lnet_ioctl_net_config) + + sizeof(struct lnet_ioctl_config_data)); + switch (cmd) { case IOC_LIBCFS_GET_NI: rc = LNetGetId(data->ioc_count, &id); @@ -1918,27 +1954,14 @@ LNetCtl(unsigned int cmd, void *arg) &config->cfg_config_u.cfg_route.rtr_priority); case IOC_LIBCFS_GET_NET: { - struct lnet_ioctl_net_config *net_config; - size_t total = sizeof(*config) + sizeof(*net_config); - + size_t total = sizeof(*config) + + sizeof(struct lnet_ioctl_net_config); config = arg; if (config->cfg_hdr.ioc_len < total) return -EINVAL; - net_config = (struct lnet_ioctl_net_config *) - config->cfg_bulk; - if (!net_config) - return -EINVAL; - - return lnet_get_net_config(config->cfg_count, - &config->cfg_ncpts, - &config->cfg_nid, - &config->cfg_config_u.cfg_net.net_peer_timeout, - &config->cfg_config_u.cfg_net.net_peer_tx_credits, - &config->cfg_config_u.cfg_net.net_peer_rtr_credits, - &config->cfg_config_u.cfg_net.net_max_tx_credits, - net_config); + return lnet_get_net_config(config); } case IOC_LIBCFS_GET_LNET_STATS: { diff --git a/drivers/staging/lustre/lnet/lnet/config.c b/drivers/staging/lustre/lnet/lnet/config.c index 449069c9e649..a72afdf68bb2 100644 --- a/drivers/staging/lustre/lnet/lnet/config.c +++ b/drivers/staging/lustre/lnet/lnet/config.c @@ -15,11 +15,7 @@ * * 
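/*
 * lnet_fill_ni_info() above has to cope with user-space tools whose
 * lnet_ioctl_config_lnd_tunables may be newer and larger than the
 * kernel's: it copies only the smaller of the two sizes, then shrinks
 * the reported ioc_len when the kernel side had less data. That
 * negotiation, condensed with hypothetical names:
 */
#include <linux/kernel.h>
#include <linux/string.h>

static void example_copy_versioned(void *dst, size_t dst_room,
				   const void *src, size_t src_size,
				   u32 *reported_len)
{
	size_t n = min(dst_room, src_size);

	memcpy(dst, src, n);		/* never overrun either struct */
	if (dst_room > src_size)	/* user's struct is the newer one */
		*reported_len -= dst_room - src_size;
}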
You should have received a copy of the GNU General Public License * version 2 along with this program; If not, see - * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf - * - * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, - * CA 95054 USA or visit www.sun.com if you need additional information or - * have any questions. + * http://www.gnu.org/licenses/gpl-2.0.html * * GPL HEADER END */ @@ -107,6 +103,9 @@ lnet_ni_free(struct lnet_ni *ni) if (ni->ni_cpts) cfs_expr_list_values_free(ni->ni_cpts, ni->ni_ncpts); + if (ni->ni_lnd_tunables) + LIBCFS_FREE(ni->ni_lnd_tunables, sizeof(*ni->ni_lnd_tunables)); + for (i = 0; i < LNET_MAX_INTERFACES && ni->ni_interfaces[i]; i++) { LIBCFS_FREE(ni->ni_interfaces[i], strlen(ni->ni_interfaces[i]) + 1); diff --git a/drivers/staging/lustre/lnet/lnet/lib-eq.c b/drivers/staging/lustre/lnet/lnet/lib-eq.c index adbcadbab1be..d05c6cc797f6 100644 --- a/drivers/staging/lustre/lnet/lnet/lib-eq.c +++ b/drivers/staging/lustre/lnet/lnet/lib-eq.c @@ -15,11 +15,7 @@ * * You should have received a copy of the GNU General Public License * version 2 along with this program; If not, see - * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf - * - * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, - * CA 95054 USA or visit www.sun.com if you need additional information or - * have any questions. + * http://www.gnu.org/licenses/gpl-2.0.html * * GPL HEADER END */ diff --git a/drivers/staging/lustre/lnet/lnet/lib-md.c b/drivers/staging/lustre/lnet/lnet/lib-md.c index 75d31217bf92..1834bf7a27ef 100644 --- a/drivers/staging/lustre/lnet/lnet/lib-md.c +++ b/drivers/staging/lustre/lnet/lnet/lib-md.c @@ -15,11 +15,7 @@ * * You should have received a copy of the GNU General Public License * version 2 along with this program; If not, see - * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf - * - * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, - * CA 95054 USA or visit www.sun.com if you need additional information or - * have any questions. + * http://www.gnu.org/licenses/gpl-2.0.html * * GPL HEADER END */ diff --git a/drivers/staging/lustre/lnet/lnet/lib-me.c b/drivers/staging/lustre/lnet/lnet/lib-me.c index e671aed373df..b430046dc294 100644 --- a/drivers/staging/lustre/lnet/lnet/lib-me.c +++ b/drivers/staging/lustre/lnet/lnet/lib-me.c @@ -15,11 +15,7 @@ * * You should have received a copy of the GNU General Public License * version 2 along with this program; If not, see - * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf - * - * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, - * CA 95054 USA or visit www.sun.com if you need additional information or - * have any questions. + * http://www.gnu.org/licenses/gpl-2.0.html * * GPL HEADER END */ diff --git a/drivers/staging/lustre/lnet/lnet/lib-move.c b/drivers/staging/lustre/lnet/lnet/lib-move.c index f19aa9320e34..e6d3b801d87d 100644 --- a/drivers/staging/lustre/lnet/lnet/lib-move.c +++ b/drivers/staging/lustre/lnet/lnet/lib-move.c @@ -15,11 +15,7 @@ * * You should have received a copy of the GNU General Public License * version 2 along with this program; If not, see - * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf - * - * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, - * CA 95054 USA or visit www.sun.com if you need additional information or - * have any questions. 
+ * http://www.gnu.org/licenses/gpl-2.0.html * * GPL HEADER END */ @@ -407,7 +403,7 @@ lnet_copy_kiov2iov(unsigned int niov, struct kvec *iov, unsigned int iovoffset, LASSERT(niov > 0); LASSERT(nkiov > 0); this_nob = min(iov->iov_len - iovoffset, - (__kernel_size_t) kiov->kiov_len - kiovoffset); + (__kernel_size_t)kiov->kiov_len - kiovoffset); this_nob = min(this_nob, nob); if (!addr) @@ -477,7 +473,7 @@ lnet_copy_iov2kiov(unsigned int nkiov, lnet_kiov_t *kiov, do { LASSERT(nkiov > 0); LASSERT(niov > 0); - this_nob = min((__kernel_size_t) kiov->kiov_len - kiovoffset, + this_nob = min((__kernel_size_t)kiov->kiov_len - kiovoffset, iov->iov_len - iovoffset); this_nob = min(this_nob, nob); @@ -996,7 +992,7 @@ lnet_return_tx_credits_locked(lnet_msg_t *msg) LASSERT(msg2->msg_txpeer->lp_ni == ni); LASSERT(msg2->msg_tx_delayed); - (void) lnet_post_send_locked(msg2, 1); + (void)lnet_post_send_locked(msg2, 1); } } @@ -1019,7 +1015,7 @@ lnet_return_tx_credits_locked(lnet_msg_t *msg) LASSERT(msg2->msg_txpeer == txpeer); LASSERT(msg2->msg_tx_delayed); - (void) lnet_post_send_locked(msg2, 1); + (void)lnet_post_send_locked(msg2, 1); } } @@ -1142,7 +1138,7 @@ routing_off: lnet_msg_t, msg_list); list_del(&msg2->msg_list); - (void) lnet_post_routed_recv_locked(msg2, 1); + (void)lnet_post_routed_recv_locked(msg2, 1); } } if (rxpeer) { diff --git a/drivers/staging/lustre/lnet/lnet/lib-msg.c b/drivers/staging/lustre/lnet/lnet/lib-msg.c index f879d7f28708..910e106e221d 100644 --- a/drivers/staging/lustre/lnet/lnet/lib-msg.c +++ b/drivers/staging/lustre/lnet/lnet/lib-msg.c @@ -15,11 +15,7 @@ * * You should have received a copy of the GNU General Public License * version 2 along with this program; If not, see - * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf - * - * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, - * CA 95054 USA or visit www.sun.com if you need additional information or - * have any questions. + * http://www.gnu.org/licenses/gpl-2.0.html * * GPL HEADER END */ diff --git a/drivers/staging/lustre/lnet/lnet/lo.c b/drivers/staging/lustre/lnet/lnet/lo.c index 468eda611bf8..08402712a452 100644 --- a/drivers/staging/lustre/lnet/lnet/lo.c +++ b/drivers/staging/lustre/lnet/lnet/lo.c @@ -15,11 +15,7 @@ * * You should have received a copy of the GNU General Public License * version 2 along with this program; If not, see - * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf - * - * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, - * CA 95054 USA or visit www.sun.com if you need additional information or - * have any questions. + * http://www.gnu.org/licenses/gpl-2.0.html * * GPL HEADER END */ diff --git a/drivers/staging/lustre/lnet/lnet/module.c b/drivers/staging/lustre/lnet/lnet/module.c index 93037c1168ca..4ffbd3e441e8 100644 --- a/drivers/staging/lustre/lnet/lnet/module.c +++ b/drivers/staging/lustre/lnet/lnet/module.c @@ -15,11 +15,7 @@ * * You should have received a copy of the GNU General Public License * version 2 along with this program; If not, see - * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf - * - * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, - * CA 95054 USA or visit www.sun.com if you need additional information or - * have any questions. 
+ * http://www.gnu.org/licenses/gpl-2.0.html * * GPL HEADER END */ @@ -108,12 +104,7 @@ lnet_dyn_configure(struct libcfs_ioctl_hdr *hdr) rc = -EINVAL; goto out_unlock; } - rc = lnet_dyn_add_ni(LNET_PID_LUSTRE, - conf->cfg_config_u.cfg_net.net_intf, - conf->cfg_config_u.cfg_net.net_peer_timeout, - conf->cfg_config_u.cfg_net.net_peer_tx_credits, - conf->cfg_config_u.cfg_net.net_peer_rtr_credits, - conf->cfg_config_u.cfg_net.net_max_tx_credits); + rc = lnet_dyn_add_ni(LNET_PID_LUSTRE, conf); out_unlock: mutex_unlock(&lnet_config_mutex); @@ -205,7 +196,7 @@ static int __init lnet_init(void) * Have to schedule a separate thread to avoid deadlocking * in modload */ - (void) kthread_run(lnet_configure, NULL, "lnet_initd"); + (void)kthread_run(lnet_configure, NULL, "lnet_initd"); } return 0; diff --git a/drivers/staging/lustre/lnet/lnet/net_fault.c b/drivers/staging/lustre/lnet/lnet/net_fault.c index 7d76f28d3a7a..e4aceb71c4ec 100644 --- a/drivers/staging/lustre/lnet/lnet/net_fault.c +++ b/drivers/staging/lustre/lnet/lnet/net_fault.c @@ -760,9 +760,7 @@ lnet_delay_rule_add(struct lnet_fault_attr *attr) wait_event(delay_dd.dd_ctl_waitq, delay_dd.dd_running); } - init_timer(&rule->dl_timer); - rule->dl_timer.function = delay_timer_cb; - rule->dl_timer.data = (unsigned long)rule; + setup_timer(&rule->dl_timer, delay_timer_cb, (unsigned long)rule); spin_lock_init(&rule->dl_lock); INIT_LIST_HEAD(&rule->dl_msg_list); diff --git a/drivers/staging/lustre/lnet/lnet/nidstrings.c b/drivers/staging/lustre/lnet/lnet/nidstrings.c index ebf468fbc64f..a6d7a6159b8f 100644 --- a/drivers/staging/lustre/lnet/lnet/nidstrings.c +++ b/drivers/staging/lustre/lnet/lnet/nidstrings.c @@ -15,11 +15,7 @@ * * You should have received a copy of the GNU General Public License * version 2 along with this program; If not, see - * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf - * - * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, - * CA 95054 USA or visit www.sun.com if you need additional information or - * have any questions. + * http://www.gnu.org/licenses/gpl-2.0.html * * GPL HEADER END */ diff --git a/drivers/staging/lustre/lnet/lnet/peer.c b/drivers/staging/lustre/lnet/lnet/peer.c index b026feebc03a..e8061916c241 100644 --- a/drivers/staging/lustre/lnet/lnet/peer.c +++ b/drivers/staging/lustre/lnet/lnet/peer.c @@ -15,11 +15,7 @@ * * You should have received a copy of the GNU General Public License * version 2 along with this program; If not, see - * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf - * - * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, - * CA 95054 USA or visit www.sun.com if you need additional information or - * have any questions. 
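/*
 * The net_fault.c hunk above collapses the three-statement init_timer()
 * sequence into setup_timer(), which records the callback and its data
 * cookie in one call. Equivalent forms, for the timer API of this
 * kernel era:
 */
#include <linux/timer.h>
#include <linux/jiffies.h>

static void example_timer_cb(unsigned long data)
{
	/* runs in softirq context when the timer fires */
}

static void example_arm(struct timer_list *t, unsigned long cookie)
{
	/* old: init_timer(t); t->function = cb; t->data = cookie; */
	setup_timer(t, example_timer_cb, cookie);
	mod_timer(t, jiffies + HZ);	/* fire one second from now */
}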
+ * http://www.gnu.org/licenses/gpl-2.0.html * * GPL HEADER END */ diff --git a/drivers/staging/lustre/lnet/lnet/router.c b/drivers/staging/lustre/lnet/lnet/router.c index b01dc424c514..063543233035 100644 --- a/drivers/staging/lustre/lnet/lnet/router.c +++ b/drivers/staging/lustre/lnet/lnet/router.c @@ -18,6 +18,7 @@ */ #define DEBUG_SUBSYSTEM S_LNET +#include <linux/completion.h> #include "../../include/linux/lnet/lib-lnet.h" #define LNET_NRB_TINY_MIN 512 /* min value for each CPT */ @@ -1065,7 +1066,7 @@ lnet_router_checker_start(void) return -EINVAL; } - sema_init(&the_lnet.ln_rc_signal, 0); + init_completion(&the_lnet.ln_rc_signal); rc = LNetEQAlloc(0, lnet_router_checker_event, &the_lnet.ln_rc_eqh); if (rc) { @@ -1079,7 +1080,7 @@ lnet_router_checker_start(void) rc = PTR_ERR(task); CERROR("Can't start router checker thread: %d\n", rc); /* block until event callback signals exit */ - down(&the_lnet.ln_rc_signal); + wait_for_completion(&the_lnet.ln_rc_signal); rc = LNetEQFree(the_lnet.ln_rc_eqh); LASSERT(!rc); the_lnet.ln_rc_state = LNET_RC_STATE_SHUTDOWN; @@ -1112,7 +1113,7 @@ lnet_router_checker_stop(void) wake_up(&the_lnet.ln_rc_waitq); /* block until event callback signals exit */ - down(&the_lnet.ln_rc_signal); + wait_for_completion(&the_lnet.ln_rc_signal); LASSERT(the_lnet.ln_rc_state == LNET_RC_STATE_SHUTDOWN); rc = LNetEQFree(the_lnet.ln_rc_eqh); @@ -1295,7 +1296,7 @@ rescan: lnet_prune_rc_data(1); /* wait for UNLINK */ the_lnet.ln_rc_state = LNET_RC_STATE_SHUTDOWN; - up(&the_lnet.ln_rc_signal); + complete(&the_lnet.ln_rc_signal); /* The unlink event callback will signal final completion */ return 0; } diff --git a/drivers/staging/lustre/lnet/selftest/brw_test.c b/drivers/staging/lustre/lnet/selftest/brw_test.c index dcb6e506f592..13d0454e7fcb 100644 --- a/drivers/staging/lustre/lnet/selftest/brw_test.c +++ b/drivers/staging/lustre/lnet/selftest/brw_test.c @@ -15,11 +15,7 @@ * * You should have received a copy of the GNU General Public License * version 2 along with this program; If not, see - * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf - * - * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, - * CA 95054 USA or visit www.sun.com if you need additional information or - * have any questions. 
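/*
 * The router.c conversion above replaces a semaphore abused as a one-shot
 * signal with struct completion, the dedicated API for "wait until that
 * thread is done". The start/stop handshake in isolation (error handling
 * omitted for brevity):
 */
#include <linux/completion.h>
#include <linux/kthread.h>

static struct completion example_done;

static int example_thread(void *arg)
{
	/* ... main loop runs until asked to shut down ... */
	complete(&example_done);	/* signal the waiter exactly once */
	return 0;
}

static void example_start_then_stop(void)
{
	init_completion(&example_done);	/* must precede kthread_run() */
	kthread_run(example_thread, NULL, "example");
	wait_for_completion(&example_done);	/* block until it exits */
}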
+ * http://www.gnu.org/licenses/gpl-2.0.html * * GPL HEADER END */ @@ -49,10 +45,10 @@ module_param(brw_inject_errors, int, 0644); MODULE_PARM_DESC(brw_inject_errors, "# data errors to inject randomly, zero by default"); static void -brw_client_fini(sfw_test_instance_t *tsi) +brw_client_fini(struct sfw_test_instance *tsi) { - srpc_bulk_t *bulk; - sfw_test_unit_t *tsu; + struct srpc_bulk *bulk; + struct sfw_test_unit *tsu; LASSERT(tsi->tsi_is_client); @@ -67,21 +63,21 @@ brw_client_fini(sfw_test_instance_t *tsi) } static int -brw_client_init(sfw_test_instance_t *tsi) +brw_client_init(struct sfw_test_instance *tsi) { - sfw_session_t *sn = tsi->tsi_batch->bat_session; + struct sfw_session *sn = tsi->tsi_batch->bat_session; int flags; int npg; int len; int opc; - srpc_bulk_t *bulk; - sfw_test_unit_t *tsu; + struct srpc_bulk *bulk; + struct sfw_test_unit *tsu; LASSERT(sn); LASSERT(tsi->tsi_is_client); if (!(sn->sn_features & LST_FEAT_BULK_LEN)) { - test_bulk_req_t *breq = &tsi->tsi_u.bulk_v0; + struct test_bulk_req *breq = &tsi->tsi_u.bulk_v0; opc = breq->blk_opc; flags = breq->blk_flags; @@ -91,9 +87,8 @@ brw_client_init(sfw_test_instance_t *tsi) * but we have to keep it for compatibility */ len = npg * PAGE_SIZE; - } else { - test_bulk_req_v1_t *breq = &tsi->tsi_u.bulk_v1; + struct test_bulk_req_v1 *breq = &tsi->tsi_u.bulk_v1; /* * I should never get this step if it's unknown feature @@ -225,7 +220,7 @@ bad_data: } static void -brw_fill_bulk(srpc_bulk_t *bk, int pattern, __u64 magic) +brw_fill_bulk(struct srpc_bulk *bk, int pattern, __u64 magic) { int i; struct page *pg; @@ -237,7 +232,7 @@ brw_fill_bulk(srpc_bulk_t *bk, int pattern, __u64 magic) } static int -brw_check_bulk(srpc_bulk_t *bk, int pattern, __u64 magic) +brw_check_bulk(struct srpc_bulk *bk, int pattern, __u64 magic) { int i; struct page *pg; @@ -255,14 +250,14 @@ brw_check_bulk(srpc_bulk_t *bk, int pattern, __u64 magic) } static int -brw_client_prep_rpc(sfw_test_unit_t *tsu, - lnet_process_id_t dest, srpc_client_rpc_t **rpcpp) +brw_client_prep_rpc(struct sfw_test_unit *tsu, + lnet_process_id_t dest, struct srpc_client_rpc **rpcpp) { - srpc_bulk_t *bulk = tsu->tsu_private; - sfw_test_instance_t *tsi = tsu->tsu_instance; - sfw_session_t *sn = tsi->tsi_batch->bat_session; - srpc_client_rpc_t *rpc; - srpc_brw_reqst_t *req; + struct srpc_bulk *bulk = tsu->tsu_private; + struct sfw_test_instance *tsi = tsu->tsu_instance; + struct sfw_session *sn = tsi->tsi_batch->bat_session; + struct srpc_client_rpc *rpc; + struct srpc_brw_reqst *req; int flags; int npg; int len; @@ -273,15 +268,14 @@ brw_client_prep_rpc(sfw_test_unit_t *tsu, LASSERT(bulk); if (!(sn->sn_features & LST_FEAT_BULK_LEN)) { - test_bulk_req_t *breq = &tsi->tsi_u.bulk_v0; + struct test_bulk_req *breq = &tsi->tsi_u.bulk_v0; opc = breq->blk_opc; flags = breq->blk_flags; npg = breq->blk_npg; len = npg * PAGE_SIZE; - } else { - test_bulk_req_v1_t *breq = &tsi->tsi_u.bulk_v1; + struct test_bulk_req_v1 *breq = &tsi->tsi_u.bulk_v1; /* * I should never get this step if it's unknown feature @@ -299,7 +293,7 @@ brw_client_prep_rpc(sfw_test_unit_t *tsu, if (rc) return rc; - memcpy(&rpc->crpc_bulk, bulk, offsetof(srpc_bulk_t, bk_iovs[npg])); + memcpy(&rpc->crpc_bulk, bulk, offsetof(struct srpc_bulk, bk_iovs[npg])); if (opc == LST_BRW_WRITE) brw_fill_bulk(&rpc->crpc_bulk, flags, BRW_MAGIC); else @@ -315,21 +309,21 @@ brw_client_prep_rpc(sfw_test_unit_t *tsu, } static void -brw_client_done_rpc(sfw_test_unit_t *tsu, srpc_client_rpc_t *rpc) +brw_client_done_rpc(struct sfw_test_unit *tsu, 
struct srpc_client_rpc *rpc) { __u64 magic = BRW_MAGIC; - sfw_test_instance_t *tsi = tsu->tsu_instance; - sfw_session_t *sn = tsi->tsi_batch->bat_session; - srpc_msg_t *msg = &rpc->crpc_replymsg; - srpc_brw_reply_t *reply = &msg->msg_body.brw_reply; - srpc_brw_reqst_t *reqst = &rpc->crpc_reqstmsg.msg_body.brw_reqst; + struct sfw_test_instance *tsi = tsu->tsu_instance; + struct sfw_session *sn = tsi->tsi_batch->bat_session; + struct srpc_msg *msg = &rpc->crpc_replymsg; + struct srpc_brw_reply *reply = &msg->msg_body.brw_reply; + struct srpc_brw_reqst *reqst = &rpc->crpc_reqstmsg.msg_body.brw_reqst; LASSERT(sn); if (rpc->crpc_status) { CERROR("BRW RPC to %s failed with %d\n", libcfs_id2str(rpc->crpc_dest), rpc->crpc_status); - if (!tsi->tsi_stopping) /* rpc could have been aborted */ + if (!tsi->tsi_stopping) /* rpc could have been aborted */ atomic_inc(&sn->sn_brw_errors); return; } @@ -363,7 +357,7 @@ brw_client_done_rpc(sfw_test_unit_t *tsu, srpc_client_rpc_t *rpc) static void brw_server_rpc_done(struct srpc_server_rpc *rpc) { - srpc_bulk_t *blk = rpc->srpc_bulk; + struct srpc_bulk *blk = rpc->srpc_bulk; if (!blk) return; @@ -384,9 +378,9 @@ static int brw_bulk_ready(struct srpc_server_rpc *rpc, int status) { __u64 magic = BRW_MAGIC; - srpc_brw_reply_t *reply = &rpc->srpc_replymsg.msg_body.brw_reply; - srpc_brw_reqst_t *reqst; - srpc_msg_t *reqstmsg; + struct srpc_brw_reply *reply = &rpc->srpc_replymsg.msg_body.brw_reply; + struct srpc_brw_reqst *reqst; + struct srpc_msg *reqstmsg; LASSERT(rpc->srpc_bulk); LASSERT(rpc->srpc_reqstbuf); @@ -420,10 +414,10 @@ static int brw_server_handle(struct srpc_server_rpc *rpc) { struct srpc_service *sv = rpc->srpc_scd->scd_svc; - srpc_msg_t *replymsg = &rpc->srpc_replymsg; - srpc_msg_t *reqstmsg = &rpc->srpc_reqstbuf->buf_msg; - srpc_brw_reply_t *reply = &replymsg->msg_body.brw_reply; - srpc_brw_reqst_t *reqst = &reqstmsg->msg_body.brw_reqst; + struct srpc_msg *replymsg = &rpc->srpc_replymsg; + struct srpc_msg *reqstmsg = &rpc->srpc_reqstbuf->buf_msg; + struct srpc_brw_reply *reply = &replymsg->msg_body.brw_reply; + struct srpc_brw_reqst *reqst = &reqstmsg->msg_body.brw_reqst; int npg; int rc; @@ -459,7 +453,7 @@ brw_server_handle(struct srpc_server_rpc *rpc) if (!(reqstmsg->msg_ses_feats & LST_FEAT_BULK_LEN)) { /* compat with old version */ - if (reqst->brw_len & ~CFS_PAGE_MASK) { + if (reqst->brw_len & ~PAGE_MASK) { reply->brw_status = EINVAL; return 0; } @@ -490,7 +484,8 @@ brw_server_handle(struct srpc_server_rpc *rpc) return 0; } -sfw_test_client_ops_t brw_test_client; +struct sfw_test_client_ops brw_test_client; + void brw_init_test_client(void) { brw_test_client.tso_init = brw_client_init; @@ -499,7 +494,8 @@ void brw_init_test_client(void) brw_test_client.tso_done_rpc = brw_client_done_rpc; }; -srpc_service_t brw_test_service; +struct srpc_service brw_test_service; + void brw_init_test_service(void) { brw_test_service.sv_id = SRPC_SERVICE_BRW; diff --git a/drivers/staging/lustre/lnet/selftest/conctl.c b/drivers/staging/lustre/lnet/selftest/conctl.c index 79ee6c0bf7c1..b786f8b4a73d 100644 --- a/drivers/staging/lustre/lnet/selftest/conctl.c +++ b/drivers/staging/lustre/lnet/selftest/conctl.c @@ -15,11 +15,7 @@ * * You should have received a copy of the GNU General Public License * version 2 along with this program; If not, see - * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf - * - * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, - * CA 95054 USA or visit www.sun.com if you need additional information or - * 
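/*
 * brw_fill_bulk() and brw_check_bulk() above stamp each bulk page with a
 * magic word and a pattern, then verify it after the transfer, catching
 * end-to-end data corruption. A per-page sketch of the full-page check;
 * it assumes the page is mapped lowmem, as the selftest's bulk pages are:
 */
#include <linux/mm.h>

static int example_check_page(struct page *pg, u64 magic)
{
	u64 *addr = page_address(pg);	/* valid for lowmem pages only */
	unsigned int i;

	for (i = 0; i < PAGE_SIZE / sizeof(u64); i++)
		if (addr[i] != magic)
			return -EINVAL;	/* corrupted bulk data */
	return 0;
}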
have any questions. + * http://www.gnu.org/licenses/gpl-2.0.html * * GPL HEADER END */ @@ -51,9 +47,9 @@ lst_session_new_ioctl(lstio_session_new_args_t *args) char *name; int rc; - if (!args->lstio_ses_idp || /* address for output sid */ - !args->lstio_ses_key || /* no key is specified */ - !args->lstio_ses_namep || /* session name */ + if (!args->lstio_ses_idp || /* address for output sid */ + !args->lstio_ses_key || /* no key is specified */ + !args->lstio_ses_namep || /* session name */ args->lstio_ses_nmlen <= 0 || args->lstio_ses_nmlen > LST_NAME_SIZE) return -EINVAL; @@ -95,11 +91,11 @@ lst_session_info_ioctl(lstio_session_info_args_t *args) { /* no checking of key */ - if (!args->lstio_ses_idp || /* address for output sid */ - !args->lstio_ses_keyp || /* address for output key */ - !args->lstio_ses_featp || /* address for output features */ - !args->lstio_ses_ndinfo || /* address for output ndinfo */ - !args->lstio_ses_namep || /* address for output name */ + if (!args->lstio_ses_idp || /* address for output sid */ + !args->lstio_ses_keyp || /* address for output key */ + !args->lstio_ses_featp || /* address for output features */ + !args->lstio_ses_ndinfo || /* address for output ndinfo */ + !args->lstio_ses_namep || /* address for output name */ args->lstio_ses_nmlen <= 0 || args->lstio_ses_nmlen > LST_NAME_SIZE) return -EINVAL; @@ -125,7 +121,7 @@ lst_debug_ioctl(lstio_debug_args_t *args) if (!args->lstio_dbg_resultp) return -EINVAL; - if (args->lstio_dbg_namep && /* name of batch/group */ + if (args->lstio_dbg_namep && /* name of batch/group */ (args->lstio_dbg_nmlen <= 0 || args->lstio_dbg_nmlen > LST_NAME_SIZE)) return -EINVAL; @@ -326,7 +322,7 @@ lst_nodes_add_ioctl(lstio_group_nodes_args_t *args) if (args->lstio_grp_key != console_session.ses_key) return -EACCES; - if (!args->lstio_grp_idsp || /* array of ids */ + if (!args->lstio_grp_idsp || /* array of ids */ args->lstio_grp_count <= 0 || !args->lstio_grp_resultp || !args->lstio_grp_featp || @@ -394,13 +390,13 @@ lst_group_info_ioctl(lstio_group_info_args_t *args) args->lstio_grp_nmlen > LST_NAME_SIZE) return -EINVAL; - if (!args->lstio_grp_entp && /* output: group entry */ - !args->lstio_grp_dentsp) /* output: node entry */ + if (!args->lstio_grp_entp && /* output: group entry */ + !args->lstio_grp_dentsp) /* output: node entry */ return -EINVAL; - if (args->lstio_grp_dentsp) { /* have node entry */ - if (!args->lstio_grp_idxp || /* node index */ - !args->lstio_grp_ndentp) /* # of node entry */ + if (args->lstio_grp_dentsp) { /* have node entry */ + if (!args->lstio_grp_idxp || /* node index */ + !args->lstio_grp_ndentp) /* # of node entry */ return -EINVAL; if (copy_from_user(&ndent, args->lstio_grp_ndentp, @@ -612,18 +608,18 @@ lst_batch_info_ioctl(lstio_batch_info_args_t *args) if (args->lstio_bat_key != console_session.ses_key) return -EACCES; - if (!args->lstio_bat_namep || /* batch name */ + if (!args->lstio_bat_namep || /* batch name */ args->lstio_bat_nmlen <= 0 || args->lstio_bat_nmlen > LST_NAME_SIZE) return -EINVAL; - if (!args->lstio_bat_entp && /* output: batch entry */ - !args->lstio_bat_dentsp) /* output: node entry */ + if (!args->lstio_bat_entp && /* output: batch entry */ + !args->lstio_bat_dentsp) /* output: node entry */ return -EINVAL; - if (args->lstio_bat_dentsp) { /* have node entry */ - if (!args->lstio_bat_idxp || /* node index */ - !args->lstio_bat_ndentp) /* # of node entry */ + if (args->lstio_bat_dentsp) { /* have node entry */ + if (!args->lstio_bat_idxp || /* node index */ + 
!args->lstio_bat_ndentp) /* # of node entry */ return -EINVAL; if (copy_from_user(&index, args->lstio_bat_idxp, @@ -722,18 +718,18 @@ static int lst_test_add_ioctl(lstio_test_args_t *args) if (!args->lstio_tes_resultp || !args->lstio_tes_retp || - !args->lstio_tes_bat_name || /* no specified batch */ + !args->lstio_tes_bat_name || /* no specified batch */ args->lstio_tes_bat_nmlen <= 0 || args->lstio_tes_bat_nmlen > LST_NAME_SIZE || - !args->lstio_tes_sgrp_name || /* no source group */ + !args->lstio_tes_sgrp_name || /* no source group */ args->lstio_tes_sgrp_nmlen <= 0 || args->lstio_tes_sgrp_nmlen > LST_NAME_SIZE || - !args->lstio_tes_dgrp_name || /* no target group */ + !args->lstio_tes_dgrp_name || /* no target group */ args->lstio_tes_dgrp_nmlen <= 0 || args->lstio_tes_dgrp_nmlen > LST_NAME_SIZE) return -EINVAL; - if (!args->lstio_tes_loop || /* negative is infinite */ + if (!args->lstio_tes_loop || /* negative is infinite */ args->lstio_tes_concur <= 0 || args->lstio_tes_dist <= 0 || args->lstio_tes_span <= 0) @@ -743,7 +739,7 @@ static int lst_test_add_ioctl(lstio_test_args_t *args) if (args->lstio_tes_param && (args->lstio_tes_param_len <= 0 || args->lstio_tes_param_len > - PAGE_SIZE - sizeof(lstcon_test_t))) + PAGE_SIZE - sizeof(struct lstcon_test))) return -EINVAL; LIBCFS_ALLOC(batch_name, args->lstio_tes_bat_nmlen + 1); diff --git a/drivers/staging/lustre/lnet/selftest/conrpc.c b/drivers/staging/lustre/lnet/selftest/conrpc.c index 35a227d0c657..1be3cad727ae 100644 --- a/drivers/staging/lustre/lnet/selftest/conrpc.c +++ b/drivers/staging/lustre/lnet/selftest/conrpc.c @@ -15,11 +15,7 @@ * * You should have received a copy of the GNU General Public License * version 2 along with this program; If not, see - * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf - * - * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, - * CA 95054 USA or visit www.sun.com if you need additional information or - * have any questions. 
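/*
 * Every lst_*_ioctl() above validates the same way: user pointers must
 * be non-NULL when their role demands it, and every name length must lie
 * in (0, LST_NAME_SIZE]. A condensed sketch of that check plus the
 * copy-in of a bounded, forcibly NUL-terminated name (the helper name is
 * hypothetical):
 */
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/uaccess.h>

#define EXAMPLE_NAME_SIZE 32	/* stands in for LST_NAME_SIZE */

static char *example_get_name(const char __user *namep, int nmlen)
{
	char *name;

	if (!namep || nmlen <= 0 || nmlen > EXAMPLE_NAME_SIZE)
		return ERR_PTR(-EINVAL);

	name = kmalloc(nmlen + 1, GFP_KERNEL);
	if (!name)
		return ERR_PTR(-ENOMEM);
	if (copy_from_user(name, namep, nmlen)) {
		kfree(name);
		return ERR_PTR(-EFAULT);
	}
	name[nmlen] = '\0';	/* never trust user data to terminate */
	return name;
}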
+ * http://www.gnu.org/licenses/gpl-2.0.html * * GPL HEADER END */ @@ -46,13 +42,13 @@ #include "conrpc.h" #include "console.h" -void lstcon_rpc_stat_reply(lstcon_rpc_trans_t *, srpc_msg_t *, - lstcon_node_t *, lstcon_trans_stat_t *); +void lstcon_rpc_stat_reply(struct lstcon_rpc_trans *, struct srpc_msg *, + struct lstcon_node *, lstcon_trans_stat_t *); static void -lstcon_rpc_done(srpc_client_rpc_t *rpc) +lstcon_rpc_done(struct srpc_client_rpc *rpc) { - lstcon_rpc_t *crpc = (lstcon_rpc_t *)rpc->crpc_priv; + struct lstcon_rpc *crpc = (struct lstcon_rpc *)rpc->crpc_priv; LASSERT(crpc && rpc == crpc->crp_rpc); LASSERT(crpc->crp_posted && !crpc->crp_finished); @@ -90,8 +86,8 @@ lstcon_rpc_done(srpc_client_rpc_t *rpc) } static int -lstcon_rpc_init(lstcon_node_t *nd, int service, unsigned feats, - int bulk_npg, int bulk_len, int embedded, lstcon_rpc_t *crpc) +lstcon_rpc_init(struct lstcon_node *nd, int service, unsigned feats, + int bulk_npg, int bulk_len, int embedded, struct lstcon_rpc *crpc) { crpc->crp_rpc = sfw_create_rpc(nd->nd_id, service, feats, bulk_npg, bulk_len, @@ -115,16 +111,16 @@ lstcon_rpc_init(lstcon_node_t *nd, int service, unsigned feats, } static int -lstcon_rpc_prep(lstcon_node_t *nd, int service, unsigned feats, - int bulk_npg, int bulk_len, lstcon_rpc_t **crpcpp) +lstcon_rpc_prep(struct lstcon_node *nd, int service, unsigned feats, + int bulk_npg, int bulk_len, struct lstcon_rpc **crpcpp) { - lstcon_rpc_t *crpc = NULL; + struct lstcon_rpc *crpc = NULL; int rc; spin_lock(&console_session.ses_rpc_lock); crpc = list_first_entry_or_null(&console_session.ses_rpc_freelist, - lstcon_rpc_t, crp_link); + struct lstcon_rpc, crp_link); if (crpc) list_del_init(&crpc->crp_link); @@ -148,9 +144,9 @@ lstcon_rpc_prep(lstcon_node_t *nd, int service, unsigned feats, } void -lstcon_rpc_put(lstcon_rpc_t *crpc) +lstcon_rpc_put(struct lstcon_rpc *crpc) { - srpc_bulk_t *bulk = &crpc->crp_rpc->crpc_bulk; + struct srpc_bulk *bulk = &crpc->crp_rpc->crpc_bulk; int i; LASSERT(list_empty(&crpc->crp_link)); @@ -183,9 +179,9 @@ lstcon_rpc_put(lstcon_rpc_t *crpc) } static void -lstcon_rpc_post(lstcon_rpc_t *crpc) +lstcon_rpc_post(struct lstcon_rpc *crpc) { - lstcon_rpc_trans_t *trans = crpc->crp_trans; + struct lstcon_rpc_trans *trans = crpc->crp_trans; LASSERT(trans); @@ -236,9 +232,9 @@ lstcon_rpc_trans_name(int transop) int lstcon_rpc_trans_prep(struct list_head *translist, int transop, - lstcon_rpc_trans_t **transpp) + struct lstcon_rpc_trans **transpp) { - lstcon_rpc_trans_t *trans; + struct lstcon_rpc_trans *trans; if (translist) { list_for_each_entry(trans, translist, tas_link) { @@ -278,26 +274,26 @@ lstcon_rpc_trans_prep(struct list_head *translist, int transop, } void -lstcon_rpc_trans_addreq(lstcon_rpc_trans_t *trans, lstcon_rpc_t *crpc) +lstcon_rpc_trans_addreq(struct lstcon_rpc_trans *trans, struct lstcon_rpc *crpc) { list_add_tail(&crpc->crp_link, &trans->tas_rpcs_list); crpc->crp_trans = trans; } void -lstcon_rpc_trans_abort(lstcon_rpc_trans_t *trans, int error) +lstcon_rpc_trans_abort(struct lstcon_rpc_trans *trans, int error) { - srpc_client_rpc_t *rpc; - lstcon_rpc_t *crpc; - lstcon_node_t *nd; + struct srpc_client_rpc *rpc; + struct lstcon_rpc *crpc; + struct lstcon_node *nd; list_for_each_entry(crpc, &trans->tas_rpcs_list, crp_link) { rpc = crpc->crp_rpc; spin_lock(&rpc->crpc_lock); - if (!crpc->crp_posted || /* not posted */ - crpc->crp_stamp) { /* rpc done or aborted already */ + if (!crpc->crp_posted || /* not posted */ + crpc->crp_stamp) { /* rpc done or aborted already */ if 
(!crpc->crp_stamp) { crpc->crp_stamp = cfs_time_current(); crpc->crp_status = -EINTR; @@ -326,7 +322,7 @@ lstcon_rpc_trans_abort(lstcon_rpc_trans_t *trans, int error) } static int -lstcon_rpc_trans_check(lstcon_rpc_trans_t *trans) +lstcon_rpc_trans_check(struct lstcon_rpc_trans *trans) { if (console_session.ses_shutdown && !list_empty(&trans->tas_olink)) /* Not an end session RPC */ @@ -336,9 +332,9 @@ lstcon_rpc_trans_check(lstcon_rpc_trans_t *trans) } int -lstcon_rpc_trans_postwait(lstcon_rpc_trans_t *trans, int timeout) +lstcon_rpc_trans_postwait(struct lstcon_rpc_trans *trans, int timeout) { - lstcon_rpc_t *crpc; + struct lstcon_rpc *crpc; int rc; if (list_empty(&trans->tas_rpcs_list)) @@ -386,11 +382,11 @@ lstcon_rpc_trans_postwait(lstcon_rpc_trans_t *trans, int timeout) } static int -lstcon_rpc_get_reply(lstcon_rpc_t *crpc, srpc_msg_t **msgpp) +lstcon_rpc_get_reply(struct lstcon_rpc *crpc, struct srpc_msg **msgpp) { - lstcon_node_t *nd = crpc->crp_node; - srpc_client_rpc_t *rpc = crpc->crp_rpc; - srpc_generic_reply_t *rep; + struct lstcon_node *nd = crpc->crp_node; + struct srpc_client_rpc *rpc = crpc->crp_rpc; + struct srpc_generic_reply *rep; LASSERT(nd && rpc); LASSERT(crpc->crp_stamp); @@ -423,10 +419,10 @@ lstcon_rpc_get_reply(lstcon_rpc_t *crpc, srpc_msg_t **msgpp) } void -lstcon_rpc_trans_stat(lstcon_rpc_trans_t *trans, lstcon_trans_stat_t *stat) +lstcon_rpc_trans_stat(struct lstcon_rpc_trans *trans, lstcon_trans_stat_t *stat) { - lstcon_rpc_t *crpc; - srpc_msg_t *rep; + struct lstcon_rpc *crpc; + struct srpc_msg *rep; int error; LASSERT(stat); @@ -466,17 +462,17 @@ lstcon_rpc_trans_stat(lstcon_rpc_trans_t *trans, lstcon_trans_stat_t *stat) } int -lstcon_rpc_trans_interpreter(lstcon_rpc_trans_t *trans, +lstcon_rpc_trans_interpreter(struct lstcon_rpc_trans *trans, struct list_head __user *head_up, lstcon_rpc_readent_func_t readent) { struct list_head tmp; struct list_head __user *next; lstcon_rpc_ent_t *ent; - srpc_generic_reply_t *rep; - lstcon_rpc_t *crpc; - srpc_msg_t *msg; - lstcon_node_t *nd; + struct srpc_generic_reply *rep; + struct lstcon_rpc *crpc; + struct srpc_msg *msg; + struct lstcon_node *nd; long dur; struct timeval tv; int error; @@ -520,7 +516,7 @@ lstcon_rpc_trans_interpreter(lstcon_rpc_trans_t *trans, continue; /* RPC is done */ - rep = (srpc_generic_reply_t *)&msg->msg_body.reply; + rep = (struct srpc_generic_reply *)&msg->msg_body.reply; if (copy_to_user(&ent->rpe_sid, &rep->sid, sizeof(lst_sid_t)) || copy_to_user(&ent->rpe_fwk_errno, &rep->status, @@ -531,7 +527,6 @@ lstcon_rpc_trans_interpreter(lstcon_rpc_trans_t *trans, continue; error = readent(trans->tas_opc, msg, ent); - if (error) return error; } @@ -540,11 +535,11 @@ lstcon_rpc_trans_interpreter(lstcon_rpc_trans_t *trans, } void -lstcon_rpc_trans_destroy(lstcon_rpc_trans_t *trans) +lstcon_rpc_trans_destroy(struct lstcon_rpc_trans *trans) { - srpc_client_rpc_t *rpc; - lstcon_rpc_t *crpc; - lstcon_rpc_t *tmp; + struct srpc_client_rpc *rpc; + struct lstcon_rpc *crpc; + struct lstcon_rpc *tmp; int count = 0; list_for_each_entry_safe(crpc, tmp, &trans->tas_rpcs_list, crp_link) { @@ -563,10 +558,10 @@ lstcon_rpc_trans_destroy(lstcon_rpc_trans_t *trans) } /* - * rpcs can be still not callbacked (even LNetMDUnlink is called) - * because huge timeout for inaccessible network, don't make - * user wait for them, just abandon them, they will be recycled - * in callback + * rpcs can be still not callbacked (even LNetMDUnlink is + * called) because huge timeout for inaccessible network, + * don't make user wait 
for them, just abandon them, they + * will be recycled in callback */ LASSERT(crpc->crp_status); @@ -593,11 +588,11 @@ lstcon_rpc_trans_destroy(lstcon_rpc_trans_t *trans) } int -lstcon_sesrpc_prep(lstcon_node_t *nd, int transop, - unsigned feats, lstcon_rpc_t **crpc) +lstcon_sesrpc_prep(struct lstcon_node *nd, int transop, + unsigned feats, struct lstcon_rpc **crpc) { - srpc_mksn_reqst_t *msrq; - srpc_rmsn_reqst_t *rsrq; + struct srpc_mksn_reqst *msrq; + struct srpc_rmsn_reqst *rsrq; int rc; switch (transop) { @@ -632,9 +627,9 @@ lstcon_sesrpc_prep(lstcon_node_t *nd, int transop, } int -lstcon_dbgrpc_prep(lstcon_node_t *nd, unsigned feats, lstcon_rpc_t **crpc) +lstcon_dbgrpc_prep(struct lstcon_node *nd, unsigned feats, struct lstcon_rpc **crpc) { - srpc_debug_reqst_t *drq; + struct srpc_debug_reqst *drq; int rc; rc = lstcon_rpc_prep(nd, SRPC_SERVICE_DEBUG, feats, 0, 0, crpc); @@ -650,11 +645,11 @@ lstcon_dbgrpc_prep(lstcon_node_t *nd, unsigned feats, lstcon_rpc_t **crpc) } int -lstcon_batrpc_prep(lstcon_node_t *nd, int transop, unsigned feats, - lstcon_tsb_hdr_t *tsb, lstcon_rpc_t **crpc) +lstcon_batrpc_prep(struct lstcon_node *nd, int transop, unsigned feats, + struct lstcon_tsb_hdr *tsb, struct lstcon_rpc **crpc) { - lstcon_batch_t *batch; - srpc_batch_reqst_t *brq; + struct lstcon_batch *batch; + struct srpc_batch_reqst *brq; int rc; rc = lstcon_rpc_prep(nd, SRPC_SERVICE_BATCH, feats, 0, 0, crpc); @@ -676,16 +671,16 @@ lstcon_batrpc_prep(lstcon_node_t *nd, int transop, unsigned feats, LASSERT(!tsb->tsb_index); - batch = (lstcon_batch_t *)tsb; + batch = (struct lstcon_batch *)tsb; brq->bar_arg = batch->bat_arg; return 0; } int -lstcon_statrpc_prep(lstcon_node_t *nd, unsigned feats, lstcon_rpc_t **crpc) +lstcon_statrpc_prep(struct lstcon_node *nd, unsigned feats, struct lstcon_rpc **crpc) { - srpc_stat_reqst_t *srq; + struct srpc_stat_reqst *srq; int rc; rc = lstcon_rpc_prep(nd, SRPC_SERVICE_QUERY_STAT, feats, 0, 0, crpc); @@ -716,12 +711,12 @@ lstcon_next_id(int idx, int nkiov, lnet_kiov_t *kiov) } static int -lstcon_dstnodes_prep(lstcon_group_t *grp, int idx, +lstcon_dstnodes_prep(struct lstcon_group *grp, int idx, int dist, int span, int nkiov, lnet_kiov_t *kiov) { lnet_process_id_packed_t *pid; - lstcon_ndlink_t *ndl; - lstcon_node_t *nd; + struct lstcon_ndlink *ndl; + struct lstcon_node *nd; int start; int end; int i = 0; @@ -770,9 +765,9 @@ lstcon_dstnodes_prep(lstcon_group_t *grp, int idx, } static int -lstcon_pingrpc_prep(lst_test_ping_param_t *param, srpc_test_reqst_t *req) +lstcon_pingrpc_prep(lst_test_ping_param_t *param, struct srpc_test_reqst *req) { - test_ping_req_t *prq = &req->tsr_u.ping; + struct test_ping_req *prq = &req->tsr_u.ping; prq->png_size = param->png_size; prq->png_flags = param->png_flags; @@ -781,9 +776,9 @@ lstcon_pingrpc_prep(lst_test_ping_param_t *param, srpc_test_reqst_t *req) } static int -lstcon_bulkrpc_v0_prep(lst_test_bulk_param_t *param, srpc_test_reqst_t *req) +lstcon_bulkrpc_v0_prep(lst_test_bulk_param_t *param, struct srpc_test_reqst *req) { - test_bulk_req_t *brq = &req->tsr_u.bulk_v0; + struct test_bulk_req *brq = &req->tsr_u.bulk_v0; brq->blk_opc = param->blk_opc; brq->blk_npg = (param->blk_size + PAGE_SIZE - 1) / @@ -794,9 +789,9 @@ lstcon_bulkrpc_v0_prep(lst_test_bulk_param_t *param, srpc_test_reqst_t *req) } static int -lstcon_bulkrpc_v1_prep(lst_test_bulk_param_t *param, srpc_test_reqst_t *req) +lstcon_bulkrpc_v1_prep(lst_test_bulk_param_t *param, struct srpc_test_reqst *req) { - test_bulk_req_v1_t *brq = &req->tsr_u.bulk_v1; + struct 
test_bulk_req_v1 *brq = &req->tsr_u.bulk_v1; brq->blk_opc = param->blk_opc; brq->blk_flags = param->blk_flags; @@ -807,13 +802,13 @@ lstcon_bulkrpc_v1_prep(lst_test_bulk_param_t *param, srpc_test_reqst_t *req) } int -lstcon_testrpc_prep(lstcon_node_t *nd, int transop, unsigned feats, - lstcon_test_t *test, lstcon_rpc_t **crpc) +lstcon_testrpc_prep(struct lstcon_node *nd, int transop, unsigned feats, + struct lstcon_test *test, struct lstcon_rpc **crpc) { - lstcon_group_t *sgrp = test->tes_src_grp; - lstcon_group_t *dgrp = test->tes_dst_grp; - srpc_test_reqst_t *trq; - srpc_bulk_t *bulk; + struct lstcon_group *sgrp = test->tes_src_grp; + struct lstcon_group *dgrp = test->tes_dst_grp; + struct srpc_test_reqst *trq; + struct srpc_bulk *bulk; int i; int npg = 0; int nob = 0; @@ -841,7 +836,6 @@ lstcon_testrpc_prep(lstcon_node_t *nd, int transop, unsigned feats, trq->tsr_ndest = 0; trq->tsr_loop = nmax * test->tes_dist * test->tes_concur; - } else { bulk = &(*crpc)->crp_rpc->crpc_bulk; @@ -917,10 +911,10 @@ lstcon_testrpc_prep(lstcon_node_t *nd, int transop, unsigned feats, } static int -lstcon_sesnew_stat_reply(lstcon_rpc_trans_t *trans, - lstcon_node_t *nd, srpc_msg_t *reply) +lstcon_sesnew_stat_reply(struct lstcon_rpc_trans *trans, + struct lstcon_node *nd, struct srpc_msg *reply) { - srpc_mksn_reply_t *mksn_rep = &reply->msg_body.mksn_reply; + struct srpc_mksn_reply *mksn_rep = &reply->msg_body.mksn_reply; int status = mksn_rep->mksn_status; if (!status && @@ -940,7 +934,7 @@ lstcon_sesnew_stat_reply(lstcon_rpc_trans_t *trans, if (!trans->tas_feats_updated) { spin_lock(&console_session.ses_rpc_lock); - if (!trans->tas_feats_updated) { /* recheck with lock */ + if (!trans->tas_feats_updated) { /* recheck with lock */ trans->tas_feats_updated = 1; trans->tas_features = reply->msg_ses_feats; } @@ -964,14 +958,14 @@ lstcon_sesnew_stat_reply(lstcon_rpc_trans_t *trans, } void -lstcon_rpc_stat_reply(lstcon_rpc_trans_t *trans, srpc_msg_t *msg, - lstcon_node_t *nd, lstcon_trans_stat_t *stat) +lstcon_rpc_stat_reply(struct lstcon_rpc_trans *trans, struct srpc_msg *msg, + struct lstcon_node *nd, lstcon_trans_stat_t *stat) { - srpc_rmsn_reply_t *rmsn_rep; - srpc_debug_reply_t *dbg_rep; - srpc_batch_reply_t *bat_rep; - srpc_test_reply_t *test_rep; - srpc_stat_reply_t *stat_rep; + struct srpc_rmsn_reply *rmsn_rep; + struct srpc_debug_reply *dbg_rep; + struct srpc_batch_reply *bat_rep; + struct srpc_test_reply *test_rep; + struct srpc_stat_reply *stat_rep; int rc = 0; switch (trans->tas_opc) { @@ -1085,12 +1079,12 @@ int lstcon_rpc_trans_ndlist(struct list_head *ndlist, struct list_head *translist, int transop, void *arg, lstcon_rpc_cond_func_t condition, - lstcon_rpc_trans_t **transpp) + struct lstcon_rpc_trans **transpp) { - lstcon_rpc_trans_t *trans; - lstcon_ndlink_t *ndl; - lstcon_node_t *nd; - lstcon_rpc_t *rpc; + struct lstcon_rpc_trans *trans; + struct lstcon_ndlink *ndl; + struct lstcon_node *nd; + struct lstcon_rpc *rpc; unsigned feats; int rc; @@ -1130,14 +1124,16 @@ lstcon_rpc_trans_ndlist(struct list_head *ndlist, case LST_TRANS_TSBCLIADD: case LST_TRANS_TSBSRVADD: rc = lstcon_testrpc_prep(nd, transop, feats, - (lstcon_test_t *)arg, &rpc); + (struct lstcon_test *)arg, + &rpc); break; case LST_TRANS_TSBRUN: case LST_TRANS_TSBSTOP: case LST_TRANS_TSBCLIQRY: case LST_TRANS_TSBSRVQRY: rc = lstcon_batrpc_prep(nd, transop, feats, - (lstcon_tsb_hdr_t *)arg, &rpc); + (struct lstcon_tsb_hdr *)arg, + &rpc); break; case LST_TRANS_STATQRY: rc = lstcon_statrpc_prep(nd, feats, &rpc); @@ -1170,17 +1166,18 @@ 
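An aside on the hunk above: lstcon_rpc_trans_ndlist() walks the node list, lets the optional condition callback veto individual nodes, and then builds one console RPC per node through the per-opcode switch before queueing it on the transaction. A minimal sketch of that shape, assuming hypothetical should_skip() and prep_one() helpers in place of the real condition callback and lstcon_*rpc_prep() dispatch:

/*
 * Sketch only -- the real logic is lstcon_rpc_trans_ndlist() above.
 * should_skip() and prep_one() are invented stand-ins for the condition
 * callback and the per-opcode lstcon_*rpc_prep() switch.
 */
static int trans_build_sketch(struct list_head *ndlist, int transop,
			      void *arg, struct lstcon_rpc_trans *trans)
{
	struct lstcon_ndlink *ndl;
	struct lstcon_rpc *rpc;
	int rc;

	list_for_each_entry(ndl, ndlist, ndl_link) {
		if (should_skip(transop, ndl->ndl_node, arg))
			continue;	/* node vetoed, e.g. wrong state */

		rc = prep_one(ndl->ndl_node, transop, arg, &rpc);
		if (rc) {
			/* one failed prep poisons the whole transaction */
			lstcon_rpc_trans_abort(trans, rc);
			return rc;
		}

		lstcon_rpc_trans_addreq(trans, rpc);
	}

	return 0;
}

The real loop also threads a single feats mask through every prep call, so all RPCs queued on one transaction advertise the same session features.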
static void lstcon_rpc_pinger(void *arg) { struct stt_timer *ptimer = (struct stt_timer *)arg; - lstcon_rpc_trans_t *trans; - lstcon_rpc_t *crpc; - srpc_msg_t *rep; - srpc_debug_reqst_t *drq; - lstcon_ndlink_t *ndl; - lstcon_node_t *nd; + struct lstcon_rpc_trans *trans; + struct lstcon_rpc *crpc; + struct srpc_msg *rep; + struct srpc_debug_reqst *drq; + struct lstcon_ndlink *ndl; + struct lstcon_node *nd; int intv; int count = 0; int rc; - /* RPC pinger is a special case of transaction, + /* + * RPC pinger is a special case of transaction, * it's called by timer at 8 seconds interval. */ mutex_lock(&console_session.ses_mutex); @@ -1326,9 +1323,9 @@ lstcon_rpc_pinger_stop(void) void lstcon_rpc_cleanup_wait(void) { - lstcon_rpc_trans_t *trans; - lstcon_rpc_t *crpc; - lstcon_rpc_t *temp; + struct lstcon_rpc_trans *trans; + struct lstcon_rpc *crpc; + struct lstcon_rpc *temp; struct list_head *pacer; struct list_head zlist; @@ -1338,7 +1335,7 @@ lstcon_rpc_cleanup_wait(void) while (!list_empty(&console_session.ses_trans_list)) { list_for_each(pacer, &console_session.ses_trans_list) { - trans = list_entry(pacer, lstcon_rpc_trans_t, + trans = list_entry(pacer, struct lstcon_rpc_trans, tas_link); CDEBUG(D_NET, "Session closed, wakeup transaction %s\n", @@ -1370,7 +1367,7 @@ lstcon_rpc_cleanup_wait(void) list_for_each_entry_safe(crpc, temp, &zlist, crp_link) { list_del(&crpc->crp_link); - LIBCFS_FREE(crpc, sizeof(lstcon_rpc_t)); + LIBCFS_FREE(crpc, sizeof(struct lstcon_rpc)); } } diff --git a/drivers/staging/lustre/lnet/selftest/conrpc.h b/drivers/staging/lustre/lnet/selftest/conrpc.h index 3e7839dad5bb..7ec6fc96959e 100644 --- a/drivers/staging/lustre/lnet/selftest/conrpc.h +++ b/drivers/staging/lustre/lnet/selftest/conrpc.h @@ -15,11 +15,7 @@ * * You should have received a copy of the GNU General Public License * version 2 along with this program; If not, see - * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf - * - * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, - * CA 95054 USA or visit www.sun.com if you need additional information or - * have any questions. 
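Before the header file, a note on lstcon_rpc_pinger() above, since its conversion is easy to misread as pure type churn: the pinger is a stt_timer callback that pings quiet nodes and then re-arms its own timer, per the "8 seconds interval" comment. A hedged sketch of that self-rearming shape (PING_INTERVAL_SECONDS is an invented name and the body is illustrative, not the patch's code):

/*
 * Illustrative sketch of a self-rearming stt_timer callback, the shape
 * lstcon_rpc_pinger() follows.  PING_INTERVAL_SECONDS is made up here;
 * the real interval is 8 seconds per the comment in the function.
 */
#define PING_INTERVAL_SECONDS	8

static void pinger_sketch(void *arg)
{
	struct stt_timer *timer = arg;

	/* ... ping nodes that have been quiet too long ... */

	timer->stt_expires = ktime_get_real_seconds() + PING_INTERVAL_SECONDS;
	stt_add_timer(timer);	/* queue ourselves for the next tick */
}

The real callback additionally takes console_session.ses_mutex and bails out early when the session is shutting down, as its body above shows.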
+ * http://www.gnu.org/licenses/gpl-2.0.html * * GPL HEADER END */ @@ -63,9 +59,9 @@ struct lstcon_tsb_hdr; struct lstcon_test; struct lstcon_node; -typedef struct lstcon_rpc { +struct lstcon_rpc { struct list_head crp_link; /* chain on rpc transaction */ - srpc_client_rpc_t *crp_rpc; /* client rpc */ + struct srpc_client_rpc *crp_rpc; /* client rpc */ struct lstcon_node *crp_node; /* destination node */ struct lstcon_rpc_trans *crp_trans; /* conrpc transaction */ @@ -76,9 +72,9 @@ typedef struct lstcon_rpc { unsigned int crp_embedded:1; int crp_status; /* console rpc errors */ unsigned long crp_stamp; /* replied time stamp */ -} lstcon_rpc_t; +}; -typedef struct lstcon_rpc_trans { +struct lstcon_rpc_trans { struct list_head tas_olink; /* link chain on owner list */ struct list_head tas_link; /* link chain on global list */ int tas_opc; /* operation code of transaction */ @@ -87,7 +83,7 @@ typedef struct lstcon_rpc_trans { wait_queue_head_t tas_waitq; /* wait queue head */ atomic_t tas_remaining; /* # of un-scheduled rpcs */ struct list_head tas_rpcs_list; /* queued requests */ -} lstcon_rpc_trans_t; +}; #define LST_TRANS_PRIVATE 0x1000 @@ -106,35 +102,35 @@ typedef struct lstcon_rpc_trans { #define LST_TRANS_STATQRY 0x21 typedef int (*lstcon_rpc_cond_func_t)(int, struct lstcon_node *, void *); -typedef int (*lstcon_rpc_readent_func_t)(int, srpc_msg_t *, +typedef int (*lstcon_rpc_readent_func_t)(int, struct srpc_msg *, lstcon_rpc_ent_t __user *); int lstcon_sesrpc_prep(struct lstcon_node *nd, int transop, - unsigned version, lstcon_rpc_t **crpc); + unsigned version, struct lstcon_rpc **crpc); int lstcon_dbgrpc_prep(struct lstcon_node *nd, - unsigned version, lstcon_rpc_t **crpc); + unsigned version, struct lstcon_rpc **crpc); int lstcon_batrpc_prep(struct lstcon_node *nd, int transop, unsigned version, - struct lstcon_tsb_hdr *tsb, lstcon_rpc_t **crpc); + struct lstcon_tsb_hdr *tsb, struct lstcon_rpc **crpc); int lstcon_testrpc_prep(struct lstcon_node *nd, int transop, unsigned version, - struct lstcon_test *test, lstcon_rpc_t **crpc); + struct lstcon_test *test, struct lstcon_rpc **crpc); int lstcon_statrpc_prep(struct lstcon_node *nd, unsigned version, - lstcon_rpc_t **crpc); -void lstcon_rpc_put(lstcon_rpc_t *crpc); + struct lstcon_rpc **crpc); +void lstcon_rpc_put(struct lstcon_rpc *crpc); int lstcon_rpc_trans_prep(struct list_head *translist, - int transop, lstcon_rpc_trans_t **transpp); + int transop, struct lstcon_rpc_trans **transpp); int lstcon_rpc_trans_ndlist(struct list_head *ndlist, struct list_head *translist, int transop, void *arg, lstcon_rpc_cond_func_t condition, - lstcon_rpc_trans_t **transpp); -void lstcon_rpc_trans_stat(lstcon_rpc_trans_t *trans, + struct lstcon_rpc_trans **transpp); +void lstcon_rpc_trans_stat(struct lstcon_rpc_trans *trans, lstcon_trans_stat_t *stat); -int lstcon_rpc_trans_interpreter(lstcon_rpc_trans_t *trans, +int lstcon_rpc_trans_interpreter(struct lstcon_rpc_trans *trans, struct list_head __user *head_up, lstcon_rpc_readent_func_t readent); -void lstcon_rpc_trans_abort(lstcon_rpc_trans_t *trans, int error); -void lstcon_rpc_trans_destroy(lstcon_rpc_trans_t *trans); -void lstcon_rpc_trans_addreq(lstcon_rpc_trans_t *trans, lstcon_rpc_t *req); -int lstcon_rpc_trans_postwait(lstcon_rpc_trans_t *trans, int timeout); +void lstcon_rpc_trans_abort(struct lstcon_rpc_trans *trans, int error); +void lstcon_rpc_trans_destroy(struct lstcon_rpc_trans *trans); +void lstcon_rpc_trans_addreq(struct lstcon_rpc_trans *trans, struct lstcon_rpc *req); +int 
lstcon_rpc_trans_postwait(struct lstcon_rpc_trans *trans, int timeout); int lstcon_rpc_pinger_start(void); void lstcon_rpc_pinger_stop(void); void lstcon_rpc_cleanup_wait(void); diff --git a/drivers/staging/lustre/lnet/selftest/console.c b/drivers/staging/lustre/lnet/selftest/console.c index 1a923ea3a755..4c33621f06da 100644 --- a/drivers/staging/lustre/lnet/selftest/console.c +++ b/drivers/staging/lustre/lnet/selftest/console.c @@ -15,11 +15,7 @@ * * You should have received a copy of the GNU General Public License * version 2 along with this program; If not, see - * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf - * - * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, - * CA 95054 USA or visit www.sun.com if you need additional information or - * have any questions. + * http://www.gnu.org/licenses/gpl-2.0.html * * GPL HEADER END */ @@ -61,7 +57,7 @@ do { \ struct lstcon_session console_session; static void -lstcon_node_get(lstcon_node_t *nd) +lstcon_node_get(struct lstcon_node *nd) { LASSERT(nd->nd_ref >= 1); @@ -69,9 +65,9 @@ lstcon_node_get(lstcon_node_t *nd) } static int -lstcon_node_find(lnet_process_id_t id, lstcon_node_t **ndpp, int create) +lstcon_node_find(lnet_process_id_t id, struct lstcon_node **ndpp, int create) { - lstcon_ndlink_t *ndl; + struct lstcon_ndlink *ndl; unsigned int idx = LNET_NIDADDR(id.nid) % LST_GLOBAL_HASHSIZE; LASSERT(id.nid != LNET_NID_ANY); @@ -90,11 +86,11 @@ lstcon_node_find(lnet_process_id_t id, lstcon_node_t **ndpp, int create) if (!create) return -ENOENT; - LIBCFS_ALLOC(*ndpp, sizeof(lstcon_node_t) + sizeof(lstcon_ndlink_t)); + LIBCFS_ALLOC(*ndpp, sizeof(struct lstcon_node) + sizeof(struct lstcon_ndlink)); if (!*ndpp) return -ENOMEM; - ndl = (lstcon_ndlink_t *)(*ndpp + 1); + ndl = (struct lstcon_ndlink *)(*ndpp + 1); ndl->ndl_node = *ndpp; @@ -103,7 +99,7 @@ lstcon_node_find(lnet_process_id_t id, lstcon_node_t **ndpp, int create) ndl->ndl_node->nd_stamp = cfs_time_current(); ndl->ndl_node->nd_state = LST_NODE_UNKNOWN; ndl->ndl_node->nd_timeout = 0; - memset(&ndl->ndl_node->nd_ping, 0, sizeof(lstcon_rpc_t)); + memset(&ndl->ndl_node->nd_ping, 0, sizeof(struct lstcon_rpc)); /* * queued in global hash & list, no refcount is taken by @@ -117,16 +113,16 @@ lstcon_node_find(lnet_process_id_t id, lstcon_node_t **ndpp, int create) } static void -lstcon_node_put(lstcon_node_t *nd) +lstcon_node_put(struct lstcon_node *nd) { - lstcon_ndlink_t *ndl; + struct lstcon_ndlink *ndl; LASSERT(nd->nd_ref > 0); if (--nd->nd_ref > 0) return; - ndl = (lstcon_ndlink_t *)(nd + 1); + ndl = (struct lstcon_ndlink *)(nd + 1); LASSERT(!list_empty(&ndl->ndl_link)); LASSERT(!list_empty(&ndl->ndl_hlink)); @@ -135,16 +131,16 @@ lstcon_node_put(lstcon_node_t *nd) list_del(&ndl->ndl_link); list_del(&ndl->ndl_hlink); - LIBCFS_FREE(nd, sizeof(lstcon_node_t) + sizeof(lstcon_ndlink_t)); + LIBCFS_FREE(nd, sizeof(struct lstcon_node) + sizeof(struct lstcon_ndlink)); } static int lstcon_ndlink_find(struct list_head *hash, - lnet_process_id_t id, lstcon_ndlink_t **ndlpp, int create) + lnet_process_id_t id, struct lstcon_ndlink **ndlpp, int create) { unsigned int idx = LNET_NIDADDR(id.nid) % LST_NODE_HASHSIZE; - lstcon_ndlink_t *ndl; - lstcon_node_t *nd; + struct lstcon_ndlink *ndl; + struct lstcon_node *nd; int rc; if (id.nid == LNET_NID_ANY) @@ -168,7 +164,7 @@ lstcon_ndlink_find(struct list_head *hash, if (rc) return rc; - LIBCFS_ALLOC(ndl, sizeof(lstcon_ndlink_t)); + LIBCFS_ALLOC(ndl, sizeof(struct lstcon_ndlink)); if (!ndl) { lstcon_node_put(nd); return 
-ENOMEM; @@ -184,7 +180,7 @@ lstcon_ndlink_find(struct list_head *hash, } static void -lstcon_ndlink_release(lstcon_ndlink_t *ndl) +lstcon_ndlink_release(struct lstcon_ndlink *ndl) { LASSERT(list_empty(&ndl->ndl_link)); LASSERT(!list_empty(&ndl->ndl_hlink)); @@ -196,12 +192,12 @@ lstcon_ndlink_release(lstcon_ndlink_t *ndl) } static int -lstcon_group_alloc(char *name, lstcon_group_t **grpp) +lstcon_group_alloc(char *name, struct lstcon_group **grpp) { - lstcon_group_t *grp; + struct lstcon_group *grp; int i; - LIBCFS_ALLOC(grp, offsetof(lstcon_group_t, + LIBCFS_ALLOC(grp, offsetof(struct lstcon_group, grp_ndl_hash[LST_NODE_HASHSIZE])); if (!grp) return -ENOMEM; @@ -209,7 +205,7 @@ lstcon_group_alloc(char *name, lstcon_group_t **grpp) grp->grp_ref = 1; if (name) { if (strlen(name) > sizeof(grp->grp_name) - 1) { - LIBCFS_FREE(grp, offsetof(lstcon_group_t, + LIBCFS_FREE(grp, offsetof(struct lstcon_group, grp_ndl_hash[LST_NODE_HASHSIZE])); return -E2BIG; } @@ -229,18 +225,18 @@ lstcon_group_alloc(char *name, lstcon_group_t **grpp) } static void -lstcon_group_addref(lstcon_group_t *grp) +lstcon_group_addref(struct lstcon_group *grp) { grp->grp_ref++; } -static void lstcon_group_ndlink_release(lstcon_group_t *, lstcon_ndlink_t *); +static void lstcon_group_ndlink_release(struct lstcon_group *, struct lstcon_ndlink *); static void -lstcon_group_drain(lstcon_group_t *grp, int keep) +lstcon_group_drain(struct lstcon_group *grp, int keep) { - lstcon_ndlink_t *ndl; - lstcon_ndlink_t *tmp; + struct lstcon_ndlink *ndl; + struct lstcon_ndlink *tmp; list_for_each_entry_safe(ndl, tmp, &grp->grp_ndl_list, ndl_link) { if (!(ndl->ndl_node->nd_state & keep)) @@ -249,7 +245,7 @@ lstcon_group_drain(lstcon_group_t *grp, int keep) } static void -lstcon_group_decref(lstcon_group_t *grp) +lstcon_group_decref(struct lstcon_group *grp) { int i; @@ -264,20 +260,20 @@ lstcon_group_decref(lstcon_group_t *grp) for (i = 0; i < LST_NODE_HASHSIZE; i++) LASSERT(list_empty(&grp->grp_ndl_hash[i])); - LIBCFS_FREE(grp, offsetof(lstcon_group_t, + LIBCFS_FREE(grp, offsetof(struct lstcon_group, grp_ndl_hash[LST_NODE_HASHSIZE])); } static int -lstcon_group_find(const char *name, lstcon_group_t **grpp) +lstcon_group_find(const char *name, struct lstcon_group **grpp) { - lstcon_group_t *grp; + struct lstcon_group *grp; list_for_each_entry(grp, &console_session.ses_grp_list, grp_link) { if (strncmp(grp->grp_name, name, LST_NAME_SIZE)) continue; - lstcon_group_addref(grp); /* +1 ref for caller */ + lstcon_group_addref(grp); /* +1 ref for caller */ *grpp = grp; return 0; } @@ -286,8 +282,8 @@ lstcon_group_find(const char *name, lstcon_group_t **grpp) } static int -lstcon_group_ndlink_find(lstcon_group_t *grp, lnet_process_id_t id, - lstcon_ndlink_t **ndlpp, int create) +lstcon_group_ndlink_find(struct lstcon_group *grp, lnet_process_id_t id, + struct lstcon_ndlink **ndlpp, int create) { int rc; @@ -305,7 +301,7 @@ lstcon_group_ndlink_find(lstcon_group_t *grp, lnet_process_id_t id, } static void -lstcon_group_ndlink_release(lstcon_group_t *grp, lstcon_ndlink_t *ndl) +lstcon_group_ndlink_release(struct lstcon_group *grp, struct lstcon_ndlink *ndl) { list_del_init(&ndl->ndl_link); lstcon_ndlink_release(ndl); @@ -313,8 +309,8 @@ lstcon_group_ndlink_release(lstcon_group_t *grp, lstcon_ndlink_t *ndl) } static void -lstcon_group_ndlink_move(lstcon_group_t *old, - lstcon_group_t *new, lstcon_ndlink_t *ndl) +lstcon_group_ndlink_move(struct lstcon_group *old, + struct lstcon_group *new, struct lstcon_ndlink *ndl) { unsigned int idx = 
LNET_NIDADDR(ndl->ndl_node->nd_id.nid) % LST_NODE_HASHSIZE; @@ -329,21 +325,21 @@ lstcon_group_ndlink_move(lstcon_group_t *old, } static void -lstcon_group_move(lstcon_group_t *old, lstcon_group_t *new) +lstcon_group_move(struct lstcon_group *old, struct lstcon_group *new) { - lstcon_ndlink_t *ndl; + struct lstcon_ndlink *ndl; while (!list_empty(&old->grp_ndl_list)) { ndl = list_entry(old->grp_ndl_list.next, - lstcon_ndlink_t, ndl_link); + struct lstcon_ndlink, ndl_link); lstcon_group_ndlink_move(old, new, ndl); } } static int -lstcon_sesrpc_condition(int transop, lstcon_node_t *nd, void *arg) +lstcon_sesrpc_condition(int transop, struct lstcon_node *nd, void *arg) { - lstcon_group_t *grp = (lstcon_group_t *)arg; + struct lstcon_group *grp = (struct lstcon_group *)arg; switch (transop) { case LST_TRANS_SESNEW: @@ -370,10 +366,10 @@ lstcon_sesrpc_condition(int transop, lstcon_node_t *nd, void *arg) } static int -lstcon_sesrpc_readent(int transop, srpc_msg_t *msg, +lstcon_sesrpc_readent(int transop, struct srpc_msg *msg, lstcon_rpc_ent_t __user *ent_up) { - srpc_debug_reply_t *rep; + struct srpc_debug_reply *rep; switch (transop) { case LST_TRANS_SESNEW: @@ -399,13 +395,13 @@ lstcon_sesrpc_readent(int transop, srpc_msg_t *msg, } static int -lstcon_group_nodes_add(lstcon_group_t *grp, +lstcon_group_nodes_add(struct lstcon_group *grp, int count, lnet_process_id_t __user *ids_up, unsigned *featp, struct list_head __user *result_up) { - lstcon_rpc_trans_t *trans; - lstcon_ndlink_t *ndl; - lstcon_group_t *tmp; + struct lstcon_rpc_trans *trans; + struct lstcon_ndlink *ndl; + struct lstcon_group *tmp; lnet_process_id_t id; int i; int rc; @@ -466,13 +462,13 @@ lstcon_group_nodes_add(lstcon_group_t *grp, } static int -lstcon_group_nodes_remove(lstcon_group_t *grp, +lstcon_group_nodes_remove(struct lstcon_group *grp, int count, lnet_process_id_t __user *ids_up, struct list_head __user *result_up) { - lstcon_rpc_trans_t *trans; - lstcon_ndlink_t *ndl; - lstcon_group_t *tmp; + struct lstcon_rpc_trans *trans; + struct lstcon_ndlink *ndl; + struct lstcon_group *tmp; lnet_process_id_t id; int rc; int i; @@ -523,7 +519,7 @@ error: int lstcon_group_add(char *name) { - lstcon_group_t *grp; + struct lstcon_group *grp; int rc; rc = lstcon_group_find(name, &grp) ? 
0 : -EEXIST; @@ -548,7 +544,7 @@ int lstcon_nodes_add(char *name, int count, lnet_process_id_t __user *ids_up, unsigned *featp, struct list_head __user *result_up) { - lstcon_group_t *grp; + struct lstcon_group *grp; int rc; LASSERT(count > 0); @@ -578,8 +574,8 @@ lstcon_nodes_add(char *name, int count, lnet_process_id_t __user *ids_up, int lstcon_group_del(char *name) { - lstcon_rpc_trans_t *trans; - lstcon_group_t *grp; + struct lstcon_rpc_trans *trans; + struct lstcon_group *grp; int rc; rc = lstcon_group_find(name, &grp); @@ -621,7 +617,7 @@ lstcon_group_del(char *name) int lstcon_group_clean(char *name, int args) { - lstcon_group_t *grp = NULL; + struct lstcon_group *grp = NULL; int rc; rc = lstcon_group_find(name, &grp); @@ -654,7 +650,7 @@ int lstcon_nodes_remove(char *name, int count, lnet_process_id_t __user *ids_up, struct list_head __user *result_up) { - lstcon_group_t *grp = NULL; + struct lstcon_group *grp = NULL; int rc; rc = lstcon_group_find(name, &grp); @@ -683,8 +679,8 @@ lstcon_nodes_remove(char *name, int count, lnet_process_id_t __user *ids_up, int lstcon_group_refresh(char *name, struct list_head __user *result_up) { - lstcon_rpc_trans_t *trans; - lstcon_group_t *grp; + struct lstcon_rpc_trans *trans; + struct lstcon_group *grp; int rc; rc = lstcon_group_find(name, &grp); @@ -725,7 +721,7 @@ lstcon_group_refresh(char *name, struct list_head __user *result_up) int lstcon_group_list(int index, int len, char __user *name_up) { - lstcon_group_t *grp; + struct lstcon_group *grp; LASSERT(index >= 0); LASSERT(name_up); @@ -733,7 +729,7 @@ lstcon_group_list(int index, int len, char __user *name_up) list_for_each_entry(grp, &console_session.ses_grp_list, grp_link) { if (!index--) { return copy_to_user(name_up, grp->grp_name, len) ? - -EFAULT : 0; + -EFAULT : 0; } } @@ -744,8 +740,8 @@ static int lstcon_nodes_getent(struct list_head *head, int *index_p, int *count_p, lstcon_node_ent_t __user *dents_up) { - lstcon_ndlink_t *ndl; - lstcon_node_t *nd; + struct lstcon_ndlink *ndl; + struct lstcon_node *nd; int count = 0; int index = 0; @@ -786,8 +782,8 @@ lstcon_group_info(char *name, lstcon_ndlist_ent_t __user *gents_p, lstcon_node_ent_t __user *dents_up) { lstcon_ndlist_ent_t *gentp; - lstcon_group_t *grp; - lstcon_ndlink_t *ndl; + struct lstcon_group *grp; + struct lstcon_ndlink *ndl; int rc; rc = lstcon_group_find(name, &grp); @@ -828,9 +824,9 @@ lstcon_group_info(char *name, lstcon_ndlist_ent_t __user *gents_p, } static int -lstcon_batch_find(const char *name, lstcon_batch_t **batpp) +lstcon_batch_find(const char *name, struct lstcon_batch **batpp) { - lstcon_batch_t *bat; + struct lstcon_batch *bat; list_for_each_entry(bat, &console_session.ses_bat_list, bat_link) { if (!strncmp(bat->bat_name, name, LST_NAME_SIZE)) { @@ -845,7 +841,7 @@ lstcon_batch_find(const char *name, lstcon_batch_t **batpp) int lstcon_batch_add(char *name) { - lstcon_batch_t *bat; + struct lstcon_batch *bat; int i; int rc; @@ -855,7 +851,7 @@ lstcon_batch_add(char *name) return rc; } - LIBCFS_ALLOC(bat, sizeof(lstcon_batch_t)); + LIBCFS_ALLOC(bat, sizeof(struct lstcon_batch)); if (!bat) { CERROR("Can't allocate descriptor for batch %s\n", name); return -ENOMEM; @@ -865,7 +861,7 @@ lstcon_batch_add(char *name) sizeof(struct list_head) * LST_NODE_HASHSIZE); if (!bat->bat_cli_hash) { CERROR("Can't allocate hash for batch %s\n", name); - LIBCFS_FREE(bat, sizeof(lstcon_batch_t)); + LIBCFS_FREE(bat, sizeof(struct lstcon_batch)); return -ENOMEM; } @@ -875,7 +871,7 @@ lstcon_batch_add(char *name) if 
(!bat->bat_srv_hash) { CERROR("Can't allocate hash for batch %s\n", name); LIBCFS_FREE(bat->bat_cli_hash, LST_NODE_HASHSIZE); - LIBCFS_FREE(bat, sizeof(lstcon_batch_t)); + LIBCFS_FREE(bat, sizeof(struct lstcon_batch)); return -ENOMEM; } @@ -883,7 +879,7 @@ lstcon_batch_add(char *name) if (strlen(name) > sizeof(bat->bat_name) - 1) { LIBCFS_FREE(bat->bat_srv_hash, LST_NODE_HASHSIZE); LIBCFS_FREE(bat->bat_cli_hash, LST_NODE_HASHSIZE); - LIBCFS_FREE(bat, sizeof(lstcon_batch_t)); + LIBCFS_FREE(bat, sizeof(struct lstcon_batch)); return -E2BIG; } strncpy(bat->bat_name, name, sizeof(bat->bat_name)); @@ -911,7 +907,7 @@ lstcon_batch_add(char *name) int lstcon_batch_list(int index, int len, char __user *name_up) { - lstcon_batch_t *bat; + struct lstcon_batch *bat; LASSERT(name_up); LASSERT(index >= 0); @@ -934,9 +930,9 @@ lstcon_batch_info(char *name, lstcon_test_batch_ent_t __user *ent_up, lstcon_test_batch_ent_t *entp; struct list_head *clilst; struct list_head *srvlst; - lstcon_test_t *test = NULL; - lstcon_batch_t *bat; - lstcon_ndlink_t *ndl; + struct lstcon_test *test = NULL; + struct lstcon_batch *bat; + struct lstcon_ndlink *ndl; int rc; rc = lstcon_batch_find(name, &bat); @@ -977,7 +973,6 @@ lstcon_batch_info(char *name, lstcon_test_batch_ent_t __user *ent_up, if (!test) { entp->u.tbe_batch.bae_ntest = bat->bat_ntest; entp->u.tbe_batch.bae_state = bat->bat_state; - } else { entp->u.tbe_test.tse_type = test->tes_type; entp->u.tbe_test.tse_loop = test->tes_loop; @@ -999,7 +994,7 @@ lstcon_batch_info(char *name, lstcon_test_batch_ent_t __user *ent_up, } static int -lstcon_batrpc_condition(int transop, lstcon_node_t *nd, void *arg) +lstcon_batrpc_condition(int transop, struct lstcon_node *nd, void *arg) { switch (transop) { case LST_TRANS_TSBRUN: @@ -1021,10 +1016,10 @@ lstcon_batrpc_condition(int transop, lstcon_node_t *nd, void *arg) } static int -lstcon_batch_op(lstcon_batch_t *bat, int transop, +lstcon_batch_op(struct lstcon_batch *bat, int transop, struct list_head __user *result_up) { - lstcon_rpc_trans_t *trans; + struct lstcon_rpc_trans *trans; int rc; rc = lstcon_rpc_trans_ndlist(&bat->bat_cli_list, @@ -1047,7 +1042,7 @@ lstcon_batch_op(lstcon_batch_t *bat, int transop, int lstcon_batch_run(char *name, int timeout, struct list_head __user *result_up) { - lstcon_batch_t *bat; + struct lstcon_batch *bat; int rc; if (lstcon_batch_find(name, &bat)) { @@ -1069,7 +1064,7 @@ lstcon_batch_run(char *name, int timeout, struct list_head __user *result_up) int lstcon_batch_stop(char *name, int force, struct list_head __user *result_up) { - lstcon_batch_t *bat; + struct lstcon_batch *bat; int rc; if (lstcon_batch_find(name, &bat)) { @@ -1089,17 +1084,17 @@ lstcon_batch_stop(char *name, int force, struct list_head __user *result_up) } static void -lstcon_batch_destroy(lstcon_batch_t *bat) +lstcon_batch_destroy(struct lstcon_batch *bat) { - lstcon_ndlink_t *ndl; - lstcon_test_t *test; + struct lstcon_ndlink *ndl; + struct lstcon_test *test; int i; list_del(&bat->bat_link); while (!list_empty(&bat->bat_test_list)) { test = list_entry(bat->bat_test_list.next, - lstcon_test_t, tes_link); + struct lstcon_test, tes_link); LASSERT(list_empty(&test->tes_trans_list)); list_del(&test->tes_link); @@ -1107,7 +1102,7 @@ lstcon_batch_destroy(lstcon_batch_t *bat) lstcon_group_decref(test->tes_src_grp); lstcon_group_decref(test->tes_dst_grp); - LIBCFS_FREE(test, offsetof(lstcon_test_t, + LIBCFS_FREE(test, offsetof(struct lstcon_test, tes_param[test->tes_paramlen])); } @@ -1115,7 +1110,7 @@ 
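Between hunks, a note on lstcon_batch_add() above: it allocates the batch descriptor plus two node hash tables and unwinds in reverse order at each failure point. Note in passing that the existing error paths free the hashes with a bare LST_NODE_HASHSIZE while both the allocation and lstcon_batch_destroy() use sizeof(struct list_head) * LST_NODE_HASHSIZE; the patch carries that inconsistency through unchanged. The same unwind written with goto labels, as a sketch rather than the patch's code:

/*
 * Sketch: lstcon_batch_add()'s allocation ladder restructured with goto
 * unwinding.  Not the patch's code; field and macro names are taken
 * from the hunks above.
 */
static int batch_add_sketch(struct lstcon_batch **batp)
{
	size_t hashsz = sizeof(struct list_head) * LST_NODE_HASHSIZE;
	struct lstcon_batch *bat;

	LIBCFS_ALLOC(bat, sizeof(struct lstcon_batch));
	if (!bat)
		return -ENOMEM;

	LIBCFS_ALLOC(bat->bat_cli_hash, hashsz);
	if (!bat->bat_cli_hash)
		goto free_bat;

	LIBCFS_ALLOC(bat->bat_srv_hash, hashsz);
	if (!bat->bat_srv_hash)
		goto free_cli_hash;

	*batp = bat;
	return 0;

free_cli_hash:
	LIBCFS_FREE(bat->bat_cli_hash, hashsz);
free_bat:
	LIBCFS_FREE(bat, sizeof(struct lstcon_batch));
	return -ENOMEM;
}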
lstcon_batch_destroy(lstcon_batch_t *bat) while (!list_empty(&bat->bat_cli_list)) { ndl = list_entry(bat->bat_cli_list.next, - lstcon_ndlink_t, ndl_link); + struct lstcon_ndlink, ndl_link); list_del_init(&ndl->ndl_link); lstcon_ndlink_release(ndl); @@ -1123,7 +1118,7 @@ lstcon_batch_destroy(lstcon_batch_t *bat) while (!list_empty(&bat->bat_srv_list)) { ndl = list_entry(bat->bat_srv_list.next, - lstcon_ndlink_t, ndl_link); + struct lstcon_ndlink, ndl_link); list_del_init(&ndl->ndl_link); lstcon_ndlink_release(ndl); @@ -1138,19 +1133,19 @@ lstcon_batch_destroy(lstcon_batch_t *bat) sizeof(struct list_head) * LST_NODE_HASHSIZE); LIBCFS_FREE(bat->bat_srv_hash, sizeof(struct list_head) * LST_NODE_HASHSIZE); - LIBCFS_FREE(bat, sizeof(lstcon_batch_t)); + LIBCFS_FREE(bat, sizeof(struct lstcon_batch)); } static int -lstcon_testrpc_condition(int transop, lstcon_node_t *nd, void *arg) +lstcon_testrpc_condition(int transop, struct lstcon_node *nd, void *arg) { - lstcon_test_t *test; - lstcon_batch_t *batch; - lstcon_ndlink_t *ndl; + struct lstcon_test *test; + struct lstcon_batch *batch; + struct lstcon_ndlink *ndl; struct list_head *hash; struct list_head *head; - test = (lstcon_test_t *)arg; + test = (struct lstcon_test *)arg; LASSERT(test); batch = test->tes_batch; @@ -1186,10 +1181,10 @@ lstcon_testrpc_condition(int transop, lstcon_node_t *nd, void *arg) } static int -lstcon_test_nodes_add(lstcon_test_t *test, struct list_head __user *result_up) +lstcon_test_nodes_add(struct lstcon_test *test, struct list_head __user *result_up) { - lstcon_rpc_trans_t *trans; - lstcon_group_t *grp; + struct lstcon_rpc_trans *trans; + struct lstcon_group *grp; int transop; int rc; @@ -1237,7 +1232,7 @@ again: } static int -lstcon_verify_batch(const char *name, lstcon_batch_t **batch) +lstcon_verify_batch(const char *name, struct lstcon_batch **batch) { int rc; @@ -1256,10 +1251,10 @@ lstcon_verify_batch(const char *name, lstcon_batch_t **batch) } static int -lstcon_verify_group(const char *name, lstcon_group_t **grp) +lstcon_verify_group(const char *name, struct lstcon_group **grp) { int rc; - lstcon_ndlink_t *ndl; + struct lstcon_ndlink *ndl; rc = lstcon_group_find(name, grp); if (rc) { @@ -1284,11 +1279,11 @@ lstcon_test_add(char *batch_name, int type, int loop, void *param, int paramlen, int *retp, struct list_head __user *result_up) { - lstcon_test_t *test = NULL; + struct lstcon_test *test = NULL; int rc; - lstcon_group_t *src_grp = NULL; - lstcon_group_t *dst_grp = NULL; - lstcon_batch_t *batch = NULL; + struct lstcon_group *src_grp = NULL; + struct lstcon_group *dst_grp = NULL; + struct lstcon_batch *batch = NULL; /* * verify that a batch of the given name exists, and the groups @@ -1310,7 +1305,7 @@ lstcon_test_add(char *batch_name, int type, int loop, if (dst_grp->grp_userland) *retp = 1; - LIBCFS_ALLOC(test, offsetof(lstcon_test_t, tes_param[paramlen])); + LIBCFS_ALLOC(test, offsetof(struct lstcon_test, tes_param[paramlen])); if (!test) { CERROR("Can't allocate test descriptor\n"); rc = -ENOMEM; @@ -1357,7 +1352,7 @@ lstcon_test_add(char *batch_name, int type, int loop, return rc; out: if (test) - LIBCFS_FREE(test, offsetof(lstcon_test_t, tes_param[paramlen])); + LIBCFS_FREE(test, offsetof(struct lstcon_test, tes_param[paramlen])); if (dst_grp) lstcon_group_decref(dst_grp); @@ -1369,9 +1364,9 @@ out: } static int -lstcon_test_find(lstcon_batch_t *batch, int idx, lstcon_test_t **testpp) +lstcon_test_find(struct lstcon_batch *batch, int idx, struct lstcon_test **testpp) { - lstcon_test_t *test; + struct 
lstcon_test *test; list_for_each_entry(test, &batch->bat_test_list, tes_link) { if (idx == test->tes_hdr.tsb_index) { @@ -1384,10 +1379,10 @@ lstcon_test_find(lstcon_batch_t *batch, int idx, lstcon_test_t **testpp) } static int -lstcon_tsbrpc_readent(int transop, srpc_msg_t *msg, +lstcon_tsbrpc_readent(int transop, struct srpc_msg *msg, lstcon_rpc_ent_t __user *ent_up) { - srpc_batch_reply_t *rep = &msg->msg_body.bat_reply; + struct srpc_batch_reply *rep = &msg->msg_body.bat_reply; LASSERT(transop == LST_TRANS_TSBCLIQRY || transop == LST_TRANS_TSBSRVQRY); @@ -1404,12 +1399,12 @@ int lstcon_test_batch_query(char *name, int testidx, int client, int timeout, struct list_head __user *result_up) { - lstcon_rpc_trans_t *trans; + struct lstcon_rpc_trans *trans; struct list_head *translist; struct list_head *ndlist; - lstcon_tsb_hdr_t *hdr; - lstcon_batch_t *batch; - lstcon_test_t *test = NULL; + struct lstcon_tsb_hdr *hdr; + struct lstcon_batch *batch; + struct lstcon_test *test = NULL; int transop; int rc; @@ -1423,7 +1418,6 @@ lstcon_test_batch_query(char *name, int testidx, int client, translist = &batch->bat_trans_list; ndlist = &batch->bat_cli_list; hdr = &batch->bat_hdr; - } else { /* query specified test only */ rc = lstcon_test_find(batch, testidx, &test); @@ -1448,7 +1442,8 @@ lstcon_test_batch_query(char *name, int testidx, int client, lstcon_rpc_trans_postwait(trans, timeout); - if (!testidx && /* query a batch, not a test */ + /* query a batch, not a test */ + if (!testidx && !lstcon_rpc_stat_failure(lstcon_trans_stat(), 0) && !lstcon_tsbqry_stat_run(lstcon_trans_stat(), 0)) { /* all RPCs finished, and no active test */ @@ -1463,10 +1458,10 @@ lstcon_test_batch_query(char *name, int testidx, int client, } static int -lstcon_statrpc_readent(int transop, srpc_msg_t *msg, +lstcon_statrpc_readent(int transop, struct srpc_msg *msg, lstcon_rpc_ent_t __user *ent_up) { - srpc_stat_reply_t *rep = &msg->msg_body.stat_reply; + struct srpc_stat_reply *rep = &msg->msg_body.stat_reply; sfw_counters_t __user *sfwk_stat; srpc_counters_t __user *srpc_stat; lnet_counters_t __user *lnet_stat; @@ -1491,7 +1486,7 @@ lstcon_ndlist_stat(struct list_head *ndlist, int timeout, struct list_head __user *result_up) { struct list_head head; - lstcon_rpc_trans_t *trans; + struct lstcon_rpc_trans *trans; int rc; INIT_LIST_HEAD(&head); @@ -1516,7 +1511,7 @@ int lstcon_group_stat(char *grp_name, int timeout, struct list_head __user *result_up) { - lstcon_group_t *grp; + struct lstcon_group *grp; int rc; rc = lstcon_group_find(grp_name, &grp); @@ -1536,8 +1531,8 @@ int lstcon_nodes_stat(int count, lnet_process_id_t __user *ids_up, int timeout, struct list_head __user *result_up) { - lstcon_ndlink_t *ndl; - lstcon_group_t *tmp; + struct lstcon_ndlink *ndl; + struct lstcon_group *tmp; lnet_process_id_t id; int i; int rc; @@ -1581,7 +1576,7 @@ lstcon_debug_ndlist(struct list_head *ndlist, struct list_head *translist, int timeout, struct list_head __user *result_up) { - lstcon_rpc_trans_t *trans; + struct lstcon_rpc_trans *trans; int rc; rc = lstcon_rpc_trans_ndlist(ndlist, translist, LST_TRANS_SESQRY, @@ -1611,7 +1606,7 @@ int lstcon_batch_debug(int timeout, char *name, int client, struct list_head __user *result_up) { - lstcon_batch_t *bat; + struct lstcon_batch *bat; int rc; rc = lstcon_batch_find(name, &bat); @@ -1629,7 +1624,7 @@ int lstcon_group_debug(int timeout, char *name, struct list_head __user *result_up) { - lstcon_group_t *grp; + struct lstcon_group *grp; int rc; rc = lstcon_group_find(name, &grp); @@ 
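A quick aside before the next hunk: the *_readent helpers in this file (lstcon_tsbrpc_readent() and lstcon_statrpc_readent() above) all implement the lstcon_rpc_readent_func_t contract from conrpc.h: pick the reply body matching the transaction opcode out of the srpc_msg, then copy_to_user() the interesting fields into the caller's lstcon_rpc_ent_t. A minimal sketch of one such callback, modelled on the tsbrpc variant (rpe_priv/bar_active follow my reading of the real helper; not every field is visible in the hunks here):

/*
 * Sketch of a lstcon_rpc_readent_func_t callback, following
 * lstcon_tsbrpc_readent() above.
 */
static int readent_sketch(int transop, struct srpc_msg *msg,
			  lstcon_rpc_ent_t __user *ent_up)
{
	struct srpc_batch_reply *rep = &msg->msg_body.bat_reply;

	/* reply fields go out through copy_to_user(), never direct stores */
	if (copy_to_user(&ent_up->rpe_priv[0], &rep->bar_active,
			 sizeof(rep->bar_active)))
		return -EFAULT;

	return 0;
}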
-1649,8 +1644,8 @@ lstcon_nodes_debug(int timeout, struct list_head __user *result_up) { lnet_process_id_t id; - lstcon_ndlink_t *ndl; - lstcon_group_t *grp; + struct lstcon_ndlink *ndl; + struct lstcon_group *grp; int i; int rc; @@ -1749,7 +1744,7 @@ lstcon_session_new(char *name, int key, unsigned feats, if (strlen(name) > sizeof(console_session.ses_name) - 1) return -E2BIG; - strncpy(console_session.ses_name, name, + strlcpy(console_session.ses_name, name, sizeof(console_session.ses_name)); rc = lstcon_batch_add(LST_DEFAULT_BATCH); @@ -1758,7 +1753,7 @@ lstcon_session_new(char *name, int key, unsigned feats, rc = lstcon_rpc_pinger_start(); if (rc) { - lstcon_batch_t *bat = NULL; + struct lstcon_batch *bat = NULL; lstcon_batch_find(LST_DEFAULT_BATCH, &bat); lstcon_batch_destroy(bat); @@ -1782,7 +1777,7 @@ lstcon_session_info(lst_sid_t __user *sid_up, int __user *key_up, char __user *name_up, int len) { lstcon_ndlist_ent_t *entp; - lstcon_ndlink_t *ndl; + struct lstcon_ndlink *ndl; int rc = 0; if (console_session.ses_state != LST_SESSION_ACTIVE) @@ -1813,9 +1808,9 @@ lstcon_session_info(lst_sid_t __user *sid_up, int __user *key_up, int lstcon_session_end(void) { - lstcon_rpc_trans_t *trans; - lstcon_group_t *grp; - lstcon_batch_t *bat; + struct lstcon_rpc_trans *trans; + struct lstcon_group *grp; + struct lstcon_batch *bat; int rc = 0; LASSERT(console_session.ses_state == LST_SESSION_ACTIVE); @@ -1849,7 +1844,7 @@ lstcon_session_end(void) /* destroy all batches */ while (!list_empty(&console_session.ses_bat_list)) { bat = list_entry(console_session.ses_bat_list.next, - lstcon_batch_t, bat_link); + struct lstcon_batch, bat_link); lstcon_batch_destroy(bat); } @@ -1857,7 +1852,7 @@ lstcon_session_end(void) /* destroy all groups */ while (!list_empty(&console_session.ses_grp_list)) { grp = list_entry(console_session.ses_grp_list.next, - lstcon_group_t, grp_link); + struct lstcon_group, grp_link); LASSERT(grp->grp_ref == 1); lstcon_group_decref(grp); @@ -1906,12 +1901,12 @@ lstcon_session_feats_check(unsigned feats) static int lstcon_acceptor_handle(struct srpc_server_rpc *rpc) { - srpc_msg_t *rep = &rpc->srpc_replymsg; - srpc_msg_t *req = &rpc->srpc_reqstbuf->buf_msg; - srpc_join_reqst_t *jreq = &req->msg_body.join_reqst; - srpc_join_reply_t *jrep = &rep->msg_body.join_reply; - lstcon_group_t *grp = NULL; - lstcon_ndlink_t *ndl; + struct srpc_msg *rep = &rpc->srpc_replymsg; + struct srpc_msg *req = &rpc->srpc_reqstbuf->buf_msg; + struct srpc_join_reqst *jreq = &req->msg_body.join_reqst; + struct srpc_join_reply *jrep = &rep->msg_body.join_reply; + struct lstcon_group *grp = NULL; + struct lstcon_ndlink *ndl; int rc = 0; sfw_unpack_message(req); @@ -1987,7 +1982,8 @@ out: return rc; } -static srpc_service_t lstcon_acceptor_service; +static struct srpc_service lstcon_acceptor_service; + static void lstcon_init_acceptor_service(void) { /* initialize selftest console acceptor service table */ diff --git a/drivers/staging/lustre/lnet/selftest/console.h b/drivers/staging/lustre/lnet/selftest/console.h index 554f582441f1..78b147732615 100644 --- a/drivers/staging/lustre/lnet/selftest/console.h +++ b/drivers/staging/lustre/lnet/selftest/console.h @@ -15,11 +15,7 @@ * * You should have received a copy of the GNU General Public License * version 2 along with this program; If not, see - * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf - * - * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, - * CA 95054 USA or visit www.sun.com if you need additional information or - 
* have any questions. + * http://www.gnu.org/licenses/gpl-2.0.html * * GPL HEADER END */ @@ -50,22 +46,25 @@ #include "selftest.h" #include "conrpc.h" -typedef struct lstcon_node { +/* node descriptor */ +struct lstcon_node { lnet_process_id_t nd_id; /* id of the node */ int nd_ref; /* reference count */ int nd_state; /* state of the node */ int nd_timeout; /* session timeout */ unsigned long nd_stamp; /* timestamp of last replied RPC */ struct lstcon_rpc nd_ping; /* ping rpc */ -} lstcon_node_t; /* node descriptor */ +}; -typedef struct { +/* node link descriptor */ +struct lstcon_ndlink { struct list_head ndl_link; /* chain on list */ struct list_head ndl_hlink; /* chain on hash */ - lstcon_node_t *ndl_node; /* pointer to node */ -} lstcon_ndlink_t; /* node link descriptor */ + struct lstcon_node *ndl_node; /* pointer to node */ +}; -typedef struct { +/* (alias of nodes) group descriptor */ +struct lstcon_group { struct list_head grp_link; /* chain on global group list */ int grp_ref; /* reference count */ @@ -76,18 +75,19 @@ typedef struct { struct list_head grp_trans_list; /* transaction list */ struct list_head grp_ndl_list; /* nodes list */ struct list_head grp_ndl_hash[0]; /* hash table for nodes */ -} lstcon_group_t; /* (alias of nodes) group descriptor */ +}; #define LST_BATCH_IDLE 0xB0 /* idle batch */ #define LST_BATCH_RUNNING 0xB1 /* running batch */ -typedef struct lstcon_tsb_hdr { +struct lstcon_tsb_hdr { lst_bid_t tsb_id; /* batch ID */ int tsb_index; /* test index */ -} lstcon_tsb_hdr_t; +}; -typedef struct { - lstcon_tsb_hdr_t bat_hdr; /* test_batch header */ +/* (tests ) batch descriptor */ +struct lstcon_batch { + struct lstcon_tsb_hdr bat_hdr; /* test_batch header */ struct list_head bat_link; /* chain on session's batches list */ int bat_ntest; /* # of test */ int bat_state; /* state of the batch */ @@ -95,20 +95,21 @@ typedef struct { * for run, force for stop */ char bat_name[LST_NAME_SIZE];/* name of batch */ - struct list_head bat_test_list; /* list head of tests (lstcon_test_t) + struct list_head bat_test_list; /* list head of tests (struct lstcon_test) */ struct list_head bat_trans_list; /* list head of transaction */ struct list_head bat_cli_list; /* list head of client nodes - * (lstcon_node_t) */ + * (struct lstcon_node) */ struct list_head *bat_cli_hash; /* hash table of client nodes */ struct list_head bat_srv_list; /* list head of server nodes */ struct list_head *bat_srv_hash; /* hash table of server nodes */ -} lstcon_batch_t; /* (tests ) batch descriptor */ +}; -typedef struct lstcon_test { - lstcon_tsb_hdr_t tes_hdr; /* test batch header */ +/* a single test descriptor */ +struct lstcon_test { + struct lstcon_tsb_hdr tes_hdr; /* test batch header */ struct list_head tes_link; /* chain on batch's tests list */ - lstcon_batch_t *tes_batch; /* pointer to batch */ + struct lstcon_batch *tes_batch; /* pointer to batch */ int tes_type; /* type of the test, i.e: bulk, ping */ int tes_stop_onerr; /* stop on error */ @@ -120,12 +121,12 @@ typedef struct lstcon_test { int tes_cliidx; /* client index, used for RPC creating */ struct list_head tes_trans_list; /* transaction list */ - lstcon_group_t *tes_src_grp; /* group run the test */ - lstcon_group_t *tes_dst_grp; /* target group */ + struct lstcon_group *tes_src_grp; /* group run the test */ + struct lstcon_group *tes_dst_grp; /* target group */ int tes_paramlen; /* test parameter length */ char tes_param[0]; /* test parameter */ -} lstcon_test_t; /* a single test descriptor */ +}; #define LST_GLOBAL_HASHSIZE 
503 /* global nodes hash table size */ #define LST_NODE_HASHSIZE 239 /* node hash table (for batch or group) */ @@ -152,7 +153,7 @@ struct lstcon_session { unsigned ses_expired:1; /* console is timedout */ __u64 ses_id_cookie; /* batch id cookie */ char ses_name[LST_NAME_SIZE];/* session name */ - lstcon_rpc_trans_t *ses_ping; /* session pinger */ + struct lstcon_rpc_trans *ses_ping; /* session pinger */ struct stt_timer ses_ping_timer; /* timer for pinger */ lstcon_trans_stat_t ses_trans_stat; /* transaction stats */ diff --git a/drivers/staging/lustre/lnet/selftest/framework.c b/drivers/staging/lustre/lnet/selftest/framework.c index e2c532399366..c2f121f44d33 100644 --- a/drivers/staging/lustre/lnet/selftest/framework.c +++ b/drivers/staging/lustre/lnet/selftest/framework.c @@ -15,11 +15,7 @@ * * You should have received a copy of the GNU General Public License * version 2 along with this program; If not, see - * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf - * - * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, - * CA 95054 USA or visit www.sun.com if you need additional information or - * have any questions. + * http://www.gnu.org/licenses/gpl-2.0.html * * GPL HEADER END */ @@ -109,19 +105,19 @@ static struct smoketest_framework { struct list_head fw_tests; /* registered test cases */ atomic_t fw_nzombies; /* # zombie sessions */ spinlock_t fw_lock; /* serialise */ - sfw_session_t *fw_session; /* _the_ session */ + struct sfw_session *fw_session; /* _the_ session */ int fw_shuttingdown; /* shutdown in progress */ struct srpc_server_rpc *fw_active_srpc;/* running RPC */ } sfw_data; /* forward ref's */ -int sfw_stop_batch(sfw_batch_t *tsb, int force); -void sfw_destroy_session(sfw_session_t *sn); +int sfw_stop_batch(struct sfw_batch *tsb, int force); +void sfw_destroy_session(struct sfw_session *sn); -static inline sfw_test_case_t * +static inline struct sfw_test_case * sfw_find_test_case(int id) { - sfw_test_case_t *tsc; + struct sfw_test_case *tsc; LASSERT(id <= SRPC_SERVICE_MAX_ID); LASSERT(id > SRPC_FRAMEWORK_SERVICE_MAX_ID); @@ -135,9 +131,9 @@ sfw_find_test_case(int id) } static int -sfw_register_test(srpc_service_t *service, sfw_test_client_ops_t *cliops) +sfw_register_test(struct srpc_service *service, struct sfw_test_client_ops *cliops) { - sfw_test_case_t *tsc; + struct sfw_test_case *tsc; if (sfw_find_test_case(service->sv_id)) { CERROR("Failed to register test %s (%d)\n", @@ -145,7 +141,7 @@ sfw_register_test(srpc_service_t *service, sfw_test_client_ops_t *cliops) return -EEXIST; } - LIBCFS_ALLOC(tsc, sizeof(sfw_test_case_t)); + LIBCFS_ALLOC(tsc, sizeof(struct sfw_test_case)); if (!tsc) return -ENOMEM; @@ -159,7 +155,7 @@ sfw_register_test(srpc_service_t *service, sfw_test_client_ops_t *cliops) static void sfw_add_session_timer(void) { - sfw_session_t *sn = sfw_data.fw_session; + struct sfw_session *sn = sfw_data.fw_session; struct stt_timer *timer = &sn->sn_timer; LASSERT(!sfw_data.fw_shuttingdown); @@ -177,7 +173,7 @@ sfw_add_session_timer(void) static int sfw_del_session_timer(void) { - sfw_session_t *sn = sfw_data.fw_session; + struct sfw_session *sn = sfw_data.fw_session; if (!sn || !sn->sn_timer_active) return 0; @@ -196,10 +192,10 @@ static void sfw_deactivate_session(void) __must_hold(&sfw_data.fw_lock) { - sfw_session_t *sn = sfw_data.fw_session; + struct sfw_session *sn = sfw_data.fw_session; int nactive = 0; - sfw_batch_t *tsb; - sfw_test_case_t *tsc; + struct sfw_batch *tsb; + struct sfw_test_case *tsc; if (!sn) return; @@ 
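An aside on the timer pair above: sfw_add_session_timer() arms the session expiry timer and sfw_del_session_timer() reports whether it managed to cancel it, which is how the framework tells "timer safely off" from "expiry callback already running". That return value is exactly why sfw_handle_server_rpc() further down answers -EAGAIN when the deletion fails. The shape of the check, as a sketch under those assumptions:

/*
 * Sketch of the expiry-race check, per sfw_del_session_timer()'s
 * contract above: nonzero means the timer could not be cancelled.
 */
static int touch_session_sketch(void)
{
	int rc = 0;

	spin_lock(&sfw_data.fw_lock);

	if (sfw_del_session_timer()) {
		/* expiry callback is racing with us: back off and retry */
		rc = -EAGAIN;
	} else {
		/* timer is off: fw_session cannot expire under this lock */
		sfw_add_session_timer();	/* re-arm when done */
	}

	spin_unlock(&sfw_data.fw_lock);
	return rc;
}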
-226,7 +222,7 @@ __must_hold(&sfw_data.fw_lock) } if (nactive) - return; /* wait for active batches to stop */ + return; /* wait for active batches to stop */ list_del_init(&sn->sn_list); spin_unlock(&sfw_data.fw_lock); @@ -239,7 +235,7 @@ __must_hold(&sfw_data.fw_lock) static void sfw_session_expired(void *data) { - sfw_session_t *sn = data; + struct sfw_session *sn = data; spin_lock(&sfw_data.fw_lock); @@ -257,12 +253,12 @@ sfw_session_expired(void *data) } static inline void -sfw_init_session(sfw_session_t *sn, lst_sid_t sid, +sfw_init_session(struct sfw_session *sn, lst_sid_t sid, unsigned features, const char *name) { struct stt_timer *timer = &sn->sn_timer; - memset(sn, 0, sizeof(sfw_session_t)); + memset(sn, 0, sizeof(struct sfw_session)); INIT_LIST_HEAD(&sn->sn_list); INIT_LIST_HEAD(&sn->sn_batches); atomic_set(&sn->sn_refcount, 1); /* +1 for caller */ @@ -298,7 +294,7 @@ sfw_server_rpc_done(struct srpc_server_rpc *rpc) } static void -sfw_client_rpc_fini(srpc_client_rpc_t *rpc) +sfw_client_rpc_fini(struct srpc_client_rpc *rpc) { LASSERT(!rpc->crpc_bulk.bk_niov); LASSERT(list_empty(&rpc->crpc_list)); @@ -318,11 +314,11 @@ sfw_client_rpc_fini(srpc_client_rpc_t *rpc) spin_unlock(&sfw_data.fw_lock); } -static sfw_batch_t * +static struct sfw_batch * sfw_find_batch(lst_bid_t bid) { - sfw_session_t *sn = sfw_data.fw_session; - sfw_batch_t *bat; + struct sfw_session *sn = sfw_data.fw_session; + struct sfw_batch *bat; LASSERT(sn); @@ -334,11 +330,11 @@ sfw_find_batch(lst_bid_t bid) return NULL; } -static sfw_batch_t * +static struct sfw_batch * sfw_bid2batch(lst_bid_t bid) { - sfw_session_t *sn = sfw_data.fw_session; - sfw_batch_t *bat; + struct sfw_session *sn = sfw_data.fw_session; + struct sfw_batch *bat; LASSERT(sn); @@ -346,7 +342,7 @@ sfw_bid2batch(lst_bid_t bid) if (bat) return bat; - LIBCFS_ALLOC(bat, sizeof(sfw_batch_t)); + LIBCFS_ALLOC(bat, sizeof(struct sfw_batch)); if (!bat) return NULL; @@ -361,11 +357,11 @@ sfw_bid2batch(lst_bid_t bid) } static int -sfw_get_stats(srpc_stat_reqst_t *request, srpc_stat_reply_t *reply) +sfw_get_stats(struct srpc_stat_reqst *request, struct srpc_stat_reply *reply) { - sfw_session_t *sn = sfw_data.fw_session; + struct sfw_session *sn = sfw_data.fw_session; sfw_counters_t *cnt = &reply->str_fw; - sfw_batch_t *bat; + struct sfw_batch *bat; reply->str_sid = !sn ? LST_INVALID_SID : sn->sn_id; @@ -402,10 +398,10 @@ sfw_get_stats(srpc_stat_reqst_t *request, srpc_stat_reply_t *reply) } int -sfw_make_session(srpc_mksn_reqst_t *request, srpc_mksn_reply_t *reply) +sfw_make_session(struct srpc_mksn_reqst *request, struct srpc_mksn_reply *reply) { - sfw_session_t *sn = sfw_data.fw_session; - srpc_msg_t *msg = container_of(request, srpc_msg_t, + struct sfw_session *sn = sfw_data.fw_session; + struct srpc_msg *msg = container_of(request, struct srpc_msg, msg_body.mksn_reqst); int cplen = 0; @@ -438,7 +434,7 @@ sfw_make_session(srpc_mksn_reqst_t *request, srpc_mksn_reply_t *reply) /* * reject the request if it requires unknown features * NB: old version will always accept all features because it's not - * aware of srpc_msg_t::msg_ses_feats, it's a defect but it's also + * aware of srpc_msg::msg_ses_feats, it's a defect but it's also * harmless because it will return zero feature to console, and it's * console's responsibility to make sure all nodes in a session have * same feature mask. 
@@ -449,7 +445,7 @@ sfw_make_session(srpc_mksn_reqst_t *request, srpc_mksn_reply_t *reply) } /* brand new or create by force */ - LIBCFS_ALLOC(sn, sizeof(sfw_session_t)); + LIBCFS_ALLOC(sn, sizeof(struct sfw_session)); if (!sn) { CERROR("dropping RPC mksn under memory pressure\n"); return -ENOMEM; @@ -473,9 +469,9 @@ sfw_make_session(srpc_mksn_reqst_t *request, srpc_mksn_reply_t *reply) } static int -sfw_remove_session(srpc_rmsn_reqst_t *request, srpc_rmsn_reply_t *reply) +sfw_remove_session(struct srpc_rmsn_reqst *request, struct srpc_rmsn_reply *reply) { - sfw_session_t *sn = sfw_data.fw_session; + struct sfw_session *sn = sfw_data.fw_session; reply->rmsn_sid = !sn ? LST_INVALID_SID : sn->sn_id; @@ -505,9 +501,9 @@ sfw_remove_session(srpc_rmsn_reqst_t *request, srpc_rmsn_reply_t *reply) } static int -sfw_debug_session(srpc_debug_reqst_t *request, srpc_debug_reply_t *reply) +sfw_debug_session(struct srpc_debug_reqst *request, struct srpc_debug_reply *reply) { - sfw_session_t *sn = sfw_data.fw_session; + struct sfw_session *sn = sfw_data.fw_session; if (!sn) { reply->dbg_status = ESRCH; @@ -526,10 +522,10 @@ sfw_debug_session(srpc_debug_reqst_t *request, srpc_debug_reply_t *reply) } static void -sfw_test_rpc_fini(srpc_client_rpc_t *rpc) +sfw_test_rpc_fini(struct srpc_client_rpc *rpc) { - sfw_test_unit_t *tsu = rpc->crpc_priv; - sfw_test_instance_t *tsi = tsu->tsu_instance; + struct sfw_test_unit *tsu = rpc->crpc_priv; + struct sfw_test_instance *tsi = tsu->tsu_instance; /* Called with hold of tsi->tsi_lock */ LASSERT(list_empty(&rpc->crpc_list)); @@ -537,7 +533,7 @@ sfw_test_rpc_fini(srpc_client_rpc_t *rpc) } static inline int -sfw_test_buffers(sfw_test_instance_t *tsi) +sfw_test_buffers(struct sfw_test_instance *tsi) { struct sfw_test_case *tsc; struct srpc_service *svc; @@ -614,10 +610,10 @@ sfw_unload_test(struct sfw_test_instance *tsi) } static void -sfw_destroy_test_instance(sfw_test_instance_t *tsi) +sfw_destroy_test_instance(struct sfw_test_instance *tsi) { - srpc_client_rpc_t *rpc; - sfw_test_unit_t *tsu; + struct srpc_client_rpc *rpc; + struct sfw_test_unit *tsu; if (!tsi->tsi_is_client) goto clean; @@ -630,14 +626,14 @@ sfw_destroy_test_instance(sfw_test_instance_t *tsi) while (!list_empty(&tsi->tsi_units)) { tsu = list_entry(tsi->tsi_units.next, - sfw_test_unit_t, tsu_list); + struct sfw_test_unit, tsu_list); list_del(&tsu->tsu_list); LIBCFS_FREE(tsu, sizeof(*tsu)); } while (!list_empty(&tsi->tsi_free_rpcs)) { rpc = list_entry(tsi->tsi_free_rpcs.next, - srpc_client_rpc_t, crpc_list); + struct srpc_client_rpc, crpc_list); list_del(&rpc->crpc_list); LIBCFS_FREE(rpc, srpc_client_rpc_size(rpc)); } @@ -648,34 +644,34 @@ clean: } static void -sfw_destroy_batch(sfw_batch_t *tsb) +sfw_destroy_batch(struct sfw_batch *tsb) { - sfw_test_instance_t *tsi; + struct sfw_test_instance *tsi; LASSERT(!sfw_batch_active(tsb)); LASSERT(list_empty(&tsb->bat_list)); while (!list_empty(&tsb->bat_tests)) { tsi = list_entry(tsb->bat_tests.next, - sfw_test_instance_t, tsi_list); + struct sfw_test_instance, tsi_list); list_del_init(&tsi->tsi_list); sfw_destroy_test_instance(tsi); } - LIBCFS_FREE(tsb, sizeof(sfw_batch_t)); + LIBCFS_FREE(tsb, sizeof(struct sfw_batch)); } void -sfw_destroy_session(sfw_session_t *sn) +sfw_destroy_session(struct sfw_session *sn) { - sfw_batch_t *batch; + struct sfw_batch *batch; LASSERT(list_empty(&sn->sn_list)); LASSERT(sn != sfw_data.fw_session); while (!list_empty(&sn->sn_batches)) { batch = list_entry(sn->sn_batches.next, - sfw_batch_t, bat_list); + struct sfw_batch, 
bat_list); list_del_init(&batch->bat_list); sfw_destroy_batch(batch); } @@ -685,28 +681,28 @@ sfw_destroy_session(sfw_session_t *sn) } static void -sfw_unpack_addtest_req(srpc_msg_t *msg) +sfw_unpack_addtest_req(struct srpc_msg *msg) { - srpc_test_reqst_t *req = &msg->msg_body.tes_reqst; + struct srpc_test_reqst *req = &msg->msg_body.tes_reqst; LASSERT(msg->msg_type == SRPC_MSG_TEST_REQST); LASSERT(req->tsr_is_client); if (msg->msg_magic == SRPC_MSG_MAGIC) - return; /* no flipping needed */ + return; /* no flipping needed */ LASSERT(msg->msg_magic == __swab32(SRPC_MSG_MAGIC)); if (req->tsr_service == SRPC_SERVICE_BRW) { if (!(msg->msg_ses_feats & LST_FEAT_BULK_LEN)) { - test_bulk_req_t *bulk = &req->tsr_u.bulk_v0; + struct test_bulk_req *bulk = &req->tsr_u.bulk_v0; __swab32s(&bulk->blk_opc); __swab32s(&bulk->blk_npg); __swab32s(&bulk->blk_flags); } else { - test_bulk_req_v1_t *bulk = &req->tsr_u.bulk_v1; + struct test_bulk_req_v1 *bulk = &req->tsr_u.bulk_v1; __swab16s(&bulk->blk_opc); __swab16s(&bulk->blk_flags); @@ -718,7 +714,7 @@ sfw_unpack_addtest_req(srpc_msg_t *msg) } if (req->tsr_service == SRPC_SERVICE_PING) { - test_ping_req_t *ping = &req->tsr_u.ping; + struct test_ping_req *ping = &req->tsr_u.ping; __swab32s(&ping->png_size); __swab32s(&ping->png_flags); @@ -729,14 +725,14 @@ sfw_unpack_addtest_req(srpc_msg_t *msg) } static int -sfw_add_test_instance(sfw_batch_t *tsb, struct srpc_server_rpc *rpc) +sfw_add_test_instance(struct sfw_batch *tsb, struct srpc_server_rpc *rpc) { - srpc_msg_t *msg = &rpc->srpc_reqstbuf->buf_msg; - srpc_test_reqst_t *req = &msg->msg_body.tes_reqst; - srpc_bulk_t *bk = rpc->srpc_bulk; + struct srpc_msg *msg = &rpc->srpc_reqstbuf->buf_msg; + struct srpc_test_reqst *req = &msg->msg_body.tes_reqst; + struct srpc_bulk *bk = rpc->srpc_bulk; int ndest = req->tsr_ndest; - sfw_test_unit_t *tsu; - sfw_test_instance_t *tsi; + struct sfw_test_unit *tsu; + struct sfw_test_instance *tsi; int i; int rc; @@ -789,13 +785,13 @@ sfw_add_test_instance(sfw_batch_t *tsb, struct srpc_server_rpc *rpc) int j; dests = page_address(bk->bk_iovs[i / SFW_ID_PER_PAGE].kiov_page); - LASSERT(dests); /* my pages are within KVM always */ + LASSERT(dests); /* my pages are within KVM always */ id = dests[i % SFW_ID_PER_PAGE]; if (msg->msg_magic != SRPC_MSG_MAGIC) sfw_unpack_id(id); for (j = 0; j < tsi->tsi_concur; j++) { - LIBCFS_ALLOC(tsu, sizeof(sfw_test_unit_t)); + LIBCFS_ALLOC(tsu, sizeof(struct sfw_test_unit)); if (!tsu) { rc = -ENOMEM; CERROR("Can't allocate tsu for %d\n", @@ -824,11 +820,11 @@ error: } static void -sfw_test_unit_done(sfw_test_unit_t *tsu) +sfw_test_unit_done(struct sfw_test_unit *tsu) { - sfw_test_instance_t *tsi = tsu->tsu_instance; - sfw_batch_t *tsb = tsi->tsi_batch; - sfw_session_t *sn = tsb->bat_session; + struct sfw_test_instance *tsi = tsu->tsu_instance; + struct sfw_batch *tsb = tsi->tsi_batch; + struct sfw_session *sn = tsb->bat_session; LASSERT(sfw_test_active(tsi)); @@ -844,8 +840,8 @@ sfw_test_unit_done(sfw_test_unit_t *tsu) spin_lock(&sfw_data.fw_lock); - if (!atomic_dec_and_test(&tsb->bat_nactive) ||/* tsb still active */ - sn == sfw_data.fw_session) { /* sn also active */ + if (!atomic_dec_and_test(&tsb->bat_nactive) || /* tsb still active */ + sn == sfw_data.fw_session) { /* sn also active */ spin_unlock(&sfw_data.fw_lock); return; } @@ -866,10 +862,10 @@ sfw_test_unit_done(sfw_test_unit_t *tsu) } static void -sfw_test_rpc_done(srpc_client_rpc_t *rpc) +sfw_test_rpc_done(struct srpc_client_rpc *rpc) { - sfw_test_unit_t *tsu = rpc->crpc_priv; - 
sfw_test_instance_t *tsi = tsu->tsu_instance; + struct sfw_test_unit *tsu = rpc->crpc_priv; + struct sfw_test_instance *tsi = tsu->tsu_instance; int done = 0; tsi->tsi_ops->tso_done_rpc(tsu, rpc); @@ -900,19 +896,19 @@ sfw_test_rpc_done(srpc_client_rpc_t *rpc) } int -sfw_create_test_rpc(sfw_test_unit_t *tsu, lnet_process_id_t peer, +sfw_create_test_rpc(struct sfw_test_unit *tsu, lnet_process_id_t peer, unsigned features, int nblk, int blklen, - srpc_client_rpc_t **rpcpp) + struct srpc_client_rpc **rpcpp) { - srpc_client_rpc_t *rpc = NULL; - sfw_test_instance_t *tsi = tsu->tsu_instance; + struct srpc_client_rpc *rpc = NULL; + struct sfw_test_instance *tsi = tsu->tsu_instance; spin_lock(&tsi->tsi_lock); LASSERT(sfw_test_active(tsi)); /* pick request from buffer */ rpc = list_first_entry_or_null(&tsi->tsi_free_rpcs, - srpc_client_rpc_t, crpc_list); + struct srpc_client_rpc, crpc_list); if (rpc) { LASSERT(nblk == rpc->crpc_bulk.bk_niov); list_del_init(&rpc->crpc_list); @@ -942,11 +938,11 @@ sfw_create_test_rpc(sfw_test_unit_t *tsu, lnet_process_id_t peer, } static int -sfw_run_test(swi_workitem_t *wi) +sfw_run_test(struct swi_workitem *wi) { - sfw_test_unit_t *tsu = wi->swi_workitem.wi_data; - sfw_test_instance_t *tsi = tsu->tsu_instance; - srpc_client_rpc_t *rpc = NULL; + struct sfw_test_unit *tsu = wi->swi_workitem.wi_data; + struct sfw_test_instance *tsi = tsu->tsu_instance; + struct srpc_client_rpc *rpc = NULL; LASSERT(wi == &tsu->tsu_worker); @@ -991,11 +987,11 @@ test_done: } static int -sfw_run_batch(sfw_batch_t *tsb) +sfw_run_batch(struct sfw_batch *tsb) { - swi_workitem_t *wi; - sfw_test_unit_t *tsu; - sfw_test_instance_t *tsi; + struct swi_workitem *wi; + struct sfw_test_unit *tsu; + struct sfw_test_instance *tsi; if (sfw_batch_active(tsb)) { CDEBUG(D_NET, "Batch already active: %llu (%d)\n", @@ -1026,10 +1022,10 @@ sfw_run_batch(sfw_batch_t *tsb) } int -sfw_stop_batch(sfw_batch_t *tsb, int force) +sfw_stop_batch(struct sfw_batch *tsb, int force) { - sfw_test_instance_t *tsi; - srpc_client_rpc_t *rpc; + struct sfw_test_instance *tsi; + struct srpc_client_rpc *rpc; if (!sfw_batch_active(tsb)) { CDEBUG(D_NET, "Batch %llu inactive\n", tsb->bat_id.bat_id); @@ -1068,9 +1064,9 @@ sfw_stop_batch(sfw_batch_t *tsb, int force) } static int -sfw_query_batch(sfw_batch_t *tsb, int testidx, srpc_batch_reply_t *reply) +sfw_query_batch(struct sfw_batch *tsb, int testidx, struct srpc_batch_reply *reply) { - sfw_test_instance_t *tsi; + struct sfw_test_instance *tsi; if (testidx < 0) return -EINVAL; @@ -1115,11 +1111,11 @@ sfw_alloc_pages(struct srpc_server_rpc *rpc, int cpt, int npages, int len, static int sfw_add_test(struct srpc_server_rpc *rpc) { - sfw_session_t *sn = sfw_data.fw_session; - srpc_test_reply_t *reply = &rpc->srpc_replymsg.msg_body.tes_reply; - srpc_test_reqst_t *request; + struct sfw_session *sn = sfw_data.fw_session; + struct srpc_test_reply *reply = &rpc->srpc_replymsg.msg_body.tes_reply; + struct srpc_test_reqst *request; int rc; - sfw_batch_t *bat; + struct sfw_batch *bat; request = &rpc->srpc_reqstbuf->buf_msg.msg_body.tes_reqst; reply->tsr_sid = !sn ? 
LST_INVALID_SID : sn->sn_id; @@ -1183,11 +1179,11 @@ sfw_add_test(struct srpc_server_rpc *rpc) } static int -sfw_control_batch(srpc_batch_reqst_t *request, srpc_batch_reply_t *reply) +sfw_control_batch(struct srpc_batch_reqst *request, struct srpc_batch_reply *reply) { - sfw_session_t *sn = sfw_data.fw_session; + struct sfw_session *sn = sfw_data.fw_session; int rc = 0; - sfw_batch_t *bat; + struct sfw_batch *bat; reply->bar_sid = !sn ? LST_INVALID_SID : sn->sn_id; @@ -1227,8 +1223,8 @@ static int sfw_handle_server_rpc(struct srpc_server_rpc *rpc) { struct srpc_service *sv = rpc->srpc_scd->scd_svc; - srpc_msg_t *reply = &rpc->srpc_replymsg; - srpc_msg_t *request = &rpc->srpc_reqstbuf->buf_msg; + struct srpc_msg *reply = &rpc->srpc_replymsg; + struct srpc_msg *request = &rpc->srpc_reqstbuf->buf_msg; unsigned features = LST_FEATS_MASK; int rc = 0; @@ -1244,7 +1240,7 @@ sfw_handle_server_rpc(struct srpc_server_rpc *rpc) /* Remove timer to avoid racing with it or expiring active session */ if (sfw_del_session_timer()) { - CERROR("Dropping RPC (%s) from %s: racing with expiry timer.", + CERROR("dropping RPC %s from %s: racing with expiry timer\n", sv->sv_name, libcfs_id2str(rpc->srpc_peer)); spin_unlock(&sfw_data.fw_lock); return -EAGAIN; @@ -1261,7 +1257,7 @@ sfw_handle_server_rpc(struct srpc_server_rpc *rpc) if (sv->sv_id != SRPC_SERVICE_MAKE_SESSION && sv->sv_id != SRPC_SERVICE_DEBUG) { - sfw_session_t *sn = sfw_data.fw_session; + struct sfw_session *sn = sfw_data.fw_session; if (sn && sn->sn_features != request->msg_ses_feats) { @@ -1273,7 +1269,7 @@ sfw_handle_server_rpc(struct srpc_server_rpc *rpc) } } else if (request->msg_ses_feats & ~LST_FEATS_MASK) { - /** + /* * NB: at this point, old version will ignore features and * create new session anyway, so console should be able * to handle this @@ -1377,12 +1373,12 @@ sfw_bulk_ready(struct srpc_server_rpc *rpc, int status) return rc; } -srpc_client_rpc_t * +struct srpc_client_rpc * sfw_create_rpc(lnet_process_id_t peer, int service, unsigned features, int nbulkiov, int bulklen, - void (*done)(srpc_client_rpc_t *), void *priv) + void (*done)(struct srpc_client_rpc *), void *priv) { - srpc_client_rpc_t *rpc = NULL; + struct srpc_client_rpc *rpc = NULL; spin_lock(&sfw_data.fw_lock); @@ -1391,7 +1387,7 @@ sfw_create_rpc(lnet_process_id_t peer, int service, if (!nbulkiov && !list_empty(&sfw_data.fw_zombie_rpcs)) { rpc = list_entry(sfw_data.fw_zombie_rpcs.next, - srpc_client_rpc_t, crpc_list); + struct srpc_client_rpc, crpc_list); list_del(&rpc->crpc_list); srpc_init_client_rpc(rpc, peer, service, 0, 0, @@ -1415,7 +1411,7 @@ sfw_create_rpc(lnet_process_id_t peer, int service, } void -sfw_unpack_message(srpc_msg_t *msg) +sfw_unpack_message(struct srpc_msg *msg) { if (msg->msg_magic == SRPC_MSG_MAGIC) return; /* no flipping needed */ @@ -1424,7 +1420,7 @@ sfw_unpack_message(srpc_msg_t *msg) LASSERT(msg->msg_magic == __swab32(SRPC_MSG_MAGIC)); if (msg->msg_type == SRPC_MSG_STAT_REQST) { - srpc_stat_reqst_t *req = &msg->msg_body.stat_reqst; + struct srpc_stat_reqst *req = &msg->msg_body.stat_reqst; __swab32s(&req->str_type); __swab64s(&req->str_rpyid); @@ -1433,7 +1429,7 @@ sfw_unpack_message(srpc_msg_t *msg) } if (msg->msg_type == SRPC_MSG_STAT_REPLY) { - srpc_stat_reply_t *rep = &msg->msg_body.stat_reply; + struct srpc_stat_reply *rep = &msg->msg_body.stat_reply; __swab32s(&rep->str_status); sfw_unpack_sid(rep->str_sid); @@ -1444,7 +1440,7 @@ sfw_unpack_message(srpc_msg_t *msg) } if (msg->msg_type == SRPC_MSG_MKSN_REQST) { - srpc_mksn_reqst_t *req 
= &msg->msg_body.mksn_reqst; + struct srpc_mksn_reqst *req = &msg->msg_body.mksn_reqst; __swab64s(&req->mksn_rpyid); __swab32s(&req->mksn_force); @@ -1453,7 +1449,7 @@ sfw_unpack_message(srpc_msg_t *msg) } if (msg->msg_type == SRPC_MSG_MKSN_REPLY) { - srpc_mksn_reply_t *rep = &msg->msg_body.mksn_reply; + struct srpc_mksn_reply *rep = &msg->msg_body.mksn_reply; __swab32s(&rep->mksn_status); __swab32s(&rep->mksn_timeout); @@ -1462,7 +1458,7 @@ sfw_unpack_message(srpc_msg_t *msg) } if (msg->msg_type == SRPC_MSG_RMSN_REQST) { - srpc_rmsn_reqst_t *req = &msg->msg_body.rmsn_reqst; + struct srpc_rmsn_reqst *req = &msg->msg_body.rmsn_reqst; __swab64s(&req->rmsn_rpyid); sfw_unpack_sid(req->rmsn_sid); @@ -1470,7 +1466,7 @@ sfw_unpack_message(srpc_msg_t *msg) } if (msg->msg_type == SRPC_MSG_RMSN_REPLY) { - srpc_rmsn_reply_t *rep = &msg->msg_body.rmsn_reply; + struct srpc_rmsn_reply *rep = &msg->msg_body.rmsn_reply; __swab32s(&rep->rmsn_status); sfw_unpack_sid(rep->rmsn_sid); @@ -1478,7 +1474,7 @@ sfw_unpack_message(srpc_msg_t *msg) } if (msg->msg_type == SRPC_MSG_DEBUG_REQST) { - srpc_debug_reqst_t *req = &msg->msg_body.dbg_reqst; + struct srpc_debug_reqst *req = &msg->msg_body.dbg_reqst; __swab64s(&req->dbg_rpyid); __swab32s(&req->dbg_flags); @@ -1487,7 +1483,7 @@ sfw_unpack_message(srpc_msg_t *msg) } if (msg->msg_type == SRPC_MSG_DEBUG_REPLY) { - srpc_debug_reply_t *rep = &msg->msg_body.dbg_reply; + struct srpc_debug_reply *rep = &msg->msg_body.dbg_reply; __swab32s(&rep->dbg_nbatch); __swab32s(&rep->dbg_timeout); @@ -1496,7 +1492,7 @@ sfw_unpack_message(srpc_msg_t *msg) } if (msg->msg_type == SRPC_MSG_BATCH_REQST) { - srpc_batch_reqst_t *req = &msg->msg_body.bat_reqst; + struct srpc_batch_reqst *req = &msg->msg_body.bat_reqst; __swab32s(&req->bar_opc); __swab64s(&req->bar_rpyid); @@ -1508,7 +1504,7 @@ sfw_unpack_message(srpc_msg_t *msg) } if (msg->msg_type == SRPC_MSG_BATCH_REPLY) { - srpc_batch_reply_t *rep = &msg->msg_body.bat_reply; + struct srpc_batch_reply *rep = &msg->msg_body.bat_reply; __swab32s(&rep->bar_status); sfw_unpack_sid(rep->bar_sid); @@ -1516,7 +1512,7 @@ sfw_unpack_message(srpc_msg_t *msg) } if (msg->msg_type == SRPC_MSG_TEST_REQST) { - srpc_test_reqst_t *req = &msg->msg_body.tes_reqst; + struct srpc_test_reqst *req = &msg->msg_body.tes_reqst; __swab64s(&req->tsr_rpyid); __swab64s(&req->tsr_bulkid); @@ -1530,7 +1526,7 @@ sfw_unpack_message(srpc_msg_t *msg) } if (msg->msg_type == SRPC_MSG_TEST_REPLY) { - srpc_test_reply_t *rep = &msg->msg_body.tes_reply; + struct srpc_test_reply *rep = &msg->msg_body.tes_reply; __swab32s(&rep->tsr_status); sfw_unpack_sid(rep->tsr_sid); @@ -1538,7 +1534,7 @@ sfw_unpack_message(srpc_msg_t *msg) } if (msg->msg_type == SRPC_MSG_JOIN_REQST) { - srpc_join_reqst_t *req = &msg->msg_body.join_reqst; + struct srpc_join_reqst *req = &msg->msg_body.join_reqst; __swab64s(&req->join_rpyid); sfw_unpack_sid(req->join_sid); @@ -1546,7 +1542,7 @@ sfw_unpack_message(srpc_msg_t *msg) } if (msg->msg_type == SRPC_MSG_JOIN_REPLY) { - srpc_join_reply_t *rep = &msg->msg_body.join_reply; + struct srpc_join_reply *rep = &msg->msg_body.join_reply; __swab32s(&rep->join_status); __swab32s(&rep->join_timeout); @@ -1558,7 +1554,7 @@ sfw_unpack_message(srpc_msg_t *msg) } void -sfw_abort_rpc(srpc_client_rpc_t *rpc) +sfw_abort_rpc(struct srpc_client_rpc *rpc) { LASSERT(atomic_read(&rpc->crpc_refcount) > 0); LASSERT(rpc->crpc_service <= SRPC_FRAMEWORK_SERVICE_MAX_ID); @@ -1569,7 +1565,7 @@ sfw_abort_rpc(srpc_client_rpc_t *rpc) } void -sfw_post_rpc(srpc_client_rpc_t *rpc) 
+sfw_post_rpc(struct srpc_client_rpc *rpc) { spin_lock(&rpc->crpc_lock); @@ -1584,7 +1580,7 @@ sfw_post_rpc(srpc_client_rpc_t *rpc) spin_unlock(&rpc->crpc_lock); } -static srpc_service_t sfw_services[] = { +static struct srpc_service sfw_services[] = { { /* sv_id */ SRPC_SERVICE_DEBUG, /* sv_name */ "debug", @@ -1628,8 +1624,8 @@ sfw_startup(void) int i; int rc; int error; - srpc_service_t *sv; - sfw_test_case_t *tsc; + struct srpc_service *sv; + struct sfw_test_case *tsc; if (session_timeout < 0) { CERROR("Session timeout must be non-negative: %d\n", @@ -1721,8 +1717,8 @@ sfw_startup(void) void sfw_shutdown(void) { - srpc_service_t *sv; - sfw_test_case_t *tsc; + struct srpc_service *sv; + struct sfw_test_case *tsc; int i; spin_lock(&sfw_data.fw_lock); @@ -1759,10 +1755,10 @@ sfw_shutdown(void) } while (!list_empty(&sfw_data.fw_zombie_rpcs)) { - srpc_client_rpc_t *rpc; + struct srpc_client_rpc *rpc; rpc = list_entry(sfw_data.fw_zombie_rpcs.next, - srpc_client_rpc_t, crpc_list); + struct srpc_client_rpc, crpc_list); list_del(&rpc->crpc_list); LIBCFS_FREE(rpc, srpc_client_rpc_size(rpc)); @@ -1778,7 +1774,7 @@ sfw_shutdown(void) while (!list_empty(&sfw_data.fw_tests)) { tsc = list_entry(sfw_data.fw_tests.next, - sfw_test_case_t, tsc_list); + struct sfw_test_case, tsc_list); srpc_wait_service_shutdown(tsc->tsc_srv_service); diff --git a/drivers/staging/lustre/lnet/selftest/module.c b/drivers/staging/lustre/lnet/selftest/module.c index cc046b1d4d0a..71485f992297 100644 --- a/drivers/staging/lustre/lnet/selftest/module.c +++ b/drivers/staging/lustre/lnet/selftest/module.c @@ -15,11 +15,7 @@ * * You should have received a copy of the GNU General Public License * version 2 along with this program; If not, see - * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf - * - * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, - * CA 95054 USA or visit www.sun.com if you need additional information or - * have any questions. + * http://www.gnu.org/licenses/gpl-2.0.html * * GPL HEADER END */ diff --git a/drivers/staging/lustre/lnet/selftest/ping_test.c b/drivers/staging/lustre/lnet/selftest/ping_test.c index 81a45045e186..9331ca4e3606 100644 --- a/drivers/staging/lustre/lnet/selftest/ping_test.c +++ b/drivers/staging/lustre/lnet/selftest/ping_test.c @@ -15,11 +15,7 @@ * * You should have received a copy of the GNU General Public License * version 2 along with this program; If not, see - * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf - * - * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, - * CA 95054 USA or visit www.sun.com if you need additional information or - * have any questions. 
+ * http://www.gnu.org/licenses/gpl-2.0.html * * GPL HEADER END */ @@ -56,9 +52,9 @@ struct lst_ping_data { static struct lst_ping_data lst_ping_data; static int -ping_client_init(sfw_test_instance_t *tsi) +ping_client_init(struct sfw_test_instance *tsi) { - sfw_session_t *sn = tsi->tsi_batch->bat_session; + struct sfw_session *sn = tsi->tsi_batch->bat_session; LASSERT(tsi->tsi_is_client); LASSERT(sn && !(sn->sn_features & ~LST_FEATS_MASK)); @@ -70,9 +66,9 @@ ping_client_init(sfw_test_instance_t *tsi) } static void -ping_client_fini(sfw_test_instance_t *tsi) +ping_client_fini(struct sfw_test_instance *tsi) { - sfw_session_t *sn = tsi->tsi_batch->bat_session; + struct sfw_session *sn = tsi->tsi_batch->bat_session; int errors; LASSERT(sn); @@ -86,12 +82,12 @@ ping_client_fini(sfw_test_instance_t *tsi) } static int -ping_client_prep_rpc(sfw_test_unit_t *tsu, - lnet_process_id_t dest, srpc_client_rpc_t **rpc) +ping_client_prep_rpc(struct sfw_test_unit *tsu, lnet_process_id_t dest, + struct srpc_client_rpc **rpc) { - srpc_ping_reqst_t *req; - sfw_test_instance_t *tsi = tsu->tsu_instance; - sfw_session_t *sn = tsi->tsi_batch->bat_session; + struct srpc_ping_reqst *req; + struct sfw_test_instance *tsi = tsu->tsu_instance; + struct sfw_session *sn = tsi->tsi_batch->bat_session; struct timespec64 ts; int rc; @@ -118,18 +114,18 @@ ping_client_prep_rpc(sfw_test_unit_t *tsu, } static void -ping_client_done_rpc(sfw_test_unit_t *tsu, srpc_client_rpc_t *rpc) +ping_client_done_rpc(struct sfw_test_unit *tsu, struct srpc_client_rpc *rpc) { - sfw_test_instance_t *tsi = tsu->tsu_instance; - sfw_session_t *sn = tsi->tsi_batch->bat_session; - srpc_ping_reqst_t *reqst = &rpc->crpc_reqstmsg.msg_body.ping_reqst; - srpc_ping_reply_t *reply = &rpc->crpc_replymsg.msg_body.ping_reply; + struct sfw_test_instance *tsi = tsu->tsu_instance; + struct sfw_session *sn = tsi->tsi_batch->bat_session; + struct srpc_ping_reqst *reqst = &rpc->crpc_reqstmsg.msg_body.ping_reqst; + struct srpc_ping_reply *reply = &rpc->crpc_replymsg.msg_body.ping_reply; struct timespec64 ts; LASSERT(sn); if (rpc->crpc_status) { - if (!tsi->tsi_stopping) /* rpc could have been aborted */ + if (!tsi->tsi_stopping) /* rpc could have been aborted */ atomic_inc(&sn->sn_ping_errors); CERROR("Unable to ping %s (%d): %d\n", libcfs_id2str(rpc->crpc_dest), @@ -171,10 +167,10 @@ static int ping_server_handle(struct srpc_server_rpc *rpc) { struct srpc_service *sv = rpc->srpc_scd->scd_svc; - srpc_msg_t *reqstmsg = &rpc->srpc_reqstbuf->buf_msg; - srpc_msg_t *replymsg = &rpc->srpc_replymsg; - srpc_ping_reqst_t *req = &reqstmsg->msg_body.ping_reqst; - srpc_ping_reply_t *rep = &rpc->srpc_replymsg.msg_body.ping_reply; + struct srpc_msg *reqstmsg = &rpc->srpc_reqstbuf->buf_msg; + struct srpc_msg *replymsg = &rpc->srpc_replymsg; + struct srpc_ping_reqst *req = &reqstmsg->msg_body.ping_reqst; + struct srpc_ping_reply *rep = &rpc->srpc_replymsg.msg_body.ping_reply; LASSERT(sv->sv_id == SRPC_SERVICE_PING); @@ -210,7 +206,8 @@ ping_server_handle(struct srpc_server_rpc *rpc) return 0; } -sfw_test_client_ops_t ping_test_client; +struct sfw_test_client_ops ping_test_client; + void ping_init_test_client(void) { ping_test_client.tso_init = ping_client_init; @@ -219,7 +216,8 @@ void ping_init_test_client(void) ping_test_client.tso_done_rpc = ping_client_done_rpc; } -srpc_service_t ping_test_service; +struct srpc_service ping_test_service; + void ping_init_test_service(void) { ping_test_service.sv_id = SRPC_SERVICE_PING; diff --git a/drivers/staging/lustre/lnet/selftest/rpc.c 
b/drivers/staging/lustre/lnet/selftest/rpc.c index 7d7748d96332..3b26d6eb4240 100644 --- a/drivers/staging/lustre/lnet/selftest/rpc.c +++ b/drivers/staging/lustre/lnet/selftest/rpc.c @@ -15,11 +15,7 @@ * * You should have received a copy of the GNU General Public License * version 2 along with this program; If not, see - * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf - * - * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, - * CA 95054 USA or visit www.sun.com if you need additional information or - * have any questions. + * http://www.gnu.org/licenses/gpl-2.0.html * * GPL HEADER END */ @@ -46,19 +42,19 @@ #include "selftest.h" -typedef enum { +enum srpc_state { SRPC_STATE_NONE, SRPC_STATE_NI_INIT, SRPC_STATE_EQ_INIT, SRPC_STATE_RUNNING, SRPC_STATE_STOPPING, -} srpc_state_t; +}; static struct smoketest_rpc { spinlock_t rpc_glock; /* global lock */ - srpc_service_t *rpc_services[SRPC_SERVICE_MAX_ID + 1]; + struct srpc_service *rpc_services[SRPC_SERVICE_MAX_ID + 1]; lnet_handle_eq_t rpc_lnet_eq; /* _the_ LNet event queue */ - srpc_state_t rpc_state; + enum srpc_state rpc_state; srpc_counters_t rpc_counters; __u64 rpc_matchbits; /* matchbits counter */ } srpc_data; @@ -71,7 +67,7 @@ srpc_serv_portal(int svc_id) } /* forward ref's */ -int srpc_handle_rpc(swi_workitem_t *wi); +int srpc_handle_rpc(struct swi_workitem *wi); void srpc_get_counters(srpc_counters_t *cnt) { @@ -88,7 +84,7 @@ void srpc_set_counters(const srpc_counters_t *cnt) } static int -srpc_add_bulk_page(srpc_bulk_t *bk, struct page *pg, int i, int nob) +srpc_add_bulk_page(struct srpc_bulk *bk, struct page *pg, int i, int nob) { nob = min_t(int, nob, PAGE_SIZE); @@ -102,7 +98,7 @@ srpc_add_bulk_page(srpc_bulk_t *bk, struct page *pg, int i, int nob) } void -srpc_free_bulk(srpc_bulk_t *bk) +srpc_free_bulk(struct srpc_bulk *bk) { int i; struct page *pg; @@ -117,25 +113,25 @@ srpc_free_bulk(srpc_bulk_t *bk) __free_page(pg); } - LIBCFS_FREE(bk, offsetof(srpc_bulk_t, bk_iovs[bk->bk_niov])); + LIBCFS_FREE(bk, offsetof(struct srpc_bulk, bk_iovs[bk->bk_niov])); } -srpc_bulk_t * +struct srpc_bulk * srpc_alloc_bulk(int cpt, unsigned bulk_npg, unsigned bulk_len, int sink) { - srpc_bulk_t *bk; + struct srpc_bulk *bk; int i; LASSERT(bulk_npg > 0 && bulk_npg <= LNET_MAX_IOV); LIBCFS_CPT_ALLOC(bk, lnet_cpt_table(), cpt, - offsetof(srpc_bulk_t, bk_iovs[bulk_npg])); + offsetof(struct srpc_bulk, bk_iovs[bulk_npg])); if (!bk) { CERROR("Can't allocate descriptor for %d pages\n", bulk_npg); return NULL; } - memset(bk, 0, offsetof(srpc_bulk_t, bk_iovs[bulk_npg])); + memset(bk, 0, offsetof(struct srpc_bulk, bk_iovs[bulk_npg])); bk->bk_sink = sink; bk->bk_len = bulk_len; bk->bk_niov = bulk_npg; @@ -256,7 +252,7 @@ srpc_service_init(struct srpc_service *svc) svc->sv_shuttingdown = 0; svc->sv_cpt_data = cfs_percpt_alloc(lnet_cpt_table(), - sizeof(struct srpc_service_cd)); + sizeof(*svc->sv_cpt_data)); if (!svc->sv_cpt_data) return -ENOMEM; @@ -338,7 +334,7 @@ srpc_add_service(struct srpc_service *sv) } int -srpc_remove_service(srpc_service_t *sv) +srpc_remove_service(struct srpc_service *sv) { int id = sv->sv_id; @@ -357,7 +353,7 @@ srpc_remove_service(srpc_service_t *sv) static int srpc_post_passive_rdma(int portal, int local, __u64 matchbits, void *buf, int len, int options, lnet_process_id_t peer, - lnet_handle_md_t *mdh, srpc_event_t *ev) + lnet_handle_md_t *mdh, struct srpc_event *ev) { int rc; lnet_md_t md; @@ -396,7 +392,7 @@ srpc_post_passive_rdma(int portal, int local, __u64 matchbits, void *buf, static int 
srpc_post_active_rdma(int portal, __u64 matchbits, void *buf, int len, int options, lnet_process_id_t peer, lnet_nid_t self, - lnet_handle_md_t *mdh, srpc_event_t *ev) + lnet_handle_md_t *mdh, struct srpc_event *ev) { int rc; lnet_md_t md; @@ -449,7 +445,7 @@ srpc_post_active_rdma(int portal, __u64 matchbits, void *buf, int len, static int srpc_post_passive_rqtbuf(int service, int local, void *buf, int len, - lnet_handle_md_t *mdh, srpc_event_t *ev) + lnet_handle_md_t *mdh, struct srpc_event *ev) { lnet_process_id_t any = { 0 }; @@ -697,7 +693,7 @@ srpc_finish_service(struct srpc_service *sv) /* called with sv->sv_lock held */ static void -srpc_service_recycle_buffer(struct srpc_service_cd *scd, srpc_buffer_t *buf) +srpc_service_recycle_buffer(struct srpc_service_cd *scd, struct srpc_buffer *buf) __must_hold(&scd->scd_lock) { if (!scd->scd_svc->sv_shuttingdown && scd->scd_buf_adjust >= 0) { @@ -755,11 +751,11 @@ srpc_abort_service(struct srpc_service *sv) } void -srpc_shutdown_service(srpc_service_t *sv) +srpc_shutdown_service(struct srpc_service *sv) { struct srpc_service_cd *scd; struct srpc_server_rpc *rpc; - srpc_buffer_t *buf; + struct srpc_buffer *buf; int i; CDEBUG(D_NET, "Shutting down service: id %d, name %s\n", @@ -792,9 +788,9 @@ srpc_shutdown_service(srpc_service_t *sv) } static int -srpc_send_request(srpc_client_rpc_t *rpc) +srpc_send_request(struct srpc_client_rpc *rpc) { - srpc_event_t *ev = &rpc->crpc_reqstev; + struct srpc_event *ev = &rpc->crpc_reqstev; int rc; ev->ev_fired = 0; @@ -803,7 +799,7 @@ srpc_send_request(srpc_client_rpc_t *rpc) rc = srpc_post_active_rdma(srpc_serv_portal(rpc->crpc_service), rpc->crpc_service, &rpc->crpc_reqstmsg, - sizeof(srpc_msg_t), LNET_MD_OP_PUT, + sizeof(struct srpc_msg), LNET_MD_OP_PUT, rpc->crpc_dest, LNET_NID_ANY, &rpc->crpc_reqstmdh, ev); if (rc) { @@ -814,9 +810,9 @@ srpc_send_request(srpc_client_rpc_t *rpc) } static int -srpc_prepare_reply(srpc_client_rpc_t *rpc) +srpc_prepare_reply(struct srpc_client_rpc *rpc) { - srpc_event_t *ev = &rpc->crpc_replyev; + struct srpc_event *ev = &rpc->crpc_replyev; __u64 *id = &rpc->crpc_reqstmsg.msg_body.reqst.rpyid; int rc; @@ -827,7 +823,8 @@ srpc_prepare_reply(srpc_client_rpc_t *rpc) *id = srpc_next_id(); rc = srpc_post_passive_rdma(SRPC_RDMA_PORTAL, 0, *id, - &rpc->crpc_replymsg, sizeof(srpc_msg_t), + &rpc->crpc_replymsg, + sizeof(struct srpc_msg), LNET_MD_OP_PUT, rpc->crpc_dest, &rpc->crpc_replymdh, ev); if (rc) { @@ -838,10 +835,10 @@ srpc_prepare_reply(srpc_client_rpc_t *rpc) } static int -srpc_prepare_bulk(srpc_client_rpc_t *rpc) +srpc_prepare_bulk(struct srpc_client_rpc *rpc) { - srpc_bulk_t *bk = &rpc->crpc_bulk; - srpc_event_t *ev = &rpc->crpc_bulkev; + struct srpc_bulk *bk = &rpc->crpc_bulk; + struct srpc_event *ev = &rpc->crpc_bulkev; __u64 *id = &rpc->crpc_reqstmsg.msg_body.reqst.bulkid; int rc; int opt; @@ -873,8 +870,8 @@ srpc_prepare_bulk(srpc_client_rpc_t *rpc) static int srpc_do_bulk(struct srpc_server_rpc *rpc) { - srpc_event_t *ev = &rpc->srpc_ev; - srpc_bulk_t *bk = rpc->srpc_bulk; + struct srpc_event *ev = &rpc->srpc_ev; + struct srpc_bulk *bk = rpc->srpc_bulk; __u64 id = rpc->srpc_reqstbuf->buf_msg.msg_body.reqst.bulkid; int rc; int opt; @@ -903,7 +900,7 @@ srpc_server_rpc_done(struct srpc_server_rpc *rpc, int status) { struct srpc_service_cd *scd = rpc->srpc_scd; struct srpc_service *sv = scd->scd_svc; - srpc_buffer_t *buffer; + struct srpc_buffer *buffer; LASSERT(status || rpc->srpc_wi.swi_state == SWI_STATE_DONE); @@ -948,7 +945,7 @@ srpc_server_rpc_done(struct 
srpc_server_rpc *rpc, int status) if (!sv->sv_shuttingdown && !list_empty(&scd->scd_buf_blocked)) { buffer = list_entry(scd->scd_buf_blocked.next, - srpc_buffer_t, buf_list); + struct srpc_buffer, buf_list); list_del(&buffer->buf_list); srpc_init_server_rpc(rpc, scd, buffer); @@ -963,12 +960,12 @@ srpc_server_rpc_done(struct srpc_server_rpc *rpc, int status) /* handles an incoming RPC */ int -srpc_handle_rpc(swi_workitem_t *wi) +srpc_handle_rpc(struct swi_workitem *wi) { struct srpc_server_rpc *rpc = wi->swi_workitem.wi_data; struct srpc_service_cd *scd = rpc->srpc_scd; struct srpc_service *sv = scd->scd_svc; - srpc_event_t *ev = &rpc->srpc_ev; + struct srpc_event *ev = &rpc->srpc_ev; int rc = 0; LASSERT(wi == &rpc->srpc_wi); @@ -995,8 +992,8 @@ srpc_handle_rpc(swi_workitem_t *wi) default: LBUG(); case SWI_STATE_NEWBORN: { - srpc_msg_t *msg; - srpc_generic_reply_t *reply; + struct srpc_msg *msg; + struct srpc_generic_reply *reply; msg = &rpc->srpc_reqstbuf->buf_msg; reply = &rpc->srpc_replymsg.msg_body.reply; @@ -1077,7 +1074,7 @@ srpc_handle_rpc(swi_workitem_t *wi) static void srpc_client_rpc_expired(void *data) { - srpc_client_rpc_t *rpc = data; + struct srpc_client_rpc *rpc = data; CWARN("Client RPC expired: service %d, peer %s, timeout %d.\n", rpc->crpc_service, libcfs_id2str(rpc->crpc_dest), @@ -1096,7 +1093,7 @@ srpc_client_rpc_expired(void *data) } static void -srpc_add_client_rpc_timer(srpc_client_rpc_t *rpc) +srpc_add_client_rpc_timer(struct srpc_client_rpc *rpc) { struct stt_timer *timer = &rpc->crpc_timer; @@ -1117,7 +1114,7 @@ srpc_add_client_rpc_timer(srpc_client_rpc_t *rpc) * running on any CPU. */ static void -srpc_del_client_rpc_timer(srpc_client_rpc_t *rpc) +srpc_del_client_rpc_timer(struct srpc_client_rpc *rpc) { /* timer not planted or already exploded */ if (!rpc->crpc_timeout) @@ -1138,9 +1135,9 @@ srpc_del_client_rpc_timer(srpc_client_rpc_t *rpc) } static void -srpc_client_rpc_done(srpc_client_rpc_t *rpc, int status) +srpc_client_rpc_done(struct srpc_client_rpc *rpc, int status) { - swi_workitem_t *wi = &rpc->crpc_wi; + struct swi_workitem *wi = &rpc->crpc_wi; LASSERT(status || wi->swi_state == SWI_STATE_DONE); @@ -1175,11 +1172,11 @@ srpc_client_rpc_done(srpc_client_rpc_t *rpc, int status) /* sends an outgoing RPC */ int -srpc_send_rpc(swi_workitem_t *wi) +srpc_send_rpc(struct swi_workitem *wi) { int rc = 0; - srpc_client_rpc_t *rpc; - srpc_msg_t *reply; + struct srpc_client_rpc *rpc; + struct srpc_msg *reply; int do_bulk; LASSERT(wi); @@ -1237,7 +1234,7 @@ srpc_send_rpc(swi_workitem_t *wi) wi->swi_state = SWI_STATE_REQUEST_SENT; /* perhaps more events, fall thru */ case SWI_STATE_REQUEST_SENT: { - srpc_msg_type_t type = srpc_service2reply(rpc->crpc_service); + enum srpc_msg_type type = srpc_service2reply(rpc->crpc_service); if (!rpc->crpc_replyev.ev_fired) break; @@ -1308,15 +1305,15 @@ abort: return 0; } -srpc_client_rpc_t * +struct srpc_client_rpc * srpc_create_client_rpc(lnet_process_id_t peer, int service, int nbulkiov, int bulklen, - void (*rpc_done)(srpc_client_rpc_t *), - void (*rpc_fini)(srpc_client_rpc_t *), void *priv) + void (*rpc_done)(struct srpc_client_rpc *), + void (*rpc_fini)(struct srpc_client_rpc *), void *priv) { - srpc_client_rpc_t *rpc; + struct srpc_client_rpc *rpc; - LIBCFS_ALLOC(rpc, offsetof(srpc_client_rpc_t, + LIBCFS_ALLOC(rpc, offsetof(struct srpc_client_rpc, crpc_bulk.bk_iovs[nbulkiov])); if (!rpc) return NULL; @@ -1328,12 +1325,12 @@ srpc_create_client_rpc(lnet_process_id_t peer, int service, /* called with rpc->crpc_lock held */ void 
-srpc_abort_rpc(srpc_client_rpc_t *rpc, int why) +srpc_abort_rpc(struct srpc_client_rpc *rpc, int why) { LASSERT(why); - if (rpc->crpc_aborted || /* already aborted */ - rpc->crpc_closed) /* callback imminent */ + if (rpc->crpc_aborted || /* already aborted */ + rpc->crpc_closed) /* callback imminent */ return; CDEBUG(D_NET, "Aborting RPC: service %d, peer %s, state %s, why %d\n", @@ -1347,7 +1344,7 @@ srpc_abort_rpc(srpc_client_rpc_t *rpc, int why) /* called with rpc->crpc_lock held */ void -srpc_post_rpc(srpc_client_rpc_t *rpc) +srpc_post_rpc(struct srpc_client_rpc *rpc) { LASSERT(!rpc->crpc_aborted); LASSERT(srpc_data.rpc_state == SRPC_STATE_RUNNING); @@ -1363,7 +1360,7 @@ srpc_post_rpc(srpc_client_rpc_t *rpc) int srpc_send_reply(struct srpc_server_rpc *rpc) { - srpc_event_t *ev = &rpc->srpc_ev; + struct srpc_event *ev = &rpc->srpc_ev; struct srpc_msg *msg = &rpc->srpc_replymsg; struct srpc_buffer *buffer = rpc->srpc_reqstbuf; struct srpc_service_cd *scd = rpc->srpc_scd; @@ -1401,7 +1398,7 @@ srpc_send_reply(struct srpc_server_rpc *rpc) rpc->srpc_peer, rpc->srpc_self, &rpc->srpc_replymdh, ev); if (rc) - ev->ev_fired = 1; /* no more event expected */ + ev->ev_fired = 1; /* no more event expected */ return rc; } @@ -1410,13 +1407,13 @@ static void srpc_lnet_ev_handler(lnet_event_t *ev) { struct srpc_service_cd *scd; - srpc_event_t *rpcev = ev->md.user_ptr; - srpc_client_rpc_t *crpc; + struct srpc_event *rpcev = ev->md.user_ptr; + struct srpc_client_rpc *crpc; struct srpc_server_rpc *srpc; - srpc_buffer_t *buffer; - srpc_service_t *sv; - srpc_msg_t *msg; - srpc_msg_type_t type; + struct srpc_buffer *buffer; + struct srpc_service *sv; + struct srpc_msg *msg; + enum srpc_msg_type type; LASSERT(!in_interrupt()); @@ -1486,7 +1483,7 @@ srpc_lnet_ev_handler(lnet_event_t *ev) LASSERT(ev->type != LNET_EVENT_UNLINK || sv->sv_shuttingdown); - buffer = container_of(ev->md.start, srpc_buffer_t, buf_msg); + buffer = container_of(ev->md.start, struct srpc_buffer, buf_msg); buffer->buf_peer = ev->initiator; buffer->buf_self = ev->target.nid; @@ -1509,7 +1506,7 @@ srpc_lnet_ev_handler(lnet_event_t *ev) scd->scd_buf_err = 0; } - if (!scd->scd_buf_err && /* adding buffer is enabled */ + if (!scd->scd_buf_err && /* adding buffer is enabled */ !scd->scd_buf_adjust && scd->scd_buf_nposted < scd->scd_buf_low) { scd->scd_buf_adjust = max(scd->scd_buf_total / 2, @@ -1663,7 +1660,7 @@ srpc_shutdown(void) spin_lock(&srpc_data.rpc_glock); for (i = 0; i <= SRPC_SERVICE_MAX_ID; i++) { - srpc_service_t *sv = srpc_data.rpc_services[i]; + struct srpc_service *sv = srpc_data.rpc_services[i]; LASSERTF(!sv, "service not empty: id %d, name %s\n", i, sv->sv_name); diff --git a/drivers/staging/lustre/lnet/selftest/rpc.h b/drivers/staging/lustre/lnet/selftest/rpc.h index a79c315f2ceb..4ab2ee264004 100644 --- a/drivers/staging/lustre/lnet/selftest/rpc.h +++ b/drivers/staging/lustre/lnet/selftest/rpc.h @@ -15,11 +15,7 @@ * * You should have received a copy of the GNU General Public License * version 2 along with this program; If not, see - * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf - * - * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, - * CA 95054 USA or visit www.sun.com if you need additional information or - * have any questions. 
+ * http://www.gnu.org/licenses/gpl-2.0.html * * GPL HEADER END */ @@ -44,7 +40,7 @@ * * XXX: *REPLY == *REQST + 1 */ -typedef enum { +enum srpc_msg_type { SRPC_MSG_MKSN_REQST = 0, SRPC_MSG_MKSN_REPLY = 1, SRPC_MSG_RMSN_REQST = 2, @@ -63,7 +59,7 @@ typedef enum { SRPC_MSG_PING_REPLY = 15, SRPC_MSG_JOIN_REQST = 16, SRPC_MSG_JOIN_REPLY = 17, -} srpc_msg_type_t; +}; /* CAVEAT EMPTOR: * All srpc_*_reqst_t's 1st field must be matchbits of reply buffer, @@ -72,122 +68,122 @@ typedef enum { * All srpc_*_reply_t's 1st field must be a __u32 status, and 2nd field * session id if needed. */ -typedef struct { +struct srpc_generic_reqst { __u64 rpyid; /* reply buffer matchbits */ __u64 bulkid; /* bulk buffer matchbits */ -} WIRE_ATTR srpc_generic_reqst_t; +} WIRE_ATTR; -typedef struct { +struct srpc_generic_reply { __u32 status; lst_sid_t sid; -} WIRE_ATTR srpc_generic_reply_t; +} WIRE_ATTR; /* FRAMEWORK RPCs */ -typedef struct { +struct srpc_mksn_reqst { __u64 mksn_rpyid; /* reply buffer matchbits */ lst_sid_t mksn_sid; /* session id */ __u32 mksn_force; /* use brute force */ char mksn_name[LST_NAME_SIZE]; -} WIRE_ATTR srpc_mksn_reqst_t; /* make session request */ +} WIRE_ATTR; /* make session request */ -typedef struct { +struct srpc_mksn_reply { __u32 mksn_status; /* session status */ lst_sid_t mksn_sid; /* session id */ __u32 mksn_timeout; /* session timeout */ char mksn_name[LST_NAME_SIZE]; -} WIRE_ATTR srpc_mksn_reply_t; /* make session reply */ +} WIRE_ATTR; /* make session reply */ -typedef struct { +struct srpc_rmsn_reqst { __u64 rmsn_rpyid; /* reply buffer matchbits */ lst_sid_t rmsn_sid; /* session id */ -} WIRE_ATTR srpc_rmsn_reqst_t; /* remove session request */ +} WIRE_ATTR; /* remove session request */ -typedef struct { +struct srpc_rmsn_reply { __u32 rmsn_status; lst_sid_t rmsn_sid; /* session id */ -} WIRE_ATTR srpc_rmsn_reply_t; /* remove session reply */ +} WIRE_ATTR; /* remove session reply */ -typedef struct { +struct srpc_join_reqst { __u64 join_rpyid; /* reply buffer matchbits */ lst_sid_t join_sid; /* session id to join */ char join_group[LST_NAME_SIZE]; /* group name */ -} WIRE_ATTR srpc_join_reqst_t; +} WIRE_ATTR; -typedef struct { +struct srpc_join_reply { __u32 join_status; /* returned status */ lst_sid_t join_sid; /* session id */ __u32 join_timeout; /* # seconds' inactivity to * expire */ char join_session[LST_NAME_SIZE]; /* session name */ -} WIRE_ATTR srpc_join_reply_t; +} WIRE_ATTR; -typedef struct { +struct srpc_debug_reqst { __u64 dbg_rpyid; /* reply buffer matchbits */ lst_sid_t dbg_sid; /* session id */ __u32 dbg_flags; /* bitmap of debug */ -} WIRE_ATTR srpc_debug_reqst_t; +} WIRE_ATTR; -typedef struct { +struct srpc_debug_reply { __u32 dbg_status; /* returned code */ lst_sid_t dbg_sid; /* session id */ __u32 dbg_timeout; /* session timeout */ __u32 dbg_nbatch; /* # of batches in the node */ char dbg_name[LST_NAME_SIZE]; /* session name */ -} WIRE_ATTR srpc_debug_reply_t; +} WIRE_ATTR; #define SRPC_BATCH_OPC_RUN 1 #define SRPC_BATCH_OPC_STOP 2 #define SRPC_BATCH_OPC_QUERY 3 -typedef struct { +struct srpc_batch_reqst { __u64 bar_rpyid; /* reply buffer matchbits */ lst_sid_t bar_sid; /* session id */ lst_bid_t bar_bid; /* batch id */ __u32 bar_opc; /* create/start/stop batch */ __u32 bar_testidx; /* index of test */ __u32 bar_arg; /* parameters */ -} WIRE_ATTR srpc_batch_reqst_t; +} WIRE_ATTR; -typedef struct { +struct srpc_batch_reply { __u32 bar_status; /* status of request */ lst_sid_t bar_sid; /* session id */ __u32 bar_active; /* # of active tests in batch/test 
*/ __u32 bar_time; /* remained time */ -} WIRE_ATTR srpc_batch_reply_t; +} WIRE_ATTR; -typedef struct { +struct srpc_stat_reqst { __u64 str_rpyid; /* reply buffer matchbits */ lst_sid_t str_sid; /* session id */ __u32 str_type; /* type of stat */ -} WIRE_ATTR srpc_stat_reqst_t; +} WIRE_ATTR; -typedef struct { +struct srpc_stat_reply { __u32 str_status; lst_sid_t str_sid; sfw_counters_t str_fw; srpc_counters_t str_rpc; lnet_counters_t str_lnet; -} WIRE_ATTR srpc_stat_reply_t; +} WIRE_ATTR; -typedef struct { +struct test_bulk_req { __u32 blk_opc; /* bulk operation code */ __u32 blk_npg; /* # of pages */ __u32 blk_flags; /* reserved flags */ -} WIRE_ATTR test_bulk_req_t; +} WIRE_ATTR; -typedef struct { +struct test_bulk_req_v1 { __u16 blk_opc; /* bulk operation code */ __u16 blk_flags; /* data check flags */ __u32 blk_len; /* data length */ __u32 blk_offset; /* reserved: offset */ -} WIRE_ATTR test_bulk_req_v1_t; +} WIRE_ATTR; -typedef struct { +struct test_ping_req { __u32 png_size; /* size of ping message */ __u32 png_flags; /* reserved flags */ -} WIRE_ATTR test_ping_req_t; +} WIRE_ATTR; -typedef struct { +struct srpc_test_reqst { __u64 tsr_rpyid; /* reply buffer matchbits */ __u64 tsr_bulkid; /* bulk buffer matchbits */ lst_sid_t tsr_sid; /* session id */ @@ -201,82 +197,82 @@ typedef struct { __u32 tsr_ndest; /* # of dest nodes */ union { - test_ping_req_t ping; - test_bulk_req_t bulk_v0; - test_bulk_req_v1_t bulk_v1; - } tsr_u; -} WIRE_ATTR srpc_test_reqst_t; + struct test_ping_req ping; + struct test_bulk_req bulk_v0; + struct test_bulk_req_v1 bulk_v1; + } tsr_u; +} WIRE_ATTR; -typedef struct { +struct srpc_test_reply { __u32 tsr_status; /* returned code */ lst_sid_t tsr_sid; -} WIRE_ATTR srpc_test_reply_t; +} WIRE_ATTR; /* TEST RPCs */ -typedef struct { +struct srpc_ping_reqst { __u64 pnr_rpyid; __u32 pnr_magic; __u32 pnr_seq; __u64 pnr_time_sec; __u64 pnr_time_usec; -} WIRE_ATTR srpc_ping_reqst_t; +} WIRE_ATTR; -typedef struct { +struct srpc_ping_reply { __u32 pnr_status; __u32 pnr_magic; __u32 pnr_seq; -} WIRE_ATTR srpc_ping_reply_t; +} WIRE_ATTR; -typedef struct { +struct srpc_brw_reqst { __u64 brw_rpyid; /* reply buffer matchbits */ __u64 brw_bulkid; /* bulk buffer matchbits */ __u32 brw_rw; /* read or write */ __u32 brw_len; /* bulk data len */ __u32 brw_flags; /* bulk data patterns */ -} WIRE_ATTR srpc_brw_reqst_t; /* bulk r/w request */ +} WIRE_ATTR; /* bulk r/w request */ -typedef struct { +struct srpc_brw_reply { __u32 brw_status; -} WIRE_ATTR srpc_brw_reply_t; /* bulk r/w reply */ +} WIRE_ATTR; /* bulk r/w reply */ #define SRPC_MSG_MAGIC 0xeeb0f00d #define SRPC_MSG_VERSION 1 -typedef struct srpc_msg { +struct srpc_msg { __u32 msg_magic; /* magic number */ __u32 msg_version; /* message version number */ - __u32 msg_type; /* type of message body: srpc_msg_type_t */ + __u32 msg_type; /* type of message body: srpc_msg_type */ __u32 msg_reserved0; __u32 msg_reserved1; __u32 msg_ses_feats; /* test session features */ union { - srpc_generic_reqst_t reqst; - srpc_generic_reply_t reply; - - srpc_mksn_reqst_t mksn_reqst; - srpc_mksn_reply_t mksn_reply; - srpc_rmsn_reqst_t rmsn_reqst; - srpc_rmsn_reply_t rmsn_reply; - srpc_debug_reqst_t dbg_reqst; - srpc_debug_reply_t dbg_reply; - srpc_batch_reqst_t bat_reqst; - srpc_batch_reply_t bat_reply; - srpc_stat_reqst_t stat_reqst; - srpc_stat_reply_t stat_reply; - srpc_test_reqst_t tes_reqst; - srpc_test_reply_t tes_reply; - srpc_join_reqst_t join_reqst; - srpc_join_reply_t join_reply; - - srpc_ping_reqst_t ping_reqst; - srpc_ping_reply_t 
ping_reply; - srpc_brw_reqst_t brw_reqst; - srpc_brw_reply_t brw_reply; + struct srpc_generic_reqst reqst; + struct srpc_generic_reply reply; + + struct srpc_mksn_reqst mksn_reqst; + struct srpc_mksn_reply mksn_reply; + struct srpc_rmsn_reqst rmsn_reqst; + struct srpc_rmsn_reply rmsn_reply; + struct srpc_debug_reqst dbg_reqst; + struct srpc_debug_reply dbg_reply; + struct srpc_batch_reqst bat_reqst; + struct srpc_batch_reply bat_reply; + struct srpc_stat_reqst stat_reqst; + struct srpc_stat_reply stat_reply; + struct srpc_test_reqst tes_reqst; + struct srpc_test_reply tes_reply; + struct srpc_join_reqst join_reqst; + struct srpc_join_reply join_reply; + + struct srpc_ping_reqst ping_reqst; + struct srpc_ping_reply ping_reply; + struct srpc_brw_reqst brw_reqst; + struct srpc_brw_reply brw_reply; } msg_body; -} WIRE_ATTR srpc_msg_t; +} WIRE_ATTR; static inline void -srpc_unpack_msg_hdr(srpc_msg_t *msg) +srpc_unpack_msg_hdr(struct srpc_msg *msg) { if (msg->msg_magic == SRPC_MSG_MAGIC) return; /* no flipping needed */ diff --git a/drivers/staging/lustre/lnet/selftest/selftest.h b/drivers/staging/lustre/lnet/selftest/selftest.h index e689ca1846e1..d033ac03d953 100644 --- a/drivers/staging/lustre/lnet/selftest/selftest.h +++ b/drivers/staging/lustre/lnet/selftest/selftest.h @@ -15,12 +15,7 @@ * * You should have received a copy of the GNU General Public License * version 2 along with this program; If not, see - * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf - * copy of GPLv2]. - * - * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, - * CA 95054 USA or visit www.sun.com if you need additional information or - * have any questions. + * http://www.gnu.org/licenses/gpl-2.0.html * * GPL HEADER END */ @@ -93,7 +88,7 @@ struct sfw_test_instance; /* all reply/bulk RDMAs go to this portal */ #define SRPC_RDMA_PORTAL 52 -static inline srpc_msg_type_t +static inline enum srpc_msg_type srpc_service2request(int service) { switch (service) { @@ -128,13 +123,13 @@ srpc_service2request(int service) } } -static inline srpc_msg_type_t +static inline enum srpc_msg_type srpc_service2reply(int service) { return srpc_service2request(service) + 1; } -typedef enum { +enum srpc_event_type { SRPC_BULK_REQ_RCVD = 1, /* passive bulk request(PUT sink/GET source) * received */ SRPC_BULK_PUT_SENT = 2, /* active bulk PUT sent (source) */ @@ -143,57 +138,58 @@ typedef enum { SRPC_REPLY_SENT = 5, /* outgoing reply sent */ SRPC_REQUEST_RCVD = 6, /* incoming request received */ SRPC_REQUEST_SENT = 7, /* outgoing request sent */ -} srpc_event_type_t; +}; /* RPC event */ -typedef struct { - srpc_event_type_t ev_type; /* what's up */ +struct srpc_event { + enum srpc_event_type ev_type; /* what's up */ lnet_event_kind_t ev_lnet; /* LNet event type */ int ev_fired; /* LNet event fired? 
*/ int ev_status; /* LNet event status */ void *ev_data; /* owning server/client RPC */ -} srpc_event_t; +}; -typedef struct { +/* bulk descriptor */ +struct srpc_bulk { int bk_len; /* len of bulk data */ lnet_handle_md_t bk_mdh; int bk_sink; /* sink/source */ int bk_niov; /* # iov in bk_iovs */ lnet_kiov_t bk_iovs[0]; -} srpc_bulk_t; /* bulk descriptor */ +}; /* message buffer descriptor */ -typedef struct srpc_buffer { +struct srpc_buffer { struct list_head buf_list; /* chain on srpc_service::*_msgq */ - srpc_msg_t buf_msg; + struct srpc_msg buf_msg; lnet_handle_md_t buf_mdh; lnet_nid_t buf_self; lnet_process_id_t buf_peer; -} srpc_buffer_t; +}; struct swi_workitem; typedef int (*swi_action_t) (struct swi_workitem *); -typedef struct swi_workitem { +struct swi_workitem { struct cfs_wi_sched *swi_sched; - cfs_workitem_t swi_workitem; + struct cfs_workitem swi_workitem; swi_action_t swi_action; int swi_state; -} swi_workitem_t; +}; /* server-side state of a RPC */ struct srpc_server_rpc { /* chain on srpc_service::*_rpcq */ struct list_head srpc_list; struct srpc_service_cd *srpc_scd; - swi_workitem_t srpc_wi; - srpc_event_t srpc_ev; /* bulk/reply event */ + struct swi_workitem srpc_wi; + struct srpc_event srpc_ev; /* bulk/reply event */ lnet_nid_t srpc_self; lnet_process_id_t srpc_peer; - srpc_msg_t srpc_replymsg; + struct srpc_msg srpc_replymsg; lnet_handle_md_t srpc_replymdh; - srpc_buffer_t *srpc_reqstbuf; - srpc_bulk_t *srpc_bulk; + struct srpc_buffer *srpc_reqstbuf; + struct srpc_bulk *srpc_bulk; unsigned int srpc_aborted; /* being given up */ int srpc_status; @@ -201,14 +197,14 @@ struct srpc_server_rpc { }; /* client-side state of a RPC */ -typedef struct srpc_client_rpc { +struct srpc_client_rpc { struct list_head crpc_list; /* chain on user's lists */ spinlock_t crpc_lock; /* serialize */ int crpc_service; atomic_t crpc_refcount; int crpc_timeout; /* # seconds to wait for reply */ struct stt_timer crpc_timer; - swi_workitem_t crpc_wi; + struct swi_workitem crpc_wi; lnet_process_id_t crpc_dest; void (*crpc_done)(struct srpc_client_rpc *); @@ -221,20 +217,20 @@ typedef struct srpc_client_rpc { unsigned int crpc_closed:1; /* completed */ /* RPC events */ - srpc_event_t crpc_bulkev; /* bulk event */ - srpc_event_t crpc_reqstev; /* request event */ - srpc_event_t crpc_replyev; /* reply event */ + struct srpc_event crpc_bulkev; /* bulk event */ + struct srpc_event crpc_reqstev; /* request event */ + struct srpc_event crpc_replyev; /* reply event */ /* bulk, request(reqst), and reply exchanged on wire */ - srpc_msg_t crpc_reqstmsg; - srpc_msg_t crpc_replymsg; + struct srpc_msg crpc_reqstmsg; + struct srpc_msg crpc_replymsg; lnet_handle_md_t crpc_reqstmdh; lnet_handle_md_t crpc_replymdh; - srpc_bulk_t crpc_bulk; -} srpc_client_rpc_t; + struct srpc_bulk crpc_bulk; +}; #define srpc_client_rpc_size(rpc) \ -offsetof(srpc_client_rpc_t, crpc_bulk.bk_iovs[(rpc)->crpc_bulk.bk_niov]) +offsetof(struct srpc_client_rpc, crpc_bulk.bk_iovs[(rpc)->crpc_bulk.bk_niov]) #define srpc_client_rpc_addref(rpc) \ do { \ @@ -266,13 +262,13 @@ struct srpc_service_cd { /** backref to service */ struct srpc_service *scd_svc; /** event buffer */ - srpc_event_t scd_ev; + struct srpc_event scd_ev; /** free RPC descriptors */ struct list_head scd_rpc_free; /** in-flight RPCs */ struct list_head scd_rpc_active; /** workitem for posting buffer */ - swi_workitem_t scd_buf_wi; + struct swi_workitem scd_buf_wi; /** CPT id */ int scd_cpt; /** error code for scd_buf_wi */ @@ -306,7 +302,7 @@ struct srpc_service_cd { #define 
SFW_FRWK_WI_MIN 16 #define SFW_FRWK_WI_MAX 256 -typedef struct srpc_service { +struct srpc_service { int sv_id; /* service id */ const char *sv_name; /* human readable name */ int sv_wi_total; /* total server workitems */ @@ -320,9 +316,9 @@ typedef struct srpc_service { */ int (*sv_handler)(struct srpc_server_rpc *); int (*sv_bulk_ready)(struct srpc_server_rpc *, int); -} srpc_service_t; +}; -typedef struct { +struct sfw_session { struct list_head sn_list; /* chain on fw_zombie_sessions */ lst_sid_t sn_id; /* unique identifier */ unsigned int sn_timeout; /* # seconds' inactivity to expire */ @@ -335,37 +331,37 @@ typedef struct { atomic_t sn_brw_errors; atomic_t sn_ping_errors; unsigned long sn_started; -} sfw_session_t; +}; #define sfw_sid_equal(sid0, sid1) ((sid0).ses_nid == (sid1).ses_nid && \ (sid0).ses_stamp == (sid1).ses_stamp) -typedef struct { +struct sfw_batch { struct list_head bat_list; /* chain on sn_batches */ lst_bid_t bat_id; /* batch id */ int bat_error; /* error code of batch */ - sfw_session_t *bat_session; /* batch's session */ + struct sfw_session *bat_session; /* batch's session */ atomic_t bat_nactive; /* # of active tests */ struct list_head bat_tests; /* test instances */ -} sfw_batch_t; +}; -typedef struct { +struct sfw_test_client_ops { int (*tso_init)(struct sfw_test_instance *tsi); /* initialize test * client */ void (*tso_fini)(struct sfw_test_instance *tsi); /* finalize test * client */ int (*tso_prep_rpc)(struct sfw_test_unit *tsu, lnet_process_id_t dest, - srpc_client_rpc_t **rpc); /* prep a tests rpc */ + struct srpc_client_rpc **rpc); /* prep a tests rpc */ void (*tso_done_rpc)(struct sfw_test_unit *tsu, - srpc_client_rpc_t *rpc); /* done a test rpc */ -} sfw_test_client_ops_t; + struct srpc_client_rpc *rpc); /* done a test rpc */ +}; -typedef struct sfw_test_instance { +struct sfw_test_instance { struct list_head tsi_list; /* chain on batch */ int tsi_service; /* test type */ - sfw_batch_t *tsi_batch; /* batch */ - sfw_test_client_ops_t *tsi_ops; /* test client operation + struct sfw_batch *tsi_batch; /* batch */ + struct sfw_test_client_ops *tsi_ops; /* test client operation */ /* public parameter for all test units */ @@ -384,11 +380,11 @@ typedef struct sfw_test_instance { struct list_head tsi_active_rpcs; /* active rpcs */ union { - test_ping_req_t ping; /* ping parameter */ - test_bulk_req_t bulk_v0; /* bulk parameter */ - test_bulk_req_v1_t bulk_v1; /* bulk v1 parameter */ + struct test_ping_req ping; /* ping parameter */ + struct test_bulk_req bulk_v0; /* bulk parameter */ + struct test_bulk_req_v1 bulk_v1; /* bulk v1 parameter */ } tsi_u; -} sfw_test_instance_t; +}; /* XXX: trailing (PAGE_SIZE % sizeof(lnet_process_id_t)) bytes at the end of * pages are not used */ @@ -397,57 +393,58 @@ typedef struct sfw_test_instance { #define SFW_MAX_NDESTS (LNET_MAX_IOV * SFW_ID_PER_PAGE) #define sfw_id_pages(n) (((n) + SFW_ID_PER_PAGE - 1) / SFW_ID_PER_PAGE) -typedef struct sfw_test_unit { +struct sfw_test_unit { struct list_head tsu_list; /* chain on lst_test_instance */ lnet_process_id_t tsu_dest; /* id of dest node */ int tsu_loop; /* loop count of the test */ - sfw_test_instance_t *tsu_instance; /* pointer to test instance */ + struct sfw_test_instance *tsu_instance; /* pointer to test instance */ void *tsu_private; /* private data */ - swi_workitem_t tsu_worker; /* workitem of the test unit */ -} sfw_test_unit_t; + struct swi_workitem tsu_worker; /* workitem of the test unit */ +}; -typedef struct sfw_test_case { +struct sfw_test_case { struct list_head 
tsc_list; /* chain on fw_tests */ - srpc_service_t *tsc_srv_service; /* test service */ - sfw_test_client_ops_t *tsc_cli_ops; /* ops of test client */ -} sfw_test_case_t; + struct srpc_service *tsc_srv_service; /* test service */ + struct sfw_test_client_ops *tsc_cli_ops; /* ops of test client */ +}; -srpc_client_rpc_t * +struct srpc_client_rpc * sfw_create_rpc(lnet_process_id_t peer, int service, unsigned features, int nbulkiov, int bulklen, - void (*done)(srpc_client_rpc_t *), void *priv); -int sfw_create_test_rpc(sfw_test_unit_t *tsu, + void (*done)(struct srpc_client_rpc *), void *priv); +int sfw_create_test_rpc(struct sfw_test_unit *tsu, lnet_process_id_t peer, unsigned features, - int nblk, int blklen, srpc_client_rpc_t **rpc); -void sfw_abort_rpc(srpc_client_rpc_t *rpc); -void sfw_post_rpc(srpc_client_rpc_t *rpc); -void sfw_client_rpc_done(srpc_client_rpc_t *rpc); -void sfw_unpack_message(srpc_msg_t *msg); + int nblk, int blklen, struct srpc_client_rpc **rpc); +void sfw_abort_rpc(struct srpc_client_rpc *rpc); +void sfw_post_rpc(struct srpc_client_rpc *rpc); +void sfw_client_rpc_done(struct srpc_client_rpc *rpc); +void sfw_unpack_message(struct srpc_msg *msg); void sfw_free_pages(struct srpc_server_rpc *rpc); -void sfw_add_bulk_page(srpc_bulk_t *bk, struct page *pg, int i); +void sfw_add_bulk_page(struct srpc_bulk *bk, struct page *pg, int i); int sfw_alloc_pages(struct srpc_server_rpc *rpc, int cpt, int npages, int len, int sink); -int sfw_make_session(srpc_mksn_reqst_t *request, srpc_mksn_reply_t *reply); +int sfw_make_session(struct srpc_mksn_reqst *request, + struct srpc_mksn_reply *reply); -srpc_client_rpc_t * +struct srpc_client_rpc * srpc_create_client_rpc(lnet_process_id_t peer, int service, int nbulkiov, int bulklen, - void (*rpc_done)(srpc_client_rpc_t *), - void (*rpc_fini)(srpc_client_rpc_t *), void *priv); -void srpc_post_rpc(srpc_client_rpc_t *rpc); -void srpc_abort_rpc(srpc_client_rpc_t *rpc, int why); -void srpc_free_bulk(srpc_bulk_t *bk); -srpc_bulk_t *srpc_alloc_bulk(int cpt, unsigned bulk_npg, unsigned bulk_len, - int sink); -int srpc_send_rpc(swi_workitem_t *wi); + void (*rpc_done)(struct srpc_client_rpc *), + void (*rpc_fini)(struct srpc_client_rpc *), void *priv); +void srpc_post_rpc(struct srpc_client_rpc *rpc); +void srpc_abort_rpc(struct srpc_client_rpc *rpc, int why); +void srpc_free_bulk(struct srpc_bulk *bk); +struct srpc_bulk *srpc_alloc_bulk(int cpt, unsigned bulk_npg, + unsigned bulk_len, int sink); +int srpc_send_rpc(struct swi_workitem *wi); int srpc_send_reply(struct srpc_server_rpc *rpc); -int srpc_add_service(srpc_service_t *sv); -int srpc_remove_service(srpc_service_t *sv); -void srpc_shutdown_service(srpc_service_t *sv); -void srpc_abort_service(srpc_service_t *sv); -int srpc_finish_service(srpc_service_t *sv); -int srpc_service_add_buffers(srpc_service_t *sv, int nbuffer); -void srpc_service_remove_buffers(srpc_service_t *sv, int nbuffer); +int srpc_add_service(struct srpc_service *sv); +int srpc_remove_service(struct srpc_service *sv); +void srpc_shutdown_service(struct srpc_service *sv); +void srpc_abort_service(struct srpc_service *sv); +int srpc_finish_service(struct srpc_service *sv); +int srpc_service_add_buffers(struct srpc_service *sv, int nbuffer); +void srpc_service_remove_buffers(struct srpc_service *sv, int nbuffer); void srpc_get_counters(srpc_counters_t *cnt); void srpc_set_counters(const srpc_counters_t *cnt); @@ -461,15 +458,17 @@ srpc_serv_is_framework(struct srpc_service *svc) } static inline int -swi_wi_action(cfs_workitem_t 
*wi) +swi_wi_action(struct cfs_workitem *wi) { - swi_workitem_t *swi = container_of(wi, swi_workitem_t, swi_workitem); + struct swi_workitem *swi; + + swi = container_of(wi, struct swi_workitem, swi_workitem); return swi->swi_action(swi); } static inline void -swi_init_workitem(swi_workitem_t *swi, void *data, +swi_init_workitem(struct swi_workitem *swi, void *data, swi_action_t action, struct cfs_wi_sched *sched) { swi->swi_sched = sched; @@ -479,19 +478,19 @@ swi_init_workitem(swi_workitem_t *swi, void *data, } static inline void -swi_schedule_workitem(swi_workitem_t *wi) +swi_schedule_workitem(struct swi_workitem *wi) { cfs_wi_schedule(wi->swi_sched, &wi->swi_workitem); } static inline void -swi_exit_workitem(swi_workitem_t *swi) +swi_exit_workitem(struct swi_workitem *swi) { cfs_wi_exit(swi->swi_sched, &swi->swi_workitem); } static inline int -swi_deschedule_workitem(swi_workitem_t *swi) +swi_deschedule_workitem(struct swi_workitem *swi) { return cfs_wi_deschedule(swi->swi_sched, &swi->swi_workitem); } @@ -502,7 +501,7 @@ void sfw_shutdown(void); void srpc_shutdown(void); static inline void -srpc_destroy_client_rpc(srpc_client_rpc_t *rpc) +srpc_destroy_client_rpc(struct srpc_client_rpc *rpc) { LASSERT(rpc); LASSERT(!srpc_event_pending(rpc)); @@ -515,14 +514,14 @@ srpc_destroy_client_rpc(srpc_client_rpc_t *rpc) } static inline void -srpc_init_client_rpc(srpc_client_rpc_t *rpc, lnet_process_id_t peer, +srpc_init_client_rpc(struct srpc_client_rpc *rpc, lnet_process_id_t peer, int service, int nbulkiov, int bulklen, - void (*rpc_done)(srpc_client_rpc_t *), - void (*rpc_fini)(srpc_client_rpc_t *), void *priv) + void (*rpc_done)(struct srpc_client_rpc *), + void (*rpc_fini)(struct srpc_client_rpc *), void *priv) { LASSERT(nbulkiov <= LNET_MAX_IOV); - memset(rpc, 0, offsetof(srpc_client_rpc_t, + memset(rpc, 0, offsetof(struct srpc_client_rpc, crpc_bulk.bk_iovs[nbulkiov])); INIT_LIST_HEAD(&rpc->crpc_list); @@ -592,7 +591,7 @@ do { \ } while (0) static inline void -srpc_wait_service_shutdown(srpc_service_t *sv) +srpc_wait_service_shutdown(struct srpc_service *sv) { int i = 2; @@ -607,16 +606,16 @@ srpc_wait_service_shutdown(srpc_service_t *sv) } } -extern sfw_test_client_ops_t brw_test_client; +extern struct sfw_test_client_ops brw_test_client; void brw_init_test_client(void); -extern srpc_service_t brw_test_service; +extern struct srpc_service brw_test_service; void brw_init_test_service(void); -extern sfw_test_client_ops_t ping_test_client; +extern struct sfw_test_client_ops ping_test_client; void ping_init_test_client(void); -extern srpc_service_t ping_test_service; +extern struct srpc_service ping_test_service; void ping_init_test_service(void); #endif /* __SELFTEST_SELFTEST_H__ */ diff --git a/drivers/staging/lustre/lnet/selftest/timer.c b/drivers/staging/lustre/lnet/selftest/timer.c index 8be52526ae5a..dcd22580b1f0 100644 --- a/drivers/staging/lustre/lnet/selftest/timer.c +++ b/drivers/staging/lustre/lnet/selftest/timer.c @@ -15,11 +15,7 @@ * * You should have received a copy of the GNU General Public License * version 2 along with this program; If not, see - * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf - * - * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, - * CA 95054 USA or visit www.sun.com if you need additional information or - * have any questions. + * http://www.gnu.org/licenses/gpl-2.0.html * * GPL HEADER END */ @@ -49,7 +45,7 @@ * sorted by increasing expiry time. 
The number of slots is 2**7 (128), * to cover a time period of 1024 seconds into the future before wrapping. */ -#define STTIMER_MINPOLL 3 /* log2 min poll interval (8 s) */ +#define STTIMER_MINPOLL 3 /* log2 min poll interval (8 s) */ #define STTIMER_SLOTTIME (1 << STTIMER_MINPOLL) #define STTIMER_SLOTTIMEMASK (~(STTIMER_SLOTTIME - 1)) #define STTIMER_NSLOTS (1 << 7) @@ -170,20 +166,22 @@ stt_check_timers(unsigned long *last) static int stt_timer_main(void *arg) { + int rc = 0; + cfs_block_allsigs(); while (!stt_data.stt_shuttingdown) { stt_check_timers(&stt_data.stt_prev_slot); - wait_event_timeout(stt_data.stt_waitq, - stt_data.stt_shuttingdown, - cfs_time_seconds(STTIMER_SLOTTIME)); + rc = wait_event_timeout(stt_data.stt_waitq, + stt_data.stt_shuttingdown, + cfs_time_seconds(STTIMER_SLOTTIME)); } spin_lock(&stt_data.stt_lock); stt_data.stt_nthreads--; spin_unlock(&stt_data.stt_lock); - return 0; + return rc; } static int diff --git a/drivers/staging/lustre/lnet/selftest/timer.h b/drivers/staging/lustre/lnet/selftest/timer.h index f1fbebd8a67c..441d6d6b4f8e 100644 --- a/drivers/staging/lustre/lnet/selftest/timer.h +++ b/drivers/staging/lustre/lnet/selftest/timer.h @@ -15,11 +15,7 @@ * * You should have received a copy of the GNU General Public License * version 2 along with this program; If not, see - http://www.sun.com/software/products/lustre/docs/GPLv2.pdf - * - * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, - * CA 95054 USA or visit www.sun.com if you need additional information or - have any questions. + * http://www.gnu.org/licenses/gpl-2.0.html * * GPL HEADER END */
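
The timer.c hunk above sits under a comment describing a simple timer wheel: 2**7 (128) slots, each STTIMER_SLOTTIME (8 s) wide, wrapping after 1024 seconds. A minimal, self-contained userspace sketch of that slot arithmetic only, using the STTIMER_* values shown in the hunk; slot_of() is a hypothetical helper written for illustration, not a function from timer.c:

#include <stdio.h>

#define STTIMER_MINPOLL		3	/* log2 min poll interval (8 s) */
#define STTIMER_SLOTTIME	(1 << STTIMER_MINPOLL)
#define STTIMER_SLOTTIMEMASK	(~(STTIMER_SLOTTIME - 1))
#define STTIMER_NSLOTS		(1 << 7)

/*
 * Hypothetical helper (not from timer.c): map an absolute expiry time in
 * seconds to one of the 128 hash slots, rounding down to the slot's
 * 8-second boundary first.
 */
static unsigned int
slot_of(unsigned long expires)
{
	return ((expires & STTIMER_SLOTTIMEMASK) >> STTIMER_MINPOLL) &
	       (STTIMER_NSLOTS - 1);
}

int main(void)
{
	/* 128 slots x 8 s = 1024 s: expiries that far apart share a slot */
	printf("slot(100)=%u slot(1124)=%u\n", slot_of(100), slot_of(1124));
	return 0;
}

Both calls print slot 12, which is the wrap-around the comment warns about: a slot's list may hold timers from different 1024-second periods, so entries still carry their absolute expiry time.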
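
Several hunks above (srpc_alloc_bulk(), srpc_create_client_rpc(), and the srpc_client_rpc_size() macro) share one allocation idiom: a struct ending in a variable-length array is sized per call with offsetof() so only the needed tail entries are allocated. A minimal userspace sketch of the idiom, assuming hypothetical demo_* names, with malloc()/free() standing in for LIBCFS_CPT_ALLOC()/LIBCFS_FREE():

#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct demo_iov {			/* stand-in for lnet_kiov_t */
	void	*iov_base;
	size_t	 iov_len;
};

struct demo_bulk {			/* stand-in for struct srpc_bulk */
	int		 bk_len;	/* total bulk data length */
	int		 bk_niov;	/* entries in bk_iovs[] */
	struct demo_iov	 bk_iovs[];	/* C99 flexible array member; the
					 * kernel header spells this
					 * bk_iovs[0] */
};

static struct demo_bulk *
demo_alloc_bulk(int niov)
{
	/* header plus exactly niov tail entries, nothing more */
	size_t nob = offsetof(struct demo_bulk, bk_iovs[niov]);
	struct demo_bulk *bk = malloc(nob);

	if (!bk)
		return NULL;
	memset(bk, 0, nob);	/* mirrors the memset() in srpc_alloc_bulk() */
	bk->bk_niov = niov;
	return bk;
}

int main(void)
{
	struct demo_bulk *bk = demo_alloc_bulk(4);

	if (!bk)
		return 1;
	printf("allocated %zu bytes for %d iovs\n",
	       offsetof(struct demo_bulk, bk_iovs[bk->bk_niov]), bk->bk_niov);
	free(bk);
	return 0;
}

One caveat on the sketch: offsetof() with a runtime index, as used here and in the selftest code, relies on behavior GCC and Clang support rather than strict ISO C; a strictly portable variant would compute offsetof(struct demo_bulk, bk_iovs) + niov * sizeof(struct demo_iov).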