int n;
int i;
- LASSERT (msg->ibm_type == IBLND_MSG_GET_REQ ||
+ LASSERT(msg->ibm_type == IBLND_MSG_GET_REQ ||
msg->ibm_type == IBLND_MSG_PUT_ACK);
rd = msg->ibm_type == IBLND_MSG_GET_REQ ?
return 1;
}
- nob = offsetof (kib_msg_t, ibm_u) +
+ nob = offsetof(kib_msg_t, ibm_u) +
kiblnd_rd_msg_size(rd, msg->ibm_type, n);
if (msg->ibm_nob < nob) {
}
void
-kiblnd_pack_msg (lnet_ni_t *ni, kib_msg_t *msg, int version,
+kiblnd_pack_msg(lnet_ni_t *ni, kib_msg_t *msg, int version,
int credits, lnet_nid_t dstnid, __u64 dststamp)
{
kib_net_t *net = ni->ni_data;
if (flip) {
/* leave magic unflipped as a clue to peer endianness */
msg->ibm_version = version;
- CLASSERT (sizeof(msg->ibm_type) == 1);
- CLASSERT (sizeof(msg->ibm_credits) == 1);
+ CLASSERT(sizeof(msg->ibm_type) == 1);
+ CLASSERT(sizeof(msg->ibm_credits) == 1);
msg->ibm_nob = msg_nob;
__swab64s(&msg->ibm_srcnid);
__swab64s(&msg->ibm_srcstamp);
write_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
/* always called with a ref on ni, which prevents ni being shutdown */
- LASSERT (net->ibn_shutdown == 0);
+ LASSERT(net->ibn_shutdown == 0);
/* npeers only grows with the global lock held */
atomic_inc(&net->ibn_npeers);
}
void
-kiblnd_destroy_peer (kib_peer_t *peer)
+kiblnd_destroy_peer(kib_peer_t *peer)
{
kib_net_t *net = peer->ibp_ni->ni_data;
- LASSERT (net != NULL);
- LASSERT (atomic_read(&peer->ibp_refcount) == 0);
- LASSERT (!kiblnd_peer_active(peer));
- LASSERT (peer->ibp_connecting == 0);
- LASSERT (peer->ibp_accepting == 0);
- LASSERT (list_empty(&peer->ibp_conns));
- LASSERT (list_empty(&peer->ibp_tx_queue));
+ LASSERT(net != NULL);
+ LASSERT(atomic_read(&peer->ibp_refcount) == 0);
+ LASSERT(!kiblnd_peer_active(peer));
+ LASSERT(peer->ibp_connecting == 0);
+ LASSERT(peer->ibp_accepting == 0);
+ LASSERT(list_empty(&peer->ibp_conns));
+ LASSERT(list_empty(&peer->ibp_tx_queue));
LIBCFS_FREE(peer, sizeof(*peer));
}
kib_peer_t *
-kiblnd_find_peer_locked (lnet_nid_t nid)
+kiblnd_find_peer_locked(lnet_nid_t nid)
{
/* the caller is responsible for accounting the additional reference
* that this creates */
struct list_head *tmp;
kib_peer_t *peer;
- list_for_each (tmp, peer_list) {
+ list_for_each(tmp, peer_list) {
peer = list_entry(tmp, kib_peer_t, ibp_list);
- LASSERT (peer->ibp_connecting > 0 || /* creating conns */
+ LASSERT(peer->ibp_connecting > 0 || /* creating conns */
peer->ibp_accepting > 0 ||
!list_empty(&peer->ibp_conns)); /* active conn */
}
void
-kiblnd_unlink_peer_locked (kib_peer_t *peer)
+kiblnd_unlink_peer_locked(kib_peer_t *peer)
{
- LASSERT (list_empty(&peer->ibp_conns));
- LASSERT (kiblnd_peer_active(peer));
+ LASSERT(list_empty(&peer->ibp_conns));
+ LASSERT(kiblnd_peer_active(peer));
list_del_init(&peer->ibp_list);
/* lose peerlist's ref */
kiblnd_peer_decref(peer);
for (i = 0; i < kiblnd_data.kib_peer_hash_size; i++) {
- list_for_each (ptmp, &kiblnd_data.kib_peers[i]) {
+ list_for_each(ptmp, &kiblnd_data.kib_peers[i]) {
peer = list_entry(ptmp, kib_peer_t, ibp_list);
- LASSERT (peer->ibp_connecting > 0 ||
+ LASSERT(peer->ibp_connecting > 0 ||
peer->ibp_accepting > 0 ||
!list_empty(&peer->ibp_conns));
if (list_empty(&peer->ibp_conns)) {
kiblnd_unlink_peer_locked(peer);
} else {
- list_for_each_safe (ctmp, cnxt, &peer->ibp_conns) {
+ list_for_each_safe(ctmp, cnxt, &peer->ibp_conns) {
conn = list_entry(ctmp, kib_conn_t, ibc_list);
kiblnd_close_conn_locked(conn, 0);
static int
kiblnd_del_peer(lnet_ni_t *ni, lnet_nid_t nid)
{
- LIST_HEAD (zombies);
+ LIST_HEAD(zombies);
struct list_head *ptmp;
struct list_head *pnxt;
kib_peer_t *peer;
}
for (i = lo; i <= hi; i++) {
- list_for_each_safe (ptmp, pnxt, &kiblnd_data.kib_peers[i]) {
+ list_for_each_safe(ptmp, pnxt, &kiblnd_data.kib_peers[i]) {
peer = list_entry(ptmp, kib_peer_t, ibp_list);
- LASSERT (peer->ibp_connecting > 0 ||
+ LASSERT(peer->ibp_connecting > 0 ||
peer->ibp_accepting > 0 ||
!list_empty(&peer->ibp_conns));
continue;
if (!list_empty(&peer->ibp_tx_queue)) {
- LASSERT (list_empty(&peer->ibp_conns));
+ LASSERT(list_empty(&peer->ibp_conns));
list_splice_init(&peer->ibp_tx_queue,
&zombies);
read_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
for (i = 0; i < kiblnd_data.kib_peer_hash_size; i++) {
- list_for_each (ptmp, &kiblnd_data.kib_peers[i]) {
+ list_for_each(ptmp, &kiblnd_data.kib_peers[i]) {
peer = list_entry(ptmp, kib_peer_t, ibp_list);
- LASSERT (peer->ibp_connecting > 0 ||
+ LASSERT(peer->ibp_connecting > 0 ||
peer->ibp_accepting > 0 ||
!list_empty(&peer->ibp_conns));
if (peer->ibp_ni != ni)
continue;
- list_for_each (ctmp, &peer->ibp_conns) {
+ list_for_each(ctmp, &peer->ibp_conns) {
if (index-- > 0)
continue;
return;
mtu = kiblnd_translate_mtu(*kiblnd_tunables.kib_ib_mtu);
- LASSERT (mtu >= 0);
+ LASSERT(mtu >= 0);
if (mtu != 0)
cmid->route.path_rec->mtu = mtu;
}
}
/* Init successful! */
- LASSERT (state == IBLND_CONN_ACTIVE_CONNECT ||
+ LASSERT(state == IBLND_CONN_ACTIVE_CONNECT ||
state == IBLND_CONN_PASSIVE_WAIT);
conn->ibc_state = state;
}
void
-kiblnd_destroy_conn (kib_conn_t *conn)
+kiblnd_destroy_conn(kib_conn_t *conn)
{
struct rdma_cm_id *cmid = conn->ibc_cmid;
kib_peer_t *peer = conn->ibc_peer;
int rc;
- LASSERT (!in_interrupt());
- LASSERT (atomic_read(&conn->ibc_refcount) == 0);
- LASSERT (list_empty(&conn->ibc_early_rxs));
- LASSERT (list_empty(&conn->ibc_tx_noops));
- LASSERT (list_empty(&conn->ibc_tx_queue));
- LASSERT (list_empty(&conn->ibc_tx_queue_rsrvd));
- LASSERT (list_empty(&conn->ibc_tx_queue_nocred));
- LASSERT (list_empty(&conn->ibc_active_txs));
- LASSERT (conn->ibc_noops_posted == 0);
- LASSERT (conn->ibc_nsends_posted == 0);
+ LASSERT(!in_interrupt());
+ LASSERT(atomic_read(&conn->ibc_refcount) == 0);
+ LASSERT(list_empty(&conn->ibc_early_rxs));
+ LASSERT(list_empty(&conn->ibc_tx_noops));
+ LASSERT(list_empty(&conn->ibc_tx_queue));
+ LASSERT(list_empty(&conn->ibc_tx_queue_rsrvd));
+ LASSERT(list_empty(&conn->ibc_tx_queue_nocred));
+ LASSERT(list_empty(&conn->ibc_active_txs));
+ LASSERT(conn->ibc_noops_posted == 0);
+ LASSERT(conn->ibc_nsends_posted == 0);
switch (conn->ibc_state) {
default:
case IBLND_CONN_DISCONNECTED:
/* connvars should have been freed already */
- LASSERT (conn->ibc_connvars == NULL);
+ LASSERT(conn->ibc_connvars == NULL);
break;
case IBLND_CONN_INIT:
}
int
-kiblnd_close_peer_conns_locked (kib_peer_t *peer, int why)
+kiblnd_close_peer_conns_locked(kib_peer_t *peer, int why)
{
kib_conn_t *conn;
struct list_head *ctmp;
struct list_head *cnxt;
int count = 0;
- list_for_each_safe (ctmp, cnxt, &peer->ibp_conns) {
+ list_for_each_safe(ctmp, cnxt, &peer->ibp_conns) {
conn = list_entry(ctmp, kib_conn_t, ibc_list);
CDEBUG(D_NET, "Closing conn -> %s, version: %x, reason: %d\n",
}
int
-kiblnd_close_stale_conns_locked (kib_peer_t *peer,
+kiblnd_close_stale_conns_locked(kib_peer_t *peer,
int version, __u64 incarnation)
{
kib_conn_t *conn;
struct list_head *cnxt;
int count = 0;
- list_for_each_safe (ctmp, cnxt, &peer->ibp_conns) {
+ list_for_each_safe(ctmp, cnxt, &peer->ibp_conns) {
conn = list_entry(ctmp, kib_conn_t, ibc_list);
if (conn->ibc_version == version &&
}
for (i = lo; i <= hi; i++) {
- list_for_each_safe (ptmp, pnxt, &kiblnd_data.kib_peers[i]) {
+ list_for_each_safe(ptmp, pnxt, &kiblnd_data.kib_peers[i]) {
peer = list_entry(ptmp, kib_peer_t, ibp_list);
- LASSERT (peer->ibp_connecting > 0 ||
+ LASSERT(peer->ibp_connecting > 0 ||
peer->ibp_accepting > 0 ||
!list_empty(&peer->ibp_conns));
break;
}
- LASSERT (conn->ibc_cmid != NULL);
+ LASSERT(conn->ibc_cmid != NULL);
data->ioc_nid = conn->ibc_peer->ibp_nid;
if (conn->ibc_cmid->route.path_rec == NULL)
data->ioc_u32[0] = 0; /* iWarp has no path MTU */
}
void
-kiblnd_query (lnet_ni_t *ni, lnet_nid_t nid, unsigned long *when)
+kiblnd_query(lnet_ni_t *ni, lnet_nid_t nid, unsigned long *when)
{
unsigned long last_alive = 0;
unsigned long now = cfs_time_current();
peer = kiblnd_find_peer_locked(nid);
if (peer != NULL) {
- LASSERT (peer->ibp_connecting > 0 || /* creating conns */
+ LASSERT(peer->ibp_connecting > 0 || /* creating conns */
peer->ibp_accepting > 0 ||
!list_empty(&peer->ibp_conns)); /* active conn */
last_alive = peer->ibp_last_alive;
kib_rx_t *rx;
int i;
- LASSERT (conn->ibc_rxs != NULL);
- LASSERT (conn->ibc_hdev != NULL);
+ LASSERT(conn->ibc_rxs != NULL);
+ LASSERT(conn->ibc_hdev != NULL);
for (i = 0; i < IBLND_RX_MSGS(conn->ibc_version); i++) {
rx = &conn->ibc_rxs[i];
- LASSERT (rx->rx_nob >= 0); /* not posted */
+ LASSERT(rx->rx_nob >= 0); /* not posted */
kiblnd_dma_unmap_single(conn->ibc_hdev->ibh_ibdev,
KIBLND_UNMAP_ADDR(rx, rx_msgunmap,
rx->rx_msgaddr = kiblnd_dma_map_single(conn->ibc_hdev->ibh_ibdev,
rx->rx_msg, IBLND_MSG_SIZE,
DMA_FROM_DEVICE);
- LASSERT (!kiblnd_dma_mapping_error(conn->ibc_hdev->ibh_ibdev,
+ LASSERT(!kiblnd_dma_mapping_error(conn->ibc_hdev->ibh_ibdev,
rx->rx_msgaddr));
KIBLND_UNMAP_ADDR_SET(rx, rx_msgunmap, rx->rx_msgaddr);
lnet_page2phys(pg) + pg_off);
pg_off += IBLND_MSG_SIZE;
- LASSERT (pg_off <= PAGE_SIZE);
+ LASSERT(pg_off <= PAGE_SIZE);
if (pg_off == PAGE_SIZE) {
pg_off = 0;
ipg++;
- LASSERT (ipg <= IBLND_RX_MSG_PAGES(conn->ibc_version));
+ LASSERT(ipg <= IBLND_RX_MSG_PAGES(conn->ibc_version));
}
}
}
kib_tx_t *tx;
int i;
- LASSERT (tpo->tpo_pool.po_allocated == 0);
+ LASSERT(tpo->tpo_pool.po_allocated == 0);
if (hdev == NULL)
return;
int ipage;
int i;
- LASSERT (net != NULL);
+ LASSERT(net != NULL);
dev = net->ibn_dev;
/* pre-mapped messages are not bigger than 1 page */
- CLASSERT (IBLND_MSG_SIZE <= PAGE_SIZE);
+ CLASSERT(IBLND_MSG_SIZE <= PAGE_SIZE);
/* No fancy arithmetic when we do the buffer calculations */
- CLASSERT (PAGE_SIZE % IBLND_MSG_SIZE == 0);
+ CLASSERT(PAGE_SIZE % IBLND_MSG_SIZE == 0);
tpo->tpo_hdev = kiblnd_current_hdev(dev);
tx->tx_msgaddr = kiblnd_dma_map_single(
tpo->tpo_hdev->ibh_ibdev, tx->tx_msg,
IBLND_MSG_SIZE, DMA_TO_DEVICE);
- LASSERT (!kiblnd_dma_mapping_error(tpo->tpo_hdev->ibh_ibdev,
+ LASSERT(!kiblnd_dma_mapping_error(tpo->tpo_hdev->ibh_ibdev,
tx->tx_msgaddr));
KIBLND_UNMAP_ADDR_SET(tx, tx_msgunmap, tx->tx_msgaddr);
list_add(&tx->tx_list, &pool->po_free_list);
page_offset += IBLND_MSG_SIZE;
- LASSERT (page_offset <= PAGE_SIZE);
+ LASSERT(page_offset <= PAGE_SIZE);
if (page_offset == PAGE_SIZE) {
page_offset = 0;
ipage++;
- LASSERT (ipage <= txpgs->ibp_npages);
+ LASSERT(ipage <= txpgs->ibp_npages);
}
}
}
{
__u64 index;
- LASSERT (hdev->ibh_mrs[0] != NULL);
+ LASSERT(hdev->ibh_mrs[0] != NULL);
if (hdev->ibh_nmrs == 1)
return hdev->ibh_mrs[0];
struct ib_mr *mr;
int i;
- LASSERT (hdev->ibh_mrs[0] != NULL);
+ LASSERT(hdev->ibh_mrs[0] != NULL);
if (*kiblnd_tunables.kib_map_on_demand > 0 &&
*kiblnd_tunables.kib_map_on_demand <= rd->rd_nfrags)
static void
kiblnd_destroy_fmr_pool(kib_fmr_pool_t *pool)
{
- LASSERT (pool->fpo_map_count == 0);
+ LASSERT(pool->fpo_map_count == 0);
if (pool->fpo_fmr_pool != NULL)
ib_destroy_fmr_pool(pool->fpo_fmr_pool);
void
kiblnd_fmr_pool_unmap(kib_fmr_t *fmr, int status)
{
- LIST_HEAD (zombies);
+ LIST_HEAD(zombies);
kib_fmr_pool_t *fpo = fmr->fmr_pool;
kib_fmr_poolset_t *fps = fpo->fpo_owner;
unsigned long now = cfs_time_current();
int rc;
rc = ib_fmr_pool_unmap(fmr->fmr_pfmr);
- LASSERT (rc == 0);
+ LASSERT(rc == 0);
if (status != 0) {
rc = ib_flush_fmr_pool(fpo->fpo_fmr_pool);
- LASSERT (rc == 0);
+ LASSERT(rc == 0);
}
fmr->fmr_pool = NULL;
static void
kiblnd_fini_pool(kib_pool_t *pool)
{
- LASSERT (list_empty(&pool->po_free_list));
- LASSERT (pool->po_allocated == 0);
+ LASSERT(list_empty(&pool->po_free_list));
+ LASSERT(pool->po_allocated == 0);
CDEBUG(D_NET, "Finalize %s pool\n", pool->po_owner->ps_name);
}
pool = list_entry(head->next, kib_pool_t, po_list);
list_del(&pool->po_list);
- LASSERT (pool->po_owner != NULL);
+ LASSERT(pool->po_owner != NULL);
pool->po_owner->ps_pool_destroy(pool);
}
}
void
kiblnd_pool_free_node(kib_pool_t *pool, struct list_head *node)
{
- LIST_HEAD (zombies);
+ LIST_HEAD(zombies);
kib_poolset_t *ps = pool->po_owner;
kib_pool_t *tmp;
unsigned long now = cfs_time_current();
if (ps->ps_node_fini != NULL)
ps->ps_node_fini(pool, node);
- LASSERT (pool->po_allocated > 0);
+ LASSERT(pool->po_allocated > 0);
list_add(node, &pool->po_free_list);
pool->po_allocated--;
kib_pmr_pool_t *ppo = container_of(pool, kib_pmr_pool_t, ppo_pool);
kib_phys_mr_t *pmr;
- LASSERT (pool->po_allocated == 0);
+ LASSERT(pool->po_allocated == 0);
while (!list_empty(&pool->po_free_list)) {
pmr = list_entry(pool->po_free_list.next,
kib_phys_mr_t, pmr_list);
- LASSERT (pmr->pmr_mr == NULL);
+ LASSERT(pmr->pmr_mr == NULL);
list_del(&pmr->pmr_list);
if (pmr->pmr_ipb != NULL) {
kib_tx_pool_t *tpo = container_of(pool, kib_tx_pool_t, tpo_pool);
int i;
- LASSERT (pool->po_allocated == 0);
+ LASSERT(pool->po_allocated == 0);
if (tpo->tpo_tx_pages != NULL) {
kiblnd_unmap_tx_pool(tpo);
return PTR_ERR(mr);
}
- LASSERT (iova == ipb.addr);
+ LASSERT(iova == ipb.addr);
hdev->ibh_mrs[i] = mr;
}
int
kiblnd_dev_failover(kib_dev_t *dev)
{
- LIST_HEAD (zombie_tpo);
- LIST_HEAD (zombie_ppo);
- LIST_HEAD (zombie_fpo);
+ LIST_HEAD(zombie_tpo);
+ LIST_HEAD(zombie_ppo);
+ LIST_HEAD(zombie_fpo);
struct rdma_cm_id *cmid = NULL;
kib_hca_dev_t *hdev = NULL;
kib_hca_dev_t *old;
int rc = 0;
int i;
- LASSERT (*kiblnd_tunables.kib_dev_failover > 1 ||
+ LASSERT(*kiblnd_tunables.kib_dev_failover > 1 ||
dev->ibd_can_failover ||
dev->ibd_hdev == NULL);
}
void
-kiblnd_destroy_dev (kib_dev_t *dev)
+kiblnd_destroy_dev(kib_dev_t *dev)
{
- LASSERT (dev->ibd_nnets == 0);
- LASSERT (list_empty(&dev->ibd_nets));
+ LASSERT(dev->ibd_nnets == 0);
+ LASSERT(list_empty(&dev->ibd_nets));
list_del(&dev->ibd_fail_list);
list_del(&dev->ibd_list);
struct kib_sched_info *sched;
int i;
- LASSERT (list_empty(&kiblnd_data.kib_devs));
+ LASSERT(list_empty(&kiblnd_data.kib_devs));
CDEBUG(D_MALLOC, "before LND base cleanup: kmem %d\n",
atomic_read(&libcfs_kmemory));
case IBLND_INIT_ALL:
case IBLND_INIT_DATA:
- LASSERT (kiblnd_data.kib_peers != NULL);
+ LASSERT(kiblnd_data.kib_peers != NULL);
for (i = 0; i < kiblnd_data.kib_peer_hash_size; i++) {
- LASSERT (list_empty(&kiblnd_data.kib_peers[i]));
+ LASSERT(list_empty(&kiblnd_data.kib_peers[i]));
}
- LASSERT (list_empty(&kiblnd_data.kib_connd_zombies));
- LASSERT (list_empty(&kiblnd_data.kib_connd_conns));
+ LASSERT(list_empty(&kiblnd_data.kib_connd_zombies));
+ LASSERT(list_empty(&kiblnd_data.kib_connd_conns));
/* flag threads to terminate; wake and wait for them to die */
kiblnd_data.kib_shutdown = 1;
}
void
-kiblnd_shutdown (lnet_ni_t *ni)
+kiblnd_shutdown(lnet_ni_t *ni)
{
kib_net_t *net = ni->ni_data;
rwlock_t *g_lock = &kiblnd_data.kib_global_lock;
/* fall through */
case IBLND_INIT_NOTHING:
- LASSERT (atomic_read(&net->ibn_nconns) == 0);
+ LASSERT(atomic_read(&net->ibn_nconns) == 0);
if (net->ibn_dev != NULL &&
net->ibn_dev->ibd_nnets == 0)
int rc;
int i;
- LASSERT (kiblnd_data.kib_init == IBLND_INIT_NOTHING);
+ LASSERT(kiblnd_data.kib_init == IBLND_INIT_NOTHING);
try_module_get(THIS_MODULE);
memset(&kiblnd_data, 0, sizeof(kiblnd_data)); /* zero pointers, flags etc */
}
int
-kiblnd_startup (lnet_ni_t *ni)
+kiblnd_startup(lnet_ni_t *ni)
{
char *ifname;
kib_dev_t *ibdev = NULL;
int rc;
int newdev;
- LASSERT (ni->ni_lnd == &the_o2iblnd);
+ LASSERT(ni->ni_lnd == &the_o2iblnd);
if (kiblnd_data.kib_init == IBLND_INIT_NOTHING) {
rc = kiblnd_base_startup();
if (ni->ni_interfaces[0] != NULL) {
/* Use the IPoIB interface specified in 'networks=' */
- CLASSERT (LNET_MAX_INTERFACES > 1);
+ CLASSERT(LNET_MAX_INTERFACES > 1);
if (ni->ni_interfaces[1] != NULL) {
CERROR("Multiple interfaces not supported\n");
goto failed;
}
static void __exit
-kiblnd_module_fini (void)
+kiblnd_module_fini(void)
{
lnet_unregister_lnd(&the_o2iblnd);
}
static int __init
-kiblnd_module_init (void)
+kiblnd_module_init(void)
{
int rc;
- CLASSERT (sizeof(kib_msg_t) <= IBLND_MSG_SIZE);
- CLASSERT (offsetof(kib_msg_t, ibm_u.get.ibgm_rd.rd_frags[IBLND_MAX_RDMA_FRAGS])
+ CLASSERT(sizeof(kib_msg_t) <= IBLND_MSG_SIZE);
+ CLASSERT(offsetof(kib_msg_t, ibm_u.get.ibgm_rd.rd_frags[IBLND_MAX_RDMA_FRAGS])
<= IBLND_MSG_SIZE);
- CLASSERT (offsetof(kib_msg_t, ibm_u.putack.ibpam_rd.rd_frags[IBLND_MAX_RDMA_FRAGS])
+ CLASSERT(offsetof(kib_msg_t, ibm_u.putack.ibpam_rd.rd_frags[IBLND_MAX_RDMA_FRAGS])
<= IBLND_MSG_SIZE);
rc = kiblnd_tunables_init();