@@ -339,7 +339,7 @@ int kiblnd_create_peer(struct lnet_ni *ni, struct kib_peer_ni **peerp,
peer_ni->ibp_queue_depth_mod = 0; /* try to use the default */
atomic_set(&peer_ni->ibp_refcount, 1); /* 1 ref for caller */
- INIT_LIST_HEAD(&peer_ni->ibp_list);
+ INIT_HLIST_NODE(&peer_ni->ibp_list);
INIT_LIST_HEAD(&peer_ni->ibp_conns);
INIT_LIST_HEAD(&peer_ni->ibp_tx_queue);
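The linkage field switches from a list_head to an hlist_node, and the initialization is not just cosmetic: kiblnd_peer_active() (changed later in the series) now tests hlist_unhashed() to decide whether a peer is in the table, and that test is only well defined on a node that has been through INIT_HLIST_NODE() or hlist_del_init(). A minimal sketch of the lifecycle, not part of the patch; demo_peer is a made-up stand-in for kib_peer_ni:

    #include <linux/hashtable.h>

    struct demo_peer {                       /* hypothetical analogue of kib_peer_ni */
            u64 nid;                         /* plays the role of ibp_nid */
            struct hlist_node link;          /* plays the role of ibp_list */
    };

    static void demo_init(struct demo_peer *p)
    {
            INIT_HLIST_NODE(&p->link);       /* sets pprev = NULL */
            WARN_ON(!hlist_unhashed(&p->link));  /* "in no table" is now answerable */
    }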
@@ -385,10 +385,10 @@ struct kib_peer_ni *kiblnd_find_peer_locked(struct lnet_ni *ni, lnet_nid_t nid)
* the caller is responsible for accounting the additional reference
* that this creates
*/
- struct list_head *peer_list = kiblnd_nid2peerlist(nid);
struct kib_peer_ni *peer_ni;
- list_for_each_entry(peer_ni, peer_list, ibp_list) {
+ hash_for_each_possible(kiblnd_data.kib_peers, peer_ni,
+ ibp_list, nid) {
LASSERT(!kiblnd_peer_idle(peer_ni));
/*
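hash_for_each_possible() walks only the bucket that nid hashes to, which is exactly what the deleted kiblnd_nid2peerlist() plus list_for_each_entry() pair did by hand. Distinct NIDs can still collide within a bucket, so the NID comparison in the loop body (elided context here) is still required. The same lookup pattern in a sketch, reusing the hypothetical demo_peer above:

    DEFINE_HASHTABLE(demo_peers, 7);         /* 1 << 7 buckets, statically initialized */

    static struct demo_peer *demo_find(u64 nid)
    {
            struct demo_peer *p;

            /* visits only bucket hash_min(nid, HASH_BITS(demo_peers)) */
            hash_for_each_possible(demo_peers, p, link, nid)
                    if (p->nid == nid)       /* collisions still need the key check */
                            return p;
            return NULL;
    }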
@@ -415,7 +415,7 @@ void kiblnd_unlink_peer_locked(struct kib_peer_ni *peer_ni)
LASSERT(list_empty(&peer_ni->ibp_conns));
LASSERT(kiblnd_peer_active(peer_ni));
- list_del_init(&peer_ni->ibp_list);
+ hlist_del_init(&peer_ni->ibp_list);
/* lose peerlist's ref */
kiblnd_peer_decref(peer_ni);
}
@@ -429,24 +429,20 @@ static int kiblnd_get_peer_info(struct lnet_ni *ni, int index,
read_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
- for (i = 0; i < kiblnd_data.kib_peer_hash_size; i++) {
- list_for_each_entry(peer_ni, &kiblnd_data.kib_peers[i],
- ibp_list) {
- LASSERT(!kiblnd_peer_idle(peer_ni));
+ hash_for_each(kiblnd_data.kib_peers, i, peer_ni, ibp_list) {
+ LASSERT(!kiblnd_peer_idle(peer_ni));
- if (peer_ni->ibp_ni != ni)
- continue;
+ if (peer_ni->ibp_ni != ni)
+ continue;
- if (index-- > 0)
- continue;
+ if (index-- > 0)
+ continue;
- *nidp = peer_ni->ibp_nid;
- *count = atomic_read(&peer_ni->ibp_refcount);
+ *nidp = peer_ni->ibp_nid;
+ *count = atomic_read(&peer_ni->ibp_refcount);
- read_unlock_irqrestore(&kiblnd_data.kib_global_lock,
- flags);
- return 0;
- }
+ read_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
+ return 0;
}
read_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
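hash_for_each() folds the outer loop over kib_peer_hash_size and the per-bucket list walk into one iterator; that is why the body loses an indentation level and the unlock/return pair no longer has to be line-wrapped. The shape in isolation, with the hypothetical table:

    int bkt;
    struct demo_peer *p;

    /* one statement for "every bucket, every entry in that bucket" */
    hash_for_each(demo_peers, bkt, p, link)
            pr_info("peer %llu is in bucket %d\n", p->nid, bkt);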
@@ -476,7 +472,7 @@ static void kiblnd_del_peer_locked(struct kib_peer_ni *peer_ni)
static int kiblnd_del_peer(struct lnet_ni *ni, lnet_nid_t nid)
{
LIST_HEAD(zombies);
- struct kib_peer_ni *pnxt;
+ struct hlist_node *pnxt;
struct kib_peer_ni *peer_ni;
int lo;
int hi;
@@ -487,16 +483,16 @@ static int kiblnd_del_peer(struct lnet_ni *ni, lnet_nid_t nid)
write_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
if (nid != LNET_NID_ANY) {
- lo = kiblnd_nid2peerlist(nid) - kiblnd_data.kib_peers;
- hi = kiblnd_nid2peerlist(nid) - kiblnd_data.kib_peers;
+ lo = hash_min(nid, HASH_BITS(kiblnd_data.kib_peers));
+ hi = lo;
} else {
lo = 0;
- hi = kiblnd_data.kib_peer_hash_size - 1;
+ hi = HASH_SIZE(kiblnd_data.kib_peers) - 1;
}
for (i = lo; i <= hi; i++) {
- list_for_each_entry_safe(peer_ni, pnxt,
- &kiblnd_data.kib_peers[i], ibp_list) {
+ hlist_for_each_entry_safe(peer_ni, pnxt,
+ &kiblnd_data.kib_peers[i], ibp_list) {
LASSERT(!kiblnd_peer_idle(peer_ni));
if (peer_ni->ibp_ni != ni)
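Two details in this hunk. First, the single-NID case now derives its bucket index from hash_min(nid, HASH_BITS(...)), the same function hash_add() applies internally, so lo is guaranteed to name the bucket the peer was inserted into; setting hi = lo also drops the old duplicated kiblnd_nid2peerlist() call. Second, hlist_for_each_entry_safe() takes a bare struct hlist_node * as its look-ahead cursor, hence the type change for pnxt. A deletion sketch under the same made-up names (the kfree() is a sketch-only simplification, not how kib_peer_ni teardown works):

    #include <linux/slab.h>

    static void demo_del(u64 nid)
    {
            unsigned int bkt = hash_min(nid, HASH_BITS(demo_peers));
            struct hlist_node *next;         /* the _safe cursor, like pnxt */
            struct demo_peer *p;

            /* _safe variant: p may be unlinked and freed mid-walk */
            hlist_for_each_entry_safe(p, next, &demo_peers[bkt], link) {
                    if (p->nid == nid) {
                            hash_del(&p->link);  /* hlist_del_init() underneath */
                            kfree(p);
                    }
            }
    }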
@@ -533,25 +529,21 @@ static struct kib_conn *kiblnd_get_conn_by_idx(struct lnet_ni *ni, int index)
read_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
- for (i = 0; i < kiblnd_data.kib_peer_hash_size; i++) {
- list_for_each_entry(peer_ni, &kiblnd_data.kib_peers[i],
- ibp_list) {
- LASSERT(!kiblnd_peer_idle(peer_ni));
+ hash_for_each(kiblnd_data.kib_peers, i, peer_ni, ibp_list) {
+ LASSERT(!kiblnd_peer_idle(peer_ni));
- if (peer_ni->ibp_ni != ni)
- continue;
+ if (peer_ni->ibp_ni != ni)
+ continue;
- list_for_each_entry(conn, &peer_ni->ibp_conns,
- ibc_list) {
- if (index-- > 0)
- continue;
+ list_for_each_entry(conn, &peer_ni->ibp_conns,
+ ibc_list) {
+ if (index-- > 0)
+ continue;
- kiblnd_conn_addref(conn);
- read_unlock_irqrestore(
- &kiblnd_data.kib_global_lock,
- flags);
- return conn;
- }
+ kiblnd_conn_addref(conn);
+ read_unlock_irqrestore(&kiblnd_data.kib_global_lock,
+ flags);
+ return conn;
}
}
@@ -1014,7 +1006,7 @@ int kiblnd_close_stale_conns_locked(struct kib_peer_ni *peer_ni,
static int kiblnd_close_matching_conns(struct lnet_ni *ni, lnet_nid_t nid)
{
struct kib_peer_ni *peer_ni;
- struct kib_peer_ni *pnxt;
+ struct hlist_node *pnxt;
int lo;
int hi;
int i;
@@ -1024,16 +1016,16 @@ static int kiblnd_close_matching_conns(struct lnet_ni *ni, lnet_nid_t nid)
write_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
if (nid != LNET_NID_ANY) {
- lo = kiblnd_nid2peerlist(nid) - kiblnd_data.kib_peers;
- hi = kiblnd_nid2peerlist(nid) - kiblnd_data.kib_peers;
+ lo = hash_min(nid, HASH_BITS(kiblnd_data.kib_peers));
+ hi = lo;
} else {
lo = 0;
- hi = kiblnd_data.kib_peer_hash_size - 1;
+ hi = HASH_SIZE(kiblnd_data.kib_peers) - 1;
}
for (i = lo; i <= hi; i++) {
- list_for_each_entry_safe(peer_ni, pnxt,
- &kiblnd_data.kib_peers[i], ibp_list) {
+ hlist_for_each_entry_safe(peer_ni, pnxt,
+ &kiblnd_data.kib_peers[i], ibp_list) {
LASSERT(!kiblnd_peer_idle(peer_ni));
if (peer_ni->ibp_ni != ni)
@@ -2499,6 +2491,7 @@ void kiblnd_destroy_dev(struct kib_dev *dev)
static void kiblnd_base_shutdown(void)
{
struct kib_sched_info *sched;
+ struct kib_peer_ni *peer_ni;
int i;
LASSERT(list_empty(&kiblnd_data.kib_devs));
@@ -2509,9 +2502,8 @@ static void kiblnd_base_shutdown(void)
case IBLND_INIT_ALL:
case IBLND_INIT_DATA:
- LASSERT(kiblnd_data.kib_peers);
- for (i = 0; i < kiblnd_data.kib_peer_hash_size; i++)
- LASSERT(list_empty(&kiblnd_data.kib_peers[i]));
+ hash_for_each(kiblnd_data.kib_peers, i, peer_ni, ibp_list)
+ LASSERT(0);
LASSERT(list_empty(&kiblnd_data.kib_connd_zombies));
LASSERT(list_empty(&kiblnd_data.kib_connd_conns));
LASSERT(list_empty(&kiblnd_data.kib_reconn_list));
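With no bucket array to walk by hand, the shutdown assertion becomes "iterate, and assert if anything is found". hashtable.h also provides hash_empty(), so the same invariant could be spelled in one line, without the loop or the new peer_ni local:

    LASSERT(hash_empty(kiblnd_data.kib_peers));   /* equivalent alternative form */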
@@ -2541,8 +2533,6 @@ static void kiblnd_base_shutdown(void)
break;
}
- kvfree(kiblnd_data.kib_peers);
-
if (kiblnd_data.kib_scheds)
cfs_percpt_free(kiblnd_data.kib_scheds);
@@ -2628,14 +2618,7 @@ static int kiblnd_base_startup(struct net *ns)
INIT_LIST_HEAD(&kiblnd_data.kib_devs);
INIT_LIST_HEAD(&kiblnd_data.kib_failed_devs);
- kiblnd_data.kib_peer_hash_size = IBLND_PEER_HASH_SIZE;
- kiblnd_data.kib_peers = kvmalloc_array(kiblnd_data.kib_peer_hash_size,
- sizeof(struct list_head),
- GFP_KERNEL);
- if (!kiblnd_data.kib_peers)
- goto failed;
- for (i = 0; i < kiblnd_data.kib_peer_hash_size; i++)
- INIT_LIST_HEAD(&kiblnd_data.kib_peers[i]);
+ hash_init(kiblnd_data.kib_peers);
spin_lock_init(&kiblnd_data.kib_connd_lock);
INIT_LIST_HEAD(&kiblnd_data.kib_connd_conns);
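Because DECLARE_HASHTABLE() embeds the bucket array in struct kib_data itself, there is no longer an allocation that can fail: the kvmalloc_array() call, its failure branch, and the bucket-initialization loop all collapse into hash_init(), which just NULLs every hlist_head. Sketched with a hypothetical container struct:

    struct demo_data {
            DECLARE_HASHTABLE(peers, 7);     /* array is embedded, not pointed to */
    };

    static void demo_startup(struct demo_data *d)
    {
            hash_init(d->peers);             /* replaces alloc + INIT_LIST_HEAD loop */
    }

This is the last o2iblnd.c hunk; the following ones are against the header, o2iblnd.h.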
@@ -66,7 +66,7 @@
#include <linux/lnet/lib-lnet.h>
#include "o2iblnd-idl.h"
-#define IBLND_PEER_HASH_SIZE 101 /* # peer_ni lists */
+#define IBLND_PEER_HASH_BITS 7 /* log2 of # peer_ni lists */
#define IBLND_N_SCHED 2
#define IBLND_N_SCHED_HIGH 4
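The table size is now expressed as a power of two: 7 bits gives 128 buckets in place of the old 101. The prime count mattered for the old open-coded hash, ((unsigned int)nid % size); hash_add() instead hashes keys with the multiplicative hash_min()/hash_long() scheme, for which a power-of-two bucket count is the norm. The geometry can be checked at compile time against the hypothetical table from the earlier sketches:

    static void demo_geometry(void)
    {
            BUILD_BUG_ON(HASH_BITS(demo_peers) != 7);
            BUILD_BUG_ON(HASH_SIZE(demo_peers) != 128);  /* 1 << 7, was 101 lists */
    }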
@@ -342,9 +342,7 @@ struct kib_data {
/* stabilize net/dev/peer_ni/conn ops */
rwlock_t kib_global_lock;
/* hash table of all my known peers */
- struct list_head *kib_peers;
- /* size of kib_peers */
- int kib_peer_hash_size;
+ DECLARE_HASHTABLE(kib_peers, IBLND_PEER_HASH_BITS);
/* the connd task (serialisation assertions) */
void *kib_connd;
/* connections to setup/teardown */
@@ -488,7 +486,7 @@ struct kib_conn {
#define IBLND_CONN_DISCONNECTED 5 /* disconnected */
struct kib_peer_ni {
- struct list_head ibp_list; /* stash on global peer_ni list */
+ struct hlist_node ibp_list; /* on peer_ni hash chain */
lnet_nid_t ibp_nid; /* who's on the other end(s) */
struct lnet_ni *ibp_ni; /* LNet interface */
struct list_head ibp_conns; /* all active connections */
@@ -642,20 +640,11 @@ static inline int kiblnd_timeout(void)
list_empty(&peer_ni->ibp_conns);
}
-static inline struct list_head *
-kiblnd_nid2peerlist(lnet_nid_t nid)
-{
- unsigned int hash =
- ((unsigned int)nid) % kiblnd_data.kib_peer_hash_size;
-
- return &kiblnd_data.kib_peers[hash];
-}
-
static inline int
kiblnd_peer_active(struct kib_peer_ni *peer_ni)
{
/* Am I in the peer_ni hash table? */
- return !list_empty(&peer_ni->ibp_list);
+ return !hlist_unhashed(&peer_ni->ibp_list);
}
static inline struct kib_conn *
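kiblnd_nid2peerlist() can go because hashtable.h now owns the bucket arithmetic, and the membership test becomes hlist_unhashed(). That is correct only because the node is set up with INIT_HLIST_NODE() at creation and removed with hlist_del_init() in kiblnd_unlink_peer_locked(), both of which leave it marked unhashed. hashtable.h has hash_hashed() for exactly this question, so an equivalent spelling would be:

    static inline int demo_peer_active(struct demo_peer *p)
    {
            return hash_hashed(&p->link);    /* same as !hlist_unhashed(&p->link) */
    }

The remaining hunks update the callers in o2iblnd_cb.c.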
@@ -1494,7 +1494,7 @@ static void kiblnd_launch_tx(struct lnet_ni *ni, struct kib_tx *tx,
list_add_tail(&tx->tx_list, &peer_ni->ibp_tx_queue);
kiblnd_peer_addref(peer_ni);
- list_add_tail(&peer_ni->ibp_list, kiblnd_nid2peerlist(nid));
+ hash_add(kiblnd_data.kib_peers, &peer_ni->ibp_list, nid);
write_unlock_irqrestore(g_lock, flags);
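Insertion hands hash_add() the raw nid: the key is hashed internally with hash_min(), which is what keeps this add, the hash_for_each_possible() lookup, and the hash_min()-derived lo/hi ranges above all agreeing on the same bucket. In sketch form:

    static void demo_add(struct demo_peer *p)
    {
            /* no nid2peerlist equivalent needed: the table hashes the key itself */
            hash_add(demo_peers, &p->link, p->nid);
    }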
@@ -2533,7 +2533,7 @@ static int kiblnd_passive_connect(struct rdma_cm_id *cmid, void *priv,
LASSERT(!net->ibn_shutdown);
kiblnd_peer_addref(peer_ni);
- list_add_tail(&peer_ni->ibp_list, kiblnd_nid2peerlist(nid));
+ hash_add(kiblnd_data.kib_peers, &peer_ni->ibp_list, nid);
write_unlock_irqrestore(g_lock, flags);
}
@@ -3257,7 +3257,7 @@ static void kiblnd_check_conns(int idx)
LIST_HEAD(closes);
LIST_HEAD(checksends);
LIST_HEAD(timedout_txs);
- struct list_head *peers = &kiblnd_data.kib_peers[idx];
+ struct hlist_head *peers = &kiblnd_data.kib_peers[idx];
struct kib_peer_ni *peer_ni;
struct kib_tx *tx_tmp, *tx;
struct kib_conn *conn;
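kiblnd_check_conns() keeps indexing one bucket directly. That stays legal because DECLARE_HASHTABLE(name, bits) is simply an array of 1 << bits hlist_head entries; only the element type changes, hence the pointer fixup here and the hlist_for_each_entry() in the next hunk. Illustrated against the hypothetical table (idx is assumed to be an unsigned bucket index, as in the driver):

    /* a declared hashtable is an ordinary array and may be indexed */
    struct hlist_head *bucket = &demo_peers[idx % HASH_SIZE(demo_peers)];
    struct demo_peer *p;

    hlist_for_each_entry(p, bucket, link)
            pr_info("peer %llu\n", p->nid);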
@@ -3270,7 +3270,7 @@ static void kiblnd_check_conns(int idx)
*/
write_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
- list_for_each_entry(peer_ni, peers, ibp_list) {
+ hlist_for_each_entry(peer_ni, peers, ibp_list) {
/* Check tx_deadline */
list_for_each_entry_safe(tx, tx_tmp, &peer_ni->ibp_tx_queue, tx_list) {
if (ktime_compare(ktime_get(), tx->tx_deadline) >= 0) {
@@ -3499,7 +3499,7 @@ int kiblnd_connd(void *arg)
if (timeout <= 0) {
const int n = 4;
const int p = 1;
- int chunk = kiblnd_data.kib_peer_hash_size;
+ int chunk = HASH_SIZE(kiblnd_data.kib_peers);
unsigned int lnd_timeout;
spin_unlock_irqrestore(lock, flags);
@@ -3524,7 +3524,7 @@ int kiblnd_connd(void *arg)
for (i = 0; i < chunk; i++) {
kiblnd_check_conns(peer_index);
peer_index = (peer_index + 1) %
- kiblnd_data.kib_peer_hash_size;
+ HASH_SIZE(kiblnd_data.kib_peers);
}
deadline += p * HZ;