@@ -104,10 +104,10 @@ static inline struct qrtr_sock *qrtr_sk(struct sock *sk)
static unsigned int qrtr_local_nid = -1;
/* for node ids */
-static RADIX_TREE(qrtr_nodes, GFP_KERNEL);
+static DEFINE_XARRAY(qrtr_nodes);
/* broadcast list */
static LIST_HEAD(qrtr_all_nodes);
-/* lock for qrtr_nodes, qrtr_all_nodes and node reference */
+/* lock for qrtr_all_nodes */
static DEFINE_MUTEX(qrtr_node_lock);
/* local port allocation management */
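The structural change in this hunk is that DEFINE_XARRAY() brings its own spinlock (xa_lock) along with the storage, so the nid-to-node map no longer needs qrtr_node_lock and the mutex shrinks to covering only the broadcast list. A minimal sketch of the resulting layout, using made-up demo_* names rather than the qrtr ones:

#include <linux/kernel.h>
#include <linux/kref.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/xarray.h>

struct demo_node {
	struct kref ref;		/* lifetime, see release sketch below */
	unsigned int nid;		/* index into demo_nodes */
	struct list_head item;		/* membership in demo_all_nodes */
};

static DEFINE_XARRAY(demo_nodes);	/* nid -> node map, guarded by its xa_lock */
static LIST_HEAD(demo_all_nodes);	/* broadcast list */
static DEFINE_MUTEX(demo_node_lock);	/* guards demo_all_nodes only */

static struct demo_node *demo_node_create(void)
{
	struct demo_node *node = kzalloc(sizeof(*node), GFP_KERNEL);

	if (!node)
		return NULL;

	kref_init(&node->ref);
	node->nid = UINT_MAX;		/* placeholder: nid not assigned yet */

	mutex_lock(&demo_node_lock);
	list_add(&node->item, &demo_all_nodes);
	mutex_unlock(&demo_node_lock);

	return node;
}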
@@ -148,12 +148,15 @@ static int qrtr_bcast_enqueue(struct qrtr_node *node, struct sk_buff *skb,
- * kref_put_mutex. As such, the node mutex is expected to be locked on call.
+ * kref_put_lock. As such, the qrtr_nodes xa_lock is expected to be held on call.
*/
static void __qrtr_node_release(struct kref *kref)
+ __releases(qrtr_nodes.xa_lock)
{
struct qrtr_node *node = container_of(kref, struct qrtr_node, ref);
if (node->nid != QRTR_EP_NID_AUTO)
- radix_tree_delete(&qrtr_nodes, node->nid);
+ __xa_erase(&qrtr_nodes, node->nid);
+ xa_unlock(&qrtr_nodes);
+ mutex_lock(&qrtr_node_lock);
list_del(&node->item);
mutex_unlock(&qrtr_node_lock);
@@ -174,7 +177,7 @@ static void qrtr_node_release(struct qrtr_node *node)
{
if (!node)
return;
- kref_put_mutex(&node->ref, __qrtr_node_release, &qrtr_node_lock);
+ kref_put_lock(&node->ref, __qrtr_node_release, &qrtr_nodes.xa_lock);
}
/* Pass an outgoing packet socket buffer to the endpoint driver. */
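Continuing the demo_* sketch (illustrative names, not the patch's code): the two hunks above pair kref_put_lock() with the XArray's own lock. kref_put_lock() grabs the given spinlock only when the refcount is about to hit zero and then invokes the release callback with that lock held, so erasing the nid entry is atomic with the final put; the callback must use the pre-locked __xa_erase() variant and drop the lock itself, which is what the __releases() annotation documents.

static void __demo_node_release(struct kref *kref)
	__releases(demo_nodes.xa_lock)
{
	struct demo_node *node = container_of(kref, struct demo_node, ref);

	/* Entered with demo_nodes.xa_lock held by kref_put_lock(). */
	if (node->nid != UINT_MAX)
		__xa_erase(&demo_nodes, node->nid);
	xa_unlock(&demo_nodes);

	/* The broadcast list is still covered by the mutex. */
	mutex_lock(&demo_node_lock);
	list_del(&node->item);
	mutex_unlock(&demo_node_lock);

	kfree(node);
}

static void demo_node_release(struct demo_node *node)
{
	if (!node)
		return;
	kref_put_lock(&node->ref, __demo_node_release, &demo_nodes.xa_lock);
}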
@@ -217,10 +220,10 @@ static struct qrtr_node *qrtr_node_lookup(unsigned int nid)
{
struct qrtr_node *node;
- mutex_lock(&qrtr_node_lock);
- node = radix_tree_lookup(&qrtr_nodes, nid);
+ xa_lock(&qrtr_nodes);
+ node = xa_load(&qrtr_nodes, nid);
node = qrtr_node_acquire(node);
- mutex_unlock(&qrtr_node_lock);
+ xa_unlock(&qrtr_nodes);
return node;
}
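For the lookup hunk, xa_load() by itself would be fine under RCU, but the caller also has to take a reference before the entry can go away. Holding xa_lock across the load and the kref_get() closes that window: kref_put_lock() only drops the count to zero and erases the entry while holding this same lock, so anything found here is still live. A sketch continuing the demo_* names:

static struct demo_node *demo_node_lookup(unsigned int nid)
{
	struct demo_node *node;

	xa_lock(&demo_nodes);
	node = xa_load(&demo_nodes, nid);
	if (node)
		kref_get(&node->ref);	/* safe: the final put needs xa_lock */
	xa_unlock(&demo_nodes);

	return node;
}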
@@ -235,10 +238,8 @@ static void qrtr_node_assign(struct qrtr_node *node, unsigned int nid)
if (node->nid != QRTR_EP_NID_AUTO || nid == QRTR_EP_NID_AUTO)
return;
- mutex_lock(&qrtr_node_lock);
- radix_tree_insert(&qrtr_nodes, nid, node);
node->nid = nid;
- mutex_unlock(&qrtr_node_lock);
+ xa_store(&qrtr_nodes, nid, node, GFP_KERNEL);
}
/**
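And the assignment: xa_store() takes the embedded xa_lock itself and allocates any internal nodes with the GFP flags passed at the call site, which is why the mutex_lock()/radix_tree_insert()/mutex_unlock() sequence collapses into a single call. The patch keeps the old behaviour of ignoring the insertion result; a variant of the demo_* sketch that does check it could look like this (xa_store() returns the previous entry, or an xa_err()-encoded pointer when allocation fails):

static int demo_node_assign(struct demo_node *node, unsigned int nid)
{
	void *old;

	if (node->nid != UINT_MAX || nid == UINT_MAX)
		return 0;		/* already assigned, or nothing to assign */

	node->nid = nid;
	/* Locks demo_nodes.xa_lock internally; GFP_KERNEL implies process context. */
	old = xa_store(&demo_nodes, nid, node, GFP_KERNEL);

	return xa_err(old);		/* 0 on success, -ENOMEM on failure */
}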