--- a/fs/hfs/bnode.c
+++ b/fs/hfs/bnode.c
@@ -257,7 +257,7 @@ static struct hfs_bnode *__hfs_bnode_create(struct hfs_btree *tree, u32 cnid)
node->tree = tree;
node->this = cnid;
set_bit(HFS_BNODE_NEW, &node->flags);
- atomic_set(&node->refcnt, 1);
+ refcount_set(&node->refcnt, 1);
hfs_dbg(BNODE_REFS, "new_node(%d:%d): 1\n",
node->tree->cnid, node->this);
init_waitqueue_head(&node->lock_wq);
@@ -302,7 +302,7 @@ void hfs_bnode_unhash(struct hfs_bnode *node)
struct hfs_bnode **p;

hfs_dbg(BNODE_REFS, "remove_node(%d:%d): %d\n",
- node->tree->cnid, node->this, atomic_read(&node->refcnt));
+ node->tree->cnid, node->this, refcount_read(&node->refcnt));
for (p = &node->tree->node_hash[hfs_bnode_hash(node->this)];
*p && *p != node; p = &(*p)->next_hash)
;
@@ -446,10 +446,10 @@ struct hfs_bnode *hfs_bnode_create(struct hfs_btree *tree, u32 num)
void hfs_bnode_get(struct hfs_bnode *node)
{
if (node) {
- atomic_inc(&node->refcnt);
+ refcount_inc(&node->refcnt);
hfs_dbg(BNODE_REFS, "get_node(%d:%d): %d\n",
node->tree->cnid, node->this,
- atomic_read(&node->refcnt));
+ refcount_read(&node->refcnt));
}
}

@@ -462,9 +462,9 @@ void hfs_bnode_put(struct hfs_bnode *node)

hfs_dbg(BNODE_REFS, "put_node(%d:%d): %d\n",
node->tree->cnid, node->this,
- atomic_read(&node->refcnt));
- BUG_ON(!atomic_read(&node->refcnt));
- if (!atomic_dec_and_lock(&node->refcnt, &tree->hash_lock))
+ refcount_read(&node->refcnt));
+ BUG_ON(!refcount_read(&node->refcnt));
+ if (!refcount_dec_and_lock(&node->refcnt, &tree->hash_lock))
return;
for (i = 0; i < tree->pages_per_bnode; i++) {
if (!node->page[i])
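
The put path above is the only call site where the conversion is more than a rename: refcount_dec_and_lock() keeps the semantics of atomic_dec_and_lock(), dropping one reference and, only when the count reaches zero, returning true with the spinlock held so teardown cannot race a concurrent lookup. A minimal sketch of the pattern, using a hypothetical struct obj and obj_table_lock rather than the HFS types:

    #include <linux/refcount.h>
    #include <linux/slab.h>
    #include <linux/spinlock.h>

    /* Hypothetical stand-ins for hfs_bnode and tree->hash_lock. */
    struct obj {
            refcount_t refcnt;
    };

    static DEFINE_SPINLOCK(obj_table_lock);

    static void obj_put(struct obj *o)
    {
            /* Not the last reference: nothing else to do. */
            if (!refcount_dec_and_lock(&o->refcnt, &obj_table_lock))
                    return;
            /* Count hit zero and obj_table_lock is now held: remove
             * the object from any lookup structure here, then free. */
            spin_unlock(&obj_table_lock);
            kfree(o);
    }
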
--- a/fs/hfs/btree.c
+++ b/fs/hfs/btree.c
@@ -144,10 +144,10 @@ void hfs_btree_close(struct hfs_btree *tree)
for (i = 0; i < NODE_HASH_SIZE; i++) {
while ((node = tree->node_hash[i])) {
tree->node_hash[i] = node->next_hash;
- if (atomic_read(&node->refcnt))
+ if (refcount_read(&node->refcnt))
pr_err("node %d:%d still has %d user(s)!\n",
node->tree->cnid, node->this,
- atomic_read(&node->refcnt));
+ refcount_read(&node->refcnt));
hfs_bnode_free(node);
tree->node_hash_cnt--;
}
--- a/fs/hfs/btree.h
+++ b/fs/hfs/btree.h
@@ -6,6 +6,7 @@
* (C) 2003 Ardis Technologies <roman@ardistech.com>
*/

+#include <linux/refcount.h>
#include "hfs_fs.h"

typedef int (*btree_keycmp)(const btree_key *, const btree_key *);
@@ -57,7 +58,7 @@ struct hfs_bnode {
struct hfs_bnode *next_hash;
unsigned long flags;
wait_queue_head_t lock_wq;
- atomic_t refcnt;
+ refcount_t refcnt;
unsigned int page_offset;
struct page *page[0];
};
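
The type change in the struct is what forces the new include: refcount_t and its API live in <linux/refcount.h>. The API also adds one rule that atomic_t never enforced: a refcount_t must be initialized to a nonzero value (refcount_set(), or REFCOUNT_INIT for static objects) before the first refcount_inc(), because incrementing from zero looks like a use-after-free and triggers a warning. A minimal sketch with a hypothetical struct foo:

    #include <linux/refcount.h>

    /* Hypothetical struct mirroring the hfs_bnode field change. */
    struct foo {
            refcount_t refcnt;
    };

    static void foo_init(struct foo *f)
    {
            /* Replaces atomic_set(&f->refcnt, 1): the object starts
             * life with one reference, held by its creator. */
            refcount_set(&f->refcnt, 1);
    }
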
--- a/fs/hfs/inode.c
+++ b/fs/hfs/inode.c
@@ -98,7 +98,7 @@ static int hfs_releasepage(struct page *page, gfp_t mask)
node = hfs_bnode_findhash(tree, nidx);
if (!node)
;
- else if (atomic_read(&node->refcnt))
+ else if (refcount_read(&node->refcnt))
res = 0;
if (res && node) {
hfs_bnode_unhash(node);
@@ -113,7 +113,7 @@ static int hfs_releasepage(struct page *page, gfp_t mask)
node = hfs_bnode_findhash(tree, nidx++);
if (!node)
continue;
- if (atomic_read(&node->refcnt)) {
+ if (refcount_read(&node->refcnt)) {
res = 0;
break;
}
--- a/fs/hfsplus/bnode.c
+++ b/fs/hfsplus/bnode.c
@@ -422,7 +422,7 @@ static struct hfs_bnode *__hfs_bnode_create(struct hfs_btree *tree, u32 cnid)
node->tree = tree;
node->this = cnid;
set_bit(HFS_BNODE_NEW, &node->flags);
- atomic_set(&node->refcnt, 1);
+ refcount_set(&node->refcnt, 1);
hfs_dbg(BNODE_REFS, "new_node(%d:%d): 1\n",
node->tree->cnid, node->this);
init_waitqueue_head(&node->lock_wq);
@@ -468,7 +468,7 @@ void hfs_bnode_unhash(struct hfs_bnode *node)
struct hfs_bnode **p;

hfs_dbg(BNODE_REFS, "remove_node(%d:%d): %d\n",
- node->tree->cnid, node->this, atomic_read(&node->refcnt));
+ node->tree->cnid, node->this, refcount_read(&node->refcnt));
for (p = &node->tree->node_hash[hfs_bnode_hash(node->this)];
*p && *p != node; p = &(*p)->next_hash)
;
@@ -614,10 +614,10 @@ struct hfs_bnode *hfs_bnode_create(struct hfs_btree *tree, u32 num)
void hfs_bnode_get(struct hfs_bnode *node)
{
if (node) {
- atomic_inc(&node->refcnt);
+ refcount_inc(&node->refcnt);
hfs_dbg(BNODE_REFS, "get_node(%d:%d): %d\n",
node->tree->cnid, node->this,
- atomic_read(&node->refcnt));
+ refcount_read(&node->refcnt));
}
}

@@ -630,9 +630,9 @@ void hfs_bnode_put(struct hfs_bnode *node)

hfs_dbg(BNODE_REFS, "put_node(%d:%d): %d\n",
node->tree->cnid, node->this,
- atomic_read(&node->refcnt));
- BUG_ON(!atomic_read(&node->refcnt));
- if (!atomic_dec_and_lock(&node->refcnt, &tree->hash_lock))
+ refcount_read(&node->refcnt));
+ BUG_ON(!refcount_read(&node->refcnt));
+ if (!refcount_dec_and_lock(&node->refcnt, &tree->hash_lock))
return;
for (i = 0; i < tree->pages_per_bnode; i++) {
if (!node->page[i])
--- a/fs/hfsplus/btree.c
+++ b/fs/hfsplus/btree.c
@@ -265,11 +265,11 @@ void hfs_btree_close(struct hfs_btree *tree)
for (i = 0; i < NODE_HASH_SIZE; i++) {
while ((node = tree->node_hash[i])) {
tree->node_hash[i] = node->next_hash;
- if (atomic_read(&node->refcnt))
+ if (refcount_read(&node->refcnt))
pr_crit("node %d:%d "
"still has %d user(s)!\n",
node->tree->cnid, node->this,
- atomic_read(&node->refcnt));
+ refcount_read(&node->refcnt));
hfs_bnode_free(node);
tree->node_hash_cnt--;
}
--- a/fs/hfsplus/hfsplus_fs.h
+++ b/fs/hfsplus/hfsplus_fs.h
@@ -20,6 +20,7 @@
#include <linux/mutex.h>
#include <linux/buffer_head.h>
#include <linux/blkdev.h>
+#include <linux/refcount.h>
#include "hfsplus_raw.h"
#define DBG_BNODE_REFS 0x00000001
@@ -115,7 +116,7 @@ struct hfs_bnode {
struct hfs_bnode *next_hash;
unsigned long flags;
wait_queue_head_t lock_wq;
- atomic_t refcnt;
+ refcount_t refcnt;
unsigned int page_offset;
struct page *page[0];
};
--- a/fs/hfsplus/inode.c
+++ b/fs/hfsplus/inode.c
@@ -94,7 +94,7 @@ static int hfsplus_releasepage(struct page *page, gfp_t mask)
node = hfs_bnode_findhash(tree, nidx);
if (!node)
;
- else if (atomic_read(&node->refcnt))
+ else if (refcount_read(&node->refcnt))
res = 0;
if (res && node) {
hfs_bnode_unhash(node);
@@ -110,7 +110,7 @@ static int hfsplus_releasepage(struct page *page, gfp_t mask)
node = hfs_bnode_findhash(tree, nidx++);
if (!node)
continue;
- if (atomic_read(&node->refcnt)) {
+ if (refcount_read(&node->refcnt)) {
res = 0;
break;
}
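
The remaining call sites convert one-for-one: refcount_inc() for atomic_inc() and refcount_read() for atomic_read(). The payoff across both filesystems is the extra checking: refcount_t saturates instead of wrapping on overflow and warns when incremented from zero, so a leaked or over-dropped reference shows up as a warning rather than as a silently wrapped counter and an eventual use-after-free. A hypothetical sketch of the two remaining calls (demo_get() is not part of the patch):

    #include <linux/printk.h>
    #include <linux/refcount.h>

    static void demo_get(refcount_t *r)
    {
            /* Replaces atomic_inc(); additionally WARNs if *r is 0,
             * since taking a reference on a dead object is a bug. */
            refcount_inc(r);

            /* Replaces atomic_read(); a plain load, used in this
             * patch only for debug output and for best-effort
             * "still in use?" checks made under tree->hash_lock. */
            pr_debug("refs now %u\n", refcount_read(r));
    }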