@@ -475,7 +475,7 @@ pnfs_clear_lseg_list(struct pnfs_layout_hdr *lo, struct list_head *tmp_list,
mark_lseg_invalid(lseg, tmp_list);
rv = true;
}
- dprintk("%s:Return\n", __func__);
+ dprintk("%s:Return %d\n", __func__, rv);
return rv;
}
@@ -640,20 +640,26 @@ send_layoutget(struct pnfs_layout_hdr *lo,
return lseg;
}
-void nfs4_asynch_forget_layouts(struct pnfs_layout_hdr *lo,
+bool nfs4_asynch_forget_layouts(struct pnfs_layout_hdr *lo,
struct pnfs_layout_range *range,
int notify_bit, atomic_t *notify_count,
struct list_head *tmp_list)
{
+ bool rv = false;
struct pnfs_layout_segment *lseg, *tmp;
assert_spin_locked(&lo->inode->i_lock);
+ dprintk("%s\n", __func__);
list_for_each_entry_safe(lseg, tmp, &lo->segs, fi_list)
if (should_free_lseg(&lseg->range, range)) {
lseg->pls_notify_mask |= (1 << notify_bit);
atomic_inc(notify_count);
mark_lseg_invalid(lseg, tmp_list);
+ rv = true;
}
+
+ dprintk("%s:Return %d\n", __func__, rv);
+ return rv;
}
/* Return true if there is layout based io in progress in the given range.
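The void -> bool conversion above lets a caller learn whether any lseg in the
recalled range was actually marked invalid. A minimal sketch of how a
callback-recall caller might use that return value follows; it is not part of
the patch, and handle_recall_range() and the pnfs_free_lseg_list() helper are
assumptions about the surrounding tree, not confirmed API:

/*
 * Hypothetical caller sketch, NOT part of the patch above: it only
 * illustrates what the void -> bool change buys a caller such as a
 * CB_LAYOUTRECALL handler.  handle_recall_range() and
 * pnfs_free_lseg_list() are assumptions, not confirmed API.
 */
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/nfs4.h>
#include "pnfs.h"

static u32 handle_recall_range(struct pnfs_layout_hdr *lo,
			       struct pnfs_layout_range *range,
			       int notify_bit, atomic_t *notify_count)
{
	LIST_HEAD(tmp_list);
	bool found;

	/* nfs4_asynch_forget_layouts() asserts that i_lock is held */
	spin_lock(&lo->inode->i_lock);
	found = nfs4_asynch_forget_layouts(lo, range, notify_bit,
					   notify_count, &tmp_list);
	spin_unlock(&lo->inode->i_lock);

	/* free the invalidated segments outside the lock (assumed helper) */
	pnfs_free_lseg_list(&tmp_list);

	/*
	 * The new return value lets a recall that matched nothing be
	 * answered immediately instead of waiting on notify_count.
	 */
	return found ? NFS4ERR_DELAY : NFS4ERR_NOMATCHING_LAYOUT;
}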
@@ -298,7 +298,7 @@ void pnfs_set_layout_stateid(struct pnfs_layout_hdr *lo,
int pnfs_choose_layoutget_stateid(nfs4_stateid *dst,
struct pnfs_layout_hdr *lo,
struct nfs4_state *open_state);
-void nfs4_asynch_forget_layouts(struct pnfs_layout_hdr *lo,
+bool nfs4_asynch_forget_layouts(struct pnfs_layout_hdr *lo,
struct pnfs_layout_range *range,
int notify_bit, atomic_t *notify_count,
struct list_head *tmp_list);
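Note that the header prototype changes in lockstep with the definition: after
this patch both invalidation helpers, pnfs_clear_lseg_list() and
nfs4_asynch_forget_layouts(), report whether any lseg in the given range
matched, and both log that result on exit via dprintk.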