@@ -309,51 +309,16 @@ nfsd_prune_bucket_locked(struct nfsd_net *nn, struct nfsd_drc_bucket *b,
}
}
-static long prune_bucket(struct nfsd_drc_bucket *b, struct nfsd_net *nn,
- unsigned int max)
-{
- struct svc_cacherep *rp, *tmp;
- long freed = 0;
-
- list_for_each_entry_safe(rp, tmp, &b->lru_head, c_lru) {
- /*
- * Don't free entries attached to calls that are still
- * in-progress, but do keep scanning the list.
- */
- if (rp->c_state == RC_INPROG)
- continue;
- if (atomic_read(&nn->num_drc_entries) <= nn->max_drc_entries &&
- time_before(jiffies, rp->c_timestamp + RC_EXPIRE))
- break;
- nfsd_reply_cache_free_locked(b, rp, nn);
- if (max && freed++ > max)
- break;
- }
- return freed;
-}
-
-/*
- * Walk the LRU list and prune off entries that are older than RC_EXPIRE.
- * Also prune the oldest ones when the total exceeds the max number of entries.
+/**
+ * nfsd_reply_cache_count - count_objects method for the DRC shrinker
+ * @shrink: our registered shrinker context
+ * @sc: garbage collection parameters
+ *
+ * Returns the total number of entries in the duplicate reply cache. To
+ * keep things simple and quick, this is not the number of expired entries
+ * in the cache (i.e., the number that would be removed by a call to
+ * nfsd_reply_cache_scan).
*/
-static long
-prune_cache_entries(struct nfsd_net *nn)
-{
- unsigned int i;
- long freed = 0;
-
- for (i = 0; i < nn->drc_hashsize; i++) {
- struct nfsd_drc_bucket *b = &nn->drc_hashtbl[i];
-
- if (list_empty(&b->lru_head))
- continue;
- spin_lock(&b->cache_lock);
- freed += prune_bucket(b, nn, 0);
- spin_unlock(&b->cache_lock);
- }
- return freed;
-}
-
static unsigned long
nfsd_reply_cache_count(struct shrinker *shrink, struct shrink_control *sc)
{
@@ -363,14 +328,45 @@ nfsd_reply_cache_count(struct shrinker *shrink, struct shrink_control *sc)
return atomic_read(&nn->num_drc_entries);
}
+/**
+ * nfsd_reply_cache_scan - scan_objects method for the DRC shrinker
+ * @shrink: our registered shrinker context
+ * @sc: garbage collection parameters
+ *
+ * Free expired entries on each bucket's LRU list until we've released
+ * nr_to_scan objects. Nothing is released unless entries have expired
+ * or the cache has exceeded its max_drc_entries limit.
+ *
+ * Returns the number of entries released by this call.
+ */
static unsigned long
nfsd_reply_cache_scan(struct shrinker *shrink, struct shrink_control *sc)
{
struct nfsd_net *nn = container_of(shrink,
struct nfsd_net, nfsd_reply_cache_shrinker);
+ unsigned long freed = 0;
+ LIST_HEAD(dispose);
+ unsigned int i;
- return prune_cache_entries(nn);
+ for (i = 0; i < nn->drc_hashsize; i++) {
+ struct nfsd_drc_bucket *b = &nn->drc_hashtbl[i];
+
+ if (list_empty(&b->lru_head))
+ continue;
+
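+		/* Collect prunable entries on a dispose list under the bucket lock */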
+ spin_lock(&b->cache_lock);
+ nfsd_prune_bucket_locked(nn, b, 0, &dispose);
+ spin_unlock(&b->cache_lock);
+
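+		/* Free the collected entries after dropping the bucket lock */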
+ freed += nfsd_cacherep_dispose(&dispose);
+ if (freed > sc->nr_to_scan)
+ break;
+ }
+
+ trace_nfsd_drc_gc(nn, freed);
+ return freed;
}
+
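
For reference, a minimal sketch of how the two methods above might be wired into the per-net shrinker at reply-cache initialization time. This is not part of the patch: the helper name, register_shrinker() call, the "nfsd-reply:%s" name, nn->nfsd_name, and the seeks value are assumptions based on the pre-6.7 embedded struct shrinker API; only nfsd_reply_cache_shrinker, nfsd_reply_cache_count, and nfsd_reply_cache_scan come from the code shown here.

/*
 * Illustrative sketch only -- not part of this patch. Attaches the
 * count_objects/scan_objects methods to the shrinker embedded in
 * struct nfsd_net and registers it (pre-6.7 shrinker API assumed;
 * the helper name, nn->nfsd_name, and seeks value are guesses).
 */
static int nfsd_reply_cache_register_shrinker(struct nfsd_net *nn)
{
	nn->nfsd_reply_cache_shrinker.count_objects = nfsd_reply_cache_count;
	nn->nfsd_reply_cache_shrinker.scan_objects = nfsd_reply_cache_scan;
	nn->nfsd_reply_cache_shrinker.seeks = 1;

	return register_shrinker(&nn->nfsd_reply_cache_shrinker,
				 "nfsd-reply:%s", nn->nfsd_name);
}

Because the shrinker is embedded in struct nfsd_net, each network namespace's reply cache responds to memory pressure independently; the container_of() in both methods recovers the owning nfsd_net from the shrinker pointer.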
/*
* Walk an xdr_buf and get a CRC for at most the first RC_CSUMLEN bytes
*/