[v2,2/8] refs/reftable: handle reloading stacks in the reftable backend

Message ID: bab837e3733a982973bb96eedca15d073089693a.1730792627.git.ps@pks.im (mailing list archive)
State: New
Series: refs/reftable: reuse iterators when reading refs

Commit Message

Patrick Steinhardt Nov. 5, 2024, 9:12 a.m. UTC
When accessing a stack we almost always have to reload the stack before
reading data from it. This is mostly because Git does not have a
notification mechanism for when underlying data has been changed, and
thus we are forced to opportunistically reload the stack every single
time to account for any changes that may have happened concurrently.

Handle the reload internally in `backend_for()`. For one, this forces
callsites to think about whether or not they need to reload the stack.
Second, it makes the logic for accessing stacks more self-contained by
letting each `struct reftable_backend` manage itself.

Signed-off-by: Patrick Steinhardt <ps@pks.im>
---
 refs/reftable-backend.c | 150 +++++++++++++++++++++++++---------------
 1 file changed, 93 insertions(+), 57 deletions(-)
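
For reference, the new calling convention boils down to the following.
This is a condensed illustration distilled from the hunks shown further
down; names and error handling are as in the patch:

    /* before: infallible lookup; the caller reloads the stack separately */
    struct reftable_stack *stack = backend_for(refs, refname, &refname)->stack;
    ret = reftable_stack_reload(stack);
    if (ret)
            return ret;

    /* after: fallible lookup that reloads the stack itself when asked to */
    struct reftable_backend *be;
    ret = backend_for(&be, refs, refname, &refname, 1);
    if (ret)
            return ret;
    /* be->stack is now up to date and ready for reading */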

Comments

Junio C Hamano Nov. 12, 2024, 6:41 a.m. UTC | #1
Patrick Steinhardt <ps@pks.im> writes:

> +static int backend_for(struct reftable_backend **out,
> +		       struct reftable_ref_store *store,
> +		       const char *refname,
> +		       const char **rewritten_ref,
> +		       int reload)
>  {
> +	struct reftable_backend *be;
>  	const char *wtname;
>  	int wtname_len;
>  
> -	if (!refname)
> -		return &store->main_backend;
> +	if (!refname) {
> +		be = &store->main_backend;
> +		goto out;
> +	}
>  
>  	switch (parse_worktree_ref(refname, &wtname, &wtname_len, rewritten_ref)) {
>  	case REF_WORKTREE_OTHER: {
>  		static struct strbuf wtname_buf = STRBUF_INIT;
>  		struct strbuf wt_dir = STRBUF_INIT;
> -		struct reftable_backend *be;
>  
>  		/*
>  		 * We're using a static buffer here so that we don't need to
> @@ -162,7 +166,7 @@ static struct reftable_backend *backend_for(struct reftable_ref_store *store,
>  		}
>  
>  		strbuf_release(&wt_dir);
> -		return be;
> +		goto out;

An interesting part of this function is not shown in the above
context: we look up an existing backend from a strmap and allocate
one if there isn't one yet.  In either case, be points at the
backend to use.  Now that be is no longer local to that block, we
can access it after jumping to the "out" label.
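
For readers without the file open, that elided part is roughly a
lookup-or-allocate on a strmap keyed by the worktree name. A
from-memory sketch, not the literal hunk; the field name and the
setup details are approximations:

    be = strmap_get(&store->worktree_backends, wtname_buf.buf);
    if (!be) {
            CALLOC_ARRAY(be, 1);
            /* ... set up be->stack for the worktree's ref database ... */
            strmap_put(&store->worktree_backends, wtname_buf.buf, be);
    }
    /* either way, be now points at the backend to use */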

> +out:
> +	if (reload) {
> +		int ret = reftable_stack_reload(be->stack);
> +		if (ret)
> +			return ret;
> +	}
> +	*out = be;
> +
> +	return 0;
>  }

> @@ -828,17 +845,17 @@ static int reftable_be_read_raw_ref(struct ref_store *ref_store,
>  {
>  	struct reftable_ref_store *refs =
>  		reftable_be_downcast(ref_store, REF_STORE_READ, "read_raw_ref");
> -	struct reftable_stack *stack = backend_for(refs, refname, &refname)->stack;
> +	struct reftable_backend *be;
>  	int ret;
>  
>  	if (refs->err < 0)
>  		return refs->err;
>  
> -	ret = reftable_stack_reload(stack);
> +	ret = backend_for(&be, refs, refname, &refname, 1);
>  	if (ret)
>  		return ret;

This one chooses to reload, so that the next one, i.e.
"without-reload", would not read stale information?

> -	ret = read_ref_without_reload(refs, stack, refname, oid, referent, type);
> +	ret = read_ref_without_reload(refs, be->stack, refname, oid, referent, type);


The following bit is curious.

> +	ret = backend_for(&be, refs, update->refname, NULL, 0);
> +	if (ret)
> +		return ret;
> +

We locate one without reloading, and ...

>  	/*
>  	 * Search for a preexisting stack update. If there is one then we add
>  	 * the update to it, otherwise we set up a new stack update.
>  	 */
>  	for (i = 0; !arg && i < tx_data->args_nr; i++)
> -		if (tx_data->args[i].stack == stack)
> +		if (tx_data->args[i].be == be)
>  			arg = &tx_data->args[i];
>  	if (!arg) {

... only when we cannot reuse a preexisting one, ...

>  		struct reftable_addition *addition;
>  
> -		ret = reftable_stack_reload(stack);
> +		ret = backend_for(&be, refs, update->refname, NULL, 1);
>  		if (ret)
>  			return ret;

... instead of directly doing reload on the instance we already
have, we do another _for() to locate one, this time reload set to 1.

That looks like doing some redundant work?  I am confused.

> @@ -1048,7 +1070,11 @@ static int reftable_be_transaction_prepare(struct ref_store *ref_store,
>  		goto done;
>  	}
>  
> -	ret = read_ref_without_reload(refs, backend_for(refs, "HEAD", NULL)->stack, "HEAD",
> +	ret = backend_for(&be, refs, "HEAD", NULL, 0);
> +	if (ret)
> +		goto done;
> +
> +	ret = read_ref_without_reload(refs, be->stack, "HEAD",
>  				      &head_oid, &head_referent, &head_type);

This now takes into account the possibility that backend_for() might
fail.  The original code would have segfaulted when it happened, I
guess.

> @@ -1057,10 +1083,11 @@ static int reftable_be_transaction_prepare(struct ref_store *ref_store,
>  	for (i = 0; i < transaction->nr; i++) {
>  		struct ref_update *u = transaction->updates[i];
>  		struct object_id current_oid = {0};
> -		struct reftable_stack *stack;
>  		const char *rewritten_ref;
>  
> -		stack = backend_for(refs, u->refname, &rewritten_ref)->stack;
> +		ret = backend_for(&be, refs, u->refname, &rewritten_ref, 0);
> +		if (ret)
> +			goto done;

Ditto, we would have segfaulted in the next hunk when stack got NULL
here ...

> @@ -1116,7 +1143,7 @@ static int reftable_be_transaction_prepare(struct ref_store *ref_store,
>  			string_list_insert(&affected_refnames, new_update->refname);
>  		}
>  
> -		ret = read_ref_without_reload(refs, stack, rewritten_ref,
> +		ret = read_ref_without_reload(refs, be->stack, rewritten_ref,
>  					      &current_oid, &referent, &u->type);

... here.

> @@ -1831,10 +1858,9 @@ static int reftable_be_copy_ref(struct ref_store *ref_store,
>  {
>  	struct reftable_ref_store *refs =
>  		reftable_be_downcast(ref_store, REF_STORE_WRITE, "copy_ref");
> -	struct reftable_stack *stack = backend_for(refs, newrefname, &newrefname)->stack;
> +	struct reftable_backend *be;
>  	struct write_copy_arg arg = {
>  		.refs = refs,
> -		.stack = stack,
>  		.oldname = oldrefname,
>  		.newname = newrefname,
>  		.logmsg = logmsg,
> @@ -1845,10 +1871,11 @@ static int reftable_be_copy_ref(struct ref_store *ref_store,
>  	if (ret < 0)
>  		goto done;
>  
> -	ret = reftable_stack_reload(stack);
> +	ret = backend_for(&be, refs, newrefname, &newrefname, 1);
>  	if (ret)
>  		goto done;

We used to grab "stack" upfront and then call reload here; we now
do backend_for() and let it do the reload as well, so they should be
equivalent.

> -	struct reftable_stack *stack = backend_for(refs, refname, &refname)->stack;
>  	struct reftable_log_record log = {0};
>  	struct reftable_iterator it = {0};
> +	struct reftable_backend *be;
>  	int ret;
>  
>  	if (refs->err < 0)
>  		return refs->err;
>  
> -	ret = reftable_stack_init_log_iterator(stack, &it);
> +	ret = backend_for(&be, refs, refname, &refname, 0);
> +	if (ret)
> +		goto done;
> +
> +	ret = reftable_stack_init_log_iterator(be->stack, &it);

Again, other than the fact that the new code carefully prepares for
the case where backend_for() fails to find be, the versions of the
code with and without the patch are equivalent.

> @@ -2052,16 +2083,20 @@ static int reftable_be_for_each_reflog_ent(struct ref_store *ref_store,
>  {
>  	struct reftable_ref_store *refs =
>  		reftable_be_downcast(ref_store, REF_STORE_READ, "for_each_reflog_ent");
> -	struct reftable_stack *stack = backend_for(refs, refname, &refname)->stack;
>  	struct reftable_log_record *logs = NULL;
>  	struct reftable_iterator it = {0};
> +	struct reftable_backend *be;
>  	size_t logs_alloc = 0, logs_nr = 0, i;
>  	int ret;
>  
>  	if (refs->err < 0)
>  		return refs->err;
>  
> -	ret = reftable_stack_init_log_iterator(stack, &it);
> +	ret = backend_for(&be, refs, refname, &refname, 0);
> +	if (ret)
> +		goto done;
> +
> +	ret = reftable_stack_init_log_iterator(be->stack, &it);

Ditto.

> @@ -2101,20 +2136,20 @@ static int reftable_be_reflog_exists(struct ref_store *ref_store,
>  {
>  	struct reftable_ref_store *refs =
>  		reftable_be_downcast(ref_store, REF_STORE_READ, "reflog_exists");
> -	struct reftable_stack *stack = backend_for(refs, refname, &refname)->stack;
>  	struct reftable_log_record log = {0};
>  	struct reftable_iterator it = {0};
> +	struct reftable_backend *be;
>  	int ret;
>  
>  	ret = refs->err;
>  	if (ret < 0)
>  		goto done;
>  
> -	ret = reftable_stack_reload(stack);
> +	ret = backend_for(&be, refs, refname, &refname, 1);
>  	if (ret < 0)
>  		goto done;
>  
> -	ret = reftable_stack_init_log_iterator(stack, &it);
> +	ret = reftable_stack_init_log_iterator(be->stack, &it);
>  	if (ret < 0)
>  		goto done;

Ditto.

Overall they seem to be mostly equivalent, except that the new code
is a bit more careful about a failing backend_for().  One part of
the code confused me (and I am still unsure), but other than that it
was a pleasant read.

Thanks.
Patrick Steinhardt Nov. 12, 2024, 9:05 a.m. UTC | #2
On Tue, Nov 12, 2024 at 03:41:48PM +0900, Junio C Hamano wrote:
> Patrick Steinhardt <ps@pks.im> writes:
> > -	ret = read_ref_without_reload(refs, stack, refname, oid, referent, type);
> > +	ret = read_ref_without_reload(refs, be->stack, refname, oid, referent, type);
> 
> 
> The following bit is curious.
> 
> > +	ret = backend_for(&be, refs, update->refname, NULL, 0);
> > +	if (ret)
> > +		return ret;
> > +
> 
> We locate one without reloading, and ...
> 
> >  	/*
> >  	 * Search for a preexisting stack update. If there is one then we add
> >  	 * the update to it, otherwise we set up a new stack update.
> >  	 */
> >  	for (i = 0; !arg && i < tx_data->args_nr; i++)
> > -		if (tx_data->args[i].stack == stack)
> > +		if (tx_data->args[i].be == be)
> >  			arg = &tx_data->args[i];
> >  	if (!arg) {
> 
> ... only when we cannot reuse a preexisting one, ...
> 
> >  		struct reftable_addition *addition;
> >  
> > -		ret = reftable_stack_reload(stack);
> > +		ret = backend_for(&be, refs, update->refname, NULL, 1);
> >  		if (ret)
> >  			return ret;
> 
> ... instead of directly doing reload on the instance we already
> have, we do another _for() to locate one, this time reload set to 1.
> 
> That looks like doing some redundant work?  I am confused.

It indeed is redundant work, yes. And in fact it is redundant work that
isn't really required anymore. My first iteration didn't yet have the
`reftable_write_options::on_reload()` callback, and I instead tried to
catch reloads via `backend_for()`, so it was required to reload via that
function.

But now that we do have the callback that isn't needed anymore, and thus
we don't have to call `backend_for()` a second time here. I'll adapt
this accordingly.
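
Concretely, the simplification presumably amounts to dropping the
second lookup and relying on the reload that the addition already
performs. A sketch of that adaptation, not the actual follow-up code:

    ret = backend_for(&be, refs, update->refname, NULL, 0);
    if (ret)
            return ret;

    /* ... search for a preexisting stack update as before ... */

    if (!arg) {
            struct reftable_addition *addition;

            /*
             * No second backend_for() call and no explicit reload here:
             * reftable_stack_new_addition() is already asked to reload the
             * stack via REFTABLE_STACK_NEW_ADDITION_RELOAD, and the
             * on_reload() callback takes care of invalidating cached state.
             */
            ret = reftable_stack_new_addition(&addition, be->stack,
                                              REFTABLE_STACK_NEW_ADDITION_RELOAD);
            if (ret)
                    return ret; /* lock-error handling as in the patch */
            /* ... set up the new tx_data->args[] entry as before ... */
    }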

Patrick

Patch

diff --git a/refs/reftable-backend.c b/refs/reftable-backend.c
index 116cc5ec23..4a28dc8a9d 100644
--- a/refs/reftable-backend.c
+++ b/refs/reftable-backend.c
@@ -114,21 +114,25 @@  static struct reftable_ref_store *reftable_be_downcast(struct ref_store *ref_sto
  * like `worktrees/$worktree/refs/heads/foo` as worktree stacks will store
  * those references in their normalized form.
  */
-static struct reftable_backend *backend_for(struct reftable_ref_store *store,
-					    const char *refname,
-					    const char **rewritten_ref)
+static int backend_for(struct reftable_backend **out,
+		       struct reftable_ref_store *store,
+		       const char *refname,
+		       const char **rewritten_ref,
+		       int reload)
 {
+	struct reftable_backend *be;
 	const char *wtname;
 	int wtname_len;
 
-	if (!refname)
-		return &store->main_backend;
+	if (!refname) {
+		be = &store->main_backend;
+		goto out;
+	}
 
 	switch (parse_worktree_ref(refname, &wtname, &wtname_len, rewritten_ref)) {
 	case REF_WORKTREE_OTHER: {
 		static struct strbuf wtname_buf = STRBUF_INIT;
 		struct strbuf wt_dir = STRBUF_INIT;
-		struct reftable_backend *be;
 
 		/*
 		 * We're using a static buffer here so that we don't need to
@@ -162,7 +166,7 @@  static struct reftable_backend *backend_for(struct reftable_ref_store *store,
 		}
 
 		strbuf_release(&wt_dir);
-		return be;
+		goto out;
 	}
 	case REF_WORKTREE_CURRENT:
 		/*
@@ -170,14 +174,27 @@  static struct reftable_backend *backend_for(struct reftable_ref_store *store,
 		 * main worktree. We thus return the main stack in that case.
 		 */
 		if (!store->worktree_backend.stack)
-			return &store->main_backend;
-		return &store->worktree_backend;
+			be = &store->main_backend;
+		else
+			be = &store->worktree_backend;
+		goto out;
 	case REF_WORKTREE_MAIN:
 	case REF_WORKTREE_SHARED:
-		return &store->main_backend;
+		be = &store->main_backend;
+		goto out;
 	default:
 		BUG("unhandled worktree reference type");
 	}
+
+out:
+	if (reload) {
+		int ret = reftable_stack_reload(be->stack);
+		if (ret)
+			return ret;
+	}
+	*out = be;
+
+	return 0;
 }
 
 static int should_write_log(struct reftable_ref_store *refs, const char *refname)
@@ -828,17 +845,17 @@  static int reftable_be_read_raw_ref(struct ref_store *ref_store,
 {
 	struct reftable_ref_store *refs =
 		reftable_be_downcast(ref_store, REF_STORE_READ, "read_raw_ref");
-	struct reftable_stack *stack = backend_for(refs, refname, &refname)->stack;
+	struct reftable_backend *be;
 	int ret;
 
 	if (refs->err < 0)
 		return refs->err;
 
-	ret = reftable_stack_reload(stack);
+	ret = backend_for(&be, refs, refname, &refname, 1);
 	if (ret)
 		return ret;
 
-	ret = read_ref_without_reload(refs, stack, refname, oid, referent, type);
+	ret = read_ref_without_reload(refs, be->stack, refname, oid, referent, type);
 	if (ret < 0)
 		return ret;
 	if (ret > 0) {
@@ -855,15 +872,15 @@  static int reftable_be_read_symbolic_ref(struct ref_store *ref_store,
 {
 	struct reftable_ref_store *refs =
 		reftable_be_downcast(ref_store, REF_STORE_READ, "read_symbolic_ref");
-	struct reftable_stack *stack = backend_for(refs, refname, &refname)->stack;
 	struct reftable_ref_record ref = {0};
+	struct reftable_backend *be;
 	int ret;
 
-	ret = reftable_stack_reload(stack);
+	ret = backend_for(&be, refs, refname, &refname, 1);
 	if (ret)
 		return ret;
 
-	ret = reftable_stack_read_ref(stack, refname, &ref);
+	ret = reftable_stack_read_ref(be->stack, refname, &ref);
 	if (ret == 0 && ref.value_type == REFTABLE_REF_SYMREF)
 		strbuf_addstr(referent, ref.value.symref);
 	else
@@ -880,7 +897,7 @@  struct reftable_transaction_update {
 
 struct write_transaction_table_arg {
 	struct reftable_ref_store *refs;
-	struct reftable_stack *stack;
+	struct reftable_backend *be;
 	struct reftable_addition *addition;
 	struct reftable_transaction_update *updates;
 	size_t updates_nr;
@@ -915,27 +932,31 @@  static int prepare_transaction_update(struct write_transaction_table_arg **out,
 				      struct ref_update *update,
 				      struct strbuf *err)
 {
-	struct reftable_stack *stack = backend_for(refs, update->refname, NULL)->stack;
 	struct write_transaction_table_arg *arg = NULL;
+	struct reftable_backend *be;
 	size_t i;
 	int ret;
 
+	ret = backend_for(&be, refs, update->refname, NULL, 0);
+	if (ret)
+		return ret;
+
 	/*
 	 * Search for a preexisting stack update. If there is one then we add
 	 * the update to it, otherwise we set up a new stack update.
 	 */
 	for (i = 0; !arg && i < tx_data->args_nr; i++)
-		if (tx_data->args[i].stack == stack)
+		if (tx_data->args[i].be == be)
 			arg = &tx_data->args[i];
 
 	if (!arg) {
 		struct reftable_addition *addition;
 
-		ret = reftable_stack_reload(stack);
+		ret = backend_for(&be, refs, update->refname, NULL, 1);
 		if (ret)
 			return ret;
 
-		ret = reftable_stack_new_addition(&addition, stack,
+		ret = reftable_stack_new_addition(&addition, be->stack,
 						  REFTABLE_STACK_NEW_ADDITION_RELOAD);
 		if (ret) {
 			if (ret == REFTABLE_LOCK_ERROR)
@@ -947,7 +968,7 @@  static int prepare_transaction_update(struct write_transaction_table_arg **out,
 			   tx_data->args_alloc);
 		arg = &tx_data->args[tx_data->args_nr++];
 		arg->refs = refs;
-		arg->stack = stack;
+		arg->be = be;
 		arg->addition = addition;
 		arg->updates = NULL;
 		arg->updates_nr = 0;
@@ -1002,6 +1023,7 @@  static int reftable_be_transaction_prepare(struct ref_store *ref_store,
 	struct strbuf referent = STRBUF_INIT, head_referent = STRBUF_INIT;
 	struct string_list affected_refnames = STRING_LIST_INIT_NODUP;
 	struct reftable_transaction_data *tx_data = NULL;
+	struct reftable_backend *be;
 	struct object_id head_oid;
 	unsigned int head_type = 0;
 	size_t i;
@@ -1048,7 +1070,11 @@  static int reftable_be_transaction_prepare(struct ref_store *ref_store,
 		goto done;
 	}
 
-	ret = read_ref_without_reload(refs, backend_for(refs, "HEAD", NULL)->stack, "HEAD",
+	ret = backend_for(&be, refs, "HEAD", NULL, 0);
+	if (ret)
+		goto done;
+
+	ret = read_ref_without_reload(refs, be->stack, "HEAD",
 				      &head_oid, &head_referent, &head_type);
 	if (ret < 0)
 		goto done;
@@ -1057,10 +1083,11 @@  static int reftable_be_transaction_prepare(struct ref_store *ref_store,
 	for (i = 0; i < transaction->nr; i++) {
 		struct ref_update *u = transaction->updates[i];
 		struct object_id current_oid = {0};
-		struct reftable_stack *stack;
 		const char *rewritten_ref;
 
-		stack = backend_for(refs, u->refname, &rewritten_ref)->stack;
+		ret = backend_for(&be, refs, u->refname, &rewritten_ref, 0);
+		if (ret)
+			goto done;
 
 		/* Verify that the new object ID is valid. */
 		if ((u->flags & REF_HAVE_NEW) && !is_null_oid(&u->new_oid) &&
@@ -1116,7 +1143,7 @@  static int reftable_be_transaction_prepare(struct ref_store *ref_store,
 			string_list_insert(&affected_refnames, new_update->refname);
 		}
 
-		ret = read_ref_without_reload(refs, stack, rewritten_ref,
+		ret = read_ref_without_reload(refs, be->stack, rewritten_ref,
 					      &current_oid, &referent, &u->type);
 		if (ret < 0)
 			goto done;
@@ -1318,7 +1345,7 @@  static int transaction_update_cmp(const void *a, const void *b)
 static int write_transaction_table(struct reftable_writer *writer, void *cb_data)
 {
 	struct write_transaction_table_arg *arg = cb_data;
-	uint64_t ts = reftable_stack_next_update_index(arg->stack);
+	uint64_t ts = reftable_stack_next_update_index(arg->be->stack);
 	struct reftable_log_record *logs = NULL;
 	struct ident_split committer_ident = {0};
 	size_t logs_nr = 0, logs_alloc = 0, i;
@@ -1354,7 +1381,7 @@  static int write_transaction_table(struct reftable_writer *writer, void *cb_data
 			struct reftable_log_record log = {0};
 			struct reftable_iterator it = {0};
 
-			ret = reftable_stack_init_log_iterator(arg->stack, &it);
+			ret = reftable_stack_init_log_iterator(arg->be->stack, &it);
 			if (ret < 0)
 				goto done;
 
@@ -1799,10 +1826,9 @@  static int reftable_be_rename_ref(struct ref_store *ref_store,
 {
 	struct reftable_ref_store *refs =
 		reftable_be_downcast(ref_store, REF_STORE_WRITE, "rename_ref");
-	struct reftable_stack *stack = backend_for(refs, newrefname, &newrefname)->stack;
+	struct reftable_backend *be;
 	struct write_copy_arg arg = {
 		.refs = refs,
-		.stack = stack,
 		.oldname = oldrefname,
 		.newname = newrefname,
 		.logmsg = logmsg,
@@ -1814,10 +1840,11 @@  static int reftable_be_rename_ref(struct ref_store *ref_store,
 	if (ret < 0)
 		goto done;
 
-	ret = reftable_stack_reload(stack);
+	ret = backend_for(&be, refs, newrefname, &newrefname, 1);
 	if (ret)
 		goto done;
-	ret = reftable_stack_add(stack, &write_copy_table, &arg);
+	arg.stack = be->stack;
+	ret = reftable_stack_add(be->stack, &write_copy_table, &arg);
 
 done:
 	assert(ret != REFTABLE_API_ERROR);
@@ -1831,10 +1858,9 @@  static int reftable_be_copy_ref(struct ref_store *ref_store,
 {
 	struct reftable_ref_store *refs =
 		reftable_be_downcast(ref_store, REF_STORE_WRITE, "copy_ref");
-	struct reftable_stack *stack = backend_for(refs, newrefname, &newrefname)->stack;
+	struct reftable_backend *be;
 	struct write_copy_arg arg = {
 		.refs = refs,
-		.stack = stack,
 		.oldname = oldrefname,
 		.newname = newrefname,
 		.logmsg = logmsg,
@@ -1845,10 +1871,11 @@  static int reftable_be_copy_ref(struct ref_store *ref_store,
 	if (ret < 0)
 		goto done;
 
-	ret = reftable_stack_reload(stack);
+	ret = backend_for(&be, refs, newrefname, &newrefname, 1);
 	if (ret)
 		goto done;
-	ret = reftable_stack_add(stack, &write_copy_table, &arg);
+	arg.stack = be->stack;
+	ret = reftable_stack_add(be->stack, &write_copy_table, &arg);
 
 done:
 	assert(ret != REFTABLE_API_ERROR);
@@ -2012,15 +2039,19 @@  static int reftable_be_for_each_reflog_ent_reverse(struct ref_store *ref_store,
 {
 	struct reftable_ref_store *refs =
 		reftable_be_downcast(ref_store, REF_STORE_READ, "for_each_reflog_ent_reverse");
-	struct reftable_stack *stack = backend_for(refs, refname, &refname)->stack;
 	struct reftable_log_record log = {0};
 	struct reftable_iterator it = {0};
+	struct reftable_backend *be;
 	int ret;
 
 	if (refs->err < 0)
 		return refs->err;
 
-	ret = reftable_stack_init_log_iterator(stack, &it);
+	ret = backend_for(&be, refs, refname, &refname, 0);
+	if (ret)
+		goto done;
+
+	ret = reftable_stack_init_log_iterator(be->stack, &it);
 	if (ret < 0)
 		goto done;
 
@@ -2052,16 +2083,20 @@  static int reftable_be_for_each_reflog_ent(struct ref_store *ref_store,
 {
 	struct reftable_ref_store *refs =
 		reftable_be_downcast(ref_store, REF_STORE_READ, "for_each_reflog_ent");
-	struct reftable_stack *stack = backend_for(refs, refname, &refname)->stack;
 	struct reftable_log_record *logs = NULL;
 	struct reftable_iterator it = {0};
+	struct reftable_backend *be;
 	size_t logs_alloc = 0, logs_nr = 0, i;
 	int ret;
 
 	if (refs->err < 0)
 		return refs->err;
 
-	ret = reftable_stack_init_log_iterator(stack, &it);
+	ret = backend_for(&be, refs, refname, &refname, 0);
+	if (ret)
+		goto done;
+
+	ret = reftable_stack_init_log_iterator(be->stack, &it);
 	if (ret < 0)
 		goto done;
 
@@ -2101,20 +2136,20 @@  static int reftable_be_reflog_exists(struct ref_store *ref_store,
 {
 	struct reftable_ref_store *refs =
 		reftable_be_downcast(ref_store, REF_STORE_READ, "reflog_exists");
-	struct reftable_stack *stack = backend_for(refs, refname, &refname)->stack;
 	struct reftable_log_record log = {0};
 	struct reftable_iterator it = {0};
+	struct reftable_backend *be;
 	int ret;
 
 	ret = refs->err;
 	if (ret < 0)
 		goto done;
 
-	ret = reftable_stack_reload(stack);
+	ret = backend_for(&be, refs, refname, &refname, 1);
 	if (ret < 0)
 		goto done;
 
-	ret = reftable_stack_init_log_iterator(stack, &it);
+	ret = reftable_stack_init_log_iterator(be->stack, &it);
 	if (ret < 0)
 		goto done;
 
@@ -2186,10 +2221,9 @@  static int reftable_be_create_reflog(struct ref_store *ref_store,
 {
 	struct reftable_ref_store *refs =
 		reftable_be_downcast(ref_store, REF_STORE_WRITE, "create_reflog");
-	struct reftable_stack *stack = backend_for(refs, refname, &refname)->stack;
+	struct reftable_backend *be;
 	struct write_reflog_existence_arg arg = {
 		.refs = refs,
-		.stack = stack,
 		.refname = refname,
 	};
 	int ret;
@@ -2198,11 +2232,12 @@  static int reftable_be_create_reflog(struct ref_store *ref_store,
 	if (ret < 0)
 		goto done;
 
-	ret = reftable_stack_reload(stack);
+	ret = backend_for(&be, refs, refname, &refname, 1);
 	if (ret)
 		goto done;
+	arg.stack = be->stack;
 
-	ret = reftable_stack_add(stack, &write_reflog_existence_table, &arg);
+	ret = reftable_stack_add(be->stack, &write_reflog_existence_table, &arg);
 
 done:
 	return ret;
@@ -2260,17 +2295,18 @@  static int reftable_be_delete_reflog(struct ref_store *ref_store,
 {
 	struct reftable_ref_store *refs =
 		reftable_be_downcast(ref_store, REF_STORE_WRITE, "delete_reflog");
-	struct reftable_stack *stack = backend_for(refs, refname, &refname)->stack;
+	struct reftable_backend *be;
 	struct write_reflog_delete_arg arg = {
-		.stack = stack,
 		.refname = refname,
 	};
 	int ret;
 
-	ret = reftable_stack_reload(stack);
+	ret = backend_for(&be, refs, refname, &refname, 1);
 	if (ret)
 		return ret;
-	ret = reftable_stack_add(stack, &write_reflog_delete_table, &arg);
+	arg.stack = be->stack;
+
+	ret = reftable_stack_add(be->stack, &write_reflog_delete_table, &arg);
 
 	assert(ret != REFTABLE_API_ERROR);
 	return ret;
@@ -2369,13 +2405,13 @@  static int reftable_be_reflog_expire(struct ref_store *ref_store,
 	 */
 	struct reftable_ref_store *refs =
 		reftable_be_downcast(ref_store, REF_STORE_WRITE, "reflog_expire");
-	struct reftable_stack *stack = backend_for(refs, refname, &refname)->stack;
 	struct reftable_log_record *logs = NULL;
 	struct reftable_log_record *rewritten = NULL;
 	struct reftable_ref_record ref_record = {0};
 	struct reftable_iterator it = {0};
 	struct reftable_addition *add = NULL;
 	struct reflog_expiry_arg arg = {0};
+	struct reftable_backend *be;
 	struct object_id oid = {0};
 	uint8_t *last_hash = NULL;
 	size_t logs_nr = 0, logs_alloc = 0, i;
@@ -2384,11 +2420,11 @@  static int reftable_be_reflog_expire(struct ref_store *ref_store,
 	if (refs->err < 0)
 		return refs->err;
 
-	ret = reftable_stack_reload(stack);
+	ret = backend_for(&be, refs, refname, &refname, 1);
 	if (ret < 0)
 		goto done;
 
-	ret = reftable_stack_init_log_iterator(stack, &it);
+	ret = reftable_stack_init_log_iterator(be->stack, &it);
 	if (ret < 0)
 		goto done;
 
@@ -2396,11 +2432,11 @@  static int reftable_be_reflog_expire(struct ref_store *ref_store,
 	if (ret < 0)
 		goto done;
 
-	ret = reftable_stack_new_addition(&add, stack, 0);
+	ret = reftable_stack_new_addition(&add, be->stack, 0);
 	if (ret < 0)
 		goto done;
 
-	ret = reftable_stack_read_ref(stack, refname, &ref_record);
+	ret = reftable_stack_read_ref(be->stack, refname, &ref_record);
 	if (ret < 0)
 		goto done;
 	if (reftable_ref_record_val1(&ref_record))
@@ -2479,7 +2515,7 @@  static int reftable_be_reflog_expire(struct ref_store *ref_store,
 	arg.refs = refs;
 	arg.records = rewritten;
 	arg.len = logs_nr;
-	arg.stack = stack,
+	arg.stack = be->stack,
 	arg.refname = refname,
 
 	ret = reftable_addition_add(add, &write_reflog_expiry_table, &arg);