@@ -128,7 +128,7 @@ static void clean_tracked_sparse_directories(struct repository *r)
* sparse index will not delete directories that contain
* conflicted entries or submodules.
*/
- if (!r->index->sparse_index) {
+ if (r->index->sparse_index == COMPLETELY_FULL) {
/*
* If something, such as a merge conflict or other concern,
* prevents us from converting to a sparse index, then do
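The rewritten guard is behavior-preserving: COMPLETELY_FULL is the enum's zero value (see the definition below), so the old !r->index->sparse_index test and the explicit comparison agree for every state. Spelling it out just keeps this call site unambiguous once a third state exists. A standalone sketch of that equivalence, not part of the patch:

#include <assert.h>

enum sparse_index_mode {
	COMPLETELY_FULL = 0,
	COLLAPSED = 1,
	PARTIALLY_SPARSE = 2,
};

int main(void)
{
	enum sparse_index_mode m;

	/*
	 * For every state, the old boolean test and the new explicit
	 * comparison agree, because COMPLETELY_FULL is the zero value.
	 */
	for (m = COMPLETELY_FULL; m <= PARTIALLY_SPARSE; m++)
		assert(!m == (m == COMPLETELY_FULL));
	return 0;
}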
@@ -310,6 +310,28 @@ struct untracked_cache;
struct progress;
struct pattern_list;

+enum sparse_index_mode {
+ /*
+ * COMPLETELY_FULL: there are no sparse directories
+ * in the index at all.
+ */
+ COMPLETELY_FULL = 0,
+
+ /*
+ * COLLAPSED: the index has already been collapsed to sparse
+ * directories wherever possible.
+ */
+ COLLAPSED = 1,
+
+ /*
+ * PARTIALLY_SPARSE: the sparse directories that exist are
+ * outside the sparse-checkout boundary, but it is possible
+ * that some file entries could collapse to sparse directory
+ * entries.
+ */
+ PARTIALLY_SPARSE = 2,
+};
+
struct index_state {
struct cache_entry **cache;
unsigned int version;
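The zero value for COMPLETELY_FULL is deliberate: a zero-initialized index starts out meaning "no sparse directories", so code that never touches the field stays correct. The three states are best read as answers to two distinct questions, sketched here with hypothetical predicates that are not part of the patch:

#include <stdio.h>

enum sparse_index_mode {
	COMPLETELY_FULL = 0,
	COLLAPSED = 1,
	PARTIALLY_SPARSE = 2,
};

/* convert_to_sparse() only has nothing to do when the index is
 * already collapsed as far as it can go. */
static int nothing_left_to_collapse(enum sparse_index_mode m)
{
	return m == COLLAPSED;
}

/* Code that walks cache entries must be prepared for sparse
 * directories unless none exist at all. */
static int may_contain_sparse_dirs(enum sparse_index_mode m)
{
	return m != COMPLETELY_FULL;
}

int main(void)
{
	/*
	 * PARTIALLY_SPARSE answers "no" to the first question and
	 * "yes" to the second, which is exactly the combination a
	 * single 0/1 bit could not represent.
	 */
	printf("%d %d\n",
	       nothing_left_to_collapse(PARTIALLY_SPARSE),
	       may_contain_sparse_dirs(PARTIALLY_SPARSE));
	return 0;
}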
@@ -323,14 +345,8 @@ struct index_state {
drop_cache_tree : 1,
updated_workdir : 1,
updated_skipworktree : 1,
- fsmonitor_has_run_once : 1,
-
- /*
- * sparse_index == 1 when sparse-directory
- * entries exist. Requires sparse-checkout
- * in cone mode.
- */
- sparse_index : 1;
+ fsmonitor_has_run_once : 1;
+ enum sparse_index_mode sparse_index;
struct hashmap name_hash;
struct hashmap dir_hash;
struct object_id oid;
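Moving sparse_index out of the bit-field is forced rather than stylistic: a 1-bit unsigned field stores values modulo 2, so assigning PARTIALLY_SPARSE (2) would silently truncate it to 0. A standalone illustration with reduced stand-in structs:

#include <stdio.h>

enum sparse_index_mode { COMPLETELY_FULL, COLLAPSED, PARTIALLY_SPARSE };

struct old_layout {
	unsigned int sparse_index : 1;	/* the field before this patch */
};

struct new_layout {
	enum sparse_index_mode sparse_index;	/* the field after */
};

int main(void)
{
	struct old_layout o = { 0 };
	struct new_layout n = { 0 };

	/* An unsigned bit-field keeps only the low bit (compilers may
	 * warn about the truncation), so the third state collapses
	 * back into COMPLETELY_FULL: */
	o.sparse_index = PARTIALLY_SPARSE;
	n.sparse_index = PARTIALLY_SPARSE;

	printf("1-bit field: %d\n", (int)o.sparse_index);	/* 0 */
	printf("enum field:  %d\n", (int)n.sparse_index);	/* 2 */
	return 0;
}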
@@ -112,7 +112,7 @@ static const char *alternate_index_output;
static void set_index_entry(struct index_state *istate, int nr, struct cache_entry *ce)
{
if (S_ISSPARSEDIR(ce->ce_mode))
- istate->sparse_index = 1;
+ istate->sparse_index = COLLAPSED;

istate->cache[nr] = ce;
add_name_hash(istate, ce);
@@ -1856,7 +1856,7 @@ static int read_index_extension(struct index_state *istate,
break;
case CACHE_EXT_SPARSE_DIRECTORIES:
/* no content, only an indicator */
- istate->sparse_index = 1;
+ istate->sparse_index = COLLAPSED;
break;
default:
if (*ext < 'A' || 'Z' < *ext)
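The sparse-directories extension carries no payload; its presence in the file is itself the signal, which is why reading it only flips the in-memory state to COLLAPSED. Index extensions are framed as a four-byte tag followed by a four-byte size, so an indicator extension is simply a tag with size zero. The sketch below assumes the "sdir" tag spelling from cache.h and is illustrative, not the real serialization code:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/*
 * Write a content-free indicator extension: a 4-byte tag and a
 * 4-byte size of zero (a zero size is all-zero bytes regardless
 * of byte order).
 */
static size_t write_indicator_ext(unsigned char *buf)
{
	uint32_t size = 0;

	memcpy(buf, "sdir", 4);
	memcpy(buf + 4, &size, 4);
	return 8;
}

int main(void)
{
	unsigned char buf[8];
	size_t len = write_indicator_ext(buf);

	printf("extension bytes:");
	for (size_t i = 0; i < len; i++)
		printf(" %02x", buf[i]);
	printf("\n");
	return 0;
}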
@@ -3149,7 +3149,7 @@ static int do_write_locked_index(struct index_state *istate, struct lock_file *l
unsigned flags)
{
int ret;
- int was_full = !istate->sparse_index;
+ int was_full = istate->sparse_index == COMPLETELY_FULL;

ret = convert_to_sparse(istate, 0);

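was_full snapshots the in-memory shape before the write path collapses the index: the on-disk copy is written sparse when allowed, and if the caller handed in a full index it gets expanded again afterwards, so the temporary collapse never leaks out. A toy model of that save/convert/restore pattern, with toy_collapse() and toy_expand() standing in for convert_to_sparse() and ensure_full_index():

#include <stdio.h>

enum sparse_index_mode { COMPLETELY_FULL, COLLAPSED, PARTIALLY_SPARSE };

struct toy_index {
	enum sparse_index_mode sparse_index;
};

static void toy_collapse(struct toy_index *i) { i->sparse_index = COLLAPSED; }
static void toy_expand(struct toy_index *i) { i->sparse_index = COMPLETELY_FULL; }

/* Collapse for the on-disk format, then restore whatever shape the
 * caller handed us. */
static void write_index(struct toy_index *istate)
{
	int was_full = istate->sparse_index == COMPLETELY_FULL;

	toy_collapse(istate);
	/* ... the index would be serialized here, in sparse form ... */

	if (was_full)
		toy_expand(istate);
}

int main(void)
{
	struct toy_index idx = { COMPLETELY_FULL };

	write_index(&idx);
	printf("after write: %s\n",
	       idx.sparse_index == COMPLETELY_FULL ? "still full" : "collapsed");
	return 0;
}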
@@ -173,7 +173,7 @@ int convert_to_sparse(struct index_state *istate, int flags)
/*
* If the index is already sparse, empty, or otherwise
* cannot be converted to sparse, do not convert.
*/
- if (istate->sparse_index || !istate->cache_nr ||
+ if (istate->sparse_index == COLLAPSED || !istate->cache_nr ||
!is_sparse_index_allowed(istate, flags))
return 0;
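This is the hunk where the wider type changes behavior rather than just spelling: the old truthy test would have skipped conversion for a PARTIALLY_SPARSE index even though some of its file entries could still collapse, while comparing against COLLAPSED lets that case proceed. A standalone comparison of the two guards across all three states:

#include <stdio.h>

enum sparse_index_mode { COMPLETELY_FULL, COLLAPSED, PARTIALLY_SPARSE };

int main(void)
{
	static const char *const name[] = {
		"COMPLETELY_FULL", "COLLAPSED", "PARTIALLY_SPARSE"
	};
	enum sparse_index_mode m;

	for (m = COMPLETELY_FULL; m <= PARTIALLY_SPARSE; m++) {
		int old_skips = m != 0;		/* old: if (istate->sparse_index) */
		int new_skips = m == COLLAPSED;	/* new: explicit comparison */

		printf("%-17s old skips=%d new skips=%d\n",
		       name[m], old_skips, new_skips);
	}
	/* Only PARTIALLY_SPARSE differs: it is now converted again. */
	return 0;
}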
@@ -214,7 +214,7 @@ int convert_to_sparse(struct index_state *istate, int flags)
FREE_AND_NULL(istate->fsmonitor_dirty);
FREE_AND_NULL(istate->fsmonitor_last_update);

- istate->sparse_index = 1;
+ istate->sparse_index = COLLAPSED;
trace2_region_leave("index", "convert_to_sparse", istate->repo);
return 0;
}
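Collapsing rewrites the cache-entry array, so anything indexed by entry position, like the fsmonitor dirty bitmap freed here, goes stale and is dropped along with its last-update token. FREE_AND_NULL is git's free-then-clear helper; a runnable sketch of its shape (matching git-compat-util.h up to formatting):

#include <stdlib.h>

/* free() the pointer and null it in one step, so a stale pointer
 * can never be freed twice or dereferenced after the fact. */
#define FREE_AND_NULL(p) do { free(p); (p) = NULL; } while (0)

int main(void)
{
	char *buf = malloc(16);

	FREE_AND_NULL(buf);
	/* buf is NULL now; a second call is a harmless free(NULL). */
	FREE_AND_NULL(buf);
	return 0;
}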
@@ -259,7 +259,7 @@ void expand_to_pattern_list(struct index_state *istate,
/*
* If the index is already full, then keep it full. We will convert
* it to a sparse index on write, if possible.
*/
- if (!istate || !istate->sparse_index)
+ if (!istate || istate->sparse_index == COMPLETELY_FULL)
return;
/*