| Message ID | 20241009150855.804605-13-richard.henderson@linaro.org (mailing list archive) |
|---|---|
| State | New, archived |
| Series | accel/tcg: Convert victim tlb to IntervalTree |
On 10/9/24 08:08, Richard Henderson wrote:
> Update the addr_write copy within each interval tree node.
> Tidy the iteration within the other two loops as well.
>
> Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
> ---
>  accel/tcg/cputlb.c | 19 +++++++++++--------
>  1 file changed, 11 insertions(+), 8 deletions(-)
>
> diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c
> index 709ad75616..95f78afee6 100644
> --- a/accel/tcg/cputlb.c
> +++ b/accel/tcg/cputlb.c
> @@ -1024,17 +1024,20 @@ void tlb_reset_dirty(CPUState *cpu, ram_addr_t start1, ram_addr_t length)
>
>      qemu_spin_lock(&cpu->neg.tlb.c.lock);
>      for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
> -        unsigned int i;
> -        unsigned int n = tlb_n_entries(&cpu->neg.tlb.f[mmu_idx]);
> +        CPUTLBDesc *desc = &cpu->neg.tlb.d[mmu_idx];
> +        CPUTLBDescFast *fast = &cpu->neg.tlb.f[mmu_idx];
>
> -        for (i = 0; i < n; i++) {
> -            tlb_reset_dirty_range_locked(&cpu->neg.tlb.f[mmu_idx].table[i],
> -                                         start1, length);
> +        for (size_t i = 0, n = tlb_n_entries(fast); i < n; i++) {
> +            tlb_reset_dirty_range_locked(&fast->table[i], start1, length);
>          }
>
> -        for (i = 0; i < CPU_VTLB_SIZE; i++) {
> -            tlb_reset_dirty_range_locked(&cpu->neg.tlb.d[mmu_idx].vtable[i],
> -                                         start1, length);
> +        for (size_t i = 0; i < CPU_VTLB_SIZE; i++) {
> +            tlb_reset_dirty_range_locked(&desc->vtable[i], start1, length);
> +        }
> +
> +        for (CPUTLBEntryTree *t = tlbtree_lookup_range(desc, 0, -1); t;
> +             t = tlbtree_lookup_range_next(t, 0, -1)) {

How about introducing an interval_tree_foreach function that runs on every node? Running with [0, 0xffffffff...] does the job, but it's not really obvious when reading the code.

> +            tlb_reset_dirty_range_locked(&t->copy, start1, length);
>          }
>      }
>      qemu_spin_unlock(&cpu->neg.tlb.c.lock);
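For illustration, here is one shape the suggested helper could take, layered over the `interval_tree_iter_first()`/`interval_tree_iter_next()` primitives that QEMU's include/qemu/interval-tree.h already provides. This is a sketch of the reviewer's idea, not code from this series; the macro name and the `iroot`/`itree` field names in the usage example are assumptions.

```c
/*
 * Hypothetical sketch of the suggested interval_tree_foreach helper.
 * Built on the existing interval_tree_iter_first()/interval_tree_iter_next()
 * API from include/qemu/interval-tree.h: iterating over [0, UINT64_MAX]
 * visits every node, without each caller spelling out the magic range.
 */
#define interval_tree_foreach(node, root)                            \
    for ((node) = interval_tree_iter_first((root), 0, UINT64_MAX);   \
         (node);                                                     \
         (node) = interval_tree_iter_next((node), 0, UINT64_MAX))

/*
 * The third loop in tlb_reset_dirty() could then read as below,
 * assuming (hypothetically) that CPUTLBDesc holds the tree in an
 * IntervalTreeRoot 'iroot' and CPUTLBEntryTree embeds its node as 'itree'.
 */
IntervalTreeNode *n;
interval_tree_foreach(n, &desc->iroot) {
    CPUTLBEntryTree *t = container_of(n, CPUTLBEntryTree, itree);
    tlb_reset_dirty_range_locked(&t->copy, start1, length);
}
```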
diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c
index 709ad75616..95f78afee6 100644
--- a/accel/tcg/cputlb.c
+++ b/accel/tcg/cputlb.c
@@ -1024,17 +1024,20 @@ void tlb_reset_dirty(CPUState *cpu, ram_addr_t start1, ram_addr_t length)
 
     qemu_spin_lock(&cpu->neg.tlb.c.lock);
     for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
-        unsigned int i;
-        unsigned int n = tlb_n_entries(&cpu->neg.tlb.f[mmu_idx]);
+        CPUTLBDesc *desc = &cpu->neg.tlb.d[mmu_idx];
+        CPUTLBDescFast *fast = &cpu->neg.tlb.f[mmu_idx];
 
-        for (i = 0; i < n; i++) {
-            tlb_reset_dirty_range_locked(&cpu->neg.tlb.f[mmu_idx].table[i],
-                                         start1, length);
+        for (size_t i = 0, n = tlb_n_entries(fast); i < n; i++) {
+            tlb_reset_dirty_range_locked(&fast->table[i], start1, length);
         }
 
-        for (i = 0; i < CPU_VTLB_SIZE; i++) {
-            tlb_reset_dirty_range_locked(&cpu->neg.tlb.d[mmu_idx].vtable[i],
-                                         start1, length);
+        for (size_t i = 0; i < CPU_VTLB_SIZE; i++) {
+            tlb_reset_dirty_range_locked(&desc->vtable[i], start1, length);
+        }
+
+        for (CPUTLBEntryTree *t = tlbtree_lookup_range(desc, 0, -1); t;
+             t = tlbtree_lookup_range_next(t, 0, -1)) {
+            tlb_reset_dirty_range_locked(&t->copy, start1, length);
         }
     }
     qemu_spin_unlock(&cpu->neg.tlb.c.lock);
Update the addr_write copy within each interval tree node.
Tidy the iteration within the other two loops as well.

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 accel/tcg/cputlb.c | 19 +++++++++++--------
 1 file changed, 11 insertions(+), 8 deletions(-)