Message ID | 20241114160131.48616-10-richard.henderson@linaro.org (mailing list archive)
State | New |
Series | accel/tcg: Convert victim tlb to IntervalTree
On 11/14/24 08:00, Richard Henderson wrote:
> Add the data structures for tracking softmmu pages via
> a balanced interval tree. So far, only initialize and
> destroy the data structure.
>
> Signed-off-by: Richard Henderson <richard.henderson@linaro.org>

Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
diff --git a/include/hw/core/cpu.h b/include/hw/core/cpu.h
index db8a6fbc6e..1ebc999a73 100644
--- a/include/hw/core/cpu.h
+++ b/include/hw/core/cpu.h
@@ -35,6 +35,7 @@
 #include "qemu/queue.h"
 #include "qemu/lockcnt.h"
 #include "qemu/thread.h"
+#include "qemu/interval-tree.h"
 #include "qom/object.h"
 
 typedef int (*WriteCoreDumpFunction)(const void *buf, size_t size,
@@ -290,6 +291,8 @@ typedef struct CPUTLBDesc {
     CPUTLBEntry vtable[CPU_VTLB_SIZE];
     CPUTLBEntryFull vfulltlb[CPU_VTLB_SIZE];
     CPUTLBEntryFull *fulltlb;
+    /* All active tlb entries for this address space. */
+    IntervalTreeRoot iroot;
 } CPUTLBDesc;
 
 /*
diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c
index 31c45a6213..aa51fc1d26 100644
--- a/accel/tcg/cputlb.c
+++ b/accel/tcg/cputlb.c
@@ -89,6 +89,13 @@ QEMU_BUILD_BUG_ON(sizeof(vaddr) > sizeof(run_on_cpu_data));
 QEMU_BUILD_BUG_ON(NB_MMU_MODES > 16);
 #define ALL_MMUIDX_BITS ((1 << NB_MMU_MODES) - 1)
 
+/* Extra data required to manage CPUTLBEntryFull within an interval tree. */
+typedef struct CPUTLBEntryTree {
+    IntervalTreeNode itree;
+    CPUTLBEntry copy;
+    CPUTLBEntryFull full;
+} CPUTLBEntryTree;
+
 static inline size_t tlb_n_entries(CPUTLBDescFast *fast)
 {
     return (fast->mask >> CPU_TLB_ENTRY_BITS) + 1;
@@ -305,6 +312,7 @@ static void tlb_mmu_flush_locked(CPUTLBDesc *desc, CPUTLBDescFast *fast)
     desc->large_page_mask = -1;
     desc->vindex = 0;
     memset(desc->vtable, -1, sizeof(desc->vtable));
+    interval_tree_free_nodes(&desc->iroot, offsetof(CPUTLBEntryTree, itree));
 }
 
 static void tlb_flush_one_mmuidx_locked(CPUState *cpu, int mmu_idx,
@@ -326,6 +334,7 @@ static void tlb_mmu_init(CPUTLBDesc *desc, CPUTLBDescFast *fast, int64_t now)
     fast->mask = (n_entries - 1) << CPU_TLB_ENTRY_BITS;
     fast->table = g_new(CPUTLBEntry, n_entries);
     desc->fulltlb = g_new(CPUTLBEntryFull, n_entries);
+    memset(&desc->iroot, 0, sizeof(desc->iroot));
     tlb_mmu_flush_locked(desc, fast);
 }
 
@@ -365,6 +374,8 @@ void tlb_destroy(CPUState *cpu)
 
         g_free(fast->table);
         g_free(desc->fulltlb);
+        interval_tree_free_nodes(&cpu->neg.tlb.d[i].iroot,
+                                 offsetof(CPUTLBEntryTree, itree));
     }
 }
 
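For context: qemu/interval-tree.h nodes carry an inclusive [start, last] range, so once later patches in the series populate desc->iroot, finding the entry covering an address is a single stabbing query. A minimal sketch, assuming the tree has been populated; the helper name is hypothetical and not part of this patch:

    /*
     * Hypothetical sketch: find the tree node whose inclusive
     * [start, last] range covers @addr.  interval_tree_iter_first()
     * returns a node intersecting [addr, addr], or NULL if none does.
     */
    static CPUTLBEntryTree *tlbtree_lookup_addr(CPUTLBDesc *desc, vaddr addr)
    {
        IntervalTreeNode *i;

        i = interval_tree_iter_first(&desc->iroot, addr, addr);
        return i ? container_of(i, CPUTLBEntryTree, itree) : NULL;
    }

Embedding IntervalTreeNode first in CPUTLBEntryTree keeps the container_of() conversion cheap, and the same offsetof(CPUTLBEntryTree, itree) is what the patch passes to interval_tree_free_nodes() above.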
Add the data structures for tracking softmmu pages via
a balanced interval tree. So far, only initialize and
destroy the data structure.

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 include/hw/core/cpu.h | 3 +++
 accel/tcg/cputlb.c    | 11 +++++++++++
 2 files changed, 14 insertions(+)
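Note that an all-zero IntervalTreeRoot is a valid empty tree, which is why tlb_mmu_init() needs only a memset(), and interval_tree_free_nodes() both empties the tree and frees every node in one pass. A sketch of how a later patch might link an entry into the per-mmuidx tree (helper name hypothetical, not part of this patch):

    /*
     * Hypothetical sketch: insert a node covering @size bytes at @page.
     * IntervalTreeNode.start/.last bound an inclusive range, hence -1.
     */
    static void tlbtree_insert(CPUTLBDesc *desc, CPUTLBEntryTree *node,
                               vaddr page, uint64_t size)
    {
        node->itree.start = page;
        node->itree.last = page + size - 1;
        interval_tree_insert(&node->itree, &desc->iroot);
    }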