Message ID | 20230913040635.28815-16-haitao.huang@linux.intel.com (mailing list archive)
State | New, archived
Series | Add Cgroup support for SGX EPC memory
On Wed Sep 13, 2023 at 7:06 AM EEST, Haitao Huang wrote:
> Add sgx_can_reclaim() wrapper and encapsulate direct references to the
> global LRU list in the reclaimer functions so that they can be called with
> an LRU list per EPC cgroup.
>
> Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
> Signed-off-by: Kristen Carlson Accardi <kristen@linux.intel.com>
> Signed-off-by: Haitao Huang <haitao.huang@linux.intel.com>
> Cc: Sean Christopherson <seanjc@google.com>
> ---
> V4:
> - Re-organized this patch to include all changes related to
>   encapsulation of the global LRU
> - Moved this patch to precede the EPC cgroup patch
> ---
>  arch/x86/kernel/cpu/sgx/main.c | 41 +++++++++++++++++++++++-----------
>  1 file changed, 28 insertions(+), 13 deletions(-)
>
> diff --git a/arch/x86/kernel/cpu/sgx/main.c b/arch/x86/kernel/cpu/sgx/main.c
> index ce316bd5e5bb..3d396fe5ec09 100644
> --- a/arch/x86/kernel/cpu/sgx/main.c
> +++ b/arch/x86/kernel/cpu/sgx/main.c
> @@ -34,6 +34,16 @@ static DEFINE_XARRAY(sgx_epc_address_space);
>   */
>  static struct sgx_epc_lru_lists sgx_global_lru;
>
> +static inline struct sgx_epc_lru_lists *sgx_lru_lists(struct sgx_epc_page *epc_page)
> +{
> +	return &sgx_global_lru;
> +}

I'd simply export sgx_global_lru.

> +static inline bool sgx_can_reclaim(void)
> +{
> +	return !list_empty(&sgx_global_lru.reclaimable);
> +}

Accessors for the object should be named so that this fact is reflected,
e.g. sgx_global_lru_can_reclaim() in this case.

I would just open code this to the call sites though.

> +
>  static atomic_long_t sgx_nr_free_pages = ATOMIC_LONG_INIT(0);
>
>  /* Nodes with one or more EPC sections. */
> @@ -339,6 +349,7 @@ size_t sgx_reclaim_epc_pages(size_t nr_to_scan, bool ignore_age)
>  	struct sgx_backing backing[SGX_NR_TO_SCAN_MAX];
>  	struct sgx_epc_page *epc_page, *tmp;
>  	struct sgx_encl_page *encl_page;
> +	struct sgx_epc_lru_lists *lru;
>  	pgoff_t page_index;
>  	LIST_HEAD(iso);
>  	size_t ret;
> @@ -372,10 +383,11 @@ size_t sgx_reclaim_epc_pages(size_t nr_to_scan, bool ignore_age)
>  		continue;
>
>  skip:
> -		spin_lock(&sgx_global_lru.lock);
> +		lru = sgx_lru_lists(epc_page);
> +		spin_lock(&lru->lock);
>  		sgx_epc_page_set_state(epc_page, SGX_EPC_PAGE_RECLAIMABLE);
> -		list_move_tail(&epc_page->list, &sgx_global_lru.reclaimable);
> -		spin_unlock(&sgx_global_lru.lock);
> +		list_move_tail(&epc_page->list, &lru->reclaimable);
> +		spin_unlock(&lru->lock);
>
>  		kref_put(&encl_page->encl->refcount, sgx_encl_release);
>  	}
> @@ -399,7 +411,7 @@ size_t sgx_reclaim_epc_pages(size_t nr_to_scan, bool ignore_age)
>  static bool sgx_should_reclaim(unsigned long watermark)
>  {
>  	return atomic_long_read(&sgx_nr_free_pages) < watermark &&
> -	       !list_empty(&sgx_global_lru.reclaimable);
> +	       sgx_can_reclaim();
>  }
>
>  /*
> @@ -529,14 +541,16 @@ struct sgx_epc_page *__sgx_alloc_epc_page(void)
>   */
>  void sgx_record_epc_page(struct sgx_epc_page *page, unsigned long flags)
>  {
> -	spin_lock(&sgx_global_lru.lock);
> +	struct sgx_epc_lru_lists *lru = sgx_lru_lists(page);
> +
> +	spin_lock(&lru->lock);
>  	WARN_ON_ONCE(sgx_epc_page_reclaimable(page->flags));
>  	page->flags |= flags;
>  	if (sgx_epc_page_reclaimable(flags))
> -		list_add_tail(&page->list, &sgx_global_lru.reclaimable);
> +		list_add_tail(&page->list, &lru->reclaimable);
>  	else
> -		list_add_tail(&page->list, &sgx_global_lru.unreclaimable);
> -	spin_unlock(&sgx_global_lru.lock);
> +		list_add_tail(&page->list, &lru->unreclaimable);
> +	spin_unlock(&lru->lock);
>  }
>
>  /**
> @@ -551,15 +565,16 @@ void sgx_record_epc_page(struct sgx_epc_page *page, unsigned long flags)
>   */
>  int sgx_drop_epc_page(struct sgx_epc_page *page)
>  {
> -	spin_lock(&sgx_global_lru.lock);
> +	struct sgx_epc_lru_lists *lru = sgx_lru_lists(page);
> +
> +	spin_lock(&lru->lock);
>  	if (sgx_epc_page_reclaim_in_progress(page->flags)) {
> -		spin_unlock(&sgx_global_lru.lock);
> +		spin_unlock(&lru->lock);
>  		return -EBUSY;
>  	}
> -
>  	list_del(&page->list);
>  	sgx_epc_page_reset_state(page);
> -	spin_unlock(&sgx_global_lru.lock);
> +	spin_unlock(&lru->lock);
>
>  	return 0;
>  }
> @@ -592,7 +607,7 @@ struct sgx_epc_page *sgx_alloc_epc_page(void *owner, bool reclaim)
>  		break;
>  	}
>
> -	if (list_empty(&sgx_global_lru.reclaimable))
> +	if (!sgx_can_reclaim())
>  		return ERR_PTR(-ENOMEM);
>
>  	if (!reclaim) {
> --
> 2.25.1

BR, Jarkko
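[For illustration only, not part of the thread's patches: the two review suggestions above would look roughly like the sketch below. The sgx_global_lru_can_reclaim() name is the one Jarkko proposes; the open-coded check is simply the pre-patch code that sgx_can_reclaim() wraps.]

/* Option 1: name the accessor after the object it touches. */
static inline bool sgx_global_lru_can_reclaim(void)
{
	return !list_empty(&sgx_global_lru.reclaimable);
}

/* Option 2: no wrapper at all; open code the check at each call site. */
static bool sgx_should_reclaim(unsigned long watermark)
{
	return atomic_long_read(&sgx_nr_free_pages) < watermark &&
	       !list_empty(&sgx_global_lru.reclaimable);
}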
On Wed, 13 Sep 2023 10:42:52 -0500, Jarkko Sakkinen <jarkko@kernel.org> wrote:

> On Wed Sep 13, 2023 at 7:06 AM EEST, Haitao Huang wrote:
>> Add sgx_can_reclaim() wrapper and encapsulate direct references to the
>> global LRU list in the reclaimer functions so that they can be called
>> with
>> an LRU list per EPC cgroup.
>>
>> Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
>> Signed-off-by: Kristen Carlson Accardi <kristen@linux.intel.com>
>> Signed-off-by: Haitao Huang <haitao.huang@linux.intel.com>
>> Cc: Sean Christopherson <seanjc@google.com>
>> ---
>> V4:
>> - Re-organized this patch to include all changes related to
>>   encapsulation of the global LRU
>> - Moved this patch to precede the EPC cgroup patch
>> ---
>>  arch/x86/kernel/cpu/sgx/main.c | 41 +++++++++++++++++++++++-----------
>>  1 file changed, 28 insertions(+), 13 deletions(-)
>>
>> diff --git a/arch/x86/kernel/cpu/sgx/main.c
>> b/arch/x86/kernel/cpu/sgx/main.c
>> index ce316bd5e5bb..3d396fe5ec09 100644
>> --- a/arch/x86/kernel/cpu/sgx/main.c
>> +++ b/arch/x86/kernel/cpu/sgx/main.c
>> @@ -34,6 +34,16 @@ static DEFINE_XARRAY(sgx_epc_address_space);
>>   */
>>  static struct sgx_epc_lru_lists sgx_global_lru;
>>
>> +static inline struct sgx_epc_lru_lists *sgx_lru_lists(struct
>> sgx_epc_page *epc_page)
>> +{
>> +	return &sgx_global_lru;
>> +}
>
> I'd simply export sgx_global_lru.
>

The purpose of this patch is to hide sgx_global_lru so later we can have
an LRU per cgroup. I'll update the commit message to make it clear this
is not just for sgx_can_reclaim.

>> +static inline bool sgx_can_reclaim(void)
>> +{
>> +	return !list_empty(&sgx_global_lru.reclaimable);
>> +}
>
> Accessors for the object should be named so that this fact is reflected,
> e.g. sgx_global_lru_can_reclaim() in this case.
>
> I would just open code this to the call sites though.
>

Ditto.

Thanks
Haitao
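[For illustration only: the encapsulation Haitao describes makes sgx_lru_lists() the one place that decides which LRU a page belongs to. A hypothetical sketch of the later per-cgroup stage follows, assuming the follow-up cgroup patch adds an epc_cg back-pointer to struct sgx_epc_page and an lru member to an sgx_epc_cgroup structure; neither field, nor the CONFIG_CGROUP_SGX_EPC option, exists in this patch.]

static inline struct sgx_epc_lru_lists *sgx_lru_lists(struct sgx_epc_page *epc_page)
{
#ifdef CONFIG_CGROUP_SGX_EPC
	/* Hypothetical: a page tracked by an EPC cgroup uses that cgroup's LRU. */
	if (epc_page->epc_cg)
		return &epc_page->epc_cg->lru;
#endif
	/* Otherwise fall back to the global LRU, exactly as in this patch. */
	return &sgx_global_lru;
}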
diff --git a/arch/x86/kernel/cpu/sgx/main.c b/arch/x86/kernel/cpu/sgx/main.c
index ce316bd5e5bb..3d396fe5ec09 100644
--- a/arch/x86/kernel/cpu/sgx/main.c
+++ b/arch/x86/kernel/cpu/sgx/main.c
@@ -34,6 +34,16 @@ static DEFINE_XARRAY(sgx_epc_address_space);
  */
 static struct sgx_epc_lru_lists sgx_global_lru;
 
+static inline struct sgx_epc_lru_lists *sgx_lru_lists(struct sgx_epc_page *epc_page)
+{
+	return &sgx_global_lru;
+}
+
+static inline bool sgx_can_reclaim(void)
+{
+	return !list_empty(&sgx_global_lru.reclaimable);
+}
+
 static atomic_long_t sgx_nr_free_pages = ATOMIC_LONG_INIT(0);
 
 /* Nodes with one or more EPC sections. */
@@ -339,6 +349,7 @@ size_t sgx_reclaim_epc_pages(size_t nr_to_scan, bool ignore_age)
 	struct sgx_backing backing[SGX_NR_TO_SCAN_MAX];
 	struct sgx_epc_page *epc_page, *tmp;
 	struct sgx_encl_page *encl_page;
+	struct sgx_epc_lru_lists *lru;
 	pgoff_t page_index;
 	LIST_HEAD(iso);
 	size_t ret;
@@ -372,10 +383,11 @@ size_t sgx_reclaim_epc_pages(size_t nr_to_scan, bool ignore_age)
 		continue;
 
 skip:
-		spin_lock(&sgx_global_lru.lock);
+		lru = sgx_lru_lists(epc_page);
+		spin_lock(&lru->lock);
 		sgx_epc_page_set_state(epc_page, SGX_EPC_PAGE_RECLAIMABLE);
-		list_move_tail(&epc_page->list, &sgx_global_lru.reclaimable);
-		spin_unlock(&sgx_global_lru.lock);
+		list_move_tail(&epc_page->list, &lru->reclaimable);
+		spin_unlock(&lru->lock);
 
 		kref_put(&encl_page->encl->refcount, sgx_encl_release);
 	}
@@ -399,7 +411,7 @@ size_t sgx_reclaim_epc_pages(size_t nr_to_scan, bool ignore_age)
 static bool sgx_should_reclaim(unsigned long watermark)
 {
 	return atomic_long_read(&sgx_nr_free_pages) < watermark &&
-	       !list_empty(&sgx_global_lru.reclaimable);
+	       sgx_can_reclaim();
 }
 
 /*
@@ -529,14 +541,16 @@ struct sgx_epc_page *__sgx_alloc_epc_page(void)
  */
 void sgx_record_epc_page(struct sgx_epc_page *page, unsigned long flags)
 {
-	spin_lock(&sgx_global_lru.lock);
+	struct sgx_epc_lru_lists *lru = sgx_lru_lists(page);
+
+	spin_lock(&lru->lock);
 	WARN_ON_ONCE(sgx_epc_page_reclaimable(page->flags));
 	page->flags |= flags;
 	if (sgx_epc_page_reclaimable(flags))
-		list_add_tail(&page->list, &sgx_global_lru.reclaimable);
+		list_add_tail(&page->list, &lru->reclaimable);
 	else
-		list_add_tail(&page->list, &sgx_global_lru.unreclaimable);
-	spin_unlock(&sgx_global_lru.lock);
+		list_add_tail(&page->list, &lru->unreclaimable);
+	spin_unlock(&lru->lock);
 }
 
 /**
@@ -551,15 +565,16 @@ void sgx_record_epc_page(struct sgx_epc_page *page, unsigned long flags)
  */
 int sgx_drop_epc_page(struct sgx_epc_page *page)
 {
-	spin_lock(&sgx_global_lru.lock);
+	struct sgx_epc_lru_lists *lru = sgx_lru_lists(page);
+
+	spin_lock(&lru->lock);
 	if (sgx_epc_page_reclaim_in_progress(page->flags)) {
-		spin_unlock(&sgx_global_lru.lock);
+		spin_unlock(&lru->lock);
 		return -EBUSY;
 	}
-
 	list_del(&page->list);
 	sgx_epc_page_reset_state(page);
-	spin_unlock(&sgx_global_lru.lock);
+	spin_unlock(&lru->lock);
 
 	return 0;
 }
@@ -592,7 +607,7 @@ struct sgx_epc_page *sgx_alloc_epc_page(void *owner, bool reclaim)
 		break;
 	}
 
-	if (list_empty(&sgx_global_lru.reclaimable))
+	if (!sgx_can_reclaim())
 		return ERR_PTR(-ENOMEM);
 
 	if (!reclaim) {