Message ID | 20230923030657.16148-16-haitao.huang@linux.intel.com (mailing list archive) |
---|---|
State | New, archived |
Headers | show |
Series | Add Cgroup support for SGX EPC memory | expand |
On Fri, 2023-09-22 at 20:06 -0700, Haitao Huang wrote: > +static inline struct sgx_epc_lru_lists *sgx_lru_lists(struct sgx_epc_page *epc_page) > +{ > + return &sgx_global_lru; > +} > + > +static inline bool sgx_can_reclaim(void) > +{ > + return !list_empty(&sgx_global_lru.reclaimable); > +} > + Shouldn't sgx_can_reclaim() also take a 'struct sgx_epc_lru_lists *'? I thought we also need to check whether a cgroup's LRU lists can be reclaimed?
On Thu, 05 Oct 2023 07:30:46 -0500, Huang, Kai <kai.huang@intel.com> wrote: > On Fri, 2023-09-22 at 20:06 -0700, Haitao Huang wrote: >> +static inline struct sgx_epc_lru_lists *sgx_lru_lists(struct >> sgx_epc_page *epc_page) >> +{ >> + return &sgx_global_lru; >> +} >> + >> +static inline bool sgx_can_reclaim(void) >> +{ >> + return !list_empty(&sgx_global_lru.reclaimable); >> +} >> + > > Shouldn't sgx_can_reclaim() also take a 'struct sgx_epc_lru_lists *'? > > I thought we also need to check whether a cgroup's LRU lists can be > reclaimed? This is only used to check if any pages reclaimable at the top level in this file. Later sgx_epc_cgroup_lru_empty(NULL) is used in this function to recursively check all cgroups starting from the root. Haitao
On Thu, 2023-10-05 at 14:33 -0500, Haitao Huang wrote: > On Thu, 05 Oct 2023 07:30:46 -0500, Huang, Kai <kai.huang@intel.com> wrote: > > > On Fri, 2023-09-22 at 20:06 -0700, Haitao Huang wrote: > > > +static inline struct sgx_epc_lru_lists *sgx_lru_lists(struct > > > sgx_epc_page *epc_page) > > > +{ > > > + return &sgx_global_lru; > > > +} > > > + > > > +static inline bool sgx_can_reclaim(void) > > > +{ > > > + return !list_empty(&sgx_global_lru.reclaimable); > > > +} > > > + > > > > Shouldn't sgx_can_reclaim() also take a 'struct sgx_epc_lru_lists *'? > > > > I thought we also need to check whether a cgroup's LRU lists can be > > reclaimed? > > This is only used to check if any pages reclaimable at the top level in > this file. Later sgx_epc_cgroup_lru_empty(NULL) is used in this function > to recursively check all cgroups starting from the root. > > This again falls to the "impossible to review unless review a later patch first" category. This patch says nothing about sgx_can_reclaim() will only be used at the top level. Even if it does, why cannot it take LRU lists as input? All this patch says is we need to prepare these functions to suit multiple LRU lists. Btw, why sgx_reclaim_epc_pages() doesn't take LRU lists as input either? Is it possible that it can be called across multiple LRU lists, or across different lists in one LRU? Why do we need to find some particular LRU lists by given EPC page? +static inline struct sgx_epc_lru_lists *sgx_lru_lists(struct sgx_epc_page *epc_page) +{ + return &sgx_global_lru; +} + Maybe it's clear for other people, but to me it sounds some necessary design background is missing at least. Please try best to make the patch self-reviewable by justifying all of those.
diff --git a/arch/x86/kernel/cpu/sgx/main.c b/arch/x86/kernel/cpu/sgx/main.c
index b34ad3574c81..d37ef0dd865f 100644
--- a/arch/x86/kernel/cpu/sgx/main.c
+++ b/arch/x86/kernel/cpu/sgx/main.c
@@ -35,6 +35,16 @@ static DEFINE_XARRAY(sgx_epc_address_space);
  */
 static struct sgx_epc_lru_lists sgx_global_lru;
 
+static inline struct sgx_epc_lru_lists *sgx_lru_lists(struct sgx_epc_page *epc_page)
+{
+	return &sgx_global_lru;
+}
+
+static inline bool sgx_can_reclaim(void)
+{
+	return !list_empty(&sgx_global_lru.reclaimable);
+}
+
 static atomic_long_t sgx_nr_free_pages = ATOMIC_LONG_INIT(0);
 
 /* Nodes with one or more EPC sections. */
@@ -340,6 +350,7 @@ size_t sgx_reclaim_epc_pages(size_t nr_to_scan, bool ignore_age)
 	struct sgx_backing backing[SGX_NR_TO_SCAN_MAX];
 	struct sgx_epc_page *epc_page, *tmp;
 	struct sgx_encl_page *encl_page;
+	struct sgx_epc_lru_lists *lru;
 	pgoff_t page_index;
 	LIST_HEAD(iso);
 	size_t ret, i;
@@ -372,10 +383,11 @@ size_t sgx_reclaim_epc_pages(size_t nr_to_scan, bool ignore_age)
 		continue;
 
 skip:
-		spin_lock(&sgx_global_lru.lock);
+		lru = sgx_lru_lists(epc_page);
+		spin_lock(&lru->lock);
 		sgx_epc_page_set_state(epc_page, SGX_EPC_PAGE_RECLAIMABLE);
-		list_move_tail(&epc_page->list, &sgx_global_lru.reclaimable);
-		spin_unlock(&sgx_global_lru.lock);
+		list_move_tail(&epc_page->list, &lru->reclaimable);
+		spin_unlock(&lru->lock);
 
 		kref_put(&encl_page->encl->refcount, sgx_encl_release);
 	}
@@ -400,7 +412,7 @@ size_t sgx_reclaim_epc_pages(size_t nr_to_scan, bool ignore_age)
 static bool sgx_should_reclaim(unsigned long watermark)
 {
 	return atomic_long_read(&sgx_nr_free_pages) < watermark &&
-	       !list_empty(&sgx_global_lru.reclaimable);
+	       sgx_can_reclaim();
 }
 
 /*
@@ -530,14 +542,16 @@ struct sgx_epc_page *__sgx_alloc_epc_page(void)
  */
 void sgx_record_epc_page(struct sgx_epc_page *page, unsigned long flags)
 {
-	spin_lock(&sgx_global_lru.lock);
+	struct sgx_epc_lru_lists *lru = sgx_lru_lists(page);
+
+	spin_lock(&lru->lock);
 	WARN_ON_ONCE(sgx_epc_page_reclaimable(page->flags));
 	page->flags |= flags;
 	if (sgx_epc_page_reclaimable(flags))
-		list_add_tail(&page->list, &sgx_global_lru.reclaimable);
+		list_add_tail(&page->list, &lru->reclaimable);
 	else
-		list_add_tail(&page->list, &sgx_global_lru.unreclaimable);
-	spin_unlock(&sgx_global_lru.lock);
+		list_add_tail(&page->list, &lru->unreclaimable);
+	spin_unlock(&lru->lock);
 }
 
 /**
@@ -552,15 +566,16 @@ void sgx_record_epc_page(struct sgx_epc_page *page, unsigned long flags)
  */
 int sgx_drop_epc_page(struct sgx_epc_page *page)
 {
-	spin_lock(&sgx_global_lru.lock);
+	struct sgx_epc_lru_lists *lru = sgx_lru_lists(page);
+
+	spin_lock(&lru->lock);
 	if (sgx_epc_page_reclaim_in_progress(page->flags)) {
-		spin_unlock(&sgx_global_lru.lock);
+		spin_unlock(&lru->lock);
 		return -EBUSY;
 	}
 
-	list_del(&page->list);
 	sgx_epc_page_reset_state(page);
-	spin_unlock(&sgx_global_lru.lock);
+	spin_unlock(&lru->lock);
 
 	return 0;
 }
@@ -593,7 +608,7 @@ struct sgx_epc_page *sgx_alloc_epc_page(void *owner, bool reclaim)
 		break;
 	}
 
-	if (list_empty(&sgx_global_lru.reclaimable))
+	if (!sgx_can_reclaim())
 		return ERR_PTR(-ENOMEM);
 
 	if (!reclaim) {