Message ID | 20240130020938.10025-6-haitao.huang@linux.intel.com
---|---
State | New, archived
Series | Add Cgroup support for SGX EPC memory
On Tue Jan 30, 2024 at 4:09 AM EET, Haitao Huang wrote:
> From: Sean Christopherson <sean.j.christopherson@intel.com>
>
> Introduce a data structure to wrap the existing reclaimable list and its
> spinlock. Each cgroup will later have one instance of this structure to
> track EPC pages allocated for processes associated with the same cgroup.
> Just like the global SGX reclaimer (ksgxd), an EPC cgroup reclaims pages
> from the reclaimable list in this structure when its usage approaches its
> limit.
>
> Use this structure to encapsulate the LRU list and its lock used by the
> global reclaimer.
>
> Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
> Co-developed-by: Kristen Carlson Accardi <kristen@linux.intel.com>
> Signed-off-by: Kristen Carlson Accardi <kristen@linux.intel.com>
> Co-developed-by: Haitao Huang <haitao.huang@linux.intel.com>
> Signed-off-by: Haitao Huang <haitao.huang@linux.intel.com>

I'd put the author's SoB last, but that said, I'm not sure there is a
rigid rule to do so. Thus, not saying "must" here.

> Cc: Sean Christopherson <seanjc@google.com>
> ---
> V6:
> - Removed the introduction to unreclaimables in the commit message.
>
> V4:
> - Removed unneeded comments for the spinlock and the non-reclaimables.
>   (Kai, Jarkko)
> - Revised the commit message to add introductory comments for
>   unreclaimables and multiple LRU lists. (Kai)
> - Reordered the patches: delayed all changes for unreclaimables to
>   later, so this one becomes the first change in the SGX subsystem.
>
> V3:
> - Removed the helper functions and revised commit messages.
> ---
>  arch/x86/kernel/cpu/sgx/main.c | 39 +++++++++++++++++-----------------
>  arch/x86/kernel/cpu/sgx/sgx.h  | 15 +++++++++++++
>  2 files changed, 35 insertions(+), 19 deletions(-)

[...]

Reviewed-by: Jarkko Sakkinen <jarkko@kernel.org>

BR, Jarkko
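The core pattern the patch introduces is a single spinlock guarding a single
reclaimable list, bundled so that reclaim code can take an LRU instance as a
parameter instead of hard-coding the globals. A minimal sketch of such a
parameterized scan, built on the sgx_epc_lru_list type the patch adds (the
helper name and exact shape are illustrative, not part of the patch):

static struct sgx_epc_page *sgx_lru_pop_reclaimable(struct sgx_epc_lru_list *lru)
{
	struct sgx_epc_page *epc_page;

	/* Pop the oldest (least recently used) reclaimable page, if any. */
	spin_lock(&lru->lock);
	epc_page = list_first_entry_or_null(&lru->reclaimable,
					    struct sgx_epc_page, list);
	if (epc_page)
		list_del_init(&epc_page->list);
	spin_unlock(&lru->lock);

	return epc_page;
}

Written this way, the same scan can serve sgx_global_lru today and a
per-cgroup LRU later, which is the point of the encapsulation.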
On Thu, 01 Feb 2024 17:28:32 -0600, Jarkko Sakkinen <jarkko@kernel.org> wrote:

> On Tue Jan 30, 2024 at 4:09 AM EET, Haitao Huang wrote:
[...]
>> Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
>> Co-developed-by: Kristen Carlson Accardi <kristen@linux.intel.com>
>> Signed-off-by: Kristen Carlson Accardi <kristen@linux.intel.com>
>> Co-developed-by: Haitao Huang <haitao.huang@linux.intel.com>
>> Signed-off-by: Haitao Huang <haitao.huang@linux.intel.com>
>
> I'd put the author's SoB last, but that said, I'm not sure there is a
> rigid rule to do so. Thus, not saying "must" here.

The documentation (Documentation/process/submitting-patches.rst) says "the
ordering of Signed-off-by: tags should reflect the chronological history of
the patch insofar as possible, regardless of whether the author is
attributed via From: or Co-developed-by:. Notably, the last Signed-off-by:
must always be that of the developer submitting the patch."

So this should be OK.

[...]

> Reviewed-by: Jarkko Sakkinen <jarkko@kernel.org>
>
> BR, Jarkko

Thank you!
Haitao
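For reference, a generic illustration of the chronological ordering the
documentation describes (all names and addresses below are placeholders):

From: Alice Author <alice@example.com>

Signed-off-by: Alice Author <alice@example.com>
Co-developed-by: Bob Contributor <bob@example.com>
Signed-off-by: Bob Contributor <bob@example.com>
Signed-off-by: Carol Submitter <carol@example.com>

Alice wrote the original patch, Bob co-developed it, and Carol is mailing
it, so Carol's Signed-off-by comes last even though Alice remains the
attributed author via From:.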
diff --git a/arch/x86/kernel/cpu/sgx/main.c b/arch/x86/kernel/cpu/sgx/main.c
index c32f18b70c73..912959c7ecc9 100644
--- a/arch/x86/kernel/cpu/sgx/main.c
+++ b/arch/x86/kernel/cpu/sgx/main.c
@@ -28,10 +28,9 @@ static DEFINE_XARRAY(sgx_epc_address_space);
 
 /*
  * These variables are part of the state of the reclaimer, and must be accessed
- * with sgx_reclaimer_lock acquired.
+ * with sgx_global_lru.lock acquired.
  */
-static LIST_HEAD(sgx_active_page_list);
-static DEFINE_SPINLOCK(sgx_reclaimer_lock);
+static struct sgx_epc_lru_list sgx_global_lru;
 
 static atomic_long_t sgx_nr_free_pages = ATOMIC_LONG_INIT(0);
 
@@ -306,13 +305,13 @@ static void sgx_reclaim_pages(void)
 	int ret;
 	int i;
 
-	spin_lock(&sgx_reclaimer_lock);
+	spin_lock(&sgx_global_lru.lock);
 	for (i = 0; i < SGX_NR_TO_SCAN; i++) {
-		if (list_empty(&sgx_active_page_list))
+		epc_page = list_first_entry_or_null(&sgx_global_lru.reclaimable,
+						    struct sgx_epc_page, list);
+		if (!epc_page)
 			break;
 
-		epc_page = list_first_entry(&sgx_active_page_list,
-					    struct sgx_epc_page, list);
 		list_del_init(&epc_page->list);
 		encl_page = epc_page->owner;
 
@@ -324,7 +323,7 @@ static void sgx_reclaim_pages(void)
 		 */
 		epc_page->flags &= ~SGX_EPC_PAGE_RECLAIMER_TRACKED;
 	}
-	spin_unlock(&sgx_reclaimer_lock);
+	spin_unlock(&sgx_global_lru.lock);
 
 	for (i = 0; i < cnt; i++) {
 		epc_page = chunk[i];
@@ -347,9 +346,9 @@ static void sgx_reclaim_pages(void)
 			continue;
 
 skip:
-		spin_lock(&sgx_reclaimer_lock);
-		list_add_tail(&epc_page->list, &sgx_active_page_list);
-		spin_unlock(&sgx_reclaimer_lock);
+		spin_lock(&sgx_global_lru.lock);
+		list_add_tail(&epc_page->list, &sgx_global_lru.reclaimable);
+		spin_unlock(&sgx_global_lru.lock);
 
 		kref_put(&encl_page->encl->refcount, sgx_encl_release);
 
@@ -380,7 +379,7 @@ static void sgx_reclaim_pages(void)
 static bool sgx_should_reclaim(unsigned long watermark)
 {
 	return atomic_long_read(&sgx_nr_free_pages) < watermark &&
-	       !list_empty(&sgx_active_page_list);
+	       !list_empty(&sgx_global_lru.reclaimable);
 }
 
 /*
@@ -432,6 +431,8 @@ static bool __init sgx_page_reclaimer_init(void)
 
 	ksgxd_tsk = tsk;
 
+	sgx_lru_init(&sgx_global_lru);
+
 	return true;
 }
 
@@ -507,10 +508,10 @@ struct sgx_epc_page *__sgx_alloc_epc_page(void)
  */
 void sgx_mark_page_reclaimable(struct sgx_epc_page *page)
 {
-	spin_lock(&sgx_reclaimer_lock);
+	spin_lock(&sgx_global_lru.lock);
 	page->flags |= SGX_EPC_PAGE_RECLAIMER_TRACKED;
-	list_add_tail(&page->list, &sgx_active_page_list);
-	spin_unlock(&sgx_reclaimer_lock);
+	list_add_tail(&page->list, &sgx_global_lru.reclaimable);
+	spin_unlock(&sgx_global_lru.lock);
 }
 
 /**
@@ -525,18 +526,18 @@ void sgx_mark_page_reclaimable(struct sgx_epc_page *page)
  */
 int sgx_unmark_page_reclaimable(struct sgx_epc_page *page)
 {
-	spin_lock(&sgx_reclaimer_lock);
+	spin_lock(&sgx_global_lru.lock);
 	if (page->flags & SGX_EPC_PAGE_RECLAIMER_TRACKED) {
 		/* The page is being reclaimed. */
 		if (list_empty(&page->list)) {
-			spin_unlock(&sgx_reclaimer_lock);
+			spin_unlock(&sgx_global_lru.lock);
 			return -EBUSY;
 		}
 
 		list_del(&page->list);
 		page->flags &= ~SGX_EPC_PAGE_RECLAIMER_TRACKED;
 	}
-	spin_unlock(&sgx_reclaimer_lock);
+	spin_unlock(&sgx_global_lru.lock);
 
 	return 0;
 }
@@ -578,7 +579,7 @@ struct sgx_epc_page *sgx_alloc_epc_page(void *owner, bool reclaim)
 		break;
 	}
 
-	if (list_empty(&sgx_active_page_list)) {
+	if (list_empty(&sgx_global_lru.reclaimable)) {
 		page = ERR_PTR(-ENOMEM);
 		break;
 	}
diff --git a/arch/x86/kernel/cpu/sgx/sgx.h b/arch/x86/kernel/cpu/sgx/sgx.h
index a898d86dead0..0e99e9ae3a67 100644
--- a/arch/x86/kernel/cpu/sgx/sgx.h
+++ b/arch/x86/kernel/cpu/sgx/sgx.h
@@ -88,6 +88,21 @@ static inline void *sgx_get_epc_virt_addr(struct sgx_epc_page *page)
 	return section->virt_addr + index * PAGE_SIZE;
 }
 
+/*
+ * Contains EPC pages tracked by the global reclaimer (ksgxd) or an EPC
+ * cgroup.
+ */
+struct sgx_epc_lru_list {
+	spinlock_t lock;
+	struct list_head reclaimable;
+};
+
+static inline void sgx_lru_init(struct sgx_epc_lru_list *lru)
+{
+	spin_lock_init(&lru->lock);
+	INIT_LIST_HEAD(&lru->reclaimable);
+}
+
 struct sgx_epc_page *__sgx_alloc_epc_page(void);
 void sgx_free_epc_page(struct sgx_epc_page *page);
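Where the series takes this structure: each EPC cgroup is expected to carry
its own sgx_epc_lru_list, with call sites selecting between the cgroup's LRU
and sgx_global_lru for a given page. A rough sketch of that direction
follows; the sgx_epc_cgroup layout, the epc_cg back-pointer, the helper
name, and the config symbol are all assumptions for illustration, not part
of this patch:

/* Hypothetical: one LRU instance per EPC cgroup (added by later patches). */
struct sgx_epc_cgroup {
	struct sgx_epc_lru_list lru;
	/* charge/limit bookkeeping would live here as well */
};

static struct sgx_epc_lru_list *sgx_lru_list(struct sgx_epc_page *epc_page)
{
#ifdef CONFIG_CGROUP_SGX_EPC
	/* Pages charged to a cgroup age on that cgroup's own LRU. */
	if (epc_page->epc_cg)
		return &epc_page->epc_cg->lru;
#endif
	/* Otherwise fall back to the global LRU introduced here. */
	return &sgx_global_lru;
}

With such a helper, sgx_mark_page_reclaimable() and friends keep the same
lock-and-list logic and only swap which LRU instance they operate on.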