Message ID | 1472191902-13344-1-git-send-email-rcojocaru@bitdefender.com (mailing list archive) |
---|---|
State | New, archived |
>>> On 26.08.16 at 08:11, <rcojocaru@bitdefender.com> wrote: One general note first: I don't really like the term "sparse" here, as that suggests to me you want to skip address space holes. How about calling it "multi", "array", or some such? > --- a/xen/arch/x86/mm/p2m.c > +++ b/xen/arch/x86/mm/p2m.c > @@ -1815,7 +1815,7 @@ int p2m_set_altp2m_mem_access(struct domain *d, struct p2m_domain *hp2m, > * Set access type for a region of gfns. > * If gfn == INVALID_GFN, sets the default access type. > */ > -long p2m_set_mem_access(struct domain *d, gfn_t gfn, uint32_t nr, > +long p2m_set_mem_access(struct domain *d, gfn_t gfn, xen_pfn_t *arr, uint32_t nr, const > @@ -1874,28 +1874,53 @@ long p2m_set_mem_access(struct domain *d, gfn_t gfn, uint32_t nr, > if ( ap2m ) > p2m_lock(ap2m); > > - for ( gfn_l = gfn_x(gfn) + start; nr > start; ++gfn_l ) > + if ( !arr ) > { > - if ( ap2m ) > + for ( gfn_l = gfn_x(gfn) + start; nr > start; ++gfn_l ) > { > - rc = p2m_set_altp2m_mem_access(d, p2m, ap2m, a, _gfn(gfn_l)); > - /* If the corresponding mfn is invalid we will just skip it */ > - if ( rc && rc != -ESRCH ) > - break; > - } > - else > - { > - mfn = p2m->get_entry(p2m, gfn_l, &t, &_a, 0, NULL, NULL); > - rc = p2m->set_entry(p2m, gfn_l, mfn, PAGE_ORDER_4K, t, a, -1); > - if ( rc ) > + if ( ap2m ) > + { > + rc = p2m_set_altp2m_mem_access(d, p2m, ap2m, a, _gfn(gfn_l)); > + /* If the corresponding mfn is invalid we will just skip it */ > + if ( rc && rc != -ESRCH ) > + break; > + } > + else > + { > + mfn = p2m->get_entry(p2m, gfn_l, &t, &_a, 0, NULL, NULL); > + rc = p2m->set_entry(p2m, gfn_l, mfn, PAGE_ORDER_4K, t, a, -1); > + if ( rc ) > + break; > + } > + > + /* Check for continuation if it's not the last iteration. */ > + if ( nr > ++start && !(start & mask) && hypercall_preempt_check() ) > + { > + rc = start; > break; > + } I think it would help if you broke out some of the loop body into a helper function. > } > + } > + else > + { > + uint32_t i; > > - /* Check for continuation if it's not the last iteration. */ > - if ( nr > ++start && !(start & mask) && hypercall_preempt_check() ) > + for ( i = 0; i < nr; ++i ) > { > - rc = start; > - break; > + if ( ap2m ) > + { > + rc = p2m_set_altp2m_mem_access(d, p2m, ap2m, a, _gfn(arr[i])); > + /* If the corresponding mfn is invalid we will just skip it */ > + if ( rc && rc != -ESRCH ) > + break; > + } > + else > + { > + mfn = p2m->get_entry(p2m, arr[i], &t, &_a, 0, NULL, NULL); > + rc = p2m->set_entry(p2m, arr[i], mfn, PAGE_ORDER_4K, t, a, -1); > + if ( rc ) > + break; > + } > } Where is the preemption handling? > --- a/xen/common/compat/memory.c > +++ b/xen/common/compat/memory.c > @@ -15,7 +15,6 @@ CHECK_TYPE(domid); > #undef compat_domid_t > #undef xen_domid_t > > -CHECK_mem_access_op; > CHECK_vmemrange; You can't just delete this line. Some form of replacement is needed: Either you need to introduce compat mode translation, or you need to keep the line and suitably adjust the interface structure (which might be possible considering there's a [tools interface only] use of uint64_aligned_t already). > @@ -76,6 +76,17 @@ int mem_access_memop(unsigned long cmd, > } > break; > > + case XENMEM_access_op_set_access_sparse: > + { > + xen_pfn_t *arr = xmalloc_bytes(sizeof(xen_pfn_t) * mao.nr); > + > + // copy_from_guest(arr, mao.pfn_list, mao.nr); What is this (wrongly C++ style) comment about? I think this really wasn't meant to be a comment, so RFC or not - how do things work with this commented out? 
And where is the error checking for the allocation (which btw should be xmalloc_array(), but the need for an allocation here is questionable - the called function would better retrieve the GFNs one by one). > + rc = p2m_set_mem_access(d, _gfn(mao.pfn), arr, mao.nr, start_iter, > + MEMOP_CMD_MASK, mao.access, 0); Please don't pass mao.pfn here when it's meant to be ignore by the caller. Use GFN_INVALID instead. And for future extensibility please check that the caller put some pre-defined pattern here (not sure whether zero is appropriate in this case). Jan
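As an aside for readers following along: the helper-function suggestion above could look roughly like the sketch below, which simply pulls the per-GFN body out of the loop quoted in the patch. The name and exact signature are illustrative, not part of the posted patch.

    /* Sketch: per-GFN body factored out of the loop quoted above. */
    static int set_mem_access(struct domain *d, struct p2m_domain *p2m,
                              struct p2m_domain *ap2m, p2m_access_t a,
                              gfn_t gfn)
    {
        int rc = 0;

        if ( ap2m )
        {
            rc = p2m_set_altp2m_mem_access(d, p2m, ap2m, a, gfn);
            /* If the corresponding mfn is invalid we will just skip it. */
            if ( rc == -ESRCH )
                rc = 0;
        }
        else
        {
            unsigned long gfn_l = gfn_x(gfn);
            mfn_t mfn;
            p2m_type_t t;
            p2m_access_t _a;

            mfn = p2m->get_entry(p2m, gfn_l, &t, &_a, 0, NULL, NULL);
            rc = p2m->set_entry(p2m, gfn_l, mfn, PAGE_ORDER_4K, t, a, -1);
        }

        return rc;
    }

Both the contiguous-range loop and the array loop would then reduce to a call to this helper plus the iteration and preemption logic.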
On 08/26/16 10:18, Jan Beulich wrote: >>>> On 26.08.16 at 08:11, <rcojocaru@bitdefender.com> wrote: > > One general note first: I don't really like the term "sparse" here, > as that suggests to me you want to skip address space holes. > How about calling it "multi", "array", or some such? Fair enough, will rename it. >> --- a/xen/arch/x86/mm/p2m.c >> +++ b/xen/arch/x86/mm/p2m.c >> @@ -1815,7 +1815,7 @@ int p2m_set_altp2m_mem_access(struct domain *d, struct p2m_domain *hp2m, >> * Set access type for a region of gfns. >> * If gfn == INVALID_GFN, sets the default access type. >> */ >> -long p2m_set_mem_access(struct domain *d, gfn_t gfn, uint32_t nr, >> +long p2m_set_mem_access(struct domain *d, gfn_t gfn, xen_pfn_t *arr, uint32_t nr, > > const Will do. >> @@ -1874,28 +1874,53 @@ long p2m_set_mem_access(struct domain *d, gfn_t gfn, uint32_t nr, >> if ( ap2m ) >> p2m_lock(ap2m); >> >> - for ( gfn_l = gfn_x(gfn) + start; nr > start; ++gfn_l ) >> + if ( !arr ) >> { >> - if ( ap2m ) >> + for ( gfn_l = gfn_x(gfn) + start; nr > start; ++gfn_l ) >> { >> - rc = p2m_set_altp2m_mem_access(d, p2m, ap2m, a, _gfn(gfn_l)); >> - /* If the corresponding mfn is invalid we will just skip it */ >> - if ( rc && rc != -ESRCH ) >> - break; >> - } >> - else >> - { >> - mfn = p2m->get_entry(p2m, gfn_l, &t, &_a, 0, NULL, NULL); >> - rc = p2m->set_entry(p2m, gfn_l, mfn, PAGE_ORDER_4K, t, a, -1); >> - if ( rc ) >> + if ( ap2m ) >> + { >> + rc = p2m_set_altp2m_mem_access(d, p2m, ap2m, a, _gfn(gfn_l)); >> + /* If the corresponding mfn is invalid we will just skip it */ >> + if ( rc && rc != -ESRCH ) >> + break; >> + } >> + else >> + { >> + mfn = p2m->get_entry(p2m, gfn_l, &t, &_a, 0, NULL, NULL); >> + rc = p2m->set_entry(p2m, gfn_l, mfn, PAGE_ORDER_4K, t, a, -1); >> + if ( rc ) >> + break; >> + } >> + >> + /* Check for continuation if it's not the last iteration. */ >> + if ( nr > ++start && !(start & mask) && hypercall_preempt_check() ) >> + { >> + rc = start; >> break; >> + } > > I think it would help if you broke out some of the loop body into a > helper function. I'll refactor it. >> } >> + } >> + else >> + { >> + uint32_t i; >> >> - /* Check for continuation if it's not the last iteration. */ >> - if ( nr > ++start && !(start & mask) && hypercall_preempt_check() ) >> + for ( i = 0; i < nr; ++i ) >> { >> - rc = start; >> - break; >> + if ( ap2m ) >> + { >> + rc = p2m_set_altp2m_mem_access(d, p2m, ap2m, a, _gfn(arr[i])); >> + /* If the corresponding mfn is invalid we will just skip it */ >> + if ( rc && rc != -ESRCH ) >> + break; >> + } >> + else >> + { >> + mfn = p2m->get_entry(p2m, arr[i], &t, &_a, 0, NULL, NULL); >> + rc = p2m->set_entry(p2m, arr[i], mfn, PAGE_ORDER_4K, t, a, -1); >> + if ( rc ) >> + break; >> + } >> } > > Where is the preemption handling? It's missing. I'll add it back in. >> --- a/xen/common/compat/memory.c >> +++ b/xen/common/compat/memory.c >> @@ -15,7 +15,6 @@ CHECK_TYPE(domid); >> #undef compat_domid_t >> #undef xen_domid_t >> >> -CHECK_mem_access_op; >> CHECK_vmemrange; > > You can't just delete this line. Some form of replacement is needed: > Either you need to introduce compat mode translation, or you need > to keep the line and suitably adjust the interface structure (which > might be possible considering there's a [tools interface only] use of > uint64_aligned_t already). I'll look into this. I'm not sure how to go about introducing compat mode translation, could you please tell me where to look for an example of doing that? Thanks! 
>> @@ -76,6 +76,17 @@ int mem_access_memop(unsigned long cmd, >> } >> break; >> >> + case XENMEM_access_op_set_access_sparse: >> + { >> + xen_pfn_t *arr = xmalloc_bytes(sizeof(xen_pfn_t) * mao.nr); >> + >> + // copy_from_guest(arr, mao.pfn_list, mao.nr); > > What is this (wrongly C++ style) comment about? I think this really > wasn't meant to be a comment, so RFC or not - how do things work > with this commented out? And where is the error checking for the > allocation (which btw should be xmalloc_array(), but the need for > an allocation here is questionable - the called function would better > retrieve the GFNs one by one). They don't work, I forgot that comment in (the line should not have been commented). I first wrote the patch on Xen 4.6 and there there was no CHECK_mem_access_op, so I was just trying to figure out what went wrong when I first saw the compile errors and tried this, then left it in accidentally. Indeed, there should also be a check for allocation failure. Do you mean that I would do better to just copy_from_guest(&gfn, mao.pfn_list + index, 1) in a for loop that sets mem_access restrictions? >> + rc = p2m_set_mem_access(d, _gfn(mao.pfn), arr, mao.nr, start_iter, >> + MEMOP_CMD_MASK, mao.access, 0); > > Please don't pass mao.pfn here when it's meant to be ignore by > the caller. Use GFN_INVALID instead. And for future extensibility > please check that the caller put some pre-defined pattern here > (not sure whether zero is appropriate in this case). Will do. Thanks for the review, Razvan
>>> On 26.08.16 at 09:40, <rcojocaru@bitdefender.com> wrote: > On 08/26/16 10:18, Jan Beulich wrote: >>>>> On 26.08.16 at 08:11, <rcojocaru@bitdefender.com> wrote: >>> --- a/xen/common/compat/memory.c >>> +++ b/xen/common/compat/memory.c >>> @@ -15,7 +15,6 @@ CHECK_TYPE(domid); >>> #undef compat_domid_t >>> #undef xen_domid_t >>> >>> -CHECK_mem_access_op; >>> CHECK_vmemrange; >> >> You can't just delete this line. Some form of replacement is needed: >> Either you need to introduce compat mode translation, or you need >> to keep the line and suitably adjust the interface structure (which >> might be possible considering there's a [tools interface only] use of >> uint64_aligned_t already). > > I'll look into this. I'm not sure how to go about introducing compat > mode translation, could you please tell me where to look for an example > of doing that? Thanks! Well, the first option to consider should be avoiding translation (and hence keeping the check macro invocation in place), the more that this is a tools only interface (you're certainly aware that e.g domctl and sysctl also avoid translation). Examples of translation can be found (you could have guessed that) in xen/common/compat/memory.c. >>> @@ -76,6 +76,17 @@ int mem_access_memop(unsigned long cmd, >>> } >>> break; >>> >>> + case XENMEM_access_op_set_access_sparse: >>> + { >>> + xen_pfn_t *arr = xmalloc_bytes(sizeof(xen_pfn_t) * mao.nr); >>> + >>> + // copy_from_guest(arr, mao.pfn_list, mao.nr); >> >> What is this (wrongly C++ style) comment about? I think this really >> wasn't meant to be a comment, so RFC or not - how do things work >> with this commented out? And where is the error checking for the >> allocation (which btw should be xmalloc_array(), but the need for >> an allocation here is questionable - the called function would better >> retrieve the GFNs one by one). > > They don't work, I forgot that comment in (the line should not have been > commented). I first wrote the patch on Xen 4.6 and there there was no > CHECK_mem_access_op, so I was just trying to figure out what went wrong > when I first saw the compile errors and tried this, then left it in > accidentally. > > Indeed, there should also be a check for allocation failure. > > Do you mean that I would do better to just copy_from_guest(&gfn, > mao.pfn_list + index, 1) in a for loop that sets mem_access restrictions? Yes, albeit it is copy_from_guest_offset(&gfn, mao.pfn_list, index, 1). Jan
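To make the "retrieve the GFNs one by one" idea concrete, a minimal sketch of the hypercall-side loop is below. It assumes mao.pfn_list has become a proper guest handle of xen_pfn_t and it elides the continuation plumbing, so it only illustrates the copy_from_guest_offset() pattern, not the final code.

    /* Sketch: fetch and apply one GFN per iteration, no intermediate array. */
    for ( ; start_iter < mao.nr; ++start_iter )
    {
        xen_pfn_t gfn;

        if ( copy_from_guest_offset(&gfn, mao.pfn_list, start_iter, 1) )
        {
            rc = -EFAULT;
            break;
        }

        rc = p2m_set_mem_access(d, _gfn(gfn), NULL, 1, 0, 0, mao.access, 0);
        if ( rc )
            break;

        /*
         * A real version would test hypercall_preempt_check() here and
         * arrange a continuation, as the contiguous-range case already does.
         */
    }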
On Fri, Aug 26, 2016 at 09:11:42AM +0300, Razvan Cojocaru wrote: > Currently it is only possible to set mem_access restrictions only for > a contiguous range of GFNs (or, as a particular case, for a single GFN). > This patch introduces a new libxc function taking an array of GFNs. > The alternative would be to set each page in turn, using a userspace-HV > roundtrip for each call, and triggering a TLB flush per page set. > > Signed-off-by: Razvan Cojocaru <rcojocaru@bitdefender.com> > --- > tools/libxc/include/xenctrl.h | 4 +++ > tools/libxc/xc_mem_access.c | 32 +++++++++++++++++++++++ > xen/arch/x86/hvm/hvm.c | 2 +- > xen/arch/x86/mm/p2m.c | 59 ++++++++++++++++++++++++++++++------------- > xen/common/compat/memory.c | 1 - > xen/common/mem_access.c | 13 +++++++++- > xen/include/public/memory.h | 6 +++++ > xen/include/xen/p2m-common.h | 6 ++--- > 8 files changed, 100 insertions(+), 23 deletions(-) > > diff --git a/tools/libxc/include/xenctrl.h b/tools/libxc/include/xenctrl.h > index 560ce7b..ac84908 100644 > --- a/tools/libxc/include/xenctrl.h > +++ b/tools/libxc/include/xenctrl.h > @@ -2125,6 +2125,10 @@ int xc_set_mem_access(xc_interface *xch, domid_t domain_id, > xenmem_access_t access, uint64_t first_pfn, > uint32_t nr); > > +int xc_set_mem_access_sparse(xc_interface *xch, domid_t domain_id, > + xenmem_access_t access, xen_pfn_t *pages, > + uint32_t nr); > + > /* > * Gets the mem access for the given page (returned in access on success) > */ > diff --git a/tools/libxc/xc_mem_access.c b/tools/libxc/xc_mem_access.c > index eee088c..73b1caa 100644 > --- a/tools/libxc/xc_mem_access.c > +++ b/tools/libxc/xc_mem_access.c > @@ -41,6 +41,38 @@ int xc_set_mem_access(xc_interface *xch, > return do_memory_op(xch, XENMEM_access_op, &mao, sizeof(mao)); > } > > +int xc_set_mem_access_sparse(xc_interface *xch, > + domid_t domain_id, > + xenmem_access_t access, > + xen_pfn_t *pages, > + uint32_t nr) > +{ > + DECLARE_HYPERCALL_BOUNCE(pages, nr * sizeof(xen_pfn_t), XC_HYPERCALL_BUFFER_BOUNCE_IN); Line too long. There isn't much else to say because it looks like to be a rather straightforward hypercall wrapper. Wei.
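For completeness, the overlong declaration Wei points at only needs wrapping, e.g.:

    DECLARE_HYPERCALL_BOUNCE(pages, nr * sizeof(xen_pfn_t),
                             XC_HYPERCALL_BUFFER_BOUNCE_IN);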
On Fri, Aug 26, 2016 at 12:11 AM, Razvan Cojocaru <rcojocaru@bitdefender.com> wrote: > Currently it is only possible to set mem_access restrictions only for > a contiguous range of GFNs (or, as a particular case, for a single GFN). > This patch introduces a new libxc function taking an array of GFNs. > The alternative would be to set each page in turn, using a userspace-HV > roundtrip for each call, and triggering a TLB flush per page set. I think this is a useful addition. > > Signed-off-by: Razvan Cojocaru <rcojocaru@bitdefender.com> > --- > tools/libxc/include/xenctrl.h | 4 +++ > tools/libxc/xc_mem_access.c | 32 +++++++++++++++++++++++ > xen/arch/x86/hvm/hvm.c | 2 +- > xen/arch/x86/mm/p2m.c | 59 ++++++++++++++++++++++++++++++------------- > xen/common/compat/memory.c | 1 - > xen/common/mem_access.c | 13 +++++++++- > xen/include/public/memory.h | 6 +++++ > xen/include/xen/p2m-common.h | 6 ++--- > 8 files changed, 100 insertions(+), 23 deletions(-) > > diff --git a/tools/libxc/include/xenctrl.h b/tools/libxc/include/xenctrl.h > index 560ce7b..ac84908 100644 > --- a/tools/libxc/include/xenctrl.h > +++ b/tools/libxc/include/xenctrl.h > @@ -2125,6 +2125,10 @@ int xc_set_mem_access(xc_interface *xch, domid_t domain_id, > xenmem_access_t access, uint64_t first_pfn, > uint32_t nr); > > +int xc_set_mem_access_sparse(xc_interface *xch, domid_t domain_id, > + xenmem_access_t access, xen_pfn_t *pages, > + uint32_t nr); > + I guess adding a comment here would be nice for people just looking at the header to be able to tell that pages = array, nr = array size. Also, wouldn't it make sense to make "access" an array as well, so you could one-set multiple pages to different permissions? Tamas
On 08/26/16 23:33, Tamas K Lengyel wrote: > On Fri, Aug 26, 2016 at 12:11 AM, Razvan Cojocaru > <rcojocaru@bitdefender.com> wrote: >> Currently it is only possible to set mem_access restrictions only for >> a contiguous range of GFNs (or, as a particular case, for a single GFN). >> This patch introduces a new libxc function taking an array of GFNs. >> The alternative would be to set each page in turn, using a userspace-HV >> roundtrip for each call, and triggering a TLB flush per page set. > > I think this is a useful addition. Thanks! >> Signed-off-by: Razvan Cojocaru <rcojocaru@bitdefender.com> >> --- >> tools/libxc/include/xenctrl.h | 4 +++ >> tools/libxc/xc_mem_access.c | 32 +++++++++++++++++++++++ >> xen/arch/x86/hvm/hvm.c | 2 +- >> xen/arch/x86/mm/p2m.c | 59 ++++++++++++++++++++++++++++++------------- >> xen/common/compat/memory.c | 1 - >> xen/common/mem_access.c | 13 +++++++++- >> xen/include/public/memory.h | 6 +++++ >> xen/include/xen/p2m-common.h | 6 ++--- >> 8 files changed, 100 insertions(+), 23 deletions(-) >> >> diff --git a/tools/libxc/include/xenctrl.h b/tools/libxc/include/xenctrl.h >> index 560ce7b..ac84908 100644 >> --- a/tools/libxc/include/xenctrl.h >> +++ b/tools/libxc/include/xenctrl.h >> @@ -2125,6 +2125,10 @@ int xc_set_mem_access(xc_interface *xch, domid_t domain_id, >> xenmem_access_t access, uint64_t first_pfn, >> uint32_t nr); >> >> +int xc_set_mem_access_sparse(xc_interface *xch, domid_t domain_id, >> + xenmem_access_t access, xen_pfn_t *pages, >> + uint32_t nr); >> + > > I guess adding a comment here would be nice for people just looking at > the header to be able to tell that pages = array, nr = array size. > Also, wouldn't it make sense to make "access" an array as well, so you > could one-set multiple pages to different permissions? Sure, I'll add a comment (actually even rename that parameter to size). Yes, making access an array as well does make sense, I'll look into that. I could then just iterate over both arrays and basically use p2m_set_mem_access() for each single element (with p2m_lock() / p2m_unlock() around the loop to prevent TLB flushes on each iteration). Thanks, Razvan
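A rough sketch of what the wrapper could turn into once both the GFNs and the access types are passed as arrays, as discussed above. The function name, the access_list field and the *_multi op name are assumptions for illustration; only the bounce-buffer pattern is taken from the posted patch.

    /*
     * Set access permissions for an array of pages.
     * @access: array of 'nr' access settings, one per entry of @pages
     * @pages:  array of 'nr' GFNs to update
     */
    int xc_set_mem_access_multi(xc_interface *xch, domid_t domain_id,
                                uint8_t *access, xen_pfn_t *pages,
                                uint32_t nr)
    {
        DECLARE_HYPERCALL_BOUNCE(pages, nr * sizeof(xen_pfn_t),
                                 XC_HYPERCALL_BUFFER_BOUNCE_IN);
        DECLARE_HYPERCALL_BOUNCE(access, nr * sizeof(uint8_t),
                                 XC_HYPERCALL_BUFFER_BOUNCE_IN);
        int rc;

        xen_mem_access_op_t mao =
        {
            .op    = XENMEM_access_op_set_access_multi, /* assumed op name */
            .domid = domain_id,
            .nr    = nr,
            .pfn   = ~0ull, /* unused by the array op; see discussion above */
        };

        if ( xc_hypercall_bounce_pre(xch, pages) )
        {
            PERROR("Could not bounce pfn array for set_access_multi");
            return -1;
        }

        if ( xc_hypercall_bounce_pre(xch, access) )
        {
            PERROR("Could not bounce access array for set_access_multi");
            xc_hypercall_bounce_post(xch, pages);
            return -1;
        }

        set_xen_guest_handle(mao.pfn_list, pages);
        set_xen_guest_handle(mao.access_list, access); /* assumed field */

        rc = do_memory_op(xch, XENMEM_access_op, &mao, sizeof(mao));

        xc_hypercall_bounce_post(xch, pages);
        xc_hypercall_bounce_post(xch, access);

        return rc;
    }

On the hypervisor side the two arrays would be walked in lockstep, with the p2m lock taken once around the whole loop so there is no TLB flush per page, as Razvan describes.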
On 08/26/2016 10:18 AM, Jan Beulich wrote: >>>> On 26.08.16 at 08:11, <rcojocaru@bitdefender.com> wrote: >> + rc = p2m_set_mem_access(d, _gfn(mao.pfn), arr, mao.nr, start_iter, >> + MEMOP_CMD_MASK, mao.access, 0); > > Please don't pass mao.pfn here when it's meant to be ignore by > the caller. Use GFN_INVALID instead. And for future extensibility > please check that the caller put some pre-defined pattern here > (not sure whether zero is appropriate in this case). GFN_INVALID has it's own semantics in p2m_set_mem_access(): /* If request to set default access. */ if ( gfn_eq(gfn, INVALID_GFN) ) { p2m->default_access = a; return 0; } I'll replace the condition with "if ( !arr && gfn_eq(gfn, INVALID_GFN) )" and use GFN_INVALID in the XENMEM_access_op_set_access_multi (renamed from XENMEM_access_op_set_access_sparse), as recommended. Thanks, Razvan
On 08/29/2016 03:11 PM, Razvan Cojocaru wrote: > On 08/26/2016 10:18 AM, Jan Beulich wrote: >>>>> On 26.08.16 at 08:11, <rcojocaru@bitdefender.com> wrote: >>> + rc = p2m_set_mem_access(d, _gfn(mao.pfn), arr, mao.nr, start_iter, >>> + MEMOP_CMD_MASK, mao.access, 0); >> >> Please don't pass mao.pfn here when it's meant to be ignore by >> the caller. Use GFN_INVALID instead. And for future extensibility >> please check that the caller put some pre-defined pattern here >> (not sure whether zero is appropriate in this case). > > GFN_INVALID has it's own semantics in p2m_set_mem_access(): > > /* If request to set default access. */ > if ( gfn_eq(gfn, INVALID_GFN) ) > { > p2m->default_access = a; > return 0; > } > > I'll replace the condition with "if ( !arr && gfn_eq(gfn, INVALID_GFN) > )" and use GFN_INVALID in the XENMEM_access_op_set_access_multi (renamed > from XENMEM_access_op_set_access_sparse), as recommended. Sorry for the reversals, it obviously should say "INVALID_GFN" everywhere. :) Thanks, Razvan
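Spelled out, the adjusted guard in p2m_set_mem_access() would look something like the snippet below (the array variant never hits the default-access path, since its caller passes INVALID_GFN together with a non-NULL array):

    /* If request to set default access (only valid for the range variant). */
    if ( !arr && gfn_eq(gfn, INVALID_GFN) )
    {
        p2m->default_access = a;
        return 0;
    }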
On 08/26/2016 11:14 AM, Jan Beulich wrote: >>>> On 26.08.16 at 09:40, <rcojocaru@bitdefender.com> wrote: >> On 08/26/16 10:18, Jan Beulich wrote: >>>>>> On 26.08.16 at 08:11, <rcojocaru@bitdefender.com> wrote: >>>> --- a/xen/common/compat/memory.c >>>> +++ b/xen/common/compat/memory.c >>>> @@ -15,7 +15,6 @@ CHECK_TYPE(domid); >>>> #undef compat_domid_t >>>> #undef xen_domid_t >>>> >>>> -CHECK_mem_access_op; >>>> CHECK_vmemrange; >>> >>> You can't just delete this line. Some form of replacement is needed: >>> Either you need to introduce compat mode translation, or you need >>> to keep the line and suitably adjust the interface structure (which >>> might be possible considering there's a [tools interface only] use of >>> uint64_aligned_t already). >> >> I'll look into this. I'm not sure how to go about introducing compat >> mode translation, could you please tell me where to look for an example >> of doing that? Thanks! > > Well, the first option to consider should be avoiding translation (and > hence keeping the check macro invocation in place), the more that > this is a tools only interface (you're certainly aware that e.g domctl > and sysctl also avoid translation). Examples of translation can be > found (you could have guessed that) in xen/common/compat/memory.c. > >>>> @@ -76,6 +76,17 @@ int mem_access_memop(unsigned long cmd, >>>> } >>>> break; >>>> >>>> + case XENMEM_access_op_set_access_sparse: >>>> + { >>>> + xen_pfn_t *arr = xmalloc_bytes(sizeof(xen_pfn_t) * mao.nr); >>>> + >>>> + // copy_from_guest(arr, mao.pfn_list, mao.nr); >>> >>> What is this (wrongly C++ style) comment about? I think this really >>> wasn't meant to be a comment, so RFC or not - how do things work >>> with this commented out? And where is the error checking for the >>> allocation (which btw should be xmalloc_array(), but the need for >>> an allocation here is questionable - the called function would better >>> retrieve the GFNs one by one). >> >> They don't work, I forgot that comment in (the line should not have been >> commented). I first wrote the patch on Xen 4.6 and there there was no >> CHECK_mem_access_op, so I was just trying to figure out what went wrong >> when I first saw the compile errors and tried this, then left it in >> accidentally. >> >> Indeed, there should also be a check for allocation failure. >> >> Do you mean that I would do better to just copy_from_guest(&gfn, >> mao.pfn_list + index, 1) in a for loop that sets mem_access restrictions? > > Yes, albeit it is copy_from_guest_offset(&gfn, mao.pfn_list, index, 1). Avoiding translation, to the best of my understanding (and tested with the latest version of the patch I'm working on) would then require replacing copy_from_guest() with copy_from_user(), which does not have a copy_from_user_offset() alternative. Would keeping the dynamic GFNs buffer allocation be acceptable in this case? Thanks, Razvan
>>> On 29.08.16 at 17:29, <rcojocaru@bitdefender.com> wrote: > On 08/26/2016 11:14 AM, Jan Beulich wrote: >>>>> On 26.08.16 at 09:40, <rcojocaru@bitdefender.com> wrote: >>> On 08/26/16 10:18, Jan Beulich wrote: >>>>>>> On 26.08.16 at 08:11, <rcojocaru@bitdefender.com> wrote: >>>>> @@ -76,6 +76,17 @@ int mem_access_memop(unsigned long cmd, >>>>> } >>>>> break; >>>>> >>>>> + case XENMEM_access_op_set_access_sparse: >>>>> + { >>>>> + xen_pfn_t *arr = xmalloc_bytes(sizeof(xen_pfn_t) * mao.nr); >>>>> + >>>>> + // copy_from_guest(arr, mao.pfn_list, mao.nr); >>>> >>>> What is this (wrongly C++ style) comment about? I think this really >>>> wasn't meant to be a comment, so RFC or not - how do things work >>>> with this commented out? And where is the error checking for the >>>> allocation (which btw should be xmalloc_array(), but the need for >>>> an allocation here is questionable - the called function would better >>>> retrieve the GFNs one by one). >>> >>> They don't work, I forgot that comment in (the line should not have been >>> commented). I first wrote the patch on Xen 4.6 and there there was no >>> CHECK_mem_access_op, so I was just trying to figure out what went wrong >>> when I first saw the compile errors and tried this, then left it in >>> accidentally. >>> >>> Indeed, there should also be a check for allocation failure. >>> >>> Do you mean that I would do better to just copy_from_guest(&gfn, >>> mao.pfn_list + index, 1) in a for loop that sets mem_access restrictions? >> >> Yes, albeit it is copy_from_guest_offset(&gfn, mao.pfn_list, index, 1). > > Avoiding translation, to the best of my understanding (and tested with > the latest version of the patch I'm working on) would then require > replacing copy_from_guest() with copy_from_user(), which does not have a > copy_from_user_offset() alternative. I don't follow - where did you see copy_from_user() used in such a case? If you go through the existing examples, you'll find that with some #define-s (re-vectoring to copy_from_compat_offset()) this can easily be taken care of. Jan
On 08/29/16 18:42, Jan Beulich wrote: >>>> On 29.08.16 at 17:29, <rcojocaru@bitdefender.com> wrote: >> On 08/26/2016 11:14 AM, Jan Beulich wrote: >>>>>> On 26.08.16 at 09:40, <rcojocaru@bitdefender.com> wrote: >>>> On 08/26/16 10:18, Jan Beulich wrote: >>>>>>>> On 26.08.16 at 08:11, <rcojocaru@bitdefender.com> wrote: >>>>>> @@ -76,6 +76,17 @@ int mem_access_memop(unsigned long cmd, >>>>>> } >>>>>> break; >>>>>> >>>>>> + case XENMEM_access_op_set_access_sparse: >>>>>> + { >>>>>> + xen_pfn_t *arr = xmalloc_bytes(sizeof(xen_pfn_t) * mao.nr); >>>>>> + >>>>>> + // copy_from_guest(arr, mao.pfn_list, mao.nr); >>>>> >>>>> What is this (wrongly C++ style) comment about? I think this really >>>>> wasn't meant to be a comment, so RFC or not - how do things work >>>>> with this commented out? And where is the error checking for the >>>>> allocation (which btw should be xmalloc_array(), but the need for >>>>> an allocation here is questionable - the called function would better >>>>> retrieve the GFNs one by one). >>>> >>>> They don't work, I forgot that comment in (the line should not have been >>>> commented). I first wrote the patch on Xen 4.6 and there there was no >>>> CHECK_mem_access_op, so I was just trying to figure out what went wrong >>>> when I first saw the compile errors and tried this, then left it in >>>> accidentally. >>>> >>>> Indeed, there should also be a check for allocation failure. >>>> >>>> Do you mean that I would do better to just copy_from_guest(&gfn, >>>> mao.pfn_list + index, 1) in a for loop that sets mem_access restrictions? >>> >>> Yes, albeit it is copy_from_guest_offset(&gfn, mao.pfn_list, index, 1). >> >> Avoiding translation, to the best of my understanding (and tested with >> the latest version of the patch I'm working on) would then require >> replacing copy_from_guest() with copy_from_user(), which does not have a >> copy_from_user_offset() alternative. > > I don't follow - where did you see copy_from_user() used in such a > case? If you go through the existing examples, you'll find that with > some #define-s (re-vectoring to copy_from_compat_offset()) this > can easily be taken care of. I was looking at xc_mem_paging_memop(), where the buffer parameter is being sent via mpo.buffer, which is an uint64_aligned_t, which I thought was what you meant. On the HV side, it's being copied_from_user(). In the interest of preventing furher misunderstanding, could you please point out a specific example you have in mind? Thanks, Razvan
>>> On 29.08.16 at 18:02, <rcojocaru@bitdefender.com> wrote: > On 08/29/16 18:42, Jan Beulich wrote: >>>>> On 29.08.16 at 17:29, <rcojocaru@bitdefender.com> wrote: >>> On 08/26/2016 11:14 AM, Jan Beulich wrote: >>>>>>> On 26.08.16 at 09:40, <rcojocaru@bitdefender.com> wrote: >>>>> On 08/26/16 10:18, Jan Beulich wrote: >>>>>>>>> On 26.08.16 at 08:11, <rcojocaru@bitdefender.com> wrote: >>>>>>> @@ -76,6 +76,17 @@ int mem_access_memop(unsigned long cmd, >>>>>>> } >>>>>>> break; >>>>>>> >>>>>>> + case XENMEM_access_op_set_access_sparse: >>>>>>> + { >>>>>>> + xen_pfn_t *arr = xmalloc_bytes(sizeof(xen_pfn_t) * mao.nr); >>>>>>> + >>>>>>> + // copy_from_guest(arr, mao.pfn_list, mao.nr); >>>>>> >>>>>> What is this (wrongly C++ style) comment about? I think this really >>>>>> wasn't meant to be a comment, so RFC or not - how do things work >>>>>> with this commented out? And where is the error checking for the >>>>>> allocation (which btw should be xmalloc_array(), but the need for >>>>>> an allocation here is questionable - the called function would better >>>>>> retrieve the GFNs one by one). >>>>> >>>>> They don't work, I forgot that comment in (the line should not have been >>>>> commented). I first wrote the patch on Xen 4.6 and there there was no >>>>> CHECK_mem_access_op, so I was just trying to figure out what went wrong >>>>> when I first saw the compile errors and tried this, then left it in >>>>> accidentally. >>>>> >>>>> Indeed, there should also be a check for allocation failure. >>>>> >>>>> Do you mean that I would do better to just copy_from_guest(&gfn, >>>>> mao.pfn_list + index, 1) in a for loop that sets mem_access restrictions? >>>> >>>> Yes, albeit it is copy_from_guest_offset(&gfn, mao.pfn_list, index, 1). >>> >>> Avoiding translation, to the best of my understanding (and tested with >>> the latest version of the patch I'm working on) would then require >>> replacing copy_from_guest() with copy_from_user(), which does not have a >>> copy_from_user_offset() alternative. >> >> I don't follow - where did you see copy_from_user() used in such a >> case? If you go through the existing examples, you'll find that with >> some #define-s (re-vectoring to copy_from_compat_offset()) this >> can easily be taken care of. > > I was looking at xc_mem_paging_memop(), where the buffer parameter is > being sent via mpo.buffer, which is an uint64_aligned_t, which I thought > was what you meant. On the HV side, it's being copied_from_user(). > > In the interest of preventing furher misunderstanding, could you please > point out a specific example you have in mind? Actually, having looked again more closely, I'm not sure you need to use the compat version of the copying routine; you may need a thin handler in compat/memory.c. Before going to further search for a suitable example, would you mind pointing out what it is that does not work with copy_from_guest_offset()? Jan
On 08/29/16 19:20, Jan Beulich wrote: >>>> On 29.08.16 at 18:02, <rcojocaru@bitdefender.com> wrote: >> On 08/29/16 18:42, Jan Beulich wrote: >>>>>> On 29.08.16 at 17:29, <rcojocaru@bitdefender.com> wrote: >>>> On 08/26/2016 11:14 AM, Jan Beulich wrote: >>>>>>>> On 26.08.16 at 09:40, <rcojocaru@bitdefender.com> wrote: >>>>>> On 08/26/16 10:18, Jan Beulich wrote: >>>>>>>>>> On 26.08.16 at 08:11, <rcojocaru@bitdefender.com> wrote: >>>>>>>> @@ -76,6 +76,17 @@ int mem_access_memop(unsigned long cmd, >>>>>>>> } >>>>>>>> break; >>>>>>>> >>>>>>>> + case XENMEM_access_op_set_access_sparse: >>>>>>>> + { >>>>>>>> + xen_pfn_t *arr = xmalloc_bytes(sizeof(xen_pfn_t) * mao.nr); >>>>>>>> + >>>>>>>> + // copy_from_guest(arr, mao.pfn_list, mao.nr); >>>>>>> >>>>>>> What is this (wrongly C++ style) comment about? I think this really >>>>>>> wasn't meant to be a comment, so RFC or not - how do things work >>>>>>> with this commented out? And where is the error checking for the >>>>>>> allocation (which btw should be xmalloc_array(), but the need for >>>>>>> an allocation here is questionable - the called function would better >>>>>>> retrieve the GFNs one by one). >>>>>> >>>>>> They don't work, I forgot that comment in (the line should not have been >>>>>> commented). I first wrote the patch on Xen 4.6 and there there was no >>>>>> CHECK_mem_access_op, so I was just trying to figure out what went wrong >>>>>> when I first saw the compile errors and tried this, then left it in >>>>>> accidentally. >>>>>> >>>>>> Indeed, there should also be a check for allocation failure. >>>>>> >>>>>> Do you mean that I would do better to just copy_from_guest(&gfn, >>>>>> mao.pfn_list + index, 1) in a for loop that sets mem_access restrictions? >>>>> >>>>> Yes, albeit it is copy_from_guest_offset(&gfn, mao.pfn_list, index, 1). >>>> >>>> Avoiding translation, to the best of my understanding (and tested with >>>> the latest version of the patch I'm working on) would then require >>>> replacing copy_from_guest() with copy_from_user(), which does not have a >>>> copy_from_user_offset() alternative. >>> >>> I don't follow - where did you see copy_from_user() used in such a >>> case? If you go through the existing examples, you'll find that with >>> some #define-s (re-vectoring to copy_from_compat_offset()) this >>> can easily be taken care of. >> >> I was looking at xc_mem_paging_memop(), where the buffer parameter is >> being sent via mpo.buffer, which is an uint64_aligned_t, which I thought >> was what you meant. On the HV side, it's being copied_from_user(). >> >> In the interest of preventing furher misunderstanding, could you please >> point out a specific example you have in mind? > > Actually, having looked again more closely, I'm not sure you need > to use the compat version of the copying routine; you may need a > thin handler in compat/memory.c. Before going to further search > for a suitable example, would you mind pointing out what it is that > does not work with copy_from_guest_offset()? 
With this change: @@ -452,6 +453,11 @@ struct xen_mem_access_op { * ~0ull is used to set and get the default access for pages */ uint64_aligned_t pfn; + /* + * List of pfns to set access for + * Used only with XENMEM_access_op_set_access_multi + */ + uint64_aligned_t pfn_list; }; combined with this change: @@ -76,6 +76,17 @@ int mem_access_memop(unsigned long cmd, } break; + case XENMEM_access_op_set_access_multi: + { + xen_pfn_t *arr = xmalloc_bytes(sizeof(xen_pfn_t) * mao.nr); + + copy_from_guest(arr, (void *)mao.pfn_list, mao.nr * sizeof(xen_pfn_t)); + rc = p2m_set_mem_access(d, INVALID_GFN, arr, mao.nr, start_iter, + MEMOP_CMD_MASK, mao.access, 0); + xfree(arr); + break; + } I get this compile-time error: In file included from /home/red/work/xen.git/xen/include/xen/guest_access.h:10:0, from mem_access.c:24: mem_access.c: In function ‘mem_access_memop’: /home/red/work/xen.git/xen/include/asm/guest_access.h:99:37: error: request for member ‘p’ in something not a structure or union const typeof(*(ptr)) *_s = (hnd).p; \ ^ /home/red/work/xen.git/xen/include/xen/guest_access.h:18:5: note: in expansion of macro ‘copy_from_guest_offset’ copy_from_guest_offset(ptr, hnd, 0, nr) ^ mem_access.c:83:9: note: in expansion of macro ‘copy_from_guest’ copy_from_guest(arr, (void *)mao.pfn_list, mao.nr * sizeof(xen_pfn_t)); ^ In order for this to work, I need to either change from copy_to_guest() to copy_from_user(), or change from uint64_aligned_t pfn_list; to XEN_GUEST_HANDLE(xen_pfn_t) pfn_list;. Thanks, Razvan
>>> On 29.08.16 at 18:42, <rcojocaru@bitdefender.com> wrote: > On 08/29/16 19:20, Jan Beulich wrote: >>>>> On 29.08.16 at 18:02, <rcojocaru@bitdefender.com> wrote: >>> On 08/29/16 18:42, Jan Beulich wrote: >>>>>>> On 29.08.16 at 17:29, <rcojocaru@bitdefender.com> wrote: >>>>> On 08/26/2016 11:14 AM, Jan Beulich wrote: >>>>>>>>> On 26.08.16 at 09:40, <rcojocaru@bitdefender.com> wrote: >>>>>>> On 08/26/16 10:18, Jan Beulich wrote: >>>>>>>>>>> On 26.08.16 at 08:11, <rcojocaru@bitdefender.com> wrote: >>>>>>>>> @@ -76,6 +76,17 @@ int mem_access_memop(unsigned long cmd, >>>>>>>>> } >>>>>>>>> break; >>>>>>>>> >>>>>>>>> + case XENMEM_access_op_set_access_sparse: >>>>>>>>> + { >>>>>>>>> + xen_pfn_t *arr = xmalloc_bytes(sizeof(xen_pfn_t) * mao.nr); >>>>>>>>> + >>>>>>>>> + // copy_from_guest(arr, mao.pfn_list, mao.nr); >>>>>>>> >>>>>>>> What is this (wrongly C++ style) comment about? I think this really >>>>>>>> wasn't meant to be a comment, so RFC or not - how do things work >>>>>>>> with this commented out? And where is the error checking for the >>>>>>>> allocation (which btw should be xmalloc_array(), but the need for >>>>>>>> an allocation here is questionable - the called function would better >>>>>>>> retrieve the GFNs one by one). >>>>>>> >>>>>>> They don't work, I forgot that comment in (the line should not have been >>>>>>> commented). I first wrote the patch on Xen 4.6 and there there was no >>>>>>> CHECK_mem_access_op, so I was just trying to figure out what went wrong >>>>>>> when I first saw the compile errors and tried this, then left it in >>>>>>> accidentally. >>>>>>> >>>>>>> Indeed, there should also be a check for allocation failure. >>>>>>> >>>>>>> Do you mean that I would do better to just copy_from_guest(&gfn, >>>>>>> mao.pfn_list + index, 1) in a for loop that sets mem_access restrictions? >>>>>> >>>>>> Yes, albeit it is copy_from_guest_offset(&gfn, mao.pfn_list, index, 1). >>>>> >>>>> Avoiding translation, to the best of my understanding (and tested with >>>>> the latest version of the patch I'm working on) would then require >>>>> replacing copy_from_guest() with copy_from_user(), which does not have a >>>>> copy_from_user_offset() alternative. >>>> >>>> I don't follow - where did you see copy_from_user() used in such a >>>> case? If you go through the existing examples, you'll find that with >>>> some #define-s (re-vectoring to copy_from_compat_offset()) this >>>> can easily be taken care of. >>> >>> I was looking at xc_mem_paging_memop(), where the buffer parameter is >>> being sent via mpo.buffer, which is an uint64_aligned_t, which I thought >>> was what you meant. On the HV side, it's being copied_from_user(). >>> >>> In the interest of preventing furher misunderstanding, could you please >>> point out a specific example you have in mind? >> >> Actually, having looked again more closely, I'm not sure you need >> to use the compat version of the copying routine; you may need a >> thin handler in compat/memory.c. Before going to further search >> for a suitable example, would you mind pointing out what it is that >> does not work with copy_from_guest_offset()? > > With this change: > > @@ -452,6 +453,11 @@ struct xen_mem_access_op { > * ~0ull is used to set and get the default access for pages > */ > uint64_aligned_t pfn; > + /* > + * List of pfns to set access for > + * Used only with XENMEM_access_op_set_access_multi > + */ > + uint64_aligned_t pfn_list; > }; Well, this can't possibly be right. 
You absolutely need to retain the handle here that you had in the patch; to avoid the need for compat translation you will want to make this a 64-bit handle, though. Jan
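What the suggestion amounts to in xen/include/public/memory.h is roughly the hunk below (sketch only). Note that, as the follow-up message shows, a 64-bit handle to xen_pfn_t still trips the CHECK_ machinery, because xen_pfn_t itself is 32 bits wide under the compat ABI while it is 64 bits natively, so the pointed-to types of the native and compat fields still differ.

    @@ struct xen_mem_access_op @@
         uint64_aligned_t pfn;
    +    /*
    +     * List of pfns to set access for
    +     * Used only with XENMEM_access_op_set_access_multi
    +     */
    +    XEN_GUEST_HANDLE_64(xen_pfn_t) pfn_list;   /* 64-bit handle, per the suggestion above */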
On 08/30/2016 09:39 AM, Jan Beulich wrote: >>>> On 29.08.16 at 18:42, <rcojocaru@bitdefender.com> wrote: >> On 08/29/16 19:20, Jan Beulich wrote: >>>>>> On 29.08.16 at 18:02, <rcojocaru@bitdefender.com> wrote: >>>> On 08/29/16 18:42, Jan Beulich wrote: >>>>>>>> On 29.08.16 at 17:29, <rcojocaru@bitdefender.com> wrote: >>>>>> On 08/26/2016 11:14 AM, Jan Beulich wrote: >>>>>>>>>> On 26.08.16 at 09:40, <rcojocaru@bitdefender.com> wrote: >>>>>>>> On 08/26/16 10:18, Jan Beulich wrote: >>>>>>>>>>>> On 26.08.16 at 08:11, <rcojocaru@bitdefender.com> wrote: >>>>>>>>>> @@ -76,6 +76,17 @@ int mem_access_memop(unsigned long cmd, >>>>>>>>>> } >>>>>>>>>> break; >>>>>>>>>> >>>>>>>>>> + case XENMEM_access_op_set_access_sparse: >>>>>>>>>> + { >>>>>>>>>> + xen_pfn_t *arr = xmalloc_bytes(sizeof(xen_pfn_t) * mao.nr); >>>>>>>>>> + >>>>>>>>>> + // copy_from_guest(arr, mao.pfn_list, mao.nr); >>>>>>>>> >>>>>>>>> What is this (wrongly C++ style) comment about? I think this really >>>>>>>>> wasn't meant to be a comment, so RFC or not - how do things work >>>>>>>>> with this commented out? And where is the error checking for the >>>>>>>>> allocation (which btw should be xmalloc_array(), but the need for >>>>>>>>> an allocation here is questionable - the called function would better >>>>>>>>> retrieve the GFNs one by one). >>>>>>>> >>>>>>>> They don't work, I forgot that comment in (the line should not have been >>>>>>>> commented). I first wrote the patch on Xen 4.6 and there there was no >>>>>>>> CHECK_mem_access_op, so I was just trying to figure out what went wrong >>>>>>>> when I first saw the compile errors and tried this, then left it in >>>>>>>> accidentally. >>>>>>>> >>>>>>>> Indeed, there should also be a check for allocation failure. >>>>>>>> >>>>>>>> Do you mean that I would do better to just copy_from_guest(&gfn, >>>>>>>> mao.pfn_list + index, 1) in a for loop that sets mem_access restrictions? >>>>>>> >>>>>>> Yes, albeit it is copy_from_guest_offset(&gfn, mao.pfn_list, index, 1). >>>>>> >>>>>> Avoiding translation, to the best of my understanding (and tested with >>>>>> the latest version of the patch I'm working on) would then require >>>>>> replacing copy_from_guest() with copy_from_user(), which does not have a >>>>>> copy_from_user_offset() alternative. >>>>> >>>>> I don't follow - where did you see copy_from_user() used in such a >>>>> case? If you go through the existing examples, you'll find that with >>>>> some #define-s (re-vectoring to copy_from_compat_offset()) this >>>>> can easily be taken care of. >>>> >>>> I was looking at xc_mem_paging_memop(), where the buffer parameter is >>>> being sent via mpo.buffer, which is an uint64_aligned_t, which I thought >>>> was what you meant. On the HV side, it's being copied_from_user(). >>>> >>>> In the interest of preventing furher misunderstanding, could you please >>>> point out a specific example you have in mind? >>> >>> Actually, having looked again more closely, I'm not sure you need >>> to use the compat version of the copying routine; you may need a >>> thin handler in compat/memory.c. Before going to further search >>> for a suitable example, would you mind pointing out what it is that >>> does not work with copy_from_guest_offset()? 
>> >> With this change: >> >> @@ -452,6 +453,11 @@ struct xen_mem_access_op { >> * ~0ull is used to set and get the default access for pages >> */ >> uint64_aligned_t pfn; >> + /* >> + * List of pfns to set access for >> + * Used only with XENMEM_access_op_set_access_multi >> + */ >> + uint64_aligned_t pfn_list; >> }; > > Well, this can't possibly be right. You absolutely need to retain > the handle here that you had in the patch; to avoid the need for > compat translation you will want to make this a 64-bit handle, > though. That was my first thought as well, but any combination: XEN_GUEST_HANDLE(uint64_aligned_t) pfn_list; XEN_GUEST_HANDLE_64(uint64_aligned_t) pfn_list; XEN_GUEST_HANDLE_64(xen_pfn_t) pfn_list; triggers this error: ~/work/xen.git/xen/include/xen/compat.h:134:32: error: size of array ‘__checkSstruct_mem_access_op’ is negative #define CHECK_NAME_(k, n, tag) __check ## tag ## k ## _ ## n ^ ~/work/xen.git/xen/include/xen/compat.h:155:17: note: in expansion of macro ‘CHECK_NAME_’ typedef int CHECK_NAME_(k, n, S)[1 - (sizeof(k xen_ ## n) != \ ^ ~/work/xen.git/xen/include/compat/xlat.h:672:5: note: in expansion of macro ‘CHECK_SIZE_’ CHECK_SIZE_(struct, mem_access_op); \ ^ compat/memory.c:18:1: note: in expansion of macro ‘CHECK_mem_access_op’ CHECK_mem_access_op; ^ compat/memory.c: In function ‘__checkFstruct_mem_access_op__pfn_list’: ~/work/xen.git/xen/include/xen/compat.h:170:18: error: comparison of distinct pointer types lacks a cast [-Werror] return &x->f == &c->f; \ ^ ~/work/xen.git/xen/include/xen/compat.h:176:5: note: in expansion of macro ‘CHECK_FIELD_COMMON_’ CHECK_FIELD_COMMON_(k, CHECK_NAME_(k, n ## __ ## f, F), n, f) ^ ~/work/xen.git/xen/include/compat/xlat.h:678:5: note: in expansion of macro ‘CHECK_FIELD_’ CHECK_FIELD_(struct, mem_access_op, pfn_list) ^ compat/memory.c:18:1: note: in expansion of macro ‘CHECK_mem_access_op’ CHECK_mem_access_op; ^ cc1: all warnings being treated as errors It would appear that either I'm missing something, or the only way to continue is to manually add the required translation considering the modifications. Thanks, Razvan
>>> On 30.08.16 at 10:34, <rcojocaru@bitdefender.com> wrote: > It would appear that either I'm missing something, or the only way to > continue is to manually add the required translation considering the > modifications. At least that's likely the easiest (and most consistent route). See e.g. the handling of XENMEM_get_vnumainfo for an example. That said, I can't see how the compat handling of XENMEM_access_op is supposed to work (moved there by commit 213fd25109). Afaict this is dead code right now. I'll submit a patch, albeit that's going to be one only compile tested. Jan
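For context on the two routes: whether a public structure merely gets layout-checked or actually translated for 32-bit callers is driven by xen/include/xlat.lst, where '?' requests a CHECK_* macro and '!' requests an XLAT_* helper. Going the translation route would start with something like the hunk below (illustrative only; a matching compat-side handler along the lines of the XENMEM_get_vnumainfo code is still needed on top of it).

    --- a/xen/include/xlat.lst
    +++ b/xen/include/xlat.lst
    -?	mem_access_op			memory.h
    +!	mem_access_op			memory.h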
diff --git a/tools/libxc/include/xenctrl.h b/tools/libxc/include/xenctrl.h index 560ce7b..ac84908 100644 --- a/tools/libxc/include/xenctrl.h +++ b/tools/libxc/include/xenctrl.h @@ -2125,6 +2125,10 @@ int xc_set_mem_access(xc_interface *xch, domid_t domain_id, xenmem_access_t access, uint64_t first_pfn, uint32_t nr); +int xc_set_mem_access_sparse(xc_interface *xch, domid_t domain_id, + xenmem_access_t access, xen_pfn_t *pages, + uint32_t nr); + /* * Gets the mem access for the given page (returned in access on success) */ diff --git a/tools/libxc/xc_mem_access.c b/tools/libxc/xc_mem_access.c index eee088c..73b1caa 100644 --- a/tools/libxc/xc_mem_access.c +++ b/tools/libxc/xc_mem_access.c @@ -41,6 +41,38 @@ int xc_set_mem_access(xc_interface *xch, return do_memory_op(xch, XENMEM_access_op, &mao, sizeof(mao)); } +int xc_set_mem_access_sparse(xc_interface *xch, + domid_t domain_id, + xenmem_access_t access, + xen_pfn_t *pages, + uint32_t nr) +{ + DECLARE_HYPERCALL_BOUNCE(pages, nr * sizeof(xen_pfn_t), XC_HYPERCALL_BUFFER_BOUNCE_IN); + int rc; + + xen_mem_access_op_t mao = + { + .op = XENMEM_access_op_set_access_sparse, + .domid = domain_id, + .access = access, + .nr = nr + }; + + if ( xc_hypercall_bounce_pre(xch, pages) ) + { + PERROR("Could not bounce memory for XENMEM_access_op_set_access_sparse"); + return -1; + } + + set_xen_guest_handle(mao.pfn_list, pages); + + rc = do_memory_op(xch, XENMEM_access_op, &mao, sizeof(mao)); + + xc_hypercall_bounce_post(xch, pages); + + return rc; +} + int xc_get_mem_access(xc_interface *xch, domid_t domain_id, uint64_t pfn, diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c index 0180f26..03461e5 100644 --- a/xen/arch/x86/hvm/hvm.c +++ b/xen/arch/x86/hvm/hvm.c @@ -5323,7 +5323,7 @@ static int do_altp2m_op( if ( a.u.set_mem_access.pad ) rc = -EINVAL; else - rc = p2m_set_mem_access(d, _gfn(a.u.set_mem_access.gfn), 1, 0, 0, + rc = p2m_set_mem_access(d, _gfn(a.u.set_mem_access.gfn), NULL, 1, 0, 0, a.u.set_mem_access.hvmmem_access, a.u.set_mem_access.view); break; diff --git a/xen/arch/x86/mm/p2m.c b/xen/arch/x86/mm/p2m.c index 812dbf6..2c45cc6 100644 --- a/xen/arch/x86/mm/p2m.c +++ b/xen/arch/x86/mm/p2m.c @@ -1815,7 +1815,7 @@ int p2m_set_altp2m_mem_access(struct domain *d, struct p2m_domain *hp2m, * Set access type for a region of gfns. * If gfn == INVALID_GFN, sets the default access type. 
*/ -long p2m_set_mem_access(struct domain *d, gfn_t gfn, uint32_t nr, +long p2m_set_mem_access(struct domain *d, gfn_t gfn, xen_pfn_t *arr, uint32_t nr, uint32_t start, uint32_t mask, xenmem_access_t access, unsigned int altp2m_idx) { @@ -1874,28 +1874,53 @@ long p2m_set_mem_access(struct domain *d, gfn_t gfn, uint32_t nr, if ( ap2m ) p2m_lock(ap2m); - for ( gfn_l = gfn_x(gfn) + start; nr > start; ++gfn_l ) + if ( !arr ) { - if ( ap2m ) + for ( gfn_l = gfn_x(gfn) + start; nr > start; ++gfn_l ) { - rc = p2m_set_altp2m_mem_access(d, p2m, ap2m, a, _gfn(gfn_l)); - /* If the corresponding mfn is invalid we will just skip it */ - if ( rc && rc != -ESRCH ) - break; - } - else - { - mfn = p2m->get_entry(p2m, gfn_l, &t, &_a, 0, NULL, NULL); - rc = p2m->set_entry(p2m, gfn_l, mfn, PAGE_ORDER_4K, t, a, -1); - if ( rc ) + if ( ap2m ) + { + rc = p2m_set_altp2m_mem_access(d, p2m, ap2m, a, _gfn(gfn_l)); + /* If the corresponding mfn is invalid we will just skip it */ + if ( rc && rc != -ESRCH ) + break; + } + else + { + mfn = p2m->get_entry(p2m, gfn_l, &t, &_a, 0, NULL, NULL); + rc = p2m->set_entry(p2m, gfn_l, mfn, PAGE_ORDER_4K, t, a, -1); + if ( rc ) + break; + } + + /* Check for continuation if it's not the last iteration. */ + if ( nr > ++start && !(start & mask) && hypercall_preempt_check() ) + { + rc = start; break; + } } + } + else + { + uint32_t i; - /* Check for continuation if it's not the last iteration. */ - if ( nr > ++start && !(start & mask) && hypercall_preempt_check() ) + for ( i = 0; i < nr; ++i ) { - rc = start; - break; + if ( ap2m ) + { + rc = p2m_set_altp2m_mem_access(d, p2m, ap2m, a, _gfn(arr[i])); + /* If the corresponding mfn is invalid we will just skip it */ + if ( rc && rc != -ESRCH ) + break; + } + else + { + mfn = p2m->get_entry(p2m, arr[i], &t, &_a, 0, NULL, NULL); + rc = p2m->set_entry(p2m, arr[i], mfn, PAGE_ORDER_4K, t, a, -1); + if ( rc ) + break; + } } } diff --git a/xen/common/compat/memory.c b/xen/common/compat/memory.c index 20c7671..664b8fe 100644 --- a/xen/common/compat/memory.c +++ b/xen/common/compat/memory.c @@ -15,7 +15,6 @@ CHECK_TYPE(domid); #undef compat_domid_t #undef xen_domid_t -CHECK_mem_access_op; CHECK_vmemrange; #ifdef CONFIG_HAS_PASSTHROUGH diff --git a/xen/common/mem_access.c b/xen/common/mem_access.c index b4033f0..1768020 100644 --- a/xen/common/mem_access.c +++ b/xen/common/mem_access.c @@ -66,7 +66,7 @@ int mem_access_memop(unsigned long cmd, ((mao.pfn + mao.nr - 1) > domain_get_maximum_gpfn(d))) ) break; - rc = p2m_set_mem_access(d, _gfn(mao.pfn), mao.nr, start_iter, + rc = p2m_set_mem_access(d, _gfn(mao.pfn), NULL, mao.nr, start_iter, MEMOP_CMD_MASK, mao.access, 0); if ( rc > 0 ) { @@ -76,6 +76,17 @@ int mem_access_memop(unsigned long cmd, } break; + case XENMEM_access_op_set_access_sparse: + { + xen_pfn_t *arr = xmalloc_bytes(sizeof(xen_pfn_t) * mao.nr); + + // copy_from_guest(arr, mao.pfn_list, mao.nr); + rc = p2m_set_mem_access(d, _gfn(mao.pfn), arr, mao.nr, start_iter, + MEMOP_CMD_MASK, mao.access, 0); + xfree(arr); + break; + } + case XENMEM_access_op_get_access: { xenmem_access_t access; diff --git a/xen/include/public/memory.h b/xen/include/public/memory.h index 3badfb9..2e224e3 100644 --- a/xen/include/public/memory.h +++ b/xen/include/public/memory.h @@ -410,6 +410,7 @@ DEFINE_XEN_GUEST_HANDLE(xen_mem_paging_op_t); * #define XENMEM_access_op_enable_emulate 2 * #define XENMEM_access_op_disable_emulate 3 */ +#define XENMEM_access_op_set_access_sparse 4 typedef enum { XENMEM_access_n, @@ -452,6 +453,11 @@ struct xen_mem_access_op { * 
~0ull is used to set and get the default access for pages */ uint64_aligned_t pfn; + /* + * List of pfns to set access for + * Used only with XENMEM_access_op_set_access_sparse + */ + XEN_GUEST_HANDLE(xen_pfn_t) pfn_list; }; typedef struct xen_mem_access_op xen_mem_access_op_t; DEFINE_XEN_GUEST_HANDLE(xen_mem_access_op_t); diff --git a/xen/include/xen/p2m-common.h b/xen/include/xen/p2m-common.h index b4f9077..c6723b6 100644 --- a/xen/include/xen/p2m-common.h +++ b/xen/include/xen/p2m-common.h @@ -49,9 +49,9 @@ int unmap_mmio_regions(struct domain *d, * Set access type for a region of gfns. * If gfn == INVALID_GFN, sets the default access type. */ -long p2m_set_mem_access(struct domain *d, gfn_t gfn, uint32_t nr, - uint32_t start, uint32_t mask, xenmem_access_t access, - unsigned int altp2m_idx); +long p2m_set_mem_access(struct domain *d, gfn_t gfn, xen_pfn_t *arr, + uint32_t nr, uint32_t start, uint32_t mask, + xenmem_access_t access, unsigned int altp2m_idx); /* * Get access type for a gfn.
Currently it is possible to set mem_access restrictions only for a contiguous range of GFNs (or, as a particular case, for a single GFN). This patch introduces a new libxc function taking an array of GFNs. The alternative would be to set each page in turn, using a userspace-HV roundtrip for each call, and triggering a TLB flush per page set.

Signed-off-by: Razvan Cojocaru <rcojocaru@bitdefender.com>
---
 tools/libxc/include/xenctrl.h | 4 +++
 tools/libxc/xc_mem_access.c | 32 +++++++++++++++++++++++
 xen/arch/x86/hvm/hvm.c | 2 +-
 xen/arch/x86/mm/p2m.c | 59 ++++++++++++++++++++++++++++++-------------
 xen/common/compat/memory.c | 1 -
 xen/common/mem_access.c | 13 +++++++++-
 xen/include/public/memory.h | 6 +++++
 xen/include/xen/p2m-common.h | 6 ++---
 8 files changed, 100 insertions(+), 23 deletions(-)