Message ID | 20171006114624.10771-2-maxime.coquelin@redhat.com (mailing list archive) |
---|---|
State | New, archived |
Headers | show |
On 06/10/2017 13:46, Maxime Coquelin wrote: > + hwaddr page_mask = ~TARGET_PAGE_MASK; > + hwaddr plen = (hwaddr)(-1); > + > + if (plen_out) { > + plen = *plen_out; > + } > > for (;;) { > section = address_space_translate_internal( > flatview_to_dispatch(fv), addr, &addr, > - plen, is_mmio); > + &plen, is_mmio); > > iommu_mr = memory_region_get_iommu(section->mr); > if (!iommu_mr) { > @@ -496,7 +520,8 @@ static MemoryRegionSection flatview_do_translate(FlatView *fv, > IOMMU_WO : IOMMU_RO); > addr = ((iotlb.translated_addr & ~iotlb.addr_mask) > | (addr & iotlb.addr_mask)); > - *plen = MIN(*plen, (addr | iotlb.addr_mask) - addr + 1); > + page_mask = iotlb.addr_mask; Should this be "page_mask &= iotlb.addr_mask"? If you have multiple IOMMUs on top of each other (yeah, I know...) I think the smallest size should win. This is also consistent with the MIN in the line below. Otherwise looks good. Paolo > + plen = MIN(plen, (addr | iotlb.addr_mask) - addr + 1); > if (!(iotlb.perm & (1 << is_write))) { > goto translate_fail;
On 10/06/2017 02:31 PM, Paolo Bonzini wrote: > On 06/10/2017 13:46, Maxime Coquelin wrote: >> + hwaddr page_mask = ~TARGET_PAGE_MASK; >> + hwaddr plen = (hwaddr)(-1); >> + >> + if (plen_out) { >> + plen = *plen_out; >> + } >> >> for (;;) { >> section = address_space_translate_internal( >> flatview_to_dispatch(fv), addr, &addr, >> - plen, is_mmio); >> + &plen, is_mmio); >> >> iommu_mr = memory_region_get_iommu(section->mr); >> if (!iommu_mr) { >> @@ -496,7 +520,8 @@ static MemoryRegionSection flatview_do_translate(FlatView *fv, >> IOMMU_WO : IOMMU_RO); >> addr = ((iotlb.translated_addr & ~iotlb.addr_mask) >> | (addr & iotlb.addr_mask)); >> - *plen = MIN(*plen, (addr | iotlb.addr_mask) - addr + 1); >> + page_mask = iotlb.addr_mask; > > Should this be "page_mask &= iotlb.addr_mask"? > > If you have multiple IOMMUs on top of each other (yeah, I know...) I > think the smallest size should win. This is also consistent with the > MIN in the line below. I agree, but changing to "page_mask &= iotlb.addr_mask" will not be enough, we also have to change the init value. Else we will always end up with 0xfff. Maybe we could do as plen was handled before, i.e. setting page_mask init value to (hwaddr)(-1), and after the loop set it to ~TARGET_PAGE_MASK if it hasn't been changed. Does that sound reasonable? Thanks, Maxime > > Otherwise looks good. > > Paolo > >> + plen = MIN(plen, (addr | iotlb.addr_mask) - addr + 1); >> if (!(iotlb.perm & (1 << is_write))) { >> goto translate_fail;
On 06/10/2017 14:46, Maxime Coquelin wrote: >>> addr = ((iotlb.translated_addr & ~iotlb.addr_mask) >>> | (addr & iotlb.addr_mask)); >>> - *plen = MIN(*plen, (addr | iotlb.addr_mask) - addr + 1); >>> + page_mask = iotlb.addr_mask; >> >> Should this be "page_mask &= iotlb.addr_mask"? >> >> If you have multiple IOMMUs on top of each other (yeah, I know...) I >> think the smallest size should win. This is also consistent with the >> MIN in the line below. > > I agree, but changin to "page_mask &= iotlb.addr_mask" will not be > enough, we also have to change the init value. Else we will always end > up with 0xfff. > > Maybe we could do as plen was handled before, i.e. setting page_mask > init value to (hwaddr)(-1), and after the loop set it to > ~TARGET_PAGE_MASK if it hasn't been changed. > > Does that sound reasonable? True that, in fact it makes sense for the "IOTLB entry" to represent all of memory if there's no IOMMU at all. Thanks, Paolo
On 10/06/2017 02:48 PM, Paolo Bonzini wrote: > On 06/10/2017 14:46, Maxime Coquelin wrote: >>>> addr = ((iotlb.translated_addr & ~iotlb.addr_mask) >>>> | (addr & iotlb.addr_mask)); >>>> - *plen = MIN(*plen, (addr | iotlb.addr_mask) - addr + 1); >>>> + page_mask = iotlb.addr_mask; >>> >>> Should this be "page_mask &= iotlb.addr_mask"? >>> >>> If you have multiple IOMMUs on top of each other (yeah, I know...) I >>> think the smallest size should win. This is also consistent with the >>> MIN in the line below. >> >> I agree, but changin to "page_mask &= iotlb.addr_mask" will not be >> enough, we also have to change the init value. Else we will always end >> up with 0xfff. >> >> Maybe we could do as plen was handled before, i.e. setting page_mask >> init value to (hwaddr)(-1), and after the loop set it to >> ~TARGET_PAGE_MASK if it hasn't been changed. >> >> Does that sound reasonable? > > True that, in fact it makes sense for the "IOTLB entry" to represent all > of memory if there's no IOMMU at all. Indeed, that makes sense as no iommu means identity mapping. It would moreover improve performance, as the vhost backend will only have a single IOTLB entry in its cache. Maybe it is better to wait for Peter to understand the reason he limited it to the target page size? Thanks, Maxime > Thanks, > > Paolo >
On Fri, Oct 06, 2017 at 03:03:50PM +0200, Maxime Coquelin wrote: > > > On 10/06/2017 02:48 PM, Paolo Bonzini wrote: > >On 06/10/2017 14:46, Maxime Coquelin wrote: > >>>> addr = ((iotlb.translated_addr & ~iotlb.addr_mask) > >>>> | (addr & iotlb.addr_mask)); > >>>>- *plen = MIN(*plen, (addr | iotlb.addr_mask) - addr + 1); > >>>>+ page_mask = iotlb.addr_mask; > >>> > >>>Should this be "page_mask &= iotlb.addr_mask"? > >>> > >>>If you have multiple IOMMUs on top of each other (yeah, I know...) I > >>>think the smallest size should win. This is also consistent with the > >>>MIN in the line below. > >> > >>I agree, but changin to "page_mask &= iotlb.addr_mask" will not be > >>enough, we also have to change the init value. Else we will always end > >>up with 0xfff. > >> > >>Maybe we could do as plen was handled before, i.e. setting page_mask > >>init value to (hwaddr)(-1), and after the loop set it to > >>~TARGET_PAGE_MASK if it hasn't been changed. > >> > >>Does that sound reasonable? > > > >True that, in fact it makes sense for the "IOTLB entry" to represent all > >of memory if there's no IOMMU at all. > > Indeed, that makes sense as no iommu means identity mapping. It would > moreover improve performance, as the vhost backend will only have a > single IOTLB entry in its cache. > > Maybe it is better to wait for Peter to understand the reason he limited > it to the target page size? Sorry, just came back from a long holiday. I was trying to use 4K as default to be safe (but yes the mask was not correct, thanks for fixing that!), to make sure the translated range covered by the IOMMUTLBEntry will always be safe to access (I thought that was how IOTLB was defined, but I may be wrong). Using (-1) is good especially from performance POV as long as the caller knows the real memory boundary, but I'm not sure whether it'll break the IOTLB semantic somehow. If we want to make it -1 for transparent mappings, maybe worth commenting it in definition of IOMMUTLBEntry.page_mask? 
(Btw, thanks again for moving these patches forward; I tried to, but I failed :)
Hi Peter, On 10/09/2017 07:17 AM, Peter Xu wrote: > On Fri, Oct 06, 2017 at 03:03:50PM +0200, Maxime Coquelin wrote: >> >> >> On 10/06/2017 02:48 PM, Paolo Bonzini wrote: >>> On 06/10/2017 14:46, Maxime Coquelin wrote: >>>>>> addr = ((iotlb.translated_addr & ~iotlb.addr_mask) >>>>>> | (addr & iotlb.addr_mask)); >>>>>> - *plen = MIN(*plen, (addr | iotlb.addr_mask) - addr + 1); >>>>>> + page_mask = iotlb.addr_mask; >>>>> >>>>> Should this be "page_mask &= iotlb.addr_mask"? >>>>> >>>>> If you have multiple IOMMUs on top of each other (yeah, I know...) I >>>>> think the smallest size should win. This is also consistent with the >>>>> MIN in the line below. >>>> >>>> I agree, but changin to "page_mask &= iotlb.addr_mask" will not be >>>> enough, we also have to change the init value. Else we will always end >>>> up with 0xfff. >>>> >>>> Maybe we could do as plen was handled before, i.e. setting page_mask >>>> init value to (hwaddr)(-1), and after the loop set it to >>>> ~TARGET_PAGE_MASK if it hasn't been changed. >>>> >>>> Does that sound reasonable? >>> >>> True that, in fact it makes sense for the "IOTLB entry" to represent all >>> of memory if there's no IOMMU at all. >> >> Indeed, that makes sense as no iommu means identity mapping. It would >> moreover improve performance, as the vhost backend will only have a >> single IOTLB entry in its cache. >> >> Maybe it is better to wait for Peter to understand the reason he limited >> it to the target page size? > > Sorry, just came back from a long holiday. No problem. > I was trying to use 4K as default to be safe (but yes the mask was not > correct, thanks for fixing that!), to make sure the translated range > covered by the IOMMUTLBEntry will always be safe to access (I thought > that was how IOTLB was defined, but I may be wrong). Using (-1) is > good especially from performance POV as long as the caller knows the > real memory boundary, but I'm not sure whether it'll break the IOTLB > scemantic somehow. Good point. 
Maybe it would be safer to wrap the IOTLB entry to the memory region? > If we want to make it -1 for transparent mappings, maybe worth > commenting it in definition of IOMMUTLBEntry.page_mask? Yes, that makes sense. > (Btw, thanks again for moving these patches forward; I tried to, but I > failed :) I'm a bit faulty not to have reviewed/tested it in the first place ;) Thanks, Maxime
On Mon, Oct 09, 2017 at 10:30:07AM +0200, Maxime Coquelin wrote: > Hi Peter, > > On 10/09/2017 07:17 AM, Peter Xu wrote: > >On Fri, Oct 06, 2017 at 03:03:50PM +0200, Maxime Coquelin wrote: > >> > >> > >>On 10/06/2017 02:48 PM, Paolo Bonzini wrote: > >>>On 06/10/2017 14:46, Maxime Coquelin wrote: > >>>>>> addr = ((iotlb.translated_addr & ~iotlb.addr_mask) > >>>>>> | (addr & iotlb.addr_mask)); > >>>>>>- *plen = MIN(*plen, (addr | iotlb.addr_mask) - addr + 1); > >>>>>>+ page_mask = iotlb.addr_mask; > >>>>> > >>>>>Should this be "page_mask &= iotlb.addr_mask"? > >>>>> > >>>>>If you have multiple IOMMUs on top of each other (yeah, I know...) I > >>>>>think the smallest size should win. This is also consistent with the > >>>>>MIN in the line below. > >>>> > >>>>I agree, but changin to "page_mask &= iotlb.addr_mask" will not be > >>>>enough, we also have to change the init value. Else we will always end > >>>>up with 0xfff. > >>>> > >>>>Maybe we could do as plen was handled before, i.e. setting page_mask > >>>>init value to (hwaddr)(-1), and after the loop set it to > >>>>~TARGET_PAGE_MASK if it hasn't been changed. > >>>> > >>>>Does that sound reasonable? > >>> > >>>True that, in fact it makes sense for the "IOTLB entry" to represent all > >>>of memory if there's no IOMMU at all. > >> > >>Indeed, that makes sense as no iommu means identity mapping. It would > >>moreover improve performance, as the vhost backend will only have a > >>single IOTLB entry in its cache. > >> > >>Maybe it is better to wait for Peter to understand the reason he limited > >>it to the target page size? > > > >Sorry, just came back from a long holiday. > > No problem. > > >I was trying to use 4K as default to be safe (but yes the mask was not > >correct, thanks for fixing that!), to make sure the translated range > >covered by the IOMMUTLBEntry will always be safe to access (I thought > >that was how IOTLB was defined, but I may be wrong). 
Using (-1) is > >good especially from performance POV as long as the caller knows the > >real memory boundary, but I'm not sure whether it'll break the IOTLB > >scemantic somehow. > > Good point. > Maybe it would be safer to wrap the IOTLB entry to the memory region? The problem is that MR size may not be aligned with address masks. I see it less meaningful if we need to further man-made a smaller mask. And wait, since you mentioned about MR... I think using -1 here may be wrong. Although current MR is transparently mapped (the MR that covers the address to be translated), it does not mean the whole address space is transparently mapped. SPAPR should be a good example that some ranges of the address space are mapped by IOMMU but some are not. > > >If we want to make it -1 for transparent mappings, maybe worth > >commenting it in definition of IOMMUTLBEntry.page_mask? > > Yes, that makes sense. According to above, I would vote for your previous solution: first use -1 to get the minimum mask, then switch to PAGE_MASK before returning when needed. > > >(Btw, thanks again for moving these patches forward; I tried to, but I > > failed :) > > I'm a bit faulty not to have reviewed/tested it in the first place ;) :-) Thanks! > > Thanks, > Maxime
diff --git a/exec.c b/exec.c index 7a80460725..a5f3828445 100644 --- a/exec.c +++ b/exec.c @@ -467,11 +467,29 @@ address_space_translate_internal(AddressSpaceDispatch *d, hwaddr addr, hwaddr *x return section; } -/* Called from RCU critical section */ +/** + * flatview_do_translate - translate an address in FlatView + * + * @fv: the flat view that we want to translate on + * @addr: the address to be translated in above address space + * @xlat: the translated address offset within memory region. It + * cannot be @NULL. + * @plen_out: valid read/write length of the translated address. It + * can be @NULL when we don't care about it. + * @page_mask_out: page mask for the translated address. This + * should only be meaningful for IOMMU translated + * addresses, since there may be huge pages that this bit + * would tell. It can be @NULL if we don't care about it. + * @is_write: whether the translation operation is for write + * @is_mmio: whether this can be MMIO, set true if it can + * + * This function is called from RCU critical section + */ static MemoryRegionSection flatview_do_translate(FlatView *fv, hwaddr addr, hwaddr *xlat, - hwaddr *plen, + hwaddr *plen_out, + hwaddr *page_mask_out, bool is_write, bool is_mmio, AddressSpace **target_as) @@ -480,11 +498,17 @@ static MemoryRegionSection flatview_do_translate(FlatView *fv, MemoryRegionSection *section; IOMMUMemoryRegion *iommu_mr; IOMMUMemoryRegionClass *imrc; + hwaddr page_mask = ~TARGET_PAGE_MASK; + hwaddr plen = (hwaddr)(-1); + + if (plen_out) { + plen = *plen_out; + } for (;;) { section = address_space_translate_internal( flatview_to_dispatch(fv), addr, &addr, - plen, is_mmio); + &plen, is_mmio); iommu_mr = memory_region_get_iommu(section->mr); if (!iommu_mr) { @@ -496,7 +520,8 @@ static MemoryRegionSection flatview_do_translate(FlatView *fv, IOMMU_WO : IOMMU_RO); addr = ((iotlb.translated_addr & ~iotlb.addr_mask) | (addr & iotlb.addr_mask)); - *plen = MIN(*plen, (addr | iotlb.addr_mask) - addr + 1); + 
page_mask = iotlb.addr_mask; + plen = MIN(plen, (addr | iotlb.addr_mask) - addr + 1); if (!(iotlb.perm & (1 << is_write))) { goto translate_fail; } @@ -507,6 +532,14 @@ static MemoryRegionSection flatview_do_translate(FlatView *fv, *xlat = addr; + if (page_mask_out) { + *page_mask_out = page_mask; + } + + if (plen_out) { + *plen_out = plen; + } + return *section; translate_fail: @@ -525,7 +558,7 @@ IOMMUTLBEntry address_space_get_iotlb_entry(AddressSpace *as, hwaddr addr, /* This can never be MMIO. */ section = flatview_do_translate(address_space_to_flatview(as), addr, - &xlat, &plen, is_write, false, &as); + &xlat, &plen, NULL, is_write, false, &as); /* Illegal translation */ if (section.mr == &io_mem_unassigned) { @@ -569,7 +602,8 @@ MemoryRegion *flatview_translate(FlatView *fv, hwaddr addr, hwaddr *xlat, AddressSpace *as = NULL; /* This can be MMIO, so setup MMIO bit. */ - section = flatview_do_translate(fv, addr, xlat, plen, is_write, true, &as); + section = flatview_do_translate(fv, addr, xlat, plen, NULL, + is_write, true, &as); mr = section.mr; if (xen_enabled() && memory_access_is_direct(mr, is_write)) {