vhost/vsock: add IOTLB API support

Message ID 20201029174351.134173-1-sgarzare@redhat.com (mailing list archive)
State New, archived
Series vhost/vsock: add IOTLB API support

Commit Message

Stefano Garzarella Oct. 29, 2020, 5:43 p.m. UTC
This patch enables IOTLB API support for vhost-vsock devices,
allowing userspace to emulate an IOMMU for the guest.

These changes follow vhost-net; in detail, this patch:
- exposes the VIRTIO_F_ACCESS_PLATFORM feature and initializes the
  IOTLB device if the feature is acked
- implements the VHOST_GET_BACKEND_FEATURES and
  VHOST_SET_BACKEND_FEATURES ioctls
- calls vq_meta_prefetch() before vq processing to prefetch the vq
  metadata addresses into the IOTLB
- provides .read_iter, .write_iter, and .poll callbacks for the
  chardev; they are used by userspace to exchange IOTLB messages

This patch was tested with QEMU, with a patch [1] applied to fix a
simple issue:
    $ qemu -M q35,accel=kvm,kernel-irqchip=split \
           -drive file=fedora.qcow2,format=qcow2,if=virtio \
           -device intel-iommu,intremap=on \
           -device vhost-vsock-pci,guest-cid=3,iommu_platform=on

[1] https://lists.gnu.org/archive/html/qemu-devel/2020-10/msg09077.html

Signed-off-by: Stefano Garzarella <sgarzare@redhat.com>
---
 drivers/vhost/vsock.c | 68 +++++++++++++++++++++++++++++++++++++++++--
 1 file changed, 65 insertions(+), 3 deletions(-)
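
Below is a minimal sketch of the userspace side of this interface: how a VMM
could ack VIRTIO_F_ACCESS_PLATFORM and negotiate the v2 IOTLB message format
against this driver. It is an illustration only, not code taken from QEMU; the
device-node path, feature selection, and error handling are assumptions, while
the ioctls and feature bits are the UAPI ones this patch wires up.

/* Sketch only: userspace setup for the vhost-vsock IOTLB interface. */
#include <fcntl.h>
#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/vhost.h>          /* also pulls in linux/vhost_types.h */
#include <linux/virtio_config.h>  /* VIRTIO_F_ACCESS_PLATFORM */

int vhost_vsock_iotlb_setup(void)
{
        uint64_t features = 0, backend = 0;
        int fd = open("/dev/vhost-vsock", O_RDWR);

        if (fd < 0)
                return -1;
        if (ioctl(fd, VHOST_SET_OWNER, NULL) < 0)
                return -1;

        /* Acking VIRTIO_F_ACCESS_PLATFORM makes vhost_vsock_set_features()
         * call vhost_init_device_iotlb(), so vq addresses are treated as
         * IOVAs and translated through the device IOTLB. */
        if (ioctl(fd, VHOST_GET_FEATURES, &features) < 0)
                return -1;
        if (!(features & (1ULL << VIRTIO_F_ACCESS_PLATFORM)))
                return -1;                      /* kernel lacks this patch */
        features &= ~(1ULL << VHOST_F_LOG_ALL); /* no dirty logging here */
        if (ioctl(fd, VHOST_SET_FEATURES, &features) < 0)
                return -1;

        /* New in this patch: negotiate the v2 IOTLB message format. */
        if (ioctl(fd, VHOST_GET_BACKEND_FEATURES, &backend) < 0)
                return -1;
        backend &= 1ULL << VHOST_BACKEND_F_IOTLB_MSG_V2;
        if (ioctl(fd, VHOST_SET_BACKEND_FEATURES, &backend) < 0)
                return -1;

        return fd;      /* IOTLB messages are exchanged on this fd */
}

Once this returns, the vring addresses the VMM programs are treated as IOVAs
and are resolved through IOTLB messages exchanged on the same fd, via the
chardev callbacks added by the patch.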

Comments

Stefan Hajnoczi Oct. 30, 2020, 9:10 a.m. UTC | #1
On Thu, Oct 29, 2020 at 06:43:51PM +0100, Stefano Garzarella wrote:
> This patch enables the IOTLB API support for vhost-vsock devices,
> allowing the userspace to emulate an IOMMU for the guest.
> 
> These changes were made following vhost-net, in details this patch:
> - exposes VIRTIO_F_ACCESS_PLATFORM feature and inits the iotlb
>   device if the feature is acked
> - implements VHOST_GET_BACKEND_FEATURES and
>   VHOST_SET_BACKEND_FEATURES ioctls
> - calls vq_meta_prefetch() before vq processing to prefetch vq
>   metadata address in IOTLB
> - provides .read_iter, .write_iter, and .poll callbacks for the
>   chardev; they are used by the userspace to exchange IOTLB messages
> 
> This patch was tested with QEMU and a patch applied [1] to fix a
> simple issue:
>     $ qemu -M q35,accel=kvm,kernel-irqchip=split \
>            -drive file=fedora.qcow2,format=qcow2,if=virtio \
>            -device intel-iommu,intremap=on \
>            -device vhost-vsock-pci,guest-cid=3,iommu_platform=on
> 
> [1] https://lists.gnu.org/archive/html/qemu-devel/2020-10/msg09077.html
> 
> Signed-off-by: Stefano Garzarella <sgarzare@redhat.com>
> ---
>  drivers/vhost/vsock.c | 68 +++++++++++++++++++++++++++++++++++++++++--
>  1 file changed, 65 insertions(+), 3 deletions(-)

Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com>
Jason Wang Oct. 30, 2020, 10:02 a.m. UTC | #2
On 2020/10/30 1:43 AM, Stefano Garzarella wrote:
> This patch enables the IOTLB API support for vhost-vsock devices,
> allowing the userspace to emulate an IOMMU for the guest.
>
> These changes were made following vhost-net, in details this patch:
> - exposes VIRTIO_F_ACCESS_PLATFORM feature and inits the iotlb
>    device if the feature is acked
> - implements VHOST_GET_BACKEND_FEATURES and
>    VHOST_SET_BACKEND_FEATURES ioctls
> - calls vq_meta_prefetch() before vq processing to prefetch vq
>    metadata address in IOTLB
> - provides .read_iter, .write_iter, and .poll callbacks for the
>    chardev; they are used by the userspace to exchange IOTLB messages
>
> This patch was tested with QEMU and a patch applied [1] to fix a
> simple issue:
>      $ qemu -M q35,accel=kvm,kernel-irqchip=split \
>             -drive file=fedora.qcow2,format=qcow2,if=virtio \
>             -device intel-iommu,intremap=on \
>             -device vhost-vsock-pci,guest-cid=3,iommu_platform=on


Patch looks good, but a question:

It looks to me like you don't enable ATS, which means vhost won't get any
invalidation requests, or did I miss anything?

Thanks


>
> [1] https://lists.gnu.org/archive/html/qemu-devel/2020-10/msg09077.html
>
> Signed-off-by: Stefano Garzarella <sgarzare@redhat.com>
> ---
>   drivers/vhost/vsock.c | 68 +++++++++++++++++++++++++++++++++++++++++--
>   1 file changed, 65 insertions(+), 3 deletions(-)
>
> diff --git a/drivers/vhost/vsock.c b/drivers/vhost/vsock.c
> index a483cec31d5c..5e78fb719602 100644
> --- a/drivers/vhost/vsock.c
> +++ b/drivers/vhost/vsock.c
> @@ -30,7 +30,12 @@
>   #define VHOST_VSOCK_PKT_WEIGHT 256
>   
>   enum {
> -	VHOST_VSOCK_FEATURES = VHOST_FEATURES,
> +	VHOST_VSOCK_FEATURES = VHOST_FEATURES |
> +			       (1ULL << VIRTIO_F_ACCESS_PLATFORM)
> +};
> +
> +enum {
> +	VHOST_VSOCK_BACKEND_FEATURES = (1ULL << VHOST_BACKEND_F_IOTLB_MSG_V2)
>   };
>   
>   /* Used to track all the vhost_vsock instances on the system. */
> @@ -94,6 +99,9 @@ vhost_transport_do_send_pkt(struct vhost_vsock *vsock,
>   	if (!vhost_vq_get_backend(vq))
>   		goto out;
>   
> +	if (!vq_meta_prefetch(vq))
> +		goto out;
> +
>   	/* Avoid further vmexits, we're already processing the virtqueue */
>   	vhost_disable_notify(&vsock->dev, vq);
>   
> @@ -449,6 +457,9 @@ static void vhost_vsock_handle_tx_kick(struct vhost_work *work)
>   	if (!vhost_vq_get_backend(vq))
>   		goto out;
>   
> +	if (!vq_meta_prefetch(vq))
> +		goto out;
> +
>   	vhost_disable_notify(&vsock->dev, vq);
>   	do {
>   		u32 len;
> @@ -766,8 +777,12 @@ static int vhost_vsock_set_features(struct vhost_vsock *vsock, u64 features)
>   	mutex_lock(&vsock->dev.mutex);
>   	if ((features & (1 << VHOST_F_LOG_ALL)) &&
>   	    !vhost_log_access_ok(&vsock->dev)) {
> -		mutex_unlock(&vsock->dev.mutex);
> -		return -EFAULT;
> +		goto err;
> +	}
> +
> +	if ((features & (1ULL << VIRTIO_F_ACCESS_PLATFORM))) {
> +		if (vhost_init_device_iotlb(&vsock->dev, true))
> +			goto err;
>   	}
>   
>   	for (i = 0; i < ARRAY_SIZE(vsock->vqs); i++) {
> @@ -778,6 +793,10 @@ static int vhost_vsock_set_features(struct vhost_vsock *vsock, u64 features)
>   	}
>   	mutex_unlock(&vsock->dev.mutex);
>   	return 0;
> +
> +err:
> +	mutex_unlock(&vsock->dev.mutex);
> +	return -EFAULT;
>   }
>   
>   static long vhost_vsock_dev_ioctl(struct file *f, unsigned int ioctl,
> @@ -811,6 +830,18 @@ static long vhost_vsock_dev_ioctl(struct file *f, unsigned int ioctl,
>   		if (copy_from_user(&features, argp, sizeof(features)))
>   			return -EFAULT;
>   		return vhost_vsock_set_features(vsock, features);
> +	case VHOST_GET_BACKEND_FEATURES:
> +		features = VHOST_VSOCK_BACKEND_FEATURES;
> +		if (copy_to_user(argp, &features, sizeof(features)))
> +			return -EFAULT;
> +		return 0;
> +	case VHOST_SET_BACKEND_FEATURES:
> +		if (copy_from_user(&features, argp, sizeof(features)))
> +			return -EFAULT;
> +		if (features & ~VHOST_VSOCK_BACKEND_FEATURES)
> +			return -EOPNOTSUPP;
> +		vhost_set_backend_features(&vsock->dev, features);
> +		return 0;
>   	default:
>   		mutex_lock(&vsock->dev.mutex);
>   		r = vhost_dev_ioctl(&vsock->dev, ioctl, argp);
> @@ -823,6 +854,34 @@ static long vhost_vsock_dev_ioctl(struct file *f, unsigned int ioctl,
>   	}
>   }
>   
> +static ssize_t vhost_vsock_chr_read_iter(struct kiocb *iocb, struct iov_iter *to)
> +{
> +	struct file *file = iocb->ki_filp;
> +	struct vhost_vsock *vsock = file->private_data;
> +	struct vhost_dev *dev = &vsock->dev;
> +	int noblock = file->f_flags & O_NONBLOCK;
> +
> +	return vhost_chr_read_iter(dev, to, noblock);
> +}
> +
> +static ssize_t vhost_vsock_chr_write_iter(struct kiocb *iocb,
> +					struct iov_iter *from)
> +{
> +	struct file *file = iocb->ki_filp;
> +	struct vhost_vsock *vsock = file->private_data;
> +	struct vhost_dev *dev = &vsock->dev;
> +
> +	return vhost_chr_write_iter(dev, from);
> +}
> +
> +static __poll_t vhost_vsock_chr_poll(struct file *file, poll_table *wait)
> +{
> +	struct vhost_vsock *vsock = file->private_data;
> +	struct vhost_dev *dev = &vsock->dev;
> +
> +	return vhost_chr_poll(file, dev, wait);
> +}
> +
>   static const struct file_operations vhost_vsock_fops = {
>   	.owner          = THIS_MODULE,
>   	.open           = vhost_vsock_dev_open,
> @@ -830,6 +889,9 @@ static const struct file_operations vhost_vsock_fops = {
>   	.llseek		= noop_llseek,
>   	.unlocked_ioctl = vhost_vsock_dev_ioctl,
>   	.compat_ioctl   = compat_ptr_ioctl,
> +	.read_iter      = vhost_vsock_chr_read_iter,
> +	.write_iter     = vhost_vsock_chr_write_iter,
> +	.poll           = vhost_vsock_chr_poll,
>   };
>   
>   static struct miscdevice vhost_vsock_misc = {
Stefano Garzarella Oct. 30, 2020, 10:54 a.m. UTC | #3
On Fri, Oct 30, 2020 at 06:02:18PM +0800, Jason Wang wrote:
>
>On 2020/10/30 上午1:43, Stefano Garzarella wrote:
>>This patch enables the IOTLB API support for vhost-vsock devices,
>>allowing the userspace to emulate an IOMMU for the guest.
>>
>>These changes were made following vhost-net, in details this patch:
>>- exposes VIRTIO_F_ACCESS_PLATFORM feature and inits the iotlb
>>   device if the feature is acked
>>- implements VHOST_GET_BACKEND_FEATURES and
>>   VHOST_SET_BACKEND_FEATURES ioctls
>>- calls vq_meta_prefetch() before vq processing to prefetch vq
>>   metadata address in IOTLB
>>- provides .read_iter, .write_iter, and .poll callbacks for the
>>   chardev; they are used by the userspace to exchange IOTLB messages
>>
>>This patch was tested with QEMU and a patch applied [1] to fix a
>>simple issue:
>>     $ qemu -M q35,accel=kvm,kernel-irqchip=split \
>>            -drive file=fedora.qcow2,format=qcow2,if=virtio \
>>            -device intel-iommu,intremap=on \
>>            -device vhost-vsock-pci,guest-cid=3,iommu_platform=on
>
>
>Patch looks good, but a question:
>
>It looks to me you don't enable ATS which means vhost won't get any 
>invalidation request or did I miss anything?
>

You're right, I didn't see invalidation requests, only misses and updates.
Now I have tried enabling 'ats' and 'device-iotlb', but I still don't see
any invalidations.

How can I test it? (Sorry, but I don't have much experience with vIOMMU
yet.)

Thanks,
Stefano
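
As a concrete picture of the "miss and updates" traffic mentioned above, the
sketch below shows the kind of loop a VMM can run on the vhost-vsock fd once
VHOST_BACKEND_F_IOTLB_MSG_V2 has been negotiated. It is simplified and
illustrative: viommu_translate() is a hypothetical placeholder for the VMM's
own IOMMU emulation, and a real implementation (e.g. QEMU's vhost code) also
handles invalidations, access failures, and non-blocking I/O.

#include <string.h>
#include <unistd.h>
#include <linux/vhost.h>        /* struct vhost_msg_v2, VHOST_IOTLB_* */

/* Hypothetical helper (not a real API): resolve an IOVA through the
 * emulated IOMMU, returning the backing userspace address, the size of
 * the contiguous mapping, and the allowed access (VHOST_ACCESS_*). */
int viommu_translate(__u64 iova, __u64 *uaddr, __u64 *size, __u8 *perm);

/* Wait for one IOTLB miss reported through .read_iter and answer it with
 * an update through .write_iter. */
static int handle_one_iotlb_miss(int vhost_fd)
{
        struct vhost_msg_v2 msg, reply;

        /* Blocks until vhost reports something (unless O_NONBLOCK is set). */
        if (read(vhost_fd, &msg, sizeof(msg)) != sizeof(msg))
                return -1;
        if (msg.type != VHOST_IOTLB_MSG_V2 ||
            msg.iotlb.type != VHOST_IOTLB_MISS)
                return 0;       /* this sketch only answers misses */

        memset(&reply, 0, sizeof(reply));
        reply.type = VHOST_IOTLB_MSG_V2;
        reply.iotlb.type = VHOST_IOTLB_UPDATE;
        reply.iotlb.iova = msg.iotlb.iova;
        if (viommu_translate(msg.iotlb.iova, &reply.iotlb.uaddr,
                             &reply.iotlb.size, &reply.iotlb.perm) < 0)
                return -1;

        return write(vhost_fd, &reply, sizeof(reply)) == sizeof(reply) ? 0 : -1;
}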
Jason Wang Oct. 30, 2020, 11:44 a.m. UTC | #4
On 2020/10/30 6:54 PM, Stefano Garzarella wrote:
> On Fri, Oct 30, 2020 at 06:02:18PM +0800, Jason Wang wrote:
>>
>> On 2020/10/30 上午1:43, Stefano Garzarella wrote:
>>> This patch enables the IOTLB API support for vhost-vsock devices,
>>> allowing the userspace to emulate an IOMMU for the guest.
>>>
>>> These changes were made following vhost-net, in details this patch:
>>> - exposes VIRTIO_F_ACCESS_PLATFORM feature and inits the iotlb
>>>   device if the feature is acked
>>> - implements VHOST_GET_BACKEND_FEATURES and
>>>   VHOST_SET_BACKEND_FEATURES ioctls
>>> - calls vq_meta_prefetch() before vq processing to prefetch vq
>>>   metadata address in IOTLB
>>> - provides .read_iter, .write_iter, and .poll callbacks for the
>>>   chardev; they are used by the userspace to exchange IOTLB messages
>>>
>>> This patch was tested with QEMU and a patch applied [1] to fix a
>>> simple issue:
>>>     $ qemu -M q35,accel=kvm,kernel-irqchip=split \
>>>            -drive file=fedora.qcow2,format=qcow2,if=virtio \
>>>            -device intel-iommu,intremap=on \
>>>            -device vhost-vsock-pci,guest-cid=3,iommu_platform=on
>>
>>
>> Patch looks good, but a question:
>>
>> It looks to me you don't enable ATS which means vhost won't get any 
>> invalidation request or did I miss anything?
>>
>
> You're right, I didn't see invalidation requests, only miss and updates.
> Now I have tried to enable 'ats' and 'device-iotlb' but I still don't 
> see any invalidation.
>
> How can I test it? (Sorry but I don't have much experience yet with 
> vIOMMU)


I guess it's because of the batched unmap. Maybe you can try using
"intel_iommu=strict" on the guest kernel command line to see if it works.

Btw, make sure the QEMU build contains the patch [1]. Otherwise ATS won't be
enabled for recent Linux kernels in the guest.

Thanks

[1] https://patchew.org/QEMU/20200909081731.24688-1-jasowang@redhat.com/

>
> Thanks,
> Stefano
>
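
For reference while following this sub-thread: whether invalidations show up
is a matter of which message types the VMM writes to the vhost fd. The v2
message layout, trimmed from a reading of include/uapi/linux/vhost_types.h
around this kernel version (the header in your tree is authoritative), looks
like this:

/* Trimmed, best-effort copy of the vhost UAPI IOTLB message definitions. */
struct vhost_iotlb_msg {
        __u64 iova;
        __u64 size;
        __u64 uaddr;
#define VHOST_ACCESS_RO      0x1
#define VHOST_ACCESS_WO      0x2
#define VHOST_ACCESS_RW      0x3
        __u8 perm;
#define VHOST_IOTLB_MISS           1
#define VHOST_IOTLB_UPDATE         2
#define VHOST_IOTLB_INVALIDATE     3
#define VHOST_IOTLB_ACCESS_FAIL    4
        __u8 type;
};

#define VHOST_IOTLB_MSG_V2 0x2

struct vhost_msg_v2 {
        __u32 type;     /* VHOST_IOTLB_MSG_V2 once IOTLB_MSG_V2 is negotiated */
        __u32 reserved;
        union {
                struct vhost_iotlb_msg iotlb;
                __u8 padding[64];
        };
};

Once ATS/device-iotlb and strict unmapping are in place on the QEMU side,
VHOST_IOTLB_INVALIDATE messages should appear on the fd alongside the misses
and updates discussed above.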
Stefano Garzarella Oct. 30, 2020, 4:19 p.m. UTC | #5
On Fri, Oct 30, 2020 at 07:44:43PM +0800, Jason Wang wrote:
>
>On 2020/10/30 下午6:54, Stefano Garzarella wrote:
>>On Fri, Oct 30, 2020 at 06:02:18PM +0800, Jason Wang wrote:
>>>
>>>On 2020/10/30 上午1:43, Stefano Garzarella wrote:
>>>>This patch enables the IOTLB API support for vhost-vsock devices,
>>>>allowing the userspace to emulate an IOMMU for the guest.
>>>>
>>>>These changes were made following vhost-net, in details this patch:
>>>>- exposes VIRTIO_F_ACCESS_PLATFORM feature and inits the iotlb
>>>>  device if the feature is acked
>>>>- implements VHOST_GET_BACKEND_FEATURES and
>>>>  VHOST_SET_BACKEND_FEATURES ioctls
>>>>- calls vq_meta_prefetch() before vq processing to prefetch vq
>>>>  metadata address in IOTLB
>>>>- provides .read_iter, .write_iter, and .poll callbacks for the
>>>>  chardev; they are used by the userspace to exchange IOTLB messages
>>>>
>>>>This patch was tested with QEMU and a patch applied [1] to fix a
>>>>simple issue:
>>>>    $ qemu -M q35,accel=kvm,kernel-irqchip=split \
>>>>           -drive file=fedora.qcow2,format=qcow2,if=virtio \
>>>>           -device intel-iommu,intremap=on \
>>>>           -device vhost-vsock-pci,guest-cid=3,iommu_platform=on
>>>
>>>
>>>Patch looks good, but a question:
>>>
>>>It looks to me you don't enable ATS which means vhost won't get 
>>>any invalidation request or did I miss anything?
>>>
>>
>>You're right, I didn't see invalidation requests, only miss and updates.
>>Now I have tried to enable 'ats' and 'device-iotlb' but I still 
>>don't see any invalidation.
>>
>>How can I test it? (Sorry but I don't have much experience yet with 
>>vIOMMU)
>
>
>I guess it's because the batched unmap. Maybe you can try to use 
>"intel_iommu=strict" in guest kernel command line to see if it works.
>
>Btw, make sure the qemu contains the patch [1]. Otherwise ATS won't be 
>enabled for recent Linux Kernel in the guest.
>

I tried with "intel_iommu=strict" in the guest kernel and QEMU patch 
applied, but I didn't see any invalidation.

Maybe I did something wrong, you know it is friday, KVM Forum is ending, 
etc... ;-)

I'll investigate better next week.

Thanks for the useful info,
Stefano
Stefano Garzarella Nov. 2, 2020, 5:11 p.m. UTC | #6
On Fri, Oct 30, 2020 at 07:44:43PM +0800, Jason Wang wrote:
>
>On 2020/10/30 下午6:54, Stefano Garzarella wrote:
>>On Fri, Oct 30, 2020 at 06:02:18PM +0800, Jason Wang wrote:
>>>
>>>On 2020/10/30 上午1:43, Stefano Garzarella wrote:
>>>>This patch enables the IOTLB API support for vhost-vsock devices,
>>>>allowing the userspace to emulate an IOMMU for the guest.
>>>>
>>>>These changes were made following vhost-net, in details this patch:
>>>>- exposes VIRTIO_F_ACCESS_PLATFORM feature and inits the iotlb
>>>>  device if the feature is acked
>>>>- implements VHOST_GET_BACKEND_FEATURES and
>>>>  VHOST_SET_BACKEND_FEATURES ioctls
>>>>- calls vq_meta_prefetch() before vq processing to prefetch vq
>>>>  metadata address in IOTLB
>>>>- provides .read_iter, .write_iter, and .poll callbacks for the
>>>>  chardev; they are used by the userspace to exchange IOTLB messages
>>>>
>>>>This patch was tested with QEMU and a patch applied [1] to fix a
>>>>simple issue:
>>>>    $ qemu -M q35,accel=kvm,kernel-irqchip=split \
>>>>           -drive file=fedora.qcow2,format=qcow2,if=virtio \
>>>>           -device intel-iommu,intremap=on \
>>>>           -device vhost-vsock-pci,guest-cid=3,iommu_platform=on
>>>
>>>
>>>Patch looks good, but a question:
>>>
>>>It looks to me you don't enable ATS which means vhost won't get 
>>>any invalidation request or did I miss anything?
>>>
>>
>>You're right, I didn't see invalidation requests, only miss and updates.
>>Now I have tried to enable 'ats' and 'device-iotlb' but I still 
>>don't see any invalidation.
>>
>>How can I test it? (Sorry but I don't have much experience yet with 
>>vIOMMU)
>
>
>I guess it's because the batched unmap. Maybe you can try to use 
>"intel_iommu=strict" in guest kernel command line to see if it works.
>
>Btw, make sure the qemu contains the patch [1]. Otherwise ATS won't be 
>enabled for recent Linux Kernel in the guest.

The problem was my kernel: it was built with a tiny configuration.
Using the Fedora stock kernel I can see the 'invalidate' requests, but I
also hit the following issues.

Do they ring any bells?

$ ./qemu -m 4G -smp 4 -M q35,accel=kvm,kernel-irqchip=split \
     -drive file=fedora.qcow2,format=qcow2,if=virtio \
     -device intel-iommu,intremap=on,device-iotlb=on \
     -device vhost-vsock-pci,guest-cid=6,iommu_platform=on,ats=on,id=v1

     qemu-system-x86_64: vtd_iova_to_slpte: detected IOVA overflow 
     (iova=0x1d40000030c0)
     qemu-system-x86_64: vtd_iommu_translate: detected translation 
     failure (dev=00:03:00, iova=0x1d40000030c0)
     qemu-system-x86_64: New fault is not recorded due to compression of 
     faults

Guest kernel messages:
     [   44.940872] DMAR: DRHD: handling fault status reg 2
     [   44.941989] DMAR: [DMA Read] Request device [00:03.0] PASID 
     ffffffff fault addr ffff88W
     [   49.785884] DMAR: DRHD: handling fault status reg 2
     [   49.788874] DMAR: [DMA Read] Request device [00:03.0] PASID 
     ffffffff fault addr ffff88W


QEMU: b149dea55c Merge remote-tracking branch 
'remotes/cschoenebeck/tags/pull-9p-20201102' into staging

Linux guest: 5.8.16-200.fc32.x86_64


Thanks,
Stefano
Jason Wang Nov. 3, 2020, 9:04 a.m. UTC | #7
On 2020/11/3 1:11 AM, Stefano Garzarella wrote:
> On Fri, Oct 30, 2020 at 07:44:43PM +0800, Jason Wang wrote:
>>
>> On 2020/10/30 下午6:54, Stefano Garzarella wrote:
>>> On Fri, Oct 30, 2020 at 06:02:18PM +0800, Jason Wang wrote:
>>>>
>>>> On 2020/10/30 上午1:43, Stefano Garzarella wrote:
>>>>> This patch enables the IOTLB API support for vhost-vsock devices,
>>>>> allowing the userspace to emulate an IOMMU for the guest.
>>>>>
>>>>> These changes were made following vhost-net, in details this patch:
>>>>> - exposes VIRTIO_F_ACCESS_PLATFORM feature and inits the iotlb
>>>>>   device if the feature is acked
>>>>> - implements VHOST_GET_BACKEND_FEATURES and
>>>>>   VHOST_SET_BACKEND_FEATURES ioctls
>>>>> - calls vq_meta_prefetch() before vq processing to prefetch vq
>>>>>   metadata address in IOTLB
>>>>> - provides .read_iter, .write_iter, and .poll callbacks for the
>>>>>   chardev; they are used by the userspace to exchange IOTLB messages
>>>>>
>>>>> This patch was tested with QEMU and a patch applied [1] to fix a
>>>>> simple issue:
>>>>>     $ qemu -M q35,accel=kvm,kernel-irqchip=split \
>>>>>            -drive file=fedora.qcow2,format=qcow2,if=virtio \
>>>>>            -device intel-iommu,intremap=on \
>>>>>            -device vhost-vsock-pci,guest-cid=3,iommu_platform=on
>>>>
>>>>
>>>> Patch looks good, but a question:
>>>>
>>>> It looks to me you don't enable ATS which means vhost won't get any 
>>>> invalidation request or did I miss anything?
>>>>
>>>
>>> You're right, I didn't see invalidation requests, only miss and 
>>> updates.
>>> Now I have tried to enable 'ats' and 'device-iotlb' but I still 
>>> don't see any invalidation.
>>>
>>> How can I test it? (Sorry but I don't have much experience yet with 
>>> vIOMMU)
>>
>>
>> I guess it's because the batched unmap. Maybe you can try to use 
>> "intel_iommu=strict" in guest kernel command line to see if it works.
>>
>> Btw, make sure the qemu contains the patch [1]. Otherwise ATS won't 
>> be enabled for recent Linux Kernel in the guest.
>
> The problem was my kernel, it was built with a tiny configuration.
> Using fedora stock kernel I can see the 'invalidate' requests, but I 
> also had the following issues.
>
> Do they make you ring any bells?
>
> $ ./qemu -m 4G -smp 4 -M q35,accel=kvm,kernel-irqchip=split \
>     -drive file=fedora.qcow2,format=qcow2,if=virtio \
>     -device intel-iommu,intremap=on,device-iotlb=on \
>     -device vhost-vsock-pci,guest-cid=6,iommu_platform=on,ats=on,id=v1
>
>     qemu-system-x86_64: vtd_iova_to_slpte: detected IOVA overflow     
> (iova=0x1d40000030c0)


It's a hint that the IOVA exceeds the AW (address width). It might be worth
checking whether the missed IOVA reported from the IOTLB is legal.

Thanks


> qemu-system-x86_64: vtd_iommu_translate: detected translation failure 
> (dev=00:03:00, iova=0x1d40000030c0)
>     qemu-system-x86_64: New fault is not recorded due to compression 
> of     faults
>
> Guest kernel messages:
>     [   44.940872] DMAR: DRHD: handling fault status reg 2
>     [   44.941989] DMAR: [DMA Read] Request device [00:03.0] PASID     
> ffffffff fault addr ffff88W
>     [   49.785884] DMAR: DRHD: handling fault status reg 2
>     [   49.788874] DMAR: [DMA Read] Request device [00:03.0] PASID     
> ffffffff fault addr ffff88W
>
>
> QEMU: b149dea55c Merge remote-tracking branch 
> 'remotes/cschoenebeck/tags/pull-9p-20201102' into staging
>
> Linux guest: 5.8.16-200.fc32.x86_64
>
>
> Thanks,
> Stefano
>
Peter Xu Nov. 3, 2020, 7:46 p.m. UTC | #8
On Tue, Nov 03, 2020 at 05:04:23PM +0800, Jason Wang wrote:
> 
> On 2020/11/3 上午1:11, Stefano Garzarella wrote:
> > On Fri, Oct 30, 2020 at 07:44:43PM +0800, Jason Wang wrote:
> > > 
> > > On 2020/10/30 下午6:54, Stefano Garzarella wrote:
> > > > On Fri, Oct 30, 2020 at 06:02:18PM +0800, Jason Wang wrote:
> > > > > 
> > > > > On 2020/10/30 上午1:43, Stefano Garzarella wrote:
> > > > > > This patch enables the IOTLB API support for vhost-vsock devices,
> > > > > > allowing the userspace to emulate an IOMMU for the guest.
> > > > > > 
> > > > > > These changes were made following vhost-net, in details this patch:
> > > > > > - exposes VIRTIO_F_ACCESS_PLATFORM feature and inits the iotlb
> > > > > >   device if the feature is acked
> > > > > > - implements VHOST_GET_BACKEND_FEATURES and
> > > > > >   VHOST_SET_BACKEND_FEATURES ioctls
> > > > > > - calls vq_meta_prefetch() before vq processing to prefetch vq
> > > > > >   metadata address in IOTLB
> > > > > > - provides .read_iter, .write_iter, and .poll callbacks for the
> > > > > >   chardev; they are used by the userspace to exchange IOTLB messages
> > > > > > 
> > > > > > This patch was tested with QEMU and a patch applied [1] to fix a
> > > > > > simple issue:
> > > > > >     $ qemu -M q35,accel=kvm,kernel-irqchip=split \
> > > > > >            -drive file=fedora.qcow2,format=qcow2,if=virtio \
> > > > > >            -device intel-iommu,intremap=on \
> > > > > >            -device vhost-vsock-pci,guest-cid=3,iommu_platform=on
> > > > > 
> > > > > 
> > > > > Patch looks good, but a question:
> > > > > 
> > > > > It looks to me you don't enable ATS which means vhost won't
> > > > > get any invalidation request or did I miss anything?
> > > > > 
> > > > 
> > > > You're right, I didn't see invalidation requests, only miss and
> > > > updates.
> > > > Now I have tried to enable 'ats' and 'device-iotlb' but I still
> > > > don't see any invalidation.
> > > > 
> > > > How can I test it? (Sorry but I don't have much experience yet
> > > > with vIOMMU)
> > > 
> > > 
> > > I guess it's because the batched unmap. Maybe you can try to use
> > > "intel_iommu=strict" in guest kernel command line to see if it
> > > works.
> > > 
> > > Btw, make sure the qemu contains the patch [1]. Otherwise ATS won't
> > > be enabled for recent Linux Kernel in the guest.
> > 
> > The problem was my kernel, it was built with a tiny configuration.
> > Using fedora stock kernel I can see the 'invalidate' requests, but I
> > also had the following issues.
> > 
> > Do they make you ring any bells?
> > 
> > $ ./qemu -m 4G -smp 4 -M q35,accel=kvm,kernel-irqchip=split \
> >     -drive file=fedora.qcow2,format=qcow2,if=virtio \
> >     -device intel-iommu,intremap=on,device-iotlb=on \
> >     -device vhost-vsock-pci,guest-cid=6,iommu_platform=on,ats=on,id=v1
> > 
> >     qemu-system-x86_64: vtd_iova_to_slpte: detected IOVA overflow    
> > (iova=0x1d40000030c0)
> 
> 
> It's a hint that IOVA exceeds the AW. It might be worth to check whether the
> missed IOVA reported from IOTLB is legal.

Yeah.  By default the QEMU vIOMMU only supports a 39-bit width for the guest
IOVA address space.  To extend it, we can use:

  -device intel-iommu,aw-bits=48

which enables a 4-level IOMMU page table.

Here the IOVA is obviously longer than this, so it would be interesting to
know why that IOVA is allocated in the guest driver, since the driver should
know somehow that this IOVA is beyond what is supported (the guest IOMMU
driver should be able to probe the vIOMMU capability for this width
information too).
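
To put numbers on the overflow: 0x1d40000030c0 needs about 45 bits, so it is
out of range for the default 39-bit address width but would fit under
aw-bits=48. A small, self-contained illustration of that bound check (not
QEMU's actual code):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative only: an IOVA is translatable only if it fits within the
 * address width the vIOMMU advertises (39 bits by default for QEMU's
 * intel-iommu device, 48 with aw-bits=48). */
static bool iova_in_range(uint64_t iova, unsigned int aw_bits)
{
        return (iova >> aw_bits) == 0;
}

int main(void)
{
        uint64_t iova = 0x1d40000030c0ULL;      /* from the fault above */

        printf("aw-bits=39: %s\n", iova_in_range(iova, 39) ? "ok" : "overflow");
        printf("aw-bits=48: %s\n", iova_in_range(iova, 48) ? "ok" : "overflow");
        return 0;
}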
Stefano Garzarella Nov. 4, 2020, 9:33 a.m. UTC | #9
On Tue, Nov 03, 2020 at 02:46:13PM -0500, Peter Xu wrote:
>On Tue, Nov 03, 2020 at 05:04:23PM +0800, Jason Wang wrote:
>>
>> On 2020/11/3 上午1:11, Stefano Garzarella wrote:
>> > On Fri, Oct 30, 2020 at 07:44:43PM +0800, Jason Wang wrote:
>> > >
>> > > On 2020/10/30 下午6:54, Stefano Garzarella wrote:
>> > > > On Fri, Oct 30, 2020 at 06:02:18PM +0800, Jason Wang wrote:
>> > > > >
>> > > > > On 2020/10/30 上午1:43, Stefano Garzarella wrote:
>> > > > > > This patch enables the IOTLB API support for vhost-vsock devices,
>> > > > > > allowing the userspace to emulate an IOMMU for the guest.
>> > > > > >
>> > > > > > These changes were made following vhost-net, in details this patch:
>> > > > > > - exposes VIRTIO_F_ACCESS_PLATFORM feature and inits the iotlb
>> > > > > >   device if the feature is acked
>> > > > > > - implements VHOST_GET_BACKEND_FEATURES and
>> > > > > >   VHOST_SET_BACKEND_FEATURES ioctls
>> > > > > > - calls vq_meta_prefetch() before vq processing to prefetch vq
>> > > > > >   metadata address in IOTLB
>> > > > > > - provides .read_iter, .write_iter, and .poll callbacks for the
>> > > > > >   chardev; they are used by the userspace to exchange IOTLB messages
>> > > > > >
>> > > > > > This patch was tested with QEMU and a patch applied [1] to fix a
>> > > > > > simple issue:
>> > > > > >     $ qemu -M q35,accel=kvm,kernel-irqchip=split \
>> > > > > >            -drive file=fedora.qcow2,format=qcow2,if=virtio \
>> > > > > >            -device intel-iommu,intremap=on \
>> > > > > >            -device vhost-vsock-pci,guest-cid=3,iommu_platform=on
>> > > > >
>> > > > >
>> > > > > Patch looks good, but a question:
>> > > > >
>> > > > > It looks to me you don't enable ATS which means vhost won't
>> > > > > get any invalidation request or did I miss anything?
>> > > > >
>> > > >
>> > > > You're right, I didn't see invalidation requests, only miss and
>> > > > updates.
>> > > > Now I have tried to enable 'ats' and 'device-iotlb' but I still
>> > > > don't see any invalidation.
>> > > >
>> > > > How can I test it? (Sorry but I don't have much experience yet
>> > > > with vIOMMU)
>> > >
>> > >
>> > > I guess it's because the batched unmap. Maybe you can try to use
>> > > "intel_iommu=strict" in guest kernel command line to see if it
>> > > works.
>> > >
>> > > Btw, make sure the qemu contains the patch [1]. Otherwise ATS won't
>> > > be enabled for recent Linux Kernel in the guest.
>> >
>> > The problem was my kernel, it was built with a tiny configuration.
>> > Using fedora stock kernel I can see the 'invalidate' requests, but I
>> > also had the following issues.
>> >
>> > Do they make you ring any bells?
>> >
>> > $ ./qemu -m 4G -smp 4 -M q35,accel=kvm,kernel-irqchip=split \
>> >     -drive file=fedora.qcow2,format=qcow2,if=virtio \
>> >     -device intel-iommu,intremap=on,device-iotlb=on \
>> >     -device vhost-vsock-pci,guest-cid=6,iommu_platform=on,ats=on,id=v1
>> >
>> >     qemu-system-x86_64: vtd_iova_to_slpte: detected IOVA overflow    
>> > (iova=0x1d40000030c0)
>>
>>
>> It's a hint that IOVA exceeds the AW. It might be worth to check whether the
>> missed IOVA reported from IOTLB is legal.
>
>Yeah.  By default the QEMU vIOMMU should only support 39bits width for guest
>iova address space.  To extend it, we can use:
>
>  -device intel-iommu,aw-bits=48
>
>So we'll enable 4-level iommu pgtable.
>
>Here the iova is obvious longer than this, so it'll be interesting to know why
>that iova is allocated in the guest driver since the driver should know somehow
>that this iova is beyond what's supported (guest iommu driver should be able to
>probe viommu capability on this width information too).
>

Peter, Jason, thanks for the hints!

I'll try to understand what is going on in the guest driver.

Stefano

Patch

diff --git a/drivers/vhost/vsock.c b/drivers/vhost/vsock.c
index a483cec31d5c..5e78fb719602 100644
--- a/drivers/vhost/vsock.c
+++ b/drivers/vhost/vsock.c
@@ -30,7 +30,12 @@ 
 #define VHOST_VSOCK_PKT_WEIGHT 256
 
 enum {
-	VHOST_VSOCK_FEATURES = VHOST_FEATURES,
+	VHOST_VSOCK_FEATURES = VHOST_FEATURES |
+			       (1ULL << VIRTIO_F_ACCESS_PLATFORM)
+};
+
+enum {
+	VHOST_VSOCK_BACKEND_FEATURES = (1ULL << VHOST_BACKEND_F_IOTLB_MSG_V2)
 };
 
 /* Used to track all the vhost_vsock instances on the system. */
@@ -94,6 +99,9 @@  vhost_transport_do_send_pkt(struct vhost_vsock *vsock,
 	if (!vhost_vq_get_backend(vq))
 		goto out;
 
+	if (!vq_meta_prefetch(vq))
+		goto out;
+
 	/* Avoid further vmexits, we're already processing the virtqueue */
 	vhost_disable_notify(&vsock->dev, vq);
 
@@ -449,6 +457,9 @@  static void vhost_vsock_handle_tx_kick(struct vhost_work *work)
 	if (!vhost_vq_get_backend(vq))
 		goto out;
 
+	if (!vq_meta_prefetch(vq))
+		goto out;
+
 	vhost_disable_notify(&vsock->dev, vq);
 	do {
 		u32 len;
@@ -766,8 +777,12 @@  static int vhost_vsock_set_features(struct vhost_vsock *vsock, u64 features)
 	mutex_lock(&vsock->dev.mutex);
 	if ((features & (1 << VHOST_F_LOG_ALL)) &&
 	    !vhost_log_access_ok(&vsock->dev)) {
-		mutex_unlock(&vsock->dev.mutex);
-		return -EFAULT;
+		goto err;
+	}
+
+	if ((features & (1ULL << VIRTIO_F_ACCESS_PLATFORM))) {
+		if (vhost_init_device_iotlb(&vsock->dev, true))
+			goto err;
 	}
 
 	for (i = 0; i < ARRAY_SIZE(vsock->vqs); i++) {
@@ -778,6 +793,10 @@  static int vhost_vsock_set_features(struct vhost_vsock *vsock, u64 features)
 	}
 	mutex_unlock(&vsock->dev.mutex);
 	return 0;
+
+err:
+	mutex_unlock(&vsock->dev.mutex);
+	return -EFAULT;
 }
 
 static long vhost_vsock_dev_ioctl(struct file *f, unsigned int ioctl,
@@ -811,6 +830,18 @@  static long vhost_vsock_dev_ioctl(struct file *f, unsigned int ioctl,
 		if (copy_from_user(&features, argp, sizeof(features)))
 			return -EFAULT;
 		return vhost_vsock_set_features(vsock, features);
+	case VHOST_GET_BACKEND_FEATURES:
+		features = VHOST_VSOCK_BACKEND_FEATURES;
+		if (copy_to_user(argp, &features, sizeof(features)))
+			return -EFAULT;
+		return 0;
+	case VHOST_SET_BACKEND_FEATURES:
+		if (copy_from_user(&features, argp, sizeof(features)))
+			return -EFAULT;
+		if (features & ~VHOST_VSOCK_BACKEND_FEATURES)
+			return -EOPNOTSUPP;
+		vhost_set_backend_features(&vsock->dev, features);
+		return 0;
 	default:
 		mutex_lock(&vsock->dev.mutex);
 		r = vhost_dev_ioctl(&vsock->dev, ioctl, argp);
@@ -823,6 +854,34 @@  static long vhost_vsock_dev_ioctl(struct file *f, unsigned int ioctl,
 	}
 }
 
+static ssize_t vhost_vsock_chr_read_iter(struct kiocb *iocb, struct iov_iter *to)
+{
+	struct file *file = iocb->ki_filp;
+	struct vhost_vsock *vsock = file->private_data;
+	struct vhost_dev *dev = &vsock->dev;
+	int noblock = file->f_flags & O_NONBLOCK;
+
+	return vhost_chr_read_iter(dev, to, noblock);
+}
+
+static ssize_t vhost_vsock_chr_write_iter(struct kiocb *iocb,
+					struct iov_iter *from)
+{
+	struct file *file = iocb->ki_filp;
+	struct vhost_vsock *vsock = file->private_data;
+	struct vhost_dev *dev = &vsock->dev;
+
+	return vhost_chr_write_iter(dev, from);
+}
+
+static __poll_t vhost_vsock_chr_poll(struct file *file, poll_table *wait)
+{
+	struct vhost_vsock *vsock = file->private_data;
+	struct vhost_dev *dev = &vsock->dev;
+
+	return vhost_chr_poll(file, dev, wait);
+}
+
 static const struct file_operations vhost_vsock_fops = {
 	.owner          = THIS_MODULE,
 	.open           = vhost_vsock_dev_open,
@@ -830,6 +889,9 @@  static const struct file_operations vhost_vsock_fops = {
 	.llseek		= noop_llseek,
 	.unlocked_ioctl = vhost_vsock_dev_ioctl,
 	.compat_ioctl   = compat_ptr_ioctl,
+	.read_iter      = vhost_vsock_chr_read_iter,
+	.write_iter     = vhost_vsock_chr_write_iter,
+	.poll           = vhost_vsock_chr_poll,
 };
 
 static struct miscdevice vhost_vsock_misc = {