
[v5,07/15] drm: Add a prefetching memcpy_from_wc

Message ID 20210527144710.1975553-8-thomas.hellstrom@linux.intel.com (mailing list archive)
State New, archived
Series Move LMEM (VRAM) management over to TTM

Commit Message

Thomas Hellstrom May 27, 2021, 2:47 p.m. UTC
Reading out of write-combining mapped memory is typically very slow
since the CPU doesn't prefetch. However some archs have special
instructions to do this.

So add a best-effort memcpy_from_wc taking dma-buf-map pointer
arguments that attempts to use a fast prefetching memcpy and
otherwise falls back to ordinary memcopies, taking the iomem tagging
into account.

The code is largely copied from i915_memcpy_from_wc.

Cc: Daniel Vetter <daniel@ffwll.ch>
Cc: Christian König <christian.koenig@amd.com>
Suggested-by: Daniel Vetter <daniel@ffwll.ch>
Signed-off-by: Thomas Hellström <thomas.hellstrom@linux.intel.com>
---
 Documentation/gpu/drm-mm.rst |   2 +-
 drivers/gpu/drm/drm_cache.c  | 138 +++++++++++++++++++++++++++++++++++
 drivers/gpu/drm/drm_drv.c    |   2 +
 include/drm/drm_cache.h      |   7 ++
 4 files changed, 148 insertions(+), 1 deletion(-)
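
For reference, a minimal caller sketch (not part of the patch): the function name
copy_vram_to_shadow() and its arguments are made up for illustration, and the
dma_buf_map_set_vaddr()/dma_buf_map_set_vaddr_iomem() helpers are assumed to be
the ones provided by <linux/dma-buf-map.h>.

#include <linux/dma-buf-map.h>
#include <drm/drm_cache.h>

/* Hypothetical caller: read a WC-mapped VRAM buffer back into system memory. */
static void copy_vram_to_shadow(void __iomem *vram, void *shadow,
				unsigned long size)
{
	struct dma_buf_map src, dst;

	dma_buf_map_set_vaddr_iomem(&src, vram);	/* source carries the iomem tag */
	dma_buf_map_set_vaddr(&dst, shadow);		/* destination is a plain vaddr */

	/* Uses the movntdqa fast path on capable x86 CPUs, memcpy_fromio() otherwise. */
	drm_memcpy_from_wc(&dst, &src, size);
}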

Comments

Christian König May 28, 2021, 2:19 p.m. UTC | #1
Am 27.05.21 um 16:47 schrieb Thomas Hellström:
> Reading out of write-combining mapped memory is typically very slow
> since the CPU doesn't prefetch. However some archs have special
> instructions to do this.
>
> So add a best-effort memcpy_from_wc taking dma-buf-map pointer
> arguments that attempts to use a fast prefetching memcpy and
> otherwise falls back to ordinary memcopies, taking the iomem tagging
> into account.
>
> The code is largely copied from i915_memcpy_from_wc.
>
> Cc: Daniel Vetter <daniel@ffwll.ch>
> Cc: Christian König <christian.koenig@amd.com>
> Suggested-by: Daniel Vetter <daniel@ffwll.ch>
> Signed-off-by: Thomas Hellström <thomas.hellstrom@linux.intel.com>
> ---
>   Documentation/gpu/drm-mm.rst |   2 +-
>   drivers/gpu/drm/drm_cache.c  | 138 +++++++++++++++++++++++++++++++++++
>   drivers/gpu/drm/drm_drv.c    |   2 +
>   include/drm/drm_cache.h      |   7 ++
>   4 files changed, 148 insertions(+), 1 deletion(-)
>
> diff --git a/Documentation/gpu/drm-mm.rst b/Documentation/gpu/drm-mm.rst
> index 21be6deadc12..c66058c5bce7 100644
> --- a/Documentation/gpu/drm-mm.rst
> +++ b/Documentation/gpu/drm-mm.rst
> @@ -469,7 +469,7 @@ DRM MM Range Allocator Function References
>   .. kernel-doc:: drivers/gpu/drm/drm_mm.c
>      :export:
>   
> -DRM Cache Handling
> +DRM Cache Handling and Fast WC memcpy()
>   ==================
>   
>   .. kernel-doc:: drivers/gpu/drm/drm_cache.c
> diff --git a/drivers/gpu/drm/drm_cache.c b/drivers/gpu/drm/drm_cache.c
> index 79a50ef1250f..08614f7fdd8d 100644
> --- a/drivers/gpu/drm/drm_cache.c
> +++ b/drivers/gpu/drm/drm_cache.c
> @@ -28,6 +28,7 @@
>    * Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
>    */
>   
> +#include <linux/dma-buf-map.h>
>   #include <linux/export.h>
>   #include <linux/highmem.h>
>   #include <linux/mem_encrypt.h>
> @@ -35,6 +36,9 @@
>   
>   #include <drm/drm_cache.h>
>   
> +/* A small bounce buffer that fits on the stack. */
> +#define MEMCPY_BOUNCE_SIZE 128
> +
>   #if defined(CONFIG_X86)
>   #include <asm/smp.h>
>   
> @@ -209,3 +213,137 @@ bool drm_need_swiotlb(int dma_bits)
>   	return max_iomem > ((u64)1 << dma_bits);
>   }
>   EXPORT_SYMBOL(drm_need_swiotlb);
> +
> +#ifdef CONFIG_X86
> +
> +static DEFINE_STATIC_KEY_FALSE(has_movntdqa);
> +
> +static void __memcpy_ntdqa(void *dst, const void *src, unsigned long len)
> +{
> +	kernel_fpu_begin();
> +
> +	while (len >= 4) {
> +		asm("movntdqa	(%0), %%xmm0\n"
> +		    "movntdqa 16(%0), %%xmm1\n"
> +		    "movntdqa 32(%0), %%xmm2\n"
> +		    "movntdqa 48(%0), %%xmm3\n"
> +		    "movaps %%xmm0,   (%1)\n"
> +		    "movaps %%xmm1, 16(%1)\n"
> +		    "movaps %%xmm2, 32(%1)\n"
> +		    "movaps %%xmm3, 48(%1)\n"
> +		    :: "r" (src), "r" (dst) : "memory");
> +		src += 64;
> +		dst += 64;
> +		len -= 4;
> +	}
> +	while (len--) {
> +		asm("movntdqa (%0), %%xmm0\n"
> +		    "movaps %%xmm0, (%1)\n"
> +		    :: "r" (src), "r" (dst) : "memory");
> +		src += 16;
> +		dst += 16;
> +	}
> +
> +	kernel_fpu_end();
> +}
> +
> +/*
> + * __drm_memcpy_from_wc copies @len bytes from @src to @dst using
> + * non-temporal instructions where available. Note that all arguments
> + * (@src, @dst) must be aligned to 16 bytes and @len must be a multiple
> + * of 16.
> + */
> +static void __drm_memcpy_from_wc(void *dst, const void *src, unsigned long len)
> +{
> +	if (unlikely(((unsigned long)dst | (unsigned long)src | len) & 15))
> +		memcpy(dst, src, len);
> +	else if (likely(len))
> +		__memcpy_ntdqa(dst, src, len >> 4);
> +}
> +#endif
> +
> +static void memcpy_fallback(struct dma_buf_map *dst,
> +			    const struct dma_buf_map *src,
> +			    unsigned long len)
> +{
> +	if (!dst->is_iomem && !src->is_iomem) {
> +		memcpy(dst->vaddr, src->vaddr, len);
> +	} else if (!src->is_iomem) {
> +		dma_buf_map_memcpy_to(dst, src->vaddr, len);
> +	} else if (!dst->is_iomem) {
> +		memcpy_fromio(dst->vaddr, src->vaddr_iomem, len);
> +	} else {
> +		/*
> +		 * Bounce size is not performance tuned, but using a
> +		 * bounce buffer like this is significantly faster than
> +		 * resorting to ioreadxx() + iowritexx().
> +		 */
> +		char bounce[MEMCPY_BOUNCE_SIZE];
> +		void __iomem *_src = src->vaddr_iomem;
> +		void __iomem *_dst = dst->vaddr_iomem;
> +
> +		while (len >= MEMCPY_BOUNCE_SIZE) {
> +			memcpy_fromio(bounce, _src, MEMCPY_BOUNCE_SIZE);
> +			memcpy_toio(_dst, bounce, MEMCPY_BOUNCE_SIZE);
> +			_src += MEMCPY_BOUNCE_SIZE;
> +			_dst += MEMCPY_BOUNCE_SIZE;
> +			len -= MEMCPY_BOUNCE_SIZE;
> +		}
> +		if (len) {
> +			memcpy_fromio(bounce, _src, len);
> +			memcpy_toio(_dst, bounce, len);
> +		}
> +	}
> +}
> +
> +/**
> + * drm_memcpy_from_wc - Perform the fastest available memcpy from a source
> + * that may be WC.
> + * @dst: The destination pointer
> + * @src: The source pointer
> + * @len: The size of the area to transfer in bytes
> + *
> + * Tries an arch-optimized memcpy that prefetches when reading out of a WC
> + * region, and if no such beast is available, falls back to a normal memcpy.
> + */
> +void drm_memcpy_from_wc(struct dma_buf_map *dst,
> +			const struct dma_buf_map *src,
> +			unsigned long len)
> +{
> +	if (WARN_ON(in_interrupt()))
> +		return;

I would either make it a BUG_ON() or at least use the fallback memcpy.

Just returning without doing anything isn't really nice.

Christian.

> +
> +	if (IS_ENABLED(CONFIG_X86) && static_branch_likely(&has_movntdqa)) {
> +		__drm_memcpy_from_wc(dst->is_iomem ?
> +				     (void __force *)dst->vaddr_iomem :
> +				     dst->vaddr,
> +				     src->is_iomem ?
> +				     (void const __force *)src->vaddr_iomem :
> +				     src->vaddr,
> +				     len);
> +		return;
> +	}
> +
> +	memcpy_fallback(dst, src, len);
> +}
> +EXPORT_SYMBOL(drm_memcpy_from_wc);
> +
> +#ifdef CONFIG_X86
> +/**
> + * drm_memcpy_init_early - One time initialization of the WC memcpy code
> + */
> +void drm_memcpy_init_early(void)
> +{
> +	/*
> +	 * Some hypervisors (e.g. KVM) don't support VEX-prefix instructions
> +	 * emulation. So don't enable movntdqa in hypervisor guest.
> +	 */
> +	if (static_cpu_has(X86_FEATURE_XMM4_1) &&
> +	    !boot_cpu_has(X86_FEATURE_HYPERVISOR))
> +		static_branch_enable(&has_movntdqa);
> +}
> +#else
> +void drm_memcpy_init_early(void)
> +{
> +}
> +#endif
> diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
> index 3d8d68a98b95..8804ec7d3215 100644
> --- a/drivers/gpu/drm/drm_drv.c
> +++ b/drivers/gpu/drm/drm_drv.c
> @@ -35,6 +35,7 @@
>   #include <linux/slab.h>
>   #include <linux/srcu.h>
>   
> +#include <drm/drm_cache.h>
>   #include <drm/drm_client.h>
>   #include <drm/drm_color_mgmt.h>
>   #include <drm/drm_drv.h>
> @@ -1041,6 +1042,7 @@ static int __init drm_core_init(void)
>   
>   	drm_connector_ida_init();
>   	idr_init(&drm_minors_idr);
> +	drm_memcpy_init_early();
>   
>   	ret = drm_sysfs_init();
>   	if (ret < 0) {
> diff --git a/include/drm/drm_cache.h b/include/drm/drm_cache.h
> index e9ad4863d915..cc9de1632dd3 100644
> --- a/include/drm/drm_cache.h
> +++ b/include/drm/drm_cache.h
> @@ -35,6 +35,8 @@
>   
>   #include <linux/scatterlist.h>
>   
> +struct dma_buf_map;
> +
>   void drm_clflush_pages(struct page *pages[], unsigned long num_pages);
>   void drm_clflush_sg(struct sg_table *st);
>   void drm_clflush_virt_range(void *addr, unsigned long length);
> @@ -70,4 +72,9 @@ static inline bool drm_arch_can_wc_memory(void)
>   #endif
>   }
>   
> +void drm_memcpy_init_early(void);
> +
> +void drm_memcpy_from_wc(struct dma_buf_map *dst,
> +			const struct dma_buf_map *src,
> +			unsigned long len);
>   #endif
Thomas Hellstrom May 28, 2021, 3:10 p.m. UTC | #2
On 5/28/21 4:19 PM, Christian König wrote:
> Am 27.05.21 um 16:47 schrieb Thomas Hellström:
>> Reading out of write-combining mapped memory is typically very slow
>> since the CPU doesn't prefetch. However some archs have special
>> instructions to do this.
>>
>> So add a best-effort memcpy_from_wc taking dma-buf-map pointer
>> arguments that attempts to use a fast prefetching memcpy and
>> otherwise falls back to ordinary memcopies, taking the iomem tagging
>> into account.
>>
>> The code is largely copied from i915_memcpy_from_wc.
>>
>> Cc: Daniel Vetter <daniel@ffwll.ch>
>> Cc: Christian König <christian.koenig@amd.com>
>> Suggested-by: Daniel Vetter <daniel@ffwll.ch>
>> Signed-off-by: Thomas Hellström <thomas.hellstrom@linux.intel.com>
>> ---
>>   Documentation/gpu/drm-mm.rst |   2 +-
>>   drivers/gpu/drm/drm_cache.c  | 138 +++++++++++++++++++++++++++++++++++
>>   drivers/gpu/drm/drm_drv.c    |   2 +
>>   include/drm/drm_cache.h      |   7 ++
>>   4 files changed, 148 insertions(+), 1 deletion(-)
>>
>> diff --git a/Documentation/gpu/drm-mm.rst b/Documentation/gpu/drm-mm.rst
>> index 21be6deadc12..c66058c5bce7 100644
>> --- a/Documentation/gpu/drm-mm.rst
>> +++ b/Documentation/gpu/drm-mm.rst
>> @@ -469,7 +469,7 @@ DRM MM Range Allocator Function References
>>   .. kernel-doc:: drivers/gpu/drm/drm_mm.c
>>      :export:
>>   -DRM Cache Handling
>> +DRM Cache Handling and Fast WC memcpy()
>>   ==================
>>     .. kernel-doc:: drivers/gpu/drm/drm_cache.c
>> diff --git a/drivers/gpu/drm/drm_cache.c b/drivers/gpu/drm/drm_cache.c
>> index 79a50ef1250f..08614f7fdd8d 100644
>> --- a/drivers/gpu/drm/drm_cache.c
>> +++ b/drivers/gpu/drm/drm_cache.c
>> @@ -28,6 +28,7 @@
>>    * Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
>>    */
>>   +#include <linux/dma-buf-map.h>
>>   #include <linux/export.h>
>>   #include <linux/highmem.h>
>>   #include <linux/mem_encrypt.h>
>> @@ -35,6 +36,9 @@
>>     #include <drm/drm_cache.h>
>>   +/* A small bounce buffer that fits on the stack. */
>> +#define MEMCPY_BOUNCE_SIZE 128
>> +
>>   #if defined(CONFIG_X86)
>>   #include <asm/smp.h>
>>   @@ -209,3 +213,137 @@ bool drm_need_swiotlb(int dma_bits)
>>       return max_iomem > ((u64)1 << dma_bits);
>>   }
>>   EXPORT_SYMBOL(drm_need_swiotlb);
>> +
>> +#ifdef CONFIG_X86
>> +
>> +static DEFINE_STATIC_KEY_FALSE(has_movntdqa);
>> +
>> +static void __memcpy_ntdqa(void *dst, const void *src, unsigned long len)
>> +{
>> +    kernel_fpu_begin();
>> +
>> +    while (len >= 4) {
>> +        asm("movntdqa    (%0), %%xmm0\n"
>> +            "movntdqa 16(%0), %%xmm1\n"
>> +            "movntdqa 32(%0), %%xmm2\n"
>> +            "movntdqa 48(%0), %%xmm3\n"
>> +            "movaps %%xmm0,   (%1)\n"
>> +            "movaps %%xmm1, 16(%1)\n"
>> +            "movaps %%xmm2, 32(%1)\n"
>> +            "movaps %%xmm3, 48(%1)\n"
>> +            :: "r" (src), "r" (dst) : "memory");
>> +        src += 64;
>> +        dst += 64;
>> +        len -= 4;
>> +    }
>> +    while (len--) {
>> +        asm("movntdqa (%0), %%xmm0\n"
>> +            "movaps %%xmm0, (%1)\n"
>> +            :: "r" (src), "r" (dst) : "memory");
>> +        src += 16;
>> +        dst += 16;
>> +    }
>> +
>> +    kernel_fpu_end();
>> +}
>> +
>> +/*
>> + * __drm_memcpy_from_wc copies @len bytes from @src to @dst using
>> + * non-temporal instructions where available. Note that all arguments
>> + * (@src, @dst) must be aligned to 16 bytes and @len must be a multiple
>> + * of 16.
>> + */
>> +static void __drm_memcpy_from_wc(void *dst, const void *src, unsigned long len)
>> +{
>> +    if (unlikely(((unsigned long)dst | (unsigned long)src | len) & 15))
>> +        memcpy(dst, src, len);
>> +    else if (likely(len))
>> +        __memcpy_ntdqa(dst, src, len >> 4);
>> +}
>> +#endif
>> +
>> +static void memcpy_fallback(struct dma_buf_map *dst,
>> +                const struct dma_buf_map *src,
>> +                unsigned long len)
>> +{
>> +    if (!dst->is_iomem && !src->is_iomem) {
>> +        memcpy(dst->vaddr, src->vaddr, len);
>> +    } else if (!src->is_iomem) {
>> +        dma_buf_map_memcpy_to(dst, src->vaddr, len);
>> +    } else if (!dst->is_iomem) {
>> +        memcpy_fromio(dst->vaddr, src->vaddr_iomem, len);
>> +    } else {
>> +        /*
>> +         * Bounce size is not performance tuned, but using a
>> +         * bounce buffer like this is significantly faster than
>> +         * resorting to ioreadxx() + iowritexx().
>> +         */
>> +        char bounce[MEMCPY_BOUNCE_SIZE];
>> +        void __iomem *_src = src->vaddr_iomem;
>> +        void __iomem *_dst = dst->vaddr_iomem;
>> +
>> +        while (len >= MEMCPY_BOUNCE_SIZE) {
>> +            memcpy_fromio(bounce, _src, MEMCPY_BOUNCE_SIZE);
>> +            memcpy_toio(_dst, bounce, MEMCPY_BOUNCE_SIZE);
>> +            _src += MEMCPY_BOUNCE_SIZE;
>> +            _dst += MEMCPY_BOUNCE_SIZE;
>> +            len -= MEMCPY_BOUNCE_SIZE;
>> +        }
>> +        if (len) {
>> +            memcpy_fromio(bounce, _src, len);
>> +            memcpy_toio(_dst, bounce, len);
>> +        }
>> +    }
>> +}
>> +
>> +/**
>> + * drm_memcpy_from_wc - Perform the fastest available memcpy from a source
>> + * that may be WC.
>> + * @dst: The destination pointer
>> + * @src: The source pointer
>> + * @len: The size of the area to transfer in bytes
>> + *
>> + * Tries an arch-optimized memcpy that prefetches when reading out of a WC
>> + * region, and if no such beast is available, falls back to a normal memcpy.
>> + */
>> +void drm_memcpy_from_wc(struct dma_buf_map *dst,
>> +            const struct dma_buf_map *src,
>> +            unsigned long len)
>> +{
>> +    if (WARN_ON(in_interrupt()))
>> +        return;
>
> I would either make it a BUG_ON() or at least use the fallback memcpy.
>
> Just returning without doing anything isn't really nice.

Hmm, Yes, Daniel suggested this on IRC. I would have gone for the 
fallback which he didn't like, and I think crashing the kernel with a 
BUG_ON in an interrupt which from experience might result in a 
completely silent hang without a trace of what went wrong unless 
possibly with a serial console is not really acceptable either.... 
Perhaps we can go for a WARN_ON + fallback, which still forces the 
caller to come up with something else...

/Thomas

>
> Christian.
>
>> +
>> +    if (IS_ENABLED(CONFIG_X86) && static_branch_likely(&has_movntdqa)) {
>> +        __drm_memcpy_from_wc(dst->is_iomem ?
>> +                     (void __force *)dst->vaddr_iomem :
>> +                     dst->vaddr,
>> +                     src->is_iomem ?
>> +                     (void const __force *)src->vaddr_iomem :
>> +                     src->vaddr,
>> +                     len);
>> +        return;
>> +    }
>> +
>> +    memcpy_fallback(dst, src, len);
>> +}
>> +EXPORT_SYMBOL(drm_memcpy_from_wc);
>> +
>> +#ifdef CONFIG_X86
>> +/**
>> + * drm_memcpy_init_early - One time initialization of the WC memcpy code
>> + */
>> +void drm_memcpy_init_early(void)
>> +{
>> +    /*
>> +     * Some hypervisors (e.g. KVM) don't support VEX-prefix instructions
>> +     * emulation. So don't enable movntdqa in hypervisor guest.
>> +     */
>> +    if (static_cpu_has(X86_FEATURE_XMM4_1) &&
>> +        !boot_cpu_has(X86_FEATURE_HYPERVISOR))
>> +        static_branch_enable(&has_movntdqa);
>> +}
>> +#else
>> +void drm_memcpy_init_early(void)
>> +{
>> +}
>> +#endif
>> diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
>> index 3d8d68a98b95..8804ec7d3215 100644
>> --- a/drivers/gpu/drm/drm_drv.c
>> +++ b/drivers/gpu/drm/drm_drv.c
>> @@ -35,6 +35,7 @@
>>   #include <linux/slab.h>
>>   #include <linux/srcu.h>
>>   +#include <drm/drm_cache.h>
>>   #include <drm/drm_client.h>
>>   #include <drm/drm_color_mgmt.h>
>>   #include <drm/drm_drv.h>
>> @@ -1041,6 +1042,7 @@ static int __init drm_core_init(void)
>>         drm_connector_ida_init();
>>       idr_init(&drm_minors_idr);
>> +    drm_memcpy_init_early();
>>         ret = drm_sysfs_init();
>>       if (ret < 0) {
>> diff --git a/include/drm/drm_cache.h b/include/drm/drm_cache.h
>> index e9ad4863d915..cc9de1632dd3 100644
>> --- a/include/drm/drm_cache.h
>> +++ b/include/drm/drm_cache.h
>> @@ -35,6 +35,8 @@
>>     #include <linux/scatterlist.h>
>>   +struct dma_buf_map;
>> +
>>   void drm_clflush_pages(struct page *pages[], unsigned long num_pages);
>>   void drm_clflush_sg(struct sg_table *st);
>>   void drm_clflush_virt_range(void *addr, unsigned long length);
>> @@ -70,4 +72,9 @@ static inline bool drm_arch_can_wc_memory(void)
>>   #endif
>>   }
>>   +void drm_memcpy_init_early(void);
>> +
>> +void drm_memcpy_from_wc(struct dma_buf_map *dst,
>> +            const struct dma_buf_map *src,
>> +            unsigned long len);
>>   #endif
>
Christian König May 28, 2021, 3:25 p.m. UTC | #3
Am 28.05.21 um 17:10 schrieb Thomas Hellström:
>
> On 5/28/21 4:19 PM, Christian König wrote:
>> Am 27.05.21 um 16:47 schrieb Thomas Hellström:
>>> Reading out of write-combining mapped memory is typically very slow
>>> since the CPU doesn't prefetch. However some archs have special
>>> instructions to do this.
>>>
>>> So add a best-effort memcpy_from_wc taking dma-buf-map pointer
>>> arguments that attempts to use a fast prefetching memcpy and
>>> otherwise falls back to ordinary memcopies, taking the iomem tagging
>>> into account.
>>>
>>> The code is largely copied from i915_memcpy_from_wc.
>>>
>>> Cc: Daniel Vetter <daniel@ffwll.ch>
>>> Cc: Christian König <christian.koenig@amd.com>
>>> Suggested-by: Daniel Vetter <daniel@ffwll.ch>
>>> Signed-off-by: Thomas Hellström <thomas.hellstrom@linux.intel.com>
>>> ---
>>>   Documentation/gpu/drm-mm.rst |   2 +-
>>>   drivers/gpu/drm/drm_cache.c  | 138 +++++++++++++++++++++++++++++++++++
>>>   drivers/gpu/drm/drm_drv.c    |   2 +
>>>   include/drm/drm_cache.h      |   7 ++
>>>   4 files changed, 148 insertions(+), 1 deletion(-)
>>>
>>> diff --git a/Documentation/gpu/drm-mm.rst b/Documentation/gpu/drm-mm.rst
>>> index 21be6deadc12..c66058c5bce7 100644
>>> --- a/Documentation/gpu/drm-mm.rst
>>> +++ b/Documentation/gpu/drm-mm.rst
>>> @@ -469,7 +469,7 @@ DRM MM Range Allocator Function References
>>>   .. kernel-doc:: drivers/gpu/drm/drm_mm.c
>>>      :export:
>>>   -DRM Cache Handling
>>> +DRM Cache Handling and Fast WC memcpy()
>>>   ==================
>>>     .. kernel-doc:: drivers/gpu/drm/drm_cache.c
>>> diff --git a/drivers/gpu/drm/drm_cache.c b/drivers/gpu/drm/drm_cache.c
>>> index 79a50ef1250f..08614f7fdd8d 100644
>>> --- a/drivers/gpu/drm/drm_cache.c
>>> +++ b/drivers/gpu/drm/drm_cache.c
>>> @@ -28,6 +28,7 @@
>>>    * Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
>>>    */
>>>   +#include <linux/dma-buf-map.h>
>>>   #include <linux/export.h>
>>>   #include <linux/highmem.h>
>>>   #include <linux/mem_encrypt.h>
>>> @@ -35,6 +36,9 @@
>>>     #include <drm/drm_cache.h>
>>>   +/* A small bounce buffer that fits on the stack. */
>>> +#define MEMCPY_BOUNCE_SIZE 128
>>> +
>>>   #if defined(CONFIG_X86)
>>>   #include <asm/smp.h>
>>>   @@ -209,3 +213,137 @@ bool drm_need_swiotlb(int dma_bits)
>>>       return max_iomem > ((u64)1 << dma_bits);
>>>   }
>>>   EXPORT_SYMBOL(drm_need_swiotlb);
>>> +
>>> +#ifdef CONFIG_X86
>>> +
>>> +static DEFINE_STATIC_KEY_FALSE(has_movntdqa);
>>> +
>>> +static void __memcpy_ntdqa(void *dst, const void *src, unsigned long len)
>>> +{
>>> +    kernel_fpu_begin();
>>> +
>>> +    while (len >= 4) {
>>> +        asm("movntdqa    (%0), %%xmm0\n"
>>> +            "movntdqa 16(%0), %%xmm1\n"
>>> +            "movntdqa 32(%0), %%xmm2\n"
>>> +            "movntdqa 48(%0), %%xmm3\n"
>>> +            "movaps %%xmm0,   (%1)\n"
>>> +            "movaps %%xmm1, 16(%1)\n"
>>> +            "movaps %%xmm2, 32(%1)\n"
>>> +            "movaps %%xmm3, 48(%1)\n"
>>> +            :: "r" (src), "r" (dst) : "memory");
>>> +        src += 64;
>>> +        dst += 64;
>>> +        len -= 4;
>>> +    }
>>> +    while (len--) {
>>> +        asm("movntdqa (%0), %%xmm0\n"
>>> +            "movaps %%xmm0, (%1)\n"
>>> +            :: "r" (src), "r" (dst) : "memory");
>>> +        src += 16;
>>> +        dst += 16;
>>> +    }
>>> +
>>> +    kernel_fpu_end();
>>> +}
>>> +
>>> +/*
>>> + * __drm_memcpy_from_wc copies @len bytes from @src to @dst using
>>> + * non-temporal instructions where available. Note that all arguments
>>> + * (@src, @dst) must be aligned to 16 bytes and @len must be a multiple
>>> + * of 16.
>>> + */
>>> +static void __drm_memcpy_from_wc(void *dst, const void *src, unsigned long len)
>>> +{
>>> +    if (unlikely(((unsigned long)dst | (unsigned long)src | len) & 15))
>>> +        memcpy(dst, src, len);
>>> +    else if (likely(len))
>>> +        __memcpy_ntdqa(dst, src, len >> 4);
>>> +}
>>> +#endif
>>> +
>>> +static void memcpy_fallback(struct dma_buf_map *dst,
>>> +                const struct dma_buf_map *src,
>>> +                unsigned long len)
>>> +{
>>> +    if (!dst->is_iomem && !src->is_iomem) {
>>> +        memcpy(dst->vaddr, src->vaddr, len);
>>> +    } else if (!src->is_iomem) {
>>> +        dma_buf_map_memcpy_to(dst, src->vaddr, len);
>>> +    } else if (!dst->is_iomem) {
>>> +        memcpy_fromio(dst->vaddr, src->vaddr_iomem, len);
>>> +    } else {
>>> +        /*
>>> +         * Bounce size is not performance tuned, but using a
>>> +         * bounce buffer like this is significantly faster than
>>> +         * resorting to ioreadxx() + iowritexx().
>>> +         */
>>> +        char bounce[MEMCPY_BOUNCE_SIZE];
>>> +        void __iomem *_src = src->vaddr_iomem;
>>> +        void __iomem *_dst = dst->vaddr_iomem;
>>> +
>>> +        while (len >= MEMCPY_BOUNCE_SIZE) {
>>> +            memcpy_fromio(bounce, _src, MEMCPY_BOUNCE_SIZE);
>>> +            memcpy_toio(_dst, bounce, MEMCPY_BOUNCE_SIZE);
>>> +            _src += MEMCPY_BOUNCE_SIZE;
>>> +            _dst += MEMCPY_BOUNCE_SIZE;
>>> +            len -= MEMCPY_BOUNCE_SIZE;
>>> +        }
>>> +        if (len) {
>>> +            memcpy_fromio(bounce, _src, len);
>>> +            memcpy_toio(_dst, bounce, len);
>>> +        }
>>> +    }
>>> +}
>>> +
>>> +/**
>>> + * drm_memcpy_from_wc - Perform the fastest available memcpy from a source
>>> + * that may be WC.
>>> + * @dst: The destination pointer
>>> + * @src: The source pointer
>>> + * @len: The size of the area to transfer in bytes
>>> + *
>>> + * Tries an arch-optimized memcpy that prefetches when reading out of a WC
>>> + * region, and if no such beast is available, falls back to a normal memcpy.
>>> + */
>>> +void drm_memcpy_from_wc(struct dma_buf_map *dst,
>>> +            const struct dma_buf_map *src,
>>> +            unsigned long len)
>>> +{
>>> +    if (WARN_ON(in_interrupt()))
>>> +        return;
>>
>> I would either make it a BUG_ON() or at least use the fallback memcpy.
>>
>> Just returning without doing anything isn't really nice.
>
> Hmm, Yes, Daniel suggested this on IRC. I would have gone for the 
> fallback which he didn't like, and I think crashing the kernel with a 
> BUG_ON in an interrupt which from experience might result in a 
> completely silent hang without a trace of what went wrong unless 
> possibly with a serial console is not really acceptable either.... 
> Perhaps we can go for a WARN_ON + fallback, which still forces the 
> caller to come up with something else...

Yeah, good argument. BUG_ON in an interrupt handler is nasty as well. 
WARN_ON+fallback sounds like the right thing to do.

Christian.

>
> /Thomas
>
>>
>> Christian.
>>
>>> +
>>> +    if (IS_ENABLED(CONFIG_X86) && static_branch_likely(&has_movntdqa)) {
>>> +        __drm_memcpy_from_wc(dst->is_iomem ?
>>> +                     (void __force *)dst->vaddr_iomem :
>>> +                     dst->vaddr,
>>> +                     src->is_iomem ?
>>> +                     (void const __force *)src->vaddr_iomem :
>>> +                     src->vaddr,
>>> +                     len);
>>> +        return;
>>> +    }
>>> +
>>> +    memcpy_fallback(dst, src, len);
>>> +}
>>> +EXPORT_SYMBOL(drm_memcpy_from_wc);
>>> +
>>> +#ifdef CONFIG_X86
>>> +/**
>>> + * drm_memcpy_init_early - One time initialization of the WC memcpy code
>>> + */
>>> +void drm_memcpy_init_early(void)
>>> +{
>>> +    /*
>>> +     * Some hypervisors (e.g. KVM) don't support VEX-prefix instructions
>>> +     * emulation. So don't enable movntdqa in hypervisor guest.
>>> +     */
>>> +    if (static_cpu_has(X86_FEATURE_XMM4_1) &&
>>> +        !boot_cpu_has(X86_FEATURE_HYPERVISOR))
>>> +        static_branch_enable(&has_movntdqa);
>>> +}
>>> +#else
>>> +void drm_memcpy_init_early(void)
>>> +{
>>> +}
>>> +#endif
>>> diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
>>> index 3d8d68a98b95..8804ec7d3215 100644
>>> --- a/drivers/gpu/drm/drm_drv.c
>>> +++ b/drivers/gpu/drm/drm_drv.c
>>> @@ -35,6 +35,7 @@
>>>   #include <linux/slab.h>
>>>   #include <linux/srcu.h>
>>>   +#include <drm/drm_cache.h>
>>>   #include <drm/drm_client.h>
>>>   #include <drm/drm_color_mgmt.h>
>>>   #include <drm/drm_drv.h>
>>> @@ -1041,6 +1042,7 @@ static int __init drm_core_init(void)
>>>         drm_connector_ida_init();
>>>       idr_init(&drm_minors_idr);
>>> +    drm_memcpy_init_early();
>>>         ret = drm_sysfs_init();
>>>       if (ret < 0) {
>>> diff --git a/include/drm/drm_cache.h b/include/drm/drm_cache.h
>>> index e9ad4863d915..cc9de1632dd3 100644
>>> --- a/include/drm/drm_cache.h
>>> +++ b/include/drm/drm_cache.h
>>> @@ -35,6 +35,8 @@
>>>     #include <linux/scatterlist.h>
>>>   +struct dma_buf_map;
>>> +
>>>   void drm_clflush_pages(struct page *pages[], unsigned long num_pages);
>>>   void drm_clflush_sg(struct sg_table *st);
>>>   void drm_clflush_virt_range(void *addr, unsigned long length);
>>> @@ -70,4 +72,9 @@ static inline bool drm_arch_can_wc_memory(void)
>>>   #endif
>>>   }
>>>   +void drm_memcpy_init_early(void);
>>> +
>>> +void drm_memcpy_from_wc(struct dma_buf_map *dst,
>>> +            const struct dma_buf_map *src,
>>> +            unsigned long len);
>>>   #endif
>>
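
The approach the discussion converges on (WARN_ON plus fallback rather than an
early return or a BUG_ON) would look roughly like the sketch below. This is not
the code posted in this revision, only an illustration of the discussed change;
it reuses memcpy_fallback() and the has_movntdqa static key from the patch.

void drm_memcpy_from_wc(struct dma_buf_map *dst,
			const struct dma_buf_map *src,
			unsigned long len)
{
	/*
	 * The movntdqa path relies on kernel_fpu_begin(), which is not
	 * safe to use here in interrupt context. Warn so the caller gets
	 * fixed, but still perform the copy using the plain fallback.
	 */
	if (WARN_ON(in_interrupt())) {
		memcpy_fallback(dst, src, len);
		return;
	}

	if (IS_ENABLED(CONFIG_X86) && static_branch_likely(&has_movntdqa)) {
		__drm_memcpy_from_wc(dst->is_iomem ?
				     (void __force *)dst->vaddr_iomem :
				     dst->vaddr,
				     src->is_iomem ?
				     (void const __force *)src->vaddr_iomem :
				     src->vaddr,
				     len);
		return;
	}

	memcpy_fallback(dst, src, len);
}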

Patch

diff --git a/Documentation/gpu/drm-mm.rst b/Documentation/gpu/drm-mm.rst
index 21be6deadc12..c66058c5bce7 100644
--- a/Documentation/gpu/drm-mm.rst
+++ b/Documentation/gpu/drm-mm.rst
@@ -469,7 +469,7 @@  DRM MM Range Allocator Function References
 .. kernel-doc:: drivers/gpu/drm/drm_mm.c
    :export:
 
-DRM Cache Handling
+DRM Cache Handling and Fast WC memcpy()
 ==================
 
 .. kernel-doc:: drivers/gpu/drm/drm_cache.c
diff --git a/drivers/gpu/drm/drm_cache.c b/drivers/gpu/drm/drm_cache.c
index 79a50ef1250f..08614f7fdd8d 100644
--- a/drivers/gpu/drm/drm_cache.c
+++ b/drivers/gpu/drm/drm_cache.c
@@ -28,6 +28,7 @@ 
  * Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
  */
 
+#include <linux/dma-buf-map.h>
 #include <linux/export.h>
 #include <linux/highmem.h>
 #include <linux/mem_encrypt.h>
@@ -35,6 +36,9 @@ 
 
 #include <drm/drm_cache.h>
 
+/* A small bounce buffer that fits on the stack. */
+#define MEMCPY_BOUNCE_SIZE 128
+
 #if defined(CONFIG_X86)
 #include <asm/smp.h>
 
@@ -209,3 +213,137 @@  bool drm_need_swiotlb(int dma_bits)
 	return max_iomem > ((u64)1 << dma_bits);
 }
 EXPORT_SYMBOL(drm_need_swiotlb);
+
+#ifdef CONFIG_X86
+
+static DEFINE_STATIC_KEY_FALSE(has_movntdqa);
+
+static void __memcpy_ntdqa(void *dst, const void *src, unsigned long len)
+{
+	kernel_fpu_begin();
+
+	while (len >= 4) {
+		asm("movntdqa	(%0), %%xmm0\n"
+		    "movntdqa 16(%0), %%xmm1\n"
+		    "movntdqa 32(%0), %%xmm2\n"
+		    "movntdqa 48(%0), %%xmm3\n"
+		    "movaps %%xmm0,   (%1)\n"
+		    "movaps %%xmm1, 16(%1)\n"
+		    "movaps %%xmm2, 32(%1)\n"
+		    "movaps %%xmm3, 48(%1)\n"
+		    :: "r" (src), "r" (dst) : "memory");
+		src += 64;
+		dst += 64;
+		len -= 4;
+	}
+	while (len--) {
+		asm("movntdqa (%0), %%xmm0\n"
+		    "movaps %%xmm0, (%1)\n"
+		    :: "r" (src), "r" (dst) : "memory");
+		src += 16;
+		dst += 16;
+	}
+
+	kernel_fpu_end();
+}
+
+/*
+ * __drm_memcpy_from_wc copies @len bytes from @src to @dst using
+ * non-temporal instructions where available. Note that all arguments
+ * (@src, @dst) must be aligned to 16 bytes and @len must be a multiple
+ * of 16.
+ */
+static void __drm_memcpy_from_wc(void *dst, const void *src, unsigned long len)
+{
+	if (unlikely(((unsigned long)dst | (unsigned long)src | len) & 15))
+		memcpy(dst, src, len);
+	else if (likely(len))
+		__memcpy_ntdqa(dst, src, len >> 4);
+}
+#endif
+
+static void memcpy_fallback(struct dma_buf_map *dst,
+			    const struct dma_buf_map *src,
+			    unsigned long len)
+{
+	if (!dst->is_iomem && !src->is_iomem) {
+		memcpy(dst->vaddr, src->vaddr, len);
+	} else if (!src->is_iomem) {
+		dma_buf_map_memcpy_to(dst, src->vaddr, len);
+	} else if (!dst->is_iomem) {
+		memcpy_fromio(dst->vaddr, src->vaddr_iomem, len);
+	} else {
+		/*
+		 * Bounce size is not performance tuned, but using a
+		 * bounce buffer like this is significantly faster than
+		 * resorting to ioreadxx() + iowritexx().
+		 */
+		char bounce[MEMCPY_BOUNCE_SIZE];
+		void __iomem *_src = src->vaddr_iomem;
+		void __iomem *_dst = dst->vaddr_iomem;
+
+		while (len >= MEMCPY_BOUNCE_SIZE) {
+			memcpy_fromio(bounce, _src, MEMCPY_BOUNCE_SIZE);
+			memcpy_toio(_dst, bounce, MEMCPY_BOUNCE_SIZE);
+			_src += MEMCPY_BOUNCE_SIZE;
+			_dst += MEMCPY_BOUNCE_SIZE;
+			len -= MEMCPY_BOUNCE_SIZE;
+		}
+		if (len) {
+			memcpy_fromio(bounce, _src, len);
+			memcpy_toio(_dst, bounce, len);
+		}
+	}
+}
+
+/**
+ * drm_memcpy_from_wc - Perform the fastest available memcpy from a source
+ * that may be WC.
+ * @dst: The destination pointer
+ * @src: The source pointer
+ * @len: The size of the area to transfer in bytes
+ *
+ * Tries an arch-optimized memcpy that prefetches when reading out of a WC
+ * region, and if no such beast is available, falls back to a normal memcpy.
+ */
+void drm_memcpy_from_wc(struct dma_buf_map *dst,
+			const struct dma_buf_map *src,
+			unsigned long len)
+{
+	if (WARN_ON(in_interrupt()))
+		return;
+
+	if (IS_ENABLED(CONFIG_X86) && static_branch_likely(&has_movntdqa)) {
+		__drm_memcpy_from_wc(dst->is_iomem ?
+				     (void __force *)dst->vaddr_iomem :
+				     dst->vaddr,
+				     src->is_iomem ?
+				     (void const __force *)src->vaddr_iomem :
+				     src->vaddr,
+				     len);
+		return;
+	}
+
+	memcpy_fallback(dst, src, len);
+}
+EXPORT_SYMBOL(drm_memcpy_from_wc);
+
+#ifdef CONFIG_X86
+/**
+ * drm_memcpy_init_early - One time initialization of the WC memcpy code
+ */
+void drm_memcpy_init_early(void)
+{
+	/*
+	 * Some hypervisors (e.g. KVM) don't support VEX-prefix instructions
+	 * emulation. So don't enable movntdqa in hypervisor guest.
+	 */
+	if (static_cpu_has(X86_FEATURE_XMM4_1) &&
+	    !boot_cpu_has(X86_FEATURE_HYPERVISOR))
+		static_branch_enable(&has_movntdqa);
+}
+#else
+void drm_memcpy_init_early(void)
+{
+}
+#endif
diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
index 3d8d68a98b95..8804ec7d3215 100644
--- a/drivers/gpu/drm/drm_drv.c
+++ b/drivers/gpu/drm/drm_drv.c
@@ -35,6 +35,7 @@ 
 #include <linux/slab.h>
 #include <linux/srcu.h>
 
+#include <drm/drm_cache.h>
 #include <drm/drm_client.h>
 #include <drm/drm_color_mgmt.h>
 #include <drm/drm_drv.h>
@@ -1041,6 +1042,7 @@  static int __init drm_core_init(void)
 
 	drm_connector_ida_init();
 	idr_init(&drm_minors_idr);
+	drm_memcpy_init_early();
 
 	ret = drm_sysfs_init();
 	if (ret < 0) {
diff --git a/include/drm/drm_cache.h b/include/drm/drm_cache.h
index e9ad4863d915..cc9de1632dd3 100644
--- a/include/drm/drm_cache.h
+++ b/include/drm/drm_cache.h
@@ -35,6 +35,8 @@ 
 
 #include <linux/scatterlist.h>
 
+struct dma_buf_map;
+
 void drm_clflush_pages(struct page *pages[], unsigned long num_pages);
 void drm_clflush_sg(struct sg_table *st);
 void drm_clflush_virt_range(void *addr, unsigned long length);
@@ -70,4 +72,9 @@  static inline bool drm_arch_can_wc_memory(void)
 #endif
 }
 
+void drm_memcpy_init_early(void);
+
+void drm_memcpy_from_wc(struct dma_buf_map *dst,
+			const struct dma_buf_map *src,
+			unsigned long len);
 #endif