@@ -38,7 +38,8 @@
#include <asm/dma-mapping.h>
#include <trace/events/swiotlb.h>
-#define MAX_DMA_BITS 32
+#define MAX_DMA32_BITS 32
+#define MAX_DMA64_BITS 64
/*
* Quick lookup value of the bus address of the IOTLB.
@@ -109,19 +110,25 @@ int xen_swiotlb_fixup(void *buf, unsigned long nslabs, bool high)
int rc;
unsigned int order = get_order(IO_TLB_SEGSIZE << IO_TLB_SHIFT);
unsigned int i, dma_bits = order + PAGE_SHIFT;
+ unsigned int max_dma_bits = MAX_DMA32_BITS;
dma_addr_t dma_handle;
phys_addr_t p = virt_to_phys(buf);
BUILD_BUG_ON(IO_TLB_SEGSIZE & (IO_TLB_SEGSIZE - 1));
BUG_ON(nslabs % IO_TLB_SEGSIZE);
+ if (high) {
+ dma_bits = MAX_DMA64_BITS;
+ max_dma_bits = MAX_DMA64_BITS;
+ }
+
i = 0;
do {
do {
rc = xen_create_contiguous_region(
p + (i << IO_TLB_SHIFT), order,
dma_bits, &dma_handle);
- } while (rc && dma_bits++ < MAX_DMA_BITS);
+ } while (rc && dma_bits++ < max_dma_bits);
if (rc)
return rc;
@@ -381,7 +388,8 @@ xen_swiotlb_sync_sg_for_device(struct device *dev, struct scatterlist *sgl,
static int
xen_swiotlb_dma_supported(struct device *hwdev, u64 mask)
{
- return xen_phys_to_dma(hwdev, io_tlb_default_mem.end - 1) <= mask;
+ return xen_phys_to_dma(hwdev, io_tlb_default_mem.end - 1) <= mask ||
+ xen_phys_to_dma(hwdev, io_tlb_high_mem.end - 1) <= mask;
}
const struct dma_map_ops xen_swiotlb_dma_ops = {
While swiotlb-xen mostly relies on the generic swiotlb API to initialize and
use the swiotlb, this patch adds highmem swiotlb support to the swiotlb-xen
specific code. E.g., xen_swiotlb_fixup() may now request 64-bit memory pages
from the hypervisor for the swiotlb buffer.

Cc: Konrad Wilk <konrad.wilk@oracle.com>
Cc: Joe Jin <joe.jin@oracle.com>
Signed-off-by: Dongli Zhang <dongli.zhang@oracle.com>
---
 drivers/xen/swiotlb-xen.c | 14 +++++++++++---
 1 file changed, 11 insertions(+), 3 deletions(-)
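
For reference, here is a minimal userspace sketch of the retry policy the
xen_swiotlb_fixup() hunk implements: the low pool starts at the segment's
natural address width and relaxes the restriction one bit at a time up to
32 bits, while the high pool starts (and stays) at 64 bits. This is only an
illustration under stated assumptions, not kernel code: the
fake_create_contiguous_region() helper and its 40-bit threshold are made-up
stand-ins for the real xen_create_contiguous_region() hypercall.

/*
 * Standalone sketch of the retry policy in xen_swiotlb_fixup(): start at
 * the segment's natural width (or 64 bits for the high pool) and relax the
 * addressing restriction until the region request succeeds or the ceiling
 * is reached.  The "hypervisor" is stubbed so the loop can run in userspace.
 */
#include <stdbool.h>
#include <stdio.h>

#define MAX_DMA32_BITS 32
#define MAX_DMA64_BITS 64

/* Stub: pretend the hypervisor only satisfies requests of >= 40 bits. */
static int fake_create_contiguous_region(unsigned int dma_bits)
{
	return dma_bits >= 40 ? 0 : -1;
}

static int fixup_one_segment(unsigned int seg_dma_bits, bool high)
{
	unsigned int dma_bits = high ? MAX_DMA64_BITS : seg_dma_bits;
	unsigned int max_dma_bits = high ? MAX_DMA64_BITS : MAX_DMA32_BITS;
	int rc;

	do {
		rc = fake_create_contiguous_region(dma_bits);
	} while (rc && dma_bits++ < max_dma_bits);

	return rc;
}

int main(void)
{
	/* Low pool: widens only up to 32 bits, so this stub never succeeds. */
	printf("low  pool: rc=%d\n", fixup_one_segment(22, false));
	/* High pool: asks for 64 bits up front, so the stub succeeds. */
	printf("high pool: rc=%d\n", fixup_one_segment(22, true));
	return 0;
}

Built with a plain C compiler, this prints rc=-1 for the low pool and rc=0
for the high pool under the stub above, which mirrors why the high pool can
be backed by 64-bit pages while the low pool remains capped at 32 bits.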