@@ -8,7 +8,7 @@ extern int pci_xen_swiotlb_init_late(void);
static inline int pci_xen_swiotlb_init_late(void) { return -ENXIO; }
#endif
-int xen_swiotlb_fixup(void *buf, unsigned long nslabs);
+int xen_swiotlb_fixup(void *buf, unsigned long nslabs, bool high);
int xen_create_contiguous_region(phys_addr_t pstart, unsigned int order,
unsigned int address_bits,
dma_addr_t *dma_handle);
@@ -104,7 +104,7 @@ static int is_xen_swiotlb_buffer(struct device *dev, dma_addr_t dma_addr)
}
#ifdef CONFIG_X86
-int xen_swiotlb_fixup(void *buf, unsigned long nslabs)
+int xen_swiotlb_fixup(void *buf, unsigned long nslabs, bool high)
{
int rc;
unsigned int order = get_order(IO_TLB_SEGSIZE << IO_TLB_SHIFT);
@@ -36,9 +36,9 @@ struct scatterlist;
unsigned long swiotlb_size_or_default(void);
void __init swiotlb_init_remap(bool addressing_limit, unsigned int flags,
- int (*remap)(void *tlb, unsigned long nslabs));
+ int (*remap)(void *tlb, unsigned long nslabs, bool high));
int swiotlb_init_late(size_t size, gfp_t gfp_mask,
- int (*remap)(void *tlb, unsigned long nslabs));
+ int (*remap)(void *tlb, unsigned long nslabs, bool high));
extern void __init swiotlb_update_mem_attributes(void);
phys_addr_t swiotlb_tbl_map_single(struct device *hwdev, phys_addr_t phys,
@@ -245,7 +245,7 @@ static void swiotlb_init_io_tlb_mem(struct io_tlb_mem *mem, phys_addr_t start,
* structures for the software IO TLB used to implement the DMA API.
*/
void __init swiotlb_init_remap(bool addressing_limit, unsigned int flags,
- int (*remap)(void *tlb, unsigned long nslabs))
+ int (*remap)(void *tlb, unsigned long nslabs, bool high))
{
struct io_tlb_mem *mem = &io_tlb_default_mem;
unsigned long nslabs = default_nslabs;
@@ -274,7 +274,7 @@ void __init swiotlb_init_remap(bool addressing_limit, unsigned int flags,
return;
}
- if (remap && remap(tlb, nslabs) < 0) {
+ if (remap && remap(tlb, nslabs, false) < 0) {
memblock_free(tlb, PAGE_ALIGN(bytes));
nslabs = ALIGN(nslabs >> 1, IO_TLB_SEGSIZE);
@@ -307,7 +307,7 @@ void __init swiotlb_init(bool addressing_limit, unsigned int flags)
* This should be just like above, but with some error catching.
*/
int swiotlb_init_late(size_t size, gfp_t gfp_mask,
- int (*remap)(void *tlb, unsigned long nslabs))
+ int (*remap)(void *tlb, unsigned long nslabs, bool high))
{
struct io_tlb_mem *mem = &io_tlb_default_mem;
unsigned long nslabs = ALIGN(size >> IO_TLB_SHIFT, IO_TLB_SEGSIZE);
@@ -337,7 +337,7 @@ int swiotlb_init_late(size_t size, gfp_t gfp_mask,
return -ENOMEM;
if (remap)
- rc = remap(vstart, nslabs);
+ rc = remap(vstart, nslabs, false);
if (rc) {
free_pages((unsigned long)vstart, order);
Add new argument 'high' to the remap function, so that it will be able to
remap the swiotlb buffer based on whether the swiotlb is 32-bit or 64-bit.
Currently the only remap function is xen_swiotlb_fixup().

Cc: Konrad Wilk <konrad.wilk@oracle.com>
Cc: Joe Jin <joe.jin@oracle.com>
Signed-off-by: Dongli Zhang <dongli.zhang@oracle.com>
---
 arch/x86/include/asm/xen/swiotlb-xen.h | 2 +-
 drivers/xen/swiotlb-xen.c              | 2 +-
 include/linux/swiotlb.h                | 4 ++--
 kernel/dma/swiotlb.c                   | 8 ++++----
 4 files changed, 8 insertions(+), 8 deletions(-)