@@ -105,6 +105,7 @@ struct io_tlb_pool {
* struct io_tlb_mem - Software IO TLB allocator
* @pool: IO TLB memory pool descriptor.
* @nslabs: Total number of IO TLB slabs in all pools.
+ * @phys_limit: Maximum allowed physical address.
* @debugfs: The dentry to debugfs.
* @force_bounce: %true if swiotlb bouncing is forced
* @for_alloc: %true if the pool is used for memory allocation
@@ -118,6 +119,7 @@ struct io_tlb_pool {
struct io_tlb_mem {
struct io_tlb_pool *pool;
unsigned long nslabs;
+ u64 phys_limit;
struct dentry *debugfs;
bool force_bounce;
bool for_alloc;
@@ -334,6 +334,10 @@ void __init swiotlb_init_remap(bool addressing_limit, unsigned int flags,
io_tlb_default_mem.force_bounce =
swiotlb_force_bounce || (flags & SWIOTLB_FORCE);
io_tlb_default_mem.can_grow = !remap;
+ if (flags & SWIOTLB_ANY)
+ io_tlb_default_mem.phys_limit = virt_to_phys(high_memory - 1);
+ else
+ io_tlb_default_mem.phys_limit = ARCH_LOW_ADDRESS_LIMIT;

if (!default_nareas)
swiotlb_adjust_nareas(num_possible_cpus());
@@ -402,6 +406,12 @@ int swiotlb_init_late(size_t size, gfp_t gfp_mask,

io_tlb_default_mem.force_bounce = swiotlb_force_bounce;
io_tlb_default_mem.can_grow = !remap;
+ if (IS_ENABLED(CONFIG_ZONE_DMA) && (gfp_mask & __GFP_DMA))
+ io_tlb_default_mem.phys_limit = DMA_BIT_MASK(zone_dma_bits);
+ else if (IS_ENABLED(CONFIG_ZONE_DMA32) && (gfp_mask & __GFP_DMA32))
+ io_tlb_default_mem.phys_limit = DMA_BIT_MASK(32);
+ else
+ io_tlb_default_mem.phys_limit = virt_to_phys(high_memory - 1);

if (!default_nareas)
swiotlb_adjust_nareas(num_possible_cpus());
@@ -1338,7 +1348,7 @@ phys_addr_t default_swiotlb_start(void)
*/
phys_addr_t default_swiotlb_limit(void)
{
- return io_tlb_default_pool.end - 1;
+ return io_tlb_default_mem.phys_limit;
}

#ifdef CONFIG_DEBUG_FS
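For context on how the new field gets used: once SWIOTLB can grow at runtime, any newly allocated pool must respect the recorded limit, which in practice means translating phys_limit back into a GFP zone before allocating pages. The following is a minimal sketch only, not code from this patch; the helper name swiotlb_gfp_for_limit is hypothetical, while struct io_tlb_mem, DMA_BIT_MASK(), zone_dma_bits, __GFP_DMA and __GFP_DMA32 are the kernel definitions already assumed above.

#include <linux/dma-direct.h>	/* zone_dma_bits */
#include <linux/dma-mapping.h>	/* DMA_BIT_MASK() */
#include <linux/gfp.h>
#include <linux/swiotlb.h>	/* struct io_tlb_mem */

/*
 * Hypothetical helper: pick GFP zone flags so that pages backing a
 * newly grown pool end up at or below mem->phys_limit.
 */
static gfp_t swiotlb_gfp_for_limit(struct io_tlb_mem *mem, gfp_t gfp)
{
	/* Limit fits in the small DMA zone: force ZONE_DMA pages. */
	if (IS_ENABLED(CONFIG_ZONE_DMA) &&
	    mem->phys_limit <= DMA_BIT_MASK(zone_dma_bits))
		return gfp | __GFP_DMA;

	/* Limit fits in 32-bit addressing: ZONE_DMA32 is enough. */
	if (IS_ENABLED(CONFIG_ZONE_DMA32) &&
	    mem->phys_limit <= DMA_BIT_MASK(32))
		return gfp | __GFP_DMA32;

	/* No restriction below normal memory: allocate anywhere. */
	return gfp;
}

Note how this mirrors swiotlb_init_late() above in reverse: the init path derives phys_limit from the GFP zone it was handed, and a grow path would derive a GFP zone from phys_limit, keeping the two directions consistent.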