@@ -109,6 +109,7 @@ struct io_tlb_pool {
* @force_bounce: %true if swiotlb bouncing is forced
* @for_alloc: %true if the pool is used for memory allocation
* @can_grow: %true if more pools can be allocated dynamically.
+ * @phys_limit: Maximum allowed physical address.
* @total_used: The total number of slots in the pool that are currently used
* across all areas. Used only for calculating used_hiwater in
* debugfs.
@@ -123,6 +124,7 @@ struct io_tlb_mem {
bool for_alloc;
#ifdef CONFIG_SWIOTLB_DYNAMIC
bool can_grow;
+ u64 phys_limit;
#endif
#ifdef CONFIG_DEBUG_FS
atomic_long_t total_used;
@@ -334,6 +334,10 @@ void __init swiotlb_init_remap(bool addressing_limit, unsigned int flags,
#ifdef CONFIG_SWIOTLB_DYNAMIC
if (!remap)
io_tlb_default_mem.can_grow = true;
+ if (flags & SWIOTLB_ANY)
+ io_tlb_default_mem.phys_limit = virt_to_phys(high_memory - 1);
+ else
+ io_tlb_default_mem.phys_limit = ARCH_LOW_ADDRESS_LIMIT;
#endif

	if (!default_nareas)
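The early-boot policy above reads as follows: with SWIOTLB_ANY the default pool is not restricted to low memory, so the potential limit is the highest direct-mapped physical address, virt_to_phys(high_memory - 1); otherwise dynamically added pools must stay below ARCH_LOW_ADDRESS_LIMIT. A standalone userspace sketch of that selection is shown below; the model_ names and the sample low-address limit are illustrative stand-ins, not kernel API.

/*
 * Userspace model of the phys_limit choice made in swiotlb_init_remap()
 * above. Names prefixed "model_" and the sample low-address limit are
 * assumptions for illustration only.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Sample value; the kernel uses the architecture's ARCH_LOW_ADDRESS_LIMIT. */
#define MODEL_LOW_ADDRESS_LIMIT 0xffffffffULL

static uint64_t model_early_phys_limit(bool swiotlb_any,
				       uint64_t highest_direct_mapped_phys)
{
	/* SWIOTLB_ANY: pools may live anywhere in the direct map. */
	if (swiotlb_any)
		return highest_direct_mapped_phys;
	/* Otherwise new pools must stay below the low-address limit. */
	return MODEL_LOW_ADDRESS_LIMIT;
}

int main(void)
{
	uint64_t top = (1ULL << 34) - 1;	/* e.g. 16 GiB of RAM */

	printf("SWIOTLB_ANY: %#llx, default: %#llx\n",
	       (unsigned long long)model_early_phys_limit(true, top),
	       (unsigned long long)model_early_phys_limit(false, top));
	return 0;
}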
@@ -409,6 +413,12 @@ int swiotlb_init_late(size_t size, gfp_t gfp_mask,
#ifdef CONFIG_SWIOTLB_DYNAMIC
if (!remap)
io_tlb_default_mem.can_grow = true;
+ if (IS_ENABLED(CONFIG_ZONE_DMA) && (gfp_mask & __GFP_DMA))
+ io_tlb_default_mem.phys_limit = DMA_BIT_MASK(zone_dma_bits);
+ else if (IS_ENABLED(CONFIG_ZONE_DMA32) && (gfp_mask & __GFP_DMA32))
+ io_tlb_default_mem.phys_limit = DMA_BIT_MASK(32);
+ else
+ io_tlb_default_mem.phys_limit = virt_to_phys(high_memory - 1);
#endif

	if (!default_nareas)
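swiotlb_init_late() receives a GFP mask rather than swiotlb flags, so the potential limit is derived from the requested zone: __GFP_DMA caps it at DMA_BIT_MASK(zone_dma_bits), __GFP_DMA32 at DMA_BIT_MASK(32), and anything else at the top of the direct map. A compilable sketch of the same decision follows; the flag values and model_ helpers are stand-ins, and zone_dma_bits defaults to 24 in the kernel but is architecture-specific.

/*
 * Userspace model of the phys_limit choice made in swiotlb_init_late()
 * above. Flag values and "model_" helpers are illustrative, not kernel API.
 */
#include <stdint.h>
#include <stdio.h>

#define MODEL_GFP_DMA	0x1u
#define MODEL_GFP_DMA32	0x2u

/* Same idea as the kernel's DMA_BIT_MASK(): lowest @bits bits set. */
static uint64_t model_dma_bit_mask(unsigned int bits)
{
	return bits >= 64 ? ~0ULL : (1ULL << bits) - 1;
}

static uint64_t model_late_phys_limit(unsigned int gfp_mask,
				      unsigned int zone_dma_bits,
				      uint64_t highest_direct_mapped_phys)
{
	if (gfp_mask & MODEL_GFP_DMA)		/* ZONE_DMA allocation */
		return model_dma_bit_mask(zone_dma_bits);
	if (gfp_mask & MODEL_GFP_DMA32)		/* ZONE_DMA32 allocation */
		return model_dma_bit_mask(32);
	return highest_direct_mapped_phys;	/* no zone restriction */
}

int main(void)
{
	uint64_t top = (1ULL << 34) - 1;	/* e.g. 16 GiB of RAM */

	printf("GFP_DMA:    %#llx\n",
	       (unsigned long long)model_late_phys_limit(MODEL_GFP_DMA, 24, top));
	printf("GFP_DMA32:  %#llx\n",
	       (unsigned long long)model_late_phys_limit(MODEL_GFP_DMA32, 24, top));
	printf("GFP_KERNEL: %#llx\n",
	       (unsigned long long)model_late_phys_limit(0, 24, top));
	return 0;
}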
@@ -1390,7 +1400,11 @@ phys_addr_t default_swiotlb_base(void)
*/
phys_addr_t default_swiotlb_limit(void)
{
+#ifdef CONFIG_SWIOTLB_DYNAMIC
+ return io_tlb_default_mem.phys_limit;
+#else
return io_tlb_default_mem.defpool.end - 1;
+#endif
}

#ifdef CONFIG_DEBUG_FS
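With CONFIG_SWIOTLB_DYNAMIC, default_swiotlb_limit() now reports the highest physical address that any pool, including ones allocated later, may occupy, rather than the end of the boot-time default pool. A hypothetical consumer sketch, not taken from this patch: a caller could compare a device's DMA mask against the reported limit to decide whether every possible bounce-buffer address is reachable.

/*
 * Hypothetical consumer of the reported limit; userspace sketch only.
 * Returns true if the device's DMA mask covers every physical address
 * that a dynamically grown swiotlb pool could ever use.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool model_mask_covers_swiotlb(uint64_t dev_dma_mask,
				      uint64_t swiotlb_phys_limit)
{
	return dev_dma_mask >= swiotlb_phys_limit;
}

int main(void)
{
	uint64_t mask32 = (1ULL << 32) - 1;
	uint64_t limit  = (1ULL << 34) - 1;	/* e.g. 16 GiB direct map */

	/* A 32-bit device cannot reach a limit above 4 GiB. */
	printf("32-bit mask covers swiotlb: %s\n",
	       model_mask_covers_swiotlb(mask32, limit) ? "yes" : "no");
	return 0;
}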