
[v2,3/5] mm: shmem: add large folio support for tmpfs

Message ID: eabd8c89fc1b4807eaf28750e04c44b718ae6487.1731397290.git.baolin.wang@linux.alibaba.com
State: New
Series: Support large folios for tmpfs

Commit Message

Baolin Wang Nov. 12, 2024, 7:45 a.m. UTC
Add large folio support for the tmpfs write and fallocate paths, matching the
high-order preference mechanism used by the iomap buffered IO path via
__filemap_get_folio().

Add shmem_mapping_size_orders() to get a hint for the folio orders based on
the file size, while taking the mapping's requirements into account.
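
To make the size-to-orders calculation concrete, here is a rough userspace
model (illustrative only: it assumes 4 KiB pages, MAX_PAGECACHE_ORDER == 9,
and that filemap_get_order() amounts to floor(log2(size)) - PAGE_SHIFT):

/* Userspace model of the write-size hint, for illustration only. */
#include <stdio.h>

#define PAGE_SHIFT		12
#define MAX_PAGECACHE_ORDER	9	/* value assumed for this example */

static unsigned int size_orders(unsigned long index, unsigned long long write_end)
{
	unsigned long long size = write_end - ((unsigned long long)index << PAGE_SHIFT);
	unsigned int order = 0;

	/* floor(log2(size in pages)), standing in for filemap_get_order() */
	while ((2ULL << (order + PAGE_SHIFT)) <= size)
		order++;
	if (!order)
		return 0;

	/* a misaligned index shrinks the folio to the index's alignment */
	if (index & ((1UL << order) - 1))
		order = __builtin_ctzl(index);

	if (order > MAX_PAGECACHE_ORDER)
		order = MAX_PAGECACHE_ORDER;
	return order ? (1U << (order + 1)) - 1 : 0;	/* mask of orders 0..order */
}

int main(void)
{
	/* 128 KiB write at index 0 -> 0x3f, i.e. orders 0..5 are allowable */
	printf("0x%x\n", size_orders(0, 128 * 1024));
	/* same size at misaligned index 2 -> 0x3, i.e. orders 0..1 only */
	printf("0x%x\n", size_orders(2, 8192 + 128 * 1024));
	return 0;
}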

Traditionally, tmpfs only supported PMD-sized huge folios. However, now that
other file systems support large folios of any size and anonymous memory has
been extended to support mTHP, we should not restrict tmpfs to allocating only
PMD-sized huge folios, which makes it a special case. Instead, tmpfs should be
allowed to allocate large folios of any size.

Considering that tmpfs already has the 'huge=' option to control huge folio
allocation, we can extend the 'huge=' option to allow huge folios of any size.
The semantics of the 'huge=' mount option become:

huge=never: no huge folios of any size
huge=always: huge folios of any size
huge=within_size: like 'always', but respect i_size
huge=advise: like 'always' if requested with fadvise()/madvise()

Note: tmpfs mmap() faults lack a write size hint, so they still allocate
PMD-sized huge folios if huge=always/within_size/advise is set.
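
For illustration, the write size hint comes from ordinary buffered writes; a
minimal userspace sketch that would exercise this path (the mount point and
file name are made up; a tmpfs mounted with huge=always is assumed):

/* Sketch only: one large buffered write supplies a write_end hint.
 * Assumes e.g.: mount -t tmpfs -o huge=always tmpfs /mnt/tmp
 */
#include <fcntl.h>
#include <stdlib.h>
#include <unistd.h>

int main(void)
{
	const size_t len = 2UL << 20;	/* a single 2 MiB write */
	char *buf = calloc(1, len);
	int fd = open("/mnt/tmp/testfile", O_CREAT | O_RDWR | O_TRUNC, 0600);

	if (fd < 0 || !buf)
		return 1;
	/* write_end = offset + len, so shmem may pick high folio orders here */
	if (write(fd, buf, len) != (ssize_t)len)
		return 1;
	close(fd);
	free(buf);
	return 0;
}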

Moreover, the 'deny' and 'force' testing options controlled by
'/sys/kernel/mm/transparent_hugepage/shmem_enabled' retain their semantics:
'deny' disables large folios of any size for tmpfs, while 'force' enables
PMD-sized large folios for tmpfs.

Co-developed-by: Daniel Gomez <da.gomez@samsung.com>
Signed-off-by: Daniel Gomez <da.gomez@samsung.com>
Signed-off-by: Baolin Wang <baolin.wang@linux.alibaba.com>
---
 mm/shmem.c | 91 +++++++++++++++++++++++++++++++++++++++++++++---------
 1 file changed, 77 insertions(+), 14 deletions(-)

Comments

David Hildenbrand Nov. 12, 2024, 4:19 p.m. UTC | #1
On 12.11.24 08:45, Baolin Wang wrote:
> Add large folio support for tmpfs write and fallocate paths matching the
> same high order preference mechanism used in the iomap buffered IO path
> as used in __filemap_get_folio().
> 
> Add shmem_mapping_size_orders() to get a hint for the orders of the folio
> based on the file size which takes care of the mapping requirements.
> 
> Traditionally, tmpfs only supported PMD-sized huge folios. However nowadays
> with other file systems supporting any sized large folios, and extending
> anonymous to support mTHP, we should not restrict tmpfs to allocating only
> PMD-sized huge folios, making it more special. Instead, we should allow
> tmpfs can allocate any sized large folios.
> 
> Considering that tmpfs already has the 'huge=' option to control the huge
> folios allocation, we can extend the 'huge=' option to allow any sized huge
> folios. The semantics of the 'huge=' mount option are:
> 
> huge=never: no any sized huge folios
> huge=always: any sized huge folios
> huge=within_size: like 'always' but respect the i_size
> huge=advise: like 'always' if requested with fadvise()/madvise()
> 
> Note: for tmpfs mmap() faults, due to the lack of a write size hint, still
> allocate the PMD-sized huge folios if huge=always/within_size/advise is set.
> 
> Moreover, the 'deny' and 'force' testing options controlled by
> '/sys/kernel/mm/transparent_hugepage/shmem_enabled', still retain the same
> semantics. The 'deny' can disable any sized large folios for tmpfs, while
> the 'force' can enable PMD sized large folios for tmpfs.
> 
> Co-developed-by: Daniel Gomez <da.gomez@samsung.com>
> Signed-off-by: Daniel Gomez <da.gomez@samsung.com>
> Signed-off-by: Baolin Wang <baolin.wang@linux.alibaba.com>
> ---
>   mm/shmem.c | 91 +++++++++++++++++++++++++++++++++++++++++++++---------
>   1 file changed, 77 insertions(+), 14 deletions(-)
> 
> diff --git a/mm/shmem.c b/mm/shmem.c
> index 86b2e417dc6f..a3203cf8860f 100644
> --- a/mm/shmem.c
> +++ b/mm/shmem.c
> @@ -549,10 +549,50 @@ static bool shmem_confirm_swap(struct address_space *mapping,
>   
>   static int shmem_huge __read_mostly = SHMEM_HUGE_NEVER;
>   
> +/**
> + * shmem_mapping_size_orders - Get allowable folio orders for the given file size.
> + * @mapping: Target address_space.
> + * @index: The page index.
> + * @write_end: end of a write, could extend inode size.
> + *
> + * This returns huge orders for folios (when supported) based on the file size
> + * which the mapping currently allows at the given index. The index is relevant
> + * due to alignment considerations the mapping might have. The returned order
> + * may be less than the size passed.
> + *
> + * Return: The orders.
> + */
> +static inline unsigned int
> +shmem_mapping_size_orders(struct address_space *mapping, pgoff_t index, loff_t write_end)
> +{
> +	unsigned int order;
> +	size_t size;
> +
> +	if (!mapping_large_folio_support(mapping) || !write_end)
> +		return 0;
> +
> +	/* Calculate the write size based on the write_end */
> +	size = write_end - (index << PAGE_SHIFT);
> +	order = filemap_get_order(size);
> +	if (!order)
> +		return 0;
> +
> +	/* If we're not aligned, allocate a smaller folio */
> +	if (index & ((1UL << order) - 1))
> +		order = __ffs(index);
> +
> +	order = min_t(size_t, order, MAX_PAGECACHE_ORDER);
> +	return order > 0 ? BIT(order + 1) - 1 : 0;
> +}
> +
>   static unsigned int shmem_huge_global_enabled(struct inode *inode, pgoff_t index,
>   					      loff_t write_end, bool shmem_huge_force,
> +					      struct vm_area_struct *vma,
>   					      unsigned long vm_flags)
>   {
> +	unsigned long within_size_orders;
> +	unsigned int order;
> +	pgoff_t aligned_index;
>   	loff_t i_size;
>   
>   	if (HPAGE_PMD_ORDER > MAX_PAGECACHE_ORDER)

We can allow all orders up to MAX_PAGECACHE_ORDER;
shmem_mapping_size_orders() handles it properly.

So maybe we should drop this condition and, below where we have

return BIT(HPAGE_PMD_ORDER);

instead use something like

return HPAGE_PMD_ORDER > MAX_PAGECACHE_ORDER ? 0 : BIT(HPAGE_PMD_ORDER);

Ideally, factoring it out somehow:

int maybe_pmd_order = HPAGE_PMD_ORDER > MAX_PAGECACHE_ORDER ? 0 : BIT(HPAGE_PMD_ORDER);

...

return maybe_pmd_order;
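
Put together, a minimal sketch of that factoring (the helper name below is
invented for illustration; it is not from the posted patch):

/* Sketch only: factor the MAX_PAGECACHE_ORDER check into one place. */
static unsigned int shmem_maybe_pmd_order(void)
{
	/* Hand out the PMD order only when the page cache can hold it */
	return HPAGE_PMD_ORDER > MAX_PAGECACHE_ORDER ? 0 : BIT(HPAGE_PMD_ORDER);
}

Call sites that currently return BIT(HPAGE_PMD_ORDER) unconditionally would
then return shmem_maybe_pmd_order() instead.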

> @@ -564,15 +604,41 @@ static unsigned int shmem_huge_global_enabled(struct inode *inode, pgoff_t index
>   	if (shmem_huge_force || shmem_huge == SHMEM_HUGE_FORCE)
>   		return BIT(HPAGE_PMD_ORDER);

Why not force-enable all orders (of course, respecting 
MAX_PAGECACHE_ORDER and possibly VMA)?

>   
> +	/*
> +	 * The huge order allocation for anon shmem is controlled through
> +	 * the mTHP interface, so we still use PMD-sized huge order to
> +	 * check whether global control is enabled.
> +	 *
> +	 * For tmpfs mmap()'s huge order, we still use PMD-sized order to
> +	 * allocate huge pages due to lack of a write size hint.
> +	 *
> +	 * Otherwise, tmpfs will allow getting a highest order hint based on
> +	 * the size of write and fallocate paths, then will try each allowable
> +	 * huge orders.
> +	 */
>   	switch (SHMEM_SB(inode->i_sb)->huge) {
>   	case SHMEM_HUGE_ALWAYS:
> -		return BIT(HPAGE_PMD_ORDER);
> -	case SHMEM_HUGE_WITHIN_SIZE:
> -		index = round_up(index + 1, HPAGE_PMD_NR);
> -		i_size = max(write_end, i_size_read(inode));
> -		i_size = round_up(i_size, PAGE_SIZE);
> -		if (i_size >> PAGE_SHIFT >= index)
> +		if (vma)
>   			return BIT(HPAGE_PMD_ORDER);
> +
> +		return shmem_mapping_size_orders(inode->i_mapping, index, write_end);
> +	case SHMEM_HUGE_WITHIN_SIZE:
> +		if (vma)
> +			within_size_orders = BIT(HPAGE_PMD_ORDER);
> +		else
> +			within_size_orders = shmem_mapping_size_orders(inode->i_mapping,
> +								       index, write_end);
> +
> +		order = highest_order(within_size_orders);
> +		while (within_size_orders) {
> +			aligned_index = round_up(index + 1, 1 << order);
> +			i_size = max(write_end, i_size_read(inode));
> +			i_size = round_up(i_size, PAGE_SIZE);
> +			if (i_size >> PAGE_SHIFT >= aligned_index)
> +				return within_size_orders;
> +
> +			order = next_order(&within_size_orders, order);
> +		}
>   		fallthrough;
>   	case SHMEM_HUGE_ADVISE:
>   		if (vm_flags & VM_HUGEPAGE)

I think the point here is that "write" -> no VMA -> vm_flags == 0 -> no 
code changes needed :)
David Hildenbrand Nov. 12, 2024, 4:21 p.m. UTC | #2
On 12.11.24 17:19, David Hildenbrand wrote:
> On 12.11.24 08:45, Baolin Wang wrote:
>> Add large folio support for tmpfs write and fallocate paths matching the
>> same high order preference mechanism used in the iomap buffered IO path
>> as used in __filemap_get_folio().
>>
>> Add shmem_mapping_size_orders() to get a hint for the orders of the folio
>> based on the file size which takes care of the mapping requirements.
>>
>> Traditionally, tmpfs only supported PMD-sized huge folios. However nowadays
>> with other file systems supporting any sized large folios, and extending
>> anonymous to support mTHP, we should not restrict tmpfs to allocating only
>> PMD-sized huge folios, making it more special. Instead, we should allow
>> tmpfs can allocate any sized large folios.
>>
>> Considering that tmpfs already has the 'huge=' option to control the huge
>> folios allocation, we can extend the 'huge=' option to allow any sized huge
>> folios. The semantics of the 'huge=' mount option are:
>>
>> huge=never: no any sized huge folios
>> huge=always: any sized huge folios
>> huge=within_size: like 'always' but respect the i_size
>> huge=advise: like 'always' if requested with fadvise()/madvise()
>>
>> Note: for tmpfs mmap() faults, due to the lack of a write size hint, still
>> allocate the PMD-sized huge folios if huge=always/within_size/advise is set.
>>
>> Moreover, the 'deny' and 'force' testing options controlled by
>> '/sys/kernel/mm/transparent_hugepage/shmem_enabled', still retain the same
>> semantics. The 'deny' can disable any sized large folios for tmpfs, while
>> the 'force' can enable PMD sized large folios for tmpfs.
>>
>> Co-developed-by: Daniel Gomez <da.gomez@samsung.com>
>> Signed-off-by: Daniel Gomez <da.gomez@samsung.com>
>> Signed-off-by: Baolin Wang <baolin.wang@linux.alibaba.com>
>> ---
>>    mm/shmem.c | 91 +++++++++++++++++++++++++++++++++++++++++++++---------
>>    1 file changed, 77 insertions(+), 14 deletions(-)
>>
>> diff --git a/mm/shmem.c b/mm/shmem.c
>> index 86b2e417dc6f..a3203cf8860f 100644
>> --- a/mm/shmem.c
>> +++ b/mm/shmem.c
>> @@ -549,10 +549,50 @@ static bool shmem_confirm_swap(struct address_space *mapping,
>>    
>>    static int shmem_huge __read_mostly = SHMEM_HUGE_NEVER;
>>    
>> +/**
>> + * shmem_mapping_size_orders - Get allowable folio orders for the given file size.
>> + * @mapping: Target address_space.
>> + * @index: The page index.
>> + * @write_end: end of a write, could extend inode size.
>> + *
>> + * This returns huge orders for folios (when supported) based on the file size
>> + * which the mapping currently allows at the given index. The index is relevant
>> + * due to alignment considerations the mapping might have. The returned order
>> + * may be less than the size passed.
>> + *
>> + * Return: The orders.
>> + */
>> +static inline unsigned int
>> +shmem_mapping_size_orders(struct address_space *mapping, pgoff_t index, loff_t write_end)
>> +{
>> +	unsigned int order;
>> +	size_t size;
>> +
>> +	if (!mapping_large_folio_support(mapping) || !write_end)
>> +		return 0;
>> +
>> +	/* Calculate the write size based on the write_end */
>> +	size = write_end - (index << PAGE_SHIFT);
>> +	order = filemap_get_order(size);
>> +	if (!order)
>> +		return 0;
>> +
>> +	/* If we're not aligned, allocate a smaller folio */
>> +	if (index & ((1UL << order) - 1))
>> +		order = __ffs(index);
>> +
>> +	order = min_t(size_t, order, MAX_PAGECACHE_ORDER);
>> +	return order > 0 ? BIT(order + 1) - 1 : 0;
>> +}
>> +
>>    static unsigned int shmem_huge_global_enabled(struct inode *inode, pgoff_t index,
>>    					      loff_t write_end, bool shmem_huge_force,
>> +					      struct vm_area_struct *vma,
>>    					      unsigned long vm_flags)
>>    {
>> +	unsigned long within_size_orders;
>> +	unsigned int order;
>> +	pgoff_t aligned_index;
>>    	loff_t i_size;
>>    
>>    	if (HPAGE_PMD_ORDER > MAX_PAGECACHE_ORDER)
> 
> We can allow all orders up to MAX_PAGECACHE_ORDER,
> shmem_mapping_size_orders() handles it properly.
> 
> So maybe we should drop this condition and use instead below where we have
> 
> return BIT(HPAGE_PMD_ORDER);
> 
> instead something like.
> 
> return HPAGE_PMD_ORDER > MAX_PAGECACHE_ORDER ? 0 : BIT(HPAGE_PMD_ORDER);
> 
> Ideally, factoring it out somehow
> 
> 
> int maybe_pmd_order = HPAGE_PMD_ORDER > MAX_PAGECACHE_ORDER ? 0 :
> BIT(HPAGE_PMD_ORDER);
> 
> ...
> 
> return maybe_pmd_order;
> 
>> @@ -564,15 +604,41 @@ static unsigned int shmem_huge_global_enabled(struct inode *inode, pgoff_t index
>>    	if (shmem_huge_force || shmem_huge == SHMEM_HUGE_FORCE)
>>    		return BIT(HPAGE_PMD_ORDER);
> 
> Why not force-enable all orders (of course, respecting
> MAX_PAGECACHE_ORDER and possibly VMA)?
> 
>>    
>> +	/*
>> +	 * The huge order allocation for anon shmem is controlled through
>> +	 * the mTHP interface, so we still use PMD-sized huge order to
>> +	 * check whether global control is enabled.
>> +	 *
>> +	 * For tmpfs mmap()'s huge order, we still use PMD-sized order to
>> +	 * allocate huge pages due to lack of a write size hint.
>> +	 *
>> +	 * Otherwise, tmpfs will allow getting a highest order hint based on
>> +	 * the size of write and fallocate paths, then will try each allowable
>> +	 * huge orders.
>> +	 */
>>    	switch (SHMEM_SB(inode->i_sb)->huge) {
>>    	case SHMEM_HUGE_ALWAYS:
>> -		return BIT(HPAGE_PMD_ORDER);
>> -	case SHMEM_HUGE_WITHIN_SIZE:
>> -		index = round_up(index + 1, HPAGE_PMD_NR);
>> -		i_size = max(write_end, i_size_read(inode));
>> -		i_size = round_up(i_size, PAGE_SIZE);
>> -		if (i_size >> PAGE_SHIFT >= index)
>> +		if (vma)
>>    			return BIT(HPAGE_PMD_ORDER);
>> +
>> +		return shmem_mapping_size_orders(inode->i_mapping, index, write_end);
>> +	case SHMEM_HUGE_WITHIN_SIZE:
>> +		if (vma)
>> +			within_size_orders = BIT(HPAGE_PMD_ORDER);
>> +		else
>> +			within_size_orders = shmem_mapping_size_orders(inode->i_mapping,
>> +								       index, write_end);
>> +
>> +		order = highest_order(within_size_orders);
>> +		while (within_size_orders) {
>> +			aligned_index = round_up(index + 1, 1 << order);
>> +			i_size = max(write_end, i_size_read(inode));
>> +			i_size = round_up(i_size, PAGE_SIZE);
>> +			if (i_size >> PAGE_SHIFT >= aligned_index)
>> +				return within_size_orders;
>> +
>> +			order = next_order(&within_size_orders, order);
>> +		}
>>    		fallthrough;
>>    	case SHMEM_HUGE_ADVISE:
>>    		if (vm_flags & VM_HUGEPAGE)
> 
> I think the point here is that "write" -> no VMA -> vm_flags == 0 -> no
> code changes needed :)

... and now I wonder about the documented "fadvise", because this here is 
only concerned with madvise?
Baolin Wang Nov. 13, 2024, 3:07 a.m. UTC | #3
On 2024/11/13 00:19, David Hildenbrand wrote:
> On 12.11.24 08:45, Baolin Wang wrote:
>> Add large folio support for tmpfs write and fallocate paths matching the
>> same high order preference mechanism used in the iomap buffered IO path
>> as used in __filemap_get_folio().
>>
>> Add shmem_mapping_size_orders() to get a hint for the orders of the folio
>> based on the file size which takes care of the mapping requirements.
>>
>> Traditionally, tmpfs only supported PMD-sized huge folios. However 
>> nowadays
>> with other file systems supporting any sized large folios, and extending
>> anonymous to support mTHP, we should not restrict tmpfs to allocating 
>> only
>> PMD-sized huge folios, making it more special. Instead, we should allow
>> tmpfs can allocate any sized large folios.
>>
>> Considering that tmpfs already has the 'huge=' option to control the huge
>> folios allocation, we can extend the 'huge=' option to allow any sized 
>> huge
>> folios. The semantics of the 'huge=' mount option are:
>>
>> huge=never: no any sized huge folios
>> huge=always: any sized huge folios
>> huge=within_size: like 'always' but respect the i_size
>> huge=advise: like 'always' if requested with fadvise()/madvise()
>>
>> Note: for tmpfs mmap() faults, due to the lack of a write size hint, 
>> still
>> allocate the PMD-sized huge folios if huge=always/within_size/advise 
>> is set.
>>
>> Moreover, the 'deny' and 'force' testing options controlled by
>> '/sys/kernel/mm/transparent_hugepage/shmem_enabled', still retain the 
>> same
>> semantics. The 'deny' can disable any sized large folios for tmpfs, while
>> the 'force' can enable PMD sized large folios for tmpfs.
>>
>> Co-developed-by: Daniel Gomez <da.gomez@samsung.com>
>> Signed-off-by: Daniel Gomez <da.gomez@samsung.com>
>> Signed-off-by: Baolin Wang <baolin.wang@linux.alibaba.com>
>> ---
>>   mm/shmem.c | 91 +++++++++++++++++++++++++++++++++++++++++++++---------
>>   1 file changed, 77 insertions(+), 14 deletions(-)
>>
>> diff --git a/mm/shmem.c b/mm/shmem.c
>> index 86b2e417dc6f..a3203cf8860f 100644
>> --- a/mm/shmem.c
>> +++ b/mm/shmem.c
>> @@ -549,10 +549,50 @@ static bool shmem_confirm_swap(struct 
>> address_space *mapping,
>>   static int shmem_huge __read_mostly = SHMEM_HUGE_NEVER;
>> +/**
>> + * shmem_mapping_size_orders - Get allowable folio orders for the 
>> given file size.
>> + * @mapping: Target address_space.
>> + * @index: The page index.
>> + * @write_end: end of a write, could extend inode size.
>> + *
>> + * This returns huge orders for folios (when supported) based on the 
>> file size
>> + * which the mapping currently allows at the given index. The index 
>> is relevant
>> + * due to alignment considerations the mapping might have. The 
>> returned order
>> + * may be less than the size passed.
>> + *
>> + * Return: The orders.
>> + */
>> +static inline unsigned int
>> +shmem_mapping_size_orders(struct address_space *mapping, pgoff_t 
>> index, loff_t write_end)
>> +{
>> +    unsigned int order;
>> +    size_t size;
>> +
>> +    if (!mapping_large_folio_support(mapping) || !write_end)
>> +        return 0;
>> +
>> +    /* Calculate the write size based on the write_end */
>> +    size = write_end - (index << PAGE_SHIFT);
>> +    order = filemap_get_order(size);
>> +    if (!order)
>> +        return 0;
>> +
>> +    /* If we're not aligned, allocate a smaller folio */
>> +    if (index & ((1UL << order) - 1))
>> +        order = __ffs(index);
>> +
>> +    order = min_t(size_t, order, MAX_PAGECACHE_ORDER);
>> +    return order > 0 ? BIT(order + 1) - 1 : 0;
>> +}
>> +
>>   static unsigned int shmem_huge_global_enabled(struct inode *inode, 
>> pgoff_t index,
>>                             loff_t write_end, bool shmem_huge_force,
>> +                          struct vm_area_struct *vma,
>>                             unsigned long vm_flags)
>>   {
>> +    unsigned long within_size_orders;
>> +    unsigned int order;
>> +    pgoff_t aligned_index;
>>       loff_t i_size;
>>       if (HPAGE_PMD_ORDER > MAX_PAGECACHE_ORDER)
> 
> We can allow all orders up to MAX_PAGECACHE_ORDER, 
> shmem_mapping_size_orders() handles it properly.
> 
> So maybe we should drop this condition and use instead below where we have
> 
> return BIT(HPAGE_PMD_ORDER);
> 
> instead something like.
> 
> return HPAGE_PMD_ORDER > MAX_PAGECACHE_ORDER ? 0 : BIT(HPAGE_PMD_ORDER);
> 
> Ideally, factoring it out somehow
> 
> 
> int maybe_pmd_order = HPAGE_PMD_ORDER > MAX_PAGECACHE_ORDER ? 0 : 
> BIT(HPAGE_PMD_ORDER);
> 
> ...
> 
> return maybe_pmd_order;

Good point. Will do.

> 
>> @@ -564,15 +604,41 @@ static unsigned int 
>> shmem_huge_global_enabled(struct inode *inode, pgoff_t index
>>       if (shmem_huge_force || shmem_huge == SHMEM_HUGE_FORCE)
>>           return BIT(HPAGE_PMD_ORDER);
> 
> Why not force-enable all orders (of course, respecting 
> MAX_PAGECACHE_ORDER and possibly VMA)?

The 'force' option will affect tmpfs mmap()'s huge allocation, which I intend 
to handle in a separate patch, as we discussed. Additionally, for tmpfs 
mmap()'s huge page allocation, I am also considering a readahead-like 
approach for the page cache.

>> +    /*
>> +     * The huge order allocation for anon shmem is controlled through
>> +     * the mTHP interface, so we still use PMD-sized huge order to
>> +     * check whether global control is enabled.
>> +     *
>> +     * For tmpfs mmap()'s huge order, we still use PMD-sized order to
>> +     * allocate huge pages due to lack of a write size hint.
>> +     *
>> +     * Otherwise, tmpfs will allow getting a highest order hint based on
>> +     * the size of write and fallocate paths, then will try each 
>> allowable
>> +     * huge orders.
>> +     */
>>       switch (SHMEM_SB(inode->i_sb)->huge) {
>>       case SHMEM_HUGE_ALWAYS:
>> -        return BIT(HPAGE_PMD_ORDER);
>> -    case SHMEM_HUGE_WITHIN_SIZE:
>> -        index = round_up(index + 1, HPAGE_PMD_NR);
>> -        i_size = max(write_end, i_size_read(inode));
>> -        i_size = round_up(i_size, PAGE_SIZE);
>> -        if (i_size >> PAGE_SHIFT >= index)
>> +        if (vma)
>>               return BIT(HPAGE_PMD_ORDER);
>> +
>> +        return shmem_mapping_size_orders(inode->i_mapping, index, 
>> write_end);
>> +    case SHMEM_HUGE_WITHIN_SIZE:
>> +        if (vma)
>> +            within_size_orders = BIT(HPAGE_PMD_ORDER);
>> +        else
>> +            within_size_orders = 
>> shmem_mapping_size_orders(inode->i_mapping,
>> +                                       index, write_end);
>> +
>> +        order = highest_order(within_size_orders);
>> +        while (within_size_orders) {
>> +            aligned_index = round_up(index + 1, 1 << order);
>> +            i_size = max(write_end, i_size_read(inode));
>> +            i_size = round_up(i_size, PAGE_SIZE);
>> +            if (i_size >> PAGE_SHIFT >= aligned_index)
>> +                return within_size_orders;
>> +
>> +            order = next_order(&within_size_orders, order);
>> +        }
>>           fallthrough;
>>       case SHMEM_HUGE_ADVISE:
>>           if (vm_flags & VM_HUGEPAGE)
> 
> I think the point here is that "write" -> no VMA -> vm_flags == 0 -> no 
> code changes needed :)

Yes. Currently fadvise() has no HUGEPAGE handling, so I will drop 
'fadvise' from the doc.
David Hildenbrand Nov. 15, 2024, 1:48 p.m. UTC | #4
>>>            return BIT(HPAGE_PMD_ORDER);
>>
>> Why not force-enable all orders (of course, respecting
>> MAX_PAGECACHE_ORDER and possibly VMA)?
> 
> The ‘force’ option will affect the tmpfs mmap()'s huge allocation, which
> I intend to handle in a separate patch as we discussed. Additionally,
> for the huge page allocation of tmpfs mmap(), I am also considering the
> readahead approach for the pagecache.

Okay, we can change this later. Likely force/deny are a blast from the 
past either way.

[...]

>>> +
>>> +        order = highest_order(within_size_orders);
>>> +        while (within_size_orders) {
>>> +            aligned_index = round_up(index + 1, 1 << order);
>>> +            i_size = max(write_end, i_size_read(inode));
>>> +            i_size = round_up(i_size, PAGE_SIZE);
>>> +            if (i_size >> PAGE_SHIFT >= aligned_index)
>>> +                return within_size_orders;
>>> +
>>> +            order = next_order(&within_size_orders, order);
>>> +        }
>>>            fallthrough;
>>>        case SHMEM_HUGE_ADVISE:
>>>            if (vm_flags & VM_HUGEPAGE)
>>
>> I think the point here is that "write" -> no VMA -> vm_flags == 0 -> no
>> code changes needed :)
> 
> Yes. Currently the fadvise() have no HUGEPAGE handling, so I will drop
> the 'fadvise' in the doc.

Interesting that we documented it :)

Patch

diff --git a/mm/shmem.c b/mm/shmem.c
index 86b2e417dc6f..a3203cf8860f 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -549,10 +549,50 @@  static bool shmem_confirm_swap(struct address_space *mapping,
 
 static int shmem_huge __read_mostly = SHMEM_HUGE_NEVER;
 
+/**
+ * shmem_mapping_size_orders - Get allowable folio orders for the given file size.
+ * @mapping: Target address_space.
+ * @index: The page index.
+ * @write_end: end of a write, could extend inode size.
+ *
+ * This returns huge orders for folios (when supported) based on the file size
+ * which the mapping currently allows at the given index. The index is relevant
+ * due to alignment considerations the mapping might have. The returned order
+ * may be less than the size passed.
+ *
+ * Return: The orders.
+ */
+static inline unsigned int
+shmem_mapping_size_orders(struct address_space *mapping, pgoff_t index, loff_t write_end)
+{
+	unsigned int order;
+	size_t size;
+
+	if (!mapping_large_folio_support(mapping) || !write_end)
+		return 0;
+
+	/* Calculate the write size based on the write_end */
+	size = write_end - (index << PAGE_SHIFT);
+	order = filemap_get_order(size);
+	if (!order)
+		return 0;
+
+	/* If we're not aligned, allocate a smaller folio */
+	if (index & ((1UL << order) - 1))
+		order = __ffs(index);
+
+	order = min_t(size_t, order, MAX_PAGECACHE_ORDER);
+	return order > 0 ? BIT(order + 1) - 1 : 0;
+}
+
 static unsigned int shmem_huge_global_enabled(struct inode *inode, pgoff_t index,
 					      loff_t write_end, bool shmem_huge_force,
+					      struct vm_area_struct *vma,
 					      unsigned long vm_flags)
 {
+	unsigned long within_size_orders;
+	unsigned int order;
+	pgoff_t aligned_index;
 	loff_t i_size;
 
 	if (HPAGE_PMD_ORDER > MAX_PAGECACHE_ORDER)
@@ -564,15 +604,41 @@  static unsigned int shmem_huge_global_enabled(struct inode *inode, pgoff_t index
 	if (shmem_huge_force || shmem_huge == SHMEM_HUGE_FORCE)
 		return BIT(HPAGE_PMD_ORDER);
 
+	/*
+	 * The huge order allocation for anon shmem is controlled through
+	 * the mTHP interface, so we still use PMD-sized huge order to
+	 * check whether global control is enabled.
+	 *
+	 * For tmpfs mmap()'s huge order, we still use PMD-sized order to
+	 * allocate huge pages due to lack of a write size hint.
+	 *
+	 * Otherwise, tmpfs will allow getting a highest order hint based on
+	 * the size of write and fallocate paths, then will try each allowable
+	 * huge orders.
+	 */
 	switch (SHMEM_SB(inode->i_sb)->huge) {
 	case SHMEM_HUGE_ALWAYS:
-		return BIT(HPAGE_PMD_ORDER);
-	case SHMEM_HUGE_WITHIN_SIZE:
-		index = round_up(index + 1, HPAGE_PMD_NR);
-		i_size = max(write_end, i_size_read(inode));
-		i_size = round_up(i_size, PAGE_SIZE);
-		if (i_size >> PAGE_SHIFT >= index)
+		if (vma)
 			return BIT(HPAGE_PMD_ORDER);
+
+		return shmem_mapping_size_orders(inode->i_mapping, index, write_end);
+	case SHMEM_HUGE_WITHIN_SIZE:
+		if (vma)
+			within_size_orders = BIT(HPAGE_PMD_ORDER);
+		else
+			within_size_orders = shmem_mapping_size_orders(inode->i_mapping,
+								       index, write_end);
+
+		order = highest_order(within_size_orders);
+		while (within_size_orders) {
+			aligned_index = round_up(index + 1, 1 << order);
+			i_size = max(write_end, i_size_read(inode));
+			i_size = round_up(i_size, PAGE_SIZE);
+			if (i_size >> PAGE_SHIFT >= aligned_index)
+				return within_size_orders;
+
+			order = next_order(&within_size_orders, order);
+		}
 		fallthrough;
 	case SHMEM_HUGE_ADVISE:
 		if (vm_flags & VM_HUGEPAGE)
@@ -776,6 +842,7 @@  static unsigned long shmem_unused_huge_shrink(struct shmem_sb_info *sbinfo,
 
 static unsigned int shmem_huge_global_enabled(struct inode *inode, pgoff_t index,
 					      loff_t write_end, bool shmem_huge_force,
+					      struct vm_area_struct *vma,
 					      unsigned long vm_flags)
 {
 	return 0;
@@ -1173,7 +1240,7 @@  static int shmem_getattr(struct mnt_idmap *idmap,
 	generic_fillattr(idmap, request_mask, inode, stat);
 	inode_unlock_shared(inode);
 
-	if (shmem_huge_global_enabled(inode, 0, 0, false, 0))
+	if (shmem_huge_global_enabled(inode, 0, 0, false, NULL, 0))
 		stat->blksize = HPAGE_PMD_SIZE;
 
 	if (request_mask & STATX_BTIME) {
@@ -1690,14 +1757,10 @@  unsigned long shmem_allowable_huge_orders(struct inode *inode,
 		return 0;
 
 	global_orders = shmem_huge_global_enabled(inode, index, write_end,
-						  shmem_huge_force, vm_flags);
-	if (!vma || !vma_is_anon_shmem(vma)) {
-		/*
-		 * For tmpfs, we now only support PMD sized THP if huge page
-		 * is enabled, otherwise fallback to order 0.
-		 */
+						  shmem_huge_force, vma, vm_flags);
+	/* Tmpfs huge pages allocation */
+	if (!vma || !vma_is_anon_shmem(vma))
 		return global_orders;
-	}
 
 	/*
 	 * Following the 'deny' semantics of the top level, force the huge