[2/2] mm: fix spelling mistakes under directory mm

Message ID 20210517065155.7257-3-thunder.leizhen@huawei.com (mailing list archive)
State New, archived
Series mm: clear spelling mistakes

Commit Message

Leizhen (ThunderTown) May 17, 2021, 6:51 a.m. UTC
Fix some spelling mistakes in comments:
posion ==> poison
higer ==> higher
precisly ==> precisely
wont ==> won't
tha ==> the
endianess ==> endianness

Signed-off-by: Zhen Lei <thunder.leizhen@huawei.com>
---
 mm/internal.h       | 2 +-
 mm/memory-failure.c | 2 +-
 mm/memory_hotplug.c | 4 ++--
 mm/page_alloc.c     | 2 +-
 mm/swap.c           | 2 +-
 mm/swapfile.c       | 2 +-
 6 files changed, 7 insertions(+), 7 deletions(-)

Comments

Souptick Joarder May 18, 2021, 7:41 p.m. UTC | #1
On Mon, May 17, 2021 at 12:22 PM Zhen Lei <thunder.leizhen@huawei.com> wrote:
>
> Fix some spelling mistakes in comments:
> posion ==> poison
> higer ==> higher
> precisly ==> precisely
> wont ==> won't
> tha ==> the
> endianess ==> endianness
>
> Signed-off-by: Zhen Lei <thunder.leizhen@huawei.com>

This can be merged with [1/2].
> ---
>  mm/internal.h       | 2 +-
>  mm/memory-failure.c | 2 +-
>  mm/memory_hotplug.c | 4 ++--
>  mm/page_alloc.c     | 2 +-
>  mm/swap.c           | 2 +-
>  mm/swapfile.c       | 2 +-
>  6 files changed, 7 insertions(+), 7 deletions(-)
>
> diff --git a/mm/internal.h b/mm/internal.h
> index 54bd0dc2c23c..e64e72782978 100644
> --- a/mm/internal.h
> +++ b/mm/internal.h
> @@ -100,7 +100,7 @@ static inline void set_page_refcounted(struct page *page)
>   * When kernel touch the user page, the user page may be have been marked
>   * poison but still mapped in user space, if without this page, the kernel
>   * can guarantee the data integrity and operation success, the kernel is
> - * better to check the posion status and avoid touching it, be good not to
> + * better to check the poison status and avoid touching it, be good not to
>   * panic, coredump for process fatal signal is a sample case matching this
>   * scenario. Or if kernel can't guarantee the data integrity, it's better
>   * not to call this function, let kernel touch the poison page and get to
> diff --git a/mm/memory-failure.c b/mm/memory-failure.c
> index 85ad98c00fd9..8e06c6998fb9 100644
> --- a/mm/memory-failure.c
> +++ b/mm/memory-failure.c
> @@ -1134,7 +1134,7 @@ static bool hwpoison_user_mappings(struct page *p, unsigned long pfn,
>                          * could potentially call huge_pmd_unshare.  Because of
>                          * this, take semaphore in write mode here and set
>                          * TTU_RMAP_LOCKED to indicate we have taken the lock
> -                        * at this higer level.
> +                        * at this higher level.
>                          */
>                         mapping = hugetlb_page_mapping_lock_write(hpage);
>                         if (mapping) {
> diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
> index 70620d0dd923..02f8073a364f 100644
> --- a/mm/memory_hotplug.c
> +++ b/mm/memory_hotplug.c
> @@ -913,7 +913,7 @@ int __ref online_pages(unsigned long pfn, unsigned long nr_pages, struct zone *z
>
>         /*
>          * {on,off}lining is constrained to full memory sections (or more
> -        * precisly to memory blocks from the user space POV).
> +        * precisely to memory blocks from the user space POV).
>          * memmap_on_memory is an exception because it reserves initial part
>          * of the physical memory space for vmemmaps. That space is pageblock
>          * aligned.
> @@ -1703,7 +1703,7 @@ int __ref offline_pages(unsigned long start_pfn, unsigned long nr_pages)
>
>         /*
>          * {on,off}lining is constrained to full memory sections (or more
> -        * precisly to memory blocks from the user space POV).
> +        * precisely to memory blocks from the user space POV).
>          * memmap_on_memory is an exception because it reserves initial part
>          * of the physical memory space for vmemmaps. That space is pageblock
>          * aligned.
> diff --git a/mm/page_alloc.c b/mm/page_alloc.c
> index aaa1655cf682..a16f8f3f9e80 100644
> --- a/mm/page_alloc.c
> +++ b/mm/page_alloc.c
> @@ -3103,7 +3103,7 @@ static void __drain_all_pages(struct zone *zone, bool force_all_cpus)
>         int cpu;
>
>         /*
> -        * Allocate in the BSS so we wont require allocation in
> +        * Allocate in the BSS so we won't require allocation in
>          * direct reclaim path for CONFIG_CPUMASK_OFFSTACK=y
>          */
>         static cpumask_t cpus_with_pcps;
> diff --git a/mm/swap.c b/mm/swap.c
> index dfb48cf9c2c9..d35b8d615248 100644
> --- a/mm/swap.c
> +++ b/mm/swap.c
> @@ -554,7 +554,7 @@ static void lru_deactivate_file_fn(struct page *page, struct lruvec *lruvec)
>         } else {
>                 /*
>                  * The page's writeback ends up during pagevec
> -                * We moves tha page into tail of inactive.
> +                * We moves the page into tail of inactive.

*we move the page into tail of inactive*

>                  */
>                 add_page_to_lru_list_tail(page, lruvec);
>                 __count_vm_events(PGROTATED, nr_pages);
> diff --git a/mm/swapfile.c b/mm/swapfile.c
> index 149e77454e3c..88a6f01cfb88 100644
> --- a/mm/swapfile.c
> +++ b/mm/swapfile.c
> @@ -2972,7 +2972,7 @@ static unsigned long read_swap_header(struct swap_info_struct *p,
>                 return 0;
>         }
>
> -       /* swap partition endianess hack... */
> +       /* swap partition endianness hack... */
>         if (swab32(swap_header->info.version) == 1) {
>                 swab32s(&swap_header->info.version);
>                 swab32s(&swap_header->info.last_page);
> --
> 2.25.1
Leizhen (ThunderTown) May 19, 2021, 1:43 a.m. UTC | #2
On 2021/5/19 3:41, Souptick Joarder wrote:
> On Mon, May 17, 2021 at 12:22 PM Zhen Lei <thunder.leizhen@huawei.com> wrote:
>>
>> Fix some spelling mistakes in comments:
>> posion ==> poison
>> higer ==> higher
>> precisly ==> precisely
>> wont ==> won't
>> tha ==> the
>> endianess ==> endianness
>>
>> Signed-off-by: Zhen Lei <thunder.leizhen@huawei.com>
> 
> This can be merged with [1/2].

OK, I will send V2.

>> [...]
>> diff --git a/mm/swap.c b/mm/swap.c
>> index dfb48cf9c2c9..d35b8d615248 100644
>> --- a/mm/swap.c
>> +++ b/mm/swap.c
>> @@ -554,7 +554,7 @@ static void lru_deactivate_file_fn(struct page *page, struct lruvec *lruvec)
>>         } else {
>>                 /*
>>                  * The page's writeback ends up during pagevec
>> -                * We moves tha page into tail of inactive.
>> +                * We moves the page into tail of inactive.
> 
> *we move the page into tail of inactive*

Good eye!

Patch

diff --git a/mm/internal.h b/mm/internal.h
index 54bd0dc2c23c..e64e72782978 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -100,7 +100,7 @@ static inline void set_page_refcounted(struct page *page)
  * When kernel touch the user page, the user page may be have been marked
  * poison but still mapped in user space, if without this page, the kernel
  * can guarantee the data integrity and operation success, the kernel is
- * better to check the posion status and avoid touching it, be good not to
+ * better to check the poison status and avoid touching it, be good not to
  * panic, coredump for process fatal signal is a sample case matching this
  * scenario. Or if kernel can't guarantee the data integrity, it's better
  * not to call this function, let kernel touch the poison page and get to
diff --git a/mm/memory-failure.c b/mm/memory-failure.c
index 85ad98c00fd9..8e06c6998fb9 100644
--- a/mm/memory-failure.c
+++ b/mm/memory-failure.c
@@ -1134,7 +1134,7 @@ static bool hwpoison_user_mappings(struct page *p, unsigned long pfn,
 			 * could potentially call huge_pmd_unshare.  Because of
 			 * this, take semaphore in write mode here and set
 			 * TTU_RMAP_LOCKED to indicate we have taken the lock
-			 * at this higer level.
+			 * at this higher level.
 			 */
 			mapping = hugetlb_page_mapping_lock_write(hpage);
 			if (mapping) {
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
index 70620d0dd923..02f8073a364f 100644
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -913,7 +913,7 @@ int __ref online_pages(unsigned long pfn, unsigned long nr_pages, struct zone *z
 
 	/*
 	 * {on,off}lining is constrained to full memory sections (or more
-	 * precisly to memory blocks from the user space POV).
+	 * precisely to memory blocks from the user space POV).
 	 * memmap_on_memory is an exception because it reserves initial part
 	 * of the physical memory space for vmemmaps. That space is pageblock
 	 * aligned.
@@ -1703,7 +1703,7 @@ int __ref offline_pages(unsigned long start_pfn, unsigned long nr_pages)
 
 	/*
 	 * {on,off}lining is constrained to full memory sections (or more
-	 * precisly to memory blocks from the user space POV).
+	 * precisely to memory blocks from the user space POV).
 	 * memmap_on_memory is an exception because it reserves initial part
 	 * of the physical memory space for vmemmaps. That space is pageblock
 	 * aligned.
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index aaa1655cf682..a16f8f3f9e80 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -3103,7 +3103,7 @@ static void __drain_all_pages(struct zone *zone, bool force_all_cpus)
 	int cpu;
 
 	/*
-	 * Allocate in the BSS so we wont require allocation in
+	 * Allocate in the BSS so we won't require allocation in
 	 * direct reclaim path for CONFIG_CPUMASK_OFFSTACK=y
 	 */
 	static cpumask_t cpus_with_pcps;
diff --git a/mm/swap.c b/mm/swap.c
index dfb48cf9c2c9..d35b8d615248 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -554,7 +554,7 @@ static void lru_deactivate_file_fn(struct page *page, struct lruvec *lruvec)
 	} else {
 		/*
 		 * The page's writeback ends up during pagevec
-		 * We moves tha page into tail of inactive.
+		 * We moves the page into tail of inactive.
 		 */
 		add_page_to_lru_list_tail(page, lruvec);
 		__count_vm_events(PGROTATED, nr_pages);
diff --git a/mm/swapfile.c b/mm/swapfile.c
index 149e77454e3c..88a6f01cfb88 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -2972,7 +2972,7 @@ static unsigned long read_swap_header(struct swap_info_struct *p,
 		return 0;
 	}
 
-	/* swap partition endianess hack... */
+	/* swap partition endianness hack... */
 	if (swab32(swap_header->info.version) == 1) {
 		swab32s(&swap_header->info.version);
 		swab32s(&swap_header->info.last_page);
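
A note on the swap-partition "endianness hack" in the final hunk: read_swap_header()
accepts swap areas formatted on a machine of the opposite byte order. If the header's
version field equals 1 only after byte-swapping, the header was written foreign-endian,
and the fields are converted to native order with swab32s(). Below is a minimal,
self-contained userspace sketch of that detection; the swab32() helper is written out
here as a stand-in for the kernel's <linux/swab.h> implementation, and the value in
main() is only an illustration.

#include <stdint.h>
#include <stdio.h>

/*
 * Stand-in for the kernel's swab32() from <linux/swab.h>:
 * reverse the byte order of a 32-bit value.
 */
static uint32_t swab32(uint32_t x)
{
	return ((x & 0x000000ffu) << 24) |
	       ((x & 0x0000ff00u) <<  8) |
	       ((x & 0x00ff0000u) >>  8) |
	       ((x & 0xff000000u) >> 24);
}

int main(void)
{
	/*
	 * A swap header created on an opposite-endianness machine stores
	 * version 1 with its bytes reversed, so it reads as 0x01000000 here.
	 */
	uint32_t version = 0x01000000u;

	/* The check used by read_swap_header(): does the swapped value equal 1? */
	if (swab32(version) == 1) {
		/* Foreign-endian header detected: convert to native order. */
		version = swab32(version);
	}

	printf("version = %u\n", version);	/* prints: version = 1 */
	return 0;
}

The kernel code goes on to convert the remaining header fields (last_page and so on)
with the same swab32s() calls, as the hunk above shows.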