@@ -2227,7 +2227,7 @@ config CMDLINE
default ""
help
Provide a set of default command-line options at build time by
- entering them here. As a minimum, you should specify the the
+ entering them here. As a minimum, you should specify the
root device (e.g. root=/dev/nfs).
choice
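
For reference, options entered here end up in the final .config as a single CONFIG_CMDLINE string; a hedged example (the console argument is purely illustrative, the NFS root comes from the help text above):

    CONFIG_CMDLINE="console=ttyAMA0 root=/dev/nfs"
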
@@ -390,7 +390,7 @@ alternative_endif
* [start, end) with dcache line size explicitly provided.
*
* op: operation passed to dc instruction
- * domain: domain used in dsb instruciton
+ * domain: domain used in dsb instruction
* start: starting virtual address of the region
* end: end virtual address of the region
* linesz: dcache line size
@@ -431,7 +431,7 @@ alternative_endif
* [start, end)
*
* op: operation passed to dc instruction
- * domain: domain used in dsb instruciton
+ * domain: domain used in dsb instruction
* start: starting virtual address of the region
* end: end virtual address of the region
* fixup: optional label to branch to on user fault
@@ -198,7 +198,7 @@ extern struct arm64_ftr_reg arm64_ftr_reg_ctrel0;
- * registers (e.g, SCTLR, TCR etc.) or patching the kernel via
+ * registers (e.g., SCTLR, TCR etc.) or patching the kernel via
* alternatives. The kernel patching is batched and performed at later
* point. The actions are always initiated only after the capability
- * is finalised. This is usally denoted by "enabling" the capability.
- * The actions are initiated as follows :
+ * is finalised. This is usually denoted by "enabling" the capability.
+ * The actions are initiated as follows:
* a) Action is triggered on all online CPUs, after the capability is
* finalised, invoked within the stop_machine() context from
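
As a sketch of the detect-then-enable contract described above, a capability entry pairs a matches() detector with a cpu_enable() action; everything below except the struct and field names is hypothetical:

    static bool has_widget(const struct arm64_cpu_capabilities *cap, int scope)
    {
            return false;   /* hypothetical detection logic */
    }

    static void cpu_enable_widget(const struct arm64_cpu_capabilities *cap)
    {
            /* invoked on each CPU once the capability is finalised */
    }

    static const struct arm64_cpu_capabilities widget_cap = {
            .desc           = "hypothetical widget feature",
            .matches        = has_widget,
            .cpu_enable     = cpu_enable_widget,
            /* real entries also set .capability and .type */
    };

Real entries also pick one of the scope/behaviour classes defined by the ARM64_CPUCAP_* bits below.
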
@@ -250,7 +250,7 @@ extern struct arm64_ftr_reg arm64_ftr_reg_ctrel0;
#define ARM64_CPUCAP_SCOPE_LOCAL_CPU ((u16)BIT(0))
#define ARM64_CPUCAP_SCOPE_SYSTEM ((u16)BIT(1))
/*
- * The capabilitiy is detected on the Boot CPU and is used by kernel
- * during early boot. i.e, the capability should be "detected" and
- * "enabled" as early as possibly on all booting CPUs.
+ * The capability is detected on the Boot CPU and is used by kernel
+ * during early boot. i.e., the capability should be "detected" and
+ * "enabled" as early as possible on all booting CPUs.
*/
@@ -288,7 +288,7 @@ bool pgattr_change_is_safe(u64 old, u64 new);
* 1 0 | 1 0 1
* 1 1 | 0 1 x
*
- * When hardware DBM is not present, the sofware PTE_DIRTY bit is updated via
+ * When hardware DBM is not present, the software PTE_DIRTY bit is updated via
* the page fault mechanism. Checking the dirty status of a pte becomes:
*
* PTE_DIRTY || (PTE_WRITE && !PTE_RDONLY)
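
That check maps directly onto the PTE bits; a minimal C sketch (the helper name is made up, the PTE_* masks are the arm64 ones):

    static inline bool pte_dirty_sketch(pte_t pte)
    {
            /* software-dirty, or hardware-writable and no longer read-only */
            return (pte_val(pte) & PTE_DIRTY) ||
                   ((pte_val(pte) & PTE_WRITE) && !(pte_val(pte) & PTE_RDONLY));
    }
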
@@ -23,7 +23,7 @@ struct cpu_suspend_ctx {
* __cpu_suspend_enter()'s caller, and populated by __cpu_suspend_enter().
* This data must survive until cpu_resume() is called.
*
- * This struct desribes the size and the layout of the saved cpu state.
+ * This struct describes the size and the layout of the saved cpu state.
* The layout of the callee_saved_regs is defined by the implementation
* of __cpu_suspend_enter(), and cpu_resume(). This struct must be passed
* in by the caller as __cpu_suspend_enter()'s stack-frame is gone once it
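
A sketch of the layout being described, modelled on the sleep_stack_data definition in asm/suspend.h (NR_CTX_REGS is the number of callee-saved slots):

    struct sleep_stack_data {
            struct cpu_suspend_ctx  system_regs;
            unsigned long           callee_saved_regs[NR_CTX_REGS];
    };
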
@@ -52,8 +52,8 @@ static inline int in_entry_text(unsigned long ptr)
* CPUs with the RAS extensions have an Implementation-Defined-Syndrome bit
* to indicate whether this ESR has a RAS encoding. CPUs without this feature
- * have a ISS-Valid bit in the same position.
- * If this bit is set, we know its not a RAS SError.
- * If its clear, we need to know if the CPU supports RAS. Uncategorized RAS
+ * have an ISS-Valid bit in the same position.
+ * If this bit is set, we know it's not a RAS SError.
+ * If it's clear, we need to know if the CPU supports RAS. Uncategorized RAS
* errors share the same encoding as an all-zeros encoding from a CPU that
* doesn't support RAS.
*/
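
The classification above can be sketched in C (function name hypothetical; ESR_ELx_IDS is the Implementation-Defined-Syndrome bit):

    static bool is_ras_serror_sketch(unsigned long esr)
    {
            if (esr & ESR_ELx_IDS)  /* impdef syndrome: not a RAS SError */
                    return false;

            /* an all-zeros ISS is only a RAS encoding if this CPU has RAS */
            return this_cpu_has_cap(ARM64_HAS_RAS_EXTN);
    }

The real check must run with preemption disabled, since it consults a per-CPU capability.
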
@@ -128,7 +128,7 @@ static int __init acpi_fadt_sanity_check(void)
/*
* FADT is required on arm64; retrieve it to check its presence
- * and carry out revision and ACPI HW reduced compliancy tests
+ * and carry out revision and ACPI HW reduced compliance tests
*/
status = acpi_get_table(ACPI_SIG_FADT, 0, &table);
if (ACPI_FAILURE(status)) {
@@ -748,7 +748,7 @@ static int search_cmp_ftr_reg(const void *id, const void *regp)
* entry.
*
* returns - Upon success, matching ftr_reg entry for id.
- * - NULL on failure. It is upto the caller to decide
+ * - NULL on failure. It is up to the caller to decide
* the impact of a failure.
*/
static struct arm64_ftr_reg *get_arm64_ftr_reg_nowarn(u32 sys_id)
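
Because a NULL return is part of the contract, each caller picks its own failure policy; a hedged usage sketch (wrapper name invented):

    static void update_ftr_reg_sketch(u32 sys_id, u64 new_val)
    {
            struct arm64_ftr_reg *reg = get_arm64_ftr_reg_nowarn(sys_id);

            if (!reg)
                    return; /* this caller's policy: quietly ignore unknown ids */

            /* ... act on *reg using new_val ... */
    }
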
@@ -874,7 +874,7 @@ static void __init sort_ftr_regs(void)
/*
* Initialise the CPU feature register from Boot CPU values.
- * Also initiliases the strict_mask for the register.
+ * Also initialises the strict_mask for the register.
* Any bits that are not covered by an arm64_ftr_bits entry are considered
* RES0 for the system-wide value, and must strictly match.
*/
@@ -3108,7 +3108,7 @@ static void verify_local_cpu_caps(u16 scope_mask)
/*
* We have to issue cpu_enable() irrespective of
- * whether the CPU has it or not, as it is enabeld
- * system wide. It is upto the call back to take
+ * whether the CPU has it or not, as it is enabled
+ * system wide. It is up to the callback to take
* appropriate action on this CPU.
*/
if (caps->cpu_enable)
@@ -660,7 +660,7 @@ static void noinstr el0_inv(struct pt_regs *regs, unsigned long esr)
static void noinstr el0_dbg(struct pt_regs *regs, unsigned long esr)
{
- /* Only watchpoints write FAR_EL1, otherwise its UNKNOWN */
+ /* Only watchpoints write FAR_EL1, otherwise it's UNKNOWN */
unsigned long far = read_sysreg(far_el1);
enter_from_user_mode(regs);
@@ -94,7 +94,7 @@ SYM_CODE_START(ftrace_caller)
stp x29, x30, [sp, #FREGS_SIZE]
add x29, sp, #FREGS_SIZE
- /* Prepare arguments for the the tracer func */
+ /* Prepare arguments for the tracer func */
sub x0, x30, #AARCH64_INSN_SIZE // ip (callsite's BL insn)
mov x1, x9 // parent_ip (callsite's LR)
mov x3, sp // regs
@@ -547,7 +547,7 @@ SYM_CODE_START_LOCAL(__bad_stack)
mrs x0, tpidrro_el0
/*
- * Store the original GPRs to the new stack. The orginal SP (minus
+ * Store the original GPRs to the new stack. The original SP (minus
* PT_REGS_SIZE) was stashed in tpidr_el0 by kernel_ventry.
*/
sub sp, sp, #PT_REGS_SIZE
@@ -423,7 +423,7 @@ int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec,
return ret;
/*
- * When using mcount, callsites in modules may have been initalized to
+ * When using mcount, callsites in modules may have been initialized to
* call an arbitrary module PLT (which redirects to the _mcount stub)
* rather than the ftrace PLT we'll use at runtime (which redirects to
* the ftrace trampoline). We can ignore the old PLT when initializing
@@ -296,7 +296,7 @@ void crash_post_resume(void)
* marked as Reserved as memory was allocated via memblock_reserve().
*
* In hibernation, the pages which are Reserved and yet "nosave" are excluded
- * from the hibernation iamge. crash_is_nosave() does thich check for crash
+ * from the hibernation image. crash_is_nosave() does this check for crash
* dump kernel and will reduce the total size of hibernation image.
*/
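
In other words, hibernation's per-pfn filter can simply delegate to crash_is_nosave(); a sketch with an invented wrapper name:

    static bool exclude_from_hibernation_image(unsigned long pfn)
    {
            /* reserved crash-dump pages flagged "nosave" are not snapshotted */
            return crash_is_nosave(pfn);
    }
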
@@ -122,7 +122,7 @@ void arch_uprobe_abort_xol(struct arch_uprobe *auprobe, struct pt_regs *regs)
struct uprobe_task *utask = current->utask;
/*
- * Task has received a fatal signal, so reset back to probbed
+ * Task has received a fatal signal, so reset back to probed
* address.
*/
instruction_pointer_set(regs, utask->vaddr);
@@ -206,7 +206,7 @@ unsigned long sdei_arch_get_entry_point(int conduit)
/*
* do_sdei_event() returns one of:
* SDEI_EV_HANDLED - success, return to the interrupted context.
- * SDEI_EV_FAILED - failure, return this error code to firmare.
+ * SDEI_EV_FAILED - failure, return this error code to firmware.
* virtual-address - success, return to this address.
*/
unsigned long __kprobes do_sdei_event(struct pt_regs *regs,
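
A hedged sketch of acting on that three-way return contract (the dispatcher and both helpers are hypothetical, and the second parameter type is assumed; the real consumer is the SDEI assembly entry path):

    static void sdei_dispatch_sketch(struct pt_regs *regs,
                                     struct sdei_registered_event *arg)
    {
            unsigned long ret = do_sdei_event(regs, arg);

            if (ret == SDEI_EV_HANDLED || ret == SDEI_EV_FAILED)
                    complete_to_firmware_sketch(ret);  /* hypothetical: status to firmware */
            else
                    resume_at_sketch(regs, ret);       /* hypothetical: ret is a VA */
    }
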
@@ -351,7 +351,7 @@ void arch_cpuhp_cleanup_dead_cpu(unsigned int cpu)
/*
* Now that the dying CPU is beyond the point of no return w.r.t.
- * in-kernel synchronisation, try to get the firwmare to help us to
+ * in-kernel synchronisation, try to get the firmware to help us to
* verify that it has really left the kernel before we consider
* clobbering anything it might still be using.
*/
@@ -897,7 +897,7 @@ void __noreturn panic_bad_stack(struct pt_regs *regs, unsigned long esr, unsigne
__show_regs(regs);
/*
- * We use nmi_panic to limit the potential for recusive overflows, and
+ * We use nmi_panic to limit the potential for recursive overflows, and
* to get a better stack trace.
*/
nmi_panic(NULL, "kernel stack overflow");