@@ -195,6 +195,14 @@ config VECTORS_BASE
help
The base address of exception vectors.
+config ARCH_HIBERNATION_POSSIBLE
+ bool
+ depends on !SMP
+ help
+ If the machine architecture supports suspend-to-disk,
+ it should select this automatically for you.
+ Otherwise, say 'Y' at your own peril.
+
config ARCH_HAS_CPU_IDLE_WAIT
def_bool y
@@ -191,6 +191,7 @@ static inline void *phys_to_virt(unsigned long x)
*/
#define __pa(x) __virt_to_phys((unsigned long)(x))
#define __va(x) ((void *)__phys_to_virt((unsigned long)(x)))
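+/*
+ * __pa_symbol: physical address of a kernel symbol such as a section
+ * boundary marker. RELOC_HIDE keeps the compiler from drawing inferences
+ * from the symbol's address (same approach as the x86 definition).
+ */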
+#define __pa_symbol(x) __pa(RELOC_HIDE((unsigned long)(x),0))
#define pfn_to_kaddr(pfn) __va((pfn) << PAGE_SHIFT)
/*
new file mode 100644
@@ -0,0 +1,6 @@
+#ifndef __ASM_ARM_SUSPEND_H
+#define __ASM_ARM_SUSPEND_H
+
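+/* Nothing ARM-specific to prepare; report success so hibernation proceeds. */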
+static inline int arch_prepare_suspend(void) { return 0; }
+
+#endif /* __ASM_ARM_SUSPEND_H */
@@ -36,6 +36,7 @@ obj-$(CONFIG_ARM_THUMBEE) += thumbee.o
obj-$(CONFIG_KGDB) += kgdb.o
obj-$(CONFIG_ARM_UNWIND) += unwind.o
obj-$(CONFIG_HAVE_TCM) += tcm.o
+obj-$(CONFIG_HIBERNATION) += cpu.o swsusp.o
obj-$(CONFIG_CRUNCH) += crunch.o crunch-bits.o
AFLAGS_crunch-bits.o := -Wa,-mcpu=ep9312
new file mode 100644
@@ -0,0 +1,36 @@
+/*
+ * Hibernation support specific for ARM
+ *
+ * Based on work by:
+ *
+ * Ubuntu project, hibernation support for mach-dove,
+ * https://lkml.org/lkml/2010/6/18/4
+ *
+ * Copyright (C) 2010 Nokia Corporation
+ * Contact: Hiroshi DOYU <Hiroshi.DOYU@nokia.com>
+ * https://lists.linux-foundation.org/pipermail/linux-pm/2010-June/027422.html
+ *
+ * Copyright (C) 2010 Texas Instruments, Inc.
+ * via linux-omap mailing list, Teerth Reddy et al.
+ * https://patchwork.kernel.org/patch/96442/
+ *
+ * Copyright (C) 2006 Rafael J. Wysocki <rjw@sisk.pl>
+ *
+ * License terms: GNU General Public License (GPL) version 2
+ */
+
+#include <linux/mm.h>
+
+/* References to section boundaries */
+extern const void __nosave_begin, __nosave_end;
+
+/*
+ * pfn_is_nosave - check if given pfn is in the 'nosave' section
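+ *
+ * Pages in the nosave section are neither saved in the hibernation image
+ * nor overwritten on resume.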
+ */
+int pfn_is_nosave(unsigned long pfn)
+{
+ unsigned long nosave_begin_pfn = __pa_symbol(&__nosave_begin) >> PAGE_SHIFT;
+ unsigned long nosave_end_pfn = PAGE_ALIGN(__pa_symbol(&__nosave_end)) >> PAGE_SHIFT;
+
+ return (pfn >= nosave_begin_pfn) && (pfn < nosave_end_pfn);
+}
new file mode 100644
@@ -0,0 +1,167 @@
+/*
+ * Hibernation support specific for ARM
+ *
+ * Based on work by:
+ *
+ * Ubuntu project, hibernation support for mach-dove,
+ * https://lkml.org/lkml/2010/6/18/4
+ *
+ * Copyright (C) 2010 Nokia Corporation
+ * Contact: Hiroshi DOYU <Hiroshi.DOYU@nokia.com>
+ * https://lists.linux-foundation.org/pipermail/linux-pm/2010-June/027422.html
+ *
+ * Copyright (C) 2010 Texas Instruments, Inc.
+ * via linux-omap mailing list, Teerth Reddy et al.
+ * https://patchwork.kernel.org/patch/96442/
+ *
+ * Copyright (C) 2006 Rafael J. Wysocki <rjw@sisk.pl>
+ *
+ * License terms: GNU General Public License (GPL) version 2
+ */
+
+#include <linux/linkage.h>
+#include <asm/assembler.h>
+#include <asm/cache.h>
+#include <asm/memory.h>
+#include <asm/page.h>
+#include <asm/ptrace.h>
+
+
+/*
+ * Force ARM mode because:
+ * - we use PC-relative addressing with >8-bit offsets
+ * - we use msr with immediates
+ */
+.arm
+
+.align PAGE_SHIFT
+.Lswsusp_page_start:
+
+/*
+ * Save the current CPU state before suspend / poweroff.
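+ * The ctx area receives, in order: the suspend-time CPSR, r0-r14 as
+ * seen from SYSTEM mode (i.e. the user register set), then the SVC-mode
+ * SPSR, sp and lr.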
+ */
+ENTRY(swsusp_arch_suspend)
+ adr r0, ctx
+ mrs r1, cpsr
+ stm r0!, {r1} /* current CPSR */
+ msr cpsr_c, #PSR_F_BIT | PSR_I_BIT | SYSTEM_MODE /* SYSTEM mode, IRQ/FIQ masked */
+ stm r0!, {r0-r14} /* user regs */
+ msr cpsr_c, #PSR_F_BIT | PSR_I_BIT | SVC_MODE /* back to SVC, still masked */
+ mrs r2, spsr
+ stm r0!, {r2, sp, lr} /* SVC SPSR, SVC regs */
+ msr cpsr, r1 /* restore original mode */
+
+ stmfd sp!, {lr}
+ bl __save_processor_state
+ ldmfd sp!, {lr}
+ b swsusp_save /* this will also set the return code */
+ENDPROC(swsusp_arch_suspend)
+
+
+/*
+ * Restore the memory image from the pagelists, and load the CPU registers
+ * from saved state.
+ * This runs in a very restrictive context - namely, no stack can be used
+ * before the CPU register state saved by swsusp_arch_suspend() has been
+ * restored.
+ */
+ENTRY(swsusp_arch_resume)
+ /*
+ * TODO: Ubuntu mach-dove suspend-to-disk code sets pagedir to swapper
+ * (so that resume via initramfs can work). The code is equivalent to:
+ *
+ * cpu_switch_mm(__pa(swapper_pg_dir), current->active_mm);
+ *
+ * It is not directly callable from assembly (active_mm is a macro in a
+ * non-exported header, proc-fns.h, and cpu_switch_mm is a macro meant
+ * for C use only).
+ * Enabling this without manipulating CPU-specific MMU registers
+ * directly would require header file changes.
+ *
+ * FIXME: Supplying the code breaks resume on OMAP3. For now, don't.
+ */
+#ifdef NOTYET
+ act_mm r1
+ ldr r0, =__virt_to_phys(swapper_pg_dir)
+ cpu_switch_mm
+#endif
+
+ /*
+ * The following code is an assembly version of:
+ *
+ * struct pbe *pbe;
+ * for (pbe = restore_pblist; pbe != NULL; pbe = pbe->next)
+ * copy_page(pbe->orig_address, pbe->address);
+ *
+ * Because this is the very place where data pages, including our stack,
+ * are overwritten, function calls are obviously impossible. Hence asm.
+ *
+ * The core of the loop is taken almost verbatim from copy_page.S.
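+ *
+ * Register use in the loop: r1 = current pbe, r0 = source page
+ * (pbe->address), r2 = destination page (pbe->orig_address),
+ * r3 = cache-block counter, r4-r7 = data in flight.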
+ */
+ ldr r1, =(restore_pblist - 8) /* "fake" pbe->next */
+ b 3f
+.ltorg
+.align L1_CACHE_SHIFT
+0:
+PLD( pld [r0, #0] )
+PLD( pld [r0, #L1_CACHE_BYTES] )
+ mov r3, #(PAGE_SIZE / (2 * L1_CACHE_BYTES) PLD( -1 ))
+ ldmia r0!, {r4-r7}
+1:
+PLD( pld [r0, #(2 * L1_CACHE_BYTES)] )
+PLD( pld [r0, #(3 * L1_CACHE_BYTES)] )
+2:
+.rept (2 * L1_CACHE_BYTES / 16 - 1)
+ stmia r2!, {r4-r7}
+ ldmia r0!, {r4-r7}
+.endr
+ subs r3, r3, #1
+ stmia r2!, {r4-r7}
+ ldmgtia r0!, {r4-r7}
+ bgt 1b
+PLD( ldmeqia r0!, {r4-r7} )
+PLD( beq 2b )
+3:
+ ldr r1, [r1, #8] /* load next in list (pbe->next) */
+ cmp r1, #0
+ ldrne r0, [r1] /* src page start address (pbe->address) */
+ ldrne r2, [r1, #4] /* dst page start address (pbe->orig_address) */
+ bne 0b
+
+ /*
+ * Done - now restore the CPU state and return.
+ */
+ msr cpsr_c, #PSR_F_BIT | PSR_I_BIT | SYSTEM_MODE /* SYSTEM mode, IRQ/FIQ masked */
+ adr r0, ctx
+ ldm r0!, {r1, sp, lr} /* r1 = saved CPSR; sp/lr catch saved r0/r1, scratch values overwritten below */
+ msr cpsr_xsf, r1 /* restore all fields except control; stay in SYSTEM mode until r2-r14 are reloaded */
+ ldm r0!, {r2-r14}
+ msr cpsr_c, #PSR_F_BIT | PSR_I_BIT | SVC_MODE /* SVC mode, still masked */
+ ldm r0!, {r2, sp, lr}
+ msr spsr_cxsf, r2
+ msr cpsr_c, r1 /* use CPSR from above */
+
+ /*
+ * From here on we have a valid stack again. Core state is
+ * not restored yet; redirect to the machine-specific
+ * implementation to get that done.
+ * Note that at this point the memory image has been restored
+ * successfully: if the machine-specific code fails now, it must
+ * panic, because there is no longer any way to recover from a
+ * "resume failure".
+ */
+ mov r1, #0
+ stmfd sp!, {r1,lr}
+ bl __restore_processor_state /* restore core state */
+ ldmfd sp!, {r0,pc}
+ENDPROC(swsusp_arch_resume)
+
+.ltorg
+
+/*
+ * The CPU context (register set for all modes and mach-specific cp regs)
+ * is saved here. The remainder of this page is set aside for it, which
+ * should be ample.
+ */
+.align L1_CACHE_SHIFT
+ENTRY(ctx)
+.space (PAGE_SIZE - (. - .Lswsusp_page_start))
+END(ctx)
@@ -153,7 +153,6 @@ SECTIONS
__init_end = .;
#endif
- NOSAVE_DATA
CACHELINE_ALIGNED_DATA(32)
/*
@@ -176,6 +175,8 @@ SECTIONS
}
_edata_loc = __data_loc + SIZEOF(.data);
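+ /*
+ * NOSAVE_DATA now emits its own page-aligned output section (see the
+ * vmlinux.lds.h change below), so it must sit here, at the top level
+ * of SECTIONS, rather than inside the .data output section.
+ */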
+ NOSAVE_DATA
+
#ifdef CONFIG_HAVE_TCM
/*
* We align everything to a page boundary so we can
@@ -171,7 +171,7 @@
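+/*
+ * NOSAVE_DATA now wraps the nosave input sections in an output section
+ * of their own, so the macro can be invoked at the top level of SECTIONS
+ * (as the ARM linker script now does); note it can no longer appear
+ * inside another output section definition.
+ */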
#define NOSAVE_DATA \
. = ALIGN(PAGE_SIZE); \
VMLINUX_SYMBOL(__nosave_begin) = .; \
- *(.data.nosave) \
+ .data.nosave : { *(.data.nosave) } \
. = ALIGN(PAGE_SIZE); \
VMLINUX_SYMBOL(__nosave_end) = .;
@@ -274,8 +274,13 @@ static inline void hibernate_nvs_restore(void) {}
#endif /* CONFIG_HIBERNATION_NVS */
#ifdef CONFIG_PM_SLEEP
+#ifndef CONFIG_ARM
void save_processor_state(void);
void restore_processor_state(void);
+#else
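+/*
+ * On ARM the CPU context is saved and restored by swsusp_arch_suspend()
+ * and swsusp_arch_resume() themselves, so the generic hooks only need
+ * to keep preemption disabled across the operation.
+ */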
+#define save_processor_state preempt_disable
+#define restore_processor_state preempt_enable
+#endif
/* kernel/power/main.c */
extern int register_pm_notifier(struct notifier_block *nb);