@@ -41,6 +41,18 @@ struct psw {
uint64_t addr;
};
+struct cpu {
+ struct lowcore *lowcore;
+ uint64_t *stack;
+ void (*pgm_cleanup_func)(struct stack_frame_int *);
+ void (*ext_cleanup_func)(struct stack_frame_int *);
+ uint16_t addr;
+ uint16_t idx;
+ bool active;
+ bool pgm_int_expected;
+ bool ext_int_expected;
+};
+
#define AS_PRIM 0
#define AS_ACCR 1
#define AS_SECN 2
@@ -125,7 +137,8 @@ struct lowcore {
uint8_t pad_0x0280[0x0308 - 0x0280]; /* 0x0280 */
uint64_t sw_int_crs[16]; /* 0x0308 */
struct psw sw_int_psw; /* 0x0388 */
- uint8_t pad_0x0310[0x11b0 - 0x0398]; /* 0x0398 */
+ struct cpu * this_cpu; /* 0x0398 */
+ uint8_t pad_0x03a0[0x11b0 - 0x03a0]; /* 0x03a0 */
uint64_t mcck_ext_sa_addr; /* 0x11b0 */
uint8_t pad_0x11b8[0x1200 - 0x11b8]; /* 0x11b8 */
uint64_t fprs_sa[16]; /* 0x1200 */
@@ -148,6 +161,8 @@ _Static_assert(sizeof(struct lowcore) == 0x1900, "Lowcore size");
extern struct lowcore lowcore;
+#define THIS_CPU (lowcore.this_cpu)
+
#define PGM_INT_CODE_OPERATION 0x01
#define PGM_INT_CODE_PRIVILEGED_OPERATION 0x02
#define PGM_INT_CODE_EXECUTE 0x03
@@ -37,7 +37,8 @@ union teid {
};
};
-void register_pgm_cleanup_func(void (*f)(void));
+void register_pgm_cleanup_func(void (*f)(struct stack_frame_int *));
+void register_ext_cleanup_func(void (*f)(struct stack_frame_int *));
void handle_pgm_int(struct stack_frame_int *stack);
void handle_ext_int(struct stack_frame_int *stack);
void handle_mcck_int(void);
@@ -12,13 +12,6 @@
#include <asm/arch_def.h>
-struct cpu {
- struct lowcore *lowcore;
- uint64_t *stack;
- uint16_t addr;
- bool active;
-};
-
struct cpu_status {
uint64_t fprs[16]; /* 0x0000 */
uint64_t grs[16]; /* 0x0080 */
@@ -52,5 +45,6 @@ int smp_cpu_setup(uint16_t idx, struct psw psw);
void smp_teardown(void);
void smp_setup(void);
int smp_sigp(uint16_t idx, uint8_t order, unsigned long parm, uint32_t *status);
+struct lowcore *smp_get_lowcore(uint16_t idx);
#endif
@@ -15,25 +15,36 @@
#include <fault.h>
#include <asm/page.h>
-static bool pgm_int_expected;
-static bool ext_int_expected;
-static void (*pgm_cleanup_func)(void);
-
+/**
+ * expect_pgm_int - Expect a program interrupt on the current CPU.
+ */
void expect_pgm_int(void)
{
- pgm_int_expected = true;
+ THIS_CPU->pgm_int_expected = true;
lowcore.pgm_int_code = 0;
lowcore.trans_exc_id = 0;
mb();
}
+/**
+ * expect_ext_int - Expect an external interrupt on the current CPU.
+ */
void expect_ext_int(void)
{
- ext_int_expected = true;
+ THIS_CPU->ext_int_expected = true;
lowcore.ext_int_code = 0;
mb();
}
+/**
+ * clear_pgm_int - Clear program interrupt information
+ *
+ * Clear program interrupt information, including the expected program
+ * interrupt flag.
+ * No program interrupts are expected after calling this function.
+ *
+ * Return: the program interrupt code before clearing
+ */
uint16_t clear_pgm_int(void)
{
uint16_t code;
@@ -42,10 +53,17 @@ uint16_t clear_pgm_int(void)
code = lowcore.pgm_int_code;
lowcore.pgm_int_code = 0;
lowcore.trans_exc_id = 0;
- pgm_int_expected = false;
+ THIS_CPU->pgm_int_expected = false;
return code;
}
+/**
+ * check_pgm_int_code - Check the program interrupt code on the current CPU.
+ * @code: the expected program interrupt code on the current CPU
+ *
+ * Check and report if the program interrupt on the current CPU matches the
+ * expected one.
+ */
void check_pgm_int_code(uint16_t code)
{
mb();
@@ -54,9 +72,34 @@ void check_pgm_int_code(uint16_t code)
lowcore.pgm_int_code);
}
-void register_pgm_cleanup_func(void (*f)(void))
+/**
+ * register_pgm_cleanup_func - Register a cleanup function for program
+ * interrupts for the current CPU.
+ * @f: the cleanup function to be registered on the current CPU
+ *
+ * Register a cleanup function to be called at the end of the normal
+ * interrupt handling for program interrupts for this CPU.
+ *
+ * Pass NULL to unregister a previously registered cleanup function.
+ */
+void register_pgm_cleanup_func(void (*f)(struct stack_frame_int *))
+{
+ THIS_CPU->pgm_cleanup_func = f;
+}
+
+/**
+ * register_ext_cleanup_func - Register a cleanup function for external
+ * interrupts for the current CPU.
+ * @f: the cleanup function to be registered on the current CPU
+ *
+ * Register a cleanup function to be called at the end of the normal
+ * interrupt handling for external interrupts for this CPU.
+ *
+ * Pass NULL to unregister a previously registered cleanup function.
+ */
+void register_ext_cleanup_func(void (*f)(struct stack_frame_int *))
{
- pgm_cleanup_func = f;
+ THIS_CPU->ext_cleanup_func = f;
}
static void fixup_pgm_int(struct stack_frame_int *stack)
@@ -183,24 +226,23 @@ static void print_pgm_info(struct stack_frame_int *stack)
void handle_pgm_int(struct stack_frame_int *stack)
{
- if (!pgm_int_expected) {
+ if (!THIS_CPU->pgm_int_expected) {
/* Force sclp_busy to false, otherwise we will loop forever */
sclp_handle_ext();
print_pgm_info(stack);
}
- pgm_int_expected = false;
+ THIS_CPU->pgm_int_expected = false;
- if (pgm_cleanup_func)
- (*pgm_cleanup_func)();
+ if (THIS_CPU->pgm_cleanup_func)
+ THIS_CPU->pgm_cleanup_func(stack);
else
fixup_pgm_int(stack);
}
void handle_ext_int(struct stack_frame_int *stack)
{
- if (!ext_int_expected &&
- lowcore.ext_int_code != EXT_IRQ_SERVICE_SIG) {
+ if (!THIS_CPU->ext_int_expected && lowcore.ext_int_code != EXT_IRQ_SERVICE_SIG) {
report_abort("Unexpected external call interrupt (code %#x): on cpu %d at %#lx",
lowcore.ext_int_code, stap(), lowcore.ext_old_psw.addr);
return;
@@ -210,11 +252,14 @@ void handle_ext_int(struct stack_frame_int *stack)
stack->crs[0] &= ~(1UL << 9);
sclp_handle_ext();
} else {
- ext_int_expected = false;
+ THIS_CPU->ext_int_expected = false;
}
if (!(stack->crs[0] & CR0_EXTM_MASK))
lowcore.ext_old_psw.mask &= ~PSW_MASK_EXT;
+
+ if (THIS_CPU->ext_cleanup_func)
+ THIS_CPU->ext_cleanup_func(stack);
}
void handle_mcck_int(void)
@@ -39,6 +39,15 @@ int smp_query_num_cpus(void)
return sclp_get_cpu_num();
}
+struct lowcore *smp_get_lowcore(uint16_t idx)
+{
+ if (THIS_CPU->idx == idx)
+ return &lowcore;
+
+ check_idx(idx);
+ return cpus[idx].lowcore;
+}
+
int smp_sigp(uint16_t idx, uint8_t order, unsigned long parm, uint32_t *status)
{
check_idx(idx);
@@ -253,6 +262,7 @@ static int smp_cpu_setup_nolock(uint16_t idx, struct psw psw)
/* Copy all exception psws. */
memcpy(lc, cpus[0].lowcore, 512);
+ lc->this_cpu = &cpus[idx];
/* Setup stack */
cpus[idx].stack = (uint64_t *)alloc_pages(2);
@@ -325,6 +335,7 @@ void smp_setup(void)
for (i = 0; i < num; i++) {
cpus[i].addr = entry[i].address;
cpus[i].active = false;
+ cpus[i].idx = i;
/*
* Fill in the boot CPU. If the boot CPU is not at index 0,
* swap it with the one at index 0. This guarantees that the
@@ -119,7 +119,7 @@ static void set_flag(int val)
mb();
}
-static void ecall_cleanup(void)
+static void ecall_cleanup(struct stack_frame_int *stack)
{
lowcore.ext_new_psw.mask = PSW_MASK_64;
lowcore.sw_int_crs[0] = BIT_ULL(CTL0_AFP);
Use per-CPU flags and callbacks for Program and Extern interrupts, instead of global variables. This allows for more accurate error handling; a CPU waiting for an interrupt will not have it "stolen" by a different CPU that was not supposed to wait for one, and now two CPUs can wait for interrupts at the same time. This will significantly improve error reporting and debugging when things go wrong. Both program interrupts and external interrupts are now CPU-bound, even though some external interrupts are floating (notably, the SCLP interrupt). In those cases, the testcases should mask interrupts and/or expect them appropriately according to need. Signed-off-by: Claudio Imbrenda <imbrenda@linux.ibm.com> --- lib/s390x/asm/arch_def.h | 17 ++++++++- lib/s390x/asm/interrupt.h | 3 +- lib/s390x/smp.h | 8 +--- lib/s390x/interrupt.c | 77 +++++++++++++++++++++++++++++++-------- lib/s390x/smp.c | 11 ++++++ s390x/skrf.c | 2 +- 6 files changed, 92 insertions(+), 26 deletions(-)