===================================================================
@@ -58,6 +58,9 @@ $(TEST_DIR)/tsc.flat: $(cstart.o) $(TEST
$(TEST_DIR)/apic.flat: $(cstart.o) $(TEST_DIR)/apic.o $(TEST_DIR)/vm.o \
$(TEST_DIR)/print.o
+$(TEST_DIR)/time.flat: $(cstart.o) $(TEST_DIR)/time.o $(TEST_DIR)/vm.o \
+ $(TEST_DIR)/print.o
+
$(TEST_DIR)/realmode.flat: $(TEST_DIR)/realmode.o
$(CC) -m32 -nostdlib -o $@ -Wl,-T,$(TEST_DIR)/realmode.lds $^
===================================================================
@@ -7,6 +7,7 @@ CFLAGS += -D__x86_64__
tests = $(TEST_DIR)/access.flat $(TEST_DIR)/sieve.flat \
$(TEST_DIR)/simple.flat $(TEST_DIR)/stringio.flat \
$(TEST_DIR)/memtest1.flat $(TEST_DIR)/emulator.flat \
- $(TEST_DIR)/hypercall.flat $(TEST_DIR)/apic.flat
+ $(TEST_DIR)/hypercall.flat $(TEST_DIR)/apic.flat \
+ $(TEST_DIR)/time.flat
include config-x86-common.mak
===================================================================
@@ -0,0 +1,35 @@
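+/* Minimal x86 port I/O accessors (byte/word/dword out and in). */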
+static inline void outb(unsigned char val, unsigned short port)
+{
+ asm volatile("outb %0, %w1": : "a"(val), "Nd" (port));
+}
+
+static inline void outw(unsigned short val, unsigned short port)
+{
+ asm volatile("outw %0, %w1": : "a"(val), "Nd" (port));
+}
+
+static inline void outl(unsigned int val, unsigned short port)
+{
+ asm volatile("outl %0, %w1": : "a"(val), "Nd" (port));
+}
+
+static inline unsigned char inb(unsigned short port)
+{
+ unsigned char val;
+ asm volatile("inb %w1, %0": "=a"(val) : "Nd" (port));
+ return val;
+}
+
+static inline unsigned short inw(unsigned short port)
+{
+ unsigned short val;
+ asm volatile("inw %w1, %0": "=a"(val) : "Nd" (port));
+ return val;
+}
+
+static inline unsigned int inl(unsigned short port)
+{
+ unsigned int val;
+ asm volatile("inl %w1, %0": "=a"(val) : "Nd" (port));
+ return val;
+}
===================================================================
@@ -0,0 +1,1010 @@
+#include "libcflat.h"
+#include "apic.h"
+#include "vm.h"
+#include "io.h"
+
+typedef unsigned char u8;
+typedef unsigned short u16;
+typedef unsigned u32;
+typedef unsigned long ulong;
+typedef unsigned long long u64;
+
+#ifndef ARRAY_SIZE
+#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
+#endif
+
+typedef struct {
+ unsigned short offset0;
+ unsigned short selector;
+ unsigned short ist : 3;
+ unsigned short : 5;
+ unsigned short type : 4;
+ unsigned short : 1;
+ unsigned short dpl : 2;
+ unsigned short p : 1;
+ unsigned short offset1;
+#ifdef __x86_64__
+ unsigned offset2;
+ unsigned reserved;
+#endif
+} idt_entry_t;
+
+typedef struct {
+ ulong regs[sizeof(ulong)*2]; /* 8 GPRs on i386, 16 on x86_64 */
+ ulong func;
+ ulong rip;
+ ulong cs;
+ ulong rflags;
+} isr_regs_t;
+
+#ifdef __x86_64__
+# define R "r"
+#else
+# define R "e"
+#endif
+
+extern char isr_entry_point[];
+
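+/*
+ * Common interrupt entry: save all general purpose registers, call the C
+ * handler whose address the per-vector thunk stored above the register
+ * frame, restore the registers, drop the handler address and iret.
+ */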
+asm (
+ "isr_entry_point: \n"
+#ifdef __x86_64__
+ "push %r15 \n\t"
+ "push %r14 \n\t"
+ "push %r13 \n\t"
+ "push %r12 \n\t"
+ "push %r11 \n\t"
+ "push %r10 \n\t"
+ "push %r9 \n\t"
+ "push %r8 \n\t"
+#endif
+ "push %"R "di \n\t"
+ "push %"R "si \n\t"
+ "push %"R "bp \n\t"
+ "push %"R "sp \n\t"
+ "push %"R "bx \n\t"
+ "push %"R "dx \n\t"
+ "push %"R "cx \n\t"
+ "push %"R "ax \n\t"
+#ifdef __x86_64__
+ "mov %rsp, %rdi \n\t"
+ "callq *8*16(%rsp) \n\t"
+#else
+ "push %esp \n\t"
+ "calll *4+4*8(%esp) \n\t"
+ "add $4, %esp \n\t"
+#endif
+ "pop %"R "ax \n\t"
+ "pop %"R "cx \n\t"
+ "pop %"R "dx \n\t"
+ "pop %"R "bx \n\t"
+ "pop %"R "bp \n\t"
+ "pop %"R "bp \n\t"
+ "pop %"R "si \n\t"
+ "pop %"R "di \n\t"
+#ifdef __x86_64__
+ "pop %r8 \n\t"
+ "pop %r9 \n\t"
+ "pop %r10 \n\t"
+ "pop %r11 \n\t"
+ "pop %r12 \n\t"
+ "pop %r13 \n\t"
+ "pop %r14 \n\t"
+ "pop %r15 \n\t"
+#endif
+#ifdef __x86_64__
+ "add $8, %rsp \n\t"
+ "iretq \n\t"
+#else
+ "add $4, %esp \n\t"
+ "iretl \n\t"
+#endif
+ );
+
+static idt_entry_t idt[256];
+
+static int g_fail;
+static int g_tests;
+
+static void report(const char *msg, int pass)
+{
+ ++g_tests;
+ printf("%s: %s\n", msg, (pass ? "PASS" : "FAIL"));
+ if (!pass)
+ ++g_fail;
+}
+
+static u16 read_cs(void)
+{
+ u16 v;
+
+ asm("mov %%cs, %0" : "=rm"(v));
+ return v;
+}
+
+static void init_idt(void)
+{
+ struct {
+ u16 limit;
+ ulong idt;
+ } __attribute__((packed)) idt_ptr = {
+ sizeof(idt_entry_t) * 256 - 1,
+ (ulong)&idt,
+ };
+
+ asm volatile("lidt %0" : : "m"(idt_ptr));
+}
+
+static void set_idt_entry(unsigned vec, void (*func)(isr_regs_t *regs))
+{
+ u8 *thunk = vmalloc(50);
+ ulong ptr = (ulong)thunk;
+ idt_entry_t ent = {
+ .offset0 = ptr,
+ .selector = read_cs(),
+ .ist = 0,
+ .type = 14,
+ .dpl = 0,
+ .p = 1,
+ .offset1 = ptr >> 16,
+#ifdef __x86_64__
+ .offset2 = ptr >> 32,
+#endif
+ };
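+ /* Build a small per-vector thunk: store the C handler's address on the
+ * stack, then jump to the common isr_entry_point. */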
+#ifdef __x86_64__
+ /* sub $8, %rsp */
+ *thunk++ = 0x48; *thunk++ = 0x83; *thunk++ = 0xec; *thunk++ = 0x08;
+ /* movl $func_low, (%rsp) */
+ *thunk++ = 0xc7; *thunk++ = 0x04; *thunk++ = 0x24;
+ *(u32 *)thunk = (ulong)func; thunk += 4;
+ /* movl $func_high, 4(%rsp) */
+ *thunk++ = 0xc7; *thunk++ = 0x44; *thunk++ = 0x24; *thunk++ = 0x04;
+ *(u32 *)thunk = (ulong)func >> 32; thunk += 4;
+ /* jmp isr_entry_point */
+ *thunk ++ = 0xe9;
+ *(u32 *)thunk = (ulong)isr_entry_point - (ulong)(thunk + 4);
+#else
+ /* push $func */
+ *thunk++ = 0x68;
+ *(u32 *)thunk = (ulong)func; thunk += 4;
+ /* jmp isr_entry_point */
+ *thunk ++ = 0xe9;
+ *(u32 *)thunk = (ulong)isr_entry_point - (ulong)(thunk + 4);
+#endif
+ idt[vec] = ent;
+}
+
+static void irq_disable(void)
+{
+ asm volatile("cli");
+}
+
+static void irq_enable(void)
+{
+ asm volatile("sti");
+}
+
+static void eoi(void)
+{
+ apic_write(APIC_EOI, 0);
+}
+
+static int ipi_count;
+
+static void self_ipi_isr(isr_regs_t *regs)
+{
+ ++ipi_count;
+ eoi();
+}
+
+static void test_self_ipi(void)
+{
+ int vec = 0xf1;
+
+ set_idt_entry(vec, self_ipi_isr);
+ irq_enable();
+ apic_write(APIC_ICR,
+ APIC_DEST_SELF | APIC_DEST_PHYSICAL | APIC_DM_FIXED | vec);
+ asm volatile ("nop");
+ report("self ipi", ipi_count == 1);
+}
+
+static void set_ioapic_redir(unsigned line, unsigned vec, unsigned trig_mode)
+{
+ ioapic_redir_entry_t e = {
+ .vector = vec,
+ .delivery_mode = 0,
+ .trig_mode = trig_mode,
+ };
+
+ ioapic_write_redir(line, e);
+}
+
+/* interrupt handlers */
+
+#define TIMER_VEC_BASE 0x90
+
+struct int_table {
+ void (*func)(isr_regs_t *regs);
+ void (*irq_handler)(void *irq_priv);
+ void *irq_priv;
+};
+
+static struct int_table int_handlers[];
+
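+/* Generate a per-vector ISR that dispatches to int_handlers[N] and then
+ * acks the local APIC. */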
+#define decl_irq_handler(N) \
+static void timer_int_##N(isr_regs_t *regs) { \
+ struct int_table *t = &int_handlers[N]; \
+ t->irq_handler(t->irq_priv); \
+ eoi(); \
+}
+
+void set_irq_handler(int vec, void (*func)(void *irq_priv), void *irq_priv);
+
+void hlt(void) { asm volatile("hlt"); }
+
+#define NS_FREQ 1000000000ULL
+#define US_FREQ 1000000ULL
+
+#define ns2cyc(ns) (((ns) * cpu_hz) / NS_FREQ)
+#define cyc2ns(cyc) (((cyc) * NS_FREQ) / cpu_hz)
+
+#define us_to_ns(n) (1000ULL * (n))
+#define ms_to_ns(n) (1000000ULL * (n))
+#define s_to_ns(n) (1000000000ULL * (n))
+
+#define sdelay(n) nsdelay(s_to_ns(n))
+
+u64 cpu_hz;
+
+static inline int fls(int x)
+{
+ int r;
+ asm("bsrl %1,%0\n\t"
+ "jnz 1f\n\t"
+ "movl $-1,%0\n"
+ "1:" : "=r" (r) : "rm" (x));
+ return r + 1;
+}
+
+#define do_div(n, base) ({ \
+ u32 __base = (base); \
+ u32 __rem; \
+ __rem = ((u64)(n)) % __base; \
+ (n) = ((u64)(n)) / __base; \
+ __rem; \
+ })
+
+static inline u64 div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
+{
+ *remainder = dividend % divisor;
+ return dividend / divisor;
+}
+
+static inline u64 div_u64(u64 dividend, u32 divisor)
+{
+ u32 remainder;
+ return div_u64_rem(dividend, divisor, &remainder);
+}
+
+#ifdef CONFIG_32BITS
+#define mod_64(x, y) ((x) - (y) * div64_u64(x, y))
+#else
+#define mod_64(x, y) ((x) % (y))
+#endif
+
+/* 64bit divisor, dividend and result. dynamic precision */
+u64 div64_u64(u64 dividend, u64 divisor)
+{
+ u32 high, d;
+
+ high = divisor >> 32;
+ if (high) {
+ unsigned int shift = fls(high);
+
+ d = divisor >> shift;
+ dividend >>= shift;
+ } else
+ d = divisor;
+
+ return div_u64(dividend, d);
+}
+
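+/* Compute (a * b) / c where the intermediate product may not fit in 64 bits,
+ * by splitting a into 32-bit halves. */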
+static u64 muldiv64(u64 a, u32 b, u32 c)
+{
+ union {
+ u64 ll;
+ struct {
+ u32 low, high;
+ } l;
+ } u, res;
+ u64 rl, rh;
+
+ u.ll = a;
+ rl = (u64)u.l.low * (u64)b;
+ rh = (u64)u.l.high * (u64)b;
+ rh += (rl >> 32);
+ res.l.high = div64_u64(rh, c);
+ res.l.low = div64_u64(((mod_64(rh, c) << 32) + (rl & 0xffffffff)), c);
+ return res.ll;
+}
+
+
+u64 rdtsc(void)
+{
+ unsigned a, d;
+
+ asm volatile("rdtsc" : "=a"(a), "=d"(d));
+ return a | (u64)d << 32;
+}
+
+void wrtsc(u64 tsc)
+{
+ unsigned a = tsc, d = tsc >> 32;
+
+ asm volatile("wrmsr" : : "a"(a), "d"(d), "c"(0x10));
+}
+
+void nsdelay(u64 ns)
+{
+ u64 entry = cyc2ns(rdtsc());
+
+ do {
+ __asm__ volatile ("nop");
+ } while (cyc2ns(rdtsc()) - entry < ns);
+}
+
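+/* A clocksource is a free-running counter used as the time reference;
+ * clocksource_read() below scales its raw reading to nanoseconds using freq. */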
+struct clocksource {
+ char *name;
+ int (*init) (void);
+ u64 (*read) (void);
+ u64 freq;
+};
+
+/* return count in nanoseconds */
+u64 clocksource_read(struct clocksource *clock)
+{
+ u64 val = clock->read();
+
+ val = muldiv64(val, NS_FREQ, clock->freq);
+
+ return val;
+}
+
+struct freq_divisor {
+ unsigned int divisor;
+ unsigned int program_value;
+};
+
+enum clockevt_type { CLOCKEVT_PERIODIC, CLOCKEVT_ONESHOT, };
+
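+/* A clockevent is a programmable timer: init() returns its frequency, arm()
+ * programs a (pre-divided) count in one-shot or periodic mode, cancel() stops
+ * it and remain() reads the current countdown. */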
+struct clockevent {
+ char *name;
+ u64 (*init) (int vec);
+ int (*arm) (u64 count, u64 divisor, enum clockevt_type);
+ void (*cancel)(void);
+ u64 (*remain) (void);
+
+ u64 max_count;
+ u64 freq;
+ struct freq_divisor *divisors;
+
+ unsigned vec;
+};
+
+void clock_arm(struct clockevent *clockevt, enum clockevt_type type,
+ u64 period, u64 divisor)
+{
+ u64 count = (period * clockevt->freq) / NS_FREQ;
+
+ if (count > clockevt->max_count)
+ printf("ERROR clock_arm: %s invalid count %ld (max_count=%ld)\n",
+ clockevt->name, period, clockevt->max_count);
+
+ clockevt->arm(count, divisor, type);
+}
+
+void clock_arm_raw(struct clockevent *clockevt, enum clockevt_type type,
+ u64 count, u64 divisor)
+{
+ if (count > clockevt->max_count)
+ printf("ERROR clock_arm: %s invalid count %ld (max_count=%ld)\n",
+ clockevt->name, count, clockevt->max_count);
+
+ clockevt->arm(count, divisor, type);
+}
+
+
+/* -------- TSC clocksource ------------- */
+
+int tsc_init(void) { printf("%s\n", __func__); return 0; }
+u64 tsc_read(void) { return rdtsc(); }
+
+struct clocksource tsc = {
+ .name = "tsc",
+ .init = tsc_init,
+ .read = tsc_read,
+};
+
+/* --------- ACPI clocksource ----------- */
+
+#define ACPI_PORT 0xb008
+#define ACPI_FREQ 3579545
+
+u64 acpi_time_base;
+
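+/*
+ * The ACPI PM timer is a 24-bit counter running at 3.579545 MHz. Its
+ * overflow interrupt fires whenever bit 23 toggles, i.e. twice per wrap,
+ * so accumulate the full 24-bit range into acpi_time_base only on every
+ * other interrupt. The outw() below acks by clearing TMR_STS in the PM1a
+ * status register (0xb000 on QEMU/PIIX4).
+ */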
+static void acpi_ovf(void *irq_priv)
+{
+ static int flip = 1;
+
+ outw(1, 0xb000);
+ if (flip)
+ acpi_time_base += 0xffffff;
+ flip ^= 1;
+}
+
+int acpi_init(void)
+{
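+ /*
+ * Switch to ACPI mode via the SMI command port (0xb2; 0xf1 appears to
+ * be the ACPI_ENABLE value QEMU/PIIX4 advertises), then set TMR_EN in
+ * PM1a_EN (0xb002) so PM timer overflows raise an interrupt.
+ */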
+ outb(0xf1, 0xb2);
+ outw(1, 0xb002);
+
+ /* wait to synchronize flip above with bit 23 going 1->0 */
+ while (inl(ACPI_PORT) > 0x100);
+
+ set_irq_handler(TIMER_VEC_BASE+3, acpi_ovf, 0);
+ set_ioapic_redir(9, TIMER_VEC_BASE+3, 1);
+
+ return 0;
+}
+
+u64 acpi_read(void)
+{
+ u64 val;
+
+ irq_disable();
+ val = acpi_time_base + inl(ACPI_PORT);
+ irq_enable();
+
+ return val;
+}
+
+struct clocksource acpi = {
+ .name = "acpi",
+ .init = acpi_init,
+ .read = acpi_read,
+ .freq = ACPI_FREQ,
+};
+
+struct clocksource *clocksources[] = { &tsc, &acpi, };
+
+/* --------- HPET clockevent ---------- */
+
+#define HPET_CFG 0x010
+#define HPET_COUNTER 0x0f0
+
+#define HPET_Tn_CFG(n) (0x100 + 0x20 * n)
+#define HPET_Tn_CMP(n) (0x108 + 0x20 * n)
+
+static void *hpet_addr = (void *)0xfed00000;
+
+static unsigned long hpet_readl(unsigned reg)
+{
+ return *(volatile unsigned long *)(hpet_addr + reg);
+}
+
+static void hpet_writel(unsigned reg, unsigned long val)
+{
+ *(volatile unsigned long *)(hpet_addr + reg) = val;
+}
+
+struct freq_divisor hpet_divisors[] = {
+ {1, 1},
+ {-1, -1},
+};
+
+int hpet_vec;
+
+u64 hpet_timer_init(int vec)
+{
+ u64 val;
+
+ hpet_writel(HPET_CFG, hpet_readl(HPET_CFG)|0x3);
+
+ val = hpet_readl(HPET_COUNTER);
+ sdelay(1);
+ val = hpet_readl(HPET_COUNTER) - val;
+ printf("%s: detected %lld Hz timer\n", __func__, val);
+ hpet_vec = vec;
+
+ return val;
+}
+
+int hpet_timer_arm(u64 period, u64 divisor, enum clockevt_type type)
+{
+ u64 cfg = 0x4;
+
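+ /*
+ * cfg starts as Tn_INT_ENB_CNF (bit 2); bit 3 (Tn_TYPE_CNF) selects
+ * periodic mode. With legacy replacement routing enabled in HPET_CFG,
+ * timer 1 is wired to IRQ8, hence the IOAPIC line 8 redirection.
+ */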
+ set_ioapic_redir(8, hpet_vec, 0);
+ if (type == CLOCKEVT_PERIODIC)
+ cfg |= 0x8;
+
+ hpet_writel(HPET_Tn_CFG(1), cfg);
+ hpet_writel(HPET_COUNTER, 0);
+ hpet_writel(HPET_CFG, hpet_readl(HPET_CFG)|0x3); /* enable HPET */
+ hpet_writel(HPET_Tn_CMP(1), period);
+
+ return 0;
+}
+
+void hpet_timer_cancel(void)
+{
+ hpet_writel(HPET_Tn_CFG(1), 0);
+ hpet_writel(HPET_CFG, hpet_readl(HPET_CFG) & ~(0x3));
+}
+
+u64 hpet_timer_remain(void)
+{
+ return hpet_readl(HPET_Tn_CMP(1)) - hpet_readl(HPET_COUNTER);
+}
+
+struct clockevent hpet_timer = {
+ .name = "hpet",
+ .init = hpet_timer_init,
+ .arm = hpet_timer_arm,
+ .cancel = hpet_timer_cancel,
+ .remain = hpet_timer_remain,
+ .max_count = 0xffffffff,
+ .divisors = hpet_divisors,
+};
+
+/* --------- LAPIC clockevent ---------- */
+
+static void dummy(void *irq_priv)
+{
+}
+
+struct freq_divisor lapic_divisors[] = {
+ {1, 0xB},
+ {2, 0x0},
+ {4, 0x1},
+ {8, 0x2},
+ {16, 0x3},
+ {32, 0x8},
+ {64, 0x9},
+ {128, 0xA},
+ {-1, -1},
+};
+
+u64 lapic_timer_init(int vec)
+{
+ u64 hz;
+
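+ /*
+ * Calibrate: program a one-shot with divider 1 and the maximum initial
+ * count, wait one second, and read back how far the current count has
+ * dropped to get the timer frequency in Hz.
+ */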
+ set_irq_handler(vec, dummy, 0);
+ apic_write(APIC_LVTT, vec);
+ apic_write(APIC_TDCR, 0xB); /* divide by 1 */
+ apic_write(APIC_TMICT, 0xffffffff);
+ sdelay(1);
+ hz = 0xffffffff - apic_read(APIC_TMCCT);
+ printf("%s: detected %d Hz timer\n", __func__, hz);
+ return hz;
+}
+
+int lapic_timer_arm(u64 period, u64 divisor, enum clockevt_type type)
+{
+ if (type == CLOCKEVT_PERIODIC)
+ apic_write(APIC_LVTT, apic_read(APIC_LVTT) | 1 << 17);
+ /* divide count */
+ apic_write(APIC_TDCR, divisor);
+ /* initial count */
+ apic_write(APIC_TMICT, period);
+ return 0;
+}
+
+void lapic_timer_cancel(void)
+{
+ apic_write(APIC_LVTT, apic_read(APIC_LVTT) & ~(1 << 17)); /* one-shot */
+ apic_write(APIC_TMICT, 0);
+}
+
+u64 lapic_timer_remain(void)
+{
+ return apic_read(APIC_TMCCT);
+}
+
+struct clockevent lapic_timer = {
+ .name = "lapic",
+ .init = lapic_timer_init,
+ .arm = lapic_timer_arm,
+ .cancel = lapic_timer_cancel,
+ .remain = lapic_timer_remain,
+ .max_count = 0xffffffff,
+ .divisors = lapic_divisors,
+};
+
+/* ---------- PIT clockevent --------- */
+
+#define PIT_FREQ 1193181
+#define PIT_CNT_0 0x40
+#define PIT_CNT_1 0x41
+#define PIT_CNT_2 0x42
+#define PIT_TCW 0x43
+
+u64 pit_timer_remain(void)
+{
+ u8 lo, hi;
+
+ outb(0xf0, PIT_TCW);
+ lo = inb(PIT_CNT_0);
+ hi = inb(PIT_CNT_0);
+ return lo | hi << 8;
+}
+
+u64 pit_timer_init(int vec)
+{
+ set_ioapic_redir(2, vec, 0);
+ /* mask LINT0, int is coming through IO-APIC */
+ apic_write(APIC_LVT0, 1 << 16);
+ return PIT_FREQ;
+}
+
+int pit_timer_arm(u64 period, u64 divisor, enum clockevt_type type)
+{
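+ /*
+ * Control word 0x30 selects counter 0, lobyte/hibyte access, mode 0
+ * (interrupt on terminal count); OR-ing in 0x4 switches to mode 2
+ * (rate generator) for periodic operation. The HPET is disabled first
+ * so its legacy replacement routing does not steal IRQ0 from the PIT.
+ */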
+ unsigned char ctrl_word = 0x30;
+
+ hpet_writel(HPET_CFG, hpet_readl(HPET_CFG) & ~(0x3)); /* disable HPET */
+ if (type == CLOCKEVT_PERIODIC)
+ ctrl_word |= 0x4;
+ outb(ctrl_word, PIT_TCW);
+ outb(period & 0xff, PIT_CNT_0);
+ outb((period & 0xff00) >> 8, PIT_CNT_0);
+ return 0;
+}
+
+void pit_timer_cancel(void)
+{
+ unsigned char ctrl_word = 0x30;
+ outb(ctrl_word, PIT_TCW);
+ outb(0, PIT_CNT_0);
+ outb(0, PIT_CNT_0);
+}
+
+struct freq_divisor pit_divisors[] = {
+ {1, 1},
+ {-1, -1},
+};
+
+struct clockevent pit_timer = {
+ .name = "pit",
+ .init = pit_timer_init,
+ .arm = pit_timer_arm,
+ .cancel = pit_timer_cancel,
+ .remain = pit_timer_remain,
+ .divisors = pit_divisors,
+ .max_count = 0xffff,
+};
+
+
+#define NR_CLOCKEVENTS 3
+
+/* clockevent initialization */
+struct clockevent *clockevents[NR_CLOCKEVENTS] = {
+ &pit_timer, &lapic_timer, &hpet_timer,
+};
+
+decl_irq_handler(0) /* PIT */
+decl_irq_handler(1) /* LAPIC */
+decl_irq_handler(2) /* HPET */
+decl_irq_handler(3) /* ACPI OVF */
+
+static struct int_table int_handlers[NR_CLOCKEVENTS+1] = {
+ { .func = timer_int_0 },
+ { .func = timer_int_1 },
+ { .func = timer_int_2 },
+ { .func = timer_int_3 },
+};
+
+void set_irq_handler(int vec, void (*func)(void *irq_priv), void *irq_priv)
+{
+ int int_table_idx = vec - TIMER_VEC_BASE;
+
+ if (int_table_idx < 0 || int_table_idx >= NR_CLOCKEVENTS+1) {
+ printf("%s invalid vec\n", __func__);
+ return;
+ }
+
+ int_handlers[int_table_idx].irq_handler = func;
+ int_handlers[int_table_idx].irq_priv = irq_priv;
+}
+
+void init_interrupts(void)
+{
+ int i;
+
+ for (i = 0; i < NR_CLOCKEVENTS+1; i++) {
+ int vec = TIMER_VEC_BASE+i;
+
+ set_idt_entry(vec, int_handlers[i].func);
+ }
+}
+
+int init_clockevents(void)
+{
+ int i;
+ int vec = TIMER_VEC_BASE;
+
+ for (i=0; i < ARRAY_SIZE(clockevents); i++) {
+ u64 freq = clockevents[i]->init(vec);
+ clockevents[i]->freq = freq;
+ clockevents[i]->vec = vec;
+ vec++;
+ }
+ return 0;
+}
+
+void init_clocksources(void)
+{
+ int i;
+
+ for(i=0; i < ARRAY_SIZE(clocksources); i++)
+ clocksources[i]->init();
+}
+
+/* actual tests */
+
+#define TIME_TABLE_SZ 70
+struct time_table {
+ int idx;
+ struct clocksource *source;
+ struct clockevent *event;
+ u64 period;
+ u64 count;
+ u32 freq_divider;
+ unsigned long int val[TIME_TABLE_SZ];
+ unsigned long int remain[TIME_TABLE_SZ];
+};
+
+void time_table_record(struct time_table *t)
+{
+ t->remain[t->idx] = t->event->remain();
+ t->val[t->idx] = clocksource_read(t->source);
+ t->idx++;
+ if (t->idx >= TIME_TABLE_SZ/2)
+ t->idx = 0;
+}
+
+void dump_time_table(struct time_table *t)
+{
+ int i;
+
+ for (i = 1; i < t->idx; i++)
+ printf("i=%d %lld - %lld = %lld (expect %lld) %% %d off\n", i,
+ t->val[i], t->val[i-1], t->val[i] - t->val[i-1], t->period,
+ 100 - ((t->val[i] - t->val[i-1])*100 / t->period));
+}
+
+/* Tunables
+ *
+ * pct_delta_threshold: allowed deviation, in percent, between the measured
+ * period and the programmed period before the time table is dumped
+ * pct_remain_threshold: same, for the remaining count read back from the
+ * clockevent in the interrupt handler
+ * nr_periods_per_clock: test periodic clocks at counts stepped by
+ * max_count/nr_periods_per_clock, up to max_count
+ */
+
+int pct_delta_threshold = 10;
+int pct_remain_threshold = 20;
+int nr_periods_per_clock = 4;
+
+void inspect_table(struct time_table *t)
+{
+ int i;
+ u64 percent_avg = 0;
+ int dump_table = 0;
+
+ if (t->idx <= 2 || t->period == 0) {
+ printf("failure\n");
+ return;
+ }
+
+ /* the first entry can be a little off if programming is slow (e.g. hpet) */
+ for (i = 2; i < t->idx; i++) {
+ u64 fire_period = t->val[i] - t->val[i-1];
+ u64 percent_off = (fire_period*100) / t->period;
+
+ percent_avg += percent_off;
+ if (percent_off < 100-pct_delta_threshold ||
+ percent_off > 100+pct_delta_threshold)
+ dump_table = 1;
+ }
+
+ percent_avg /= t->idx - 2;
+
+ if (dump_table) {
+ printf("%s vs %s (period=%lld divider=%d) ", t->source->name,
+ t->event->name, t->period, t->freq_divider);
+ printf("correctness = %d %% off\n", percent_avg-100);
+ dump_time_table(t);
+ }
+}
+
+void inspect_table_remain(struct time_table *t)
+{
+ int i;
+ int dump_table = 0;
+
+ for (i = 2; i < t->idx; i++) {
+ u64 percent_off = (t->remain[i]*100) / t->count;
+ if (percent_off < 100-pct_remain_threshold ||
+ percent_off > 100+pct_remain_threshold)
+ dump_table = 1;
+ }
+
+ if (dump_table) {
+ printf("%s vs %s (period=%lld divider=%d)\n", t->source->name,
+ t->event->name, t->period, t->freq_divider);
+ for (i = 0; i < t->idx; i++)
+ printf("remain i=%d %lld (expect %lld) %d %% off\n", i,
+ t->remain[i], t->count,
+ ((t->remain[i]*100) / t->count)-100);
+ }
+}
+
+static void timer_int_record(void *irq_priv)
+{
+ time_table_record(irq_priv);
+}
+
+#define NR_TABLE_RECORD 30
+
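+/*
+ * Arm the clockevent in periodic mode, record the clocksource reading and
+ * the clockevent's remaining count on each interrupt, then compare the
+ * deltas against the expected period and the remain values against the
+ * programmed count.
+ */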
+void __test_periodic_one_clock(struct clockevent *clockevt,
+ struct clocksource *source, u64 count,
+ u64 freq_divider, u64 freq_divider_value)
+{
+ u64 period;
+ struct time_table *t = vmalloc(sizeof(struct time_table));
+
+ period = muldiv64(count, NS_FREQ, clockevt->freq);
+
+ t->idx = 0;
+ t->period = period;
+ t->count = count / freq_divider;
+ t->source = source;
+ t->event = clockevt;
+ t->freq_divider = freq_divider;
+
+ clockevt->cancel();
+ set_irq_handler(clockevt->vec, timer_int_record, t);
+
+ clock_arm_raw(clockevt, CLOCKEVT_PERIODIC, count/freq_divider, freq_divider_value);
+
+ while (t->idx <= NR_TABLE_RECORD)
+ hlt();
+
+ clockevt->cancel();
+ inspect_table(t);
+ inspect_table_remain(t);
+ vfree(t);
+}
+
+void test_periodic_one_clock(struct clockevent *clockevt,
+ struct clocksource *source, u64 count)
+{
+ struct freq_divisor *entry = clockevt->divisors;
+
+ while (entry && entry->divisor != -1) {
+ if (count < entry->divisor)
+ break;
+ __test_periodic_one_clock(clockevt, source, count, entry->divisor,
+ entry->program_value);
+ entry++;
+ }
+}
+
+void test_periodic_one_clock2(struct clockevent *clockevt,
+ struct clocksource *source, u64 count)
+{
+ struct freq_divisor *entry = clockevt->divisors;
+
+ while (entry && entry->divisor != -1) {
+ __test_periodic_one_clock(clockevt, source, count*entry->divisor,
+ entry->divisor, entry->program_value);
+ entry++;
+ }
+}
+
+void test_periodic_events(void)
+{
+ int i, x;
+
+ for (x = 0; x < ARRAY_SIZE(clocksources); x++) {
+ struct clocksource *clocksource = clocksources[x];
+
+ for (i = 0; i < ARRAY_SIZE(clockevents); i++) {
+ struct clockevent *clockevt = clockevents[i];
+ u64 count, inc;
+
+ printf("clockevent = %s clocksource = %s\n", clockevt->name,
+ clocksource->name);
+
+ inc = clockevt->max_count / nr_periods_per_clock;
+ for (count = inc; count < clockevt->max_count;
+ count += inc)
+ test_periodic_one_clock(clockevt, clocksource, count);
+
+ test_periodic_one_clock(clockevt, clocksource, 10);
+ test_periodic_one_clock2(clockevt, clocksource,
+ clockevt->max_count);
+ }
+ }
+}
+
+u64 static_periods[] = { 1, 2, 5, 10, 20};
+
+void test_periodic_events_short(void)
+{
+ int i, x, n;
+
+ for (x = 0; x < ARRAY_SIZE(clocksources); x++) {
+ struct clocksource *clocksource = clocksources[x];
+
+ for (i = 0; i < ARRAY_SIZE(clockevents); i++) {
+ struct clockevent *clockevt = clockevents[i];
+
+
+ for (n = 0; n < ARRAY_SIZE(static_periods); n++) {
+ printf("clockevent = %s clocksource = %s period = %dms\n",
+ clockevt->name, clocksource->name, static_periods[n]);
+ test_periodic_one_clock(clockevt, clocksource,
+ muldiv64(ms_to_ns(static_periods[n]),
+ clockevt->freq, NS_FREQ));
+ }
+ }
+ }
+}
+
+/*
+ * Early calibration against the PIT to detect the TSC frequency; the
+ * sdelay()-based calibration of the lapic and hpet timers depends on
+ * cpu_hz being set.
+ */
+volatile int timer_isr;
+static void timer_int_handler(void *irq_priv)
+{
+ timer_isr++;
+}
+
+void early_calibrate_cpu_hz(void)
+{
+ u64 t1, t2;
+ int ints_per_sec = (PIT_FREQ/0xffff)+1;
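+ /*
+ * (PIT_FREQ/0xffff)+1 = 19 periods of 0xffff PIT ticks, roughly 1.04s,
+ * so cpu_hz is only approximate; that is acceptable given the 10%
+ * pct_delta_threshold used by the tests.
+ */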
+
+ timer_isr = 0;
+
+ pit_timer.arm(0xffff, 1, CLOCKEVT_PERIODIC);
+ t1 = rdtsc();
+ do {
+ hlt();
+ } while (timer_isr < ints_per_sec);
+ t2 = rdtsc();
+ cpu_hz = t2 - t1;
+ printf("detected %lld MHz cpu\n", cpu_hz/1000/1000);
+ tsc.freq = cpu_hz;
+}
+
+void early_calibrate(void)
+{
+ pit_timer.init(TIMER_VEC_BASE);
+ set_irq_handler(TIMER_VEC_BASE, timer_int_handler, 0);
+ early_calibrate_cpu_hz();
+}
+
+int main()
+{
+ setup_vm();
+ init_interrupts();
+
+ init_idt();
+
+ test_self_ipi();
+
+ early_calibrate();
+ init_clocksources();
+ init_clockevents();
+
+ test_periodic_events_short();
+ /* the full test is slow since it uses maximum counts and dividers */
+ /* test_periodic_events(); */
+
+ return g_fail != 0;
+}
+