/*
 * Copyright (c) 2021 Intel Corporation
 * SPDX-License-Identifier: Apache-2.0
 */

#include <drivers/timer/system_timer.h>
#include <sys_clock.h>
#include <spinlock.h>
#include <drivers/interrupt_controller/loapic.h>

#define IA32_TSC_DEADLINE_MSR 0x6e0
#define IA32_TSC_ADJUST_MSR   0x03b

#define CYC_PER_TICK (CONFIG_SYS_CLOCK_HW_CYCLES_PER_SEC	\
		      / (uint64_t) CONFIG_SYS_CLOCK_TICKS_PER_SEC)

struct apic_timer_lvt {
	uint8_t vector : 8;
	uint8_t unused0 : 8;
	uint8_t masked : 1;
	enum { ONE_SHOT, PERIODIC, TSC_DEADLINE } mode: 2;
	uint32_t unused2 : 13;
};

static struct k_spinlock lock;
static uint64_t last_announce;
static union { uint32_t val; struct apic_timer_lvt lvt; } lvt_reg;

static ALWAYS_INLINE uint64_t rdtsc(void)
{
	uint32_t hi, lo;

	__asm__ volatile("rdtsc" : "=d"(hi), "=a"(lo));
	return lo + (((uint64_t)hi) << 32);
}

static void isr(const void *arg)
{
	ARG_UNUSED(arg);

	k_spinlock_key_t key = k_spin_lock(&lock);
	uint32_t ticks = (rdtsc() - last_announce) / CYC_PER_TICK;

	last_announce += ticks * CYC_PER_TICK;
	k_spin_unlock(&lock, key);
	sys_clock_announce(ticks);

	if (!IS_ENABLED(CONFIG_TICKLESS_KERNEL)) {
		sys_clock_set_timeout(1, false);
	}
}

static inline void wrmsr(int32_t msr, uint64_t val)
{
	uint32_t hi = (uint32_t) (val >> 32);
	uint32_t lo = (uint32_t) val;

	__asm__ volatile("wrmsr" :: "d"(hi), "a"(lo), "c"(msr));
}

void sys_clock_set_timeout(int32_t ticks, bool idle)
{
	ARG_UNUSED(idle);

	uint64_t now = rdtsc();
	k_spinlock_key_t key = k_spin_lock(&lock);
	uint64_t expires = now + MAX(ticks - 1, 0) * CYC_PER_TICK;

	expires = last_announce + (((expires - last_announce + CYC_PER_TICK - 1)
				    / CYC_PER_TICK) * CYC_PER_TICK);

	/* The second condition is to catch the wraparound.
	 * Interpreted strictly, the IA SDM description of the
	 * TSC_DEADLINE MSR implies that it will trigger an immediate
	 * interrupt if we try to set an expiration across the 64 bit
	 * rollover.  Unfortunately there's no way to test that, as on
	 * real hardware it requires more than a century of uptime,
	 * but this is cheap and safe.
	 */
	if (ticks == K_TICKS_FOREVER || expires < last_announce) {
		expires = UINT64_MAX;
	}

	wrmsr(IA32_TSC_DEADLINE_MSR, expires);
	k_spin_unlock(&lock, key);
}

uint32_t sys_clock_elapsed(void)
{
	k_spinlock_key_t key = k_spin_lock(&lock);
	uint32_t ret = (rdtsc() - last_announce) / CYC_PER_TICK;

	k_spin_unlock(&lock, key);
	return ret;
}

uint32_t sys_clock_cycle_get_32(void)
{
	return (uint32_t) rdtsc();
}

static inline uint32_t timer_irq(void)
{
	/* The Zephyr APIC API is... idiosyncratic.  The timer is a
	 * "local vector table" interrupt.  These aren't system IRQs
	 * presented to the IO-APIC, they're indices into a register
	 * array in the local APIC.  By Zephyr convention they come
	 * after all the external IO-APIC interrupts, but that number
	 * changes depending on device configuration, so we have to
	 * fetch it at runtime.  The timer happens to be the first
	 * entry in the table.
	 */
	return z_loapic_irq_base();
}

/* The TSC_ADJUST MSR implements a synchronized offset such that
 * multiple CPUs (within a socket, anyway) can synchronize exactly, or
 * implement managed timing spaces for guests in a recoverable way,
 * etc...  We set it to zero on all cores for simplicity, because
 * firmware often leaves it in an inconsistent state between cores.
 */
static void clear_tsc_adjust(void)
{
	/* But don't touch it on ACRN, where a hypervisor bug
	 * confuses the APIC emulation and deadline interrupts don't
	 * arrive.
	 */
#ifndef CONFIG_BOARD_ACRN
	wrmsr(IA32_TSC_ADJUST_MSR, 0);
#endif
}

void smp_timer_init(void)
{
	/* Copy the LVT configuration from CPU0, because IRQ_CONNECT()
	 * doesn't know how to manage LVT interrupts for anything
	 * other than the calling/initial CPU.  The same fence is needed
	 * to prevent later MSR writes from reordering before the APIC
	 * configuration write.
	 */
	x86_write_loapic(LOAPIC_TIMER, lvt_reg.val);
	__asm__ volatile("mfence" ::: "memory");
	clear_tsc_adjust();
	irq_enable(timer_irq());
}

static inline void cpuid(uint32_t *eax, uint32_t *ebx, uint32_t *ecx, uint32_t *edx)
{
	__asm__ volatile("cpuid"
			 : "=b"(*ebx), "=c"(*ecx), "=d"(*edx)
			 : "a"(*eax), "c"(*ecx));
}

int sys_clock_driver_init(const struct device *dev)
{
#ifdef CONFIG_ASSERT
	uint32_t eax, ebx, ecx, edx;

	eax = 1; ecx = 0;
	cpuid(&eax, &ebx, &ecx, &edx);
	__ASSERT((ecx & BIT(24)) != 0, "No TSC Deadline support");

	eax = 0x80000007; ecx = 0;
	cpuid(&eax, &ebx, &ecx, &edx);
	__ASSERT((edx & BIT(8)) != 0, "No Invariant TSC support");

	eax = 7; ecx = 0;
	cpuid(&eax, &ebx, &ecx, &edx);
	__ASSERT((ebx & BIT(1)) != 0, "No TSC_ADJUST MSR support");
#endif

	clear_tsc_adjust();

	/* Timer interrupt number is runtime-fetched, so can't use
	 * static IRQ_CONNECT()
	 */
	irq_connect_dynamic(timer_irq(), CONFIG_APIC_TIMER_IRQ_PRIORITY, isr, 0, 0);

	lvt_reg.val = x86_read_loapic(LOAPIC_TIMER);
	lvt_reg.lvt.mode = TSC_DEADLINE;
	lvt_reg.lvt.masked = 0;
	x86_write_loapic(LOAPIC_TIMER, lvt_reg.val);

	/* Per the SDM, the TSC_DEADLINE MSR is not serializing, so
	 * this fence is needed to be sure that an upcoming MSR write
	 * (i.e. a timeout we're about to set) cannot possibly reorder
	 * around the initialization we just did.
	 */
	__asm__ volatile("mfence" ::: "memory");

	last_announce = rdtsc();
	irq_enable(timer_irq());

	if (!IS_ENABLED(CONFIG_TICKLESS_KERNEL)) {
		sys_clock_set_timeout(1, false);
	}

	return 0;
}