/*
 * Copyright (c) 2019 Synopsys.
 *
 * SPDX-License-Identifier: Apache-2.0
 */

/**
 * @file
 * @brief Code required for ARC multicore and Zephyr SMP support
 *
 */
#include <device.h>
#include <kernel.h>
#include <kernel_structs.h>
#include <ksched.h>
#include <soc.h>
#include <init.h>

#ifndef IRQ_ICI
#define IRQ_ICI 19
#endif

#define ARCV2_ICI_IRQ_PRIORITY 1

volatile struct {
	arch_cpustart_t fn;
	void *arg;
} arc_cpu_init[CONFIG_MP_NUM_CPUS];

/*
 * arc_cpu_wake_flag is used to synchronize the master core and the
 * slave cores. A slave core spins on arc_cpu_wake_flag until the master
 * core sets it to that slave's core id. The slave then clears it to
 * notify the master core that it has woken up.
 */
volatile uint32_t arc_cpu_wake_flag;

volatile char *arc_cpu_sp;
/*
 * _curr_cpu records the _cpu_t struct of each CPU,
 * for efficient access from assembly.
 */
volatile _cpu_t *_curr_cpu[CONFIG_MP_NUM_CPUS];

/* Called from Zephyr initialization */
void arch_start_cpu(int cpu_num, k_thread_stack_t *stack, int sz,
		    arch_cpustart_t fn, void *arg)
{
	_curr_cpu[cpu_num] = &(_kernel.cpus[cpu_num]);
	arc_cpu_init[cpu_num].fn = fn;
	arc_cpu_init[cpu_num].arg = arg;

	/* pass the initial stack pointer of the target core through
	 * arc_cpu_sp; arc_cpu_wake_flag protects arc_cpu_sp so that
	 * only one slave CPU can read it at a time
	 */
	arc_cpu_sp = Z_KERNEL_STACK_BUFFER(stack) + sz;

	arc_cpu_wake_flag = cpu_num;

	/* wait for the slave CPU to start */
	while (arc_cpu_wake_flag != 0U) {
		;
	}
}
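/*
 * Illustrative sketch only (not part of this file): the slave side of the
 * arc_cpu_wake_flag handshake above lives in the early boot path (in
 * assembly), not here. The C rendering below shows the expected protocol
 * under that assumption; slave_early_boot() is a hypothetical name for
 * that boot code.
 */
#if 0
static void slave_early_boot(int cpu_num)
{
	/* spin until the master publishes this core's id */
	while (arc_cpu_wake_flag != (uint32_t)cpu_num) {
		;
	}

	/* take the stack pointer handed off by arch_start_cpu() */
	char *sp = (char *)arc_cpu_sp;

	/* clear the flag: the master's wait loop in arch_start_cpu()
	 * terminates, and arc_cpu_sp may be reused for the next core
	 */
	arc_cpu_wake_flag = 0U;

	/* the real boot code switches to sp and then enters the C
	 * entry point below
	 */
	ARG_UNUSED(sp);
	z_arc_slave_start(cpu_num);
}
#endif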
#ifdef CONFIG_SMP
static void arc_connect_debug_mask_update(int cpu_num)
{
	uint32_t core_mask = 1 << cpu_num;

	core_mask |= z_arc_connect_debug_select_read();
	z_arc_connect_debug_select_set(core_mask);
	/* Debugger halts cores under all of the following conditions:
	 * ARC_CONNECT_CMD_DEBUG_MASK_H: Core global halt.
	 * ARC_CONNECT_CMD_DEBUG_MASK_AH: Actionpoint halt.
	 * ARC_CONNECT_CMD_DEBUG_MASK_BH: Software breakpoint halt.
	 * ARC_CONNECT_CMD_DEBUG_MASK_SH: Self halt.
	 */
	z_arc_connect_debug_mask_set(core_mask, (ARC_CONNECT_CMD_DEBUG_MASK_SH
		| ARC_CONNECT_CMD_DEBUG_MASK_BH | ARC_CONNECT_CMD_DEBUG_MASK_AH
		| ARC_CONNECT_CMD_DEBUG_MASK_H));
}
#endif

/* C entry point for slave cores */
void z_arc_slave_start(int cpu_num)
{
	arch_cpustart_t fn;

#ifdef CONFIG_SMP
	struct arc_connect_bcr bcr;

	bcr.val = z_arc_v2_aux_reg_read(_ARC_V2_CONNECT_BCR);

	if (bcr.dbg) {
		/* configure inter-core debug unit if available */
		arc_connect_debug_mask_update(cpu_num);
	}

	z_irq_setup();

	z_arc_connect_ici_clear();
	z_irq_priority_set(IRQ_ICI, ARCV2_ICI_IRQ_PRIORITY, 0);
	irq_enable(IRQ_ICI);
#endif
	/* call the function set by arch_start_cpu */
	fn = arc_cpu_init[cpu_num].fn;
	fn(arc_cpu_init[cpu_num].arg);
}

#ifdef CONFIG_SMP

static void sched_ipi_handler(const void *unused)
{
	ARG_UNUSED(unused);

	z_arc_connect_ici_clear();
	z_sched_ipi();
}

/* arch implementation of sched_ipi */
void arch_sched_ipi(void)
{
	uint32_t i;

	/* broadcast the sched_ipi request to the other cores;
	 * if the target is the current core, the hardware ignores it
	 */
	for (i = 0U; i < CONFIG_MP_NUM_CPUS; i++) {
		z_arc_connect_ici_generate(i);
	}
}

static int arc_smp_init(const struct device *dev)
{
	ARG_UNUSED(dev);
	struct arc_connect_bcr bcr;

	/* necessary master core init */
	_curr_cpu[0] = &(_kernel.cpus[0]);

	bcr.val = z_arc_v2_aux_reg_read(_ARC_V2_CONNECT_BCR);

	if (bcr.dbg) {
		/* configure inter-core debug unit if available */
		arc_connect_debug_mask_update(0);
	}

	if (bcr.ipi) {
		/* register the ICI interrupt; only the master core
		 * needs to do this, and only once
		 */
		z_arc_connect_ici_clear();
		IRQ_CONNECT(IRQ_ICI, ARCV2_ICI_IRQ_PRIORITY,
			    sched_ipi_handler, NULL, 0);
		irq_enable(IRQ_ICI);
	} else {
		__ASSERT(0, "ARC connect has no inter-core interrupt\n");
		return -ENODEV;
	}

	if (bcr.gfrc) {
		/* initialize the global free running counter (GFRC) */
		z_arc_connect_gfrc_enable();

		/* the GFRC halts when all cores halt */
		z_arc_connect_gfrc_core_set((1 << CONFIG_MP_NUM_CPUS) - 1);
		z_arc_connect_gfrc_clear();
	} else {
		__ASSERT(0, "ARC connect has no global free running counter\n");
		return -ENODEV;
	}

	return 0;
}

SYS_INIT(arc_smp_init, PRE_KERNEL_1, CONFIG_KERNEL_INIT_PRIORITY_DEFAULT);
#endif
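/*
 * Illustrative sketch only (not part of this file): once arc_smp_init()
 * has enabled the GFRC, it can serve as a 64-bit timestamp that is
 * consistent across all cores. This assumes a z_arc_connect_gfrc_read()
 * helper exists alongside the gfrc enable/core_set/clear calls used
 * above; smp_global_timestamp() is a hypothetical wrapper name.
 */
#if 0
static inline uint64_t smp_global_timestamp(void)
{
	/* one counter shared by all cores, so values read on different
	 * cores are directly comparable
	 */
	return z_arc_connect_gfrc_read();
}
#endif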