/* SPDX-License-Identifier: Apache-2.0 */

#if defined(CONFIG_GEN_SW_ISR_TABLE) && defined(CONFIG_DYNAMIC_INTERRUPTS)
SECTION_DATA_PROLOGUE(sw_isr_table,,)
{
        /*
         * Some architectures require each entry to be aligned to an
         * arch-specific boundary so that a double-word load instruction
         * can be used. See include/sw_isr_table.h.
         */
        . = ALIGN(CONFIG_ARCH_SW_ISR_TABLE_ALIGN);
        *(_SW_ISR_TABLE_SECTION_SYMS)
} GROUP_DATA_LINK_IN(RAMABLE_REGION, ROMABLE_REGION)
#endif

/*
 * Space for storing the per-device init status and, when PM is enabled,
 * the per-device busy bitmap. Since the number of devices is not known
 * beforehand, the mechanism below allocates the required space. Each
 * bitfield holds 1 bit per device instance, so we compute the size of an
 * entire bitfield, aligned on 32 bits.
 */
#define DEVICE_COUNT \
        ((__device_end - __device_start) / _DEVICE_STRUCT_SIZEOF)
#define DEVICE_BITFIELD_SIZE (((DEVICE_COUNT + 31) / 32) * 4)

#define DEVICE_INIT_STATUS_BITFIELD()           \
        FILL(0x00);                             \
        __device_init_status_start = .;         \
        . = . + DEVICE_BITFIELD_SIZE;           \
        __device_init_status_end = .;

#ifdef CONFIG_PM_DEVICE
#define DEVICE_BUSY_BITFIELD()                  \
        FILL(0x00);                             \
        __device_busy_start = .;                \
        . = . + DEVICE_BITFIELD_SIZE;           \
        __device_busy_end = .;
#else
#define DEVICE_BUSY_BITFIELD()
#endif

SECTION_DATA_PROLOGUE(devices,,)
{
        /* Link in device objects, which are tied to the init objects;
         * the device objects are thus sorted in the same order as their
         * parent init objects. See include/device.h.
         */
        __device_start = .;
        CREATE_OBJ_LEVEL(device, PRE_KERNEL_1)
        CREATE_OBJ_LEVEL(device, PRE_KERNEL_2)
        CREATE_OBJ_LEVEL(device, POST_KERNEL)
        CREATE_OBJ_LEVEL(device, APPLICATION)
        CREATE_OBJ_LEVEL(device, SMP)
        __device_end = .;
        DEVICE_INIT_STATUS_BITFIELD()
        DEVICE_BUSY_BITFIELD()
} GROUP_DATA_LINK_IN(RAMABLE_REGION, ROMABLE_REGION)

SECTION_DATA_PROLOGUE(initshell,,)
{
        /* Link in shell initialization objects for all modules that use
         * the shell; their shell commands are automatically initialized
         * by the kernel.
         */
        __shell_module_start = .;
        KEEP(*(".shell_module_*"));
        __shell_module_end = .;
        __shell_cmd_start = .;
        KEEP(*(".shell_cmd_*"));
        __shell_cmd_end = .;
} GROUP_DATA_LINK_IN(RAMABLE_REGION, ROMABLE_REGION)

SECTION_DATA_PROLOGUE(log_dynamic_sections,,)
{
        __log_dynamic_start = .;
        KEEP(*(SORT(.log_dynamic_*)));
        __log_dynamic_end = .;
} GROUP_DATA_LINK_IN(RAMABLE_REGION, ROMABLE_REGION)

Z_ITERABLE_SECTION_RAM(_static_thread_data, 4)
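/*
 * Note on usage: the __device_start/__device_end symbols and the
 * init-status/busy bitfields defined above are consumed from C through
 * extern declarations. The following is a minimal illustrative sketch,
 * not the kernel's actual accessor (the real API lives in include/device.h
 * and kernel/device.c); it assumes the usual kernel headers and a simple
 * byte-wise bit layout, which is an assumption made only for illustration:
 *
 *      extern const struct device __device_start[];
 *      extern const struct device __device_end[];
 *      extern uint8_t __device_init_status_start[];
 *
 *      static bool device_init_status_bit(const struct device *dev)
 *      {
 *              // Index of this instance within the devices section
 *              size_t idx = (size_t)(dev - __device_start);
 *
 *              // Hypothetical bit lookup; only the symbol names and the
 *              // one-bit-per-device sizing come from this script.
 *              return (__device_init_status_start[idx / 8] >> (idx % 8)) & 1U;
 *      }
 */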
#ifdef CONFIG_USERSPACE
/* All kernel objects within are assumed to be either completely
 * initialized at build time, or initialized automatically at runtime
 * via iteration before the POST_KERNEL phase.
 *
 * These two symbols are only used by gen_kobject_list.py.
 */
_static_kernel_objects_begin = .;
#endif /* CONFIG_USERSPACE */

Z_ITERABLE_SECTION_RAM_GC_ALLOWED(k_timer, 4)
Z_ITERABLE_SECTION_RAM_GC_ALLOWED(k_mem_slab, 4)
Z_ITERABLE_SECTION_RAM_GC_ALLOWED(k_mem_pool, 4)
Z_ITERABLE_SECTION_RAM_GC_ALLOWED(k_heap, 4)
Z_ITERABLE_SECTION_RAM_GC_ALLOWED(k_mutex, 4)
Z_ITERABLE_SECTION_RAM_GC_ALLOWED(k_stack, 4)
Z_ITERABLE_SECTION_RAM_GC_ALLOWED(k_msgq, 4)
Z_ITERABLE_SECTION_RAM_GC_ALLOWED(k_mbox, 4)
Z_ITERABLE_SECTION_RAM_GC_ALLOWED(k_pipe, 4)
Z_ITERABLE_SECTION_RAM_GC_ALLOWED(k_sem, 4)
Z_ITERABLE_SECTION_RAM_GC_ALLOWED(k_queue, 4)
Z_ITERABLE_SECTION_RAM_GC_ALLOWED(k_condvar, 4)

SECTION_DATA_PROLOGUE(_net_buf_pool_area,,SUBALIGN(4))
{
        _net_buf_pool_list = .;
        KEEP(*(SORT_BY_NAME("._net_buf_pool.static.*")))
} GROUP_DATA_LINK_IN(RAMABLE_REGION, ROMABLE_REGION)

#if defined(CONFIG_NETWORKING)
Z_ITERABLE_SECTION_RAM(net_if, 4)
Z_ITERABLE_SECTION_RAM(net_if_dev, 4)
Z_ITERABLE_SECTION_RAM(net_l2, 4)
#endif /* NETWORKING */

#if defined(CONFIG_UART_MUX)
SECTION_DATA_PROLOGUE(uart_mux,,SUBALIGN(4))
{
        __uart_mux_start = .;
        *(".uart_mux.*")
        KEEP(*(SORT_BY_NAME(".uart_mux.*")))
        __uart_mux_end = .;
} GROUP_DATA_LINK_IN(RAMABLE_REGION, ROMABLE_REGION)
#endif

#if defined(CONFIG_USB_DEVICE_STACK)
SECTION_DATA_PROLOGUE(usb_descriptor,,SUBALIGN(1))
{
        __usb_descriptor_start = .;
        *(".usb.descriptor")
        KEEP(*(SORT_BY_NAME(".usb.descriptor*")))
        __usb_descriptor_end = .;
} GROUP_DATA_LINK_IN(RAMABLE_REGION, ROMABLE_REGION)

SECTION_DATA_PROLOGUE(usb_data,,SUBALIGN(1))
{
        __usb_data_start = .;
        *(".usb.data")
        KEEP(*(SORT_BY_NAME(".usb.data*")))
        __usb_data_end = .;
} GROUP_DATA_LINK_IN(RAMABLE_REGION, ROMABLE_REGION)
#endif /* CONFIG_USB_DEVICE_STACK */

#if defined(CONFIG_USB_DEVICE_BOS)
SECTION_DATA_PROLOGUE(usb_bos_desc,,SUBALIGN(1))
{
        __usb_bos_desc_start = .;
        *(".usb.bos_desc")
        KEEP(*(SORT_BY_NAME(".usb.bos_desc*")))
        __usb_bos_desc_end = .;
} GROUP_DATA_LINK_IN(RAMABLE_REGION, ROMABLE_REGION)
#endif /* CONFIG_USB_DEVICE_BOS */

#ifdef CONFIG_USERSPACE
_static_kernel_objects_end = .;
#endif
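/*
 * Note on usage: every section in this file brackets its contents with a
 * start/end symbol pair (for example __usb_descriptor_start and
 * __usb_descriptor_end above, or _static_kernel_objects_begin/_end, which
 * are scanned by gen_kobject_list.py). A minimal sketch of how such a pair
 * can be consumed from C, here simply measuring the region; the function
 * name is hypothetical and only the symbol names come from this script:
 *
 *      extern char __usb_descriptor_start[];
 *      extern char __usb_descriptor_end[];
 *
 *      static size_t usb_descriptor_region_size(void)
 *      {
 *              // Size in bytes of everything placed in the usb_descriptor
 *              // section by the linker.
 *              return (size_t)(__usb_descriptor_end - __usb_descriptor_start);
 *      }
 */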