/*
 * Copyright (c) 2018 Intel Corporation
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#include <stdlib.h>
#include <zephyr.h>
#include <init.h>
#include <errno.h>
#include <sys/math_extras.h>
#include <string.h>
#include <app_memory/app_memdomain.h>
#include <sys/mutex.h>
#include <sys/sys_heap.h>
#include <zephyr/types.h>

#define LOG_LEVEL CONFIG_KERNEL_LOG_LEVEL
#include <logging/log.h>
LOG_MODULE_DECLARE(os, CONFIG_KERNEL_LOG_LEVEL);

#ifdef CONFIG_MINIMAL_LIBC_MALLOC

#if (CONFIG_MINIMAL_LIBC_MALLOC_ARENA_SIZE > 0)
#ifdef CONFIG_USERSPACE
K_APPMEM_PARTITION_DEFINE(z_malloc_partition);
#define POOL_SECTION K_APP_DMEM_SECTION(z_malloc_partition)
#else
#define POOL_SECTION .data
#endif /* CONFIG_USERSPACE */

#define HEAP_BYTES CONFIG_MINIMAL_LIBC_MALLOC_ARENA_SIZE

/*
 * The malloc arena: a sys_heap, its backing storage, and a mutex that
 * serializes all allocator entry points. With CONFIG_USERSPACE these
 * are placed in the z_malloc_partition memory partition.
 */
Z_GENERIC_SECTION(POOL_SECTION) static struct sys_heap z_malloc_heap;
Z_GENERIC_SECTION(POOL_SECTION) struct sys_mutex z_malloc_heap_mutex;
Z_GENERIC_SECTION(POOL_SECTION) static char z_malloc_heap_mem[HEAP_BYTES];

void *malloc(size_t size)
{
	int lock_ret;

	lock_ret = sys_mutex_lock(&z_malloc_heap_mutex, K_FOREVER);
	__ASSERT_NO_MSG(lock_ret == 0);

	/* Align to the most restrictive fundamental type, as malloc() must */
	void *ret = sys_heap_aligned_alloc(&z_malloc_heap,
					   __alignof__(z_max_align_t),
					   size);
	if (ret == NULL && size != 0) {
		errno = ENOMEM;
	}

	(void) sys_mutex_unlock(&z_malloc_heap_mutex);

	return ret;
}

/* One-time heap and lock setup, run at the APPLICATION init level */
static int malloc_prepare(const struct device *unused)
{
	ARG_UNUSED(unused);

	sys_heap_init(&z_malloc_heap, z_malloc_heap_mem, HEAP_BYTES);
	sys_mutex_init(&z_malloc_heap_mutex);

	return 0;
}

void *realloc(void *ptr, size_t requested_size)
{
	int lock_ret;

	lock_ret = sys_mutex_lock(&z_malloc_heap_mutex, K_FOREVER);
	__ASSERT_NO_MSG(lock_ret == 0);

	void *ret = sys_heap_aligned_realloc(&z_malloc_heap, ptr,
					     __alignof__(z_max_align_t),
					     requested_size);

	if (ret == NULL && requested_size != 0) {
		errno = ENOMEM;
	}

	(void) sys_mutex_unlock(&z_malloc_heap_mutex);

	return ret;
}

void free(void *ptr)
{
	int lock_ret;

	lock_ret = sys_mutex_lock(&z_malloc_heap_mutex, K_FOREVER);
	__ASSERT_NO_MSG(lock_ret == 0);

	sys_heap_free(&z_malloc_heap, ptr);

	(void) sys_mutex_unlock(&z_malloc_heap_mutex);
}

SYS_INIT(malloc_prepare, APPLICATION, CONFIG_KERNEL_INIT_PRIORITY_DEFAULT);
#else /* No malloc arena */
void *malloc(size_t size)
{
	ARG_UNUSED(size);

	LOG_ERR("CONFIG_MINIMAL_LIBC_MALLOC_ARENA_SIZE is 0");
	errno = ENOMEM;

	return NULL;
}

void free(void *ptr)
{
	ARG_UNUSED(ptr);
}

void *realloc(void *ptr, size_t size)
{
	ARG_UNUSED(ptr);
	return malloc(size);
}
#endif

#endif /* CONFIG_MINIMAL_LIBC_MALLOC */

#ifdef CONFIG_MINIMAL_LIBC_CALLOC
void *calloc(size_t nmemb, size_t size)
{
	void *ret;

	/* Reject nmemb * size products that overflow size_t */
	if (size_mul_overflow(nmemb, size, &size)) {
		errno = ENOMEM;
		return NULL;
	}

	ret = malloc(size);
	if (ret != NULL) {
		(void)memset(ret, 0, size);
	}

	return ret;
}
#endif /* CONFIG_MINIMAL_LIBC_CALLOC */

#ifdef CONFIG_MINIMAL_LIBC_REALLOCARRAY
void *reallocarray(void *ptr, size_t nmemb, size_t size)
{
#if (CONFIG_MINIMAL_LIBC_MALLOC_ARENA_SIZE > 0)
	if (size_mul_overflow(nmemb, size, &size)) {
		errno = ENOMEM;
		return NULL;
	}
	return realloc(ptr, size);
#else
	return NULL;
#endif
}
#endif /* CONFIG_MINIMAL_LIBC_REALLOCARRAY */
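
/*
 * Usage sketch: with CONFIG_MINIMAL_LIBC_MALLOC enabled and
 * CONFIG_MINIMAL_LIBC_MALLOC_ARENA_SIZE set to a nonzero value in the
 * application configuration, threads can call the standard allocators
 * directly; calloc() and reallocarray() add overflow-checked sizing on
 * top of malloc()/realloc(). The variable names below (buf, tmp) are
 * placeholders for illustration only:
 *
 *	int *buf = calloc(16, sizeof(int));	// zero-filled, overflow-checked
 *	if (buf == NULL) {
 *		// arena exhausted, or nmemb * size overflowed: errno == ENOMEM
 *	}
 *	int *tmp = reallocarray(buf, 32, sizeof(int));
 *	if (tmp != NULL) {
 *		buf = tmp;	// grown in place or moved; old pointer invalid
 *	}
 *	free(buf);
 */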