Loading...
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 272 273 274 275 276 277 278 279 280 281 282 283 284 285 286 287 288 289 290 291 292 293 294 295 296 297 298 299 300 301 302 303 304 305 306 307 308 309 310 311 312 313 314 315 316 317 318 319 320 321 322 323 324 325 326 327 328 329 330 331 332 333 334 335 336 337 338 339 340 341 342 343 344 345 346 347 348 349 350 351 352 353 354 355 356 357 358 359 360 361 362 363 364 365 366 367 368 369 370 371 372 373 374 375 376 377 378 379 380 381 382 383 384 385 386 387 388 389 390 391 392 393 394 395 396 397 398 399 400 401 402 403 404 405 406 407 408 409 410 411 412 413 414 415 416 417 418 419 420 421 422 423 424 425 426 427 428 429 430 431 432 433 434 435 436 437 438 439 440 441 442 443 444 445 446 447 448 449 450 451 452 453 454 455 456 457 458 459 460 461 462 463 464 465 466 467 468 469 470 471 472 473 474 475 476 477 478 479 480 481 482 483 484 485 486 487 488 489 490 491 492 493 494 495 496 497 498 499 500 501 502 503 504 505 506 507 508 509 510 511 512 513 514 515 516 517 518 519 520 521 522 523 524 525 526 527 528 529 530 531 532 533 534 535 536 537 538 539 540 541 542 543 544 545 546 547 548 549 550 551 552 553 554 555 556 557 558 559 560 561 562 563 564 565 566 567 568 569 570 571 572 573 574 575 576 577 578 579 580 581 582 583 584 585 586 587 588 589 590 591 592 593 594 595 596 597 598 599 600 601 602 603 604 605 606 607 608 609 610 611 612 613 614 615 616 617 618 619 620 621 622 623 624 625 626 627 628 629 630 631 632 633 634 635 636 637 638 639 640 641 642 643 644 645 646 647 648 649 650 651 652 653 654 655 656 657 658 659 660 661 662 663 664 665 666 667 668 669 670 671 672 673 674 675 676 677 678 679 680 681 682 683 684 685 686 687 688 689 690 691 692 693 694 695 696 697 698 699 700 701 702 703 704 705 706 707 708 709 710 711 712 713 714 715 716 717 718 719 720 721 722 723 724 725 726 727 728 729 730 731 732 733 734 735 736 737 738 739 740 741 742 743 744 745 746 747 748 749 750 751 752 753 754 755 756 757 758 759 760 761 762 763 764 765 766 767 768 769 770 771 772 773 774 775 776 777 778 779 780 781 782 783 784 785 786 787 788 789 790 791 792 793 794 795 796 797 798 799 800 801 802 803 804 805 806 807 808 809 810 811 812 813 814 815 816 817 818 819 820 821 822 823 824 825 826 827 828 829 830 831 832 833 834 835 836 837 838 839 840 841 842 843 844 845 846 847 848 849 850 851 852 853 854 855 856 857 858 859 860 861 862 863 864 865 866 867 868 869 870 871 872 873 874 875 876 877 878 879 880 881 882 883 884 885 886 887 888 889 890 891 892 893 894 895 896 897 898 899 900 901 902 903 904 905 906 907 908 909 910 911 912 913 914 915 916 917 918 919 920 921 922 923 924 925 926 927 928 929 930 931 932 933 934 935 936 937 938 939 940 941 942 943 944 945 946 947 948 949 950 951 952 953 954 955 956 957 958 959 960 961 962 963 964 965 966 967 968 969 970 971 972 973 974 975 976 977 978 979 980 981 982 983 984 985 986 987 988 989 990 991 992 993 994 995 996 997 998 999 1000 1001 1002 1003 1004 1005 1006 1007 1008 1009 1010 1011 1012 1013 1014 1015 1016 1017 1018 1019 1020 1021 1022 1023 1024 1025 1026 1027 1028 1029 1030 1031 1032 1033 1034 1035 1036 1037 1038 1039 1040 1041 1042 1043 1044 1045 1046 1047 1048 1049 1050 1051 1052 1053 1054 1055 1056 1057 1058 1059 1060 1061 1062 1063 1064 1065 1066 1067 1068 1069 1070 1071 1072 1073 1074 1075 1076 1077 1078 1079 1080 1081 1082 1083 1084 1085 1086 1087 1088 1089 1090 1091 1092 1093 1094 1095 1096 1097 1098 1099 1100 1101 1102 1103 1104 1105 1106 1107 1108 1109 1110 1111 1112 1113 1114 1115 1116 1117 1118 1119 1120 1121 1122 1123 1124 1125 1126 1127 1128 1129 1130 1131 1132 1133 1134 1135 1136 1137 1138 1139 1140 1141 1142 1143 1144 1145 1146 1147 1148 1149 1150 1151 1152 1153 1154 1155 1156 1157 1158 1159 1160 1161 1162 1163 1164 1165 1166 1167 1168 1169 1170 1171 1172 1173 1174 1175 1176 1177 1178 1179 1180 1181 1182 1183 1184 1185 1186 1187 1188 1189 1190 1191 1192 1193 1194 1195 1196 1197 1198 1199 1200 1201 1202 1203 1204 1205 1206 1207 1208 1209 1210 1211 1212 1213 1214 1215 1216 1217 1218 1219 1220 1221 1222 1223 1224 1225 1226 1227 1228 1229 1230 1231 1232 1233 1234 1235 1236 1237 1238 1239 1240 1241 1242 1243 1244 1245 1246 1247 1248 1249 1250 1251 1252 1253 1254 1255 1256 1257 1258 1259 1260 1261 1262 1263 1264 1265 1266 1267 1268 1269 1270 1271 1272 1273 1274 1275 1276 1277 1278 1279 1280 1281 1282 1283 1284 1285 1286 1287 1288 1289 1290 1291 1292 1293 1294 1295 1296 1297 1298 1299 1300 1301 1302 1303 1304 1305 1306 1307 1308 1309 1310 1311 1312 1313 1314 1315 1316 1317 1318 1319 1320 1321 1322 1323 1324 1325 1326 1327 1328 1329 1330 1331 1332 1333 1334 1335 1336 1337 1338 1339 1340 1341 1342 1343 1344 1345 1346 1347 1348 1349 1350 1351 1352 1353 1354 1355 1356 1357 1358 1359 1360 1361 1362 1363 1364 1365 1366 1367 1368 1369 1370 1371 1372 1373 1374 1375 1376 1377 1378 1379 1380 1381 1382 1383 1384 1385 1386 1387 1388 1389 1390 1391 1392 1393 1394 1395 1396 1397 1398 1399 1400 1401 1402 1403 1404 1405 1406 1407 1408 1409 1410 1411 1412 1413 1414 1415 1416 1417 1418 1419 1420 1421 1422 1423 1424 1425 1426 1427 1428 1429 1430 1431 1432 1433 1434 1435 1436 1437 1438 1439 1440 1441 1442 1443 1444 1445 1446 1447 1448 1449 1450 1451 1452 1453 1454 1455 1456 1457 1458 1459 1460 1461 1462 1463 1464 1465 1466 1467 1468 1469 1470 1471 1472 1473 1474 1475 1476 1477 1478 1479 1480 1481 1482 1483 1484 1485 1486 1487 1488 1489 1490 1491 1492 1493 1494 1495 1496 1497 1498 1499 1500 1501 1502 1503 1504 1505 1506 1507 1508 1509 1510 1511 1512 1513 1514 1515 1516 1517 1518 1519 1520 1521 1522 1523 1524 1525 1526 1527 1528 1529 1530 1531 1532 1533 1534 1535 1536 1537 1538 1539 1540 1541 1542 1543 1544 1545 1546 1547 1548 1549 1550 1551 1552 1553 1554 1555 1556 1557 1558 1559 1560 1561 1562 1563 1564 1565 1566 1567 1568 1569 1570 1571 1572 1573 1574 1575 1576 1577 1578 1579 1580 1581 1582 1583 1584 1585 1586 1587 1588 1589 1590 1591 1592 1593 1594 1595 1596 1597 1598 1599 1600 1601 1602 1603 1604 1605 1606 1607 1608 1609 1610 1611 1612 1613 1614 1615 1616 1617 1618 1619 1620 1621 1622 1623 1624 1625 1626 1627 1628 1629 1630 1631 1632 1633 1634 1635 1636 1637 1638 1639 1640 1641 1642 1643 1644 1645 1646 1647 1648 1649 1650 1651 1652 1653 1654 1655 1656 1657 1658 1659 1660 1661 1662 1663 1664 1665 1666 1667 1668 1669 1670 1671 1672 1673 1674 1675 1676 1677 1678 1679 1680 1681 1682 1683 1684 1685 1686 1687 1688 1689 1690 1691 1692 1693 1694 1695 1696 1697 1698 1699 1700 1701 1702 1703 1704 1705 1706 1707 1708 1709 1710 1711 1712 1713 1714 1715 1716 1717 1718 1719 1720 1721 1722 1723 1724 1725 1726 1727 1728 1729 1730 1731 1732 1733 1734 1735 1736 1737 1738 1739 1740 1741 1742 1743 1744 1745 1746 1747 1748 1749 1750 1751 1752 1753 1754 1755 1756 1757 1758 1759 1760 1761 1762 1763 1764 1765 1766 1767 1768 1769 1770 1771 1772 1773 1774 1775 1776 1777 1778 1779 1780 1781 1782 1783 1784 1785 1786 1787 1788 1789 1790 1791 1792 1793 1794 1795 1796 1797 1798 1799 1800 1801 1802 1803 1804 1805 1806 1807 1808 1809 1810 1811 1812 1813 1814 1815 1816 1817 1818 1819 1820 1821 1822 1823 1824 1825 1826 1827 1828 1829 1830 1831 1832 1833 1834 1835 1836 1837 1838 1839 1840 1841 1842 1843 1844 1845 1846 1847 1848 1849 1850 1851 1852 1853 1854 1855 1856 1857 1858 1859 1860 1861 1862 1863 1864 1865 1866 1867 1868 1869 1870 1871 1872 1873 1874 1875 1876 1877 1878 1879 1880 1881 1882 1883 1884 1885 1886 1887 1888 1889 1890 1891 1892 1893 1894 1895 1896 1897 1898 1899 1900 1901 1902 1903 1904 1905 1906 1907 1908 1909 1910 1911 1912 1913 1914 1915 1916 1917 1918 1919 1920 1921 1922 1923 1924 1925 1926 1927 1928 1929 1930 1931 1932 1933 1934 1935 1936 1937 1938 1939 1940 1941 1942 1943 1944 1945 1946 1947 1948 1949 1950 1951 1952 1953 1954 1955 1956 1957 1958 1959 1960 1961 1962 1963 1964 1965 1966 1967 1968 1969 1970 1971 1972 1973 1974 1975 1976 1977 1978 1979 1980 1981 1982 1983 1984 1985 1986 1987 1988 1989 1990 1991 1992 1993 1994 1995 1996 1997 1998 1999 2000 2001 2002 2003 2004 2005 2006 2007 2008 2009 2010 2011 2012 2013 2014 2015 2016 2017 2018 2019 2020 2021 2022 2023 2024 2025 2026 2027 2028 2029 2030 2031 2032 2033 2034 2035 2036 2037 2038 2039 2040 2041 2042 2043 2044 2045 2046 2047 2048 2049 2050 2051 2052 2053 2054 2055 2056 2057 2058 2059 2060 2061 2062 2063 2064 2065 2066 2067 2068 2069 2070 2071 2072 2073 2074 2075 2076 2077 2078 2079 2080 2081 2082 2083 2084 2085 2086 2087 2088 2089 2090 2091 2092 2093 2094 2095 2096 2097 2098 2099 2100 2101 2102 2103 2104 2105 2106 2107 2108 2109 2110 2111 2112 2113 2114 2115 2116 2117 2118 2119 2120 2121 2122 2123 2124 2125 2126 2127 2128 2129 2130 2131 2132 2133 2134 2135 2136 2137 2138 2139 2140 2141 2142 2143 2144 2145 2146 2147 2148 2149 2150 2151 2152 2153 2154 2155 2156 2157 2158 2159 2160 2161 2162 2163 2164 2165 2166 2167 2168 2169 2170 2171 2172 2173 2174 2175 2176 2177 2178 2179 2180 2181 2182 2183 2184 2185 2186 2187 2188 2189 2190 2191 2192 2193 2194 2195 2196 2197 2198 2199 2200 2201 2202 2203 2204 2205 2206 2207 2208 2209 2210 2211 2212 2213 2214 2215 2216 2217 2218 2219 2220 2221 2222 2223 2224 2225 2226 2227 2228 2229 2230 2231 2232 2233 2234 2235 2236 2237 2238 2239 2240 2241 2242 2243 2244 2245 2246 2247 2248 2249 2250 2251 2252 2253 2254 2255 2256 2257 2258 2259 2260 2261 2262 2263 2264 2265 2266 2267 2268 2269 2270 2271 2272 2273 2274 2275 2276 2277 2278 2279 2280 2281 2282 2283 2284 2285 2286 2287 2288 2289 2290 2291 2292 2293 2294 2295 2296 2297 2298 2299 2300 2301 2302 2303 2304 2305 2306 2307 2308 2309 2310 2311 2312 2313 2314 2315 2316 2317 2318 2319 2320 2321 2322 2323 2324 2325 2326 2327 2328 2329 2330 2331 2332 2333 2334 2335 2336 2337 2338 2339 2340 2341 2342 2343 2344 2345 2346 2347 2348 2349 2350 2351 2352 2353 2354 2355 2356 2357 2358 2359 2360 2361 2362 2363 2364 2365 2366 2367 2368 2369 2370 2371 2372 2373 2374 2375 2376 2377 2378 2379 2380 2381 2382 2383 2384 2385 2386 2387 2388 2389 2390 2391 2392 2393 2394 2395 2396 2397 2398 2399 2400 2401 2402 2403 2404 2405 2406 2407 2408 2409 2410 2411 2412 2413 2414 2415 2416 2417 2418 2419 2420 2421 2422 2423 2424 2425 2426 2427 2428 2429 2430 2431 2432 2433 2434 2435 2436 2437 2438 2439 2440 2441 2442 2443 2444 2445 2446 2447 2448 2449 2450 2451 2452 2453 2454 2455 2456 2457 2458 2459 2460 2461 2462 2463 2464 2465 2466 2467 2468 2469 2470 2471 2472 2473 2474 2475 2476 2477 2478 2479 2480 2481 2482 2483 2484 2485 2486 2487 2488 2489 2490 2491 2492 2493 2494 2495 2496 2497 2498 2499 2500 2501 2502 2503 2504 2505 2506 2507 2508 2509 2510 2511 2512 2513 2514 2515 2516 2517 2518 2519 2520 2521 2522 2523 2524 2525 2526 2527 2528 2529 2530 2531 2532 2533 2534 2535 2536 2537 2538 2539 2540 2541 2542 2543 2544 2545 2546 2547 2548 2549 2550 2551 2552 2553 2554 2555 2556 2557 2558 2559 2560 2561 2562 2563 2564 2565 2566 2567 2568 2569 2570 2571 2572 2573 2574 2575 2576 2577 2578 2579 2580 2581 2582 2583 2584 2585 2586 2587 2588 2589 2590 2591 2592 2593 2594 2595 2596 2597 2598 2599 2600 2601 2602 2603 2604 2605 2606 2607 2608 2609 2610 2611 2612 2613 2614 2615 2616 2617 2618 2619 2620 2621 2622 2623 2624 2625 2626 2627 2628 2629 2630 2631 2632 2633 2634 2635 2636 2637 2638 2639 2640 2641 2642 2643 2644 2645 2646 2647 2648 2649 2650 2651 2652 2653 2654 2655 2656 2657 2658 2659 2660 2661 2662 2663 2664 2665 2666 2667 2668 2669 2670 2671 2672 2673 2674 2675 2676 2677 2678 2679 2680 2681 2682 2683 2684 2685 2686 2687 2688 2689 2690 2691 2692 2693 2694 2695 2696 2697 2698 2699 2700 2701 2702 2703 2704 2705 2706 2707 2708 2709 2710 2711 2712 2713 2714 2715 2716 2717 2718 2719 2720 2721 2722 2723 2724 2725 2726 2727 2728 2729 2730 2731 2732 2733 2734 2735 2736 2737 2738 2739 2740 2741 2742 2743 2744 2745 2746 2747 2748 2749 2750 2751 2752 2753 2754 2755 2756 2757 2758 2759 2760 2761 2762 2763 2764 2765 2766 2767 2768 2769 2770 2771 2772 2773 2774 2775 2776 2777 2778 2779 2780 2781 2782 2783 2784 2785 2786 2787 2788 2789 2790 2791 2792 2793 2794 2795 2796 2797 2798 2799 2800 2801 2802 2803 2804 2805 2806 2807 2808 2809 2810 2811 2812 2813 2814 2815 2816 2817 2818 2819 2820 2821 2822 2823 2824 2825 2826 2827 2828 2829 2830 2831 2832 2833 2834 2835 2836 2837 2838 2839 2840 2841 2842 2843 2844 2845 2846 2847 2848 2849 2850 2851 2852 2853 2854 2855 2856 2857 2858 2859 2860 2861 2862 2863 2864 2865 2866 2867 2868 2869 2870 2871 2872 2873 2874 2875 2876 2877 2878 2879 2880 2881 2882 2883 2884 2885 2886 2887 2888 2889 2890 2891 2892 2893 2894 2895 2896 2897 2898 2899 2900 2901 2902 2903 2904 2905 2906 2907 2908 2909 2910 2911 2912 2913 2914 2915 2916 2917 2918 2919 2920 2921 2922 2923 2924 2925 2926 2927 2928 2929 2930 2931 2932 2933 2934 2935 2936 2937 2938 2939 2940 2941 2942 2943 2944 2945 2946 2947 2948 2949 2950 2951 2952 2953 2954 2955 2956 2957 2958 2959 2960 2961 2962 2963 2964 2965 2966 2967 2968 2969 2970 2971 2972 2973 2974 2975 2976 | /* * Copyright (c) 2016, Wind River Systems, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ /** * @file * * @brief Public kernel APIs. */ #ifndef _kernel__h_ #define _kernel__h_ #include <stddef.h> #include <stdint.h> #include <toolchain.h> #include <sections.h> #include <atomic.h> #include <errno.h> #include <misc/__assert.h> #include <misc/dlist.h> #include <misc/slist.h> #ifdef __cplusplus extern "C" { #endif #ifdef CONFIG_KERNEL_DEBUG #include <misc/printk.h> #define K_DEBUG(fmt, ...) printk("[%s] " fmt, __func__, ##__VA_ARGS__) #else #define K_DEBUG(fmt, ...) #endif #define K_PRIO_COOP(x) (-(CONFIG_NUM_COOP_PRIORITIES - (x))) #define K_PRIO_PREEMPT(x) (x) #define K_ANY NULL #define K_END NULL #if CONFIG_NUM_COOP_PRIORITIES > 0 #define K_HIGHEST_THREAD_PRIO (-CONFIG_NUM_COOP_PRIORITIES) #else #define K_HIGHEST_THREAD_PRIO 0 #endif #if CONFIG_NUM_PREEMPT_PRIORITIES > 0 #define K_LOWEST_THREAD_PRIO CONFIG_NUM_PREEMPT_PRIORITIES #else #define K_LOWEST_THREAD_PRIO -1 #endif #define K_IDLE_PRIO K_LOWEST_THREAD_PRIO #define K_HIGHEST_APPLICATION_THREAD_PRIO (K_HIGHEST_THREAD_PRIO) #define K_LOWEST_APPLICATION_THREAD_PRIO (K_LOWEST_THREAD_PRIO - 1) typedef sys_dlist_t _wait_q_t; #ifdef CONFIG_DEBUG_TRACING_KERNEL_OBJECTS #define _DEBUG_TRACING_KERNEL_OBJECTS_NEXT_PTR(type) struct type *__next #define _DEBUG_TRACING_KERNEL_OBJECTS_INIT .__next = NULL, #else #define _DEBUG_TRACING_KERNEL_OBJECTS_INIT #define _DEBUG_TRACING_KERNEL_OBJECTS_NEXT_PTR(type) #endif #define tcs k_thread struct k_thread; struct k_mutex; struct k_sem; struct k_alert; struct k_msgq; struct k_mbox; struct k_pipe; struct k_fifo; struct k_lifo; struct k_stack; struct k_mem_slab; struct k_mem_pool; struct k_timer; typedef struct k_thread *k_tid_t; enum execution_context_types { K_ISR = 0, K_COOP_THREAD, K_PREEMPT_THREAD, }; /** * @defgroup thread_apis Thread APIs * @ingroup kernel_apis * @{ */ /** * @typedef k_thread_entry_t * @brief Thread entry point function type. * * A thread's entry point function is invoked when the thread starts executing. * Up to 3 argument values can be passed to the function. * * The thread terminates execution permanently if the entry point function * returns. The thread is responsible for releasing any shared resources * it may own (such as mutexes and dynamically allocated memory), prior to * returning. * * @param p1 First argument. * @param p2 Second argument. * @param p3 Third argument. * * @return N/A */ typedef void (*k_thread_entry_t)(void *p1, void *p2, void *p3); /** * @brief Spawn a thread. * * This routine initializes a thread, then schedules it for execution. * * The new thread may be scheduled for immediate execution or a delayed start. * If the newly spawned thread does not have a delayed start the kernel * scheduler may preempt the current thread to allow the new thread to * execute. * * Thread options are architecture-specific, and can include K_ESSENTIAL, * K_FP_REGS, and K_SSE_REGS. Multiple options may be specified by separating * them using "|" (the logical OR operator). * * @param stack Pointer to the stack space. * @param stack_size Stack size in bytes. * @param entry Thread entry function. * @param p1 1st entry point parameter. * @param p2 2nd entry point parameter. * @param p3 3rd entry point parameter. * @param prio Thread priority. * @param options Thread options. * @param delay Scheduling delay (in milliseconds), or K_NO_WAIT (for no delay). * * @return ID of new thread. */ extern k_tid_t k_thread_spawn(char *stack, size_t stack_size, k_thread_entry_t entry, void *p1, void *p2, void *p3, int prio, uint32_t options, int32_t delay); /** * @brief Put the current thread to sleep. * * This routine puts the current thread to sleep for @a duration * milliseconds. * * @param duration Number of milliseconds to sleep. * * @return N/A */ extern void k_sleep(int32_t duration); /** * @brief Cause the current thread to busy wait. * * This routine causes the current thread to execute a "do nothing" loop for * @a usec_to_wait microseconds. * * @return N/A */ extern void k_busy_wait(uint32_t usec_to_wait); /** * @brief Yield the current thread. * * This routine causes the current thread to yield execution to another * thread of the same or higher priority. If there are no other ready threads * of the same or higher priority, the routine returns immediately. * * @return N/A */ extern void k_yield(void); /** * @brief Wake up a sleeping thread. * * This routine prematurely wakes up @a thread from sleeping. * * If @a thread is not currently sleeping, the routine has no effect. * * @param thread ID of thread to wake. * * @return N/A */ extern void k_wakeup(k_tid_t thread); /** * @brief Get thread ID of the current thread. * * @return ID of current thread. */ extern k_tid_t k_current_get(void); /** * @brief Cancel thread performing a delayed start. * * This routine prevents @a thread from executing if it has not yet started * execution. The thread must be re-spawned before it will execute. * * @param thread ID of thread to cancel. * * @retval 0 Thread spawning cancelled. * @retval -EINVAL Thread has already started executing. */ extern int k_thread_cancel(k_tid_t thread); /** * @brief Abort a thread. * * This routine permanently stops execution of @a thread. The thread is taken * off all kernel queues it is part of (i.e. the ready queue, the timeout * queue, or a kernel object wait queue). However, any kernel resources the * thread might currently own (such as mutexes or memory blocks) are not * released. It is the responsibility of the caller of this routine to ensure * all necessary cleanup is performed. * * @param thread ID of thread to abort. * * @return N/A */ extern void k_thread_abort(k_tid_t thread); /** * @cond INTERNAL_HIDDEN */ #ifdef CONFIG_SYS_CLOCK_EXISTS #define _THREAD_TIMEOUT_INIT(obj) \ (obj).nano_timeout = { \ .node = { {0}, {0} }, \ .thread = NULL, \ .wait_q = NULL, \ .delta_ticks_from_prev = -1, \ }, #else #define _THREAD_TIMEOUT_INIT(obj) #endif #ifdef CONFIG_ERRNO #define _THREAD_ERRNO_INIT(obj) (obj).errno_var = 0, #else #define _THREAD_ERRNO_INIT(obj) #endif struct _static_thread_data { union { char *init_stack; struct k_thread *thread; }; unsigned int init_stack_size; void (*init_entry)(void *, void *, void *); void *init_p1; void *init_p2; void *init_p3; int init_prio; uint32_t init_options; int32_t init_delay; void (*init_abort)(void); uint32_t init_groups; }; #define _THREAD_INITIALIZER(stack, stack_size, \ entry, p1, p2, p3, \ prio, options, delay, abort, groups) \ { \ .init_stack = (stack), \ .init_stack_size = (stack_size), \ .init_entry = (void (*)(void *, void *, void *))entry, \ .init_p1 = (void *)p1, \ .init_p2 = (void *)p2, \ .init_p3 = (void *)p3, \ .init_prio = (prio), \ .init_options = (options), \ .init_delay = (delay), \ .init_abort = (abort), \ .init_groups = (groups), \ } /** * INTERNAL_HIDDEN @endcond */ /** * @brief Statically define and initialize a thread. * * The thread may be scheduled for immediate execution or a delayed start. * * Thread options are architecture-specific, and can include K_ESSENTIAL, * K_FP_REGS, and K_SSE_REGS. Multiple options may be specified by separating * them using "|" (the logical OR operator). * * The ID of the thread can be accessed using: * * @code extern const k_tid_t <name>; @endcode * * @param name Name of the thread. * @param stack_size Stack size in bytes. * @param entry Thread entry function. * @param p1 1st entry point parameter. * @param p2 2nd entry point parameter. * @param p3 3rd entry point parameter. * @param prio Thread priority. * @param options Thread options. * @param delay Scheduling delay (in milliseconds), or K_NO_WAIT (for no delay). * * @internal It has been observed that the x86 compiler by default aligns * these _static_thread_data structures to 32-byte boundaries, thereby * wasting space. To work around this, force a 4-byte alignment. */ #define K_THREAD_DEFINE(name, stack_size, \ entry, p1, p2, p3, \ prio, options, delay) \ char __noinit __stack _k_thread_obj_##name[stack_size]; \ struct _static_thread_data _k_thread_data_##name __aligned(4) \ __in_section(_static_thread_data, static, name) = \ _THREAD_INITIALIZER(_k_thread_obj_##name, stack_size, \ entry, p1, p2, p3, prio, options, delay, \ NULL, 0); \ const k_tid_t name = (k_tid_t)_k_thread_obj_##name /** * @brief Get a thread's priority. * * This routine gets the priority of @a thread. * * @param thread ID of thread whose priority is needed. * * @return Priority of @a thread. */ extern int k_thread_priority_get(k_tid_t thread); /** * @brief Set a thread's priority. * * This routine immediately changes the priority of @a thread. * * Rescheduling can occur immediately depending on the priority @a thread is * set to: * * - If its priority is raised above the priority of the caller of this * function, and the caller is preemptible, @a thread will be scheduled in. * * - If the caller operates on itself, it lowers its priority below that of * other threads in the system, and the caller is preemptible, the thread of * highest priority will be scheduled in. * * Priority can be assigned in the range of -CONFIG_NUM_COOP_PRIORITIES to * CONFIG_NUM_PREEMPT_PRIORITIES-1, where -CONFIG_NUM_COOP_PRIORITIES is the * highest priority. * * @param thread ID of thread whose priority is to be set. * @param prio New priority. * * @warning Changing the priority of a thread currently involved in mutex * priority inheritance may result in undefined behavior. * * @return N/A */ extern void k_thread_priority_set(k_tid_t thread, int prio); /** * @brief Suspend a thread. * * This routine prevents the kernel scheduler from making @a thread the * current thread. All other internal operations on @a thread are still * performed; for example, any timeout it is waiting on keeps ticking, * kernel objects it is waiting on are still handed to it, etc. * * If @a thread is already suspended, the routine has no effect. * * @param thread ID of thread to suspend. * * @return N/A */ extern void k_thread_suspend(k_tid_t thread); /** * @brief Resume a suspended thread. * * This routine allows the kernel scheduler to make @a thread the current * thread, when it is next eligible for that role. * * If @a thread is not currently suspended, the routine has no effect. * * @param thread ID of thread to resume. * * @return N/A */ extern void k_thread_resume(k_tid_t thread); /** * @brief Set time-slicing period and scope. * * This routine specifies how the scheduler will perform time slicing of * preemptible threads. * * To enable time slicing, @a slice must be non-zero. The scheduler * ensures that no thread runs for more than the specified time limit * before other threads of that priority are given a chance to execute. * Any thread whose priority is higher than @a prio is exempted, and may * execute as long as desired without being pre-empted due to time slicing. * * Time slicing only limits the maximum amount of time a thread may continuously * execute. Once the scheduler selects a thread for execution, there is no * minimum guaranteed time the thread will execute before threads of greater or * equal priority are scheduled. * * When the current thread is the only one of that priority eligible * for execution, this routine has no effect; the thread is immediately * rescheduled after the slice period expires. * * To disable timeslicing, set both @a slice and @a prio to zero. * * @param slice Maximum time slice length (in milliseconds). * @param prio Highest thread priority level eligible for time slicing. * * @return N/A */ extern void k_sched_time_slice_set(int32_t slice, int prio); /** * @} end defgroup thread_apis */ /** * @addtogroup isr_apis * @{ */ /** * @brief Determine if code is running at interrupt level. * * This routine allows the caller to customize its actions, depending on * whether it is a thread or an ISR. * * @note Can be called by ISRs. * * @return 0 if invoked by a thread. * @return Non-zero if invoked by an ISR. */ extern int k_is_in_isr(void); /** * @brief Determine if code is running in a preemptible thread. * * This routine allows the caller to customize its actions, depending on * whether it can be preempted by another thread. The routine returns a 'true' * value if all of the following conditions are met: * * - The code is running in a thread, not at ISR. * - The thread's priority is in the preemptible range. * - The thread has not locked the scheduler. * * @note Can be called by ISRs. * * @return 0 if invoked by an ISR or by a cooperative thread. * @return Non-zero if invoked by a preemptible thread. */ extern int k_is_preempt_thread(void); /** * @} end addtogroup isr_apis */ /** * @addtogroup thread_apis * @{ */ /** * @brief Lock the scheduler. * * This routine prevents the current thread from being preempted by another * thread by instructing the scheduler to treat it as a cooperative thread. * If the thread subsequently performs an operation that makes it unready, * it will be context switched out in the normal manner. When the thread * again becomes the current thread, its non-preemptible status is maintained. * * This routine can be called recursively. * * @note k_sched_lock() and k_sched_unlock() should normally be used * when the operation being performed can be safely interrupted by ISRs. * However, if the amount of processing involved is very small, better * performance may be obtained by using irq_lock() and irq_unlock(). * * @return N/A */ extern void k_sched_lock(void); /** * @brief Unlock the scheduler. * * This routine reverses the effect of a previous call to k_sched_lock(). * A thread must call the routine once for each time it called k_sched_lock() * before the thread becomes preemptible. * * @return N/A */ extern void k_sched_unlock(void); /** * @brief Set current thread's custom data. * * This routine sets the custom data for the current thread to @ value. * * Custom data is not used by the kernel itself, and is freely available * for a thread to use as it sees fit. It can be used as a framework * upon which to build thread-local storage. * * @param value New custom data value. * * @return N/A */ extern void k_thread_custom_data_set(void *value); /** * @brief Get current thread's custom data. * * This routine returns the custom data for the current thread. * * @return Current custom data value. */ extern void *k_thread_custom_data_get(void); /** * @} end addtogroup thread_apis */ #include <sys_clock.h> /** * @addtogroup clock_apis * @{ */ /** * @brief Generate null timeout delay. * * This macro generates a timeout delay that that instructs a kernel API * not to wait if the requested operation cannot be performed immediately. * * @return Timeout delay value. */ #define K_NO_WAIT 0 /** * @brief Generate timeout delay from milliseconds. * * This macro generates a timeout delay that that instructs a kernel API * to wait up to @a ms milliseconds to perform the requested operation. * * @param ms Duration in milliseconds. * * @return Timeout delay value. */ #define K_MSEC(ms) (ms) /** * @brief Generate timeout delay from seconds. * * This macro generates a timeout delay that that instructs a kernel API * to wait up to @a s seconds to perform the requested operation. * * @param s Duration in seconds. * * @return Timeout delay value. */ #define K_SECONDS(s) K_MSEC((s) * MSEC_PER_SEC) /** * @brief Generate timeout delay from minutes. * * This macro generates a timeout delay that that instructs a kernel API * to wait up to @a m minutes to perform the requested operation. * * @param m Duration in minutes. * * @return Timeout delay value. */ #define K_MINUTES(m) K_SECONDS((m) * 60) /** * @brief Generate timeout delay from hours. * * This macro generates a timeout delay that that instructs a kernel API * to wait up to @a h hours to perform the requested operation. * * @param h Duration in hours. * * @return Timeout delay value. */ #define K_HOURS(h) K_MINUTES((h) * 60) /** * @brief Generate infinite timeout delay. * * This macro generates a timeout delay that that instructs a kernel API * to wait as long as necessary to perform the requested operation. * * @return Timeout delay value. */ #define K_FOREVER (-1) /** * @} end addtogroup clock_apis */ /** * @cond INTERNAL_HIDDEN */ /* added tick needed to account for tick in progress */ #define _TICK_ALIGN 1 static int64_t __ticks_to_ms(int64_t ticks) { #if CONFIG_SYS_CLOCK_EXISTS return (MSEC_PER_SEC * (uint64_t)ticks) / sys_clock_ticks_per_sec; #else __ASSERT(ticks == 0, ""); return 0; #endif } /* timeouts */ struct _timeout; typedef void (*_timeout_func_t)(struct _timeout *t); struct _timeout { sys_dlist_t node; struct k_thread *thread; sys_dlist_t *wait_q; int32_t delta_ticks_from_prev; _timeout_func_t func; }; /** * INTERNAL_HIDDEN @endcond */ /** * @cond INTERNAL_HIDDEN */ struct k_timer { /* * _timeout structure must be first here if we want to use * dynamic timer allocation. timeout.node is used in the double-linked * list of free timers */ struct _timeout timeout; /* wait queue for the (single) thread waiting on this timer */ _wait_q_t wait_q; /* runs in ISR context */ void (*expiry_fn)(struct k_timer *); /* runs in the context of the thread that calls k_timer_stop() */ void (*stop_fn)(struct k_timer *); /* timer period */ int32_t period; /* timer status */ uint32_t status; /* used to support legacy timer APIs */ void *_legacy_data; _DEBUG_TRACING_KERNEL_OBJECTS_NEXT_PTR(k_timer); }; #define K_TIMER_INITIALIZER(obj, expiry, stop) \ { \ .timeout.delta_ticks_from_prev = -1, \ .timeout.wait_q = NULL, \ .timeout.thread = NULL, \ .timeout.func = _timer_expiration_handler, \ .wait_q = SYS_DLIST_STATIC_INIT(&obj.wait_q), \ .expiry_fn = expiry, \ .stop_fn = stop, \ .status = 0, \ ._legacy_data = NULL, \ _DEBUG_TRACING_KERNEL_OBJECTS_INIT \ } /** * INTERNAL_HIDDEN @endcond */ /** * @defgroup timer_apis Timer APIs * @ingroup kernel_apis * @{ */ /** * @typedef k_timer_expiry_t * @brief Timer expiry function type. * * A timer's expiry function is executed by the system clock interrupt handler * each time the timer expires. The expiry function is optional, and is only * invoked if the timer has been initialized with one. * * @param timer Address of timer. * * @return N/A */ typedef void (*k_timer_expiry_t)(struct k_timer *timer); /** * @typedef k_timer_stop_t * @brief Timer stop function type. * * A timer's stop function is executed if the timer is stopped prematurely. * The function runs in the context of the thread that stops the timer. * The stop function is optional, and is only invoked if the timer has been * initialized with one. * * @param timer Address of timer. * * @return N/A */ typedef void (*k_timer_stop_t)(struct k_timer *timer); /** * @brief Statically define and initialize a timer. * * The timer can be accessed outside the module where it is defined using: * * @code extern struct k_timer <name>; @endcode * * @param name Name of the timer variable. * @param expiry_fn Function to invoke each time the timer expires. * @param stop_fn Function to invoke if the timer is stopped while running. */ #define K_TIMER_DEFINE(name, expiry_fn, stop_fn) \ struct k_timer name \ __in_section(_k_timer, static, name) = \ K_TIMER_INITIALIZER(name, expiry_fn, stop_fn) /** * @brief Initialize a timer. * * This routine initializes a timer, prior to its first use. * * @param timer Address of timer. * @param expiry_fn Function to invoke each time the timer expires. * @param stop_fn Function to invoke if the timer is stopped while running. * * @return N/A */ extern void k_timer_init(struct k_timer *timer, k_timer_expiry_t expiry_fn, k_timer_stop_t stop_fn); /** * @brief Start a timer. * * This routine starts a timer, and resets its status to zero. The timer * begins counting down using the specified duration and period values. * * Attempting to start a timer that is already running is permitted. * The timer's status is reset to zero and the timer begins counting down * using the new duration and period values. * * @param timer Address of timer. * @param duration Initial timer duration (in milliseconds). * @param period Timer period (in milliseconds). * * @return N/A */ extern void k_timer_start(struct k_timer *timer, int32_t duration, int32_t period); /** * @brief Stop a timer. * * This routine stops a running timer prematurely. The timer's stop function, * if one exists, is invoked by the caller. * * Attempting to stop a timer that is not running is permitted, but has no * effect on the timer. * * @param timer Address of timer. * * @return N/A */ extern void k_timer_stop(struct k_timer *timer); /** * @brief Read timer status. * * This routine reads the timer's status, which indicates the number of times * it has expired since its status was last read. * * Calling this routine resets the timer's status to zero. * * @param timer Address of timer. * * @return Timer status. */ extern uint32_t k_timer_status_get(struct k_timer *timer); /** * @brief Synchronize thread to timer expiration. * * This routine blocks the calling thread until the timer's status is non-zero * (indicating that it has expired at least once since it was last examined) * or the timer is stopped. If the timer status is already non-zero, * or the timer is already stopped, the caller continues without waiting. * * Calling this routine resets the timer's status to zero. * * This routine must not be used by interrupt handlers, since they are not * allowed to block. * * @param timer Address of timer. * * @return Timer status. */ extern uint32_t k_timer_status_sync(struct k_timer *timer); /** * @brief Get time remaining before a timer next expires. * * This routine computes the (approximate) time remaining before a running * timer next expires. If the timer is not running, it returns zero. * * @param timer Address of timer. * * @return Remaining time (in milliseconds). */ extern int32_t k_timer_remaining_get(struct k_timer *timer); /** * @} end defgroup timer_apis */ /** * @addtogroup clock_apis * @{ */ /** * @brief Get system uptime. * * This routine returns the elapsed time since the system booted, * in milliseconds. * * @return Current uptime. */ extern int64_t k_uptime_get(void); /** * @brief Get system uptime (32-bit version). * * This routine returns the lower 32-bits of the elapsed time since the system * booted, in milliseconds. * * This routine can be more efficient than k_uptime_get(), as it reduces the * need for interrupt locking and 64-bit math. However, the 32-bit result * cannot hold a system uptime time larger than approximately 50 days, so the * caller must handle possible rollovers. * * @return Current uptime. */ extern uint32_t k_uptime_get_32(void); /** * @brief Get elapsed time. * * This routine computes the elapsed time between the current system uptime * and an earlier reference time, in milliseconds. * * @param reftime Pointer to a reference time, which is updated to the current * uptime upon return. * * @return Elapsed time. */ extern int64_t k_uptime_delta(int64_t *reftime); /** * @brief Get elapsed time (32-bit version). * * This routine computes the elapsed time between the current system uptime * and an earlier reference time, in milliseconds. * * This routine can be more efficient than k_uptime_delta(), as it reduces the * need for interrupt locking and 64-bit math. However, the 32-bit result * cannot hold an elapsed time larger than approximately 50 days, so the * caller must handle possible rollovers. * * @param reftime Pointer to a reference time, which is updated to the current * uptime upon return. * * @return Elapsed time. */ extern uint32_t k_uptime_delta_32(int64_t *reftime); /** * @brief Read the hardware clock. * * This routine returns the current time, as measured by the system's hardware * clock. * * @return Current hardware clock up-counter (in cycles). */ extern uint32_t k_cycle_get_32(void); /** * @} end addtogroup clock_apis */ /** * @cond INTERNAL_HIDDEN */ struct k_fifo { _wait_q_t wait_q; sys_slist_t data_q; _DEBUG_TRACING_KERNEL_OBJECTS_NEXT_PTR(k_fifo); }; #define K_FIFO_INITIALIZER(obj) \ { \ .wait_q = SYS_DLIST_STATIC_INIT(&obj.wait_q), \ .data_q = SYS_SLIST_STATIC_INIT(&obj.data_q), \ _DEBUG_TRACING_KERNEL_OBJECTS_INIT \ } /** * INTERNAL_HIDDEN @endcond */ /** * @defgroup fifo_apis Fifo APIs * @ingroup kernel_apis * @{ */ /** * @brief Initialize a fifo. * * This routine initializes a fifo object, prior to its first use. * * @param fifo Address of the fifo. * * @return N/A */ extern void k_fifo_init(struct k_fifo *fifo); /** * @brief Add an element to a fifo. * * This routine adds a data item to @a fifo. A fifo data item must be * aligned on a 4-byte boundary, and the first 32 bits of the item are * reserved for the kernel's use. * * @note Can be called by ISRs. * * @param fifo Address of the fifo. * @param data Address of the data item. * * @return N/A */ extern void k_fifo_put(struct k_fifo *fifo, void *data); /** * @brief Atomically add a list of elements to a fifo. * * This routine adds a list of data items to @a fifo in one operation. * The data items must be in a singly-linked list, with the first 32 bits * each data item pointing to the next data item; the list must be * NULL-terminated. * * @note Can be called by ISRs. * * @param fifo Address of the fifo. * @param head Pointer to first node in singly-linked list. * @param tail Pointer to last node in singly-linked list. * * @return N/A */ extern void k_fifo_put_list(struct k_fifo *fifo, void *head, void *tail); /** * @brief Atomically add a list of elements to a fifo. * * This routine adds a list of data items to @a fifo in one operation. * The data items must be in a singly-linked list implemented using a * sys_slist_t object. Upon completion, the sys_slist_t object is invalid * and must be re-initialized via sys_slist_init(). * * @note Can be called by ISRs. * * @param fifo Address of the fifo. * @param list Pointer to sys_slist_t object. * * @return N/A */ extern void k_fifo_put_slist(struct k_fifo *fifo, sys_slist_t *list); /** * @brief Get an element from a fifo. * * This routine removes a data item from @a fifo in a "first in, first out" * manner. The first 32 bits of the data item are reserved for the kernel's use. * * @note Can be called by ISRs, but @a timeout must be set to K_NO_WAIT. * * @param fifo Address of the fifo. * @param timeout Waiting period to obtain a data item (in milliseconds), * or one of the special values K_NO_WAIT and K_FOREVER. * * @return Address of the data item if successful; NULL if returned * without waiting, or waiting period timed out. */ extern void *k_fifo_get(struct k_fifo *fifo, int32_t timeout); /** * @brief Statically define and initialize a fifo. * * The fifo can be accessed outside the module where it is defined using: * * @code extern struct k_fifo <name>; @endcode * * @param name Name of the fifo. */ #define K_FIFO_DEFINE(name) \ struct k_fifo name \ __in_section(_k_fifo, static, name) = \ K_FIFO_INITIALIZER(name) /** * @} end defgroup fifo_apis */ /** * @cond INTERNAL_HIDDEN */ struct k_lifo { _wait_q_t wait_q; void *list; _DEBUG_TRACING_KERNEL_OBJECTS_NEXT_PTR(k_lifo); }; #define K_LIFO_INITIALIZER(obj) \ { \ .wait_q = SYS_DLIST_STATIC_INIT(&obj.wait_q), \ .list = NULL, \ _DEBUG_TRACING_KERNEL_OBJECTS_INIT \ } /** * INTERNAL_HIDDEN @endcond */ /** * @defgroup lifo_apis Lifo APIs * @ingroup kernel_apis * @{ */ /** * @brief Initialize a lifo. * * This routine initializes a lifo object, prior to its first use. * * @param lifo Address of the lifo. * * @return N/A */ extern void k_lifo_init(struct k_lifo *lifo); /** * @brief Add an element to a lifo. * * This routine adds a data item to @a lifo. A lifo data item must be * aligned on a 4-byte boundary, and the first 32 bits of the item are * reserved for the kernel's use. * * @note Can be called by ISRs. * * @param lifo Address of the lifo. * @param data Address of the data item. * * @return N/A */ extern void k_lifo_put(struct k_lifo *lifo, void *data); /** * @brief Get an element from a lifo. * * This routine removes a data item from @a lifo in a "last in, first out" * manner. The first 32 bits of the data item are reserved for the kernel's use. * * @note Can be called by ISRs, but @a timeout must be set to K_NO_WAIT. * * @param lifo Address of the lifo. * @param timeout Waiting period to obtain a data item (in milliseconds), * or one of the special values K_NO_WAIT and K_FOREVER. * * @return Address of the data item if successful; NULL if returned * without waiting, or waiting period timed out. */ extern void *k_lifo_get(struct k_lifo *lifo, int32_t timeout); /** * @brief Statically define and initialize a lifo. * * The lifo can be accessed outside the module where it is defined using: * * @code extern struct k_lifo <name>; @endcode * * @param name Name of the fifo. */ #define K_LIFO_DEFINE(name) \ struct k_lifo name \ __in_section(_k_lifo, static, name) = \ K_LIFO_INITIALIZER(name) /** * @} end defgroup lifo_apis */ /** * @cond INTERNAL_HIDDEN */ struct k_stack { _wait_q_t wait_q; uint32_t *base, *next, *top; _DEBUG_TRACING_KERNEL_OBJECTS_NEXT_PTR(k_stack); }; #define K_STACK_INITIALIZER(obj, stack_buffer, stack_num_entries) \ { \ .wait_q = SYS_DLIST_STATIC_INIT(&obj.wait_q), \ .base = stack_buffer, \ .next = stack_buffer, \ .top = stack_buffer + stack_num_entries, \ _DEBUG_TRACING_KERNEL_OBJECTS_INIT \ } /** * INTERNAL_HIDDEN @endcond */ /** * @defgroup stack_apis Stack APIs * @ingroup kernel_apis * @{ */ /** * @brief Initialize a stack. * * This routine initializes a stack object, prior to its first use. * * @param stack Address of the stack. * @param buffer Address of array used to hold stacked values. * @param num_entries Maximum number of values that can be stacked. * * @return N/A */ extern void k_stack_init(struct k_stack *stack, uint32_t *buffer, int num_entries); /** * @brief Push an element onto a stack. * * This routine adds a 32-bit value @a data to @a stack. * * @note Can be called by ISRs. * * @param stack Address of the stack. * @param data Value to push onto the stack. * * @return N/A */ extern void k_stack_push(struct k_stack *stack, uint32_t data); /** * @brief Pop an element from a stack. * * This routine removes a 32-bit value from @a stack in a "last in, first out" * manner and stores the value in @a data. * * @note Can be called by ISRs, but @a timeout must be set to K_NO_WAIT. * * @param stack Address of the stack. * @param data Address of area to hold the value popped from the stack. * @param timeout Waiting period to obtain a value (in milliseconds), * or one of the special values K_NO_WAIT and K_FOREVER. * * @retval 0 Element popped from stack. * @retval -EBUSY Returned without waiting. * @retval -EAGAIN Waiting period timed out. */ extern int k_stack_pop(struct k_stack *stack, uint32_t *data, int32_t timeout); /** * @brief Statically define and initialize a stack * * The stack can be accessed outside the module where it is defined using: * * @code extern struct k_stack <name>; @endcode * * @param name Name of the stack. * @param stack_num_entries Maximum number of values that can be stacked. */ #define K_STACK_DEFINE(name, stack_num_entries) \ uint32_t __noinit \ _k_stack_buf_##name[stack_num_entries]; \ struct k_stack name \ __in_section(_k_stack, static, name) = \ K_STACK_INITIALIZER(name, _k_stack_buf_##name, \ stack_num_entries) /** * @} end defgroup stack_apis */ struct k_work; /** * @defgroup workqueue_apis Workqueue Thread APIs * @ingroup kernel_apis * @{ */ /** * @typedef k_work_handler_t * @brief Work item handler function type. * * A work item's handler function is executed by a workqueue's thread * when the work item is processed by the workqueue. * * @param work Address of the work item. * * @return N/A */ typedef void (*k_work_handler_t)(struct k_work *work); /** * @cond INTERNAL_HIDDEN */ struct k_work_q { struct k_fifo fifo; }; enum { K_WORK_STATE_PENDING, /* Work item pending state */ }; struct k_work { void *_reserved; /* Used by k_fifo implementation. */ k_work_handler_t handler; atomic_t flags[1]; }; struct k_delayed_work { struct k_work work; struct _timeout timeout; struct k_work_q *work_q; }; extern struct k_work_q k_sys_work_q; /** * INTERNAL_HIDDEN @endcond */ /** * @brief Initialize a statically-defined work item. * * This macro can be used to initialize a statically-defined workqueue work * item, prior to its first use. For example, * * @code struct k_work <work> = K_WORK_INITIALIZER(<work_handler>); @endcode * * @param work_handler Function to invoke each time work item is processed. */ #define K_WORK_INITIALIZER(work_handler) \ { \ ._reserved = NULL, \ .handler = work_handler, \ .flags = { 0 } \ } /** * @brief Initialize a work item. * * This routine initializes a workqueue work item, prior to its first use. * * @param work Address of work item. * @param handler Function to invoke each time work item is processed. * * @return N/A */ static inline void k_work_init(struct k_work *work, k_work_handler_t handler) { atomic_clear_bit(work->flags, K_WORK_STATE_PENDING); work->handler = handler; } /** * @brief Submit a work item. * * This routine submits work item @a work to be processed by workqueue * @a work_q. If the work item is already pending in the workqueue's queue * as a result of an earlier submission, this routine has no effect on the * work item. If the work item has already been processed, or is currently * being processed, its work is considered complete and the work item can be * resubmitted. * * @warning * A submitted work item must not be modified until it has been processed * by the workqueue. * * @note Can be called by ISRs. * * @param work_q Address of workqueue. * @param work Address of work item. * * @return N/A */ static inline void k_work_submit_to_queue(struct k_work_q *work_q, struct k_work *work) { if (!atomic_test_and_set_bit(work->flags, K_WORK_STATE_PENDING)) { k_fifo_put(&work_q->fifo, work); } } /** * @brief Check if a work item is pending. * * This routine indicates if work item @a work is pending in a workqueue's * queue. * * @note Can be called by ISRs. * * @param work Address of work item. * * @return 1 if work item is pending, or 0 if it is not pending. */ static inline int k_work_pending(struct k_work *work) { return atomic_test_bit(work->flags, K_WORK_STATE_PENDING); } /** * @brief Start a workqueue. * * This routine starts workqueue @a work_q. The workqueue spawns its work * processing thread, which runs forever. * * @param work_q Address of workqueue. * @param stack Pointer to work queue thread's stack space. * @param stack_size Size of the work queue thread's stack (in bytes). * @param prio Priority of the work queue's thread. * * @return N/A */ extern void k_work_q_start(struct k_work_q *work_q, char *stack, size_t stack_size, int prio); /** * @brief Initialize a delayed work item. * * This routine initializes a workqueue delayed work item, prior to * its first use. * * @param work Address of delayed work item. * @param handler Function to invoke each time work item is processed. * * @return N/A */ extern void k_delayed_work_init(struct k_delayed_work *work, k_work_handler_t handler); /** * @brief Submit a delayed work item. * * This routine schedules work item @a work to be processed by workqueue * @a work_q after a delay of @a delay milliseconds. The routine initiates * an asychronous countdown for the work item and then returns to the caller. * Only when the countdown completes is the work item actually submitted to * the workqueue and becomes pending. * * Submitting a previously submitted delayed work item that is still * counting down cancels the existing submission and restarts the countdown * using the new delay. If the work item is currently pending on the * workqueue's queue because the countdown has completed it is too late to * resubmit the item, and resubmission fails without impacting the work item. * If the work item has already been processed, or is currently being processed, * its work is considered complete and the work item can be resubmitted. * * @warning * A delayed work item must not be modified until it has been processed * by the workqueue. * * @note Can be called by ISRs. * * @param work_q Address of workqueue. * @param work Address of delayed work item. * @param delay Delay before submitting the work item (in milliseconds). * * @retval 0 Work item countdown started. * @retval -EINPROGRESS Work item is already pending. * @retval -EINVAL Work item is being processed or has completed its work. * @retval -EADDRINUSE Work item is pending on a different workqueue. */ extern int k_delayed_work_submit_to_queue(struct k_work_q *work_q, struct k_delayed_work *work, int32_t delay); /** * @brief Cancel a delayed work item. * * This routine cancels the submission of delayed work item @a work. * A delayed work item can only be cancelled while its countdown is still * underway. * * @note Can be called by ISRs. * * @param work Address of delayed work item. * * @retval 0 Work item countdown cancelled. * @retval -EINPROGRESS Work item is already pending. * @retval -EINVAL Work item is being processed or has completed its work. */ extern int k_delayed_work_cancel(struct k_delayed_work *work); /** * @brief Submit a work item to the system workqueue. * * This routine submits work item @a work to be processed by the system * workqueue. If the work item is already pending in the workqueue's queue * as a result of an earlier submission, this routine has no effect on the * work item. If the work item has already been processed, or is currently * being processed, its work is considered complete and the work item can be * resubmitted. * * @warning * Work items submitted to the system workqueue should avoid using handlers * that block or yield since this may prevent the system workqueue from * processing other work items in a timely manner. * * @note Can be called by ISRs. * * @param work Address of work item. * * @return N/A */ static inline void k_work_submit(struct k_work *work) { k_work_submit_to_queue(&k_sys_work_q, work); } /** * @brief Submit a delayed work item to the system workqueue. * * This routine schedules work item @a work to be processed by the system * workqueue after a delay of @a delay milliseconds. The routine initiates * an asychronous countdown for the work item and then returns to the caller. * Only when the countdown completes is the work item actually submitted to * the workqueue and becomes pending. * * Submitting a previously submitted delayed work item that is still * counting down cancels the existing submission and restarts the countdown * using the new delay. If the work item is currently pending on the * workqueue's queue because the countdown has completed it is too late to * resubmit the item, and resubmission fails without impacting the work item. * If the work item has already been processed, or is currently being processed, * its work is considered complete and the work item can be resubmitted. * * @warning * Work items submitted to the system workqueue should avoid using handlers * that block or yield since this may prevent the system workqueue from * processing other work items in a timely manner. * * @note Can be called by ISRs. * * @param work Address of delayed work item. * @param delay Delay before submitting the work item (in milliseconds). * * @retval 0 Work item countdown started. * @retval -EINPROGRESS Work item is already pending. * @retval -EINVAL Work item is being processed or has completed its work. * @retval -EADDRINUSE Work item is pending on a different workqueue. */ static inline int k_delayed_work_submit(struct k_delayed_work *work, int32_t delay) { return k_delayed_work_submit_to_queue(&k_sys_work_q, work, delay); } /** * @} end defgroup workqueue_apis */ /** * @cond INTERNAL_HIDDEN */ struct k_mutex { _wait_q_t wait_q; struct k_thread *owner; uint32_t lock_count; int owner_orig_prio; #ifdef CONFIG_OBJECT_MONITOR int num_lock_state_changes; int num_conflicts; #endif _DEBUG_TRACING_KERNEL_OBJECTS_NEXT_PTR(k_mutex); }; #ifdef CONFIG_OBJECT_MONITOR #define _MUTEX_INIT_OBJECT_MONITOR \ .num_lock_state_changes = 0, .num_conflicts = 0, #else #define _MUTEX_INIT_OBJECT_MONITOR #endif #define K_MUTEX_INITIALIZER(obj) \ { \ .wait_q = SYS_DLIST_STATIC_INIT(&obj.wait_q), \ .owner = NULL, \ .lock_count = 0, \ .owner_orig_prio = K_LOWEST_THREAD_PRIO, \ _MUTEX_INIT_OBJECT_MONITOR \ _DEBUG_TRACING_KERNEL_OBJECTS_INIT \ } /** * INTERNAL_HIDDEN @endcond */ /** * @defgroup mutex_apis Mutex APIs * @ingroup kernel_apis * @{ */ /** * @brief Statically define and initialize a mutex. * * The mutex can be accessed outside the module where it is defined using: * * @code extern struct k_mutex <name>; @endcode * * @param name Name of the mutex. */ #define K_MUTEX_DEFINE(name) \ struct k_mutex name \ __in_section(_k_mutex, static, name) = \ K_MUTEX_INITIALIZER(name) /** * @brief Initialize a mutex. * * This routine initializes a mutex object, prior to its first use. * * Upon completion, the mutex is available and does not have an owner. * * @param mutex Address of the mutex. * * @return N/A */ extern void k_mutex_init(struct k_mutex *mutex); /** * @brief Lock a mutex. * * This routine locks @a mutex. If the mutex is locked by another thread, * the calling thread waits until the mutex becomes available or until * a timeout occurs. * * A thread is permitted to lock a mutex it has already locked. The operation * completes immediately and the lock count is increased by 1. * * @param mutex Address of the mutex. * @param timeout Waiting period to lock the mutex (in milliseconds), * or one of the special values K_NO_WAIT and K_FOREVER. * * @retval 0 Mutex locked. * @retval -EBUSY Returned without waiting. * @retval -EAGAIN Waiting period timed out. */ extern int k_mutex_lock(struct k_mutex *mutex, int32_t timeout); /** * @brief Unlock a mutex. * * This routine unlocks @a mutex. The mutex must already be locked by the * calling thread. * * The mutex cannot be claimed by another thread until it has been unlocked by * the calling thread as many times as it was previously locked by that * thread. * * @param mutex Address of the mutex. * * @return N/A */ extern void k_mutex_unlock(struct k_mutex *mutex); /** * @} end defgroup mutex_apis */ /** * @cond INTERNAL_HIDDEN */ struct k_sem { _wait_q_t wait_q; unsigned int count; unsigned int limit; _DEBUG_TRACING_KERNEL_OBJECTS_NEXT_PTR(k_sem); }; #define K_SEM_INITIALIZER(obj, initial_count, count_limit) \ { \ .wait_q = SYS_DLIST_STATIC_INIT(&obj.wait_q), \ .count = initial_count, \ .limit = count_limit, \ _DEBUG_TRACING_KERNEL_OBJECTS_INIT \ } /** * INTERNAL_HIDDEN @endcond */ /** * @defgroup semaphore_apis Semaphore APIs * @ingroup kernel_apis * @{ */ /** * @brief Initialize a semaphore. * * This routine initializes a semaphore object, prior to its first use. * * @param sem Address of the semaphore. * @param initial_count Initial semaphore count. * @param limit Maximum permitted semaphore count. * * @return N/A */ extern void k_sem_init(struct k_sem *sem, unsigned int initial_count, unsigned int limit); /** * @brief Take a semaphore. * * This routine takes @a sem. * * @note Can be called by ISRs, but @a timeout must be set to K_NO_WAIT. * * @param sem Address of the semaphore. * @param timeout Waiting period to take the semaphore (in milliseconds), * or one of the special values K_NO_WAIT and K_FOREVER. * * @note When porting code from the nanokernel legacy API to the new API, be * careful with the return value of this function. The return value is the * reverse of the one of nano_sem_take family of APIs: 0 means success, and * non-zero means failure, while the nano_sem_take family returns 1 for success * and 0 for failure. * * @retval 0 Semaphore taken. * @retval -EBUSY Returned without waiting. * @retval -EAGAIN Waiting period timed out. */ extern int k_sem_take(struct k_sem *sem, int32_t timeout); /** * @brief Give a semaphore. * * This routine gives @a sem, unless the semaphore is already at its maximum * permitted count. * * @note Can be called by ISRs. * * @param sem Address of the semaphore. * * @return N/A */ extern void k_sem_give(struct k_sem *sem); /** * @brief Reset a semaphore's count to zero. * * This routine sets the count of @a sem to zero. * * @param sem Address of the semaphore. * * @return N/A */ static inline void k_sem_reset(struct k_sem *sem) { sem->count = 0; } /** * @brief Get a semaphore's count. * * This routine returns the current count of @a sem. * * @param sem Address of the semaphore. * * @return Current semaphore count. */ static inline unsigned int k_sem_count_get(struct k_sem *sem) { return sem->count; } /** * @brief Statically define and initialize a semaphore. * * The semaphore can be accessed outside the module where it is defined using: * * @code extern struct k_sem <name>; @endcode * * @param name Name of the semaphore. * @param initial_count Initial semaphore count. * @param count_limit Maximum permitted semaphore count. */ #define K_SEM_DEFINE(name, initial_count, count_limit) \ struct k_sem name \ __in_section(_k_sem, static, name) = \ K_SEM_INITIALIZER(name, initial_count, count_limit) /** * @} end defgroup semaphore_apis */ /** * @defgroup alert_apis Alert APIs * @ingroup kernel_apis * @{ */ /** * @typedef k_alert_handler_t * @brief Alert handler function type. * * An alert's alert handler function is invoked by the system workqueue * when the alert is signalled. The alert handler function is optional, * and is only invoked if the alert has been initialized with one. * * @param alert Address of the alert. * * @return 0 if alert has been consumed; non-zero if alert should pend. */ typedef int (*k_alert_handler_t)(struct k_alert *alert); /** * @} end defgroup alert_apis */ /** * @cond INTERNAL_HIDDEN */ #define K_ALERT_DEFAULT NULL #define K_ALERT_IGNORE ((void *)(-1)) struct k_alert { k_alert_handler_t handler; atomic_t send_count; struct k_work work_item; struct k_sem sem; _DEBUG_TRACING_KERNEL_OBJECTS_NEXT_PTR(k_alert); }; extern void _alert_deliver(struct k_work *work); #define K_ALERT_INITIALIZER(obj, alert_handler, max_num_pending_alerts) \ { \ .handler = (k_alert_handler_t)alert_handler, \ .send_count = ATOMIC_INIT(0), \ .work_item = K_WORK_INITIALIZER(_alert_deliver), \ .sem = K_SEM_INITIALIZER(obj.sem, 0, max_num_pending_alerts), \ _DEBUG_TRACING_KERNEL_OBJECTS_INIT \ } /** * INTERNAL_HIDDEN @endcond */ /** * @addtogroup alert_apis * @{ */ /** * @brief Statically define and initialize an alert. * * The alert can be accessed outside the module where it is defined using: * * @code extern struct k_alert <name>; @endcode * * @param name Name of the alert. * @param alert_handler Action to take when alert is sent. Specify either * the address of a function to be invoked by the system workqueue * thread, K_ALERT_IGNORE (which causes the alert to be ignored), or * K_ALERT_DEFAULT (which causes the alert to pend). * @param max_num_pending_alerts Maximum number of pending alerts. */ #define K_ALERT_DEFINE(name, alert_handler, max_num_pending_alerts) \ struct k_alert name \ __in_section(_k_alert, static, name) = \ K_ALERT_INITIALIZER(name, alert_handler, \ max_num_pending_alerts) /** * @brief Initialize an alert. * * This routine initializes an alert object, prior to its first use. * * @param alert Address of the alert. * @param handler Action to take when alert is sent. Specify either the address * of a function to be invoked by the system workqueue thread, * K_ALERT_IGNORE (which causes the alert to be ignored), or * K_ALERT_DEFAULT (which causes the alert to pend). * @param max_num_pending_alerts Maximum number of pending alerts. * * @return N/A */ extern void k_alert_init(struct k_alert *alert, k_alert_handler_t handler, unsigned int max_num_pending_alerts); /** * @brief Receive an alert. * * This routine receives a pending alert for @a alert. * * @note Can be called by ISRs, but @a timeout must be set to K_NO_WAIT. * * @param alert Address of the alert. * @param timeout Waiting period to receive the alert (in milliseconds), * or one of the special values K_NO_WAIT and K_FOREVER. * * @retval 0 Alert received. * @retval -EBUSY Returned without waiting. * @retval -EAGAIN Waiting period timed out. */ extern int k_alert_recv(struct k_alert *alert, int32_t timeout); /** * @brief Signal an alert. * * This routine signals @a alert. The action specified for @a alert will * be taken, which may trigger the execution of an alert handler function * and/or cause the alert to pend (assuming the alert has not reached its * maximum number of pending alerts). * * @note Can be called by ISRs. * * @param alert Address of the alert. * * @return N/A */ extern void k_alert_send(struct k_alert *alert); /** * @} end addtogroup alert_apis */ /** * @cond INTERNAL_HIDDEN */ struct k_msgq { _wait_q_t wait_q; size_t msg_size; uint32_t max_msgs; char *buffer_start; char *buffer_end; char *read_ptr; char *write_ptr; uint32_t used_msgs; _DEBUG_TRACING_KERNEL_OBJECTS_NEXT_PTR(k_msgq); }; #define K_MSGQ_INITIALIZER(obj, q_buffer, q_msg_size, q_max_msgs) \ { \ .wait_q = SYS_DLIST_STATIC_INIT(&obj.wait_q), \ .max_msgs = q_max_msgs, \ .msg_size = q_msg_size, \ .buffer_start = q_buffer, \ .buffer_end = q_buffer + (q_max_msgs * q_msg_size), \ .read_ptr = q_buffer, \ .write_ptr = q_buffer, \ .used_msgs = 0, \ _DEBUG_TRACING_KERNEL_OBJECTS_INIT \ } /** * INTERNAL_HIDDEN @endcond */ /** * @defgroup msgq_apis Message Queue APIs * @ingroup kernel_apis * @{ */ /** * @brief Statically define and initialize a message queue. * * The message queue's ring buffer contains space for @a q_max_msgs messages, * each of which is @a q_msg_size bytes long. The buffer is aligned to a * @a q_align -byte boundary, which must be a power of 2. To ensure that each * message is similarly aligned to this boundary, @a q_msg_size must also be * a multiple of @a q_align. * * The message queue can be accessed outside the module where it is defined * using: * * @code extern struct k_msgq <name>; @endcode * * @param q_name Name of the message queue. * @param q_msg_size Message size (in bytes). * @param q_max_msgs Maximum number of messages that can be queued. * @param q_align Alignment of the message queue's ring buffer. */ #define K_MSGQ_DEFINE(q_name, q_msg_size, q_max_msgs, q_align) \ static char __noinit __aligned(q_align) \ _k_fifo_buf_##q_name[(q_max_msgs) * (q_msg_size)]; \ struct k_msgq q_name \ __in_section(_k_msgq, static, q_name) = \ K_MSGQ_INITIALIZER(q_name, _k_fifo_buf_##q_name, \ q_msg_size, q_max_msgs) /** * @brief Initialize a message queue. * * This routine initializes a message queue object, prior to its first use. * * The message queue's ring buffer must contain space for @a max_msgs messages, * each of which is @a msg_size bytes long. The buffer must be aligned to an * N-byte boundary, where N is a power of 2 (i.e. 1, 2, 4, ...). To ensure * that each message is similarly aligned to this boundary, @a q_msg_size * must also be a multiple of N. * * @param q Address of the message queue. * @param buffer Pointer to ring buffer that holds queued messages. * @param msg_size Message size (in bytes). * @param max_msgs Maximum number of messages that can be queued. * * @return N/A */ extern void k_msgq_init(struct k_msgq *q, char *buffer, size_t msg_size, uint32_t max_msgs); /** * @brief Send a message to a message queue. * * This routine sends a message to message queue @a q. * * @note Can be called by ISRs. * * @param q Address of the message queue. * @param data Pointer to the message. * @param timeout Waiting period to add the message (in milliseconds), * or one of the special values K_NO_WAIT and K_FOREVER. * * @retval 0 Message sent. * @retval -ENOMSG Returned without waiting or queue purged. * @retval -EAGAIN Waiting period timed out. */ extern int k_msgq_put(struct k_msgq *q, void *data, int32_t timeout); /** * @brief Receive a message from a message queue. * * This routine receives a message from message queue @a q in a "first in, * first out" manner. * * @note Can be called by ISRs, but @a timeout must be set to K_NO_WAIT. * * @param q Address of the message queue. * @param data Address of area to hold the received message. * @param timeout Waiting period to receive the message (in milliseconds), * or one of the special values K_NO_WAIT and K_FOREVER. * * @retval 0 Message received. * @retval -ENOMSG Returned without waiting. * @retval -EAGAIN Waiting period timed out. */ extern int k_msgq_get(struct k_msgq *q, void *data, int32_t timeout); /** * @brief Purge a message queue. * * This routine discards all unreceived messages in a message queue's ring * buffer. Any threads that are blocked waiting to send a message to the * message queue are unblocked and see an -ENOMSG error code. * * @param q Address of the message queue. * * @return N/A */ extern void k_msgq_purge(struct k_msgq *q); /** * @brief Get the amount of free space in a message queue. * * This routine returns the number of unused entries in a message queue's * ring buffer. * * @param q Address of the message queue. * * @return Number of unused ring buffer entries. */ static inline uint32_t k_msgq_num_free_get(struct k_msgq *q) { return q->max_msgs - q->used_msgs; } /** * @brief Get the number of messages in a message queue. * * This routine returns the number of messages in a message queue's ring buffer. * * @param q Address of the message queue. * * @return Number of messages. */ static inline uint32_t k_msgq_num_used_get(struct k_msgq *q) { return q->used_msgs; } /** * @} end defgroup msgq_apis */ /** * @defgroup mem_pool_apis Memory Pool APIs * @ingroup kernel_apis * @{ */ struct k_mem_block { struct k_mem_pool *pool_id; void *addr_in_pool; void *data; size_t req_size; }; /** * @} end defgroup mem_pool_apis */ /** * @defgroup mailbox_apis Mailbox APIs * @ingroup kernel_apis * @{ */ struct k_mbox_msg { /** internal use only - needed for legacy API support */ uint32_t _mailbox; /** size of message (in bytes) */ size_t size; /** application-defined information value */ uint32_t info; /** sender's message data buffer */ void *tx_data; /** internal use only - needed for legacy API support */ void *_rx_data; /** message data block descriptor */ struct k_mem_block tx_block; /** source thread id */ k_tid_t rx_source_thread; /** target thread id */ k_tid_t tx_target_thread; /** internal use only - thread waiting on send (may be a dummy) */ k_tid_t _syncing_thread; #if (CONFIG_NUM_MBOX_ASYNC_MSGS > 0) /** internal use only - semaphore used during asynchronous send */ struct k_sem *_async_sem; #endif }; /** * @cond INTERNAL_HIDDEN */ struct k_mbox { _wait_q_t tx_msg_queue; _wait_q_t rx_msg_queue; _DEBUG_TRACING_KERNEL_OBJECTS_NEXT_PTR(k_mbox); }; #define K_MBOX_INITIALIZER(obj) \ { \ .tx_msg_queue = SYS_DLIST_STATIC_INIT(&obj.tx_msg_queue), \ .rx_msg_queue = SYS_DLIST_STATIC_INIT(&obj.rx_msg_queue), \ _DEBUG_TRACING_KERNEL_OBJECTS_INIT \ } /** * INTERNAL_HIDDEN @endcond */ /** * @brief Statically define and initialize a mailbox. * * The mailbox is to be accessed outside the module where it is defined using: * * @code extern struct k_mbox <name>; @endcode * * @param name Name of the mailbox. */ #define K_MBOX_DEFINE(name) \ struct k_mbox name \ __in_section(_k_mbox, static, name) = \ K_MBOX_INITIALIZER(name) \ /** * @brief Initialize a mailbox. * * This routine initializes a mailbox object, prior to its first use. * * @param mbox Address of the mailbox. * * @return N/A */ extern void k_mbox_init(struct k_mbox *mbox); /** * @brief Send a mailbox message in a synchronous manner. * * This routine sends a message to @a mbox and waits for a receiver to both * receive and process it. The message data may be in a buffer, in a memory * pool block, or non-existent (i.e. an empty message). * * @param mbox Address of the mailbox. * @param tx_msg Address of the transmit message descriptor. * @param timeout Waiting period for the message to be received (in * milliseconds), or one of the special values K_NO_WAIT * and K_FOREVER. Once the message has been received, * this routine waits as long as necessary for the message * to be completely processed. * * @retval 0 Message sent. * @retval -ENOMSG Returned without waiting. * @retval -EAGAIN Waiting period timed out. */ extern int k_mbox_put(struct k_mbox *mbox, struct k_mbox_msg *tx_msg, int32_t timeout); /** * @brief Send a mailbox message in an asynchronous manner. * * This routine sends a message to @a mbox without waiting for a receiver * to process it. The message data may be in a buffer, in a memory pool block, * or non-existent (i.e. an empty message). Optionally, the semaphore @a sem * will be given when the message has been both received and completely * processed by the receiver. * * @param mbox Address of the mailbox. * @param tx_msg Address of the transmit message descriptor. * @param sem Address of a semaphore, or NULL if none is needed. * * @return N/A */ extern void k_mbox_async_put(struct k_mbox *mbox, struct k_mbox_msg *tx_msg, struct k_sem *sem); /** * @brief Receive a mailbox message. * * This routine receives a message from @a mbox, then optionally retrieves * its data and disposes of the message. * * @param mbox Address of the mailbox. * @param rx_msg Address of the receive message descriptor. * @param buffer Address of the buffer to receive data, or NULL to defer data * retrieval and message disposal until later. * @param timeout Waiting period for a message to be received (in * milliseconds), or one of the special values K_NO_WAIT * and K_FOREVER. * * @retval 0 Message received. * @retval -ENOMSG Returned without waiting. * @retval -EAGAIN Waiting period timed out. */ extern int k_mbox_get(struct k_mbox *mbox, struct k_mbox_msg *rx_msg, void *buffer, int32_t timeout); /** * @brief Retrieve mailbox message data into a buffer. * * This routine completes the processing of a received message by retrieving * its data into a buffer, then disposing of the message. * * Alternatively, this routine can be used to dispose of a received message * without retrieving its data. * * @param rx_msg Address of the receive message descriptor. * @param buffer Address of the buffer to receive data, or NULL to discard * the data. * * @return N/A */ extern void k_mbox_data_get(struct k_mbox_msg *rx_msg, void *buffer); /** * @brief Retrieve mailbox message data into a memory pool block. * * This routine completes the processing of a received message by retrieving * its data into a memory pool block, then disposing of the message. * The memory pool block that results from successful retrieval must be * returned to the pool once the data has been processed, even in cases * where zero bytes of data are retrieved. * * Alternatively, this routine can be used to dispose of a received message * without retrieving its data. In this case there is no need to return a * memory pool block to the pool. * * This routine allocates a new memory pool block for the data only if the * data is not already in one. If a new block cannot be allocated, the routine * returns a failure code and the received message is left unchanged. This * permits the caller to reattempt data retrieval at a later time or to dispose * of the received message without retrieving its data. * * @param rx_msg Address of a receive message descriptor. * @param pool Address of memory pool, or NULL to discard data. * @param block Address of the area to hold memory pool block info. * @param timeout Waiting period to wait for a memory pool block (in * milliseconds), or one of the special values K_NO_WAIT * and K_FOREVER. * * @retval 0 Data retrieved. * @retval -ENOMEM Returned without waiting. * @retval -EAGAIN Waiting period timed out. */ extern int k_mbox_data_block_get(struct k_mbox_msg *rx_msg, struct k_mem_pool *pool, struct k_mem_block *block, int32_t timeout); /** * @} end defgroup mailbox_apis */ /** * @cond INTERNAL_HIDDEN */ struct k_pipe { unsigned char *buffer; /* Pipe buffer: may be NULL */ size_t size; /* Buffer size */ size_t bytes_used; /* # bytes used in buffer */ size_t read_index; /* Where in buffer to read from */ size_t write_index; /* Where in buffer to write */ struct { _wait_q_t readers; /* Reader wait queue */ _wait_q_t writers; /* Writer wait queue */ } wait_q; _DEBUG_TRACING_KERNEL_OBJECTS_NEXT_PTR(k_pipe); }; #define K_PIPE_INITIALIZER(obj, pipe_buffer, pipe_buffer_size) \ { \ .buffer = pipe_buffer, \ .size = pipe_buffer_size, \ .bytes_used = 0, \ .read_index = 0, \ .write_index = 0, \ .wait_q.writers = SYS_DLIST_STATIC_INIT(&obj.wait_q.writers), \ .wait_q.readers = SYS_DLIST_STATIC_INIT(&obj.wait_q.readers), \ _DEBUG_TRACING_KERNEL_OBJECTS_INIT \ } /** * INTERNAL_HIDDEN @endcond */ /** * @defgroup pipe_apis Pipe APIs * @ingroup kernel_apis * @{ */ /** * @brief Statically define and initialize a pipe. * * The pipe can be accessed outside the module where it is defined using: * * @code extern struct k_pipe <name>; @endcode * * @param name Name of the pipe. * @param pipe_buffer_size Size of the pipe's ring buffer (in bytes), * or zero if no ring buffer is used. * @param pipe_align Alignment of the pipe's ring buffer (power of 2). */ #define K_PIPE_DEFINE(name, pipe_buffer_size, pipe_align) \ static unsigned char __noinit __aligned(pipe_align) \ _k_pipe_buf_##name[pipe_buffer_size]; \ struct k_pipe name \ __in_section(_k_pipe, static, name) = \ K_PIPE_INITIALIZER(name, _k_pipe_buf_##name, pipe_buffer_size) /** * @brief Initialize a pipe. * * This routine initializes a pipe object, prior to its first use. * * @param pipe Address of the pipe. * @param buffer Address of the pipe's ring buffer, or NULL if no ring buffer * is used. * @param size Size of the pipe's ring buffer (in bytes), or zero if no ring * buffer is used. * * @return N/A */ extern void k_pipe_init(struct k_pipe *pipe, unsigned char *buffer, size_t size); /** * @brief Write data to a pipe. * * This routine writes up to @a bytes_to_write bytes of data to @a pipe. * * @param pipe Address of the pipe. * @param data Address of data to write. * @param bytes_to_write Size of data (in bytes). * @param bytes_written Address of area to hold the number of bytes written. * @param min_xfer Minimum number of bytes to write. * @param timeout Waiting period to wait for the data to be written (in * milliseconds), or one of the special values K_NO_WAIT * and K_FOREVER. * * @retval 0 At least @a min_xfer bytes of data were written. * @retval -EIO Returned without waiting; zero data bytes were written. * @retval -EAGAIN Waiting period timed out; between zero and @a min_xfer * minus one data bytes were written. */ extern int k_pipe_put(struct k_pipe *pipe, void *data, size_t bytes_to_write, size_t *bytes_written, size_t min_xfer, int32_t timeout); /** * @brief Read data from a pipe. * * This routine reads up to @a bytes_to_read bytes of data from @a pipe. * * @param pipe Address of the pipe. * @param data Address to place the data read from pipe. * @param bytes_to_read Maximum number of data bytes to read. * @param bytes_read Address of area to hold the number of bytes read. * @param min_xfer Minimum number of data bytes to read. * @param timeout Waiting period to wait for the data to be read (in * milliseconds), or one of the special values K_NO_WAIT * and K_FOREVER. * * @retval 0 At least @a min_xfer bytes of data were read. * @retval -EIO Returned without waiting; zero data bytes were read. * @retval -EAGAIN Waiting period timed out; between zero and @a min_xfer * minus one data bytes were read. */ extern int k_pipe_get(struct k_pipe *pipe, void *data, size_t bytes_to_read, size_t *bytes_read, size_t min_xfer, int32_t timeout); /** * @brief Write memory block to a pipe. * * This routine writes the data contained in a memory block to @a pipe. * Once all of the data in the block has been written to the pipe, it will * free the memory block @a block and give the semaphore @a sem (if specified). * * @param pipe Address of the pipe. * @param block Memory block containing data to send * @param size Number of data bytes in memory block to send * @param sem Semaphore to signal upon completion (else NULL) * * @return N/A */ extern void k_pipe_block_put(struct k_pipe *pipe, struct k_mem_block *block, size_t size, struct k_sem *sem); /** * @} end defgroup pipe_apis */ /** * @cond INTERNAL_HIDDEN */ struct k_mem_slab { _wait_q_t wait_q; uint32_t num_blocks; size_t block_size; char *buffer; char *free_list; uint32_t num_used; _DEBUG_TRACING_KERNEL_OBJECTS_NEXT_PTR(k_mem_slab); }; #define K_MEM_SLAB_INITIALIZER(obj, slab_buffer, slab_block_size, \ slab_num_blocks) \ { \ .wait_q = SYS_DLIST_STATIC_INIT(&obj.wait_q), \ .num_blocks = slab_num_blocks, \ .block_size = slab_block_size, \ .buffer = slab_buffer, \ .free_list = NULL, \ .num_used = 0, \ _DEBUG_TRACING_KERNEL_OBJECTS_INIT \ } /** * INTERNAL_HIDDEN @endcond */ /** * @defgroup mem_slab_apis Memory Slab APIs * @ingroup kernel_apis * @{ */ /** * @brief Statically define and initialize a memory slab. * * The memory slab's buffer contains @a slab_num_blocks memory blocks * that are @a slab_block_size bytes long. The buffer is aligned to a * @a slab_align -byte boundary. To ensure that each memory block is similarly * aligned to this boundary, @a slab_block_size must also be a multiple of * @a slab_align. * * The memory slab can be accessed outside the module where it is defined * using: * * @code extern struct k_mem_slab <name>; @endcode * * @param name Name of the memory slab. * @param slab_block_size Size of each memory block (in bytes). * @param slab_num_blocks Number memory blocks. * @param slab_align Alignment of the memory slab's buffer (power of 2). */ #define K_MEM_SLAB_DEFINE(name, slab_block_size, slab_num_blocks, slab_align) \ char __noinit __aligned(slab_align) \ _k_mem_slab_buf_##name[(slab_num_blocks) * (slab_block_size)]; \ struct k_mem_slab name \ __in_section(_k_mem_slab, static, name) = \ K_MEM_SLAB_INITIALIZER(name, _k_mem_slab_buf_##name, \ slab_block_size, slab_num_blocks) /** * @brief Initialize a memory slab. * * Initializes a memory slab, prior to its first use. * * The memory slab's buffer contains @a slab_num_blocks memory blocks * that are @a slab_block_size bytes long. The buffer must be aligned to an * N-byte boundary, where N is a power of 2 larger than 2 (i.e. 4, 8, 16, ...). * To ensure that each memory block is similarly aligned to this boundary, * @a slab_block_size must also be a multiple of N. * * @param slab Address of the memory slab. * @param buffer Pointer to buffer used for the memory blocks. * @param block_size Size of each memory block (in bytes). * @param num_blocks Number of memory blocks. * * @return N/A */ extern void k_mem_slab_init(struct k_mem_slab *slab, void *buffer, size_t block_size, uint32_t num_blocks); /** * @brief Allocate memory from a memory slab. * * This routine allocates a memory block from a memory slab. * * @param slab Address of the memory slab. * @param mem Pointer to block address area. * @param timeout Maximum time to wait for operation to complete * (in milliseconds). Use K_NO_WAIT to return without waiting, * or K_FOREVER to wait as long as necessary. * * @retval 0 Memory allocated. The block address area pointed at by @a mem * is set to the starting address of the memory block. * @retval -ENOMEM Returned without waiting. * @retval -EAGAIN Waiting period timed out. */ extern int k_mem_slab_alloc(struct k_mem_slab *slab, void **mem, int32_t timeout); /** * @brief Free memory allocated from a memory slab. * * This routine releases a previously allocated memory block back to its * associated memory slab. * * @param slab Address of the memory slab. * @param mem Pointer to block address area (as set by k_mem_slab_alloc()). * * @return N/A */ extern void k_mem_slab_free(struct k_mem_slab *slab, void **mem); /** * @brief Get the number of used blocks in a memory slab. * * This routine gets the number of memory blocks that are currently * allocated in @a slab. * * @param slab Address of the memory slab. * * @return Number of allocated memory blocks. */ static inline uint32_t k_mem_slab_num_used_get(struct k_mem_slab *slab) { return slab->num_used; } /** * @brief Get the number of unused blocks in a memory slab. * * This routine gets the number of memory blocks that are currently * unallocated in @a slab. * * @param slab Address of the memory slab. * * @return Number of unallocated memory blocks. */ static inline uint32_t k_mem_slab_num_free_get(struct k_mem_slab *slab) { return slab->num_blocks - slab->num_used; } /** * @} end defgroup mem_slab_apis */ /** * @cond INTERNAL_HIDDEN */ /* * Memory pool requires a buffer and two arrays of structures for the * memory block accounting: * A set of arrays of k_mem_pool_quad_block structures where each keeps a * status of four blocks of memory. */ struct k_mem_pool_quad_block { char *mem_blocks; /* pointer to the first of four memory blocks */ uint32_t mem_status; /* four bits. If bit is set, memory block is allocated */ }; /* * Memory pool mechanism uses one array of k_mem_pool_quad_block for accounting * blocks of one size. Block sizes go from maximal to minimal. Next memory * block size is 4 times less than the previous one and thus requires 4 times * bigger array of k_mem_pool_quad_block structures to keep track of the * memory blocks. */ /* * The array of k_mem_pool_block_set keeps the information of each array of * k_mem_pool_quad_block structures */ struct k_mem_pool_block_set { size_t block_size; /* memory block size */ uint32_t nr_of_entries; /* nr of quad block structures in the array */ struct k_mem_pool_quad_block *quad_block; int count; }; /* Memory pool descriptor */ struct k_mem_pool { size_t max_block_size; size_t min_block_size; uint32_t nr_of_maxblocks; uint32_t nr_of_block_sets; struct k_mem_pool_block_set *block_set; char *bufblock; _wait_q_t wait_q; _DEBUG_TRACING_KERNEL_OBJECTS_NEXT_PTR(k_mem_pool); }; #ifdef CONFIG_ARM #define _SECTION_TYPE_SIGN "%" #else #define _SECTION_TYPE_SIGN "@" #endif /* * Static memory pool initialization */ /* * Use .altmacro to be able to recalculate values and pass them as string * arguments when calling assembler macros resursively */ __asm__(".altmacro\n\t"); /* * Recursively calls a macro * The followig global symbols need to be initialized: * __memory_pool_max_block_size - maximal size of the memory block * __memory_pool_min_block_size - minimal size of the memory block * Notes: * Global symbols are used due the fact that assembler macro allows only * one argument be passed with the % conversion * Some assemblers do not get division operation ("/"). To avoid it >> 2 * is used instead of / 4. * n_max argument needs to go first in the invoked macro, as some * assemblers concatenate \name and %(\n_max * 4) arguments * if \name goes first */ __asm__(".macro __do_recurse macro_name, name, n_max\n\t" ".ifge __memory_pool_max_block_size >> 2 -" " __memory_pool_min_block_size\n\t\t" "__memory_pool_max_block_size = __memory_pool_max_block_size >> 2\n\t\t" "\\macro_name %(\\n_max * 4) \\name\n\t" ".endif\n\t" ".endm\n"); /* * Build quad blocks * Macro allocates space in memory for the array of k_mem_pool_quad_block * structures and recursively calls itself for the next array, 4 times * larger. * The followig global symbols need to be initialized: * __memory_pool_max_block_size - maximal size of the memory block * __memory_pool_min_block_size - minimal size of the memory block * __memory_pool_quad_block_size - sizeof(struct k_mem_pool_quad_block) */ __asm__(".macro _build_quad_blocks n_max, name\n\t" ".balign 4\n\t" "_mem_pool_quad_blocks_\\name\\()_\\n_max:\n\t" ".skip __memory_pool_quad_block_size * \\n_max >> 2\n\t" ".if \\n_max % 4\n\t\t" ".skip __memory_pool_quad_block_size\n\t" ".endif\n\t" "__do_recurse _build_quad_blocks \\name \\n_max\n\t" ".endm\n"); /* * Build block sets and initialize them * Macro initializes the k_mem_pool_block_set structure and * recursively calls itself for the next one. * The followig global symbols need to be initialized: * __memory_pool_max_block_size - maximal size of the memory block * __memory_pool_min_block_size - minimal size of the memory block * __memory_pool_block_set_count, the number of the elements in the * block set array must be set to 0. Macro calculates it's real * value. * Since the macro initializes pointers to an array of k_mem_pool_quad_block * structures, _build_quad_blocks must be called prior it. */ __asm__(".macro _build_block_set n_max, name\n\t" ".int __memory_pool_max_block_size\n\t" /* block_size */ ".if \\n_max % 4\n\t\t" ".int \\n_max >> 2 + 1\n\t" /* nr_of_entries */ ".else\n\t\t" ".int \\n_max >> 2\n\t" ".endif\n\t" ".int _mem_pool_quad_blocks_\\name\\()_\\n_max\n\t" /* quad_block */ ".int 0\n\t" /* count */ "__memory_pool_block_set_count = __memory_pool_block_set_count + 1\n\t" "__do_recurse _build_block_set \\name \\n_max\n\t" ".endm\n"); /* * Build a memory pool structure and initialize it * Macro uses __memory_pool_block_set_count global symbol, * block set addresses and buffer address, it may be called only after * _build_block_set */ __asm__(".macro _build_mem_pool name, min_size, max_size, n_max\n\t" ".pushsection ._k_mem_pool.static.\\name,\"aw\"," _SECTION_TYPE_SIGN "progbits\n\t" ".globl \\name\n\t" "\\name:\n\t" ".int \\max_size\n\t" /* max_block_size */ ".int \\min_size\n\t" /* min_block_size */ ".int \\n_max\n\t" /* nr_of_maxblocks */ ".int __memory_pool_block_set_count\n\t" /* nr_of_block_sets */ ".int _mem_pool_block_sets_\\name\n\t" /* block_set */ ".int _mem_pool_buffer_\\name\n\t" /* bufblock */ ".int 0\n\t" /* wait_q->head */ ".int 0\n\t" /* wait_q->next */ ".popsection\n\t" ".endm\n"); #define _MEMORY_POOL_QUAD_BLOCK_DEFINE(name, min_size, max_size, n_max) \ __asm__(".pushsection ._k_memory_pool.struct,\"aw\"," \ _SECTION_TYPE_SIGN "progbits\n\t"); \ __asm__("__memory_pool_min_block_size = " STRINGIFY(min_size) "\n\t"); \ __asm__("__memory_pool_max_block_size = " STRINGIFY(max_size) "\n\t"); \ __asm__("_build_quad_blocks " STRINGIFY(n_max) " " \ STRINGIFY(name) "\n\t"); \ __asm__(".popsection\n\t") #define _MEMORY_POOL_BLOCK_SETS_DEFINE(name, min_size, max_size, n_max) \ __asm__("__memory_pool_block_set_count = 0\n\t"); \ __asm__("__memory_pool_max_block_size = " STRINGIFY(max_size) "\n\t"); \ __asm__(".pushsection ._k_memory_pool.struct,\"aw\"," \ _SECTION_TYPE_SIGN "progbits\n\t"); \ __asm__(".balign 4\n\t"); \ __asm__("_mem_pool_block_sets_" STRINGIFY(name) ":\n\t"); \ __asm__("_build_block_set " STRINGIFY(n_max) " " \ STRINGIFY(name) "\n\t"); \ __asm__("_mem_pool_block_set_count_" STRINGIFY(name) ":\n\t"); \ __asm__(".int __memory_pool_block_set_count\n\t"); \ __asm__(".popsection\n\t"); \ extern uint32_t _mem_pool_block_set_count_##name; \ extern struct k_mem_pool_block_set _mem_pool_block_sets_##name[] #define _MEMORY_POOL_BUFFER_DEFINE(name, max_size, n_max, align) \ char __noinit __aligned(align) \ _mem_pool_buffer_##name[(max_size) * (n_max)] /* * Dummy function that assigns the value of sizeof(struct k_mem_pool_quad_block) * to __memory_pool_quad_block_size absolute symbol. * This function does not get called, but compiler calculates the value and * assigns it to the absolute symbol, that, in turn is used by assembler macros. */ static void __attribute__ ((used)) __k_mem_pool_quad_block_size_define(void) { __asm__(".globl __memory_pool_quad_block_size\n\t" #ifdef CONFIG_NIOS2 "__memory_pool_quad_block_size = %0\n\t" #else "__memory_pool_quad_block_size = %c0\n\t" #endif : : "n"(sizeof(struct k_mem_pool_quad_block))); } /** * INTERNAL_HIDDEN @endcond */ /** * @addtogroup mem_pool_apis * @{ */ /** * @brief Statically define and initialize a memory pool. * * The memory pool's buffer contains @a n_max blocks that are @a max_size bytes * long. The memory pool allows blocks to be repeatedly partitioned into * quarters, down to blocks of @a min_size bytes long. The buffer is aligned * to a @a align -byte boundary. To ensure that the minimum sized blocks are * similarly aligned to this boundary, @a min_size must also be a multiple of * @a align. * * If the pool is to be accessed outside the module where it is defined, it * can be declared via * * @code extern struct k_mem_pool <name>; @endcode * * @param name Name of the memory pool. * @param min_size Size of the smallest blocks in the pool (in bytes). * @param max_size Size of the largest blocks in the pool (in bytes). * @param n_max Number of maximum sized blocks in the pool. * @param align Alignment of the pool's buffer (power of 2). */ #define K_MEM_POOL_DEFINE(name, min_size, max_size, n_max, align) \ _MEMORY_POOL_QUAD_BLOCK_DEFINE(name, min_size, max_size, n_max); \ _MEMORY_POOL_BLOCK_SETS_DEFINE(name, min_size, max_size, n_max); \ _MEMORY_POOL_BUFFER_DEFINE(name, max_size, n_max, align); \ __asm__("_build_mem_pool " STRINGIFY(name) " " STRINGIFY(min_size) " " \ STRINGIFY(max_size) " " STRINGIFY(n_max) "\n\t"); \ extern struct k_mem_pool name /** * @brief Allocate memory from a memory pool. * * This routine allocates a memory block from a memory pool. * * @param pool Address of the memory pool. * @param block Pointer to block descriptor for the allocated memory. * @param size Amount of memory to allocate (in bytes). * @param timeout Maximum time to wait for operation to complete * (in milliseconds). Use K_NO_WAIT to return without waiting, * or K_FOREVER to wait as long as necessary. * * @retval 0 Memory allocated. The @a data field of the block descriptor * is set to the starting address of the memory block. * @retval -ENOMEM Returned without waiting. * @retval -EAGAIN Waiting period timed out. */ extern int k_mem_pool_alloc(struct k_mem_pool *pool, struct k_mem_block *block, size_t size, int32_t timeout); /** * @brief Free memory allocated from a memory pool. * * This routine releases a previously allocated memory block back to its * memory pool. * * @param block Pointer to block descriptor for the allocated memory. * * @return N/A */ extern void k_mem_pool_free(struct k_mem_block *block); /** * @brief Defragment a memory pool. * * This routine instructs a memory pool to concatenate unused memory blocks * into larger blocks wherever possible. Manually defragmenting the memory * pool may speed up future allocations of memory blocks by eliminating the * need for the memory pool to perform an automatic partial defragmentation. * * @param pool Address of the memory pool. * * @return N/A */ extern void k_mem_pool_defrag(struct k_mem_pool *pool); /** * @} end addtogroup mem_pool_apis */ /** * @defgroup heap_apis Heap Memory Pool APIs * @ingroup kernel_apis * @{ */ /** * @brief Allocate memory from heap. * * This routine provides traditional malloc() semantics. Memory is * allocated from the heap memory pool. * * @param size Amount of memory requested (in bytes). * * @return Address of the allocated memory if successful; otherwise NULL. */ extern void *k_malloc(size_t size); /** * @brief Free memory allocated from heap. * * This routine provides traditional free() semantics. The memory being * returned must have been allocated from the heap memory pool. * * @param ptr Pointer to previously allocated memory. * * @return N/A */ extern void k_free(void *ptr); /** * @} end defgroup heap_apis */ /* * legacy.h must be before arch/cpu.h to allow the ioapic/loapic drivers to * hook into the device subsystem, which itself uses nanokernel semaphores, * and thus currently requires the definition of nano_sem. */ #include <legacy.h> #include <arch/cpu.h> /* * private APIs that are utilized by one or more public APIs */ extern int _is_thread_essential(void); extern void _init_static_threads(void); extern void _timer_expiration_handler(struct _timeout *t); #ifdef __cplusplus } #endif #if defined(CONFIG_CPLUSPLUS) && defined(__cplusplus) /* * Define new and delete operators. * At this moment, the operators do nothing since objects are supposed * to be statically allocated. */ inline void operator delete(void *ptr) { (void)ptr; } inline void operator delete[](void *ptr) { (void)ptr; } inline void *operator new(size_t size) { (void)size; return NULL; } inline void *operator new[](size_t size) { (void)size; return NULL; } /* Placement versions of operator new and delete */ inline void operator delete(void *ptr1, void *ptr2) { (void)ptr1; (void)ptr2; } inline void operator delete[](void *ptr1, void *ptr2) { (void)ptr1; (void)ptr2; } inline void *operator new(size_t size, void *ptr) { (void)size; return ptr; } inline void *operator new[](size_t size, void *ptr) { (void)size; return ptr; } #endif /* defined(CONFIG_CPLUSPLUS) && defined(__cplusplus) */ #endif /* _kernel__h_ */ |