/* Malloc implementation for multiple threads without lock contention.
Copyright (C) 1996-2022 Free Software Foundation, Inc.
Copyright The GNU Toolchain Authors.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public License as
published by the Free Software Foundation; either version 2.1 of the
License, or (at your option) any later version.
The GNU C Library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with the GNU C Library; see the file COPYING.LIB. If
not, see <https://www.gnu.org/licenses/>. */
/*
This is a version (aka ptmalloc2) of malloc/free/realloc written by
Doug Lea and adapted to multiple threads/arenas by Wolfram Gloger.
There have been substantial changes made after the integration into
glibc in all parts of the code. Do not look for much commonality
with the ptmalloc2 version.
* Version ptmalloc2-20011215
based on:
VERSION 2.7.0 Sun Mar 11 14:14:06 2001 Doug Lea (dl at gee)
* Quickstart
In order to compile this implementation, a Makefile is provided with
the ptmalloc2 distribution, which has pre-defined targets for some
popular systems (e.g. "make posix" for Posix threads). All that is
typically required with regard to compiler flags is the selection of
the thread package via defining one out of USE_PTHREADS, USE_THR or
USE_SPROC. Check the thread-m.h file for what effects this has.
Many/most systems will additionally require USE_TSD_DATA_HACK to be
defined, so this is the default for "make posix".
* Why use this malloc?
This is not the fastest, most space-conserving, most portable, or
most tunable malloc ever written. However it is among the fastest
while also being among the most space-conserving, portable and tunable.
Consistent balance across these factors results in a good general-purpose
allocator for malloc-intensive programs.
The main properties of the algorithms are:
* For large (>= 512 bytes) requests, it is a pure best-fit allocator,
with ties normally decided via FIFO (i.e. least recently used).
* For small (<= 64 bytes by default) requests, it is a caching
allocator, that maintains pools of quickly recycled chunks.
* In between, and for combinations of large and small requests, it does
the best it can trying to meet both goals at once.
* For very large requests (>= 128KB by default), it relies on system
memory mapping facilities, if supported.
For a longer but slightly out of date high-level description, see
http://gee.cs.oswego.edu/dl/html/malloc.html
You may already, by default, be using a C library containing a malloc
that is based on some version of this malloc (for example in
Linux). You might still want to use the one in this file in order to
customize settings or to avoid overheads associated with library
versions.
* Contents, described in more detail in "description of public routines" below.
Standard (ANSI/SVID/...) functions:
malloc(size_t n);
calloc(size_t n_elements, size_t element_size);
free(void* p);
realloc(void* p, size_t n);
memalign(size_t alignment, size_t n);
valloc(size_t n);
mallinfo()
mallopt(int parameter_number, int parameter_value)
Additional functions:
independent_calloc(size_t n_elements, size_t size, void* chunks[]);
independent_comalloc(size_t n_elements, size_t sizes[], void* chunks[]);
pvalloc(size_t n);
malloc_trim(size_t pad);
malloc_usable_size(void* p);
malloc_stats();
* Vital statistics:
Supported pointer representation: 4 or 8 bytes
Supported size_t representation: 4 or 8 bytes
Note that size_t is allowed to be 4 bytes even if pointers are 8.
You can adjust this by defining INTERNAL_SIZE_T
Alignment: 2 * sizeof(size_t) (default)
(i.e., 8-byte alignment with 4-byte size_t). This suffices for
nearly all current machines and C compilers. However, you can
define MALLOC_ALIGNMENT to be wider than this if necessary.
Minimum overhead per allocated chunk: 4 or 8 bytes
Each malloced chunk has a hidden word of overhead holding size
and status information.
Minimum allocated size: 4-byte ptrs: 16 bytes (including 4 overhead)
8-byte ptrs: 24/32 bytes (including 4/8 overhead)
When a chunk is freed, 12 (for 4-byte ptrs) or 20 (for 8-byte
ptrs but 4-byte size) or 24 (for 8/8) additional bytes are
needed; 4 (8) for a trailing size field and 8 (16) bytes for
free list pointers. Thus, the minimum allocatable size is
16/24/32 bytes.
Even a request for zero bytes (i.e., malloc(0)) returns a
pointer to something of the minimum allocatable size.
The maximum overhead wastage (i.e., number of extra bytes
allocated than were requested in malloc) is less than or equal
to the minimum size, except for requests >= mmap_threshold that
are serviced via mmap(), where the worst case wastage is 2 *
sizeof(size_t) bytes plus the remainder from a system page (the
minimal mmap unit); typically 4096 or 8192 bytes.
Maximum allocated size: 4-byte size_t: 2^32 minus about two pages
8-byte size_t: 2^64 minus about two pages
It is assumed that (possibly signed) size_t values suffice to
represent chunk sizes. `Possibly signed' is due to the fact
that `size_t' may be defined on a system as either a signed or
an unsigned type. The ISO C standard says that it must be
unsigned, but a few systems are known not to adhere to this.
Additionally, even when size_t is unsigned, sbrk (which is by
default used to obtain memory from system) accepts signed
arguments, and may not be able to handle size_t-wide arguments
with negative sign bit. Generally, values that would
appear as negative after accounting for overhead and alignment
are supported only via mmap(), which does not have this
limitation.
Requests for sizes outside the allowed range will perform an optional
failure action and then return null. (Requests may also
fail because a system is out of memory.)
Thread-safety: thread-safe
Compliance: I believe it is compliant with the 1997 Single Unix Specification
Also SVID/XPG, ANSI C, and probably others as well.
* Synopsis of compile-time options:
People have reported using previous versions of this malloc on all
versions of Unix, sometimes by tweaking some of the defines
below. It has been tested most extensively on Solaris and Linux.
People also report using it in stand-alone embedded systems.
The implementation is in straight, hand-tuned ANSI C. It is not
at all modular. (Sorry!) It uses a lot of macros. To be at all
usable, this code should be compiled using an optimizing compiler
(for example gcc -O3) that can simplify expressions and control
paths. (FAQ: some macros import variables as arguments rather than
declare locals because people reported that some debuggers
otherwise get confused.)
OPTION DEFAULT VALUE
Compilation Environment options:
HAVE_MREMAP 0
Changing default word sizes:
INTERNAL_SIZE_T size_t
Configuration and functionality options:
USE_PUBLIC_MALLOC_WRAPPERS NOT defined
USE_MALLOC_LOCK NOT defined
MALLOC_DEBUG NOT defined
REALLOC_ZERO_BYTES_FREES 1
TRIM_FASTBINS 0
Options for customizing MORECORE:
MORECORE sbrk
MORECORE_FAILURE -1
MORECORE_CONTIGUOUS 1
MORECORE_CANNOT_TRIM NOT defined
MORECORE_CLEARS 1
MMAP_AS_MORECORE_SIZE (1024 * 1024)
Tuning options that are also dynamically changeable via mallopt:
DEFAULT_MXFAST 64 (for 32bit), 128 (for 64bit)
DEFAULT_TRIM_THRESHOLD 128 * 1024
DEFAULT_TOP_PAD 0
DEFAULT_MMAP_THRESHOLD 128 * 1024
DEFAULT_MMAP_MAX 65536
There are several other #defined constants and macros that you
probably don't want to touch unless you are extending or adapting malloc. */
/*
void* is the pointer type that malloc should say it returns
*/
#ifndef void
#define void void
#endif /*void*/
#include <stddef.h> /* for size_t */
#include <stdlib.h> /* for getenv(), abort() */
#include <unistd.h> /* for __libc_enable_secure */
#include <atomic.h>
#include <_itoa.h>
#include <bits/wordsize.h>
#include <sys/sysinfo.h>
#include <ldsodefs.h>
#include <unistd.h>
#include <stdio.h> /* needed for malloc_stats */
#include <errno.h>
#include <assert.h>
#include <shlib-compat.h>
/* For uintptr_t. */
#include <stdint.h>
/* For va_arg, va_start, va_end. */
#include <stdarg.h>
/* For MIN, MAX, powerof2. */
#include <sys/param.h>
/* For ALIGN_UP et. al. */
#include <libc-pointer-arith.h>
/* For DIAG_PUSH/POP_NEEDS_COMMENT et al. */
#include <libc-diag.h>
/* For memory tagging. */
#include <libc-mtag.h>
#include <malloc/malloc-internal.h>
/* For SINGLE_THREAD_P. */
#include <sysdep-cancel.h>
#include <libc-internal.h>
/* For tcache double-free check. */
#include <random-bits.h>
#include <sys/random.h>
/*
Debugging:
Because freed chunks may be overwritten with bookkeeping fields, this
malloc will often die when freed memory is overwritten by user
programs. This can be very effective (albeit in an annoying way)
in helping track down dangling pointers.
If you compile with -DMALLOC_DEBUG, a number of assertion checks are
enabled that will catch more memory errors. You probably won't be
able to make much sense of the actual assertion errors, but they
should help you locate incorrectly overwritten memory. The checking
is fairly extensive, and will slow down execution
noticeably. Calling malloc_stats or mallinfo with MALLOC_DEBUG set
will attempt to check every non-mmapped allocated and free chunk in
the course of computing the summaries. (By nature, mmapped regions
cannot be checked very much automatically.)
Setting MALLOC_DEBUG may also be helpful if you are trying to modify
this code. The assertions in the check routines spell out in more
detail the assumptions and invariants underlying the algorithms.
Setting MALLOC_DEBUG does NOT provide an automated mechanism for
checking that all accesses to malloced memory stay within their
bounds. However, there are several add-ons and adaptations of this
or other mallocs available that do this.
*/
#ifndef MALLOC_DEBUG
#define MALLOC_DEBUG 0
#endif
#if IS_IN (libc)
#ifndef NDEBUG
# define __assert_fail(assertion, file, line, function) \
__malloc_assert(assertion, file, line, function)
_Noreturn static void
__malloc_assert (const char *assertion, const char *file, unsigned int line,
const char *function)
{
__libc_message (do_abort, "\
Fatal glibc error: malloc assertion failure in %s: %s\n",
function, assertion);
__builtin_unreachable ();
}
#endif
#endif
#if USE_TCACHE
/* We want 64 entries. This is an arbitrary limit, which tunables can reduce. */
# define TCACHE_MAX_BINS 64
# define MAX_TCACHE_SIZE tidx2usize (TCACHE_MAX_BINS-1)
/* Only used to pre-fill the tunables. */
# define tidx2usize(idx) (((size_t) idx) * MALLOC_ALIGNMENT + MINSIZE - SIZE_SZ)
/* When "x" is from chunksize(). */
# define csize2tidx(x) (((x) - MINSIZE + MALLOC_ALIGNMENT - 1) / MALLOC_ALIGNMENT)
/* When "x" is a user-provided size. */
# define usize2tidx(x) csize2tidx (request2size (x))
/* With rounding and alignment, the bins are...
idx 0 bytes 0..24 (64-bit) or 0..12 (32-bit)
idx 1 bytes 25..40 or 13..20
idx 2 bytes 41..56 or 21..28
etc. */
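/* A worked example of the index arithmetic above, as a standalone
   sketch (not allocator code).  It assumes the usual 64-bit values
   MALLOC_ALIGNMENT == 16, MINSIZE == 32 and SIZE_SZ == 8; the real
   constants are defined elsewhere in this file.

     #include <assert.h>
     #include <stddef.h>

     int
     main (void)
     {
       const size_t alignment = 16, minsize = 32, size_sz = 8;
       // tidx2usize: largest usable size that still maps to a bin.
       assert (0 * alignment + minsize - size_sz == 24);   // idx 0: 0..24
       assert (1 * alignment + minsize - size_sz == 40);   // idx 1: 25..40
       // csize2tidx: from a chunk size back to the bin index.
       assert ((32 - minsize + alignment - 1) / alignment == 0);
       assert ((48 - minsize + alignment - 1) / alignment == 1);
       return 0;
     }
*/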
/* This is another arbitrary limit, which tunables can change. Each
tcache bin will hold at most this number of chunks. */
# define TCACHE_FILL_COUNT 7
/* Maximum chunks in tcache bins for tunables. This value must fit the range
of tcache->counts[] entries, else they may overflow. */
# define MAX_TCACHE_COUNT UINT16_MAX
#endif
/* Safe-Linking:
Use randomness from ASLR (mmap_base) to protect single-linked lists
of Fast-Bins and TCache. That is, mask the "next" pointers of the
lists' chunks, and also perform allocation alignment checks on them.
This mechanism reduces the risk of pointer hijacking, as was done with
Safe-Unlinking in the double-linked lists of Small-Bins.
It assumes a minimum page size of 4096 bytes (12 bits). Systems with
larger pages provide less entropy, although the pointer mangling
still works. */
#define PROTECT_PTR(pos, ptr) \
((__typeof (ptr)) ((((size_t) pos) >> 12) ^ ((size_t) ptr)))
#define REVEAL_PTR(ptr) PROTECT_PTR (&ptr, ptr)
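/* An illustrative round trip of the mangling scheme above (a
   standalone sketch, not allocator code): storing a "next" pointer
   XORs it with the address of the slot it lives in, shifted right by
   the page bits; reading it back applies the same key.

     #include <assert.h>
     #include <stddef.h>

     int
     main (void)
     {
       void *slot;                  // where a fastbin/tcache "next" would live
       void *next = &slot;          // some chunk address to link to
       // What PROTECT_PTR (&slot, next) would store in the slot:
       size_t stored = (((size_t) &slot) >> 12) ^ (size_t) next;
       // What REVEAL_PTR recovers when the slot is read back:
       void *revealed = (void *) ((((size_t) &slot) >> 12) ^ stored);
       assert (revealed == next);
       return 0;
     }
*/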
/*
The REALLOC_ZERO_BYTES_FREES macro controls the behavior of realloc (p, 0)
when p is nonnull. If the macro is nonzero, the realloc call returns NULL;
otherwise, the call returns what malloc (0) would. In either case,
p is freed. Glibc uses a nonzero REALLOC_ZERO_BYTES_FREES, which
implements common historical practice.
ISO C17 says the realloc call has implementation-defined behavior,
and it might not even free p.
*/
#ifndef REALLOC_ZERO_BYTES_FREES
#define REALLOC_ZERO_BYTES_FREES 1
#endif
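/* Caller-visible consequence of the nonzero default above, as an
   illustrative sketch (glibc behavior, not allocator internals):

     #include <stdlib.h>

     int
     main (void)
     {
       void *p = malloc (16);
       void *q = realloc (p, 0);   // frees p and returns NULL
       // q is NULL here; p must not be used or freed again.
       (void) q;
       return 0;
     }
*/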
/*
TRIM_FASTBINS controls whether free() of a very small chunk can
immediately lead to trimming. Setting to true (1) can reduce memory
footprint, but will almost always slow down programs that use a lot
of small chunks.
Define this only if you are willing to give up some speed to more
aggressively reduce system-level memory footprint when releasing
memory in programs that use many small chunks. You can get
essentially the same effect by setting MXFAST to 0, but this can
lead to even greater slowdowns in programs using many small chunks.
TRIM_FASTBINS is an in-between compile-time option that disables
only those chunks bordering topmost memory from being placed in
fastbins.
*/
#ifndef TRIM_FASTBINS
#define TRIM_FASTBINS 0
#endif
/* Definition for getting more memory from the OS. */
#include "morecore.c"
#define MORECORE (*__glibc_morecore)
#define MORECORE_FAILURE 0
/* Memory tagging. */
/* Some systems support the concept of tagging (sometimes known as
coloring) memory locations on a fine grained basis. Each memory
location is given a color (normally allocated randomly) and
pointers are also colored. When the pointer is dereferenced, the
pointer's color is checked against the memory's color and if they
differ the access is faulted (sometimes lazily).
We use this in glibc by maintaining a single color for the malloc
data structures that are interleaved with the user data and then
assigning separate colors for each block allocation handed out. In
this way simple buffer overruns will be rapidly detected. When
memory is freed, the memory is recolored back to the glibc default
so that simple use-after-free errors can also be detected.
If memory is reallocated the buffer is recolored even if the
address remains the same. This has a performance impact, but
guarantees that the old pointer cannot mistakenly be reused (code
that compares old against new will see a mismatch and will then
need to behave as though realloc moved the data to a new location).
Internal API for memory tagging support.
The aim is to keep the code for memory tagging support as close to
the normal APIs in glibc as possible, so that if tagging is not
enabled in the library, or is disabled at runtime then standard
operations can continue to be used. Support macros are used to do
this:
void *tag_new_zero_region (void *ptr, size_t size)
Allocates a new tag, colors the memory with that tag, zeros the
memory and returns a pointer that is correctly colored for that
location. The non-tagging version will simply call memset with 0.
void *tag_region (void *ptr, size_t size)
Color the region of memory pointed to by PTR and size SIZE with
the color of PTR. Returns the original pointer.
void *tag_new_usable (void *ptr)
Allocate a new random color and use it to color the user region of
a chunk; this may include data from the subsequent chunk's header
if tagging is sufficiently fine grained. Returns PTR suitably
recolored for accessing the memory there.
void *tag_at (void *ptr)
Read the current color of the memory at the address pointed to by
PTR (ignoring its current color) and return PTR recolored to that
color. PTR must be a valid address in all other respects. When
tagging is not enabled, it simply returns the original pointer.
*/
#ifdef USE_MTAG
static bool mtag_enabled = false;
static int mtag_mmap_flags = 0;
#else
# define mtag_enabled false
# define mtag_mmap_flags 0
#endif
static __always_inline void *
tag_region (void *ptr, size_t size)
{
if (__glibc_unlikely (mtag_enabled))
return __libc_mtag_tag_region (ptr, size);
return ptr;
}
static __always_inline void *
tag_new_zero_region (void *ptr, size_t size)
{
if (__glibc_unlikely (mtag_enabled))
return __libc_mtag_tag_zero_region (__libc_mtag_new_tag (ptr), size);
return memset (ptr, 0, size);
}
/* Defined later. */
static void *
tag_new_usable (void *ptr);
static __always_inline void *
tag_at (void *ptr)
{
if (__glibc_unlikely (mtag_enabled))
return __libc_mtag_address_get_tag (ptr);
return ptr;
}
#include <string.h>
/*
MORECORE-related declarations. By default, rely on sbrk
*/
/*
MORECORE is the name of the routine to call to obtain more memory
from the system. See below for general guidance on writing
alternative MORECORE functions, as well as a version for WIN32 and a
sample version for pre-OSX macos.
*/
#ifndef MORECORE
#define MORECORE sbrk
#endif
/*
MORECORE_FAILURE is the value returned upon failure of MORECORE
as well as mmap. Since it cannot be an otherwise valid memory address,
and must reflect values of standard sys calls, you probably ought not
try to redefine it.
*/
#ifndef MORECORE_FAILURE
#define MORECORE_FAILURE (-1)
#endif
/*
If MORECORE_CONTIGUOUS is true, take advantage of fact that
consecutive calls to MORECORE with positive arguments always return
contiguous increasing addresses. This is true of unix sbrk. Even
if not defined, when regions happen to be contiguous, malloc will
permit allocations spanning regions obtained from different
calls. But defining this when applicable enables some stronger
consistency checks and space efficiencies.
*/
#ifndef MORECORE_CONTIGUOUS
#define MORECORE_CONTIGUOUS 1
#endif
/*
Define MORECORE_CANNOT_TRIM if your version of MORECORE
cannot release space back to the system when given negative
arguments. This is generally necessary only if you are using
a hand-crafted MORECORE function that cannot handle negative arguments.
*/
/* #define MORECORE_CANNOT_TRIM */
/* MORECORE_CLEARS (default 1)
The degree to which the routine mapped to MORECORE zeroes out
memory: never (0), only for newly allocated space (1) or always
(2). The distinction between (1) and (2) is necessary because on
some systems, if the application first decrements and then
increments the break value, the contents of the reallocated space
are unspecified.
*/
#ifndef MORECORE_CLEARS
# define MORECORE_CLEARS 1
#endif
/*
MMAP_AS_MORECORE_SIZE is the minimum mmap size argument to use if
sbrk fails, and mmap is used as a backup. The value must be a
multiple of page size. This backup strategy generally applies only
when systems have "holes" in address space, so sbrk cannot perform
contiguous expansion, but there is still space available on system.
On systems for which this is known to be useful (i.e. most linux
kernels), this occurs only when programs allocate huge amounts of
memory. Between this, and the fact that mmap regions tend to be
limited, the size should be large, to avoid too many mmap calls and
thus avoid running out of kernel resources. */
#ifndef MMAP_AS_MORECORE_SIZE
#define MMAP_AS_MORECORE_SIZE (1024 * 1024)
#endif
/*
Define HAVE_MREMAP to make realloc() use mremap() to re-allocate
large blocks.
*/
#ifndef HAVE_MREMAP
#define HAVE_MREMAP 0
#endif
/*
This version of malloc supports the standard SVID/XPG mallinfo
routine that returns a struct containing usage properties and
statistics. It should work on any SVID/XPG compliant system that has
a /usr/include/malloc.h defining struct mallinfo. (If you'd like to
install such a thing yourself, cut out the preliminary declarations
as described above and below and save them in a malloc.h file. But
there's no compelling reason to bother to do this.)
The main declaration needed is the mallinfo struct that is returned
(by-copy) by mallinfo(). The SVID/XPG mallinfo struct contains a
bunch of fields that are not even meaningful in this version of
malloc. These fields are instead filled by mallinfo() with
other numbers that might be of interest.
*/
/* ---------- description of public routines ------------ */
#if IS_IN (libc)
/*
malloc(size_t n)
Returns a pointer to a newly allocated chunk of at least n bytes, or null
if no space is available. Additionally, on failure, errno is
set to ENOMEM on ANSI C systems.
If n is zero, malloc returns a minimum-sized chunk. (The minimum
size is 16 bytes on most 32bit systems, and 24 or 32 bytes on 64bit
systems.) On most systems, size_t is an unsigned type, so calls
with negative arguments are interpreted as requests for huge amounts
of space, which will often fail. The maximum supported value of n
differs across systems, but is in all cases less than the maximum
representable value of a size_t.
*/
void* __libc_malloc(size_t);
libc_hidden_proto (__libc_malloc)
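/* Caller-side sketch of the failure contract described above
   (illustrative only):

     #include <errno.h>
     #include <stdlib.h>

     int
     main (void)
     {
       void *p = malloc (1024);
       if (p == NULL && errno == ENOMEM)
         return 1;   // the request could not be satisfied
       free (p);
       return 0;
     }
*/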
/*
free(void* p)
Releases the chunk of memory pointed to by p, that had been previously
allocated using malloc or a related routine such as realloc.
It has no effect if p is null. It can have arbitrary (i.e., bad!)
effects if p has already been freed.
Unless disabled (using mallopt), freeing very large spaces will
when possible, automatically trigger operations that give
back unused memory to the system, thus reducing program footprint.
*/
void __libc_free(void*);
libc_hidden_proto (__libc_free)
/*
calloc(size_t n_elements, size_t element_size);
Returns a pointer to n_elements * element_size bytes, with all locations
set to zero.
*/
void* __libc_calloc(size_t, size_t);
/*
realloc(void* p, size_t n)
Returns a pointer to a chunk of size n that contains the same data
as does chunk p up to the minimum of (n, p's size) bytes, or null
if no space is available.
The returned pointer may or may not be the same as p. The algorithm
prefers extending p when possible, otherwise it employs the
equivalent of a malloc-copy-free sequence.
If p is null, realloc is equivalent to malloc.
If space is not available, realloc returns null, errno is set (if on
ANSI) and p is NOT freed.
if n is for fewer bytes than already held by p, the newly unused
space is lopped off and freed if possible. Unless the #define
REALLOC_ZERO_BYTES_FREES is set, realloc with a size argument of
zero (re)allocates a minimum-sized chunk.
Large chunks that were internally obtained via mmap will always be
grown using malloc-copy-free sequences unless the system supports
MREMAP (currently only linux).
The old unix realloc convention of allowing the last-free'd chunk
to be used as an argument to realloc is not supported.
*/
void* __libc_realloc(void*, size_t);
libc_hidden_proto (__libc_realloc)
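/* A caller-side pattern implied by the "p is NOT freed" rule above
   (illustrative sketch, not allocator code):

     #include <stdlib.h>

     int
     main (void)
     {
       void *p = malloc (100);
       void *q = realloc (p, 1000);
       if (q == NULL)
         {
           free (p);   // p is still valid after a failed realloc
           return 1;
         }
       p = q;          // on success the old pointer may have moved
       free (p);
       return 0;
     }
*/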
/*
memalign(size_t alignment, size_t n);
Returns a pointer to a newly allocated chunk of n bytes, aligned
in accord with the alignment argument.
The alignment argument should be a power of two. If the argument is
not a power of two, the nearest greater power is used.
8-byte alignment is guaranteed by normal malloc calls, so don't
bother calling memalign with an argument of 8 or less.
Overreliance on memalign is a sure way to fragment space.
*/
void* __libc_memalign(size_t, size_t);
libc_hidden_proto (__libc_memalign)
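/* Caller-side sketch (memalign is the public name declared in
   <malloc.h>; illustrative only):

     #include <malloc.h>
     #include <stdlib.h>

     int
     main (void)
     {
       double *v = memalign (32, 100 * sizeof (double));  // 32-byte aligned
       free (v);
       return 0;
     }
*/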
/*
valloc(size_t n);
Equivalent to memalign(pagesize, n), where pagesize is the page
size of the system. If the pagesize is unknown, 4096 is used.
*/
void* __libc_valloc(size_t);
/*
mallinfo()
Returns (by copy) a struct containing various summary statistics:
arena: current total non-mmapped bytes allocated from system
ordblks: the number of free chunks
smblks: the number of fastbin blocks (i.e., small chunks that
have been freed but not yet reused or consolidated)
hblks: current number of mmapped regions
hblkhd: total bytes held in mmapped regions
usmblks: always 0
fsmblks: total bytes held in fastbin blocks
uordblks: current total allocated space (normal or mmapped)
fordblks: total free space
keepcost: the maximum number of bytes that could ideally be released
back to system via malloc_trim. ("ideally" means that
it ignores page restrictions etc.)
Because these fields are ints, but internal bookkeeping may
be kept as longs, the reported values may wrap around zero and
thus be inaccurate.
*/
struct mallinfo2 __libc_mallinfo2(void);
libc_hidden_proto (__libc_mallinfo2)
struct mallinfo __libc_mallinfo(void);
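/* Illustrative caller-side use of the newer interface (mallinfo2 is
   the public name from <malloc.h>; its fields are size_t, which avoids
   the wrap-around problem noted above):

     #include <malloc.h>
     #include <stdio.h>

     int
     main (void)
     {
       struct mallinfo2 mi = mallinfo2 ();
       printf ("allocated: %zu bytes, free: %zu bytes\n",
               mi.uordblks, mi.fordblks);
       return 0;
     }
*/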
/*
pvalloc(size_t n);
Equivalent to valloc(minimum-page-that-holds(n)), that is,
round up n to nearest pagesize.
*/
void* __libc_pvalloc(size_t);
/*
malloc_trim(size_t pad);
If possible, gives memory back to the system (via negative
arguments to sbrk) if there is unused memory at the `high' end of
the malloc pool. You can call this after freeing large blocks of
memory to potentially reduce the system-level memory requirements
of a program. However, it cannot guarantee to reduce memory. Under
some allocation patterns, some large free blocks of memory will be
locked between two used chunks, so they cannot be given back to
the system.
The `pad' argument to malloc_trim represents the amount of free
trailing space to leave untrimmed. If this argument is zero,
only the minimum amount of memory to maintain internal data
structures will be left (one page or less). Non-zero arguments
can be supplied to maintain enough trailing space to service
future expected allocations without having to re-obtain memory
from the system.
Malloc_trim returns 1 if it actually released any memory, else 0.
On systems that do not support "negative sbrks", it will always
return 0.
*/
int __malloc_trim(size_t);
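/* Typical application-side use (illustrative; malloc_trim is the
   public name from <malloc.h>):

     #include <malloc.h>

     int
     main (void)
     {
       // After freeing large blocks, ask for unused top memory back.
       if (malloc_trim (0))        // pad of 0: trim as much as possible
         {
           // some memory was actually returned to the system
         }
       return 0;
     }
*/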
/*
malloc_usable_size(void* p);
Returns the number of bytes you can actually use in
an allocated chunk, which may be more than you requested (although
often not) due to alignment and minimum size constraints.
You can use this many bytes without worrying about
overwriting other allocated objects. This is not a particularly great
programming practice. malloc_usable_size can be more useful in
debugging and assertions, for example:
p = malloc(n);
assert(malloc_usable_size(p) >= 256);
*/
size_t __malloc_usable_size(void*);
/*
malloc_stats();
Prints on stderr the amount of space obtained from the system (both
via sbrk and mmap), the maximum amount (which may be more than
current if malloc_trim and/or munmap got called), and the current
number of bytes allocated via malloc (or realloc, etc) but not yet
freed. Note that this is the number of bytes allocated, not the
number requested. It will be larger than the number requested
because of alignment and bookkeeping overhead. Because it includes
alignment wastage as being in use, this figure may be greater than
zero even when no user-level chunks are allocated.
The reported current and maximum system memory can be inaccurate if
a program makes other calls to system memory allocation functions
(normally sbrk) outside of malloc.
malloc_stats prints only the most commonly interesting statistics.
More information can be obtained by calling mallinfo.
*/
void __malloc_stats(void);
/*
posix_memalign(void **memptr, size_t alignment, size_t size);
POSIX wrapper like memalign(), checking for validity of size.
*/
int __posix_memalign(void **, size_t, size_t);
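/* Caller-side sketch of the POSIX interface (illustrative only):

     #include <stdlib.h>

     int
     main (void)
     {
       void *buf;
       if (posix_memalign (&buf, 64, 1024) == 0)
         {
           // buf is 64-byte aligned and holds at least 1024 bytes
           free (buf);
         }
       return 0;
     }
*/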
#endif /* IS_IN (libc) */
/*
mallopt(int parameter_number, int parameter_value)
Sets tunable parameters. The format is to provide a
(parameter-number, parameter-value) pair. mallopt then sets the
corresponding parameter to the argument value if it can (i.e., so
long as the value is meaningful), and returns 1 if successful else
0. SVID/XPG/ANSI defines four standard param numbers for mallopt,
normally defined in malloc.h. Only one of these (M_MXFAST) is used
in this malloc. The others (M_NLBLKS, M_GRAIN, M_KEEP) don't apply,
so setting them has no effect. But this malloc also supports four
other options in mallopt. See below for details. Briefly, supported
parameters are as follows (listed defaults are for "typical"
configurations).
Symbol param # default allowed param values
M_MXFAST 1 64 0-80 (0 disables fastbins)
M_TRIM_THRESHOLD -1 128*1024 any (-1U disables trimming)
M_TOP_PAD -2 0 any
M_MMAP_THRESHOLD -3 128*1024 any (or 0 if no MMAP support)
M_MMAP_MAX -4 65536 any (0 disables use of mmap)
*/
int __libc_mallopt(int, int);
#if IS_IN (libc)
libc_hidden_proto (__libc_mallopt)
#endif
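/* Application-side tuning sketch (illustrative; M_MXFAST and
   M_MMAP_THRESHOLD are the public parameter names from <malloc.h>):

     #include <malloc.h>

     int
     main (void)
     {
       mallopt (M_MXFAST, 0);                   // disable fastbins entirely
       mallopt (M_MMAP_THRESHOLD, 256 * 1024);  // mmap only very large requests
       return 0;
     }
*/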
/* mallopt tuning options */
/*
M_MXFAST is the maximum request size used for "fastbins", special bins
that hold returned chunks without consolidating their spaces. This
enables future requests for chunks of the same size to be handled
very quickly, but can increase fragmentation, and thus increase the
overall memory footprint of a program.
This malloc manages fastbins very conservatively yet still
efficiently, so fragmentation is rarely a problem for values less
than or equal to the default. The maximum supported value of MXFAST
is 80. You wouldn't want it any higher than this anyway. Fastbins
are designed especially for use with many small structs, objects or
strings -- the default handles structs/objects/arrays with sizes up
to eight 4-byte fields, or small strings representing words, tokens,
etc. Using fastbins for larger objects normally worsens
fragmentation without improving speed.
M_MXFAST is set in REQUEST size units. It is internally used in
chunksize units, which adds padding and alignment. You can reduce
M_MXFAST to 0 to disable all use of fastbins. This causes the malloc
algorithm to be a closer approximation of fifo-best-fit in all cases,
not just for larger requests, but will generally cause it to be
slower.
*/
/* M_MXFAST is a standard SVID/XPG tuning option, usually listed in malloc.h */
#ifndef M_MXFAST
#define M_MXFAST 1
#endif
#ifndef DEFAULT_MXFAST
#define DEFAULT_MXFAST (64 * SIZE_SZ / 4)
#endif
/*
M_TRIM_THRESHOLD is the maximum amount of unused top-most memory
to keep before releasing via malloc_trim in free().
Automatic trimming is mainly useful in long-lived programs.
Because trimming via sbrk can be slow on some systems, and can
sometimes be wasteful (in cases where programs immediately
afterward allocate more large chunks) the value should be high
enough so that your overall system performance would improve by
releasing this much memory.
The trim threshold and the mmap control parameters (see below)
can be traded off with one another. Trimming and mmapping are
two different ways of releasing unused memory back to the
system. Between these two, it is often possible to keep
system-level demands of a long-lived program down to a bare
minimum. For example, in one test suite of sessions measuring
the XF86 X server on Linux, using a trim threshold of 128K and a
mmap threshold of 192K led to near-minimal long term resource
consumption.
If you are using this malloc in a long-lived program, it should
pay to experiment with these values. As a rough guide, you
might set to a value close to the average size of a process
(program) running on your system. Releasing this much memory
would allow such a process to run in memory. Generally, it's
worth it to tune for trimming rather than memory mapping when a
program undergoes phases where several large chunks are
allocated and released in ways that can reuse each other's
storage, perhaps mixed with phases where there are no such
chunks at all. And in well-behaved long-lived programs,
controlling release of large blocks via trimming versus mapping
is usually faster.
However, in most programs, these parameters serve mainly as
protection against the system-level effects of carrying around
massive amounts of unneeded memory. Since frequent calls to
sbrk, mmap, and munmap otherwise degrade performance, the default
parameters are set to relatively high values that serve only as
safeguards.
The trim value must be greater than the page size to have any useful
effect. To disable trimming completely, you can set it to
(unsigned long)(-1).
Trim settings interact with fastbin (MXFAST) settings: Unless
TRIM_FASTBINS is defined, automatic trimming never takes place upon
freeing a chunk with size less than or equal to MXFAST. Trimming is
instead delayed until subsequent freeing of larger chunks. However,
you can still force an attempted trim by calling malloc_trim.
Also, trimming is not generally possible in cases where
the main arena is obtained via mmap.
Note that the trick some people use of mallocing a huge space and
then freeing it at program startup, in an attempt to reserve system
memory, doesn't have the intended effect under automatic trimming,
since that memory will immediately be returned to the system.
*/
#define M_TRIM_THRESHOLD -1
#ifndef DEFAULT_TRIM_THRESHOLD
#define DEFAULT_TRIM_THRESHOLD (128 * 1024)
#endif
/*
M_TOP_PAD is the amount of extra `padding' space to allocate or
retain whenever sbrk is called. It is used in two ways internally:
* When sbrk is called to extend the top of the arena to satisfy
a new malloc request, this much padding is added to the sbrk
request.
* When malloc_trim is called automatically from free(),
it is used as the `pad' argument.
In both cases, the actual amount of padding is rounded
so that the end of the arena is always a system page boundary.
The main reason for using padding is to avoid calling sbrk so
often. Having even a small pad greatly reduces the likelihood
that nearly every malloc request during program start-up (or
after trimming) will invoke sbrk, which needlessly wastes
time.
Automatic rounding-up to page-size units is normally sufficient
to avoid measurable overhead, so the default is 0. However, in
systems where sbrk is relatively slow, it can pay to increase
this value, at the expense of carrying around more memory than
the program needs.
*/
#define M_TOP_PAD -2
#ifndef DEFAULT_TOP_PAD
#define DEFAULT_TOP_PAD (0)
#endif
/*
MMAP_THRESHOLD_MAX and _MIN are the bounds on the dynamically
adjusted MMAP_THRESHOLD.
*/
#ifndef DEFAULT_MMAP_THRESHOLD_MIN
#define DEFAULT_MMAP_THRESHOLD_MIN (128 * 1024)
#endif
#ifndef DEFAULT_MMAP_THRESHOLD_MAX
/* For 32-bit platforms we cannot increase the maximum mmap
threshold much because it is also the minimum value for the
maximum heap size and its alignment. Going above 512k (i.e., 1M
for new heaps) wastes too much address space. */
# if __WORDSIZE == 32
# define DEFAULT_MMAP_THRESHOLD_MAX (512 * 1024)
# else
# define DEFAULT_MMAP_THRESHOLD_MAX (4 * 1024 * 1024 * sizeof(long))
# endif
#endif
/*
M_MMAP_THRESHOLD is the request size threshold for using mmap()
to service a request. Requests of at least this size that cannot
be allocated using already-existing space will be serviced via mmap.
(If enough normal freed space already exists it is used instead.)
Using mmap segregates relatively large chunks of memory so that
they can be individually obtained and released from the host
system. A request serviced through mmap is never reused by any
other request (at least not directly; the system may just so
happen to remap successive requests to the same locations).
Segregating space in this way has the benefits that:
1. Mmapped space can ALWAYS be individually released back
to the system, which helps keep the system level memory
demands of a long-lived program low.
2. Mapped memory can never become `locked' between
other chunks, as can happen with normally allocated chunks, which
means that even trimming via malloc_trim would not release them.
3. On some systems with "holes" in address spaces, mmap can obtain
memory that sbrk cannot.
However, it has the disadvantages that:
1. The space cannot be reclaimed, consolidated, and then
used to service later requests, as happens with normal chunks.
2. It can lead to more wastage because of mmap page alignment
requirements.
3. It causes malloc performance to be more dependent on host
system memory management support routines which may vary in
implementation quality and may impose arbitrary
limitations. Generally, servicing a request via normal
malloc steps is faster than going through a system's mmap.
The advantages of mmap nearly always outweigh disadvantages for
"large" chunks, but the value of "large" varies across systems. The
default is an empirically derived value that works well in most
systems.
Update in 2006:
The above was written in 2001. Since then the world has changed a lot.
Memory got bigger. Applications got bigger. The virtual address space
layout in 32 bit linux changed.
In the new situation, brk() and mmap space is shared and there are no
artificial limits on brk size imposed by the kernel. What is more,
applications have started using transient allocations larger than the
128Kb that was imagined in 2001.
The price for mmap is also high now; each time glibc mmaps from the
kernel, the kernel is forced to zero out the memory it gives to the
application. Zeroing memory is expensive and eats a lot of cache and
memory bandwidth. This has nothing to do with the efficiency of the
virtual memory system; when handing out mmapped pages the kernel simply
has no choice but to zero them.
In 2001, the kernel had a maximum size for brk() of about 800
megabytes on 32 bit x86; at that point brk() would hit the first
mmapped shared libraries and could not expand any further.
kernels, the VA space layout is different and brk() and mmap
both can span the entire heap at will.
Rather than using a static threshold for the brk/mmap tradeoff,
we are now using a simple dynamic one. The goal is still to avoid
fragmentation. The old goals we kept are
1) try to get the long lived large allocations to use mmap()
2) really large allocations should always use mmap()
and we're adding now:
3) transient allocations should use brk() to avoid forcing the kernel
to zero memory over and over again
The implementation works with a sliding threshold, which is by default
limited to go between 128Kb and 32Mb (64Mb for 64 bit machines) and starts
out at 128Kb as per the 2001 default.
This allows us to satisfy requirement 1) under the assumption that long
lived allocations are made early in the process' lifespan, before it has
started doing dynamic allocations of the same size (which will
increase the threshold).
The upper bound on the threshold satisfies requirement 2).
The threshold goes up in value when the application frees memory that was
allocated with the mmap allocator. The idea is that once the application
starts freeing memory of a certain size, it's highly probable that this is
a size the application uses for transient allocations. This estimator
is there to satisfy the new third requirement.
*/
#define M_MMAP_THRESHOLD -3
#ifndef DEFAULT_MMAP_THRESHOLD
#define DEFAULT_MMAP_THRESHOLD DEFAULT_MMAP_THRESHOLD_MIN
#endif
/*
M_MMAP_MAX is the maximum number of requests to simultaneously
service using mmap. This parameter exists because
some systems have a limited number of internal tables for
use by mmap, and using more than a few of them may degrade
performance.
The default is set to a value that serves only as a safeguard.
Setting to 0 disables use of mmap for servicing large requests.
*/
#define M_MMAP_MAX -4
#ifndef DEFAULT_MMAP_MAX
#define DEFAULT_MMAP_MAX (65536)
#endif
#include <malloc.h>
#ifndef RETURN_ADDRESS
#define RETURN_ADDRESS(X_) (NULL)
#endif
/* Forward declarations. */
struct malloc_chunk;
typedef struct malloc_chunk* mchunkptr;
/* Internal routines. */
static void* _int_malloc(mstate, size_t);
static void _int_free(mstate, mchunkptr, int);
static void* _int_realloc(mstate, mchunkptr, INTERNAL_SIZE_T,
INTERNAL_SIZE_T);
static void* _int_memalign(mstate, size_t, size_t);
#if IS_IN (libc)
static void* _mid_memalign(size_t, size_t, void *);
#endif
static void malloc_printerr(const char *str) __attribute__ ((noreturn));
static void munmap_chunk(mchunkptr p);
#if HAVE_MREMAP
static mchunkptr mremap_chunk(mchunkptr p, size_t new_size);
#endif
/* ------------------ MMAP support ------------------ */
#include <fcntl.h>
#include <sys/mman.h>
#if !defined(MAP_ANONYMOUS) && defined(MAP_ANON)
# define MAP_ANONYMOUS MAP_ANON
#endif
#ifndef MAP_NORESERVE
# define MAP_NORESERVE 0
#endif
#define MMAP(addr, size, prot, flags) \
__mmap((addr), (size), (prot), (flags)|MAP_ANONYMOUS|MAP_PRIVATE, -1, 0)
/*
----------------------- Chunk representations -----------------------
*/
/*
This struct declaration is misleading (but accurate and necessary).
It declares a "view" into memory allowing access to necessary
fields at known offsets from a given base. See explanation below.
*/
struct malloc_chunk {
INTERNAL_SIZE_T mchunk_prev_size; /* Size of previous chunk (if free). */
INTERNAL_SIZE_T mchunk_size; /* Size in bytes, including overhead. */
struct malloc_chunk* fd; /* double links -- used only if free. */
struct malloc_chunk* bk;
/* Only used for large blocks: pointer to next larger size. */
struct malloc_chunk* fd_nextsize; /* double links -- used only if free. */
struct malloc_chunk* bk_nextsize;
};
/*
malloc_chunk details:
(The following includes lightly edited explanations by Colin Plumb.)
Chunks of memory are maintained using a `boundary tag' method as
described in e.g., Knuth or Standish. (See the paper by Paul
Wilson ftp://ftp.cs.utexas.edu/pub/garbage/allocsrv.ps for a
survey of such techniques.) Sizes of free chunks are stored both
in the front of each chunk and at the end. This makes
consolidating fragmented chunks into bigger chunks very fast. The
size fields also hold bits representing whether chunks are free or
in use.
An allocated chunk looks like this:
chunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Size of previous chunk, if unallocated (P clear) |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Size of chunk, in bytes |A|M|P|
mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| User data starts here... .
. .
. (malloc_usable_size() bytes) .
. |
nextchunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| (size of chunk, but used for application data) |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Size of next chunk, in bytes |A|0|1|
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
Where "chunk" is the front of the chunk for the purpose of most of
the malloc code, but "mem" is the pointer that is returned to the
user. "Nextchunk" is the beginning of the next contiguous chunk.
Chunks always begin on even word boundaries, so the mem portion
(which is returned to the user) is also on an even word boundary, and
thus at least double-word aligned.
Free chunks are stored in circular doubly-linked lists, and look like this:
chunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Size of previous chunk, if unallocated (P clear) |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
`head:' | Size of chunk, in bytes |A|0|P|
mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Forward pointer to next chunk in list |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Back pointer to previous chunk in list |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Unused space (may be 0 bytes long) .
. .
. |
nextchunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
`foot:' | Size of chunk, in bytes |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Size of next chunk, in bytes |A|0|0|
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
The P (PREV_INUSE) bit, stored in the unused low-order bit of the
chunk size (which is always a multiple of two words), is an in-use
bit for the *previous* chunk. If that bit is *clear*, then the
word before the current chunk size contains the previous chunk
size, and can be used to find the front of the previous chunk.
The very first chunk allocated always has this bit set,
preventing access to non-existent (or non-owned) memory. If
prev_inuse is set for any given chunk, then you CANNOT determine
the size of the previous chunk, and might even get a memory
addressing fault when trying to do so.
The A (NON_MAIN_ARENA) bit is cleared for chunks on the initial,
main arena, described by the main_arena variable. When additional
threads are spawned, each thread receives its own arena (up to a
configurable limit, after which arenas are reused for multiple
threads), and the chunks in these arenas have the A bit set. To
find the arena for a chunk on such a non-main arena, heap_for_ptr
performs a bit mask operation and indirection through the ar_ptr
member of the per-heap header heap_info (see arena.c).
Note that the `foot' of the current chunk is actually represented
as the prev_size of the NEXT chunk. This makes it easier to
deal with alignments etc but can be very confusing when trying
to extend or adapt this code.
The three exceptions to all this are:
1. The special chunk `top' doesn't bother using the
trailing size field since there is no next contiguous chunk
that would have to index off it. After initialization, `top'
is forced to always exist. If it would become less than
MINSIZE bytes long, it is replenished.
2. Chunks allocated via mmap, which have the second-lowest-order
bit M (IS_MMAPPED) set in their size fields. Because they are
allocated one-by-one, each must contain its own trailing size
field. If the M bit is set, the other bits are ignored
(because mmapped chunks are neither in an arena, nor adjacent
to a freed chunk). The M bit is also used for chunks which
originally came from a dumped heap via malloc_set_state in
hooks.c.
3. Chunks in fastbins are treated as allocated chunks from the
point of view of the chunk allocator. They are consolidated
with their neighbors only in bulk, in malloc_consolidate.
*/
/*
---------- Size and alignment checks and conversions ----------
*/
/* Conversion from malloc headers to user pointers, and back. When
using memory tagging the user data and the malloc data structure
headers have distinct tags. Converting fully from one to the other
involves extracting the tag at the other address and creating a
suitable pointer using it. That can be quite expensive. There are
cases when the pointers are not dereferenced (for example only used
for alignment check) so the tags are not relevant, and there are
cases when user data is not tagged distinctly from malloc headers
(user data is untagged because tagging is done late in malloc and
early in free). User memory tagging across internal interfaces:
sysmalloc: Returns untagged memory.
_int_malloc: Returns untagged memory.
_int_free: Takes untagged memory.
_int_memalign: Returns untagged memory.
_mid_memalign: Returns tagged memory.
_int_realloc: Takes and returns tagged memory.
*/
/* The chunk header is two SIZE_SZ elements, but this is used widely, so
we define it here for clarity later. */
#define CHUNK_HDR_SZ (2 * SIZE_SZ)
/* Convert a chunk address to a user mem pointer without correcting
the tag. */
#define chunk2mem(p) ((void*)((char*)(p) + CHUNK_HDR_SZ))
/* Convert a chunk address to a user mem pointer and extract the right tag. */
#define chunk2mem_tag(p) ((void*)tag_at ((char*)(p) + CHUNK_HDR_SZ))
/* Convert a user mem pointer to a chunk address and extract the right tag. */
#define mem2chunk(mem) ((mchunkptr)tag_at (((char*)(mem) - CHUNK_HDR_SZ)))
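/* For illustration, assuming a 64-bit configuration with SIZE_SZ == 8:
   CHUNK_HDR_SZ is 16, so for a chunk starting at address 0x1000 the user
   pointer returned by chunk2mem is 0x1010, and mem2chunk (0x1010) gets back
   to 0x1000; the 16 header bytes hold mchunk_prev_size and mchunk_size. */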
/* The smallest possible chunk */
#define MIN_CHUNK_SIZE (offsetof(struct malloc_chunk, fd_nextsize))
/* The smallest size we can malloc is an aligned minimal chunk */
#define MINSIZE \
(unsigned long)(((MIN_CHUNK_SIZE+MALLOC_ALIGN_MASK) & ~MALLOC_ALIGN_MASK))
/* Check if m has acceptable alignment */
#define aligned_OK(m) (((unsigned long)(m) & MALLOC_ALIGN_MASK) == 0)
#define misaligned_chunk(p) \
((uintptr_t)(MALLOC_ALIGNMENT == CHUNK_HDR_SZ ? (p) : chunk2mem (p)) \
& MALLOC_ALIGN_MASK)
/* pad request bytes into a usable size -- internal version */
/* Note: This must be a macro that evaluates to a compile time constant
if passed a literal constant. */
#define request2size(req) \
(((req) + SIZE_SZ + MALLOC_ALIGN_MASK < MINSIZE) ? \
MINSIZE : \
((req) + SIZE_SZ + MALLOC_ALIGN_MASK) & ~MALLOC_ALIGN_MASK)
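/* For illustration, assuming SIZE_SZ == 8, MALLOC_ALIGNMENT == 16 (so
   MALLOC_ALIGN_MASK == 15) and MINSIZE == 32:
     request2size (0)  == 32   (bumped up to MINSIZE)
     request2size (24) == (24 + 8 + 15) & ~15 == 32
     request2size (25) == (25 + 8 + 15) & ~15 == 48
   Only one extra SIZE_SZ is needed because the prev_size field of the
   following chunk can overlap the end of this chunk's user data while the
   chunk is in use. */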
/* Check if REQ overflows when padded and aligned and if the resulting
value is less than PTRDIFF_T. Returns the requested size or
MINSIZE in case the value is less than MINSIZE, or 0 if any of the
previous checks fail. */
static inline size_t
checked_request2size (size_t req) __nonnull (1)
{
if (__glibc_unlikely (req > PTRDIFF_MAX))
return 0;
/* When using tagged memory, we cannot share the end of the user
block with the header for the next chunk, so ensure that we
allocate blocks that are rounded up to the granule size. Take
care not to overflow from close to MAX_SIZE_T to a small
number. Ideally, this would be part of request2size(), but that
must be a macro that produces a compile time constant if passed
a constant literal. */
if (__glibc_unlikely (mtag_enabled))
{
/* Ensure this is not evaluated if !mtag_enabled, see gcc PR 99551. */
asm ("");
req = (req + (__MTAG_GRANULE_SIZE - 1)) &
~(size_t)(__MTAG_GRANULE_SIZE - 1);
}
return request2size (req);
}
/*
--------------- Physical chunk operations ---------------
*/
/* size field is or'ed with PREV_INUSE when previous adjacent chunk in use */
#define PREV_INUSE 0x1
/* extract inuse bit of previous chunk */
#define prev_inuse(p) ((p)->mchunk_size & PREV_INUSE)
/* size field is or'ed with IS_MMAPPED if the chunk was obtained with mmap() */
#define IS_MMAPPED 0x2
/* check for mmap()'ed chunk */
#define chunk_is_mmapped(p) ((p)->mchunk_size & IS_MMAPPED)
/* size field is or'ed with NON_MAIN_ARENA if the chunk was obtained
from a non-main arena. This is only set immediately before handing
the chunk to the user, if necessary. */
#define NON_MAIN_ARENA 0x4
/* Check for chunk from main arena. */
#define chunk_main_arena(p) (((p)->mchunk_size & NON_MAIN_ARENA) == 0)
/* Mark a chunk as not being on the main arena. */
#define set_non_main_arena(p) ((p)->mchunk_size |= NON_MAIN_ARENA)
/*
Bits to mask off when extracting size
Note: IS_MMAPPED is intentionally not masked off from size field in
macros for which mmapped chunks should never be seen. This should
cause helpful core dumps to occur if it is tried by accident by
people extending or adapting this malloc.
*/
#define SIZE_BITS (PREV_INUSE | IS_MMAPPED | NON_MAIN_ARENA)
/* Get size, ignoring use bits */
#define chunksize(p) (chunksize_nomask (p) & ~(SIZE_BITS))
/* Like chunksize, but do not mask SIZE_BITS. */
#define chunksize_nomask(p) ((p)->mchunk_size)
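/* For illustration, assuming SIZE_SZ == 8: a non-mmapped main-arena chunk
   of 0x110 bytes whose previous neighbor is still in use stores 0x111 in
   its size field; chunksize_nomask (p) returns 0x111, chunksize (p) masks
   off the three low flag bits and returns 0x110, and prev_inuse (p) is
   nonzero. */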
/* Ptr to next physical malloc_chunk. */
#define next_chunk(p) ((mchunkptr) (((char *) (p)) + chunksize (p)))
/* Size of the chunk below P. Only valid if !prev_inuse (P). */
#define prev_size(p) ((p)->mchunk_prev_size)
/* Set the size of the chunk below P. Only valid if !prev_inuse (P). */
#define set_prev_size(p, sz) ((p)->mchunk_prev_size = (sz))
/* Ptr to previous physical malloc_chunk. Only valid if !prev_inuse (P). */
#define prev_chunk(p) ((mchunkptr) (((char *) (p)) - prev_size (p)))
/* Treat space at ptr + offset as a chunk */
#define chunk_at_offset(p, s) ((mchunkptr) (((char *) (p)) + (s)))
/* extract p's inuse bit */
#define inuse(p) \
((((mchunkptr) (((char *) (p)) + chunksize (p)))->mchunk_size) & PREV_INUSE)
/* set/clear chunk as being inuse without otherwise disturbing */
#define set_inuse(p) \
((mchunkptr) (((char *) (p)) + chunksize (p)))->mchunk_size |= PREV_INUSE
#define clear_inuse(p) \
((mchunkptr) (((char *) (p)) + chunksize (p)))->mchunk_size &= ~(PREV_INUSE)
/* check/set/clear inuse bits in known places */
#define inuse_bit_at_offset(p, s) \
(((mchunkptr) (((char *) (p)) + (s)))->mchunk_size & PREV_INUSE)
#define set_inuse_bit_at_offset(p, s) \
(((mchunkptr) (((char *) (p)) + (s)))->mchunk_size |= PREV_INUSE)
#define clear_inuse_bit_at_offset(p, s) \
(((mchunkptr) (((char *) (p)) + (s)))->mchunk_size &= ~(PREV_INUSE))
/* Set size at head, without disturbing its use bit */
#define set_head_size(p, s) ((p)->mchunk_size = (((p)->mchunk_size & SIZE_BITS) | (s)))
/* Set size/use field */
#define set_head(p, s) ((p)->mchunk_size = (s))
/* Set size at footer (only when chunk is not in use) */
#define set_foot(p, s) (((mchunkptr) ((char *) (p) + (s)))->mchunk_prev_size = (s))
#pragma GCC poison mchunk_size
#pragma GCC poison mchunk_prev_size
/* This is the size of the real usable data in the chunk. Not valid for
dumped heap chunks. */
#define memsize(p) \
(__MTAG_GRANULE_SIZE > SIZE_SZ && __glibc_unlikely (mtag_enabled) ? \
chunksize (p) - CHUNK_HDR_SZ : \
chunksize (p) - CHUNK_HDR_SZ + (chunk_is_mmapped (p) ? 0 : SIZE_SZ))
/* If memory tagging is enabled the layout changes to accommodate the granule
size; this is wasteful for small allocations, so it is not done by default.
Both the chunk header and the user data have to be granule aligned. */
_Static_assert (__MTAG_GRANULE_SIZE <= CHUNK_HDR_SZ,
"memory tagging is not supported with large granule.");
static __always_inline void *
tag_new_usable (void *ptr)
{
if (__glibc_unlikely (mtag_enabled) && ptr)
{
mchunkptr cp = mem2chunk(ptr);
ptr = __libc_mtag_tag_region (__libc_mtag_new_tag (ptr), memsize (cp));
}
return ptr;
}
/*
-------------------- Internal data structures --------------------
All internal state is held in an instance of malloc_state defined
below. There are no other static variables, except in two optional
cases:
* If USE_MALLOC_LOCK is defined, the mALLOC_MUTEx declared above.
* If mmap doesn't support MAP_ANONYMOUS, a dummy file descriptor
for mmap.
Beware of lots of tricks that minimize the total bookkeeping space
requirements. The result is a little over 1K bytes (for 4-byte
pointers and size_t).
*/
/*
Bins
An array of bin headers for free chunks. Each bin is doubly
linked. The bins are approximately proportionally (log) spaced.
There are a lot of these bins (128). This may look excessive, but
works very well in practice. Most bins hold sizes that are
unusual as malloc request sizes, but are more usual for fragments
and consolidated sets of chunks, which is what these bins hold, so
they can be found quickly. All procedures maintain the invariant
that no consolidated chunk physically borders another one, so each
chunk in a list is known to be preceded and followed by either
inuse chunks or the ends of memory.
Chunks in bins are kept in size order, with ties going to the
approximately least recently used chunk. Ordering isn't needed
for the small bins, which all contain the same-sized chunks, but
facilitates best-fit allocation for larger chunks. These lists
are just sequential. Keeping them in order almost never requires
enough traversal to warrant using fancier ordered data
structures.
Chunks of the same size are linked with the most
recently freed at the front, and allocations are taken from the
back. This results in LRU (FIFO) allocation order, which tends
to give each chunk an equal opportunity to be consolidated with
adjacent freed chunks, resulting in larger free chunks and less
fragmentation.
To simplify use in double-linked lists, each bin header acts
as a malloc_chunk. This avoids special-casing for headers.
But to conserve space and improve locality, we allocate
only the fd/bk pointers of bins, and then use repositioning tricks
to treat these as the fields of a malloc_chunk*.
*/
typedef struct malloc_chunk *mbinptr;
/* addressing -- note that bin_at(0) does not exist */
#define bin_at(m, i) \
(mbinptr) (((char *) &((m)->bins[((i) - 1) * 2])) \
- offsetof (struct malloc_chunk, fd))
/* analog of ++bin */
#define next_bin(b) ((mbinptr) ((char *) (b) + (sizeof (mchunkptr) << 1)))
/* Reminders about list directionality within bins */
#define first(b) ((b)->fd)
#define last(b) ((b)->bk)
/*
Indexing
Bins for sizes < 512 bytes contain chunks of all the same size, spaced
8 bytes apart. Larger bins are approximately logarithmically spaced:
64 bins of size 8
32 bins of size 64
16 bins of size 512
8 bins of size 4096
4 bins of size 32768
2 bins of size 262144
1 bin of size what's left
There is actually a little bit of slop in the numbers in bin_index
for the sake of speed. This makes no difference elsewhere.
The bins top out around 1MB because we expect to service large
requests via mmap.
Bin 0 does not exist. Bin 1 is the unordered list; if that would be
a valid chunk size the small bins are bumped up one.
*/
#define NBINS 128
#define NSMALLBINS 64
#define SMALLBIN_WIDTH MALLOC_ALIGNMENT
#define SMALLBIN_CORRECTION (MALLOC_ALIGNMENT > CHUNK_HDR_SZ)
#define MIN_LARGE_SIZE ((NSMALLBINS - SMALLBIN_CORRECTION) * SMALLBIN_WIDTH)
#define in_smallbin_range(sz) \
((unsigned long) (sz) < (unsigned long) MIN_LARGE_SIZE)
#define smallbin_index(sz) \
((SMALLBIN_WIDTH == 16 ? (((unsigned) (sz)) >> 4) : (((unsigned) (sz)) >> 3))\
+ SMALLBIN_CORRECTION)
#define largebin_index_32(sz) \
(((((unsigned long) (sz)) >> 6) <= 38) ? 56 + (((unsigned long) (sz)) >> 6) :\
((((unsigned long) (sz)) >> 9) <= 20) ? 91 + (((unsigned long) (sz)) >> 9) :\
((((unsigned long) (sz)) >> 12) <= 10) ? 110 + (((unsigned long) (sz)) >> 12) :\
((((unsigned long) (sz)) >> 15) <= 4) ? 119 + (((unsigned long) (sz)) >> 15) :\
((((unsigned long) (sz)) >> 18) <= 2) ? 124 + (((unsigned long) (sz)) >> 18) :\
126)
#define largebin_index_32_big(sz) \
(((((unsigned long) (sz)) >> 6) <= 45) ? 49 + (((unsigned long) (sz)) >> 6) :\
((((unsigned long) (sz)) >> 9) <= 20) ? 91 + (((unsigned long) (sz)) >> 9) :\
((((unsigned long) (sz)) >> 12) <= 10) ? 110 + (((unsigned long) (sz)) >> 12) :\
((((unsigned long) (sz)) >> 15) <= 4) ? 119 + (((unsigned long) (sz)) >> 15) :\
((((unsigned long) (sz)) >> 18) <= 2) ? 124 + (((unsigned long) (sz)) >> 18) :\
126)
// XXX It remains to be seen whether it is good to keep the widths of
// XXX the buckets the same or whether it should be scaled by a factor
// XXX of two as well.
#define largebin_index_64(sz) \
(((((unsigned long) (sz)) >> 6) <= 48) ? 48 + (((unsigned long) (sz)) >> 6) :\
((((unsigned long) (sz)) >> 9) <= 20) ? 91 + (((unsigned long) (sz)) >> 9) :\
((((unsigned long) (sz)) >> 12) <= 10) ? 110 + (((unsigned long) (sz)) >> 12) :\
((((unsigned long) (sz)) >> 15) <= 4) ? 119 + (((unsigned long) (sz)) >> 15) :\
((((unsigned long) (sz)) >> 18) <= 2) ? 124 + (((unsigned long) (sz)) >> 18) :\
126)
#define largebin_index(sz) \
(SIZE_SZ == 8 ? largebin_index_64 (sz) \
: MALLOC_ALIGNMENT == 16 ? largebin_index_32_big (sz) \
: largebin_index_32 (sz))
#define bin_index(sz) \
((in_smallbin_range (sz)) ? smallbin_index (sz) : largebin_index (sz))
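/* For illustration, assuming a 64-bit configuration (SIZE_SZ == 8,
   MALLOC_ALIGNMENT == 16, SMALLBIN_CORRECTION == 0, MIN_LARGE_SIZE == 1024):
   a chunk of 32 bytes maps to smallbin_index (32) == 2, one of 1008 bytes
   to bin 63, one of 1024 bytes to largebin_index_64 (1024) == 48 + 16 == 64,
   and one of 12288 bytes to 110 + (12288 >> 12) == 113. */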
/* Take a chunk off a bin list. */
static void
unlink_chunk (mstate av, mchunkptr p)
{
if (chunksize (p) != prev_size (next_chunk (p)))
malloc_printerr ("corrupted size vs. prev_size");
mchunkptr fd = p->fd;
mchunkptr bk = p->bk;
if (__builtin_expect (fd->bk != p || bk->fd != p, 0))
malloc_printerr ("corrupted double-linked list");
fd->bk = bk;
bk->fd = fd;
if (!in_smallbin_range (chunksize_nomask (p)) && p->fd_nextsize != NULL)
{
if (p->fd_nextsize->bk_nextsize != p
|| p->bk_nextsize->fd_nextsize != p)
malloc_printerr ("corrupted double-linked list (not small)");
if (fd->fd_nextsize == NULL)
{
if (p->fd_nextsize == p)
fd->fd_nextsize = fd->bk_nextsize = fd;
else
{
fd->fd_nextsize = p->fd_nextsize;
fd->bk_nextsize = p->bk_nextsize;
p->fd_nextsize->bk_nextsize = fd;
p->bk_nextsize->fd_nextsize = fd;
}
}
else
{
p->fd_nextsize->bk_nextsize = p->bk_nextsize;
p->bk_nextsize->fd_nextsize = p->fd_nextsize;
}
}
}
/*
Unsorted chunks
All remainders from chunk splits, as well as all returned chunks,
are first placed in the "unsorted" bin. They are then placed
in regular bins after malloc gives them ONE chance to be used before
binning. So, basically, the unsorted_chunks list acts as a queue,
with chunks being placed on it in free (and malloc_consolidate),
and taken off (to be either used or placed in bins) in malloc.
The NON_MAIN_ARENA flag is never set for unsorted chunks, so it
does not have to be taken into account in size comparisons.
*/
/* The otherwise unindexable 1-bin is used to hold unsorted chunks. */
#define unsorted_chunks(M) (bin_at (M, 1))
/*
Top
The top-most available chunk (i.e., the one bordering the end of
available memory) is treated specially. It is never included in
any bin, is used only if no other chunk is available, and is
released back to the system if it is very large (see
M_TRIM_THRESHOLD). Because top initially
points to its own bin with initial zero size, thus forcing
extension on the first malloc request, we avoid having any special
code in malloc to check whether it even exists yet. But we still
need to do so when getting memory from system, so we make
initial_top treat the bin as a legal but unusable chunk during the
interval between initialization and the first call to
sysmalloc. (This is somewhat delicate, since it relies on
the 2 preceding words to be zero during this interval as well.)
*/
/* Conveniently, the unsorted bin can be used as dummy top on first call */
#define initial_top(M) (unsorted_chunks (M))
/*
Binmap
To help compensate for the large number of bins, a one-level index
structure is used for bin-by-bin searching. `binmap' is a
bitvector recording whether bins are definitely empty so they can
be skipped over during traversals. The bits are NOT always
cleared as soon as bins are empty, but instead only
when they are noticed to be empty during traversal in malloc.
*/
/* Conservatively use 32 bits per map word, even if on 64bit system */
#define BINMAPSHIFT 5
#define BITSPERMAP (1U << BINMAPSHIFT)
#define BINMAPSIZE (NBINS / BITSPERMAP)
#define idx2block(i) ((i) >> BINMAPSHIFT)
#define idx2bit(i) ((1U << ((i) & ((1U << BINMAPSHIFT) - 1))))
#define mark_bin(m, i) ((m)->binmap[idx2block (i)] |= idx2bit (i))
#define unmark_bin(m, i) ((m)->binmap[idx2block (i)] &= ~(idx2bit (i)))
#define get_binmap(m, i) ((m)->binmap[idx2block (i)] & idx2bit (i))
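/* For illustration: bin 70 lives in binmap word idx2block (70) == 2 with
   bit idx2bit (70) == 1 << 6, so mark_bin (m, 70) sets bit 6 of
   m->binmap[2] and get_binmap (m, 70) tests it. */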
/*
Fastbins
An array of lists holding recently freed small chunks. Fastbins
are not doubly linked. It is faster to single-link them, and
since chunks are never removed from the middles of these lists,
double linking is not necessary. Also, unlike regular bins, they
are not even processed in FIFO order (they use faster LIFO) since
ordering doesn't much matter in the transient contexts in which
fastbins are normally used.
Chunks in fastbins keep their inuse bit set, so they cannot
be consolidated with other free chunks. malloc_consolidate
releases all chunks in fastbins and consolidates them with
other free chunks.
*/
typedef struct malloc_chunk *mfastbinptr;
#define fastbin(ar_ptr, idx) ((ar_ptr)->fastbinsY[idx])
/* offset 2 to use otherwise unindexable first 2 bins */
#define fastbin_index(sz) \
((((unsigned int) (sz)) >> (SIZE_SZ == 8 ? 4 : 3)) - 2)
/* The maximum fastbin request size we support */
#define MAX_FAST_SIZE (80 * SIZE_SZ / 4)
#define NFASTBINS (fastbin_index (request2size (MAX_FAST_SIZE)) + 1)
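/* For illustration, assuming SIZE_SZ == 8: chunk sizes 32, 48 and 64 map
   to fastbin_index 0, 1 and 2; MAX_FAST_SIZE is then 160, the largest
   fastbin-able chunk is request2size (160) == 176 with index 9, and
   NFASTBINS == 10. */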
/*
FASTBIN_CONSOLIDATION_THRESHOLD is the size of a chunk in free()
that triggers automatic consolidation of possibly-surrounding
fastbin chunks. This is a heuristic, so the exact value should not
matter too much. It is defined at half the default trim threshold as a
compromise heuristic to only attempt consolidation if it is likely
to lead to trimming. However, it is not dynamically tunable, since
consolidation reduces fragmentation surrounding large chunks even
if trimming is not used.
*/
#define FASTBIN_CONSOLIDATION_THRESHOLD (65536UL)
/*
NONCONTIGUOUS_BIT indicates that MORECORE does not return contiguous
regions. Otherwise, contiguity is exploited in merging together,
when possible, results from consecutive MORECORE calls.
The initial value comes from MORECORE_CONTIGUOUS, but is
changed dynamically if mmap is ever used as an sbrk substitute.
*/
#define NONCONTIGUOUS_BIT (2U)
#define contiguous(M) (((M)->flags & NONCONTIGUOUS_BIT) == 0)
#define noncontiguous(M) (((M)->flags & NONCONTIGUOUS_BIT) != 0)
#define set_noncontiguous(M) ((M)->flags |= NONCONTIGUOUS_BIT)
#define set_contiguous(M) ((M)->flags &= ~NONCONTIGUOUS_BIT)
/* Maximum size of memory handled in fastbins. */
static INTERNAL_SIZE_T global_max_fast;
/*
Set value of max_fast.
Use impossibly small value if 0.
Precondition: there are no existing fastbin chunks in the main arena.
Since do_check_malloc_state () checks this, we call malloc_consolidate ()
before changing max_fast. Note other arenas will leak their fast bin
entries if max_fast is reduced.
*/
#define set_max_fast(s) \
global_max_fast = (((size_t) (s) <= MALLOC_ALIGN_MASK - SIZE_SZ) \
? MIN_CHUNK_SIZE / 2 : ((s + SIZE_SZ) & ~MALLOC_ALIGN_MASK))
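/* For illustration, assuming SIZE_SZ == 8 and MALLOC_ALIGN_MASK == 15:
   set_max_fast (DEFAULT_MXFAST) with DEFAULT_MXFAST == 128 stores
   (128 + 8) & ~15 == 128, while set_max_fast (0) stores
   MIN_CHUNK_SIZE / 2 == 16, which is below any real chunk size and thus
   disables fastbins. */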
static inline INTERNAL_SIZE_T
get_max_fast (void)
{
/* Tell the GCC optimizers that global_max_fast is never larger
than MAX_FAST_SIZE. This avoids out-of-bounds array accesses in
_int_malloc after constant propagation of the size parameter.
(The code never executes because malloc preserves the
global_max_fast invariant, but the optimizers may not recognize
this.) */
if (global_max_fast > MAX_FAST_SIZE)
__builtin_unreachable ();
return global_max_fast;
}
/*
----------- Internal state representation and initialization -----------
*/
/*
have_fastchunks indicates that there are probably some fastbin chunks.
It is set true on entering a chunk into any fastbin, and cleared early in
malloc_consolidate. The value is approximate since it may be set when there
are no fastbin chunks, or it may be clear even if there are fastbin chunks
available. Given that its sole purpose is to reduce the number of redundant calls to
malloc_consolidate, it does not affect correctness. As a result we can safely
use relaxed atomic accesses.
*/
struct malloc_state
{
/* Serialize access. */
__libc_lock_define (, mutex);
/* Flags (formerly in max_fast). */
int flags;
/* Set if the fastbin chunks contain recently inserted free blocks. */
/* Note this is a bool but not all targets support atomics on booleans. */
int have_fastchunks;
/* Fastbins */
mfastbinptr fastbinsY[NFASTBINS];
/* Base of the topmost chunk -- not otherwise kept in a bin */
mchunkptr top;
/* The remainder from the most recent split of a small request */
mchunkptr last_remainder;
/* Normal bins packed as described above */
mchunkptr bins[NBINS * 2 - 2];
/* Bitmap of bins */
unsigned int binmap[BINMAPSIZE];
/* Linked list */
struct malloc_state *next;
/* Linked list for free arenas. Access to this field is serialized
by free_list_lock in arena.c. */
struct malloc_state *next_free;
/* Number of threads attached to this arena. 0 if the arena is on
the free list. Access to this field is serialized by
free_list_lock in arena.c. */
INTERNAL_SIZE_T attached_threads;
/* Memory allocated from the system in this arena. */
INTERNAL_SIZE_T system_mem;
INTERNAL_SIZE_T max_system_mem;
};
struct malloc_par
{
/* Tunable parameters */
unsigned long trim_threshold;
INTERNAL_SIZE_T top_pad;
INTERNAL_SIZE_T mmap_threshold;
INTERNAL_SIZE_T arena_test;
INTERNAL_SIZE_T arena_max;
#if HAVE_TUNABLES
/* Transparent Large Page support. */
INTERNAL_SIZE_T thp_pagesize;
/* A value different from 0 means mmap allocations are aligned to hp_pagesize
and hp_flags is added to the mmap flags. */
INTERNAL_SIZE_T hp_pagesize;
int hp_flags;
#endif
/* Memory map support */
int n_mmaps;
int n_mmaps_max;
int max_n_mmaps;
/* the mmap_threshold is dynamic, until the user sets
it manually, at which point we need to disable any
dynamic behavior. */
int no_dyn_threshold;
/* Statistics */
INTERNAL_SIZE_T mmapped_mem;
INTERNAL_SIZE_T max_mmapped_mem;
/* First address handed out by MORECORE/sbrk. */
char *sbrk_base;
#if USE_TCACHE
/* Maximum number of buckets to use. */
size_t tcache_bins;
size_t tcache_max_bytes;
/* Maximum number of chunks in each bucket. */
size_t tcache_count;
/* Maximum number of chunks to remove from the unsorted list, which
aren't used to prefill the cache. */
size_t tcache_unsorted_limit;
#endif
};
/* There are several instances of this struct ("arenas") in this
malloc. If you are adapting this malloc in a way that does NOT use
a static or mmapped malloc_state, you MUST explicitly zero-fill it
before using. This malloc relies on the property that malloc_state
is initialized to all zeroes (as is true of C statics). */
static struct malloc_state main_arena =
{
.mutex = _LIBC_LOCK_INITIALIZER,
.next = &main_arena,
.attached_threads = 1
};
/* There is only one instance of the malloc parameters. */
static struct malloc_par mp_ =
{
.top_pad = DEFAULT_TOP_PAD,
.n_mmaps_max = DEFAULT_MMAP_MAX,
.mmap_threshold = DEFAULT_MMAP_THRESHOLD,
.trim_threshold = DEFAULT_TRIM_THRESHOLD,
#define NARENAS_FROM_NCORES(n) ((n) * (sizeof (long) == 4 ? 2 : 8))
.arena_test = NARENAS_FROM_NCORES (1)
#if USE_TCACHE
,
.tcache_count = TCACHE_FILL_COUNT,
.tcache_bins = TCACHE_MAX_BINS,
.tcache_max_bytes = tidx2usize (TCACHE_MAX_BINS-1),
.tcache_unsorted_limit = 0 /* No limit. */
#endif
};
/*
Initialize a malloc_state struct.
This is called from ptmalloc_init () or from _int_new_arena ()
when creating a new arena.
*/
static void
malloc_init_state (mstate av)
{
int i;
mbinptr bin;
/* Establish circular links for normal bins */
for (i = 1; i < NBINS; ++i)
{
bin = bin_at (av, i);
bin->fd = bin->bk = bin;
}
#if MORECORE_CONTIGUOUS
if (av != &main_arena)
#endif
set_noncontiguous (av);
if (av == &main_arena)
set_max_fast (DEFAULT_MXFAST);
atomic_store_relaxed (&av->have_fastchunks, false);
av->top = initial_top (av);
}
/*
Other internal utilities operating on mstates
*/
static void *sysmalloc (INTERNAL_SIZE_T, mstate);
static int systrim (size_t, mstate);
static void malloc_consolidate (mstate);
/* -------------- Early definitions for debugging hooks ---------------- */
/* This function is called from the arena shutdown hook, to free the
thread cache (if it exists). */
static void tcache_thread_shutdown (void);
/* ------------------ Testing support ----------------------------------*/
static int perturb_byte;
static void
alloc_perturb (char *p, size_t n)
{
if (__glibc_unlikely (perturb_byte))
memset (p, perturb_byte ^ 0xff, n);
}
static void
free_perturb (char *p, size_t n)
{
if (__glibc_unlikely (perturb_byte))
memset (p, perturb_byte, n);
}
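/* For illustration: perturb_byte is normally set elsewhere (via the
   M_PERTURB mallopt option or the MALLOC_PERTURB_ environment variable).
   With MALLOC_PERTURB_=204 (0xcc), fresh allocations are filled with
   0xcc ^ 0xff == 0x33 and freed memory with 0xcc, which helps catch use
   of uninitialized or freed memory. */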
#include <stap-probe.h>
/* ----------- Routines dealing with transparent huge pages ----------- */
static inline void
madvise_thp (void *p, INTERNAL_SIZE_T size)
{
#if HAVE_TUNABLES && defined (MADV_HUGEPAGE)
/* Do not consider areas smaller than a huge page or if the tunable is
not active. */
if (mp_.thp_pagesize == 0 || size < mp_.thp_pagesize)
return;
/* Linux requires the input address to be page-aligned, and unaligned
inputs happen only for the initial data segment. */
if (__glibc_unlikely (!PTR_IS_ALIGNED (p, GLRO (dl_pagesize))))
{
void *q = PTR_ALIGN_DOWN (p, GLRO (dl_pagesize));
size += PTR_DIFF (p, q);
p = q;
}
__madvise (p, size, MADV_HUGEPAGE);
#endif
}
/* ------------------- Support for multiple arenas -------------------- */
#include "arena.c"
/*
Debugging support
These routines make a number of assertions about the states
of data structures that should be true at all times. If any
are not true, it's very likely that a user program has somehow
trashed memory. (It's also possible that there is a coding error
in malloc. In which case, please report it!)
*/
#if !MALLOC_DEBUG
# define check_chunk(A, P)
# define check_free_chunk(A, P)
# define check_inuse_chunk(A, P)
# define check_remalloced_chunk(A, P, N)
# define check_malloced_chunk(A, P, N)
# define check_malloc_state(A)
#else
# define check_chunk(A, P) do_check_chunk (A, P)
# define check_free_chunk(A, P) do_check_free_chunk (A, P)
# define check_inuse_chunk(A, P) do_check_inuse_chunk (A, P)
# define check_remalloced_chunk(A, P, N) do_check_remalloced_chunk (A, P, N)
# define check_malloced_chunk(A, P, N) do_check_malloced_chunk (A, P, N)
# define check_malloc_state(A) do_check_malloc_state (A)
/*
Properties of all chunks
*/
static void
do_check_chunk (mstate av, mchunkptr p)
{
unsigned long sz = chunksize (p);
/* min and max possible addresses assuming contiguous allocation */
char *max_address = (char *) (av->top) + chunksize (av->top);
char *min_address = max_address - av->system_mem;
if (!chunk_is_mmapped (p))
{
/* Has legal address ... */
if (p != av->top)
{
if (contiguous (av))
{
assert (((char *) p) >= min_address);
assert (((char *) p + sz) <= ((char *) (av->top)));
}
}
else
{
/* top size is always at least MINSIZE */
assert ((unsigned long) (sz) >= MINSIZE);
/* top predecessor always marked inuse */
assert (prev_inuse (p));
}
}
else
{
/* address is outside main heap */
if (contiguous (av) && av->top != initial_top (av))
{
assert (((char *) p) < min_address || ((char *) p) >= max_address);
}
/* chunk is page-aligned */
assert (((prev_size (p) + sz) & (GLRO (dl_pagesize) - 1)) == 0);
/* mem is aligned */
assert (aligned_OK (chunk2mem (p)));
}
}
/*
Properties of free chunks
*/
static void
do_check_free_chunk (mstate av, mchunkptr p)
{
INTERNAL_SIZE_T sz = chunksize_nomask (p) & ~(PREV_INUSE | NON_MAIN_ARENA);
mchunkptr next = chunk_at_offset (p, sz);
do_check_chunk (av, p);
/* Chunk must claim to be free ... */
assert (!inuse (p));
assert (!chunk_is_mmapped (p));
/* Unless a special marker, must have OK fields */
if ((unsigned long) (sz) >= MINSIZE)
{
assert ((sz & MALLOC_ALIGN_MASK) == 0);
assert (aligned_OK (chunk2mem (p)));
/* ... matching footer field */
assert (prev_size (next_chunk (p)) == sz);
/* ... and is fully consolidated */
assert (prev_inuse (p));
assert (next == av->top || inuse (next));
/* ... and has minimally sane links */
assert (p->fd->bk == p);
assert (p->bk->fd == p);
}
else /* markers are always of size SIZE_SZ */
assert (sz == SIZE_SZ);
}
/*
Properties of inuse chunks
*/
static void
do_check_inuse_chunk (mstate av, mchunkptr p)
{
mchunkptr next;
do_check_chunk (av, p);
if (chunk_is_mmapped (p))
return; /* mmapped chunks have no next/prev */
/* Check whether it claims to be in use ... */
assert (inuse (p));
next = next_chunk (p);
/* ... and is surrounded by OK chunks.
Since more things can be checked with free chunks than inuse ones,
if an inuse chunk borders them and debug is on, it's worth doing them.
*/
if (!prev_inuse (p))
{
/* Note that we cannot even look at prev unless it is not inuse */
mchunkptr prv = prev_chunk (p);
assert (next_chunk (prv) == p);
do_check_free_chunk (av, prv);
}
if (next == av->top)
{
assert (prev_inuse (next));
assert (chunksize (next) >= MINSIZE);
}
else if (!inuse (next))
do_check_free_chunk (av, next);
}
/*
Properties of chunks recycled from fastbins
*/
static void
do_check_remalloced_chunk (mstate av, mchunkptr p, INTERNAL_SIZE_T s)
{
INTERNAL_SIZE_T sz = chunksize_nomask (p) & ~(PREV_INUSE | NON_MAIN_ARENA);
if (!chunk_is_mmapped (p))
{
assert (av == arena_for_chunk (p));
if (chunk_main_arena (p))
assert (av == &main_arena);
else
assert (av != &main_arena);
}
do_check_inuse_chunk (av, p);
/* Legal size ... */
assert ((sz & MALLOC_ALIGN_MASK) == 0);
assert ((unsigned long) (sz) >= MINSIZE);
/* ... and alignment */
assert (aligned_OK (chunk2mem (p)));
/* chunk is less than MINSIZE more than request */
assert ((long) (sz) - (long) (s) >= 0);
assert ((long) (sz) - (long) (s + MINSIZE) < 0);
}
/*
Properties of nonrecycled chunks at the point they are malloced
*/
static void
do_check_malloced_chunk (mstate av, mchunkptr p, INTERNAL_SIZE_T s)
{
/* same as recycled case ... */
do_check_remalloced_chunk (av, p, s);
/*
... plus, must obey implementation invariant that prev_inuse is
always true of any allocated chunk; i.e., that each allocated
chunk borders either a previously allocated and still in-use
chunk, or the base of its memory arena. This is ensured
by making all allocations from the `lowest' part of any found
chunk. This does not necessarily hold however for chunks
recycled via fastbins.
*/
assert (prev_inuse (p));
}
/*
Properties of malloc_state.
This may be useful for debugging malloc, as well as detecting user
programmer errors that somehow write into malloc_state.
If you are extending or experimenting with this malloc, you can
probably figure out how to hack this routine to print out or
display chunk addresses, sizes, bins, and other instrumentation.
*/
static void
do_check_malloc_state (mstate av)
{
int i;
mchunkptr p;
mchunkptr q;
mbinptr b;
unsigned int idx;
INTERNAL_SIZE_T size;
unsigned long total = 0;
int max_fast_bin;
/* internal size_t must be no wider than pointer type */
assert (sizeof (INTERNAL_SIZE_T) <= sizeof (char *));
/* alignment is a power of 2 */
assert ((MALLOC_ALIGNMENT & (MALLOC_ALIGNMENT - 1)) == 0);
/* Check the arena is initialized. */
assert (av->top != 0);
/* No memory has been allocated yet, so doing more tests is not possible. */
if (av->top == initial_top (av))
return;
/* pagesize is a power of 2 */
assert (powerof2(GLRO (dl_pagesize)));
/* A contiguous main_arena is consistent with sbrk_base. */
if (av == &main_arena && contiguous (av))
assert ((char *) mp_.sbrk_base + av->system_mem ==
(char *) av->top + chunksize (av->top));
/* properties of fastbins */
/* max_fast is in allowed range */
assert ((get_max_fast () & ~1) <= request2size (MAX_FAST_SIZE));
max_fast_bin = fastbin_index (get_max_fast ());
for (i = 0; i < NFASTBINS; ++i)
{
p = fastbin (av, i);
/* The following test can only be performed for the main arena.
While mallopt calls malloc_consolidate to get rid of all fast
bins (especially those larger than the new maximum) this only
happens for the main arena. Trying to do this for any
other arena would mean those arenas have to be locked and
malloc_consolidate be called for them. This is excessive. And
even if this is acceptable to somebody it still cannot solve
the problem completely since if the arena is locked a
concurrent malloc call might create a new arena which then
could use the newly invalid fast bins. */
/* all bins past max_fast are empty */
if (av == &main_arena && i > max_fast_bin)
assert (p == 0);
while (p != 0)
{
if (__glibc_unlikely (misaligned_chunk (p)))
malloc_printerr ("do_check_malloc_state(): "
"unaligned fastbin chunk detected");
/* each chunk claims to be inuse */
do_check_inuse_chunk (av, p);
total += chunksize (p);
/* chunk belongs in this bin */
assert (fastbin_index (chunksize (p)) == i);
p = REVEAL_PTR (p->fd);
}
}
/* check normal bins */
for (i = 1; i < NBINS; ++i)
{
b = bin_at (av, i);
/* binmap is accurate (except for bin 1 == unsorted_chunks) */
if (i >= 2)
{
unsigned int binbit = get_binmap (av, i);
int empty = last (b) == b;
if (!binbit)
assert (empty);
else if (!empty)
assert (binbit);
}
for (p = last (b); p != b; p = p->bk)
{
/* each chunk claims to be free */
do_check_free_chunk (av, p);
size = chunksize (p);
total += size;
if (i >= 2)
{
/* chunk belongs in bin */
idx = bin_index (size);
assert (idx == i);
/* lists are sorted */
assert (p->bk == b ||
(unsigned long) chunksize (p->bk) >= (unsigned long) chunksize (p));
if (!in_smallbin_range (size))
{
if (p->fd_nextsize != NULL)
{
if (p->fd_nextsize == p)
assert (p->bk_nextsize == p);
else
{
if (p->fd_nextsize == first (b))
assert (chunksize (p) < chunksize (p->fd_nextsize));
else
assert (chunksize (p) > chunksize (p->fd_nextsize));
if (p == first (b))
assert (chunksize (p) > chunksize (p->bk_nextsize));
else
assert (chunksize (p) < chunksize (p->bk_nextsize));
}
}
else
assert (p->bk_nextsize == NULL);
}
}
else if (!in_smallbin_range (size))
assert (p->fd_nextsize == NULL && p->bk_nextsize == NULL);
/* chunk is followed by a legal chain of inuse chunks */
for (q = next_chunk (p);
(q != av->top && inuse (q) &&
(unsigned long) (chunksize (q)) >= MINSIZE);
q = next_chunk (q))
do_check_inuse_chunk (av, q);
}
}
/* top chunk is OK */
check_chunk (av, av->top);
}
#endif
/* ----------------- Support for debugging hooks -------------------- */
#if IS_IN (libc)
#include "hooks.c"
#endif
/* ----------- Routines dealing with system allocation -------------- */
/*
sysmalloc handles malloc cases requiring more memory from the system.
On entry, it is assumed that av->top does not have enough
space to service request for nb bytes, thus requiring that av->top
be extended or replaced.
*/
static void *
sysmalloc_mmap (INTERNAL_SIZE_T nb, size_t pagesize, int extra_flags, mstate av)
{
long int size;
/*
Round up size to nearest page. For mmapped chunks, the overhead is one
SIZE_SZ unit larger than for normal chunks, because there is no
following chunk whose prev_size field could be used.
See the front_misalign handling below; for glibc there is no need for
further alignment unless we have high alignment.
*/
if (MALLOC_ALIGNMENT == CHUNK_HDR_SZ)
size = ALIGN_UP (nb + SIZE_SZ, pagesize);
else
size = ALIGN_UP (nb + SIZE_SZ + MALLOC_ALIGN_MASK, pagesize);
/* Don't try if size wraps around 0. */
if ((unsigned long) (size) <= (unsigned long) (nb))
return MAP_FAILED;
char *mm = (char *) MMAP (0, size,
mtag_mmap_flags | PROT_READ | PROT_WRITE,
extra_flags);
if (mm == MAP_FAILED)
return mm;
#ifdef MAP_HUGETLB
if (!(extra_flags & MAP_HUGETLB))
madvise_thp (mm, size);
#endif
/*
The offset to the start of the mmapped region is stored in the prev_size
field of the chunk. This allows us to adjust returned start address to
meet alignment requirements here and in memalign(), and still be able to
compute proper address argument for later munmap in free() and realloc().
*/
INTERNAL_SIZE_T front_misalign; /* unusable bytes at front of new space */
if (MALLOC_ALIGNMENT == CHUNK_HDR_SZ)
{
/* For glibc, chunk2mem increases the address by CHUNK_HDR_SZ and
MALLOC_ALIGN_MASK is CHUNK_HDR_SZ-1. Each mmap'ed area is page
aligned and therefore definitely MALLOC_ALIGN_MASK-aligned. */
assert (((INTERNAL_SIZE_T) chunk2mem (mm) & MALLOC_ALIGN_MASK) == 0);
front_misalign = 0;
}
else
front_misalign = (INTERNAL_SIZE_T) chunk2mem (mm) & MALLOC_ALIGN_MASK;
mchunkptr p; /* the allocated/returned chunk */
if (front_misalign > 0)
{
ptrdiff_t correction = MALLOC_ALIGNMENT - front_misalign;
p = (mchunkptr) (mm + correction);
set_prev_size (p, correction);
set_head (p, (size - correction) | IS_MMAPPED);
}
else
{
p = (mchunkptr) mm;
set_prev_size (p, 0);
set_head (p, size | IS_MMAPPED);
}
/* update statistics */
int new = atomic_exchange_and_add (&mp_.n_mmaps, 1) + 1;
atomic_max (&mp_.max_n_mmaps, new);
unsigned long sum;
sum = atomic_exchange_and_add (&mp_.mmapped_mem, size) + size;
atomic_max (&mp_.max_mmapped_mem, sum);
check_chunk (av, p);
return chunk2mem (p);
}
/*
Allocate memory using mmap() based on S and NB requested size, aligning to
PAGESIZE if required. EXTRA_FLAGS is passed to the mmap() call. If the call
succeeds, S is updated with the allocated size. This is used as a fallback
if MORECORE fails.
*/
static void *
sysmalloc_mmap_fallback (long int *s, INTERNAL_SIZE_T nb,
INTERNAL_SIZE_T old_size, size_t minsize,
size_t pagesize, int extra_flags, mstate av)
{
long int size = *s;
/* Cannot merge with old top, so add its size back in */
if (contiguous (av))
size = ALIGN_UP (size + old_size, pagesize);
/* If we are relying on mmap as backup, then use larger units */
if ((unsigned long) (size) < minsize)
size = minsize;
/* Don't try if size wraps around 0 */
if ((unsigned long) (size) <= (unsigned long) (nb))
return MORECORE_FAILURE;
char *mbrk = (char *) (MMAP (0, size,
mtag_mmap_flags | PROT_READ | PROT_WRITE,
extra_flags));
if (mbrk == MAP_FAILED)
return MAP_FAILED;
#ifdef MAP_HUGETLB
if (!(extra_flags & MAP_HUGETLB))
madvise_thp (mbrk, size);
#endif
/* Record that we no longer have a contiguous sbrk region. After the first
time mmap is used as backup, we do not ever rely on contiguous space
since this could incorrectly bridge regions. */
set_noncontiguous (av);
*s = size;
return mbrk;
}
static void *
sysmalloc (INTERNAL_SIZE_T nb, mstate av)
{
mchunkptr old_top; /* incoming value of av->top */
INTERNAL_SIZE_T old_size; /* its size */
char *old_end; /* its end address */
long size; /* arg to first MORECORE or mmap call */
char *brk; /* return value from MORECORE */
long correction; /* arg to 2nd MORECORE call */
char *snd_brk; /* 2nd return val */
INTERNAL_SIZE_T front_misalign; /* unusable bytes at front of new space */
INTERNAL_SIZE_T end_misalign; /* partial page left at end of new space */
char *aligned_brk; /* aligned offset into brk */
mchunkptr p; /* the allocated/returned chunk */
mchunkptr remainder; /* remainder from allocation */
unsigned long remainder_size; /* its size */
size_t pagesize = GLRO (dl_pagesize);
bool tried_mmap = false;
/*
If have mmap, and the request size meets the mmap threshold, and
the system supports mmap, and there are few enough currently
allocated mmapped regions, try to directly map this request
rather than expanding top.
*/
if (av == NULL
|| ((unsigned long) (nb) >= (unsigned long) (mp_.mmap_threshold)
&& (mp_.n_mmaps < mp_.n_mmaps_max)))
{
char *mm;
#if HAVE_TUNABLES
if (mp_.hp_pagesize > 0 && nb >= mp_.hp_pagesize)
{
/* There is no need to issue the THP madvise call if Huge Pages are
used directly. */
mm = sysmalloc_mmap (nb, mp_.hp_pagesize, mp_.hp_flags, av);
if (mm != MAP_FAILED)
return mm;
}
#endif
mm = sysmalloc_mmap (nb, pagesize, 0, av);
if (mm != MAP_FAILED)
return mm;
tried_mmap = true;
}
/* There are no usable arenas and mmap also failed. */
if (av == NULL)
return 0;
/* Record incoming configuration of top */
old_top = av->top;
old_size = chunksize (old_top);
old_end = (char *) (chunk_at_offset (old_top, old_size));
brk = snd_brk = (char *) (MORECORE_FAILURE);
/*
If not the first time through, we require old_size to be
at least MINSIZE and to have prev_inuse set.
*/
assert ((old_top == initial_top (av) && old_size == 0) ||
((unsigned long) (old_size) >= MINSIZE &&
prev_inuse (old_top) &&
((unsigned long) old_end & (pagesize - 1)) == 0));
/* Precondition: not enough current space to satisfy nb request */
assert ((unsigned long) (old_size) < (unsigned long) (nb + MINSIZE));
if (av != &main_arena)
{
heap_info *old_heap, *heap;
size_t old_heap_size;
/* First try to extend the current heap. */
old_heap = heap_for_ptr (old_top);
old_heap_size = old_heap->size;
if ((long) (MINSIZE + nb - old_size) > 0
&& grow_heap (old_heap, MINSIZE + nb - old_size) == 0)
{
av->system_mem += old_heap->size - old_heap_size;
set_head (old_top, (((char *) old_heap + old_heap->size) - (char *) old_top)
| PREV_INUSE);
}
else if ((heap = new_heap (nb + (MINSIZE + sizeof (*heap)), mp_.top_pad)))
{
/* Use a newly allocated heap. */
heap->ar_ptr = av;
heap->prev = old_heap;
av->system_mem += heap->size;
/* Set up the new top. */
top (av) = chunk_at_offset (heap, sizeof (*heap));
set_head (top (av), (heap->size - sizeof (*heap)) | PREV_INUSE);
/* Set up the fencepost and free the old top chunk with a multiple of
MALLOC_ALIGNMENT in size. */
/* The fencepost takes at least MINSIZE bytes, because it might
become the top chunk again later. Note that a footer is set
up, too, although the chunk is marked in use. */
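/* Sketch of the resulting layout at the end of the old heap (assuming
the usual case where old_size is still at least MINSIZE after the
shrink just below):
[ old_top .. old_top + old_size )      shrunk old top, freed back to the arena
[ old_top + old_size )                 fencepost chunk, size CHUNK_HDR_SZ, PREV_INUSE
[ old_top + old_size + CHUNK_HDR_SZ )  final header, size 0, PREV_INUSE
The trailing headers keep later coalescing from walking past the end of
the old heap. */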
old_size = (old_size - MINSIZE) & ~MALLOC_ALIGN_MASK;
set_head (chunk_at_offset (old_top, old_size + CHUNK_HDR_SZ),
0 | PREV_INUSE);
if (old_size >= MINSIZE)
{
set_head (chunk_at_offset (old_top, old_size),
CHUNK_HDR_SZ | PREV_INUSE);
set_foot (chunk_at_offset (old_top, old_size), CHUNK_HDR_SZ);
set_head (old_top, old_size | PREV_INUSE | NON_MAIN_ARENA);
_int_free (av, old_top, 1);
}
else
{
set_head (old_top, (old_size + CHUNK_HDR_SZ) | PREV_INUSE);
set_foot (old_top, (old_size + CHUNK_HDR_SZ));
}
}
else if (!tried_mmap)
{
/* We can at least try to use mmap memory. If new_heap fails
it is unlikely that trying to allocate huge pages will
succeed. */
char *mm = sysmalloc_mmap (nb, pagesize, 0, av);
if (mm != MAP_FAILED)
return mm;
}
}
else /* av == main_arena */
{ /* Request enough space for nb + pad + overhead */
size = nb + mp_.top_pad + MINSIZE;
/*
If contiguous, we can subtract out existing space that we hope to
combine with new space. We add it back later only if
we don't actually get contiguous space.
*/
if (contiguous (av))
size -= old_size;
/*
Round to a multiple of page size or huge page size.
If MORECORE is not contiguous, this ensures that we only call it
with whole-page arguments. And if MORECORE is contiguous and
this is not first time through, this preserves page-alignment of
previous calls. Otherwise, we correct to page-align below.
*/
#if HAVE_TUNABLES && defined (MADV_HUGEPAGE)
/* Defined in brk.c. */
extern void *__curbrk;
if (__glibc_unlikely (mp_.thp_pagesize != 0))
{
uintptr_t top = ALIGN_UP ((uintptr_t) __curbrk + size,
mp_.thp_pagesize);
size = top - (uintptr_t) __curbrk;
}
else
#endif
size = ALIGN_UP (size, GLRO(dl_pagesize));
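/* Illustrative rounding (values assumed for exposition): with
nb + top_pad + MINSIZE totalling 0x22334 bytes and a 4 KiB page, the
request becomes ALIGN_UP (0x22334, 0x1000) = 0x23000. With THP enabled
and a 2 MiB thp_pagesize, the request is instead sized so that the new
break (__curbrk + size) lands on a 2 MiB boundary. */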
/*
Don't try to call MORECORE if the argument is so big as to appear
negative. Note that since mmap takes a size_t argument, it may succeed
below even if we cannot call MORECORE.
*/
if (size > 0)
{
brk = (char *) (MORECORE (size));
if (brk != (char *) (MORECORE_FAILURE))
madvise_thp (brk, size);
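/* Static probe point for tracing tools such as systemtap; it reports
the address returned by MORECORE and the size that was requested. */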
LIBC_PROBE (memory_sbrk_more, 2, brk, size);
}
if (brk == (char *) (MORECORE_FAILURE))
{
/*
If we have mmap, try using it as a backup when MORECORE fails or
cannot be used. This is worth doing on systems that have "holes" in
address space, so sbrk cannot extend to give contiguous space, but
space is available elsewhere. Note that we ignore mmap max count
and threshold limits, since the space will not be used as a
segregated mmap region.
*/
char *mbrk = MAP_FAILED;
#if HAVE_TUNABLES
if (mp_.hp_pagesize > 0)
mbrk = sysmalloc_mmap_fallback (&size, nb, old_size,
mp_.hp_pagesize, mp_.hp_pagesize,
mp_.hp_flags, av);
#endif
if (mbrk == MAP_FAILED)
mbrk = sysmalloc_mmap_fallback (&size, nb, old_size, pagesize,
MMAP_AS_MORECORE_SIZE, 0, av);
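/* MMAP_AS_MORECORE_SIZE (1 MiB here) keeps these backup mappings
reasonably large, so repeated MORECORE failures do not litter the
address space with many small mappings. */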
if (mbrk != MAP_FAILED)
{
/* We do not need, and cannot use, another sbrk call to find end */
brk = mbrk;
snd_brk = brk + size;
}
}
if (brk != (char *) (MORECORE_FAILURE))
{
if (mp_.sbrk_base == 0)
mp_.sbrk_base = brk;
av->system_mem += size;
/*
If MORECORE extends previous space, we can likewise extend top size.
*/
if (brk == old_end && snd_brk == (char *) (MORECORE_FAILURE))
set_head (old_top, (size + old_size) | PREV_INUSE);
else if (contiguous (av) && old_size && brk < old_end)
/* Oops!  Someone else killed our space.  Can't touch anything. */
malloc_printerr ("break adjusted to free malloc space");
/*
Otherwise, make adjustments:
* If the first time through or noncontiguous, we need to call sbrk
just to find out where the end of memory lies.
* We need to ensure that all returned chunks from malloc will meet
MALLOC_ALIGNMENT
* If there was an intervening foreign sbrk, we need to adjust sbrk
request size to account for fact that we will not be able to
combine new space with existing space in old_top.
* Almost all systems internally allocate whole pages at a time, in
which case we might as well use the whole last page of request.
So we allocate enough more memory to hit a page boundary now,
which in turn causes future contiguous calls to page-align.
*/
else
{
front_misalign = 0;
end_misalign = 0;
correction = 0;
aligned_brk = brk;
/* handle contiguous cases */
if (contiguous (av))
{
/* Count foreign sbrk as system_mem. */
if (old_size)
av->system_mem += brk - old_end;
/* Guarantee alignment of first new chunk made from this space */
front_misalign = (INTERNAL_SIZE_T) chunk2mem (brk) & MALLOC_ALIGN_MASK;
if (front_misalign > 0)
{
/*
Skip over some bytes to arrive at an aligned position.
We don't need to specially mark these wasted front bytes.
They will never be accessed anyway because
prev_inuse of av->top (and any chunk created from its start)
is always true after initialization.
*/
correction = MALLOC_ALIGNMENT - front_misalign;
aligned_brk += correction;
}
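/* Worked example (assuming a 64-bit build where MALLOC_ALIGNMENT is 16
and chunk2mem advances by CHUNK_HDR_SZ == 16): if a foreign sbrk left
brk at an address ending in ...8, then chunk2mem (brk) also ends in
...8, so front_misalign = 8, correction = 16 - 8 = 8 and
aligned_brk = brk + 8, which makes chunk2mem (aligned_brk) 16-byte
aligned. */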
/*
If this isn't adjacent to existing space, then we will not
be able to merge with old_top space, so must add to 2nd request.
*/
correction += old_size;
/* Extend the end address to hit a page boundary */
end_misalign = (INTERNAL_SIZE_T) (brk + size + correction);
correction += (ALIGN_UP (end_misalign, pagesize)) - end_misalign;
assert (correction >= 0);
snd_brk = (char *) (MORECORE (correction));
/*
If we can't allocate the correction, try to at least find out the
current brk. It might be enough to proceed without failing.
Note that if the second sbrk did NOT fail, we assume that space
is contiguous with the first sbrk. This is a safe assumption unless
the program is multithreaded but doesn't use locks and a foreign sbrk
occurred between our first and second calls.
*/
if (snd_brk == (char *) (MORECORE_FAILURE))
{
correction = 0;
snd_brk = (char *) (MORECORE (0));
}
else
madvise_thp (snd_brk, correction);
}
/* handle non-contiguous cases */
else
{
if (MALLOC_ALIGNMENT == CHUNK_HDR_SZ)
/* MORECORE/mmap must correctly align */
assert (((unsigned long) chunk2mem (brk) & MALLOC_ALIGN_MASK) == 0);
else
{
front_misalign = (INTERNAL_SIZE_T) chunk2mem (brk) & MALLOC_ALIGN_MASK;
if (front_misalign > 0)
{
/*
Skip over some bytes to arrive at an aligned position.
We don't need to specially mark these wasted front bytes.
They will never be accessed anyway because
prev_inuse of av->top (and any chunk created from its start)
is always true after initialization.
*/
aligned_brk += MALLOC_ALIGNMENT - front_misalign;
}
}
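/* Unlike the contiguous case above, no extra correction space is
requested here: the misaligned front bytes are simply skipped and left
unused, and MORECORE (0) below only queries where the current end of
memory lies. */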
/* Find out current end of memory */
if (snd_brk == (char *) (MORECORE_FAILURE))
{
snd_brk = (char *) (MORECORE (0));
}
}
/* Adjust top based on results of second sbrk */
if (snd_brk != (char *) (MORECORE_FAILURE))
{
av->top = (mchunkptr) aligned_brk;