/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.gnu.org/licenses/gpl-2.0.html
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2011, 2015, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * lustre/include/lustre/lustre_idl.h
 *
 * Lustre wire protocol definitions.
 */

/** \defgroup lustreidl lustreidl
 *
 * Lustre wire protocol definitions.
 *
 * ALL structs passing over the wire should be declared here.  Structs
 * that are used in interfaces with userspace should go in lustre_user.h.
 *
 * All structs being declared here should be built from simple fixed-size
 * types (__u8, __u16, __u32, __u64) or be built from other types or
 * structs also declared in this file.  Similarly, all flags and magic
 * values in those structs should also be declared here.  This ensures
 * that the Lustre wire protocol is not influenced by external dependencies.
 *
 * The only other acceptable items in this file are VERY SIMPLE accessor
 * functions to avoid callers grubbing inside the structures. Nothing that
 * depends on external functions or definitions should be in here.
 *
 * Structs must be properly aligned to put 64-bit values on an 8-byte
 * boundary.  Any structs being added here must also be added to
 * utils/wirecheck.c and "make newwiretest" run to regenerate the
 * utils/wiretest.c sources.  This allows us to verify that wire structs
 * have the proper alignment/size on all architectures.
 *
 * DO NOT CHANGE any of the structs, flags, values declared here and used
 * in released Lustre versions.  Some structs may have padding fields that
 * can be used.  Some structs might allow addition at the end (verify this
 * in the code to ensure that new/old clients that see this larger struct
 * do not fail, otherwise you need to implement protocol compatibility).
 *
 * @{
 */

#ifndef _LUSTRE_IDL_H_
#define _LUSTRE_IDL_H_

#include "../../../include/linux/libcfs/libcfs.h"
#include "../../../include/linux/lnet/types.h"

/* Definitions shared with user-space. */
#include "lustre_user.h"
#include "lustre_errno.h"
#include "../lustre_ver.h"

/*
 *  GENERAL STUFF
 */
/* FOO_REQUEST_PORTAL is for incoming requests on the FOO
 * FOO_REPLY_PORTAL   is for incoming replies on the FOO
 * FOO_BULK_PORTAL    is for incoming bulk on the FOO
 */

/* Lustre service names follow the format
 * service name + MDT + seq name
 */
#define LUSTRE_MDT_MAXNAMELEN	80

#define CONNMGR_REQUEST_PORTAL	  1
#define CONNMGR_REPLY_PORTAL	    2
/*#define OSC_REQUEST_PORTAL	    3 */
#define OSC_REPLY_PORTAL		4
/*#define OSC_BULK_PORTAL	       5 */
#define OST_IO_PORTAL		   6
#define OST_CREATE_PORTAL	       7
#define OST_BULK_PORTAL		 8
/*#define MDC_REQUEST_PORTAL	    9 */
#define MDC_REPLY_PORTAL	       10
/*#define MDC_BULK_PORTAL	      11 */
#define MDS_REQUEST_PORTAL	     12
/*#define MDS_REPLY_PORTAL	     13 */
#define MDS_BULK_PORTAL		14
#define LDLM_CB_REQUEST_PORTAL	 15
#define LDLM_CB_REPLY_PORTAL	   16
#define LDLM_CANCEL_REQUEST_PORTAL     17
#define LDLM_CANCEL_REPLY_PORTAL       18
/*#define PTLBD_REQUEST_PORTAL	   19 */
/*#define PTLBD_REPLY_PORTAL	     20 */
/*#define PTLBD_BULK_PORTAL	      21 */
#define MDS_SETATTR_PORTAL	     22
#define MDS_READPAGE_PORTAL	    23
#define OUT_PORTAL		    24

#define MGC_REPLY_PORTAL	       25
#define MGS_REQUEST_PORTAL	     26
#define MGS_REPLY_PORTAL	       27
#define OST_REQUEST_PORTAL	     28
#define FLD_REQUEST_PORTAL	     29
#define SEQ_METADATA_PORTAL	    30
#define SEQ_DATA_PORTAL		31
#define SEQ_CONTROLLER_PORTAL	  32
#define MGS_BULK_PORTAL		33

/* Portal 63 is reserved for the Cray Inc DVS - nic@cray.com, roe@cray.com,
 *						n8851@cray.com
 */

/* packet types */
#define PTL_RPC_MSG_REQUEST 4711
#define PTL_RPC_MSG_ERR     4712
#define PTL_RPC_MSG_REPLY   4713

/* DON'T use swabbed values of MAGIC as magic! */
#define LUSTRE_MSG_MAGIC_V2 0x0BD00BD3
#define LUSTRE_MSG_MAGIC_V2_SWABBED 0xD30BD00B

#define LUSTRE_MSG_MAGIC LUSTRE_MSG_MAGIC_V2
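
/*
 * Illustrative sketch (not part of the original header): a receiver can
 * detect a message from an opposite-endian peer by comparing the magic
 * read off the wire against the pre-swabbed constant, then byte-swapping
 * the rest of the message accordingly; the function name is hypothetical.
 */
static inline bool lustre_msg_magic_swabbed_example(__u32 magic)
{
	return magic == LUSTRE_MSG_MAGIC_V2_SWABBED;
}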

#define PTLRPC_MSG_VERSION  0x00000003
#define LUSTRE_VERSION_MASK 0xffff0000
#define LUSTRE_OBD_VERSION  0x00010000
#define LUSTRE_MDS_VERSION  0x00020000
#define LUSTRE_OST_VERSION  0x00030000
#define LUSTRE_DLM_VERSION  0x00040000
#define LUSTRE_LOG_VERSION  0x00050000
#define LUSTRE_MGS_VERSION  0x00060000

/**
 * Describes a range of sequence numbers: lsr_start is included in the
 * range but lsr_end is not.
 * The same structure is used in the fld module, where the lsr_index
 * field holds the mdt id of the home mdt.
 */
struct lu_seq_range {
	__u64 lsr_start;
	__u64 lsr_end;
	__u32 lsr_index;
	__u32 lsr_flags;
};
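
/*
 * Illustrative sketch (not part of the original header): a membership test
 * for the half-open range above; the function name is hypothetical.
 */
static inline bool lu_seq_range_contains_example(const struct lu_seq_range *r,
						 __u64 seq)
{
	return seq >= r->lsr_start && seq < r->lsr_end;
}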

struct lu_seq_range_array {
	__u32 lsra_count;
	__u32 lsra_padding;
	struct lu_seq_range lsra_lsr[0];
};

#define LU_SEQ_RANGE_MDT	0x0
#define LU_SEQ_RANGE_OST	0x1
#define LU_SEQ_RANGE_ANY	0x3

#define LU_SEQ_RANGE_MASK	0x3

/** \defgroup lu_fid lu_fid
 * @{
 */

/**
 * Flags for lustre_mdt_attrs::lma_compat and lustre_mdt_attrs::lma_incompat.
 * Deprecated since HSM and SOM attributes are now stored in separate on-disk
 * xattr.
 */
enum lma_compat {
	LMAC_HSM	= 0x00000001,
/*	LMAC_SOM	= 0x00000002, obsolete since 2.8.0 */
	LMAC_NOT_IN_OI	= 0x00000004, /* the object does NOT need OI mapping */
	LMAC_FID_ON_OST = 0x00000008, /* For OST-object, its OI mapping is
				       * under /O/<seq>/d<x>.
				       */
};

/**
 * Masks for all features that should be supported by a Lustre version to
 * access a specific file.
 * This information is stored in lustre_mdt_attrs::lma_incompat.
 */
enum lma_incompat {
	LMAI_RELEASED		= 0x00000001, /* file is released */
	LMAI_AGENT		= 0x00000002, /* agent inode */
	LMAI_REMOTE_PARENT	= 0x00000004, /* the parent of the object
					       * is on the remote MDT
					       */
};

#define LMA_INCOMPAT_SUPP	(LMAI_AGENT | LMAI_REMOTE_PARENT)

/**
 * fid constants
 */
enum {
	/** LASTID file has zero OID */
	LUSTRE_FID_LASTID_OID = 0UL,
	/** initial fid id value */
	LUSTRE_FID_INIT_OID  = 1UL
};

/** returns fid object sequence */
static inline __u64 fid_seq(const struct lu_fid *fid)
{
	return fid->f_seq;
}

/** returns fid object id */
static inline __u32 fid_oid(const struct lu_fid *fid)
{
	return fid->f_oid;
}

/** returns fid object version */
static inline __u32 fid_ver(const struct lu_fid *fid)
{
	return fid->f_ver;
}

static inline void fid_zero(struct lu_fid *fid)
{
	memset(fid, 0, sizeof(*fid));
}

static inline __u64 fid_ver_oid(const struct lu_fid *fid)
{
	return ((__u64)fid_ver(fid) << 32 | fid_oid(fid));
}

/* The copytool uses a 32-bit bitmask field to encode archive ids when
 * registering with the MDT through KUC.
 * archive num = 0 => all archives
 * archive num from 1 to 32 => one archive
 */
#define LL_HSM_MAX_ARCHIVE (sizeof(__u32) * 8)
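
/*
 * Illustrative sketch (not part of the original header; the exact mapping
 * is an assumption): archive number N (1..32) maps to bit N-1 of the
 * 32-bit mask, and archive number 0 selects all archives.
 */
static inline __u32 hsm_archive_mask_example(__u32 archive_num)
{
	return archive_num == 0 ? 0xffffffffU : 1U << (archive_num - 1);
}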

/**
 * Note that reserved SEQ numbers below 12 map, in the IGIF namespace,
 * to ldiskfs inode numbers that are themselves reserved, so these SEQ
 * numbers can be used for other purposes without risking collisions
 * with existing inodes.
 *
 * Different FID Format
 * http://wiki.old.lustre.org/index.php/Architecture_-_Interoperability_fids_zfs
 */
enum fid_seq {
	FID_SEQ_OST_MDT0	= 0,
	FID_SEQ_LLOG		= 1, /* unnamed llogs */
	FID_SEQ_ECHO		= 2,
	FID_SEQ_OST_MDT1	= 3,
	FID_SEQ_OST_MAX		= 9, /* Max MDT count before OST_on_FID */
	FID_SEQ_LLOG_NAME	= 10, /* named llogs */
	FID_SEQ_RSVD		= 11,
	FID_SEQ_IGIF		= 12,
	FID_SEQ_IGIF_MAX	= 0x0ffffffffULL,
	FID_SEQ_IDIF		= 0x100000000ULL,
	FID_SEQ_IDIF_MAX	= 0x1ffffffffULL,
	/* Normal FID sequence starts from this value, i.e. 1<<33 */
	FID_SEQ_START		= 0x200000000ULL,
	/* sequence for local pre-defined FIDs listed in local_oid */
	FID_SEQ_LOCAL_FILE	= 0x200000001ULL,
	FID_SEQ_DOT_LUSTRE	= 0x200000002ULL,
	/* sequence is used for local named objects FIDs generated
	 * by local_object_storage library
	 */
	FID_SEQ_LOCAL_NAME	= 0x200000003ULL,
	/* Because the current FLD caches only the fid sequence (not the
	 * oid) on the client side, any FID that needs to be exposed to
	 * clients must guarantee that all fids under one sequence
	 * are located on one MDT.
	 */
	FID_SEQ_SPECIAL		= 0x200000004ULL,
	FID_SEQ_QUOTA		= 0x200000005ULL,
	FID_SEQ_QUOTA_GLB	= 0x200000006ULL,
	FID_SEQ_ROOT		= 0x200000007ULL,  /* Located on MDT0 */
	FID_SEQ_NORMAL		= 0x200000400ULL,
	FID_SEQ_LOV_DEFAULT	= 0xffffffffffffffffULL
};

#define OBIF_OID_MAX_BITS	   32
#define OBIF_MAX_OID		(1ULL << OBIF_OID_MAX_BITS)
#define OBIF_OID_MASK	       ((1ULL << OBIF_OID_MAX_BITS) - 1)
#define IDIF_OID_MAX_BITS	   48
#define IDIF_MAX_OID		(1ULL << IDIF_OID_MAX_BITS)
#define IDIF_OID_MASK	       ((1ULL << IDIF_OID_MAX_BITS) - 1)

/** OID for FID_SEQ_SPECIAL */
enum special_oid {
	/* Big Filesystem Lock to serialize rename operations */
	FID_OID_SPECIAL_BFL     = 1UL,
};

/** OID for FID_SEQ_DOT_LUSTRE */
enum dot_lustre_oid {
	FID_OID_DOT_LUSTRE  = 1UL,
	FID_OID_DOT_LUSTRE_OBF = 2UL,
};

static inline bool fid_seq_is_mdt0(__u64 seq)
{
	return (seq == FID_SEQ_OST_MDT0);
}

static inline bool fid_seq_is_mdt(__u64 seq)
{
	return seq == FID_SEQ_OST_MDT0 || seq >= FID_SEQ_NORMAL;
}

static inline bool fid_seq_is_echo(__u64 seq)
{
	return (seq == FID_SEQ_ECHO);
}

static inline bool fid_is_echo(const struct lu_fid *fid)
{
	return fid_seq_is_echo(fid_seq(fid));
}

static inline bool fid_seq_is_llog(__u64 seq)
{
	return (seq == FID_SEQ_LLOG);
}

static inline bool fid_is_llog(const struct lu_fid *fid)
{
	/* file with OID == 0 is not llog but contains last oid */
	return fid_seq_is_llog(fid_seq(fid)) && fid_oid(fid) > 0;
}

static inline bool fid_seq_is_rsvd(__u64 seq)
{
	return (seq > FID_SEQ_OST_MDT0 && seq <= FID_SEQ_RSVD);
}

static inline bool fid_seq_is_special(__u64 seq)
{
	return seq == FID_SEQ_SPECIAL;
}

static inline bool fid_seq_is_local_file(__u64 seq)
{
	return seq == FID_SEQ_LOCAL_FILE ||
	       seq == FID_SEQ_LOCAL_NAME;
}

static inline bool fid_seq_is_root(__u64 seq)
{
	return seq == FID_SEQ_ROOT;
}

static inline bool fid_seq_is_dot(__u64 seq)
{
	return seq == FID_SEQ_DOT_LUSTRE;
}

static inline bool fid_seq_is_default(__u64 seq)
{
	return seq == FID_SEQ_LOV_DEFAULT;
}

static inline bool fid_is_mdt0(const struct lu_fid *fid)
{
	return fid_seq_is_mdt0(fid_seq(fid));
}

static inline void lu_root_fid(struct lu_fid *fid)
{
	fid->f_seq = FID_SEQ_ROOT;
	fid->f_oid = 1;
	fid->f_ver = 0;
}

/**
 * Check whether a fid is an igif or not.
 * \param fid the fid to be tested.
 * \return true if the fid is an igif; otherwise false.
 */
static inline bool fid_seq_is_igif(__u64 seq)
{
	return seq >= FID_SEQ_IGIF && seq <= FID_SEQ_IGIF_MAX;
}

static inline bool fid_is_igif(const struct lu_fid *fid)
{
	return fid_seq_is_igif(fid_seq(fid));
}

/**
 * Check whether a fid is an idif or not.
 * \param fid the fid to be tested.
 * \return true if the fid is an idif; otherwise false.
 */
static inline bool fid_seq_is_idif(__u64 seq)
{
	return seq >= FID_SEQ_IDIF && seq <= FID_SEQ_IDIF_MAX;
}

static inline bool fid_is_idif(const struct lu_fid *fid)
{
	return fid_seq_is_idif(fid_seq(fid));
}

static inline bool fid_is_local_file(const struct lu_fid *fid)
{
	return fid_seq_is_local_file(fid_seq(fid));
}

static inline bool fid_seq_is_norm(__u64 seq)
{
	return (seq >= FID_SEQ_NORMAL);
}

static inline bool fid_is_norm(const struct lu_fid *fid)
{
	return fid_seq_is_norm(fid_seq(fid));
}

/* convert an OST objid into an IDIF FID SEQ number */
static inline __u64 fid_idif_seq(__u64 id, __u32 ost_idx)
{
	return FID_SEQ_IDIF | (ost_idx << 16) | ((id >> 32) & 0xffff);
}

/* convert a packed IDIF FID into an OST objid */
static inline __u64 fid_idif_id(__u64 seq, __u32 oid, __u32 ver)
{
	return ((__u64)ver << 48) | ((seq & 0xffff) << 32) | oid;
}

/* extract ost index from IDIF FID */
static inline __u32 fid_idif_ost_idx(const struct lu_fid *fid)
{
	return (fid_seq(fid) >> 16) & 0xffff;
}
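
/*
 * Illustrative sketch (not part of the original header): round-trip of an
 * OST objid through the IDIF encoding above.  For objid < 2^48 and a
 * 16-bit ost_idx, packing and then unpacking recovers the original
 * values; the function name is hypothetical.
 */
static inline bool fid_idif_roundtrip_example(__u64 objid, __u32 ost_idx)
{
	__u64 seq = fid_idif_seq(objid, ost_idx); /* top 16 bits of objid */
	__u32 oid = (__u32)objid;		  /* low 32 bits of objid */

	return fid_idif_id(seq, oid, 0) == objid &&
	       ((seq >> 16) & 0xffff) == ost_idx;
}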

/* extract OST sequence (group) from a wire ost_id (id/seq) pair */
static inline __u64 ostid_seq(const struct ost_id *ostid)
{
	if (fid_seq_is_mdt0(ostid->oi.oi_seq))
		return FID_SEQ_OST_MDT0;

	if (unlikely(fid_seq_is_default(ostid->oi.oi_seq)))
		return FID_SEQ_LOV_DEFAULT;

	if (fid_is_idif(&ostid->oi_fid))
		return FID_SEQ_OST_MDT0;

	return fid_seq(&ostid->oi_fid);
}

/* extract OST objid from a wire ost_id (id/seq) pair */
static inline __u64 ostid_id(const struct ost_id *ostid)
{
	if (fid_seq_is_mdt0(ostid->oi.oi_seq))
		return ostid->oi.oi_id & IDIF_OID_MASK;

	if (unlikely(fid_seq_is_default(ostid->oi.oi_seq)))
		return ostid->oi.oi_id;

	if (fid_is_idif(&ostid->oi_fid))
		return fid_idif_id(fid_seq(&ostid->oi_fid),
				   fid_oid(&ostid->oi_fid), 0);

	return fid_oid(&ostid->oi_fid);
}

static inline void ostid_set_seq(struct ost_id *oi, __u64 seq)
{
	if (fid_seq_is_mdt0(seq) || fid_seq_is_default(seq)) {
		oi->oi.oi_seq = seq;
	} else {
		oi->oi_fid.f_seq = seq;
		/* Note: if f_oid + f_ver is zero, we need to init it
		 * to 1; otherwise ostid_seq will treat this
		 * as an old ostid (oi_seq == 0)
		 */
		if (oi->oi_fid.f_oid == 0 && oi->oi_fid.f_ver == 0)
			oi->oi_fid.f_oid = LUSTRE_FID_INIT_OID;
	}
}

static inline void ostid_set_seq_mdt0(struct ost_id *oi)
{
	ostid_set_seq(oi, FID_SEQ_OST_MDT0);
}

static inline void ostid_set_seq_echo(struct ost_id *oi)
{
	ostid_set_seq(oi, FID_SEQ_ECHO);
}

static inline void ostid_set_seq_llog(struct ost_id *oi)
{
	ostid_set_seq(oi, FID_SEQ_LLOG);
}

/**
 * Note: we need to check oi_seq to decide where to set oi_id,
 * so oi_seq should always be set ahead of oi_id.
 */
static inline void ostid_set_id(struct ost_id *oi, __u64 oid)
{
	if (fid_seq_is_mdt0(oi->oi.oi_seq)) {
		if (oid >= IDIF_MAX_OID) {
			CERROR("Too large OID %#llx to set MDT0 " DOSTID "\n",
			       oid, POSTID(oi));
			return;
		}
		oi->oi.oi_id = oid;
	} else if (fid_is_idif(&oi->oi_fid)) {
		if (oid >= IDIF_MAX_OID) {
			CERROR("Too large OID %#llx to set IDIF " DOSTID "\n",
			       oid, POSTID(oi));
			return;
		}
		oi->oi_fid.f_seq = fid_idif_seq(oid,
						fid_idif_ost_idx(&oi->oi_fid));
		oi->oi_fid.f_oid = oid;
		oi->oi_fid.f_ver = oid >> 48;
	} else {
		if (oid >= OBIF_MAX_OID) {
			CERROR("Bad %llu to set " DOSTID "\n", oid, POSTID(oi));
			return;
		}
		oi->oi_fid.f_oid = oid;
	}
}
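
/*
 * Illustrative sketch (not part of the original header): the required
 * seq-before-id ordering in practice; the function name is hypothetical.
 */
static inline void ostid_build_example(struct ost_id *oi, __u64 objid)
{
	ostid_set_seq_mdt0(oi);	 /* the sequence first ... */
	ostid_set_id(oi, objid); /* ... then the object id */
}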

static inline int fid_set_id(struct lu_fid *fid, __u64 oid)
{
	if (unlikely(fid_seq_is_igif(fid->f_seq))) {
		CERROR("bad IGIF, "DFID"\n", PFID(fid));
		return -EBADF;
	}

	if (fid_is_idif(fid)) {
		if (oid >= IDIF_MAX_OID) {
			CERROR("Too large OID %#llx to set IDIF " DFID "\n",
			       (unsigned long long)oid, PFID(fid));
			return -EBADF;
		}
		fid->f_seq = fid_idif_seq(oid, fid_idif_ost_idx(fid));
		fid->f_oid = oid;
		fid->f_ver = oid >> 48;
	} else {
		if (oid >= OBIF_MAX_OID) {
			CERROR("Too large OID %#llx to set REG " DFID "\n",
			       (unsigned long long)oid, PFID(fid));
			return -EBADF;
		}
		fid->f_oid = oid;
	}
	return 0;
}

/**
 * Unpack an OST object id/seq (group) into a FID.  This is needed for
 * converting all obdo, lmm, lsm, etc. 64-bit id/seq pairs into proper
 * FIDs.  Note that if an id/seq is already in FID/IDIF format it will
 * be passed through unchanged.  Only legacy OST objects in "group 0"
 * will be mapped into the IDIF namespace so that they can fit into the
 * struct lu_fid fields without loss.  For reference see:
 * http://wiki.old.lustre.org/index.php/Architecture_-_Interoperability_fids_zfs
 */
static inline int ostid_to_fid(struct lu_fid *fid, struct ost_id *ostid,
			       __u32 ost_idx)
{
	__u64 seq = ostid_seq(ostid);

	if (ost_idx > 0xffff) {
		CERROR("bad ost_idx, "DOSTID" ost_idx:%u\n", POSTID(ostid),
		       ost_idx);
		return -EBADF;
	}

	if (fid_seq_is_mdt0(seq)) {
		__u64 oid = ostid_id(ostid);

		/* This is a "legacy" (old 1.x/2.early) OST object in "group 0"
		 * that we map into the IDIF namespace.  It allows up to 2^48
		 * objects per OST, as this is the object namespace that has
		 * been in production for years.  This can handle create rates
		 * of 1M objects/s/OST for 9 years, or combinations thereof.
		 */
		if (oid >= IDIF_MAX_OID) {
			CERROR("bad MDT0 id, " DOSTID " ost_idx:%u\n",
			       POSTID(ostid), ost_idx);
			return -EBADF;
		}
		fid->f_seq = fid_idif_seq(oid, ost_idx);
		/* truncate to 32 bits by assignment */
		fid->f_oid = oid;
		/* in theory, not currently used */
		fid->f_ver = oid >> 48;
	} else if (likely(!fid_seq_is_default(seq))) {
	       /* This is either an IDIF object, which identifies objects across
		* all OSTs, or a regular FID.  The IDIF namespace maps legacy
		* OST objects into the FID namespace.  In both cases, we just
		* pass the FID through, no conversion needed.
		*/
		if (ostid->oi_fid.f_ver != 0) {
			CERROR("bad MDT0 id, " DOSTID " ost_idx:%u\n",
			       POSTID(ostid), ost_idx);
			return -EBADF;
		}
		*fid = ostid->oi_fid;
	}

	return 0;
}

/* pack any OST FID into an ostid (id/seq) for the wire/disk */
static inline int fid_to_ostid(const struct lu_fid *fid, struct ost_id *ostid)
{
	if (unlikely(fid_seq_is_igif(fid->f_seq))) {
		CERROR("bad IGIF, "DFID"\n", PFID(fid));
		return -EBADF;
	}

	if (fid_is_idif(fid)) {
		ostid_set_seq_mdt0(ostid);
		ostid_set_id(ostid, fid_idif_id(fid_seq(fid), fid_oid(fid),
						fid_ver(fid)));
	} else {
		ostid->oi_fid = *fid;
	}

	return 0;
}

/* Check whether the fid is for LAST_ID */
static inline bool fid_is_last_id(const struct lu_fid *fid)
{
	return (fid_oid(fid) == 0);
}

/**
 * Get the inode number from an igif.
 * \param fid an igif to get the inode number from.
 * \return the inode number for the igif.
 */
static inline ino_t lu_igif_ino(const struct lu_fid *fid)
{
	return fid_seq(fid);
}

/**
 * Get the inode generation from an igif.
 * \param fid an igif to get the inode generation from.
 * \return the inode generation for the igif.
 */
static inline __u32 lu_igif_gen(const struct lu_fid *fid)
{
	return fid_oid(fid);
}

/**
 * Build igif from the inode number/generation.
 */
static inline void lu_igif_build(struct lu_fid *fid, __u32 ino, __u32 gen)
{
	fid->f_seq = ino;
	fid->f_oid = gen;
	fid->f_ver = 0;
}
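
/*
 * Illustrative sketch (not part of the original header): an igif built
 * from an inode number/generation pair hands both values back unchanged;
 * the function name is hypothetical.
 */
static inline bool lu_igif_roundtrip_example(__u32 ino, __u32 gen)
{
	struct lu_fid fid;

	lu_igif_build(&fid, ino, gen);
	return lu_igif_ino(&fid) == ino && lu_igif_gen(&fid) == gen;
}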

/*
 * Fids are transmitted across the network (in the sender's byte order)
 * and stored on disk in big-endian order.
 */
static inline void fid_cpu_to_le(struct lu_fid *dst, const struct lu_fid *src)
{
	dst->f_seq = cpu_to_le64(fid_seq(src));
	dst->f_oid = cpu_to_le32(fid_oid(src));
	dst->f_ver = cpu_to_le32(fid_ver(src));
}

static inline void fid_le_to_cpu(struct lu_fid *dst, const struct lu_fid *src)
{
	dst->f_seq = le64_to_cpu(fid_seq(src));
	dst->f_oid = le32_to_cpu(fid_oid(src));
	dst->f_ver = le32_to_cpu(fid_ver(src));
}

static inline void fid_cpu_to_be(struct lu_fid *dst, const struct lu_fid *src)
{
	dst->f_seq = cpu_to_be64(fid_seq(src));
	dst->f_oid = cpu_to_be32(fid_oid(src));
	dst->f_ver = cpu_to_be32(fid_ver(src));
}

static inline void fid_be_to_cpu(struct lu_fid *dst, const struct lu_fid *src)
{
	dst->f_seq = be64_to_cpu(fid_seq(src));
	dst->f_oid = be32_to_cpu(fid_oid(src));
	dst->f_ver = be32_to_cpu(fid_ver(src));
}

static inline bool fid_is_sane(const struct lu_fid *fid)
{
	return fid &&
	       ((fid_seq(fid) >= FID_SEQ_START && fid_ver(fid) == 0) ||
		fid_is_igif(fid) || fid_is_idif(fid) ||
		fid_seq_is_rsvd(fid_seq(fid)));
}

static inline bool lu_fid_eq(const struct lu_fid *f0, const struct lu_fid *f1)
{
	return memcmp(f0, f1, sizeof(*f0)) == 0;
}

#define __diff_normalize(val0, val1)			    \
({							      \
	typeof(val0) __val0 = (val0);			   \
	typeof(val1) __val1 = (val1);			   \
								\
	(__val0 == __val1 ? 0 : __val0 > __val1 ? 1 : -1);     \
})

static inline int lu_fid_cmp(const struct lu_fid *f0,
			     const struct lu_fid *f1)
{
	return
		__diff_normalize(fid_seq(f0), fid_seq(f1)) ?:
		__diff_normalize(fid_oid(f0), fid_oid(f1)) ?:
		__diff_normalize(fid_ver(f0), fid_ver(f1));
}

static inline void ostid_cpu_to_le(const struct ost_id *src_oi,
				   struct ost_id *dst_oi)
{
	if (fid_seq_is_mdt0(ostid_seq(src_oi))) {
		dst_oi->oi.oi_id = cpu_to_le64(src_oi->oi.oi_id);
		dst_oi->oi.oi_seq = cpu_to_le64(src_oi->oi.oi_seq);
	} else {
		fid_cpu_to_le(&dst_oi->oi_fid, &src_oi->oi_fid);
	}
}

static inline void ostid_le_to_cpu(const struct ost_id *src_oi,
				   struct ost_id *dst_oi)
{
	if (fid_seq_is_mdt0(ostid_seq(src_oi))) {
		dst_oi->oi.oi_id = le64_to_cpu(src_oi->oi.oi_id);
		dst_oi->oi.oi_seq = le64_to_cpu(src_oi->oi.oi_seq);
	} else {
		fid_le_to_cpu(&dst_oi->oi_fid, &src_oi->oi_fid);
	}
}

/** @} lu_fid */

/** \defgroup lu_dir lu_dir
 * @{
 */

/**
 * Enumeration of possible directory entry attributes.
 *
 * Attributes follow directory entry header in the order they appear in this
 * enumeration.
 */
enum lu_dirent_attrs {
	LUDA_FID		= 0x0001,
	LUDA_TYPE		= 0x0002,
	LUDA_64BITHASH		= 0x0004,
};

/**
 * Layout of readdir pages, as transmitted on wire.
 */
struct lu_dirent {
	/** valid if LUDA_FID is set. */
	struct lu_fid lde_fid;
	/** a unique entry identifier: a hash or an offset. */
	__u64	 lde_hash;
	/** total record length, including all attributes. */
	__u16	 lde_reclen;
	/** name length */
	__u16	 lde_namelen;
	/** optional variable size attributes following this entry.
	 *  taken from enum lu_dirent_attrs.
	 */
	__u32	 lde_attrs;
	/** name is followed by the attributes indicated in ->lde_attrs, in
	 *  their natural order. After the last attribute, padding bytes are
	 *  added to make ->lde_reclen a multiple of 8.
	 */
	char	  lde_name[0];
};

/*
 * Definitions of optional directory entry attributes formats.
 *
 * Individual attributes do not have their length encoded in a generic way. It
 * is assumed that the consumer of an attribute knows its format. This means that
 * it is impossible to skip over an unknown attribute, except by skipping over all
 * remaining attributes (by using ->lde_reclen), which is not too
 * constraining, because new server versions will append new attributes at
 * the end of an entry.
 */

/**
 * Fid directory attribute: a fid of an object referenced by the entry. This
 * will be almost always requested by the client and supplied by the server.
 *
 * Aligned to 8 bytes.
 */
/* To have compatibility with 1.8, lets have fid in lu_dirent struct. */

/**
 * File type.
 *
 * Aligned to 2 bytes.
 */
struct luda_type {
	__u16 lt_type;
};

#ifndef IFSHIFT
#define IFSHIFT                 12
#endif

#ifndef IFTODT
#define IFTODT(type)		(((type) & S_IFMT) >> IFSHIFT)
#endif
#ifndef DTTOIF
#define DTTOIF(dirtype)		((dirtype) << IFSHIFT)
#endif

struct lu_dirpage {
	__u64	    ldp_hash_start;
	__u64	    ldp_hash_end;
	__u32	    ldp_flags;
	__u32	    ldp_pad0;
	struct lu_dirent ldp_entries[0];
};

enum lu_dirpage_flags {
	/**
	 * dirpage contains no entry.
	 */
	LDF_EMPTY   = 1 << 0,
	/**
	 * last entry's lde_hash equals ldp_hash_end.
	 */
	LDF_COLLIDE = 1 << 1
};

static inline struct lu_dirent *lu_dirent_start(struct lu_dirpage *dp)
{
	if (le32_to_cpu(dp->ldp_flags) & LDF_EMPTY)
		return NULL;
	else
		return dp->ldp_entries;
}

static inline struct lu_dirent *lu_dirent_next(struct lu_dirent *ent)
{
	struct lu_dirent *next;

	if (le16_to_cpu(ent->lde_reclen) != 0)
		next = ((void *)ent) + le16_to_cpu(ent->lde_reclen);
	else
		next = NULL;

	return next;
}
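
/*
 * Illustrative sketch (not part of the original header): walking the
 * entries of one dirpage with the two accessors above; the function name
 * is hypothetical.
 */
static inline unsigned int lu_dirpage_count_example(struct lu_dirpage *dp)
{
	struct lu_dirent *ent;
	unsigned int count = 0;

	for (ent = lu_dirent_start(dp); ent; ent = lu_dirent_next(ent))
		count++;

	return count;
}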

static inline size_t lu_dirent_calc_size(size_t namelen, __u16 attr)
{
	size_t size;

	if (attr & LUDA_TYPE) {
		const size_t align = sizeof(struct luda_type) - 1;

		size = (sizeof(struct lu_dirent) + namelen + align) & ~align;
		size += sizeof(struct luda_type);
	} else {
		size = sizeof(struct lu_dirent) + namelen;
	}

	return (size + 7) & ~7;
}

#define MDS_DIR_END_OFF 0xfffffffffffffffeULL

/**
 * MDS_READPAGE page size
 *
 * This is the directory page size packed in MDS_READPAGE RPCs.
 * It's different from PAGE_SIZE because the client needs to
 * access the struct lu_dirpage header packed at the beginning of
 * each "page"; without a fixed unit size there would be no way to
 * locate the lu_dirpage headers when client and server PAGE_SIZE differ.
 */
#define LU_PAGE_SHIFT 12
#define LU_PAGE_SIZE  (1UL << LU_PAGE_SHIFT)
#define LU_PAGE_MASK  (~(LU_PAGE_SIZE - 1))

#define LU_PAGE_COUNT (1 << (PAGE_SHIFT - LU_PAGE_SHIFT))
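
/*
 * Worked example (not part of the original header): with a 64K client
 * PAGE_SIZE (PAGE_SHIFT == 16), each kernel page carries
 * LU_PAGE_COUNT == 1 << (16 - 12) == 16 lu_dirpage units of 4K each.
 */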

/** @} lu_dir */

struct lustre_handle {
	__u64 cookie;
};

#define DEAD_HANDLE_MAGIC 0xdeadbeefcafebabeULL

static inline bool lustre_handle_is_used(const struct lustre_handle *lh)
{
	return lh->cookie != 0ull;
}

static inline bool lustre_handle_equal(const struct lustre_handle *lh1,
				       const struct lustre_handle *lh2)
{
	return lh1->cookie == lh2->cookie;
}

static inline void lustre_handle_copy(struct lustre_handle *tgt,
				      const struct lustre_handle *src)
{
	tgt->cookie = src->cookie;
}

/* flags for lm_flags */
#define MSGHDR_AT_SUPPORT	       0x1
#define MSGHDR_CKSUM_INCOMPAT18	 0x2

#define lustre_msg lustre_msg_v2
/* we depend on this structure to be 8-byte aligned */
/* this type is only endian-adjusted in lustre_unpack_msg() */
struct lustre_msg_v2 {
	__u32 lm_bufcount;
	__u32 lm_secflvr;
	__u32 lm_magic;
	__u32 lm_repsize;
	__u32 lm_cksum;
	__u32 lm_flags;
	__u32 lm_padding_2;
	__u32 lm_padding_3;
	__u32 lm_buflens[0];
};

/* without gss, ptlrpc_body is put at the first buffer. */
#define PTLRPC_NUM_VERSIONS     4

struct ptlrpc_body_v3 {
	struct lustre_handle pb_handle;
	__u32 pb_type;
	__u32 pb_version;
	__u32 pb_opc;
	__u32 pb_status;
	__u64 pb_last_xid; /* highest replied XID without lower unreplied XID */
	__u16 pb_tag;      /* virtual slot idx for multiple modifying RPCs */
	__u16 pb_padding0;
	__u32 pb_padding1;
	__u64 pb_last_committed;
	__u64 pb_transno;
	__u32 pb_flags;
	__u32 pb_op_flags;
	__u32 pb_conn_cnt;
	__u32 pb_timeout;  /* for req, the deadline, for rep, the service est */
	__u32 pb_service_time; /* for rep, actual service time */
	__u32 pb_limit;
	__u64 pb_slv;
	/* VBR: pre-versions */
	__u64 pb_pre_versions[PTLRPC_NUM_VERSIONS];
	__u64 pb_mbits; /**< match bits for bulk request */
	/* padding for future needs */
	__u64 pb_padding64_0;
	__u64 pb_padding64_1;
	__u64 pb_padding64_2;
	char  pb_jobid[LUSTRE_JOBID_SIZE];
};

#define ptlrpc_body     ptlrpc_body_v3

struct ptlrpc_body_v2 {
	struct lustre_handle pb_handle;
	__u32 pb_type;
	__u32 pb_version;
	__u32 pb_opc;
	__u32 pb_status;
	__u64 pb_last_xid; /* highest replied XID without lower unreplied XID */
	__u16 pb_tag;      /* virtual slot idx for multiple modifying RPCs */
	__u16 pb_padding0;
	__u32 pb_padding1;
	__u64 pb_last_committed;
	__u64 pb_transno;
	__u32 pb_flags;
	__u32 pb_op_flags;
	__u32 pb_conn_cnt;
	__u32 pb_timeout;  /* for req, the deadline, for rep, the service est */
	__u32 pb_service_time; /* for rep, actual service time, also used for
				* net_latency of req
				*/
	__u32 pb_limit;
	__u64 pb_slv;
	/* VBR: pre-versions */
	__u64 pb_pre_versions[PTLRPC_NUM_VERSIONS];
	__u64 pb_mbits; /**< unused in V2 */
	/* padding for future needs */
	__u64 pb_padding64_0;
	__u64 pb_padding64_1;
	__u64 pb_padding64_2;
};

/* message body offset for lustre_msg_v2 */
/* ptlrpc body offset in all request/reply messages */
#define MSG_PTLRPC_BODY_OFF	     0

/* normal request/reply message record offset */
#define REQ_REC_OFF		     1
#define REPLY_REC_OFF		   1

/* ldlm request message body offset */
#define DLM_LOCKREQ_OFF		 1 /* lockreq offset */
#define DLM_REQ_REC_OFF		 2 /* normal dlm request record offset */

/* ldlm intent lock message body offset */
#define DLM_INTENT_IT_OFF	       2 /* intent lock it offset */
#define DLM_INTENT_REC_OFF	      3 /* intent lock record offset */

/* ldlm reply message body offset */
#define DLM_LOCKREPLY_OFF	       1 /* lockrep offset */
#define DLM_REPLY_REC_OFF	       2 /* reply record offset */

/** only used in req->rq_{req,rep}_swab_mask */
#define MSG_PTLRPC_HEADER_OFF	   31

/* Flags that are operation-specific go in the top 16 bits. */
#define MSG_OP_FLAG_MASK   0xffff0000
#define MSG_OP_FLAG_SHIFT  16

/* Flags that apply to all requests are in the bottom 16 bits */
#define MSG_GEN_FLAG_MASK     0x0000ffff
#define MSG_LAST_REPLAY	   0x0001
#define MSG_RESENT		0x0002
#define MSG_REPLAY		0x0004
/* #define MSG_AT_SUPPORT	 0x0008
 * This was used in early prototypes of adaptive timeouts, and while there
 * shouldn't be any users of that code, there also isn't a need for using
 * this bit. Defer usage until at least 1.10 to avoid potential conflicts.
 */
#define MSG_DELAY_REPLAY	  0x0010
#define MSG_VERSION_REPLAY	0x0020
#define MSG_REQ_REPLAY_DONE       0x0040
#define MSG_LOCK_REPLAY_DONE      0x0080

/*
 * Flags for all connect opcodes (MDS_CONNECT, OST_CONNECT)
 */

#define MSG_CONNECT_RECOVERING  0x00000001
#define MSG_CONNECT_RECONNECT   0x00000002
#define MSG_CONNECT_REPLAYABLE  0x00000004
/*#define MSG_CONNECT_PEER	0x8 */
#define MSG_CONNECT_LIBCLIENT   0x00000010
#define MSG_CONNECT_INITIAL     0x00000020
#define MSG_CONNECT_ASYNC       0x00000040
#define MSG_CONNECT_NEXT_VER    0x00000080 /* use next version of lustre_msg */
#define MSG_CONNECT_TRANSNO     0x00000100 /* report transno */

/* Connect flags */
#define OBD_CONNECT_RDONLY		  0x1ULL /*client has read-only access*/
#define OBD_CONNECT_INDEX		  0x2ULL /*connect specific LOV idx */
#define OBD_CONNECT_MDS			  0x4ULL /*connect from MDT to OST */
#define OBD_CONNECT_GRANT		  0x8ULL /*OSC gets grant at connect */
#define OBD_CONNECT_SRVLOCK		 0x10ULL /*server takes locks for cli */
#define OBD_CONNECT_VERSION		 0x20ULL /*Lustre versions in ocd */
#define OBD_CONNECT_REQPORTAL		 0x40ULL /*Separate non-IO req portal */
#define OBD_CONNECT_ACL			 0x80ULL /*access control lists */
#define OBD_CONNECT_XATTR		0x100ULL /*client use extended attr */
#define OBD_CONNECT_CROW		0x200ULL /*MDS+OST create obj on write*/
#define OBD_CONNECT_TRUNCLOCK		0x400ULL /*locks on server for punch */
#define OBD_CONNECT_TRANSNO		0x800ULL /*replay sends init transno */
#define OBD_CONNECT_IBITS	       0x1000ULL /*support for inodebits locks*/
#define OBD_CONNECT_JOIN	       0x2000ULL /*files can be concatenated.
						  *We no longer support JOIN
						  *FILE; this flag is reserved
						  *just to prevent the bit
						  *from being reused.
						  */
#define OBD_CONNECT_ATTRFID	       0x4000ULL /*Server can GetAttr By Fid*/
#define OBD_CONNECT_NODEVOH	       0x8000ULL /*No open hndl on specl nodes*/
#define OBD_CONNECT_RMT_CLIENT	      0x10000ULL /* Remote client, never used
						  * in production. Removed in
						  * 2.9. Keep this flag to
						  * avoid reuse.
						  */
#define OBD_CONNECT_RMT_CLIENT_FORCE  0x20000ULL /* Remote client by force,
						  * never used in production.
						  * Removed in 2.9. Keep this
						  * flag to avoid reuse
						  */
#define OBD_CONNECT_BRW_SIZE	      0x40000ULL /*Max bytes per rpc */
#define OBD_CONNECT_QUOTA64	      0x80000ULL /*Not used since 2.4 */
#define OBD_CONNECT_MDS_CAPA	     0x100000ULL /*MDS capability */
#define OBD_CONNECT_OSS_CAPA	     0x200000ULL /*OSS capability */
#define OBD_CONNECT_CANCELSET	     0x400000ULL /*Early batched cancels. */
#define OBD_CONNECT_SOM		     0x800000ULL /*Size on MDS */
#define OBD_CONNECT_AT		    0x1000000ULL /*client uses AT */
#define OBD_CONNECT_LRU_RESIZE      0x2000000ULL /*LRU resize feature. */
#define OBD_CONNECT_MDS_MDS	    0x4000000ULL /*MDS-MDS connection */
#define OBD_CONNECT_REAL	    0x8000000ULL /*real connection */
#define OBD_CONNECT_CHANGE_QS      0x10000000ULL /*Not used since 2.4 */
#define OBD_CONNECT_CKSUM	   0x20000000ULL /*support several cksum algos*/
#define OBD_CONNECT_FID		   0x40000000ULL /*FID is supported by server */
#define OBD_CONNECT_VBR		   0x80000000ULL /*version based recovery */
#define OBD_CONNECT_LOV_V3	  0x100000000ULL /*client supports LOV v3 EA */
#define OBD_CONNECT_GRANT_SHRINK  0x200000000ULL /* support grant shrink */
#define OBD_CONNECT_SKIP_ORPHAN   0x400000000ULL /* don't reuse orphan objids */
#define OBD_CONNECT_MAX_EASIZE    0x800000000ULL /* preserved for large EA */
#define OBD_CONNECT_FULL20       0x1000000000ULL /* it is 2.0 client */
#define OBD_CONNECT_LAYOUTLOCK   0x2000000000ULL /* client uses layout lock */
#define OBD_CONNECT_64BITHASH    0x4000000000ULL /* client supports 64-bit
						  * directory hash
						  */
#define OBD_CONNECT_MAXBYTES     0x8000000000ULL /* max stripe size */
#define OBD_CONNECT_IMP_RECOV   0x10000000000ULL /* imp recovery support */
#define OBD_CONNECT_JOBSTATS    0x20000000000ULL /* jobid in ptlrpc_body */
#define OBD_CONNECT_UMASK       0x40000000000ULL /* create uses client umask */
#define OBD_CONNECT_EINPROGRESS 0x80000000000ULL /* client handles -EINPROGRESS
						  * RPC error properly
						  */
#define OBD_CONNECT_GRANT_PARAM 0x100000000000ULL/* extra grant params used for
						  * finer space reservation
						  */
#define OBD_CONNECT_FLOCK_OWNER 0x200000000000ULL /* for the fixed 1.8
						   * policy and 2.x server
						   */
#define OBD_CONNECT_LVB_TYPE	0x400000000000ULL /* variable type of LVB */
#define OBD_CONNECT_NANOSEC_TIME 0x800000000000ULL /* nanosecond timestamps */
#define OBD_CONNECT_LIGHTWEIGHT 0x1000000000000ULL/* lightweight connection */
#define OBD_CONNECT_SHORTIO     0x2000000000000ULL/* short io */
#define OBD_CONNECT_PINGLESS	0x4000000000000ULL/* pings not required */
#define OBD_CONNECT_FLOCK_DEAD	0x8000000000000ULL/* flock deadlock detection */
#define OBD_CONNECT_DISP_STRIPE 0x10000000000000ULL/*create stripe disposition*/
#define OBD_CONNECT_OPEN_BY_FID	0x20000000000000ULL	/* open by fid won't pack
							 * name in request
							 */
#define OBD_CONNECT_LFSCK	0x40000000000000ULL/* support online LFSCK */
#define OBD_CONNECT_UNLINK_CLOSE 0x100000000000000ULL/* close file in unlink */
#define OBD_CONNECT_MULTIMODRPCS 0x200000000000000ULL /* support multiple modify
						       *  RPCs in parallel
						       */
#define OBD_CONNECT_DIR_STRIPE	 0x400000000000000ULL/* striped DNE dir */
#define OBD_CONNECT_SUBTREE	 0x800000000000000ULL /* fileset mount */
#define OBD_CONNECT_LOCK_AHEAD	 0x1000000000000000ULL /* lock ahead */
/** bulk matchbits is sent within ptlrpc_body */
#define OBD_CONNECT_BULK_MBITS	 0x2000000000000000ULL
#define OBD_CONNECT_OBDOPACK	 0x4000000000000000ULL /* compact OUT obdo */
#define OBD_CONNECT_FLAGS2	 0x8000000000000000ULL /* second flags word */

/* XXX README XXX:
 * Please DO NOT add flag values here before first ensuring that this same
 * flag value is not in use on some other branch.  Please clear any such
 * changes with senior engineers before starting to use a new flag.  Then,
 * submit a small patch against EVERY branch that ONLY adds the new flag,
 * updates obd_connect_names[] for lprocfs_rd_connect_flags(), adds the
 * flag to check_obd_connect_data(), and updates wiretests accordingly, so it
 * can be approved and landed easily to reserve the flag for future use.
 */

/* The MNE_SWAB flag is overloading the MDS_MDS bit only for the MGS
 * connection.  It is a temporary bug fix for Imperative Recovery interop
 * between 2.2 and 2.3 x86/ppc nodes, and can be removed when interop for
 * 2.2 clients/servers is no longer needed.  LU-1252/LU-1644.
 */
#define OBD_CONNECT_MNE_SWAB		 OBD_CONNECT_MDS_MDS

#define OCD_HAS_FLAG(ocd, flg)  \
	(!!((ocd)->ocd_connect_flags & OBD_CONNECT_##flg))

/* Features required for this version of the client to work with server */
#define CLIENT_CONNECT_MDT_REQD (OBD_CONNECT_IBITS | OBD_CONNECT_FID | \
				 OBD_CONNECT_FULL20)

/* This structure is used for both request and reply.
 *
 * If we eventually have separate connect data for different types, which we
 * almost certainly will, then perhaps we stick a union in here.
 */
struct obd_connect_data {
	__u64 ocd_connect_flags; /* OBD_CONNECT_* per above */
	__u32 ocd_version;	 /* lustre release version number */
	__u32 ocd_grant;	 /* initial cache grant amount (bytes) */
	__u32 ocd_index;	 /* LOV index to connect to */
	__u32 ocd_brw_size;	 /* Maximum BRW size in bytes */
	__u64 ocd_ibits_known;   /* inode bits this client understands */
	__u8  ocd_blocksize;     /* log2 of the backend filesystem blocksize */
	__u8  ocd_inodespace;    /* log2 of the per-inode space consumption */
	__u16 ocd_grant_extent;  /* per-extent grant overhead, in 1K blocks */
	__u32 ocd_unused;	 /* also fix lustre_swab_connect */
	__u64 ocd_transno;       /* first transno from client to be replayed */
	__u32 ocd_group;	 /* MDS group on OST */
	__u32 ocd_cksum_types;   /* supported checksum algorithms */
	__u32 ocd_max_easize;    /* How big LOV EA can be on MDS */
	__u32 ocd_instance;      /* instance # of this target */
	__u64 ocd_maxbytes;      /* Maximum stripe size in bytes */
	/* Fields after ocd_maxbytes are only accessible by the receiver
	 * if the corresponding flag in ocd_connect_flags is set. Accessing
	 * any field after ocd_maxbytes on the receiver without a valid flag
	 * may result in out-of-bound memory access and kernel oops.
	 */
	__u16 ocd_maxmodrpcs;	/* Maximum modify RPCs in parallel */
	__u16 padding0;		/* added 2.1.0. also fix lustre_swab_connect */
	__u32 padding1;		/* added 2.1.0. also fix lustre_swab_connect */
	__u64 ocd_connect_flags2;
	__u64 padding3;	  /* added 2.1.0. also fix lustre_swab_connect */
	__u64 padding4;	  /* added 2.1.0. also fix lustre_swab_connect */
	__u64 padding5;	  /* added 2.1.0. also fix lustre_swab_connect */
	__u64 padding6;	  /* added 2.1.0. also fix lustre_swab_connect */
	__u64 padding7;	  /* added 2.1.0. also fix lustre_swab_connect */
	__u64 padding8;	  /* added 2.1.0. also fix lustre_swab_connect */
	__u64 padding9;	  /* added 2.1.0. also fix lustre_swab_connect */
	__u64 paddingA;	  /* added 2.1.0. also fix lustre_swab_connect */
	__u64 paddingB;	  /* added 2.1.0. also fix lustre_swab_connect */
	__u64 paddingC;	  /* added 2.1.0. also fix lustre_swab_connect */
	__u64 paddingD;	  /* added 2.1.0. also fix lustre_swab_connect */
	__u64 paddingE;	  /* added 2.1.0. also fix lustre_swab_connect */
	__u64 paddingF;	  /* added 2.1.0. also fix lustre_swab_connect */
};
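
/*
 * Illustrative sketch (not part of the original header): typical use of
 * OCD_HAS_FLAG() on negotiated connect data; the function name is
 * hypothetical.
 */
static inline bool ocd_supports_jobstats_example(const struct obd_connect_data *ocd)
{
	return OCD_HAS_FLAG(ocd, JOBSTATS); /* tests OBD_CONNECT_JOBSTATS */
}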

/* XXX README XXX:
 * Please DO NOT use any fields here before first ensuring that this same
 * field is not in use on some other branch.  Please clear any such changes
 * with senior engineers before starting to use a new field.  Then, submit
 * a small patch against EVERY branch that ONLY adds the new field along with
 * the matching OBD_CONNECT flag, so that can be approved and landed easily to
 * reserve the flag for future use.
 */

/*
 * Supported checksum algorithms. Up to 32 checksum types are supported.
 * (32-bit mask stored in obd_connect_data::ocd_cksum_types)
 * Please update DECLARE_CKSUM_NAME/OBD_CKSUM_ALL in obd.h when adding a new
 * algorithm and also the OBD_FL_CKSUM* flags.
 */
enum cksum_type {
	OBD_CKSUM_CRC32  = 0x00000001,
	OBD_CKSUM_ADLER  = 0x00000002,
	OBD_CKSUM_CRC32C = 0x00000004,
};

/*
 *   OST requests: OBDO & OBD request records
 */

/* opcodes */
enum ost_cmd {
	OST_REPLY      =  0,       /* reply ? */
	OST_GETATTR    =  1,
	OST_SETATTR    =  2,
	OST_READ       =  3,
	OST_WRITE      =  4,
	OST_CREATE     =  5,
	OST_DESTROY    =  6,
	OST_GET_INFO   =  7,
	OST_CONNECT    =  8,
	OST_DISCONNECT =  9,
	OST_PUNCH      = 10,
	OST_OPEN       = 11,
	OST_CLOSE      = 12,
	OST_STATFS     = 13,
	OST_SYNC       = 16,
	OST_SET_INFO   = 17,
	OST_QUOTACHECK = 18, /* not used since 2.4 */
	OST_QUOTACTL   = 19,
	OST_QUOTA_ADJUST_QUNIT = 20, /* not used since 2.4 */
	OST_LAST_OPC
};
#define OST_FIRST_OPC  OST_REPLY

enum obdo_flags {
	OBD_FL_INLINEDATA   = 0x00000001,
	OBD_FL_OBDMDEXISTS  = 0x00000002,
	OBD_FL_DELORPHAN    = 0x00000004, /* if set in o_flags delete orphans */
	OBD_FL_NORPC	    = 0x00000008, /* if set in o_flags, do in OSC, not OST */
	OBD_FL_IDONLY       = 0x00000010, /* if set in o_flags, only adjust obj id */
	OBD_FL_RECREATE_OBJS = 0x00000020, /* recreate missing obj */
	OBD_FL_DEBUG_CHECK  = 0x00000040, /* echo client/server debug check */
	OBD_FL_NO_USRQUOTA  = 0x00000100, /* the object's owner is over quota */
	OBD_FL_NO_GRPQUOTA  = 0x00000200, /* the object's group is over quota */
	OBD_FL_CREATE_CROW  = 0x00000400, /* object should be created on write */
	OBD_FL_SRVLOCK      = 0x00000800, /* delegate DLM locking to server */
	OBD_FL_CKSUM_CRC32  = 0x00001000, /* CRC32 checksum type */
	OBD_FL_CKSUM_ADLER  = 0x00002000, /* ADLER checksum type */
	OBD_FL_CKSUM_CRC32C = 0x00004000, /* CRC32C checksum type */
	OBD_FL_CKSUM_RSVD2  = 0x00008000, /* for future cksum types */
	OBD_FL_CKSUM_RSVD3  = 0x00010000, /* for future cksum types */
	OBD_FL_SHRINK_GRANT = 0x00020000, /* object shrink the grant */
	OBD_FL_MMAP	    = 0x00040000, /* object is mmapped on the client.
					   * XXX: obsolete - reserved for old
					   * clients prior to 2.2
					   */
	OBD_FL_RECOV_RESEND = 0x00080000, /* recoverable resent */
	OBD_FL_NOSPC_BLK    = 0x00100000, /* no more block space on OST */
	OBD_FL_FLUSH	    = 0x00200000, /* flush pages on the OST */
	OBD_FL_SHORT_IO	    = 0x00400000, /* short io request */

	/* Note that while these checksum values are currently separate bits,
	 * in 2.x we can actually allow all values from 1-31 if we wanted.
	 */
	OBD_FL_CKSUM_ALL    = OBD_FL_CKSUM_CRC32 | OBD_FL_CKSUM_ADLER |
			      OBD_FL_CKSUM_CRC32C,

	/* mask for local-only flag, which won't be sent over network */
	OBD_FL_LOCAL_MASK   = 0xF0000000,
};

/*
 * All LOV EA magics should have the same postfix. If some new version of
 * Lustre introduces a new LOV EA magic, then when downgrading to an old
 * Lustre, even though the old version does not recognize the new magic,
 * it can still distinguish the corrupted cases by checking the magic's
 * postfix.
 */
#define LOV_MAGIC_MAGIC 0x0BD0
#define LOV_MAGIC_MASK  0xFFFF

#define LOV_MAGIC_V1		(0x0BD10000 | LOV_MAGIC_MAGIC)
#define LOV_MAGIC_JOIN_V1	(0x0BD20000 | LOV_MAGIC_MAGIC)
#define LOV_MAGIC_V3		(0x0BD30000 | LOV_MAGIC_MAGIC)
#define LOV_MAGIC_MIGRATE	(0x0BD40000 | LOV_MAGIC_MAGIC)
/* reserved for specifying OSTs */
#define LOV_MAGIC_SPECIFIC	(0x0BD50000 | LOV_MAGIC_MAGIC)
#define LOV_MAGIC		LOV_MAGIC_V1
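
/*
 * Illustrative sketch of the postfix check described above; lmm_magic is a
 * hypothetical local holding an EA magic read from disk or the wire:
 *
 *	if ((lmm_magic & LOV_MAGIC_MASK) != LOV_MAGIC_MAGIC)
 *		the buffer is not a LOV EA at all (likely corruption),
 *		even if the version part of the magic is unknown
 */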

/*
 * Magic for fully defined striping.
 * The idea is that we should have different magics for striping "hints"
 * (struct lov_user_md_v[13]) and defined, ready-to-use striping (struct
 * lov_mds_md_v[13]). At the moment the magics are used in the wire protocol,
 * so we can't just change them without lengthy preparation, but we still
 * need a mechanism to allow LOD to differentiate hint versus ready striping.
 * So, for the moment, we use a trick: the MDT knows what to expect from a
 * request depending on the case (replay uses ready striping, a non-replay
 * request uses hints), so the MDT replaces the magic with the appropriate
 * one, and now LOD can easily understand what's inside. -bzzz
 */
#define LOV_MAGIC_V1_DEF  0x0CD10BD0
#define LOV_MAGIC_V3_DEF  0x0CD30BD0

#define lov_pattern(pattern)		(pattern & ~LOV_PATTERN_F_MASK)
#define lov_pattern_flags(pattern)	(pattern & LOV_PATTERN_F_MASK)

#define lov_ost_data lov_ost_data_v1
struct lov_ost_data_v1 {	  /* per-stripe data structure (little-endian)*/
	struct ost_id l_ost_oi;	  /* OST object ID */
	__u32 l_ost_gen;	  /* generation of this l_ost_idx */
	__u32 l_ost_idx;	  /* OST index in LOV (lov_tgt_desc->tgts) */
};

#define lov_mds_md lov_mds_md_v1
struct lov_mds_md_v1 {	    /* LOV EA mds/wire data (little-endian) */
	__u32 lmm_magic;	  /* magic number = LOV_MAGIC_V1 */
	__u32 lmm_pattern;	/* LOV_PATTERN_RAID0, LOV_PATTERN_RAID1 */
	struct ost_id	lmm_oi;	  /* LOV object ID */
	__u32 lmm_stripe_size;    /* size of stripe in bytes */
	/* lmm_stripe_count used to be __u32 */
	__u16 lmm_stripe_count;   /* num stripes in use for this object */
	__u16 lmm_layout_gen;     /* layout generation number */
	struct lov_ost_data_v1 lmm_objects[0]; /* per-stripe data */
};

/**
 * Sigh, because pre-2.4 uses
 * struct lov_mds_md_v1 {
 *	........
 *	__u64 lmm_object_id;
 *	__u64 lmm_object_seq;
 *      ......
 *      }
 * to identify the LOV(MDT) object, and lmm_object_seq will be
 * normal_fid, which makes it hard to combine these conversions
 * into ostid_to_fid(), so we do the lmm_oi/fid conversion separately.
 *
 * We can tell the lmm_oi apart as follows:
 * 1.8: lmm_object_id = {inode}, lmm_object_gr = 0
 * 2.1: lmm_object_id = {oid < 128k}, lmm_object_seq = FID_SEQ_NORMAL
 * 2.4: lmm_oi.f_seq = FID_SEQ_NORMAL, lmm_oi.f_oid = {oid < 128k},
 *      lmm_oi.f_ver = 0
 *
 * But currently lmm_oi/lsm_oi does not have any "real" usage
 * except for printing some information, and the user can always
 * get the real FID from the LMA; besides, this multiple-case check
 * would make swabbing more complicated. So we keep using id/seq for
 * lmm_oi.
 */

static inline void fid_to_lmm_oi(const struct lu_fid *fid,
				 struct ost_id *oi)
{
	oi->oi.oi_id = fid_oid(fid);
	oi->oi.oi_seq = fid_seq(fid);
}

static inline void lmm_oi_set_seq(struct ost_id *oi, __u64 seq)
{
	oi->oi.oi_seq = seq;
}

static inline void lmm_oi_set_id(struct ost_id *oi, __u64 oid)
{
	oi->oi.oi_id = oid;
}

static inline __u64 lmm_oi_id(const struct ost_id *oi)
{
	return oi->oi.oi_id;
}

static inline __u64 lmm_oi_seq(const struct ost_id *oi)
{
	return oi->oi.oi_seq;
}

static inline void lmm_oi_le_to_cpu(struct ost_id *dst_oi,
				    const struct ost_id *src_oi)
{
	dst_oi->oi.oi_id = le64_to_cpu(src_oi->oi.oi_id);
	dst_oi->oi.oi_seq = le64_to_cpu(src_oi->oi.oi_seq);
}

static inline void lmm_oi_cpu_to_le(struct ost_id *dst_oi,
				    const struct ost_id *src_oi)
{
	dst_oi->oi.oi_id = cpu_to_le64(src_oi->oi.oi_id);
	dst_oi->oi.oi_seq = cpu_to_le64(src_oi->oi.oi_seq);
}

#define MAX_MD_SIZE							\
	(sizeof(struct lov_mds_md) + 4 * sizeof(struct lov_ost_data))
#define MIN_MD_SIZE							\
	(sizeof(struct lov_mds_md) + 1 * sizeof(struct lov_ost_data))

#define XATTR_NAME_ACL_ACCESS   "system.posix_acl_access"
#define XATTR_NAME_ACL_DEFAULT  "system.posix_acl_default"
#define XATTR_USER_PREFIX       "user."
#define XATTR_TRUSTED_PREFIX    "trusted."
#define XATTR_SECURITY_PREFIX   "security."
#define XATTR_LUSTRE_PREFIX     "lustre."

#define XATTR_NAME_LOV	  "trusted.lov"
#define XATTR_NAME_LMA	  "trusted.lma"
#define XATTR_NAME_LMV	  "trusted.lmv"
#define XATTR_NAME_DEFAULT_LMV	"trusted.dmv"
#define XATTR_NAME_LINK	 "trusted.link"
#define XATTR_NAME_FID	  "trusted.fid"
#define XATTR_NAME_VERSION      "trusted.version"
#define XATTR_NAME_SOM		"trusted.som"
#define XATTR_NAME_HSM		"trusted.hsm"
#define XATTR_NAME_LFSCK_NAMESPACE "trusted.lfsck_namespace"

struct lov_mds_md_v3 {	    /* LOV EA mds/wire data (little-endian) */
	__u32 lmm_magic;	  /* magic number = LOV_MAGIC_V3 */
	__u32 lmm_pattern;	/* LOV_PATTERN_RAID0, LOV_PATTERN_RAID1 */
	struct ost_id	lmm_oi;	  /* LOV object ID */
	__u32 lmm_stripe_size;    /* size of stripe in bytes */
	/* lmm_stripe_count used to be __u32 */
	__u16 lmm_stripe_count;   /* num stripes in use for this object */
	__u16 lmm_layout_gen;     /* layout generation number */
	char  lmm_pool_name[LOV_MAXPOOLNAME + 1]; /* must be 32bit aligned */
	struct lov_ost_data_v1 lmm_objects[0]; /* per-stripe data */
};

static inline __u32 lov_mds_md_size(__u16 stripes, __u32 lmm_magic)
{
	if (lmm_magic == LOV_MAGIC_V3)
		return sizeof(struct lov_mds_md_v3) +
				stripes * sizeof(struct lov_ost_data_v1);
	else
		return sizeof(struct lov_mds_md_v1) +
				stripes * sizeof(struct lov_ost_data_v1);
}

static inline __u32
lov_mds_md_max_stripe_count(size_t buf_size, __u32 lmm_magic)
{
	switch (lmm_magic) {
	case LOV_MAGIC_V1: {
		struct lov_mds_md_v1 lmm;

		if (buf_size < sizeof(lmm))
			return 0;

		return (buf_size - sizeof(lmm)) / sizeof(lmm.lmm_objects[0]);
	}
	case LOV_MAGIC_V3: {
		struct lov_mds_md_v3 lmm;

		if (buf_size < sizeof(lmm))
			return 0;

		return (buf_size - sizeof(lmm)) / sizeof(lmm.lmm_objects[0]);
	}
	default:
		return 0;
	}
}
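
/*
 * Illustrative sketch: a receiver can use the two helpers above to validate
 * a LOV EA buffer before walking lmm_objects[] (lmm and buf_size are
 * hypothetical locals):
 *
 *	__u16 count = le16_to_cpu(lmm->lmm_stripe_count);
 *
 *	if (count > lov_mds_md_max_stripe_count(buf_size,
 *						le32_to_cpu(lmm->lmm_magic)))
 *		return -EINVAL;	(EA claims more stripes than the buffer holds)
 *	if (lov_mds_md_size(count, le32_to_cpu(lmm->lmm_magic)) > buf_size)
 *		return -EINVAL;
 */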

#define OBD_MD_FLID	   (0x00000001ULL) /* object ID */
#define OBD_MD_FLATIME     (0x00000002ULL) /* access time */
#define OBD_MD_FLMTIME     (0x00000004ULL) /* data modification time */
#define OBD_MD_FLCTIME     (0x00000008ULL) /* change time */
#define OBD_MD_FLSIZE      (0x00000010ULL) /* size */
#define OBD_MD_FLBLOCKS    (0x00000020ULL) /* allocated blocks count */
#define OBD_MD_FLBLKSZ     (0x00000040ULL) /* block size */
#define OBD_MD_FLMODE      (0x00000080ULL) /* access bits (mode & ~S_IFMT) */
#define OBD_MD_FLTYPE      (0x00000100ULL) /* object type (mode & S_IFMT) */
#define OBD_MD_FLUID       (0x00000200ULL) /* user ID */
#define OBD_MD_FLGID       (0x00000400ULL) /* group ID */
#define OBD_MD_FLFLAGS     (0x00000800ULL) /* flags word */
#define OBD_MD_FLNLINK     (0x00002000ULL) /* link count */
#define OBD_MD_FLGENER     (0x00004000ULL) /* generation number */
/*#define OBD_MD_FLINLINE    (0x00008000ULL)  inline data. used until 1.6.5 */
#define OBD_MD_FLRDEV      (0x00010000ULL) /* device number */
#define OBD_MD_FLEASIZE    (0x00020000ULL) /* extended attribute data */
#define OBD_MD_LINKNAME    (0x00040000ULL) /* symbolic link target */
#define OBD_MD_FLHANDLE    (0x00080000ULL) /* file/lock handle */
#define OBD_MD_FLCKSUM     (0x00100000ULL) /* bulk data checksum */
#define OBD_MD_FLQOS       (0x00200000ULL) /* quality of service stats */
/*#define OBD_MD_FLOSCOPQ    (0x00400000ULL) osc opaque data, never used */
/*	OBD_MD_FLCOOKIE    (0x00800000ULL) obsolete in 2.8 */
#define OBD_MD_FLGROUP     (0x01000000ULL) /* group */
#define OBD_MD_FLFID       (0x02000000ULL) /* ->ost write inline fid */
#define OBD_MD_FLEPOCH     (0x04000000ULL) /* ->ost write with ioepoch */
					   /* ->mds if epoch opens or closes
					    */
#define OBD_MD_FLGRANT     (0x08000000ULL) /* ost preallocation space grant */
#define OBD_MD_FLDIREA     (0x10000000ULL) /* dir's extended attribute data */
#define OBD_MD_FLUSRQUOTA  (0x20000000ULL) /* over quota flags sent from ost */
#define OBD_MD_FLGRPQUOTA  (0x40000000ULL) /* over quota flags sent from ost */
#define OBD_MD_FLMODEASIZE (0x80000000ULL) /* EA size will be changed */

#define OBD_MD_MDS	   (0x0000000100000000ULL) /* where an inode lives on */
#define OBD_MD_REINT       (0x0000000200000000ULL) /* reintegrate oa */
#define OBD_MD_MEA	   (0x0000000400000000ULL) /* CMD split EA  */
#define OBD_MD_TSTATE      (0x0000000800000000ULL) /* transient state field */

#define OBD_MD_FLXATTR       (0x0000001000000000ULL) /* xattr */
#define OBD_MD_FLXATTRLS     (0x0000002000000000ULL) /* xattr list */
#define OBD_MD_FLXATTRRM     (0x0000004000000000ULL) /* xattr remove */
#define OBD_MD_FLACL	     (0x0000008000000000ULL) /* ACL */
/*	OBD_MD_FLRMTPERM     (0x0000010000000000ULL) remote perm, obsolete */
#define OBD_MD_FLMDSCAPA     (0x0000020000000000ULL) /* MDS capability */
#define OBD_MD_FLOSSCAPA     (0x0000040000000000ULL) /* OSS capability */
#define OBD_MD_FLCKSPLIT     (0x0000080000000000ULL) /* Check split on server */
#define OBD_MD_FLCROSSREF    (0x0000100000000000ULL) /* Cross-ref case */
#define OBD_MD_FLGETATTRLOCK (0x0000200000000000ULL) /* Get IOEpoch attributes
						      * under lock; for xattr
						      * requests means the
						      * client holds the lock
						      */
#define OBD_MD_FLOBJCOUNT    (0x0000400000000000ULL) /* for multiple destroy */

/*	OBD_MD_FLRMTLSETFACL (0x0001000000000000ULL) lfs lsetfacl, obsolete */
/*	OBD_MD_FLRMTLGETFACL (0x0002000000000000ULL) lfs lgetfacl, obsolete */
/*	OBD_MD_FLRMTRSETFACL (0x0004000000000000ULL) lfs rsetfacl, obsolete */
/*	OBD_MD_FLRMTRGETFACL (0x0008000000000000ULL) lfs rgetfacl, obsolete */

#define OBD_MD_FLDATAVERSION (0x0010000000000000ULL) /* iversion sum */
#define OBD_MD_CLOSE_INTENT_EXECED (0x0020000000000000ULL) /* close intent
							    * executed
							    */

#define OBD_MD_DEFAULT_MEA   (0x0040000000000000ULL) /* default MEA */

#define OBD_MD_FLGETATTR (OBD_MD_FLID    | OBD_MD_FLATIME | OBD_MD_FLMTIME | \
			  OBD_MD_FLCTIME | OBD_MD_FLSIZE  | OBD_MD_FLBLKSZ | \
			  OBD_MD_FLMODE  | OBD_MD_FLTYPE  | OBD_MD_FLUID   | \
			  OBD_MD_FLGID   | OBD_MD_FLFLAGS | OBD_MD_FLNLINK | \
			  OBD_MD_FLGENER | OBD_MD_FLRDEV  | OBD_MD_FLGROUP)

#define OBD_MD_FLXATTRALL (OBD_MD_FLXATTR | OBD_MD_FLXATTRLS)

/* don't forget obdo_fid which is way down at the bottom so it can
 * come after the definition of llog_cookie
 */

enum hss_valid {
	HSS_SETMASK	= 0x01,
	HSS_CLEARMASK	= 0x02,
	HSS_ARCHIVE_ID	= 0x04,
};

struct hsm_state_set {
	__u32	hss_valid;
	__u32	hss_archive_id;
	__u64	hss_setmask;
	__u64	hss_clearmask;
};

/* ost_body.data values for OST_BRW */

#define OBD_BRW_READ		0x01
#define OBD_BRW_WRITE		0x02
#define OBD_BRW_RWMASK		(OBD_BRW_READ | OBD_BRW_WRITE)
#define OBD_BRW_SYNC		0x08 /* this page is a part of synchronous
				      * transfer and is not accounted in
				      * the grant.
				      */
#define OBD_BRW_CHECK		0x10
#define OBD_BRW_FROM_GRANT      0x20 /* the osc manages this under llite */
#define OBD_BRW_GRANTED		0x40 /* the ost manages this */
#define OBD_BRW_NOCACHE		0x80 /* this page is a part of non-cached IO */
#define OBD_BRW_NOQUOTA	       0x100
#define OBD_BRW_SRVLOCK	       0x200 /* Client holds no lock over this page */
#define OBD_BRW_ASYNC	       0x400 /* Server may delay commit to disk */
#define OBD_BRW_MEMALLOC       0x800 /* Client runs in the "kswapd" context */
#define OBD_BRW_OVER_USRQUOTA 0x1000 /* Running out of user quota */
#define OBD_BRW_OVER_GRPQUOTA 0x2000 /* Running out of group quota */
#define OBD_BRW_SOFT_SYNC     0x4000 /* This flag notifies the server
				      * that the client is running low on
				      * space for unstable pages; asking
				      * it to sync quickly
				      */

#define OBD_OBJECT_EOF	LUSTRE_EOF

#define OST_MIN_PRECREATE 32
#define OST_MAX_PRECREATE 20000

struct obd_ioobj {
	struct ost_id	ioo_oid;	/* object ID, if multi-obj BRW */
	__u32		ioo_max_brw;	/* low 16 bits were o_mode before 2.4,
					 * now (PTLRPC_BULK_OPS_COUNT - 1) in
					 * high 16 bits in 2.4 and later
					 */
	__u32		ioo_bufcnt;	/* number of niobufs for this object */
};

/*
 * NOTE: IOOBJ_MAX_BRW_BITS defines the _offset_ of the max_brw field in
 * ioo_max_brw, NOT the maximum number of bits in PTLRPC_BULK_OPS_BITS.
 * That said, ioo_max_brw is a 32-bit field so the limit is also 16 bits.
 */
#define IOOBJ_MAX_BRW_BITS	16
#define ioobj_max_brw_get(ioo)	(((ioo)->ioo_max_brw >> IOOBJ_MAX_BRW_BITS) + 1)
#define ioobj_max_brw_set(ioo, num)					\
do { (ioo)->ioo_max_brw = ((num) - 1) << IOOBJ_MAX_BRW_BITS; } while (0)
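
/*
 * Illustrative sketch: the max_brw value round-trips through the high 16
 * bits of ioo_max_brw (ioo is a hypothetical local):
 *
 *	ioobj_max_brw_set(&ioo, 8);	stores (8 - 1) << 16
 *	n = ioobj_max_brw_get(&ioo);	yields 8 again
 *
 * Note that the set macro assigns the whole field, clobbering the low 16
 * bits that held o_mode before 2.4.
 */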

/* multiple of 8 bytes => can array */
struct niobuf_remote {
	__u64	rnb_offset;
	__u32	rnb_len;
	__u32	rnb_flags;
};

/* lock value block communicated between the filter and llite */

/* OST_LVB_ERR_INIT is needed because the return code in rc is
 * negative, i.e. because ((MASK + rc) & MASK) != MASK.
 */
#define OST_LVB_ERR_INIT 0xffbadbad80000000ULL
#define OST_LVB_ERR_MASK 0xffbadbad00000000ULL
#define OST_LVB_IS_ERR(blocks)					  \
	((blocks & OST_LVB_ERR_MASK) == OST_LVB_ERR_MASK)
#define OST_LVB_SET_ERR(blocks, rc)				     \
	do { blocks = OST_LVB_ERR_INIT + rc; } while (0)
#define OST_LVB_GET_ERR(blocks)    (int)(blocks - OST_LVB_ERR_INIT)
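
/*
 * Illustrative sketch: an error code round-trips through lvb_blocks (lvb is
 * a hypothetical local):
 *
 *	OST_LVB_SET_ERR(lvb->lvb_blocks, -ENOENT);
 *	if (OST_LVB_IS_ERR(lvb->lvb_blocks))
 *		rc = OST_LVB_GET_ERR(lvb->lvb_blocks);	now rc == -ENOENT
 *
 * The MASK comparison still matches because a small negative rc only
 * borrows from the low 32 bits of OST_LVB_ERR_INIT.
 */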

struct ost_lvb_v1 {
	__u64		lvb_size;
	__s64		lvb_mtime;
	__s64		lvb_atime;
	__s64		lvb_ctime;
	__u64		lvb_blocks;
};

struct ost_lvb {
	__u64		lvb_size;
	__s64		lvb_mtime;
	__s64		lvb_atime;
	__s64		lvb_ctime;
	__u64		lvb_blocks;
	__u32		lvb_mtime_ns;
	__u32		lvb_atime_ns;
	__u32		lvb_ctime_ns;
	__u32		lvb_padding;
};

/*
 *   lquota data structures
 */

/* The lquota_id structure is a union of all the possible identifier types
 * that can be used with quota; this includes:
 * - 64-bit user ID
 * - 64-bit group ID
 * - a FID which can be used for per-directory quota in the future
 */
union lquota_id {
	struct lu_fid	qid_fid; /* FID for per-directory quota */
	__u64		qid_uid; /* user identifier */
	__u64		qid_gid; /* group identifier */
};

/* quotactl management */
struct obd_quotactl {
	__u32			qc_cmd;
	__u32			qc_type; /* see Q_* flag below */
	__u32			qc_id;
	__u32			qc_stat;
	struct obd_dqinfo	qc_dqinfo;
	struct obd_dqblk	qc_dqblk;
};

#define Q_COPY(out, in, member) (out)->member = (in)->member

#define QCTL_COPY(out, in)		\
do {					\
	Q_COPY(out, in, qc_cmd);	\
	Q_COPY(out, in, qc_type);	\
	Q_COPY(out, in, qc_id);		\
	Q_COPY(out, in, qc_stat);	\
	Q_COPY(out, in, qc_dqinfo);	\
	Q_COPY(out, in, qc_dqblk);	\
} while (0)

/* Data structures associated with the quota locks */

/* Glimpse descriptor used for the index & per-ID quota locks */
struct ldlm_gl_lquota_desc {
	union lquota_id	gl_id;    /* quota ID subject to the glimpse */
	__u64		gl_flags; /* see LQUOTA_FL* below */
	__u64		gl_ver;   /* new index version */
	__u64		gl_hardlimit; /* new hardlimit or qunit value */
	__u64		gl_softlimit; /* new softlimit */
	__u64		gl_time;
	__u64		gl_pad2;
};

/* quota glimpse flags */
#define LQUOTA_FL_EDQUOT 0x1 /* user/group out of quota space on QMT */

/* LVB used with quota (global and per-ID) locks */
struct lquota_lvb {
	__u64	lvb_flags;	/* see LQUOTA_FL* above */
	__u64	lvb_id_may_rel; /* space that might be released later */
	__u64	lvb_id_rel;     /* space released by the slave for this ID */
	__u64	lvb_id_qunit;   /* current qunit value */
	__u64	lvb_pad1;
};

/* op codes */
enum quota_cmd {
	QUOTA_DQACQ	= 601,
	QUOTA_DQREL	= 602,
	QUOTA_LAST_OPC
};
#define QUOTA_FIRST_OPC	QUOTA_DQACQ

/*
 *   MDS REQ RECORDS
 */

/* opcodes */
enum mds_cmd {
	MDS_GETATTR		= 33,
	MDS_GETATTR_NAME	= 34,
	MDS_CLOSE		= 35,
	MDS_REINT		= 36,
	MDS_READPAGE		= 37,
	MDS_CONNECT		= 38,
	MDS_DISCONNECT		= 39,
	MDS_GETSTATUS		= 40,
	MDS_STATFS		= 41,
	MDS_PIN			= 42, /* obsolete, never used in a release */
	MDS_UNPIN		= 43, /* obsolete, never used in a release */
	MDS_SYNC		= 44,
	MDS_DONE_WRITING	= 45, /* obsolete since 2.8.0 */
	MDS_SET_INFO		= 46,
	MDS_QUOTACHECK		= 47, /* not used since 2.4 */
	MDS_QUOTACTL		= 48,
	MDS_GETXATTR		= 49,
	MDS_SETXATTR		= 50, /* obsolete, now it's MDS_REINT op */
	MDS_WRITEPAGE		= 51,
	MDS_IS_SUBDIR		= 52, /* obsolete, never used in a release */
	MDS_GET_INFO		= 53,
	MDS_HSM_STATE_GET	= 54,
	MDS_HSM_STATE_SET	= 55,
	MDS_HSM_ACTION		= 56,
	MDS_HSM_PROGRESS	= 57,
	MDS_HSM_REQUEST		= 58,
	MDS_HSM_CT_REGISTER	= 59,
	MDS_HSM_CT_UNREGISTER	= 60,
	MDS_SWAP_LAYOUTS	= 61,
	MDS_LAST_OPC
};

#define MDS_FIRST_OPC    MDS_GETATTR

/*
 * Do not exceed 63
 */

enum mdt_reint_cmd {
	REINT_SETATTR  = 1,
	REINT_CREATE   = 2,
	REINT_LINK     = 3,
	REINT_UNLINK   = 4,
	REINT_RENAME   = 5,
	REINT_OPEN     = 6,
	REINT_SETXATTR = 7,
	REINT_RMENTRY  = 8,
	REINT_MIGRATE  = 9,
	REINT_MAX
};

/* the disposition of the intent outlines what was executed */
#define DISP_IT_EXECD	0x00000001
#define DISP_LOOKUP_EXECD    0x00000002
#define DISP_LOOKUP_NEG      0x00000004
#define DISP_LOOKUP_POS      0x00000008
#define DISP_OPEN_CREATE     0x00000010
#define DISP_OPEN_OPEN       0x00000020
#define DISP_ENQ_COMPLETE    0x00400000		/* obsolete and unused */
#define DISP_ENQ_OPEN_REF    0x00800000
#define DISP_ENQ_CREATE_REF  0x01000000
#define DISP_OPEN_LOCK       0x02000000
#define DISP_OPEN_LEASE      0x04000000
#define DISP_OPEN_STRIPE     0x08000000
#define DISP_OPEN_DENY		0x10000000

/* INODE LOCK PARTS */
#define MDS_INODELOCK_LOOKUP 0x000001	/* For namespace, dentry etc, and also
					 * was used to protect permission (mode,
					 * owner, group etc) before 2.4.
					 */
#define MDS_INODELOCK_UPDATE 0x000002	/* size, links, timestamps */
#define MDS_INODELOCK_OPEN   0x000004	/* For opened files */
#define MDS_INODELOCK_LAYOUT 0x000008	/* for layout */

/* The PERM bit was added in 2.4, and it is used to protect permission (mode,
 * owner, group, ACL, etc.), i.e. to separate the permission bits from the
 * LOOKUP lock, because for remote directories (in DNE) these locks will be
 * granted by different MDTs (different ldlm namespaces).
 *
 * For a local directory, the MDT always grants UPDATE_LOCK|PERM_LOCK together.
 * For a remote directory, the master MDT, where the remote directory is, will
 * grant UPDATE_LOCK|PERM_LOCK, and the remote MDT, where the name entry is,
 * will grant LOOKUP_LOCK.
 */
#define MDS_INODELOCK_PERM   0x000010
#define MDS_INODELOCK_XATTR  0x000020	/* extended attributes */

#define MDS_INODELOCK_MAXSHIFT 5
/* This FULL lock is useful to take for unlink-type operations */
#define MDS_INODELOCK_FULL ((1 << (MDS_INODELOCK_MAXSHIFT + 1)) - 1)
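
/* With MDS_INODELOCK_MAXSHIFT == 5 this evaluates to 0x3f, i.e. the union of
 * LOOKUP | UPDATE | OPEN | LAYOUT | PERM | XATTR.
 */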

/* NOTE: until Lustre 1.8.7/2.1.1 the fid_ver() was packed into name[2],
 * but was moved into name[1] along with the OID to avoid consuming the
 * name[2,3] fields that need to be used for the quota id (also a FID).
 */
enum {
	LUSTRE_RES_ID_SEQ_OFF = 0,
	LUSTRE_RES_ID_VER_OID_OFF = 1,
	LUSTRE_RES_ID_WAS_VER_OFF = 2, /* see note above */
	LUSTRE_RES_ID_QUOTA_SEQ_OFF = 2,
	LUSTRE_RES_ID_QUOTA_VER_OID_OFF = 3,
	LUSTRE_RES_ID_HSH_OFF = 3
};

#define MDS_STATUS_CONN 1
#define MDS_STATUS_LOV 2

/* These should be identical to their EXT4_*_FL counterparts; they are
 * redefined here only to avoid dragging in fs/ext4/ext4.h
 */
#define LUSTRE_SYNC_FL	 0x00000008 /* Synchronous updates */
#define LUSTRE_IMMUTABLE_FL    0x00000010 /* Immutable file */
#define LUSTRE_APPEND_FL       0x00000020 /* writes to file may only append */
#define LUSTRE_NODUMP_FL	0x00000040 /* do not dump file */
#define LUSTRE_NOATIME_FL      0x00000080 /* do not update atime */
#define LUSTRE_INDEX_FL		0x00001000 /* hash-indexed directory */
#define LUSTRE_DIRSYNC_FL      0x00010000 /* dirsync behaviour (dir only) */
#define LUSTRE_TOPDIR_FL	0x00020000 /* Top of directory hierarchies*/
#define LUSTRE_DIRECTIO_FL	0x00100000 /* Use direct i/o */
#define LUSTRE_INLINE_DATA_FL	0x10000000 /* Inode has inline data. */

/* Convert wire LUSTRE_*_FL to corresponding client local VFS S_* values
 * for the client inode i_flags.  The LUSTRE_*_FL are the Lustre wire
 * protocol equivalents of LDISKFS_*_FL values stored on disk, while
 * the S_* flags are kernel-internal values that change between kernel
 * versions.  These flags are set/cleared via FSFILT_IOC_{GET,SET}_FLAGS.
 * See b=16526 for a full history.
 */
static inline int ll_ext_to_inode_flags(int flags)
{
	return (((flags & LUSTRE_SYNC_FL)      ? S_SYNC      : 0) |
		((flags & LUSTRE_NOATIME_FL)   ? S_NOATIME   : 0) |
		((flags & LUSTRE_APPEND_FL)    ? S_APPEND    : 0) |
		((flags & LUSTRE_DIRSYNC_FL)   ? S_DIRSYNC   : 0) |
		((flags & LUSTRE_IMMUTABLE_FL) ? S_IMMUTABLE : 0));
}

static inline int ll_inode_to_ext_flags(int iflags)
{
	return (((iflags & S_SYNC)      ? LUSTRE_SYNC_FL      : 0) |
		((iflags & S_NOATIME)   ? LUSTRE_NOATIME_FL   : 0) |
		((iflags & S_APPEND)    ? LUSTRE_APPEND_FL    : 0) |
		((iflags & S_DIRSYNC)   ? LUSTRE_DIRSYNC_FL   : 0) |
		((iflags & S_IMMUTABLE) ? LUSTRE_IMMUTABLE_FL : 0));
}

/* 64 possible states */
enum md_transient_state {
	MS_RESTORE	= (1 << 0),	/* restore is running */
};

struct mdt_body {
	struct lu_fid mbo_fid1;
	struct lu_fid mbo_fid2;
	struct lustre_handle mbo_handle;
	__u64	mbo_valid;
	__u64	mbo_size;	/* Offset, in the case of MDS_READPAGE */
	__s64	mbo_mtime;
	__s64	mbo_atime;
	__s64	mbo_ctime;
	__u64	mbo_blocks;	/* XID, in the case of MDS_READPAGE */
	__u64	mbo_ioepoch;
	__u64	mbo_t_state;	/* transient file state defined in
				 * enum md_transient_state
				 * was "ino" until 2.4.0
				 */
	__u32	mbo_fsuid;
	__u32	mbo_fsgid;
	__u32	mbo_capability;
	__u32	mbo_mode;
	__u32	mbo_uid;
	__u32	mbo_gid;
	__u32	mbo_flags;	/* LUSTRE_*_FL file attributes */
	__u32	mbo_rdev;
	__u32	mbo_nlink;	/* #bytes to read in the case of MDS_READPAGE */
	__u32	mbo_unused2;	/* was "generation" until 2.4.0 */
	__u32	mbo_suppgid;
	__u32	mbo_eadatasize;
	__u32	mbo_aclsize;
	__u32	mbo_max_mdsize;
	__u32	mbo_unused3;	/* was max_cookiesize until 2.8 */
	__u32	mbo_uid_h;	/* high 32-bits of uid, for FUID */
	__u32	mbo_gid_h;	/* high 32-bits of gid, for FUID */
	__u32	mbo_padding_5;	/* also fix lustre_swab_mdt_body */
	__u64	mbo_padding_6;
	__u64	mbo_padding_7;
	__u64	mbo_padding_8;
	__u64	mbo_padding_9;
	__u64	mbo_padding_10;
}; /* 216 */

struct mdt_ioepoch {
	struct lustre_handle mio_handle;
	__u64 mio_unused1; /* was ioepoch */
	__u32 mio_unused2; /* was flags */
	__u32 mio_padding;
};

/* permissions for md_perm.mp_perm */
enum {
	CFS_SETUID_PERM = 0x01,
	CFS_SETGID_PERM = 0x02,
	CFS_SETGRP_PERM = 0x04,
};

struct mdt_rec_setattr {
	__u32	   sa_opcode;
	__u32	   sa_cap;
	__u32	   sa_fsuid;
	__u32	   sa_fsuid_h;
	__u32	   sa_fsgid;
	__u32	   sa_fsgid_h;
	__u32	   sa_suppgid;
	__u32	   sa_suppgid_h;
	__u32	   sa_padding_1;
	__u32	   sa_padding_1_h;
	struct lu_fid   sa_fid;
	__u64	   sa_valid;
	__u32	   sa_uid;
	__u32	   sa_gid;
	__u64	   sa_size;
	__u64	   sa_blocks;
	__s64	   sa_mtime;
	__s64	   sa_atime;
	__s64	   sa_ctime;
	__u32	   sa_attr_flags;
	__u32	   sa_mode;
	__u32	   sa_bias;      /* some operation flags */
	__u32	   sa_padding_3;
	__u32	   sa_padding_4;
	__u32	   sa_padding_5;
};

/*
 * Attribute flags used in mdt_rec_setattr::sa_valid.
 * The kernel's #defines for ATTR_* should not be used over the network
 * since the client and MDS may run different kernels (see bug 13828)
 * Therefore, we should only use MDS_ATTR_* attributes for sa_valid.
 */
#define MDS_ATTR_MODE	       0x1ULL /* = 1 */
#define MDS_ATTR_UID	       0x2ULL /* = 2 */
#define MDS_ATTR_GID	       0x4ULL /* = 4 */
#define MDS_ATTR_SIZE	       0x8ULL /* = 8 */
#define MDS_ATTR_ATIME	      0x10ULL /* = 16 */
#define MDS_ATTR_MTIME	      0x20ULL /* = 32 */
#define MDS_ATTR_CTIME	      0x40ULL /* = 64 */
#define MDS_ATTR_ATIME_SET    0x80ULL /* = 128 */
#define MDS_ATTR_MTIME_SET   0x100ULL /* = 256 */
#define MDS_ATTR_FORCE       0x200ULL /* = 512, not a change, but force it */
#define MDS_ATTR_ATTR_FLAG   0x400ULL /* = 1024 */
#define MDS_ATTR_KILL_SUID   0x800ULL /* = 2048 */
#define MDS_ATTR_KILL_SGID  0x1000ULL /* = 4096 */
#define MDS_ATTR_CTIME_SET  0x2000ULL /* = 8192 */
#define MDS_ATTR_FROM_OPEN  0x4000ULL /* = 16384, called from open path,
				       * ie O_TRUNC
				       */
#define MDS_ATTR_BLOCKS     0x8000ULL /* = 32768 */

#define MDS_FMODE_CLOSED	 00000000
#define MDS_FMODE_EXEC	   00000004
/*	MDS_FMODE_EPOCH		01000000 obsolete since 2.8.0 */
/*	MDS_FMODE_TRUNC		02000000 obsolete since 2.8.0 */
/*	MDS_FMODE_SOM		04000000 obsolete since 2.8.0 */

#define MDS_OPEN_CREATED	 00000010
#define MDS_OPEN_CROSS	   00000020

#define MDS_OPEN_CREAT	   00000100
#define MDS_OPEN_EXCL	    00000200
#define MDS_OPEN_TRUNC	   00001000
#define MDS_OPEN_APPEND	  00002000
#define MDS_OPEN_SYNC	    00010000
#define MDS_OPEN_DIRECTORY       00200000

#define MDS_OPEN_BY_FID		040000000 /* open_by_fid for known object */
#define MDS_OPEN_DELAY_CREATE  0100000000 /* delay initial object create */
#define MDS_OPEN_OWNEROVERRIDE 0200000000 /* NFSD rw-reopen ro file for owner */
#define MDS_OPEN_JOIN_FILE     0400000000 /* open for join file.
					   * We do not support JOIN FILE
					   * anymore, reserve this flags
					   * just for preventing such bit
					   * to be reused.
					   */

#define MDS_OPEN_LOCK	      04000000000 /* This open requires open lock */
#define MDS_OPEN_HAS_EA      010000000000 /* specify object create pattern */
#define MDS_OPEN_HAS_OBJS    020000000000 /* Just set the EA the obj exist */
#define MDS_OPEN_NORESTORE  0100000000000ULL /* Do not restore file at open */
#define MDS_OPEN_NEWSTRIPE  0200000000000ULL /* New stripe needed (restripe or
					      * hsm restore) */
#define MDS_OPEN_VOLATILE   0400000000000ULL /* File is volatile = created
						unlinked */
#define MDS_OPEN_LEASE	   01000000000000ULL /* Open the file and grant lease
					      * delegation, succeed if it's not
					      * being opened with conflict mode.
					      */
#define MDS_OPEN_RELEASE   02000000000000ULL /* Open the file for HSM release */

#define MDS_OPEN_FL_INTERNAL (MDS_OPEN_HAS_EA | MDS_OPEN_HAS_OBJS |	\
			      MDS_OPEN_OWNEROVERRIDE | MDS_OPEN_LOCK |	\
			      MDS_OPEN_BY_FID | MDS_OPEN_LEASE |	\
			      MDS_OPEN_RELEASE)

enum mds_op_bias {
	MDS_CHECK_SPLIT		= 1 << 0,
	MDS_CROSS_REF		= 1 << 1,
	MDS_VTX_BYPASS		= 1 << 2,
	MDS_PERM_BYPASS		= 1 << 3,
/*	MDS_SOM			= 1 << 4, obsolete since 2.8.0 */
	MDS_QUOTA_IGNORE	= 1 << 5,
	MDS_CLOSE_CLEANUP	= 1 << 6,
	MDS_KEEP_ORPHAN		= 1 << 7,
	MDS_RECOV_OPEN		= 1 << 8,
	MDS_DATA_MODIFIED	= 1 << 9,
	MDS_CREATE_VOLATILE	= 1 << 10,
	MDS_OWNEROVERRIDE	= 1 << 11,
	MDS_HSM_RELEASE		= 1 << 12,
	MDS_RENAME_MIGRATE	= BIT(13),
	MDS_CLOSE_LAYOUT_SWAP   = BIT(14),
};

/* instance of mdt_reint_rec */
struct mdt_rec_create {
	__u32	   cr_opcode;
	__u32	   cr_cap;
	__u32	   cr_fsuid;
	__u32	   cr_fsuid_h;
	__u32	   cr_fsgid;
	__u32	   cr_fsgid_h;
	__u32	   cr_suppgid1;
	__u32	   cr_suppgid1_h;
	__u32	   cr_suppgid2;
	__u32	   cr_suppgid2_h;
	struct lu_fid   cr_fid1;
	struct lu_fid   cr_fid2;
	struct lustre_handle cr_old_handle; /* handle in case of open replay */
	__s64	   cr_time;
	__u64	   cr_rdev;
	__u64	   cr_ioepoch;
	__u64	   cr_padding_1;   /* rr_blocks */
	__u32	   cr_mode;
	__u32	   cr_bias;
	/* The set/get_mrc_cr_flags() helpers are needed to access the
	 * 64-bit cr_flags [cr_flags_l, cr_flags_h]; the split was done to
	 * extend the cr_flags size without breaking 1.8 compatibility.
	 */
	__u32	   cr_flags_l;     /* for use with open, low  32 bits  */
	__u32	   cr_flags_h;     /* for use with open, high 32 bits */
	__u32	   cr_umask;       /* umask for create */
	__u32	   cr_padding_4;   /* rr_padding_4 */
};

static inline void set_mrc_cr_flags(struct mdt_rec_create *mrc, __u64 flags)
{
	mrc->cr_flags_l = (__u32)(flags & 0xFFFFFFFFUll);
	mrc->cr_flags_h = (__u32)(flags >> 32);
}

static inline __u64 get_mrc_cr_flags(struct mdt_rec_create *mrc)
{
	return ((__u64)(mrc->cr_flags_l) | ((__u64)mrc->cr_flags_h << 32));
}
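
/*
 * Illustrative sketch: flags above bit 31, e.g. MDS_OPEN_LEASE, survive the
 * split into cr_flags_l/cr_flags_h (mrc is a hypothetical local):
 *
 *	set_mrc_cr_flags(&mrc, MDS_OPEN_LEASE | MDS_OPEN_CREAT);
 *	flags = get_mrc_cr_flags(&mrc);
 *	now flags == (MDS_OPEN_LEASE | MDS_OPEN_CREAT)
 */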

/* instance of mdt_reint_rec */
struct mdt_rec_link {
	__u32	   lk_opcode;
	__u32	   lk_cap;
	__u32	   lk_fsuid;
	__u32	   lk_fsuid_h;
	__u32	   lk_fsgid;
	__u32	   lk_fsgid_h;
	__u32	   lk_suppgid1;
	__u32	   lk_suppgid1_h;
	__u32	   lk_suppgid2;
	__u32	   lk_suppgid2_h;
	struct lu_fid   lk_fid1;
	struct lu_fid   lk_fid2;
	__s64	   lk_time;
	__u64	   lk_padding_1;   /* rr_atime */
	__u64	   lk_padding_2;   /* rr_ctime */
	__u64	   lk_padding_3;   /* rr_size */
	__u64	   lk_padding_4;   /* rr_blocks */
	__u32	   lk_bias;
	__u32	   lk_padding_5;   /* rr_mode */
	__u32	   lk_padding_6;   /* rr_flags */
	__u32	   lk_padding_7;   /* rr_padding_2 */
	__u32	   lk_padding_8;   /* rr_padding_3 */
	__u32	   lk_padding_9;   /* rr_padding_4 */
};

/* instance of mdt_reint_rec */
struct mdt_rec_unlink {
	__u32	   ul_opcode;
	__u32	   ul_cap;
	__u32	   ul_fsuid;
	__u32	   ul_fsuid_h;
	__u32	   ul_fsgid;
	__u32	   ul_fsgid_h;
	__u32	   ul_suppgid1;
	__u32	   ul_suppgid1_h;
	__u32	   ul_suppgid2;
	__u32	   ul_suppgid2_h;
	struct lu_fid   ul_fid1;
	struct lu_fid   ul_fid2;
	__s64	   ul_time;
	__u64	   ul_padding_2;   /* rr_atime */
	__u64	   ul_padding_3;   /* rr_ctime */
	__u64	   ul_padding_4;   /* rr_size */
	__u64	   ul_padding_5;   /* rr_blocks */
	__u32	   ul_bias;
	__u32	   ul_mode;
	__u32	   ul_padding_6;   /* rr_flags */
	__u32	   ul_padding_7;   /* rr_padding_2 */
	__u32	   ul_padding_8;   /* rr_padding_3 */
	__u32	   ul_padding_9;   /* rr_padding_4 */
};

/* instance of mdt_reint_rec */
struct mdt_rec_rename {
	__u32	   rn_opcode;
	__u32	   rn_cap;
	__u32	   rn_fsuid;
	__u32	   rn_fsuid_h;
	__u32	   rn_fsgid;
	__u32	   rn_fsgid_h;
	__u32	   rn_suppgid1;
	__u32	   rn_suppgid1_h;
	__u32	   rn_suppgid2;
	__u32	   rn_suppgid2_h;
	struct lu_fid   rn_fid1;
	struct lu_fid   rn_fid2;
	__s64	   rn_time;
	__u64	   rn_padding_1;   /* rr_atime */
	__u64	   rn_padding_2;   /* rr_ctime */
	__u64	   rn_padding_3;   /* rr_size */
	__u64	   rn_padding_4;   /* rr_blocks */
	__u32	   rn_bias;	/* some operation flags */
	__u32	   rn_mode;	/* cross-ref rename has mode */
	__u32	   rn_padding_5;   /* rr_flags */
	__u32	   rn_padding_6;   /* rr_padding_2 */
	__u32	   rn_padding_7;   /* rr_padding_3 */
	__u32	   rn_padding_8;   /* rr_padding_4 */
};

/* instance of mdt_reint_rec */
struct mdt_rec_setxattr {
	__u32	   sx_opcode;
	__u32	   sx_cap;
	__u32	   sx_fsuid;
	__u32	   sx_fsuid_h;
	__u32	   sx_fsgid;
	__u32	   sx_fsgid_h;
	__u32	   sx_suppgid1;
	__u32	   sx_suppgid1_h;
	__u32	   sx_suppgid2;
	__u32	   sx_suppgid2_h;
	struct lu_fid   sx_fid;
	__u64	   sx_padding_1;   /* These three are rr_fid2 */
	__u32	   sx_padding_2;
	__u32	   sx_padding_3;
	__u64	   sx_valid;
	__s64	   sx_time;
	__u64	   sx_padding_5;   /* rr_ctime */
	__u64	   sx_padding_6;   /* rr_size */
	__u64	   sx_padding_7;   /* rr_blocks */
	__u32	   sx_size;
	__u32	   sx_flags;
	__u32	   sx_padding_8;   /* rr_flags */
	__u32	   sx_padding_9;   /* rr_padding_2 */
	__u32	   sx_padding_10;  /* rr_padding_3 */
	__u32	   sx_padding_11;  /* rr_padding_4 */
};

/*
 * mdt_rec_reint is the template for all mdt_reint_xxx structures.
 * Do NOT change the size of the various members; otherwise the values
 * will be swabbed incorrectly in lustre_swab_mdt_rec_reint().
 *
 * If you add new members in other mdt_reint_xxx structures and need to use the
 * rr_padding_x fields, then update lustre_swab_mdt_rec_reint() also.
 */
struct mdt_rec_reint {
	__u32	   rr_opcode;
	__u32	   rr_cap;
	__u32	   rr_fsuid;
	__u32	   rr_fsuid_h;
	__u32	   rr_fsgid;
	__u32	   rr_fsgid_h;
	__u32	   rr_suppgid1;
	__u32	   rr_suppgid1_h;
	__u32	   rr_suppgid2;
	__u32	   rr_suppgid2_h;
	struct lu_fid   rr_fid1;
	struct lu_fid   rr_fid2;
	__s64	   rr_mtime;
	__s64	   rr_atime;
	__s64	   rr_ctime;
	__u64	   rr_size;
	__u64	   rr_blocks;
	__u32	   rr_bias;
	__u32	   rr_mode;
	__u32	   rr_flags;
	__u32	   rr_flags_h;
	__u32	   rr_umask;
	__u32	   rr_padding_4; /* also fix lustre_swab_mdt_rec_reint */
};

/* lmv structures */
struct lmv_desc {
	__u32 ld_tgt_count;		/* how many MDS's */
	__u32 ld_active_tgt_count;	 /* how many active */
	__u32 ld_default_stripe_count;     /* how many objects are used */
	__u32 ld_pattern;		  /* default hash pattern */
	__u64 ld_default_hash_size;
	__u64 ld_padding_1;		/* also fix lustre_swab_lmv_desc */
	__u32 ld_padding_2;		/* also fix lustre_swab_lmv_desc */
	__u32 ld_qos_maxage;	       /* in seconds */
	__u32 ld_padding_3;		/* also fix lustre_swab_lmv_desc */
	__u32 ld_padding_4;		/* also fix lustre_swab_lmv_desc */
	struct obd_uuid ld_uuid;
};

/* LMV layout EA; it is stored in both the master and the slave objects */
struct lmv_mds_md_v1 {
	__u32 lmv_magic;
	__u32 lmv_stripe_count;
	__u32 lmv_master_mdt_index;	/* On master object, it is master
					 * MDT index, on slave object, it
					 * is stripe index of the slave obj
					 */
	__u32 lmv_hash_type;		/* dir stripe policy, i.e. indicates
					 * which hash function to use.
					 * Note: only the lower 16 bits are
					 * used for now; the higher 16 bits
					 * will be used to mark the object
					 * status, for example migrating or
					 * dead.
					 */
	__u32 lmv_layout_version;	/* Used for directory restriping */
	__u32 lmv_padding1;
	__u64 lmv_padding2;
	__u64 lmv_padding3;
	char lmv_pool_name[LOV_MAXPOOLNAME + 1];/* pool name */
	struct lu_fid lmv_stripe_fids[0];	/* FIDs for each stripe */
};

#define LMV_MAGIC_V1	 0x0CD20CD0	/* normal stripe lmv magic */
#define LMV_MAGIC	 LMV_MAGIC_V1

/* #define LMV_USER_MAGIC 0x0CD30CD0 */
#define LMV_MAGIC_STRIPE 0x0CD40CD0	/* magic for dir sub_stripe */

/*
 *Right now only the lower part(0-16bits) of lmv_hash_type is being used,
 * and the higher part will be the flag to indicate the status of object,
 * for example the object is being migrated. And the hash function
 * might be interpreted differently with different flags.
 */
#define LMV_HASH_TYPE_MASK		0x0000ffff

#define LMV_HASH_FLAG_MIGRATION		0x80000000
#define LMV_HASH_FLAG_DEAD		0x40000000

/**
 * The FNV-1a hash algorithm is as follows:
 *     hash = FNV_offset_basis
 *     for each octet_of_data to be hashed
 *             hash = hash XOR octet_of_data
 *             hash = hash × FNV_prime
 *     return hash
 * http://en.wikipedia.org/wiki/Fowler–Noll–Vo_hash_function#FNV-1a_hash
 *
 * http://www.isthe.com/chongo/tech/comp/fnv/index.html#FNV-reference-source
 * FNV_prime is 2^40 + 2^8 + 0xb3 = 0x100000001b3ULL
 **/
#define LUSTRE_FNV_1A_64_PRIME		0x100000001b3ULL
#define LUSTRE_FNV_1A_64_OFFSET_BIAS	0xcbf29ce484222325ULL
static inline __u64 lustre_hash_fnv_1a_64(const void *buf, size_t size)
{
	__u64 hash = LUSTRE_FNV_1A_64_OFFSET_BIAS;
	const unsigned char *p = buf;
	size_t i;

	for (i = 0; i < size; i++) {
		hash ^= p[i];
		hash *= LUSTRE_FNV_1A_64_PRIME;
	}

	return hash;
}
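
/*
 * Illustrative sketch: striped directories can hash an entry name with this
 * function and reduce it to a stripe index; the plain modulo shown here is
 * an assumption for illustration, not necessarily the exact on-disk policy:
 *
 *	__u64 hash = lustre_hash_fnv_1a_64(name, namelen);
 *	unsigned int stripe = hash % lmv_stripe_count;
 */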

union lmv_mds_md {
	__u32			lmv_magic;
	struct lmv_mds_md_v1	lmv_md_v1;
	struct lmv_user_md	lmv_user_md;
};

static inline ssize_t lmv_mds_md_size(int stripe_count, unsigned int lmm_magic)
{
	ssize_t len = -EINVAL;

	switch (lmm_magic) {
	case LMV_MAGIC_V1: {
		struct lmv_mds_md_v1 *lmm1;

		len = sizeof(*lmm1);
		len += stripe_count * sizeof(lmm1->lmv_stripe_fids[0]);
		break; }
	default:
		break;
	}
	return len;
}

static inline int lmv_mds_md_stripe_count_get(const union lmv_mds_md *lmm)
{
	switch (le32_to_cpu(lmm->lmv_magic)) {
	case LMV_MAGIC_V1:
		return le32_to_cpu(lmm->lmv_md_v1.lmv_stripe_count);
	case LMV_USER_MAGIC:
		return le32_to_cpu(lmm->lmv_user_md.lum_stripe_count);
	default:
		return -EINVAL;
	}
}

static inline int lmv_mds_md_stripe_count_set(union lmv_mds_md *lmm,
					      unsigned int stripe_count)
{
	int rc = 0;

	switch (le32_to_cpu(lmm->lmv_magic)) {
	case LMV_MAGIC_V1:
		lmm->lmv_md_v1.lmv_stripe_count = cpu_to_le32(stripe_count);
		break;
	case LMV_USER_MAGIC:
		lmm->lmv_user_md.lum_stripe_count = cpu_to_le32(stripe_count);
		break;
	default:
		rc = -EINVAL;
		break;
	}
	return rc;
}

enum fld_rpc_opc {
	FLD_QUERY	= 900,
	FLD_READ	= 901,
	FLD_LAST_OPC,
	FLD_FIRST_OPC	= FLD_QUERY
};

enum seq_rpc_opc {
	SEQ_QUERY		       = 700,
	SEQ_LAST_OPC,
	SEQ_FIRST_OPC		   = SEQ_QUERY
};

enum seq_op {
	SEQ_ALLOC_SUPER = 0,
	SEQ_ALLOC_META = 1
};

enum fld_op {
	FLD_CREATE = 0,
	FLD_DELETE = 1,
	FLD_LOOKUP = 2,
};

/*
 *  LOV data structures
 */

#define LOV_MAX_UUID_BUFFER_SIZE  8192
/* The size of the buffer the lov/mdc reserves for the
 * array of UUIDs returned by the MDS.  With the current
 * protocol, this will limit the max number of OSTs per LOV
 */

#define LOV_DESC_MAGIC 0xB0CCDE5C
#define LOV_DESC_QOS_MAXAGE_DEFAULT 5  /* Seconds */
#define LOV_DESC_STRIPE_SIZE_DEFAULT (1 << LNET_MTU_BITS)

/* LOV settings descriptor (should only contain static info) */
struct lov_desc {
	__u32 ld_tgt_count;		/* how many OBD's */
	__u32 ld_active_tgt_count;	/* how many active */
	__u32 ld_default_stripe_count;  /* how many objects are used */
	__u32 ld_pattern;		/* default PATTERN_RAID0 */
	__u64 ld_default_stripe_size;   /* in bytes */
	__u64 ld_default_stripe_offset; /* in bytes */
	__u32 ld_padding_0;		/* unused */
	__u32 ld_qos_maxage;		/* in seconds */
	__u32 ld_padding_1;		/* also fix lustre_swab_lov_desc */
	__u32 ld_padding_2;		/* also fix lustre_swab_lov_desc */
	struct obd_uuid ld_uuid;
};

#define ld_magic ld_active_tgt_count       /* for swabbing from llogs */

/*
 *   LDLM requests:
 */
/* opcodes -- MUST be distinct from OST/MDS opcodes */
enum ldlm_cmd {
	LDLM_ENQUEUE     = 101,
	LDLM_CONVERT     = 102,
	LDLM_CANCEL      = 103,
	LDLM_BL_CALLBACK = 104,
	LDLM_CP_CALLBACK = 105,
	LDLM_GL_CALLBACK = 106,
	LDLM_SET_INFO    = 107,
	LDLM_LAST_OPC
};
#define LDLM_FIRST_OPC LDLM_ENQUEUE

#define RES_NAME_SIZE 4
struct ldlm_res_id {
	__u64 name[RES_NAME_SIZE];
};

#define DLDLMRES	"[%#llx:%#llx:%#llx].%llx"
#define PLDLMRES(res)	(res)->lr_name.name[0], (res)->lr_name.name[1], \
			(res)->lr_name.name[2], (res)->lr_name.name[3]

static inline bool ldlm_res_eq(const struct ldlm_res_id *res0,
			       const struct ldlm_res_id *res1)
{
	return !memcmp(res0, res1, sizeof(*res0));
}

/* lock types */
enum ldlm_mode {
	LCK_MINMODE = 0,
	LCK_EX      = 1,
	LCK_PW      = 2,
	LCK_PR      = 4,
	LCK_CW      = 8,
	LCK_CR      = 16,
	LCK_NL      = 32,
	LCK_GROUP   = 64,
	LCK_COS     = 128,
	LCK_MAXMODE
};

#define LCK_MODE_NUM    8

enum ldlm_type {
	LDLM_PLAIN     = 10,
	LDLM_EXTENT    = 11,
	LDLM_FLOCK     = 12,
	LDLM_IBITS     = 13,
	LDLM_MAX_TYPE
};

#define LDLM_MIN_TYPE LDLM_PLAIN

struct ldlm_extent {
	__u64 start;
	__u64 end;
	__u64 gid;
};

static inline int ldlm_extent_overlap(const struct ldlm_extent *ex1,
				      const struct ldlm_extent *ex2)
{
	return (ex1->start <= ex2->end) && (ex2->start <= ex1->end);
}

/* check if @ex1 contains @ex2 */
static inline int ldlm_extent_contain(const struct ldlm_extent *ex1,
				      const struct ldlm_extent *ex2)
{
	return (ex1->start <= ex2->start) && (ex1->end >= ex2->end);
}
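
/*
 * Illustrative note: extent ends are inclusive, so [0, 4095] and
 * [4096, OBD_OBJECT_EOF] do not overlap, while [0, 4096] and
 * [4096, OBD_OBJECT_EOF] do share exactly the byte at offset 4096.
 */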

struct ldlm_inodebits {
	__u64 bits;
};

struct ldlm_flock_wire {
	__u64 lfw_start;
	__u64 lfw_end;
	__u64 lfw_owner;
	__u32 lfw_padding;
	__u32 lfw_pid;
};

/* It's important that the fields of the ldlm_extent structure match
 * the first fields of the ldlm_flock structure, because there is only
 * one ldlm swab routine to process the ldlm_policy_data_t union. If
 * this ever changes we will need to swab the union differently based
 * on the resource type.
 */

union ldlm_wire_policy_data {
	struct ldlm_extent l_extent;
	struct ldlm_flock_wire l_flock;
	struct ldlm_inodebits l_inodebits;
};

union ldlm_gl_desc {
	struct ldlm_gl_lquota_desc	lquota_desc;
};

enum ldlm_intent_flags {
	IT_OPEN		= BIT(0),
	IT_CREAT	= BIT(1),
	IT_OPEN_CREAT	= BIT(1) | BIT(0),
	IT_READDIR	= BIT(2),
	IT_GETATTR	= BIT(3),
	IT_LOOKUP	= BIT(4),
	IT_UNLINK	= BIT(5),
	IT_TRUNC	= BIT(6),
	IT_GETXATTR	= BIT(7),
	IT_EXEC		= BIT(8),
	IT_PIN		= BIT(9),
	IT_LAYOUT	= BIT(10),
	IT_QUOTA_DQACQ	= BIT(11),
	IT_QUOTA_CONN	= BIT(12),
	IT_SETXATTR	= BIT(13),
};

struct ldlm_intent {
	__u64 opc;
};

struct ldlm_resource_desc {
	enum ldlm_type lr_type;
	__u32 lr_padding;       /* also fix lustre_swab_ldlm_resource_desc */
	struct ldlm_res_id lr_name;
};

struct ldlm_lock_desc {
	struct ldlm_resource_desc l_resource;
	enum ldlm_mode l_req_mode;
	enum ldlm_mode l_granted_mode;
	union ldlm_wire_policy_data l_policy_data;
};

#define LDLM_LOCKREQ_HANDLES 2
#define LDLM_ENQUEUE_CANCEL_OFF 1

struct ldlm_request {
	__u32 lock_flags;
	__u32 lock_count;
	struct ldlm_lock_desc lock_desc;
	struct lustre_handle lock_handle[LDLM_LOCKREQ_HANDLES];
};

/* If LDLM_ENQUEUE, 1 slot is already occupied, 1 is available.
 * Otherwise, 2 are available.
 */
#define ldlm_request_bufsize(count, type)				\
({								      \
	int _avail = LDLM_LOCKREQ_HANDLES;			      \
	_avail -= (type == LDLM_ENQUEUE ? LDLM_ENQUEUE_CANCEL_OFF : 0); \
	sizeof(struct ldlm_request) +				   \
	(count > _avail ? count - _avail : 0) *			 \
	sizeof(struct lustre_handle);				   \
})
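
/*
 * Illustrative sketch: for LDLM_ENQUEUE one embedded slot is consumed by the
 * enqueue itself, leaving one handle free, so cancelling three locks in the
 * same request needs two extra handles:
 *
 *	ldlm_request_bufsize(3, LDLM_ENQUEUE)
 *	    == sizeof(struct ldlm_request) + 2 * sizeof(struct lustre_handle)
 */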

struct ldlm_reply {
	__u32 lock_flags;
	__u32 lock_padding;     /* also fix lustre_swab_ldlm_reply */
	struct ldlm_lock_desc lock_desc;
	struct lustre_handle lock_handle;
	__u64  lock_policy_res1;
	__u64  lock_policy_res2;
};

#define ldlm_flags_to_wire(flags)    ((__u32)(flags))
#define ldlm_flags_from_wire(flags)  ((__u64)(flags))

/*
 * Opcodes for mountconf (mgs and mgc)
 */
enum mgs_cmd {
	MGS_CONNECT = 250,
	MGS_DISCONNECT,
	MGS_EXCEPTION,	 /* node died, etc. */
	MGS_TARGET_REG,	/* whenever target starts up */
	MGS_TARGET_DEL,
	MGS_SET_INFO,
	MGS_CONFIG_READ,
	MGS_LAST_OPC
};
#define MGS_FIRST_OPC MGS_CONNECT

#define MGS_PARAM_MAXLEN 1024
#define KEY_SET_INFO "set_info"

struct mgs_send_param {
	char	     mgs_param[MGS_PARAM_MAXLEN];
};

/* We pass this info to the MGS so it can write config logs */
#define MTI_NAME_MAXLEN  64
#define MTI_PARAM_MAXLEN 4096
#define MTI_NIDS_MAX     32
struct mgs_target_info {
	__u32	    mti_lustre_ver;
	__u32	    mti_stripe_index;
	__u32	    mti_config_ver;
	__u32	    mti_flags;
	__u32	    mti_nid_count;
	__u32	    mti_instance; /* Running instance of target */
	char	     mti_fsname[MTI_NAME_MAXLEN];
	char	     mti_svname[MTI_NAME_MAXLEN];
	char	     mti_uuid[sizeof(struct obd_uuid)];
	__u64	    mti_nids[MTI_NIDS_MAX];     /* host nids (lnet_nid_t)*/
	char	     mti_params[MTI_PARAM_MAXLEN];
};

struct mgs_nidtbl_entry {
	__u64	   mne_version;    /* table version of this entry */
	__u32	   mne_instance;   /* target instance # */
	__u32	   mne_index;      /* target index */
	__u32	   mne_length;     /* length of this entry, in bytes */
	__u8	    mne_type;       /* target type LDD_F_SV_TYPE_OST/MDT */
	__u8	    mne_nid_type;   /* type of NID (must be 0); for IPv6 */
	__u8	    mne_nid_size;   /* size of each NID, in bytes */
	__u8	    mne_nid_count;  /* # of NIDs in buffer */
	union {
		lnet_nid_t nids[0];     /* variable size buffer for NIDs. */
	} u;
};

struct mgs_config_body {
	char     mcb_name[MTI_NAME_MAXLEN]; /* logname */
	__u64    mcb_offset;    /* next index of config log to request */
	__u16    mcb_type;      /* type of log: CONFIG_T_[CONFIG|RECOVER] */
	__u8     mcb_reserved;
	__u8     mcb_bits;      /* bits unit size of config log */
	__u32    mcb_units;     /* # of units for bulk transfer */
};

struct mgs_config_res {
	__u64    mcr_offset;    /* index of last config log */
	__u64    mcr_size;      /* size of the log */
};

/* Config marker flags (in config log) */
#define CM_START       0x01
#define CM_END	 0x02
#define CM_SKIP	0x04
#define CM_UPGRADE146  0x08
#define CM_EXCLUDE     0x10
#define CM_START_SKIP (CM_START | CM_SKIP)

struct cfg_marker {
	__u32	     cm_step;       /* aka config version */
	__u32	     cm_flags;
	__u32	     cm_vers;       /* lustre release version number */
	__u32	     cm_padding;    /* 64 bit align */
	__s64	     cm_createtime; /* when this record was first created */
	__s64	     cm_canceltime; /* when this record is no longer valid */
	char	      cm_tgtname[MTI_NAME_MAXLEN];
	char	      cm_comment[MTI_NAME_MAXLEN];
};

/*
 * Opcodes for multiple servers.
 */

enum obd_cmd {
	OBD_PING = 400,
	OBD_LOG_CANCEL,
	OBD_QC_CALLBACK, /* not used since 2.4 */
	OBD_IDX_READ,
	OBD_LAST_OPC
};
#define OBD_FIRST_OPC OBD_PING

/**
 * llog context indices.
 *
 * There is a compatibility problem with the indices below: they are not
 * contiguous and must keep their numbers for compatibility reasons.
 * See LU-5218 for details.
 */
enum llog_ctxt_id {
	LLOG_CONFIG_ORIG_CTXT  =  0,
	LLOG_CONFIG_REPL_CTXT = 1,
	LLOG_MDS_OST_ORIG_CTXT = 2,
	LLOG_MDS_OST_REPL_CTXT = 3, /* kept just to avoid re-assignment */
	LLOG_SIZE_ORIG_CTXT = 4,
	LLOG_SIZE_REPL_CTXT = 5,
	LLOG_TEST_ORIG_CTXT = 8,
	LLOG_TEST_REPL_CTXT = 9, /* kept just to avoid re-assignment */
	LLOG_CHANGELOG_ORIG_CTXT = 12, /**< changelog generation on mdd */
	LLOG_CHANGELOG_REPL_CTXT = 13, /**< changelog access on clients */
	/* for multiple changelog consumers */
	LLOG_CHANGELOG_USER_ORIG_CTXT = 14,
	LLOG_AGENT_ORIG_CTXT = 15, /**< agent requests generation on cdt */
	LLOG_MAX_CTXTS
};

/** Identifier for a single log object */
struct llog_logid {
	struct ost_id		lgl_oi;
	__u32		   lgl_ogen;
} __packed;

/** Records written to the CATALOGS list */
#define CATLIST "CATALOGS"
struct llog_catid {
	struct llog_logid       lci_logid;
	__u32		   lci_padding1;
	__u32		   lci_padding2;
	__u32		   lci_padding3;
} __packed;

/* Log data record types - there is no specific reason that these need to
 * be related to the RPC opcodes, but no reason not to (may be handy later?)
 */
#define LLOG_OP_MAGIC 0x10600000
#define LLOG_OP_MASK  0xfff00000

enum llog_op_type {
	LLOG_PAD_MAGIC		= LLOG_OP_MAGIC | 0x00000,
	OST_SZ_REC		= LLOG_OP_MAGIC | 0x00f00,
	/* OST_RAID1_REC	= LLOG_OP_MAGIC | 0x01000, never used */
	MDS_UNLINK_REC		= LLOG_OP_MAGIC | 0x10000 | (MDS_REINT << 8) |
				  REINT_UNLINK, /* obsolete after 2.5.0 */
	MDS_UNLINK64_REC	= LLOG_OP_MAGIC | 0x90000 | (MDS_REINT << 8) |
				  REINT_UNLINK,
	/* MDS_SETATTR_REC	= LLOG_OP_MAGIC | 0x12401, obsolete 1.8.0 */
	MDS_SETATTR64_REC	= LLOG_OP_MAGIC | 0x90000 | (MDS_REINT << 8) |
				  REINT_SETATTR,
	OBD_CFG_REC		= LLOG_OP_MAGIC | 0x20000,
	/* PTL_CFG_REC		= LLOG_OP_MAGIC | 0x30000, obsolete 1.4.0 */
	LLOG_GEN_REC		= LLOG_OP_MAGIC | 0x40000,
	/* LLOG_JOIN_REC	= LLOG_OP_MAGIC | 0x50000, obsolete  1.8.0 */
	CHANGELOG_REC		= LLOG_OP_MAGIC | 0x60000,
	CHANGELOG_USER_REC	= LLOG_OP_MAGIC | 0x70000,
	HSM_AGENT_REC		= LLOG_OP_MAGIC | 0x80000,
	LLOG_HDR_MAGIC		= LLOG_OP_MAGIC | 0x45539,
	LLOG_LOGID_MAGIC	= LLOG_OP_MAGIC | 0x4553b,
};

#define LLOG_REC_HDR_NEEDS_SWABBING(r) \
	(((r)->lrh_type & __swab32(LLOG_OP_MASK)) == __swab32(LLOG_OP_MAGIC))
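
/*
 * Illustrative note: every record type carries LLOG_OP_MAGIC in its high
 * bits, so a record written by an opposite-endian peer matches the
 * byte-swapped magic instead, e.g.:
 *
 *	if (LLOG_REC_HDR_NEEDS_SWABBING(rec))
 *		swab the record before use (helper name varies by tree)
 */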

/** Log record header - stored in little endian order.
 * Each record must start with this struct, end with a llog_rec_tail,
 * and be a multiple of 256 bits in size.
 */
struct llog_rec_hdr {
	__u32	lrh_len;
	__u32	lrh_index;
	__u32	lrh_type;
	__u32	lrh_id;
};

struct llog_rec_tail {
	__u32	lrt_len;
	__u32	lrt_index;
};

/* Record data follows immediately after the header */
#define REC_DATA(ptr)						\
	((void *)((char *)ptr + sizeof(struct llog_rec_hdr)))

#define REC_DATA_LEN(rec)					\
	(rec->lrh_len - sizeof(struct llog_rec_hdr) -		\
	 sizeof(struct llog_rec_tail))

struct llog_logid_rec {
	struct llog_rec_hdr	lid_hdr;
	struct llog_logid	lid_id;
	__u32			lid_padding1;
	__u64			lid_padding2;
	__u64			lid_padding3;
	struct llog_rec_tail	lid_tail;
} __packed;

struct llog_unlink_rec {
	struct llog_rec_hdr	lur_hdr;
	__u64			lur_oid;
	__u32			lur_oseq;
	__u32			lur_count;
	struct llog_rec_tail	lur_tail;
} __packed;

struct llog_unlink64_rec {
	struct llog_rec_hdr	lur_hdr;
	struct lu_fid		lur_fid;
	__u32			lur_count; /* to destroy the lost precreated */
	__u32			lur_padding1;
	__u64			lur_padding2;
	__u64			lur_padding3;
	struct llog_rec_tail    lur_tail;
} __packed;

struct llog_setattr64_rec {
	struct llog_rec_hdr	lsr_hdr;
	struct ost_id		lsr_oi;
	__u32			lsr_uid;
	__u32			lsr_uid_h;
	__u32			lsr_gid;
	__u32			lsr_gid_h;
	__u64			lsr_valid;
	struct llog_rec_tail    lsr_tail;
} __packed;

struct llog_size_change_rec {
	struct llog_rec_hdr	lsc_hdr;
	struct ll_fid		lsc_fid;
	__u32			lsc_ioepoch;
	__u32			lsc_padding1;
	__u64			lsc_padding2;
	__u64			lsc_padding3;
	struct llog_rec_tail	lsc_tail;
} __packed;

/* changelog llog name, needed by client replicators */
#define CHANGELOG_CATALOG "changelog_catalog"

struct changelog_setinfo {
	__u64 cs_recno;
	__u32 cs_id;
} __packed;

/** changelog record */
struct llog_changelog_rec {
	struct llog_rec_hdr	cr_hdr;
	struct changelog_rec	cr;		/**< Variable length field */
	struct llog_rec_tail	cr_do_not_use;	/**< for_sizeof_only */
} __packed;

struct llog_changelog_user_rec {
	struct llog_rec_hdr   cur_hdr;
	__u32		 cur_id;
	__u32		 cur_padding;
	__u64		 cur_endrec;
	struct llog_rec_tail  cur_tail;
} __packed;

enum agent_req_status {
	ARS_WAITING,
	ARS_STARTED,
	ARS_FAILED,
	ARS_CANCELED,
	ARS_SUCCEED,
};

static inline const char *agent_req_status2name(const enum agent_req_status ars)
{
	switch (ars) {
	case ARS_WAITING:
		return "WAITING";
	case ARS_STARTED:
		return "STARTED";
	case ARS_FAILED:
		return "FAILED";
	case ARS_CANCELED:
		return "CANCELED";
	case ARS_SUCCEED:
		return "SUCCEED";
	default:
		return "UNKNOWN";
	}
}

static inline bool agent_req_in_final_state(enum agent_req_status ars)
{
	return ((ars == ARS_SUCCEED) || (ars == ARS_FAILED) ||
		(ars == ARS_CANCELED));
}

struct llog_agent_req_rec {
	struct llog_rec_hdr	arr_hdr;	/**< record header */
	__u32			arr_status;	/**< status of the request */
						/* must match enum
						 * agent_req_status
						 */
	__u32			arr_archive_id;	/**< backend archive number */
	__u64			arr_flags;	/**< req flags */
	__u64			arr_compound_id;/**< compound cookie */
	__u64			arr_req_create;	/**< req. creation time */
	__u64			arr_req_change;	/**< req. status change time */
	struct hsm_action_item	arr_hai;	/**< req. to the agent */
	struct llog_rec_tail	arr_tail;   /**< record tail, for_sizeof_only */
} __packed;

/* Old llog gen for compatibility */
struct llog_gen {
	__u64 mnt_cnt;
	__u64 conn_cnt;
} __packed;

struct llog_gen_rec {
	struct llog_rec_hdr	lgr_hdr;
	struct llog_gen		lgr_gen;
	__u64			padding1;
	__u64			padding2;
	__u64			padding3;
	struct llog_rec_tail	lgr_tail;
};

/* flags for the logs */
enum llog_flag {
	LLOG_F_ZAP_WHEN_EMPTY	= 0x1,
	LLOG_F_IS_CAT		= 0x2,
	LLOG_F_IS_PLAIN		= 0x4,
	LLOG_F_EXT_JOBID        = BIT(3),
	LLOG_F_IS_FIXSIZE	= BIT(4),

	/*
	 * Note: Flags covered by LLOG_F_EXT_MASK will be inherited from
	 * catlog to plain log, so do not add LLOG_F_IS_FIXSIZE here,
	 * because the catlog record is usually fixed-size, but its plain
	 * log records can be variable-sized.
	 */
	LLOG_F_EXT_MASK = LLOG_F_EXT_JOBID,
};

/* On-disk header structure of each log object, stored in little endian order */
#define LLOG_MIN_CHUNK_SIZE	8192
#define LLOG_HEADER_SIZE	(96)	/* sizeof (llog_log_hdr) +
					 * sizeof(llh_tail) - sizeof(llh_bitmap)
					 */
#define LLOG_BITMAP_BYTES	(LLOG_MIN_CHUNK_SIZE - LLOG_HEADER_SIZE)
#define LLOG_MIN_REC_SIZE	(24)	/* round(llog_rec_hdr + llog_rec_tail) */

/* flags for the logs */
struct llog_log_hdr {
	struct llog_rec_hdr     llh_hdr;
	__s64		   llh_timestamp;
	__u32		   llh_count;
	__u32		   llh_bitmap_offset;
	__u32		   llh_size;
	__u32		   llh_flags;
	__u32		   llh_cat_idx;
	/* for a catalog the first plain slot is next to it */
	struct obd_uuid	 llh_tgtuuid;
	__u32		   llh_reserved[LLOG_HEADER_SIZE / sizeof(__u32) - 23];
	/* These fields must always be at the end of the llog_log_hdr.
	 * Note: llh_bitmap size is variable because llog chunk size could be
	 * bigger than LLOG_MIN_CHUNK_SIZE, i.e. sizeof(llog_log_hdr) > 8192
	 * bytes, and the real size is stored in llh_hdr.lrh_len, which means
	 * llh_tail should only be referenced via LLOG_HDR_TAIL().
	 * But since this structure is also used by the client/server llog
	 * interface (see llog_client.c), it is kept in its original form to
	 * avoid compatibility issues.
	 */
	__u32		   llh_bitmap[LLOG_BITMAP_BYTES / sizeof(__u32)];
	struct llog_rec_tail    llh_tail;
} __packed;

#undef LLOG_HEADER_SIZE
#undef LLOG_BITMAP_BYTES

#define LLOG_HDR_BITMAP_SIZE(llh) (__u32)((llh->llh_hdr.lrh_len -	\
					   llh->llh_bitmap_offset -	\
					   sizeof(llh->llh_tail)) * 8)
#define LLOG_HDR_BITMAP(llh)	(__u32 *)((char *)(llh) +		\
					  (llh)->llh_bitmap_offset)
#define LLOG_HDR_TAIL(llh)	((struct llog_rec_tail *)((char *)llh + \
							 llh->llh_hdr.lrh_len - \
							 sizeof(llh->llh_tail)))
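
/*
 * Illustrative sketch, assuming the usual 32-bit little-endian bitmap
 * layout: testing whether record index idx is live in a llog:
 *
 *	__u32 *bitmap = LLOG_HDR_BITMAP(llh);
 *
 *	if (idx < LLOG_HDR_BITMAP_SIZE(llh) &&
 *	    (bitmap[idx / 32] & (1U << (idx % 32))))
 *		the record at idx is in use
 */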

/** log cookies are used to reference a specific log file and a record
 * therein
 */
struct llog_cookie {
	struct llog_logid       lgc_lgl;
	__u32		   lgc_subsys;
	__u32		   lgc_index;
	__u32		   lgc_padding;
} __packed;

/** llog protocol */
enum llogd_rpc_ops {
	LLOG_ORIGIN_HANDLE_CREATE       = 501,
	LLOG_ORIGIN_HANDLE_NEXT_BLOCK   = 502,
	LLOG_ORIGIN_HANDLE_READ_HEADER  = 503,
	LLOG_ORIGIN_HANDLE_WRITE_REC    = 504,
	LLOG_ORIGIN_HANDLE_CLOSE	= 505,
	LLOG_ORIGIN_CONNECT		= 506,
	LLOG_CATINFO			= 507,  /* deprecated */
	LLOG_ORIGIN_HANDLE_PREV_BLOCK   = 508,
	LLOG_ORIGIN_HANDLE_DESTROY      = 509,  /* for destroying llog objects */
	LLOG_LAST_OPC,
	LLOG_FIRST_OPC		  = LLOG_ORIGIN_HANDLE_CREATE
};

struct llogd_body {
	struct llog_logid  lgd_logid;
	__u32 lgd_ctxt_idx;
	__u32 lgd_llh_flags;
	__u32 lgd_index;
	__u32 lgd_saved_index;
	__u32 lgd_len;
	__u64 lgd_cur_offset;
} __packed;

struct llogd_conn_body {
	struct llog_gen	 lgdc_gen;
	struct llog_logid       lgdc_logid;
	__u32		   lgdc_ctxt_idx;
} __packed;

/* Note: 64-bit types are 64-bit aligned in structure */
struct obdo {
	__u64		o_valid;	/* hot fields in this obdo */
	struct ost_id	o_oi;
	__u64		o_parent_seq;
	__u64		o_size;	 /* o_size-o_blocks == ost_lvb */
	__s64		o_mtime;
	__s64		o_atime;
	__s64		o_ctime;
	__u64		o_blocks;       /* brw: cli sent cached bytes */
	__u64		o_grant;

	/* 32-bit fields start here: keep an even number of them via padding */
	__u32		o_blksize;      /* optimal IO blocksize */
	__u32		o_mode;	 /* brw: cli sent cache remain */
	__u32		o_uid;
	__u32		o_gid;
	__u32		o_flags;
	__u32		o_nlink;	/* brw: checksum */
	__u32		o_parent_oid;
	__u32		o_misc;		/* brw: o_dropped */

	__u64		   o_ioepoch;      /* epoch in ost writes */
	__u32		   o_stripe_idx;   /* holds stripe idx */
	__u32		   o_parent_ver;
	struct lustre_handle    o_handle;  /* brw: lock handle to prolong locks
					    */
	struct llog_cookie      o_lcookie; /* destroy: unlink cookie from MDS,
					    * obsolete in 2.8, reused in OSP
					    */
	__u32			o_uid_h;
	__u32			o_gid_h;

	__u64			o_data_version; /* getattr: sum of iversion for
						 * each stripe.
						 * brw: grant space consumed on
						 * the client for the write
						 */
	__u64			o_padding_4;
	__u64			o_padding_5;
	__u64			o_padding_6;
};

#define o_dirty   o_blocks
#define o_undirty o_mode
#define o_dropped o_misc
#define o_cksum   o_nlink
#define o_grant_used o_data_version
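
/*
 * Illustrative sketch (assumed usage, not taken from this header): the
 * aliases above let bulk-RW (brw) code reuse the generic attribute
 * fields without growing the wire structure. For example, a client
 * filling a brw request might write:
 *
 *	oa->o_dirty = dirty_bytes;	(lands in o_blocks)
 *	oa->o_cksum = checksum;		(lands in o_nlink)
 */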

static inline void lustre_set_wire_obdo(const struct obd_connect_data *ocd,
					struct obdo *wobdo,
					const struct obdo *lobdo)
{
	*wobdo = *lobdo;
	wobdo->o_flags &= ~OBD_FL_LOCAL_MASK;
	if (!ocd)
		return;

	if (unlikely(!(ocd->ocd_connect_flags & OBD_CONNECT_FID)) &&
	    fid_seq_is_echo(ostid_seq(&lobdo->o_oi))) {
		/* Currently OBD_FL_OSTID is only used when a 2.4 echo
		 * client communicates with a pre-2.4 server.
		 */
		wobdo->o_oi.oi.oi_id = fid_oid(&lobdo->o_oi.oi_fid);
		wobdo->o_oi.oi.oi_seq = fid_seq(&lobdo->o_oi.oi_fid);
	}
}

static inline void lustre_get_wire_obdo(const struct obd_connect_data *ocd,
					struct obdo *lobdo,
					const struct obdo *wobdo)
{
	__u32 local_flags = 0;

	if (lobdo->o_valid & OBD_MD_FLFLAGS)
		local_flags = lobdo->o_flags & OBD_FL_LOCAL_MASK;

	*lobdo = *wobdo;
	if (local_flags != 0) {
		lobdo->o_valid |= OBD_MD_FLFLAGS;
		lobdo->o_flags &= ~OBD_FL_LOCAL_MASK;
		lobdo->o_flags |= local_flags;
	}
	if (!ocd)
		return;

	if (unlikely(!(ocd->ocd_connect_flags & OBD_CONNECT_FID)) &&
	    fid_seq_is_echo(wobdo->o_oi.oi.oi_seq)) {
		/* see the comment in lustre_set_wire_obdo() */
		lobdo->o_oi.oi_fid.f_seq = wobdo->o_oi.oi.oi_seq;
		lobdo->o_oi.oi_fid.f_oid = wobdo->o_oi.oi.oi_id;
		lobdo->o_oi.oi_fid.f_ver = 0;
	}
}
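
/*
 * Round-trip sketch (illustrative; 'ocd', 'wire' and 'local' are
 * hypothetical): the two helpers above are intended to be used as a
 * pair so that purely local flags never leak onto the wire:
 *
 *	struct obdo wire;
 *
 *	lustre_set_wire_obdo(ocd, &wire, local);  (strips OBD_FL_LOCAL_MASK)
 *	...send 'wire', receive the reply back into 'wire'...
 *	lustre_get_wire_obdo(ocd, local, &wire);  (restores local-only flags)
 */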

/* request structure for OSTs */
struct ost_body {
	struct  obdo oa;
};

/* Key for FIEMAP to be used in get_info calls */
struct ll_fiemap_info_key {
	char		lfik_name[8];
	struct obdo	lfik_oa;
	struct fiemap	lfik_fiemap;
};

/* Functions for dumping PTLRPC fields */
void dump_rniobuf(struct niobuf_remote *rnb);
void dump_ioo(struct obd_ioobj *ioo);
void dump_ost_body(struct ost_body *ob);
void dump_rcs(__u32 *rc);

/* security opcodes */
enum sec_cmd {
	SEC_CTX_INIT	    = 801,
	SEC_CTX_INIT_CONT       = 802,
	SEC_CTX_FINI	    = 803,
	SEC_LAST_OPC,
	SEC_FIRST_OPC	   = SEC_CTX_INIT
};

/*
 * capa related definitions
 */
#define CAPA_HMAC_MAX_LEN       64
#define CAPA_HMAC_KEY_MAX_LEN   56

/* NB: take care when changing the order of elements in this struct,
 * because the offset information is used in find_capa().
 */
struct lustre_capa {
	struct lu_fid   lc_fid;	 /** fid */
	__u64	   lc_opc;	 /** operations allowed */
	__u64	   lc_uid;	 /** file owner */
	__u64	   lc_gid;	 /** file group */
	__u32	   lc_flags;       /** HMAC algorithm & flags */
	__u32	   lc_keyid;       /** key# used for the capability */
	__u32	   lc_timeout;     /** capa timeout value (sec) */
/* FIXME: y2038 time_t overflow */
	__u32	   lc_expiry;      /** expiry time (sec) */
	__u8	    lc_hmac[CAPA_HMAC_MAX_LEN];   /** HMAC */
} __packed;

/** lustre_capa::lc_opc */
enum {
	CAPA_OPC_BODY_WRITE   = 1 << 0,  /**< write object data */
	CAPA_OPC_BODY_READ    = 1 << 1,  /**< read object data */
	CAPA_OPC_INDEX_LOOKUP = 1 << 2,  /**< lookup object fid */
	CAPA_OPC_INDEX_INSERT = 1 << 3,  /**< insert object fid */
	CAPA_OPC_INDEX_DELETE = 1 << 4,  /**< delete object fid */
	CAPA_OPC_OSS_WRITE    = 1 << 5,  /**< write oss object data */
	CAPA_OPC_OSS_READ     = 1 << 6,  /**< read oss object data */
	CAPA_OPC_OSS_TRUNC    = 1 << 7,  /**< truncate oss object */
	CAPA_OPC_OSS_DESTROY  = 1 << 8,  /**< destroy oss object */
	CAPA_OPC_META_WRITE   = 1 << 9,  /**< write object meta data */
	CAPA_OPC_META_READ    = 1 << 10, /**< read object meta data */
};

#define CAPA_OPC_OSS_RW (CAPA_OPC_OSS_READ | CAPA_OPC_OSS_WRITE)
#define CAPA_OPC_MDS_ONLY						   \
	(CAPA_OPC_BODY_WRITE | CAPA_OPC_BODY_READ | CAPA_OPC_INDEX_LOOKUP | \
	 CAPA_OPC_INDEX_INSERT | CAPA_OPC_INDEX_DELETE)
#define CAPA_OPC_OSS_ONLY						   \
	(CAPA_OPC_OSS_WRITE | CAPA_OPC_OSS_READ | CAPA_OPC_OSS_TRUNC |      \
	 CAPA_OPC_OSS_DESTROY)
#define CAPA_OPC_MDS_DEFAULT (~CAPA_OPC_OSS_ONLY)
#define CAPA_OPC_OSS_DEFAULT (~(CAPA_OPC_MDS_ONLY | CAPA_OPC_OSS_ONLY))
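
/*
 * Sketch (hypothetical helper, not part of this header): a server could
 * verify that a capability authorizes an operation by masking lc_opc
 * against the bits above:
 *
 *	static inline bool capa_opc_allowed(const struct lustre_capa *capa,
 *					    __u64 opc)
 *	{
 *		return (capa->lc_opc & opc) == opc;
 *	}
 *
 *	if (!capa_opc_allowed(capa, CAPA_OPC_OSS_WRITE))
 *		return -EACCES;
 */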

struct lustre_capa_key {
	__u64   lk_seq;       /**< mds# */
	__u32   lk_keyid;     /**< key# */
	__u32   lk_padding;
	__u8    lk_key[CAPA_HMAC_KEY_MAX_LEN];    /**< key */
} __packed;

/** The link ea holds 1 \a link_ea_entry for each hardlink */
#define LINK_EA_MAGIC 0x11EAF1DFUL
struct link_ea_header {
	__u32 leh_magic;
	__u32 leh_reccount;
	__u64 leh_len;      /* total size */
	/* future use */
	__u32 padding1;
	__u32 padding2;
};

/** Hardlink data is name and parent fid.
 * Stored in this crazy struct for maximum packing and endian-neutrality
 */
struct link_ea_entry {
	/** __u16 stored big-endian, unaligned */
	unsigned char      lee_reclen[2];
	unsigned char      lee_parent_fid[sizeof(struct lu_fid)];
	char	       lee_name[];
} __packed;
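
/*
 * Decoding sketch (hypothetical helper): since lee_reclen is a __u16
 * stored big-endian and unaligned, an entry's length must be assembled
 * byte by byte, and the next entry follows immediately after it:
 *
 *	static inline __u16 lee_reclen_get(const struct link_ea_entry *lee)
 *	{
 *		return ((__u16)lee->lee_reclen[0] << 8) | lee->lee_reclen[1];
 *	}
 *
 *	next = (struct link_ea_entry *)((char *)lee + lee_reclen_get(lee));
 */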

/** fid2path request/reply structure */
struct getinfo_fid2path {
	struct lu_fid   gf_fid;
	__u64	   gf_recno;
	__u32	   gf_linkno;
	__u32	   gf_pathlen;
	char	    gf_path[];
} __packed;
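
/*
 * Allocation sketch (illustrative; 'pathlen' is hypothetical): gf_path
 * is a flexible tail, so a caller sizes the buffer as the fixed header
 * plus the requested path length:
 *
 *	struct getinfo_fid2path *gf;
 *
 *	gf = kzalloc(sizeof(*gf) + pathlen, GFP_KERNEL);
 *	if (!gf)
 *		return -ENOMEM;
 *	gf->gf_pathlen = pathlen;
 */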

/** path2parent request/reply structures */
struct getparent {
	struct lu_fid	gp_fid;		/**< parent FID */
	__u32		gp_linkno;	/**< hardlink number */
	__u32		gp_name_size;	/**< size of the name field */
	char		gp_name[];	/**< NUL-terminated link name */
} __packed;

enum {
	LAYOUT_INTENT_ACCESS    = 0,
	LAYOUT_INTENT_READ      = 1,
	LAYOUT_INTENT_WRITE     = 2,
	LAYOUT_INTENT_GLIMPSE   = 3,
	LAYOUT_INTENT_TRUNC     = 4,
	LAYOUT_INTENT_RELEASE   = 5,
	LAYOUT_INTENT_RESTORE   = 6
};

/* enqueue layout lock with intent */
struct layout_intent {
	__u32 li_opc; /* intent operation for the enqueue: read, write, etc. */
	__u32 li_flags;
	__u64 li_start;
	__u64 li_end;
};
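
/*
 * Fill-in sketch (illustrative; 'start' and 'end' are hypothetical): a
 * client enqueueing a layout lock ahead of a write covering
 * [start, end) might pass:
 *
 *	struct layout_intent intent = {
 *		.li_opc   = LAYOUT_INTENT_WRITE,
 *		.li_start = start,
 *		.li_end   = end,
 *	};
 */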

/**
 * On-the-wire version of the hsm_progress structure.
 *
 * Contains the userspace hsm_progress fields plus some internal ones.
 */
struct hsm_progress_kernel {
	/* Field taken from struct hsm_progress */
	struct lu_fid		hpk_fid;
	__u64			hpk_cookie;
	struct hsm_extent	hpk_extent;
	__u16			hpk_flags;
	__u16			hpk_errval; /* positive error value */
	__u32			hpk_padding1;
	/* Additional fields */
	__u64			hpk_data_version;
	__u64			hpk_padding2;
} __packed;

/** layout swap request structure
 * fid1 and fid2 are in mdt_body
 */
struct mdc_swap_layouts {
	__u64	   msl_flags;
} __packed;

struct close_data {
	struct lustre_handle	cd_handle;
	struct lu_fid		cd_fid;
	__u64			cd_data_version;
	__u64			cd_reserved[8];
};

#endif
/** @} lustreidl */