/* This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

/* Generated with cbindgen:0.15.0 */

/* DO NOT MODIFY THIS MANUALLY! This file was generated using cbindgen.
 * To generate this file:
 *   1. Get the latest cbindgen using `cargo install --force cbindgen`
 *      a. Alternatively, you can clone `https://github.com/eqrion/cbindgen` and use a tagged release
 *   2. Run `rustup run nightly cbindgen toolkit/library/rust/ --lockfile Cargo.lock --crate wgpu_bindings -o dom/webgpu/ffi/wgpu_ffi_generated.h`
 */

#include <stdarg.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdlib.h>

struct WGPUByteBuf;
typedef uint64_t WGPUNonZeroU64;
typedef uint64_t WGPUOption_BufferSize;
typedef uint32_t WGPUOption_NonZeroU32;
typedef uint8_t WGPUOption_NonZeroU8;
typedef uint64_t WGPUOption_AdapterId;
typedef uint64_t WGPUOption_BufferId;
typedef uint64_t WGPUOption_PipelineLayoutId;
typedef uint64_t WGPUOption_SamplerId;
typedef uint64_t WGPUOption_SurfaceId;
typedef uint64_t WGPUOption_TextureViewId;

#define WGPUMAX_BIND_GROUPS 8

#define WGPUMAX_COLOR_TARGETS 4

#define WGPUMAX_MIP_LEVELS 16

#define WGPUMAX_VERTEX_BUFFERS 16

#define WGPUMAX_ANISOTROPY 16

#define WGPUSHADER_STAGE_COUNT 3

#define WGPUDESIRED_NUM_FRAMES 3

/**
 * Buffer-Texture copies must have [`bytes_per_row`] aligned to this number.
 *
 * This doesn't apply to [`Queue::write_texture`].
 *
 * [`bytes_per_row`]: TextureDataLayout::bytes_per_row
 */
#define WGPUCOPY_BYTES_PER_ROW_ALIGNMENT 256
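/*
 * Usage sketch (illustrative, not produced by cbindgen): rounding an
 * arbitrary row size up to the required copy alignment. `width` and
 * `bytes_per_texel` are hypothetical inputs supplied by the caller.
 *
 *   uint32_t unpadded_bytes_per_row = width * bytes_per_texel;
 *   uint32_t padded_bytes_per_row =
 *       (unpadded_bytes_per_row + WGPUCOPY_BYTES_PER_ROW_ALIGNMENT - 1u) &
 *       ~(uint32_t)(WGPUCOPY_BYTES_PER_ROW_ALIGNMENT - 1u);
 */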

/**
 * Alignment all push constants need
 */
#define WGPUPUSH_CONSTANT_ALIGNMENT 4

/**
 * How edges should be handled in texture addressing.
 */
enum WGPUAddressMode {
  /**
   * Clamp the value to the edge of the texture
   *
   * -0.25 -> 0.0
   * 1.25  -> 1.0
   */
  WGPUAddressMode_ClampToEdge = 0,
  /**
   * Repeat the texture in a tiling fashion
   *
   * -0.25 -> 0.75
   * 1.25 -> 0.25
   */
  WGPUAddressMode_Repeat = 1,
  /**
   * Repeat the texture, mirroring it every repeat
   *
   * -0.25 -> 0.25
   * 1.25 -> 0.75
   */
  WGPUAddressMode_MirrorRepeat = 2,
  /**
   * Clamp the value to the border of the texture
   * Requires feature [`Features::ADDRESS_MODE_CLAMP_TO_BORDER`]
   *
   * -0.25 -> border
   * 1.25 -> border
   */
  WGPUAddressMode_ClampToBorder = 3,
  /**
   * Must be last for serialization purposes
   */
  WGPUAddressMode_Sentinel,
};

/**
 * Alpha blend factor.
 *
 * Alpha blending is very complicated: see the OpenGL or Vulkan spec for more information.
 */
enum WGPUBlendFactor {
  WGPUBlendFactor_Zero = 0,
  WGPUBlendFactor_One = 1,
  WGPUBlendFactor_SrcColor = 2,
  WGPUBlendFactor_OneMinusSrcColor = 3,
  WGPUBlendFactor_SrcAlpha = 4,
  WGPUBlendFactor_OneMinusSrcAlpha = 5,
  WGPUBlendFactor_DstColor = 6,
  WGPUBlendFactor_OneMinusDstColor = 7,
  WGPUBlendFactor_DstAlpha = 8,
  WGPUBlendFactor_OneMinusDstAlpha = 9,
  WGPUBlendFactor_SrcAlphaSaturated = 10,
  WGPUBlendFactor_BlendColor = 11,
  WGPUBlendFactor_OneMinusBlendColor = 12,
  /**
   * Must be last for serialization purposes
   */
  WGPUBlendFactor_Sentinel,
};

/**
 * Alpha blend operation.
 *
 * Alpha blending is very complicated: see the OpenGL or Vulkan spec for more information.
 */
enum WGPUBlendOperation {
  WGPUBlendOperation_Add = 0,
  WGPUBlendOperation_Subtract = 1,
  WGPUBlendOperation_ReverseSubtract = 2,
  WGPUBlendOperation_Min = 3,
  WGPUBlendOperation_Max = 4,
  /**
   * Must be last for serialization purposes
   */
  WGPUBlendOperation_Sentinel,
};

enum WGPUBufferMapAsyncStatus {
  WGPUBufferMapAsyncStatus_Success,
  WGPUBufferMapAsyncStatus_Error,
  WGPUBufferMapAsyncStatus_Unknown,
  WGPUBufferMapAsyncStatus_ContextLost,
  /**
   * Must be last for serialization purposes
   */
  WGPUBufferMapAsyncStatus_Sentinel,
};

/**
 * Comparison function used for depth and stencil operations.
 */
enum WGPUCompareFunction {
  /**
   * Function never passes
   */
  WGPUCompareFunction_Never = 1,
  /**
   * Function passes if new value less than existing value
   */
  WGPUCompareFunction_Less = 2,
  /**
   * Function passes if new value is equal to existing value
   */
  WGPUCompareFunction_Equal = 3,
  /**
   * Function passes if new value is less than or equal to existing value
   */
  WGPUCompareFunction_LessEqual = 4,
  /**
   * Function passes if new value is greater than existing value
   */
  WGPUCompareFunction_Greater = 5,
  /**
   * Function passes if new value is not equal to existing value
   */
  WGPUCompareFunction_NotEqual = 6,
  /**
   * Function passes if new value is greater than or equal to existing value
   */
  WGPUCompareFunction_GreaterEqual = 7,
  /**
   * Function always passes
   */
  WGPUCompareFunction_Always = 8,
  /**
   * Must be last for serialization purposes
   */
  WGPUCompareFunction_Sentinel,
};

/**
 * Type of faces to be culled.
 */
enum WGPUCullMode {
  /**
   * No faces should be culled
   */
  WGPUCullMode_None = 0,
  /**
   * Front faces should be culled
   */
  WGPUCullMode_Front = 1,
  /**
   * Back faces should be culled
   */
  WGPUCullMode_Back = 2,
  /**
   * Must be last for serialization purposes
   */
  WGPUCullMode_Sentinel,
};

/**
 * Texel mixing mode when sampling between texels.
 */
enum WGPUFilterMode {
  /**
   * Nearest neighbor sampling.
   *
   * This creates a pixelated effect when used as a mag filter
   */
  WGPUFilterMode_Nearest = 0,
  /**
   * Linear Interpolation
   *
   * This makes textures smooth but blurry when used as a mag filter.
   */
  WGPUFilterMode_Linear = 1,
  /**
   * Must be last for serialization purposes
   */
  WGPUFilterMode_Sentinel,
};

/**
 * Winding order which classifies the "front" face.
 */
enum WGPUFrontFace {
  /**
   * Triangles with vertices in counter clockwise order are considered the front face.
   *
   * This is the default with right handed coordinate spaces.
   */
  WGPUFrontFace_Ccw = 0,
  /**
   * Triangles with vertices in clockwise order are considered the front face.
   *
   * This is the default with left handed coordinate spaces.
   */
  WGPUFrontFace_Cw = 1,
  /**
   * Must be last for serialization purposes
   */
  WGPUFrontFace_Sentinel,
};

enum WGPUHostMap {
  WGPUHostMap_Read,
  WGPUHostMap_Write,
  /**
   * Must be last for serialization purposes
   */
  WGPUHostMap_Sentinel,
};

/**
 * Format of indices used with pipeline.
 */
enum WGPUIndexFormat {
  /**
   * Indices are 16 bit unsigned integers.
   */
  WGPUIndexFormat_Uint16 = 0,
  /**
   * Indices are 32 bit unsigned integers.
   */
  WGPUIndexFormat_Uint32 = 1,
  /**
   * Must be last for serialization purposes
   */
  WGPUIndexFormat_Sentinel,
};

/**
 * Rate that determines when vertex data is advanced.
 */
enum WGPUInputStepMode {
  /**
   * Input data is advanced every vertex. This is the standard value for vertex data.
   */
  WGPUInputStepMode_Vertex = 0,
  /**
   * Input data is advanced every instance.
   */
  WGPUInputStepMode_Instance = 1,
  /**
   * Must be last for serialization purposes
   */
  WGPUInputStepMode_Sentinel,
};

/**
 * Operation to perform to the output attachment at the start of a renderpass.
 */
enum WGPULoadOp {
  /**
   * Clear the output attachment with the clear color. Clearing is faster than loading.
   */
  WGPULoadOp_Clear = 0,
  /**
   * Do not clear output attachment.
   */
  WGPULoadOp_Load = 1,
  /**
   * Must be last for serialization purposes
   */
  WGPULoadOp_Sentinel,
};

/**
 * Type of drawing mode for polygons
 */
enum WGPUPolygonMode {
  /**
   * Polygons are filled
   */
  WGPUPolygonMode_Fill = 0,
  /**
   * Polygons are drawn as line segments
   */
  WGPUPolygonMode_Line = 1,
  /**
   * Polygons are drawn as points
   */
  WGPUPolygonMode_Point = 2,
  /**
   * Must be last for serialization purposes
   */
  WGPUPolygonMode_Sentinel,
};

/**
 * Power Preference when choosing a physical adapter.
 */
enum WGPUPowerPreference {
  /**
   * Adapter that uses the least possible power. This is often an integrated GPU.
   */
  WGPUPowerPreference_LowPower = 0,
  /**
   * Adapter that has the highest performance. This is often a discrete GPU.
   */
  WGPUPowerPreference_HighPerformance = 1,
  /**
   * Must be last for serialization purposes
   */
  WGPUPowerPreference_Sentinel,
};

/**
 * Primitive type the input mesh is composed of.
 */
enum WGPUPrimitiveTopology {
  /**
   * Vertex data is a list of points. Each vertex is a new point.
   */
  WGPUPrimitiveTopology_PointList = 0,
  /**
   * Vertex data is a list of lines. Each pair of vertices composes a new line.
   *
   * Vertices `0 1 2 3` create two lines `0 1` and `2 3`
   */
  WGPUPrimitiveTopology_LineList = 1,
  /**
   * Vertex data is a strip of lines. Each set of two adjacent vertices forms a line.
   *
   * Vertices `0 1 2 3` create three lines `0 1`, `1 2`, and `2 3`.
   */
  WGPUPrimitiveTopology_LineStrip = 2,
  /**
   * Vertex data is a list of triangles. Each set of 3 vertices composes a new triangle.
   *
   * Vertices `0 1 2 3 4 5` create two triangles `0 1 2` and `3 4 5`
   */
  WGPUPrimitiveTopology_TriangleList = 3,
  /**
   * Vertex data is a triangle strip. Each set of three adjacent vertices forms a triangle.
   *
   * Vertices `0 1 2 3 4 5` creates four triangles `0 1 2`, `2 1 3`, `3 2 4`, and `4 3 5`
   */
  WGPUPrimitiveTopology_TriangleStrip = 4,
  /**
   * Must be last for serialization purposes
   */
  WGPUPrimitiveTopology_Sentinel,
};

enum WGPURawBindingType {
  WGPURawBindingType_UniformBuffer,
  WGPURawBindingType_StorageBuffer,
  WGPURawBindingType_ReadonlyStorageBuffer,
  WGPURawBindingType_Sampler,
  WGPURawBindingType_ComparisonSampler,
  WGPURawBindingType_SampledTexture,
  WGPURawBindingType_ReadonlyStorageTexture,
  WGPURawBindingType_WriteonlyStorageTexture,
  /**
   * Must be last for serialization purposes
   */
  WGPURawBindingType_Sentinel,
};

/**
 * Operation to perform on the stencil value.
 */
enum WGPUStencilOperation {
  /**
   * Keep stencil value unchanged.
   */
  WGPUStencilOperation_Keep = 0,
  /**
   * Set stencil value to zero.
   */
  WGPUStencilOperation_Zero = 1,
  /**
   * Replace stencil value with value provided in most recent call to [`RenderPass::set_stencil_reference`].
   */
  WGPUStencilOperation_Replace = 2,
  /**
   * Bitwise inverts stencil value.
   */
  WGPUStencilOperation_Invert = 3,
  /**
   * Increments stencil value by one, clamping on overflow.
   */
  WGPUStencilOperation_IncrementClamp = 4,
  /**
   * Decrements stencil value by one, clamping on underflow.
   */
  WGPUStencilOperation_DecrementClamp = 5,
  /**
   * Increments stencil value by one, wrapping on overflow.
   */
  WGPUStencilOperation_IncrementWrap = 6,
  /**
   * Decrements stencil value by one, wrapping on underflow.
   */
  WGPUStencilOperation_DecrementWrap = 7,
  /**
   * Must be last for serialization purposes
   */
  WGPUStencilOperation_Sentinel,
};

/**
 * Operation to perform to the output attachment at the end of a renderpass.
 */
enum WGPUStoreOp {
  /**
   * Clear the render target. If you don't care about the contents of the target, this can be faster.
   */
  WGPUStoreOp_Clear = 0,
  /**
   * Store the result of the renderpass.
   */
  WGPUStoreOp_Store = 1,
  /**
   * Must be last for serialization purposes
   */
  WGPUStoreOp_Sentinel,
};

/**
 * Kind of data the texture holds.
 */
enum WGPUTextureAspect {
  /**
   * Depth, Stencil, and Color.
   */
  WGPUTextureAspect_All,
  /**
   * Stencil.
   */
  WGPUTextureAspect_StencilOnly,
  /**
   * Depth.
   */
  WGPUTextureAspect_DepthOnly,
  /**
   * Must be last for serialization purposes
   */
  WGPUTextureAspect_Sentinel,
};

/**
 * Dimensionality of a texture.
 */
enum WGPUTextureDimension {
  /**
   * 1D texture
   */
  WGPUTextureDimension_D1,
  /**
   * 2D texture
   */
  WGPUTextureDimension_D2,
  /**
   * 3D texture
   */
  WGPUTextureDimension_D3,
  /**
   * Must be last for serialization purposes
   */
  WGPUTextureDimension_Sentinel,
};

/**
 * Underlying texture data format.
 *
 * If there is a conversion in the format (such as srgb -> linear), the conversion listed is for
 * loading from texture in a shader. When writing to the texture, the opposite conversion takes place.
 */
enum WGPUTextureFormat {
  /**
   * Red channel only. 8 bit integer per channel. [0, 255] converted to/from float [0, 1] in shader.
   */
  WGPUTextureFormat_R8Unorm = 0,
  /**
   * Red channel only. 8 bit integer per channel. [-127, 127] converted to/from float [-1, 1] in shader.
   */
  WGPUTextureFormat_R8Snorm = 1,
  /**
   * Red channel only. 8 bit integer per channel. Unsigned in shader.
   */
  WGPUTextureFormat_R8Uint = 2,
  /**
   * Red channel only. 8 bit integer per channel. Signed in shader.
   */
  WGPUTextureFormat_R8Sint = 3,
  /**
   * Red channel only. 16 bit integer per channel. Unsigned in shader.
   */
  WGPUTextureFormat_R16Uint = 4,
  /**
   * Red channel only. 16 bit integer per channel. Signed in shader.
   */
  WGPUTextureFormat_R16Sint = 5,
  /**
   * Red channel only. 16 bit float per channel. Float in shader.
   */
  WGPUTextureFormat_R16Float = 6,
  /**
   * Red and green channels. 8 bit integer per channel. [0, 255] converted to/from float [0, 1] in shader.
   */
  WGPUTextureFormat_Rg8Unorm = 7,
  /**
   * Red and green channels. 8 bit integer per channel. [-127, 127] converted to/from float [-1, 1] in shader.
   */
  WGPUTextureFormat_Rg8Snorm = 8,
  /**
   * Red and green channels. 8 bit integer per channel. Unsigned in shader.
   */
  WGPUTextureFormat_Rg8Uint = 9,
  /**
   * Red and green channels. 8 bit integer per channel. Signed in shader.
   */
  WGPUTextureFormat_Rg8Sint = 10,
  /**
   * Red channel only. 32 bit integer per channel. Unsigned in shader.
   */
  WGPUTextureFormat_R32Uint = 11,
  /**
   * Red channel only. 32 bit integer per channel. Signed in shader.
   */
  WGPUTextureFormat_R32Sint = 12,
  /**
   * Red channel only. 32 bit float per channel. Float in shader.
   */
  WGPUTextureFormat_R32Float = 13,
  /**
   * Red and green channels. 16 bit integer per channel. Unsigned in shader.
   */
  WGPUTextureFormat_Rg16Uint = 14,
  /**
   * Red and green channels. 16 bit integer per channel. Signed in shader.
   */
  WGPUTextureFormat_Rg16Sint = 15,
  /**
   * Red and green channels. 16 bit float per channel. Float in shader.
   */
  WGPUTextureFormat_Rg16Float = 16,
  /**
   * Red, green, blue, and alpha channels. 8 bit integer per channel. [0, 255] converted to/from float [0, 1] in shader.
   */
  WGPUTextureFormat_Rgba8Unorm = 17,
  /**
   * Red, green, blue, and alpha channels. 8 bit integer per channel. Srgb-color [0, 255] converted to/from linear-color float [0, 1] in shader.
   */
  WGPUTextureFormat_Rgba8UnormSrgb = 18,
  /**
   * Red, green, blue, and alpha channels. 8 bit integer per channel. [-127, 127] converted to/from float [-1, 1] in shader.
   */
  WGPUTextureFormat_Rgba8Snorm = 19,
  /**
   * Red, green, blue, and alpha channels. 8 bit integer per channel. Unsigned in shader.
   */
  WGPUTextureFormat_Rgba8Uint = 20,
  /**
   * Red, green, blue, and alpha channels. 8 bit integer per channel. Signed in shader.
   */
  WGPUTextureFormat_Rgba8Sint = 21,
  /**
   * Blue, green, red, and alpha channels. 8 bit integer per channel. [0, 255] converted to/from float [0, 1] in shader.
   */
  WGPUTextureFormat_Bgra8Unorm = 22,
  /**
   * Blue, green, red, and alpha channels. 8 bit integer per channel. Srgb-color [0, 255] converted to/from linear-color float [0, 1] in shader.
   */
  WGPUTextureFormat_Bgra8UnormSrgb = 23,
  /**
   * Red, green, blue, and alpha channels. 10 bit integer for RGB channels, 2 bit integer for alpha channel. [0, 1023] ([0, 3] for alpha) converted to/from float [0, 1] in shader.
   */
  WGPUTextureFormat_Rgb10a2Unorm = 24,
  /**
   * Red, green, and blue channels. 11 bit float with no sign bit for RG channels. 10 bit float with no sign bit for blue channel. Float in shader.
   */
  WGPUTextureFormat_Rg11b10Float = 25,
  /**
   * Red and green channels. 32 bit integer per channel. Unsigned in shader.
   */
  WGPUTextureFormat_Rg32Uint = 26,
  /**
   * Red and green channels. 32 bit integer per channel. Signed in shader.
   */
  WGPUTextureFormat_Rg32Sint = 27,
  /**
   * Red and green channels. 32 bit float per channel. Float in shader.
   */
  WGPUTextureFormat_Rg32Float = 28,
  /**
   * Red, green, blue, and alpha channels. 16 bit integer per channel. Unsigned in shader.
   */
  WGPUTextureFormat_Rgba16Uint = 29,
  /**
   * Red, green, blue, and alpha channels. 16 bit integer per channel. Signed in shader.
   */
  WGPUTextureFormat_Rgba16Sint = 30,
  /**
   * Red, green, blue, and alpha channels. 16 bit float per channel. Float in shader.
   */
  WGPUTextureFormat_Rgba16Float = 31,
  /**
   * Red, green, blue, and alpha channels. 32 bit integer per channel. Unsigned in shader.
   */
  WGPUTextureFormat_Rgba32Uint = 32,
  /**
   * Red, green, blue, and alpha channels. 32 bit integer per channel. Signed in shader.
   */
  WGPUTextureFormat_Rgba32Sint = 33,
  /**
   * Red, green, blue, and alpha channels. 32 bit float per channel. Float in shader.
   */
  WGPUTextureFormat_Rgba32Float = 34,
  /**
   * Special depth format with 32 bit floating point depth.
   */
  WGPUTextureFormat_Depth32Float = 35,
  /**
   * Special depth format with at least 24 bit integer depth.
   */
  WGPUTextureFormat_Depth24Plus = 36,
  /**
   * Special depth/stencil format with at least 24 bit integer depth and 8 bits integer stencil.
   */
  WGPUTextureFormat_Depth24PlusStencil8 = 37,
  /**
   * 4x4 block compressed texture. 8 bytes per block (4 bit/px). 4 color + alpha palette. 5 bit R + 6 bit G + 5 bit B + 1 bit alpha.
   * [0, 64] ([0, 1] for alpha) converted to/from float [0, 1] in shader.
   *
   * Also known as DXT1.
   *
   * [`Features::TEXTURE_COMPRESSION_BC`] must be enabled to use this texture format.
   */
  WGPUTextureFormat_Bc1RgbaUnorm = 38,
  /**
   * 4x4 block compressed texture. 8 bytes per block (4 bit/px). 4 color + alpha palette. 5 bit R + 6 bit G + 5 bit B + 1 bit alpha.
   * Srgb-color [0, 64] ([0, 16] for alpha) converted to/from linear-color float [0, 1] in shader.
   *
   * Also known as DXT1.
   *
   * [`Features::TEXTURE_COMPRESSION_BC`] must be enabled to use this texture format.
   */
  WGPUTextureFormat_Bc1RgbaUnormSrgb = 39,
  /**
   * 4x4 block compressed texture. 16 bytes per block (8 bit/px). 4 color palette. 5 bit R + 6 bit G + 5 bit B + 4 bit alpha.
   * [0, 64] ([0, 16] for alpha) converted to/from float [0, 1] in shader.
   *
   * Also known as DXT3.
   *
   * [`Features::TEXTURE_COMPRESSION_BC`] must be enabled to use this texture format.
   */
  WGPUTextureFormat_Bc2RgbaUnorm = 40,
  /**
   * 4x4 block compressed texture. 16 bytes per block (8 bit/px). 4 color palette. 5 bit R + 6 bit G + 5 bit B + 4 bit alpha.
   * Srgb-color [0, 64] ([0, 256] for alpha) converted to/from linear-color float [0, 1] in shader.
   *
   * Also known as DXT3.
   *
   * [`Features::TEXTURE_COMPRESSION_BC`] must be enabled to use this texture format.
   */
  WGPUTextureFormat_Bc2RgbaUnormSrgb = 41,
  /**
   * 4x4 block compressed texture. 16 bytes per block (8 bit/px). 4 color palette + 8 alpha palette. 5 bit R + 6 bit G + 5 bit B + 8 bit alpha.
   * [0, 64] ([0, 256] for alpha) converted to/from float [0, 1] in shader.
   *
   * Also known as DXT5.
   *
   * [`Features::TEXTURE_COMPRESSION_BC`] must be enabled to use this texture format.
   */
  WGPUTextureFormat_Bc3RgbaUnorm = 42,
  /**
   * 4x4 block compressed texture. 16 bytes per block (8 bit/px). 4 color palette + 8 alpha palette. 5 bit R + 6 bit G + 5 bit B + 8 bit alpha.
   * Srgb-color [0, 64] ([0, 256] for alpha) converted to/from linear-color float [0, 1] in shader.
   *
   * Also known as DXT5.
   *
   * [`Features::TEXTURE_COMPRESSION_BC`] must be enabled to use this texture format.
   */
  WGPUTextureFormat_Bc3RgbaUnormSrgb = 43,
  /**
   * 4x4 block compressed texture. 8 bytes per block (4 bit/px). 8 color palette. 8 bit R.
   * [0, 256] converted to/from float [0, 1] in shader.
   *
   * Also known as RGTC1.
   *
   * [`Features::TEXTURE_COMPRESSION_BC`] must be enabled to use this texture format.
   */
  WGPUTextureFormat_Bc4RUnorm = 44,
  /**
   * 4x4 block compressed texture. 8 bytes per block (4 bit/px). 8 color palette. 8 bit R.
   * [-127, 127] converted to/from float [-1, 1] in shader.
   *
   * Also known as RGTC1.
   *
   * [`Features::TEXTURE_COMPRESSION_BC`] must be enabled to use this texture format.
   */
  WGPUTextureFormat_Bc4RSnorm = 45,
  /**
   * 4x4 block compressed texture. 16 bytes per block (8 bit/px). 8 color red palette + 8 color green palette. 8 bit RG.
   * [0, 256] converted to/from float [0, 1] in shader.
   *
   * Also known as RGTC2.
   *
   * [`Features::TEXTURE_COMPRESSION_BC`] must be enabled to use this texture format.
   */
  WGPUTextureFormat_Bc5RgUnorm = 46,
  /**
   * 4x4 block compressed texture. 16 bytes per block (8 bit/px). 8 color red palette + 8 color green palette. 8 bit RG.
   * [-127, 127] converted to/from float [-1, 1] in shader.
   *
   * Also known as RGTC2.
   *
   * [`Features::TEXTURE_COMPRESSION_BC`] must be enabled to use this texture format.
   */
  WGPUTextureFormat_Bc5RgSnorm = 47,
  /**
   * 4x4 block compressed texture. 16 bytes per block (8 bit/px). Variable-sized palette. 16 bit unsigned float RGB. Float in shader.
   *
   * Also known as BPTC (float).
   *
   * [`Features::TEXTURE_COMPRESSION_BC`] must be enabled to use this texture format.
   */
  WGPUTextureFormat_Bc6hRgbUfloat = 48,
  /**
   * 4x4 block compressed texture. 16 bytes per block (8 bit/px). Variable-sized palette. 16 bit signed float RGB. Float in shader.
   *
   * Also known as BPTC (float).
   *
   * [`Features::TEXTURE_COMPRESSION_BC`] must be enabled to use this texture format.
   */
  WGPUTextureFormat_Bc6hRgbSfloat = 49,
  /**
   * 4x4 block compressed texture. 16 bytes per block (8 bit/px). Variable-sized palette. 8 bit integer RGBA.
   * [0, 256] converted to/from float [0, 1] in shader.
   *
   * Also known as BPTC (unorm).
   *
   * [`Features::TEXTURE_COMPRESSION_BC`] must be enabled to use this texture format.
   */
  WGPUTextureFormat_Bc7RgbaUnorm = 50,
  /**
   * 4x4 block compressed texture. 16 bytes per block (8 bit/px). Variable-sized palette. 8 bit integer RGBA.
   * Srgb-color [0, 255] converted to/from linear-color float [0, 1] in shader.
   *
   * Also known as BPTC (unorm).
   *
   * [`Features::TEXTURE_COMPRESSION_BC`] must be enabled to use this texture format.
   */
  WGPUTextureFormat_Bc7RgbaUnormSrgb = 51,
  /**
   * Must be last for serialization purposes
   */
  WGPUTextureFormat_Sentinel,
};

/**
 * Dimensions of a particular texture view.
 */
enum WGPUTextureViewDimension {
  /**
   * A one dimensional texture. `texture1D` in glsl shaders.
   */
  WGPUTextureViewDimension_D1,
  /**
   * A two dimensional texture. `texture2D` in glsl shaders.
   */
  WGPUTextureViewDimension_D2,
  /**
   * A two dimensional array texture. `texture2DArray` in glsl shaders.
   */
  WGPUTextureViewDimension_D2Array,
  /**
   * A cubemap texture. `textureCube` in glsl shaders.
   */
  WGPUTextureViewDimension_Cube,
  /**
   * A cubemap array texture. `textureCubeArray` in glsl shaders.
   */
  WGPUTextureViewDimension_CubeArray,
  /**
   * A three dimensional texture. `texture3D` in glsl shaders.
   */
  WGPUTextureViewDimension_D3,
  /**
   * Must be last for serialization purposes
   */
  WGPUTextureViewDimension_Sentinel,
};

/**
 * Vertex Format for a Vertex Attribute (input).
 */
enum WGPUVertexFormat {
  /**
   * Two unsigned bytes (u8). `uvec2` in shaders.
   */
  WGPUVertexFormat_Uchar2 = 0,
  /**
   * Four unsigned bytes (u8). `uvec4` in shaders.
   */
  WGPUVertexFormat_Uchar4 = 1,
  /**
   * Two signed bytes (i8). `ivec2` in shaders.
   */
  WGPUVertexFormat_Char2 = 2,
  /**
   * Four signed bytes (i8). `ivec4` in shaders.
   */
  WGPUVertexFormat_Char4 = 3,
  /**
   * Two unsigned bytes (u8). [0, 255] converted to float [0, 1] `vec2` in shaders.
   */
  WGPUVertexFormat_Uchar2Norm = 4,
  /**
   * Four unsigned bytes (u8). [0, 255] converted to float [0, 1] `vec4` in shaders.
   */
  WGPUVertexFormat_Uchar4Norm = 5,
  /**
   * Two signed bytes (i8). [-127, 127] converted to float [-1, 1] `vec2` in shaders.
   */
  WGPUVertexFormat_Char2Norm = 6,
  /**
   * Four signed bytes (i8). [-127, 127] converted to float [-1, 1] `vec4` in shaders.
   */
  WGPUVertexFormat_Char4Norm = 7,
  /**
   * Two unsigned shorts (u16). `uvec2` in shaders.
   */
  WGPUVertexFormat_Ushort2 = 8,
  /**
   * Four unsigned shorts (u16). `uvec4` in shaders.
   */
  WGPUVertexFormat_Ushort4 = 9,
  /**
   * Two signed shorts (i16). `ivec2` in shaders.
   */
  WGPUVertexFormat_Short2 = 10,
  /**
   * Four signed shorts (i16). `ivec4` in shaders.
   */
  WGPUVertexFormat_Short4 = 11,
  /**
   * Two unsigned shorts (u16). [0, 65535] converted to float [0, 1] `vec2` in shaders.
   */
  WGPUVertexFormat_Ushort2Norm = 12,
  /**
   * Four unsigned shorts (u16). [0, 65535] converted to float [0, 1] `vec4` in shaders.
   */
  WGPUVertexFormat_Ushort4Norm = 13,
  /**
   * Two signed shorts (i16). [-32767, 32767] converted to float [-1, 1] `vec2` in shaders.
   */
  WGPUVertexFormat_Short2Norm = 14,
  /**
   * Four signed shorts (i16). [-32767, 32767] converted to float [-1, 1] `vec4` in shaders.
   */
  WGPUVertexFormat_Short4Norm = 15,
  /**
   * Two half-precision floats (no Rust equiv). `vec2` in shaders.
   */
  WGPUVertexFormat_Half2 = 16,
  /**
   * Four half-precision floats (no Rust equiv). `vec4` in shaders.
   */
  WGPUVertexFormat_Half4 = 17,
  /**
   * One single-precision float (f32). `float` in shaders.
   */
  WGPUVertexFormat_Float = 18,
  /**
   * Two single-precision floats (f32). `vec2` in shaders.
   */
  WGPUVertexFormat_Float2 = 19,
  /**
   * Three single-precision floats (f32). `vec3` in shaders.
   */
  WGPUVertexFormat_Float3 = 20,
  /**
   * Four single-precision floats (f32). `vec4` in shaders.
   */
  WGPUVertexFormat_Float4 = 21,
  /**
   * One unsigned int (u32). `uint` in shaders.
   */
  WGPUVertexFormat_Uint = 22,
  /**
   * Two unsigned ints (u32). `uvec2` in shaders.
   */
  WGPUVertexFormat_Uint2 = 23,
  /**
   * Three unsigned ints (u32). `uvec3` in shaders.
   */
  WGPUVertexFormat_Uint3 = 24,
  /**
   * Four unsigned ints (u32). `uvec4` in shaders.
   */
  WGPUVertexFormat_Uint4 = 25,
  /**
   * One signed int (i32). `int` in shaders.
   */
  WGPUVertexFormat_Int = 26,
  /**
   * Two signed ints (i32). `ivec2` in shaders.
   */
  WGPUVertexFormat_Int2 = 27,
  /**
   * Three signed ints (i32). `ivec3` in shaders.
   */
  WGPUVertexFormat_Int3 = 28,
  /**
   * Four signed ints (i32). `ivec4` in shaders.
   */
  WGPUVertexFormat_Int4 = 29,
  /**
   * Must be last for serialization purposes
   */
  WGPUVertexFormat_Sentinel,
};

/**
 * The internal enum mirrored from `BufferUsage`. The values don't have to match!
 */
struct WGPUBufferUse;

struct WGPUClient;

struct WGPUComputePass;

struct WGPUGlobal;

/**
 * Describes a pipeline layout.
 *
 * A `PipelineLayoutDescriptor` can be used to create a pipeline layout.
 */
struct WGPUPipelineLayoutDescriptor;

struct WGPURenderBundleEncoder;

struct WGPURenderPass;

/**
 * The internal enum mirrored from `TextureUsage`. The values don't have to match!
 */
struct WGPUTextureUse;

struct WGPUInfrastructure {
  struct WGPUClient *client;
  const uint8_t *error;
};

typedef WGPUNonZeroU64 WGPUId_Adapter_Dummy;

typedef WGPUId_Adapter_Dummy WGPUAdapterId;

typedef WGPUNonZeroU64 WGPUId_Device_Dummy;

typedef WGPUId_Device_Dummy WGPUDeviceId;

typedef WGPUNonZeroU64 WGPUId_Buffer_Dummy;

typedef WGPUId_Buffer_Dummy WGPUBufferId;

typedef const char *WGPURawString;

/**
 * Integral type used for buffer offsets.
 */
typedef uint64_t WGPUBufferAddress;

/**
 * Different ways that you can use a buffer.
 *
 * The usages determine what kind of memory the buffer is allocated from and what
 * actions the buffer can partake in.
 */
typedef uint32_t WGPUBufferUsage;
/**
 * Allow a buffer to be mapped for reading using [`Buffer::map_async`] + [`Buffer::get_mapped_range`].
 * This does not include creating a buffer with [`BufferDescriptor::mapped_at_creation`] set.
 *
 * If [`Features::MAPPABLE_PRIMARY_BUFFERS`] isn't enabled, the only other usage a buffer
 * may have is COPY_DST.
 */
#define WGPUBufferUsage_MAP_READ (uint32_t)1
/**
 * Allow a buffer to be mapped for writing using [`Buffer::map_async`] + [`Buffer::get_mapped_range_mut`].
 * This does not include creating a buffer with `mapped_at_creation` set.
 *
 * If [`Features::MAPPABLE_PRIMARY_BUFFERS`] feature isn't enabled, the only other usage a buffer
 * may have is COPY_SRC.
 */
#define WGPUBufferUsage_MAP_WRITE (uint32_t)2
/**
 * Allow a buffer to be the source buffer for a [`CommandEncoder::copy_buffer_to_buffer`] or [`CommandEncoder::copy_buffer_to_texture`]
 * operation.
 */
#define WGPUBufferUsage_COPY_SRC (uint32_t)4
/**
 * Allow a buffer to be the destination buffer for a [`CommandEncoder::copy_buffer_to_buffer`], [`CommandEncoder::copy_texture_to_buffer`],
 * or [`Queue::write_buffer`] operation.
 */
#define WGPUBufferUsage_COPY_DST (uint32_t)8
/**
 * Allow a buffer to be the index buffer in a draw operation.
 */
#define WGPUBufferUsage_INDEX (uint32_t)16
/**
 * Allow a buffer to be the vertex buffer in a draw operation.
 */
#define WGPUBufferUsage_VERTEX (uint32_t)32
/**
 * Allow a buffer to be a [`BindingType::UniformBuffer`] inside a bind group.
 */
#define WGPUBufferUsage_UNIFORM (uint32_t)64
/**
 * Allow a buffer to be a [`BindingType::StorageBuffer`] inside a bind group.
 */
#define WGPUBufferUsage_STORAGE (uint32_t)128
/**
 * Allow a buffer to be the indirect buffer in an indirect draw call.
 */
#define WGPUBufferUsage_INDIRECT (uint32_t)256
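/*
 * Usage sketch (illustrative, not produced by cbindgen): the
 * WGPUBufferUsage_* values are bit flags and are combined with bitwise OR,
 * e.g. a staging buffer that the CPU writes and the GPU copies from:
 *
 *   WGPUBufferUsage staging_usage =
 *       WGPUBufferUsage_MAP_WRITE | WGPUBufferUsage_COPY_SRC;
 */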

/**
 * Describes a [`Buffer`].
 */
struct WGPUBufferDescriptor {
  /**
   * Debug label of a buffer. This will show up in graphics debuggers for easy identification.
   */
  WGPURawString label;
  /**
   * Size of a buffer.
   */
  WGPUBufferAddress size;
  /**
   * Usages of a buffer. If the buffer is used in any way that isn't specified here, the operation
   * will panic.
   */
  WGPUBufferUsage usage;
  /**
   * Allows a buffer to be mapped immediately after it is created. The buffer does not need
   * [`BufferUsage::MAP_READ`] or [`BufferUsage::MAP_WRITE`]; all buffers may be mapped at creation.
   */
  bool mapped_at_creation;
};
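/*
 * Usage sketch (illustrative, not produced by cbindgen): filling out a
 * WGPUBufferDescriptor with C99 designated initializers; the label and size
 * are placeholder values.
 *
 *   struct WGPUBufferDescriptor vertex_buffer_desc = {
 *       .label = "example-vertices",
 *       .size = 64 * 1024,
 *       .usage = WGPUBufferUsage_VERTEX | WGPUBufferUsage_COPY_DST,
 *       .mapped_at_creation = false,
 *   };
 */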

typedef WGPUNonZeroU64 WGPUId_Texture_Dummy;

typedef WGPUId_Texture_Dummy WGPUTextureId;

/**
 * Extent of a texture related operation.
 */
struct WGPUExtent3d {
  uint32_t width;
  uint32_t height;
  uint32_t depth;
};

/**
 * Different ways that you can use a texture.
 *
 * The usages determine what kind of memory the texture is allocated from and what
 * actions the texture can partake in.
 */
typedef uint32_t WGPUTextureUsage;
/**
 * Allows a texture to be the source in a [`CommandEncoder::copy_texture_to_buffer`] or
 * [`CommandEncoder::copy_texture_to_texture`] operation.
 */
#define WGPUTextureUsage_COPY_SRC (uint32_t)1
/**
 * Allows a texture to be the destination in a  [`CommandEncoder::copy_texture_to_buffer`],
 * [`CommandEncoder::copy_texture_to_texture`], or [`Queue::write_texture`] operation.
 */
#define WGPUTextureUsage_COPY_DST (uint32_t)2
/**
 * Allows a texture to be a [`BindingType::SampledTexture`] in a bind group.
 */
#define WGPUTextureUsage_SAMPLED (uint32_t)4
/**
 * Allows a texture to be a [`BindingType::StorageTexture`] in a bind group.
 */
#define WGPUTextureUsage_STORAGE (uint32_t)8
/**
 * Allows a texture to be an output attachment of a renderpass.
 */
#define WGPUTextureUsage_OUTPUT_ATTACHMENT (uint32_t)16

/**
 * Describes a [`Texture`].
 */
struct WGPUTextureDescriptor {
  /**
   * Debug label of the texture. This will show up in graphics debuggers for easy identification.
   */
  WGPURawString label;
  /**
   * Size of the texture. For a regular 1D/2D texture, the unused sizes will be 1. For 2DArray textures, Z is the
   * number of 2D textures in that array.
   */
  struct WGPUExtent3d size;
  /**
   * Mip count of texture. For a texture with no extra mips, this must be 1.
   */
  uint32_t mip_level_count;
  /**
   * Sample count of texture. If this is not 1, texture must have [`BindingType::SampledTexture::multisampled`] set to true.
   */
  uint32_t sample_count;
  /**
   * Dimensions of the texture.
   */
  enum WGPUTextureDimension dimension;
  /**
   * Format of the texture.
   */
  enum WGPUTextureFormat format;
  /**
   * Allowed usages of the texture. If used in other ways, the operation will panic.
   */
  WGPUTextureUsage usage;
};
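/*
 * Usage sketch (illustrative, not produced by cbindgen): describing a 2D
 * BGRA8 render target; all concrete values are placeholders.
 *
 *   struct WGPUTextureDescriptor color_target_desc = {
 *       .label = "example-color-target",
 *       .size = { .width = 1024, .height = 768, .depth = 1 },
 *       .mip_level_count = 1,
 *       .sample_count = 1,
 *       .dimension = WGPUTextureDimension_D2,
 *       .format = WGPUTextureFormat_Bgra8Unorm,
 *       .usage = WGPUTextureUsage_OUTPUT_ATTACHMENT | WGPUTextureUsage_COPY_SRC,
 *   };
 */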

typedef WGPUNonZeroU64 WGPUId_TextureView_Dummy;

typedef WGPUId_TextureView_Dummy WGPUTextureViewId;

struct WGPUTextureViewDescriptor {
  WGPURawString label;
  const enum WGPUTextureFormat *format;
  const enum WGPUTextureViewDimension *dimension;
  enum WGPUTextureAspect aspect;
  uint32_t base_mip_level;
  WGPUOption_NonZeroU32 level_count;
  uint32_t base_array_layer;
  WGPUOption_NonZeroU32 array_layer_count;
};

typedef WGPUNonZeroU64 WGPUId_Sampler_Dummy;

typedef WGPUId_Sampler_Dummy WGPUSamplerId;

struct WGPUSamplerDescriptor {
  WGPURawString label;
  enum WGPUAddressMode address_modes[3];
  enum WGPUFilterMode mag_filter;
  enum WGPUFilterMode min_filter;
  enum WGPUFilterMode mipmap_filter;
  float lod_min_clamp;
  float lod_max_clamp;
  const enum WGPUCompareFunction *compare;
  WGPUOption_NonZeroU8 anisotropy_clamp;
};
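/*
 * Usage sketch (illustrative, not produced by cbindgen): a basic trilinear
 * clamping sampler. `compare` is left NULL (no comparison sampling); an
 * `anisotropy_clamp` of 0 is assumed to encode "none" given the
 * WGPUOption_NonZeroU8 naming.
 *
 *   struct WGPUSamplerDescriptor linear_sampler_desc = {
 *       .label = "example-sampler",
 *       .address_modes = { WGPUAddressMode_ClampToEdge,
 *                          WGPUAddressMode_ClampToEdge,
 *                          WGPUAddressMode_ClampToEdge },
 *       .mag_filter = WGPUFilterMode_Linear,
 *       .min_filter = WGPUFilterMode_Linear,
 *       .mipmap_filter = WGPUFilterMode_Linear,
 *       .lod_min_clamp = 0.0f,
 *       .lod_max_clamp = 1000.0f,
 *       .compare = NULL,
 *       .anisotropy_clamp = 0,
 *   };
 */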

typedef WGPUNonZeroU64 WGPUId_CommandBuffer_Dummy;

typedef WGPUId_CommandBuffer_Dummy WGPUCommandBufferId;

typedef WGPUCommandBufferId WGPUCommandEncoderId;

/**
 * Describes a [`CommandEncoder`].
 */
struct WGPUCommandEncoderDescriptor {
  /**
   * Debug label for the command encoder. This will show up in graphics debuggers for easy identification.
   */
  WGPURawString label;
};

struct WGPUComputePassDescriptor {
  uint32_t todo;
};

/**
 * RGBA double precision color.
 *
 * This is not to be used as a generic color type, only for specific wgpu interfaces.
 */
struct WGPUColor {
  double r;
  double g;
  double b;
  double a;
};
#define WGPUColor_TRANSPARENT (struct WGPUColor){ .r = 0.0, .g = 0.0, .b = 0.0, .a = 0.0 }
#define WGPUColor_BLACK (struct WGPUColor){ .r = 0.0, .g = 0.0, .b = 0.0, .a = 1.0 }
#define WGPUColor_WHITE (struct WGPUColor){ .r = 1.0, .g = 1.0, .b = 1.0, .a = 1.0 }
#define WGPUColor_RED (struct WGPUColor){ .r = 1.0, .g = 0.0, .b = 0.0, .a = 1.0 }
#define WGPUColor_GREEN (struct WGPUColor){ .r = 0.0, .g = 1.0, .b = 0.0, .a = 1.0 }
#define WGPUColor_BLUE (struct WGPUColor){ .r = 0.0, .g = 0.0, .b = 1.0, .a = 1.0 }

/**
 * Describes an individual channel within a render pass, such as color, depth, or stencil.
 */
struct WGPUPassChannel_Color {
  /**
   * Operation to perform to the output attachment at the start of a renderpass. This must be clear if it
   * is the first renderpass rendering to a swap chain image.
   */
  enum WGPULoadOp load_op;
  /**
   * Operation to perform to the output attachment at the end of a renderpass.
   */
  enum WGPUStoreOp store_op;
  /**
   * If load_op is [`LoadOp::Clear`], the attachment will be cleared to this color.
   */
  struct WGPUColor clear_value;
  /**
   * If true, the relevant channel is not changed by a renderpass, and the corresponding attachment
   * can be used inside the pass by other read-only usages.
   */
  bool read_only;
};

/**
 * Describes a color attachment to a render pass.
 */
struct WGPUColorAttachmentDescriptor {
  /**
   * The view to use as an attachment.
   */
  WGPUTextureViewId attachment;
  /**
   * The view that will receive the resolved output if multisampling is used.
   */
  WGPUOption_TextureViewId resolve_target;
  /**
   * What operations will be performed on this color attachment.
   */
  struct WGPUPassChannel_Color channel;
};

/**
 * Describes an individual channel within a render pass, such as color, depth, or stencil.
 */
struct WGPUPassChannel_f32 {
  /**
   * Operation to perform to the output attachment at the start of a renderpass. This must be clear if it
   * is the first renderpass rendering to a swap chain image.
   */
  enum WGPULoadOp load_op;
  /**
   * Operation to perform to the output attachment at the end of a renderpass.
   */
  enum WGPUStoreOp store_op;
  /**
   * If load_op is [`LoadOp::Clear`], the attachment will be cleared to this value.
   */
  float clear_value;
  /**
   * If true, the relevant channel is not changed by a renderpass, and the corresponding attachment
   * can be used inside the pass by other read-only usages.
   */
  bool read_only;
};

/**
 * Describes an individual channel within a render pass, such as color, depth, or stencil.
 */
struct WGPUPassChannel_u32 {
  /**
   * Operation to perform to the output attachment at the start of a renderpass. This must be clear if it
   * is the first renderpass rendering to a swap chain image.
   */
  enum WGPULoadOp load_op;
  /**
   * Operation to perform to the output attachment at the end of a renderpass.
   */
  enum WGPUStoreOp store_op;
  /**
   * If load_op is [`LoadOp::Clear`], the attachment will be cleared to this value.
   */
  uint32_t clear_value;
  /**
   * If true, the relevant channel is not changed by a renderpass, and the corresponding attachment
   * can be used inside the pass by other read-only usages.
   */
  bool read_only;
};

/**
 * Describes a depth/stencil attachment to a render pass.
 */
struct WGPUDepthStencilAttachmentDescriptor {
  /**
   * The view to use as an attachment.
   */
  WGPUTextureViewId attachment;
  /**
   * What operations will be performed on the depth part of the attachment.
   */
  struct WGPUPassChannel_f32 depth;
  /**
   * What operations will be performed on the stencil part of the attachment.
   */
  struct WGPUPassChannel_u32 stencil;
};

struct WGPURenderPassDescriptor {
  const struct WGPUColorAttachmentDescriptor *color_attachments;
  uintptr_t color_attachments_length;
  const struct WGPUDepthStencilAttachmentDescriptor *depth_stencil_attachment;
};
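/*
 * Usage sketch (illustrative, not produced by cbindgen): a render pass with a
 * single color attachment that clears to opaque black and has no
 * depth/stencil attachment. `view_id` stands in for a WGPUTextureViewId
 * obtained elsewhere; a `resolve_target` of 0 is assumed to encode "none".
 *
 *   struct WGPUColorAttachmentDescriptor color_attachment = {
 *       .attachment = view_id,
 *       .resolve_target = 0,
 *       .channel = {
 *           .load_op = WGPULoadOp_Clear,
 *           .store_op = WGPUStoreOp_Store,
 *           .clear_value = { .r = 0.0, .g = 0.0, .b = 0.0, .a = 1.0 },
 *           .read_only = false,
 *       },
 *   };
 *   struct WGPURenderPassDescriptor pass_desc = {
 *       .color_attachments = &color_attachment,
 *       .color_attachments_length = 1,
 *       .depth_stencil_attachment = NULL,
 *   };
 */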

typedef WGPUNonZeroU64 WGPUId_BindGroupLayout_Dummy;

typedef WGPUId_BindGroupLayout_Dummy WGPUBindGroupLayoutId;

typedef WGPUNonZeroU64 WGPUId_PipelineLayout_Dummy;

typedef WGPUId_PipelineLayout_Dummy WGPUPipelineLayoutId;

typedef WGPUNonZeroU64 WGPUId_BindGroup_Dummy;

typedef WGPUId_BindGroup_Dummy WGPUBindGroupId;

typedef WGPUNonZeroU64 WGPUId_ShaderModule_Dummy;

typedef WGPUId_ShaderModule_Dummy WGPUShaderModuleId;

struct WGPUShaderModuleDescriptor {
  const uint32_t *spirv_words;
  uintptr_t spirv_words_length;
  WGPURawString wgsl_chars;
};

typedef WGPUNonZeroU64 WGPUId_ComputePipeline_Dummy;

typedef WGPUId_ComputePipeline_Dummy WGPUComputePipelineId;

struct WGPUProgrammableStageDescriptor {
  WGPUShaderModuleId module;
  WGPURawString entry_point;
};

struct WGPUComputePipelineDescriptor {
  WGPURawString label;
  WGPUOption_PipelineLayoutId layout;
  struct WGPUProgrammableStageDescriptor compute_stage;
};

typedef WGPUNonZeroU64 WGPUId_RenderPipeline_Dummy;

typedef WGPUId_RenderPipeline_Dummy WGPURenderPipelineId;

/**
 * Describes the state of the rasterizer in a render pipeline.
 */
struct WGPURasterizationStateDescriptor {
  enum WGPUFrontFace front_face;
  enum WGPUCullMode cull_mode;
  /**
   * Controls the way each polygon is rasterized. Can be either `Fill` (default), `Line` or `Point`
   *
   * Setting this to something other than `Fill` requires `Features::NON_FILL_POLYGON_MODE` to be enabled.
   */
  enum WGPUPolygonMode polygon_mode;
  /**
   * If enabled, polygon depth is clamped to the 0-1 range instead of being clipped.
   *
   * Requires `Features::DEPTH_CLAMPING` enabled.
   */
  bool clamp_depth;
  int32_t depth_bias;
  float depth_bias_slope_scale;
  float depth_bias_clamp;
};

/**
 * Describes the blend state of a pipeline.
 *
 * Alpha blending is very complicated: see the OpenGL or Vulkan spec for more information.
 */
struct WGPUBlendDescriptor {
  enum WGPUBlendFactor src_factor;
  enum WGPUBlendFactor dst_factor;
  enum WGPUBlendOperation operation;
};

/**
 * Color write mask. Disabled color channels will not be written to.
 */
typedef uint32_t WGPUColorWrite;
/**
 * Enable red channel writes
 */
#define WGPUColorWrite_RED (uint32_t)1
/**
 * Enable green channel writes
 */
#define WGPUColorWrite_GREEN (uint32_t)2
/**
 * Enable blue channel writes
 */
#define WGPUColorWrite_BLUE (uint32_t)4
/**
 * Enable alpha channel writes
 */
#define WGPUColorWrite_ALPHA (uint32_t)8
/**
 * Enable red, green, and blue channel writes
 */
#define WGPUColorWrite_COLOR (uint32_t)7
/**
 * Enable writes to all channels.
 */
#define WGPUColorWrite_ALL (uint32_t)15

/**
 * Describes the color state of a render pipeline.
 */
struct WGPUColorStateDescriptor {
  /**
   * The [`TextureFormat`] of the image that this pipeline will render to. Must match the format
   * of the corresponding color attachment in [`CommandEncoder::begin_render_pass`].
   */
  enum WGPUTextureFormat format;
  /**
   * The alpha blending that is used for this pipeline.
   */
  struct WGPUBlendDescriptor alpha_blend;
  /**
   * The color blending that is used for this pipeline.
   */
  struct WGPUBlendDescriptor color_blend;
  /**
   * Mask which enables/disables writes to the different color/alpha channels.
   */
  WGPUColorWrite write_mask;
};
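/*
 * Usage sketch (illustrative, not produced by cbindgen): a color state that
 * performs conventional "source-over" alpha blending into a BGRA8 target and
 * writes all channels.
 *
 *   struct WGPUColorStateDescriptor blended_color_state = {
 *       .format = WGPUTextureFormat_Bgra8Unorm,
 *       .alpha_blend = {
 *           .src_factor = WGPUBlendFactor_One,
 *           .dst_factor = WGPUBlendFactor_OneMinusSrcAlpha,
 *           .operation = WGPUBlendOperation_Add,
 *       },
 *       .color_blend = {
 *           .src_factor = WGPUBlendFactor_SrcAlpha,
 *           .dst_factor = WGPUBlendFactor_OneMinusSrcAlpha,
 *           .operation = WGPUBlendOperation_Add,
 *       },
 *       .write_mask = WGPUColorWrite_ALL,
 *   };
 */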

/**
 * Describes stencil state in a render pipeline.
 *
 * If you are not using stencil state, set this to [`StencilStateFaceDescriptor::IGNORE`].
 */
struct WGPUStencilStateFaceDescriptor {
  /**
   * Comparison function that determines if the fail_op or pass_op is used on the stencil buffer.
   */
  enum WGPUCompareFunction compare;
  /**
   * Operation that is performed when the stencil test fails.
   */
  enum WGPUStencilOperation fail_op;
  /**
   * Operation that is performed when depth test fails but stencil test succeeds.
   */
  enum WGPUStencilOperation depth_fail_op;
  /**
   * Operation that is performed when the stencil test succeeds.
   */
  enum WGPUStencilOperation pass_op;
};

struct WGPUStencilStateDescriptor {
  /**
   * Front face mode.
   */
  struct WGPUStencilStateFaceDescriptor front;
  /**
   * Back face mode.
   */
  struct WGPUStencilStateFaceDescriptor back;
  /**
   * Stencil values are AND'd with this mask when reading from and writing to the stencil buffer. Only the low 8 bits are used.
   */
  uint32_t read_mask;
  /**
   * Stencil values are AND'd with this mask when writing to the stencil buffer. Only the low 8 bits are used.
   */
  uint32_t write_mask;
};

/**
 * Describes the depth/stencil state in a render pipeline.
 */
struct WGPUDepthStencilStateDescriptor {
  /**
   * Format of the depth/stencil buffer, must be a special depth format. Must match the format
   * of the depth/stencil attachment in [`CommandEncoder::begin_render_pass`].
   */
  enum WGPUTextureFormat format;
  /**
   * If disabled, depth will not be written to.
   */
  bool depth_write_enabled;
  /**
   * Comparison function used to compare depth values in the depth test.
   */
  enum WGPUCompareFunction depth_compare;
  struct WGPUStencilStateDescriptor stencil;
};

/**
 * Integral type used for binding locations in shaders.
 */
typedef uint32_t WGPUShaderLocation;

/**
 * Vertex inputs (attributes) to shaders.
 *
 * Arrays of these can be made with the [`vertex_attr_array`] macro. Vertex attributes are assumed to be tightly packed.
 */
struct WGPUVertexAttributeDescriptor {
  /**
   * Byte offset of the start of the input.
   */
  WGPUBufferAddress offset;
  /**
   * Format of the input.
   */
  enum WGPUVertexFormat format;
  /**
   * Location for this input. Must match the location in the shader.
   */
  WGPUShaderLocation shader_location;
};

struct WGPUVertexBufferDescriptor {
  WGPUBufferAddress stride;
  enum WGPUInputStepMode step_mode;
  const struct WGPUVertexAttributeDescriptor *attributes;
  uintptr_t attributes_length;
};

struct WGPUVertexStateDescriptor {
  enum WGPUIndexFormat index_format;
  const struct WGPUVertexBufferDescriptor *vertex_buffers;
  uintptr_t vertex_buffers_length;
};
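
/*
 * Editorial example: a sketch of one vertex buffer carrying a tightly packed
 * position (3 floats) followed by a texture coordinate (2 floats). The
 * WGPUVertexFormat_Float3, WGPUVertexFormat_Float2 and WGPUInputStepMode_Vertex
 * enumerator names are assumed from the enums declared earlier in this header.
 *
 *   static const struct WGPUVertexAttributeDescriptor attrs[2] = {
 *     { .offset = 0,                 .format = WGPUVertexFormat_Float3, .shader_location = 0 },
 *     { .offset = 3 * sizeof(float), .format = WGPUVertexFormat_Float2, .shader_location = 1 },
 *   };
 *   struct WGPUVertexBufferDescriptor buffer = {
 *     .stride = 5 * sizeof(float),
 *     .step_mode = WGPUInputStepMode_Vertex,
 *     .attributes = attrs,
 *     .attributes_length = 2,
 *   };
 */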

struct WGPURenderPipelineDescriptor {
  WGPURawString label;
  WGPUOption_PipelineLayoutId layout;
  const struct WGPUProgrammableStageDescriptor *vertex_stage;
  const struct WGPUProgrammableStageDescriptor *fragment_stage;
  enum WGPUPrimitiveTopology primitive_topology;
  const struct WGPURasterizationStateDescriptor *rasterization_state;
  const struct WGPUColorStateDescriptor *color_states;
  uintptr_t color_states_length;
  const struct WGPUDepthStencilStateDescriptor *depth_stencil_state;
  struct WGPUVertexStateDescriptor vertex_state;
  uint32_t sample_count;
  uint32_t sample_mask;
  bool alpha_to_coverage_enabled;
};

typedef void *WGPUFactoryParam;

typedef WGPUNonZeroU64 WGPUId_SwapChain_Dummy;

typedef WGPUId_SwapChain_Dummy WGPUSwapChainId;

typedef WGPUNonZeroU64 WGPUId_RenderBundle;

typedef WGPUId_RenderBundle WGPURenderBundleId;

typedef WGPUNonZeroU64 WGPUId_Surface;

typedef WGPUId_Surface WGPUSurfaceId;

struct WGPUIdentityRecyclerFactory {
  WGPUFactoryParam param;
  void (*free_adapter)(WGPUAdapterId, WGPUFactoryParam);
  void (*free_device)(WGPUDeviceId, WGPUFactoryParam);
  void (*free_swap_chain)(WGPUSwapChainId, WGPUFactoryParam);
  void (*free_pipeline_layout)(WGPUPipelineLayoutId, WGPUFactoryParam);
  void (*free_shader_module)(WGPUShaderModuleId, WGPUFactoryParam);
  void (*free_bind_group_layout)(WGPUBindGroupLayoutId, WGPUFactoryParam);
  void (*free_bind_group)(WGPUBindGroupId, WGPUFactoryParam);
  void (*free_command_buffer)(WGPUCommandBufferId, WGPUFactoryParam);
  void (*free_render_bundle)(WGPURenderBundleId, WGPUFactoryParam);
  void (*free_render_pipeline)(WGPURenderPipelineId, WGPUFactoryParam);
  void (*free_compute_pipeline)(WGPUComputePipelineId, WGPUFactoryParam);
  void (*free_buffer)(WGPUBufferId, WGPUFactoryParam);
  void (*free_texture)(WGPUTextureId, WGPUFactoryParam);
  void (*free_texture_view)(WGPUTextureViewId, WGPUFactoryParam);
  void (*free_sampler)(WGPUSamplerId, WGPUFactoryParam);
  void (*free_surface)(WGPUSurfaceId, WGPUFactoryParam);
};

/**
 * Options for requesting an adapter.
 */
struct WGPURequestAdapterOptions_SurfaceId {
  /**
   * Power preference for the adapter.
   */
  enum WGPUPowerPreference power_preference;
  /**
   * Surface that is required to be presentable with the requested adapter. This does not
   * create the surface, only guarantees that the adapter can present to said surface.
   */
  WGPUOption_SurfaceId compatible_surface;
};

typedef struct WGPURequestAdapterOptions_SurfaceId WGPURequestAdapterOptions;

/**
 * Features that are not guaranteed to be supported.
 *
 * These are either part of the WebGPU standard, or are extension features supported by
 * wgpu when targeting native.
 *
 * If you want to use a feature, you need to first verify that the adapter supports
 * the feature. If the adapter does not support the feature, requesting a device with it enabled
 * will panic.
 */
typedef uint64_t WGPUFeatures;
/**
 * By default, polygon depth is clipped to the 0-1 range. Anything outside of that range
 * is rejected, and respective fragments are not touched.
 *
 * With this extension, we can force clamping of the polygon depth to 0-1. That allows
 * shadow map occluders to be rendered into a tighter depth range.
 *
 * Supported platforms:
 * - desktops
 * - some mobile chips
 *
 * This is a web and native feature.
 */
#define WGPUFeatures_DEPTH_CLAMPING (uint64_t)1
/**
 * Enables BCn family of compressed textures. All BCn textures use 4x4 pixel blocks
 * with 8 or 16 bytes per block.
 *
 * Compressed textures sacrifice some quality in exchange for significantly reduced
 * bandwidth usage.
 *
 * Supported Platforms:
 * - desktops
 *
 * This is a web and native feature.
 */
#define WGPUFeatures_TEXTURE_COMPRESSION_BC (uint64_t)2
/**
 * WebGPU only allows the MAP_READ and MAP_WRITE buffer usages to be matched with
 * COPY_DST and COPY_SRC respectively. This removes this requirement.
 *
 * This is only beneficial on systems that share memory between CPU and GPU. If enabled
 * on a system that doesn't, this can severely hinder performance. Only use if you understand
 * the consequences.
 *
 * Supported platforms:
 * - All
 *
 * This is a native only feature.
 */
#define WGPUFeatures_MAPPABLE_PRIMARY_BUFFERS (uint64_t)65536
/**
 * Allows the user to create uniform arrays of sampled textures in shaders:
 *
 * eg. `uniform texture2D textures[10]`.
 *
 * This capability allows them to exist and to be indexed by compile time constant
 * values.
 *
 * Supported platforms:
 * - DX12
 * - Metal (with MSL 2.0+ on macOS 10.13+)
 * - Vulkan
 *
 * This is a native only feature.
 */
#define WGPUFeatures_SAMPLED_TEXTURE_BINDING_ARRAY (uint64_t)131072
/**
 * Allows shaders to index sampled texture arrays with dynamically uniform values:
 *
 * eg. `texture_array[uniform_value]`
 *
 * This capability means the hardware will also support SAMPLED_TEXTURE_BINDING_ARRAY.
 *
 * Supported platforms:
 * - DX12
 * - Metal (with MSL 2.0+ on macOS 10.13+)
 * - Vulkan's shaderSampledImageArrayDynamicIndexing feature
 *
 * This is a native only feature.
 */
#define WGPUFeatures_SAMPLED_TEXTURE_ARRAY_DYNAMIC_INDEXING (uint64_t)262144
/**
 * Allows shaders to index sampled texture arrays with dynamically non-uniform values:
 *
 * eg. `texture_array[vertex_data]`
 *
 * In order to use this capability, the corresponding GLSL extension must be enabled like so:
 *
 * `#extension GL_EXT_nonuniform_qualifier : require`
 *
 * and then used either as `nonuniformEXT` qualifier in variable declaration:
 *
 * eg. `layout(location = 0) nonuniformEXT flat in int vertex_data;`
 *
 * or as `nonuniformEXT` constructor:
 *
 * eg. `texture_array[nonuniformEXT(vertex_data)]`
 *
 * HLSL does not need any extension.
 *
 * This capability means the hardware will also support SAMPLED_TEXTURE_ARRAY_DYNAMIC_INDEXING
 * and SAMPLED_TEXTURE_BINDING_ARRAY.
 *
 * Supported platforms:
 * - DX12
 * - Metal (with MSL 2.0+ on macOS 10.13+)
 * - Vulkan 1.2+ (or VK_EXT_descriptor_indexing)'s shaderSampledImageArrayNonUniformIndexing feature
 *
 * This is a native only feature.
 */
#define WGPUFeatures_SAMPLED_TEXTURE_ARRAY_NON_UNIFORM_INDEXING (uint64_t)524288
/**
 * Allows the user to create unsized uniform arrays of bindings:
 *
 * eg. `uniform texture2D textures[]`.
 *
 * If this capability is supported, SAMPLED_TEXTURE_ARRAY_NON_UNIFORM_INDEXING is very likely
 * to also be supported.
 *
 * Supported platforms:
 * - DX12
 * - Vulkan 1.2+ (or VK_EXT_descriptor_indexing)'s runtimeDescriptorArray feature
 *
 * This is a native only feature.
 */
#define WGPUFeatures_UNSIZED_BINDING_ARRAY (uint64_t)1048576
/**
 * Allows the user to call [`RenderPass::multi_draw_indirect`] and [`RenderPass::multi_draw_indexed_indirect`].
 *
 * Allows multiple indirect calls to be dispatched from a single buffer.
 *
 * Supported platforms:
 * - DX12
 * - Metal
 * - Vulkan
 *
 * This is a native only feature.
 */
#define WGPUFeatures_MULTI_DRAW_INDIRECT (uint64_t)2097152
/**
 * Allows the user to call [`RenderPass::multi_draw_indirect_count`] and [`RenderPass::multi_draw_indexed_indirect_count`].
 *
 * This allows the use of a buffer containing the actual number of draw calls.
 *
 * Supported platforms:
 * - DX12
 * - Vulkan 1.2+ (or VK_KHR_draw_indirect_count)
 *
 * This is a native only feature.
 */
#define WGPUFeatures_MULTI_DRAW_INDIRECT_COUNT (uint64_t)4194304
/**
 * Allows the use of push constants: small, fast bits of memory that can be updated
 * inside a [`RenderPass`].
 *
 * Allows the user to call [`RenderPass::set_push_constants`], provide a non-empty array
 * to [`PipelineLayoutDescriptor`], and provide a non-zero limit to [`Limits::max_push_constant_size`].
 *
 * A block of push constants can be declared with `layout(push_constant) uniform Name {..}` in shaders.
 *
 * Supported platforms:
 * - DX12
 * - Vulkan
 * - Metal
 * - DX11 (emulated with uniforms)
 * - OpenGL (emulated with uniforms)
 *
 * This is a native only feature.
 */
#define WGPUFeatures_PUSH_CONSTANTS (uint64_t)8388608
/**
 * Allows the use of [`AddressMode::ClampToBorder`].
 *
 * Supported platforms:
 * - DX12
 * - Vulkan
 * - Metal (macOS 10.12+ only)
 * - DX11
 * - OpenGL
 *
 * This is a web and native feature.
 */
#define WGPUFeatures_ADDRESS_MODE_CLAMP_TO_BORDER (uint64_t)16777216
/**
 * Allows the user to set a non-fill polygon mode in [`RasterizationStateDescriptor::polygon_mode`].
 *
 * This allows drawing polygons/triangles as lines (wireframe) or points instead of filled.
 *
 * Supported platforms:
 * - DX12
 * - Vulkan
 *
 * This is a native only feature.
 */
#define WGPUFeatures_NON_FILL_POLYGON_MODE (uint64_t)33554432
/**
 * Features which are part of the upstream WebGPU standard.
 */
#define WGPUFeatures_ALL_WEBGPU (uint64_t)65535
/**
 * Features that are only available when targeting native (not web).
 */
#define WGPUFeatures_ALL_NATIVE (uint64_t)18446744073709486080ULL
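
/*
 * Editorial example: WGPUFeatures is a plain bit set, so requested features
 * combine with bitwise OR and support can be tested with bitwise AND.
 * `adapter_features` stands in for however the adapter's supported bits are
 * obtained; that query is not part of this section.
 *
 *   WGPUFeatures wanted = WGPUFeatures_DEPTH_CLAMPING | WGPUFeatures_NON_FILL_POLYGON_MODE;
 *   if ((adapter_features & wanted) == wanted) {
 *     // safe to request a device with `wanted` enabled
 *   }
 */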

/**
 * Represents the sets of limits an adapter/device supports.
 *
 * Limits "better" than the default must be supported by the adapter and requested when requesting
 * a device. If limits "better" than the adapter supports are requested, requesting a device will panic.
 * Once a device is requested, you may only use resources up to the limits requested _even_ if the
 * adapter supports "better" limits.
 *
 * Requesting limits that are "better" than you need may cause performance to decrease because the
 * implementation needs to support more than is needed. You should ideally only request exactly what
 * you need.
 *
 * See also: https://gpuweb.github.io/gpuweb/#dictdef-gpulimits
 */
struct WGPULimits {
  /**
   * Amount of bind groups that can be attached to a pipeline at the same time. Defaults to 4. Higher is "better".
   */
  uint32_t max_bind_groups;
  /**
   * Amount of uniform buffer bindings that can be dynamic in a single pipeline. Defaults to 8. Higher is "better".
   */
  uint32_t max_dynamic_uniform_buffers_per_pipeline_layout;
  /**
   * Amount of storage buffer bindings that can be dynamic in a single pipeline. Defaults to 4. Higher is "better".
   */
  uint32_t max_dynamic_storage_buffers_per_pipeline_layout;
  /**
   * Amount of sampled textures visible in a single shader stage. Defaults to 16. Higher is "better".
   */
  uint32_t max_sampled_textures_per_shader_stage;
  /**
   * Amount of samplers visible in a single shader stage. Defaults to 16. Higher is "better".
   */
  uint32_t max_samplers_per_shader_stage;
  /**
   * Amount of storage buffers visible in a single shader stage. Defaults to 4. Higher is "better".
   */
  uint32_t max_storage_buffers_per_shader_stage;
  /**
   * Amount of storage textures visible in a single shader stage. Defaults to 4. Higher is "better".
   */
  uint32_t max_storage_textures_per_shader_stage;
  /**
   * Amount of uniform buffers visible in a single shader stage. Defaults to 12. Higher is "better".
   */
  uint32_t max_uniform_buffers_per_shader_stage;
  /**
   * Maximum size in bytes of a binding to a uniform buffer. Defaults to 16384. Higher is "better".
   */
  uint32_t max_uniform_buffer_binding_size;
  /**
   * Amount of storage available for push constants in bytes. Defaults to 0. Higher is "better".
   * Requesting more than 0 during device creation requires [`Features::PUSH_CONSTANTS`] to be enabled.
   *
   * Expect the size to be:
   * - Vulkan: 128-256 bytes
   * - DX12: 256 bytes
   * - Metal: 4096 bytes
   * - DX11 & OpenGL don't natively support push constants, and are emulated with uniforms,
   *   so this number is less useful.
   */
  uint32_t max_push_constant_size;
};
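
/*
 * Editorial example: the documented per-field defaults, spelled out as a C
 * initializer for reference.
 *
 *   static const struct WGPULimits kDefaultLimits = {
 *     .max_bind_groups = 4,
 *     .max_dynamic_uniform_buffers_per_pipeline_layout = 8,
 *     .max_dynamic_storage_buffers_per_pipeline_layout = 4,
 *     .max_sampled_textures_per_shader_stage = 16,
 *     .max_samplers_per_shader_stage = 16,
 *     .max_storage_buffers_per_shader_stage = 4,
 *     .max_storage_textures_per_shader_stage = 4,
 *     .max_uniform_buffers_per_shader_stage = 12,
 *     .max_uniform_buffer_binding_size = 16384,
 *     .max_push_constant_size = 0,
 *   };
 */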

/**
 * Describes a [`Device`].
 */
struct WGPUDeviceDescriptor {
  /**
   * Features that the device should support. If any feature is not supported by
   * the adapter, creating a device will panic.
   */
  WGPUFeatures features;
  /**
   * Limits that the device should support. If any limit is "better" than the limit exposed by
   * the adapter, creating a device will panic.
   */
  struct WGPULimits limits;
  /**
   * Switch shader validation on/off. This is a temporary field
   * that will be removed once our validation logic is complete.
   */
  bool shader_validation;
};
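
/*
 * Editorial example: a device request that enables depth clamping while keeping
 * the default limits (see the `kDefaultLimits` sketch above). Whether shader
 * validation should be on is the caller's choice; `true` is shown here.
 *
 *   struct WGPUDeviceDescriptor desc = {
 *     .features = WGPUFeatures_DEPTH_CLAMPING,
 *     .limits = kDefaultLimits,
 *     .shader_validation = true,
 *   };
 */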

typedef void (*WGPUBufferMapCallback)(enum WGPUBufferMapAsyncStatus status, uint8_t *userdata);

struct WGPUBufferMapOperation {
  enum WGPUHostMap host;
  WGPUBufferMapCallback callback;
  uint8_t *user_data;
};
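
/*
 * Editorial example: a sketch of a read-mapping operation, assuming the
 * WGPUHostMap_Read enumerator declared earlier in this header. The callback
 * receives the status and the same `user_data` pointer supplied below.
 *
 *   static void on_mapped(enum WGPUBufferMapAsyncStatus status, uint8_t *userdata) {
 *     (void)status;
 *     (void)userdata;
 *   }
 *
 *   struct WGPUBufferMapOperation op = {
 *     .host = WGPUHostMap_Read,
 *     .callback = on_mapped,
 *     .user_data = NULL,
 *   };
 */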

/**
 * Describes a [`CommandBuffer`].
 */
struct WGPUCommandBufferDescriptor {
  WGPURawString label;
};

/**
 * Origin of a copy to/from a texture.
 */
struct WGPUOrigin3d {
  uint32_t x;
  uint32_t y;
  uint32_t z;
};
#define WGPUOrigin3d_ZERO (WGPUOrigin3d){ .x = 0, .y = 0, .z = 0 }

/**
 * View of a texture which can be used to copy to/from a buffer/texture.
 */
struct WGPUTextureCopyView_TextureId {
  /**
   * The texture to be copied to/from.
   */
  WGPUTextureId texture;
  /**
   * The target mip level of the texture.
   */
  uint32_t mip_level;
  /**
   * The base texel of the texture in the selected `mip_level`.
   */
  struct WGPUOrigin3d origin;
};

typedef struct WGPUTextureCopyView_TextureId WGPUTextureCopyView;

/**
 * Layout of a texture in a buffer's memory.
 */
struct WGPUTextureDataLayout {
  /**
   * Offset into the buffer that is the start of the texture. Must be a multiple of the texture
   * block size (which is 1 for non-compressed textures).
   */
  WGPUBufferAddress offset;
  /**
   * Bytes per "row" of the image. This represents one row of pixels in the x direction. Compressed
   * textures include multiple rows of pixels in each "row". May be 0 for 1D texture copies.
   *
   * Must be a multiple of 256 for [`CommandEncoder::copy_buffer_to_texture`] and [`CommandEncoder::copy_texture_to_buffer`].
   * [`Queue::write_texture`] does not have this requirement.
   *
   * Must be a multiple of the texture block size, which is 1 for non-compressed textures.
   */
  uint32_t bytes_per_row;
  /**
   * Rows that make up a single "image". Each "image" is one layer in the z direction of a 3D image. May be larger
   * than `copy_size.y`.
   *
   * May be 0 for 2D texture copies.
   */
  uint32_t rows_per_image;
};
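
/*
 * Editorial example: for an uncompressed RGBA8 texture (4 bytes per texel)
 * that is 300 texels wide, the unpadded row is 1200 bytes, but encoder copies
 * require a 256-byte multiple, so the row must be padded up.
 *
 *   uint32_t unpadded_bytes_per_row = 300 * 4;                           // 1200
 *   uint32_t bytes_per_row = (unpadded_bytes_per_row + 255) & ~255u;     // 1280
 */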

/**
 * View of a buffer which can be used to copy to/from a texture.
 */
struct WGPUBufferCopyView_BufferId {
  /**
   * The buffer to be copied to/from.
   */
  WGPUBufferId buffer;
  /**
   * The layout of the texture data in this buffer.
   */
  struct WGPUTextureDataLayout layout;
};

typedef struct WGPUBufferCopyView_BufferId WGPUBufferCopyView;

typedef WGPUDeviceId WGPUQueueId;

/**
 * Describes the shader stages that a binding will be visible from.
 *
 * These can be combined so something that is visible from both vertex and fragment shaders can be defined as:
 *
 * `ShaderStage::VERTEX | ShaderStage::FRAGMENT`
 */
typedef uint32_t WGPUShaderStage;
/**
 * Binding is not visible from any shader stage.
 */
#define WGPUShaderStage_NONE (uint32_t)0
/**
 * Binding is visible from the vertex shader of a render pipeline.
 */
#define WGPUShaderStage_VERTEX (uint32_t)1
/**
 * Binding is visible from the fragment shader of a render pipeline.
 */
#define WGPUShaderStage_FRAGMENT (uint32_t)2
/**
 * Binding is visible from the compute shader of a compute pipeline.
 */
#define WGPUShaderStage_COMPUTE (uint32_t)4
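
/*
 * Editorial example: in C, the combination quoted above is written with the
 * generated constants:
 *
 *   WGPUShaderStage visibility = WGPUShaderStage_VERTEX | WGPUShaderStage_FRAGMENT;
 */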

typedef uint32_t WGPURawEnumOption_TextureViewDimension;

typedef uint32_t WGPURawEnumOption_TextureComponentType;

typedef uint32_t WGPURawEnumOption_TextureFormat;

struct WGPUBindGroupLayoutEntry {
  uint32_t binding;
  WGPUShaderStage visibility;
  enum WGPURawBindingType ty;
  bool has_dynamic_offset;
  WGPUOption_BufferSize min_binding_size;
  WGPURawEnumOption_TextureViewDimension view_dimension;
  WGPURawEnumOption_TextureComponentType texture_component_type;
  bool multisampled;
  WGPURawEnumOption_TextureFormat storage_texture_format;
};

struct WGPUBindGroupLayoutDescriptor {
  WGPURawString label;
  const struct WGPUBindGroupLayoutEntry *entries;
  uintptr_t entries_length;
};

struct WGPUBindGroupEntry {
  uint32_t binding;
  WGPUOption_BufferId buffer;
  WGPUBufferAddress offset;
  WGPUOption_BufferSize size;
  WGPUOption_SamplerId sampler;
  WGPUOption_TextureViewId texture_view;
};

struct WGPUBindGroupDescriptor {
  WGPURawString label;
  WGPUBindGroupLayoutId layout;
  const struct WGPUBindGroupEntry *entries;
  uintptr_t entries_length;
};

/**
 * Integral type used for dynamic bind group offsets.
 */
typedef uint32_t WGPUDynamicOffset;

/**
 * Bound uniform/storage buffer offsets must be aligned to this number.
 */
#define WGPUBIND_BUFFER_ALIGNMENT 256

/**
 * Buffer to buffer copy offsets and sizes must be aligned to this number.
 */
#define WGPUCOPY_BUFFER_ALIGNMENT 4

/**
 * Vertex buffer strides have to be aligned to this number.
 */
#define WGPUVERTEX_STRIDE_ALIGNMENT 4
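
/*
 * Editorial example: both alignment constants are powers of two, so an offset
 * or size can be rounded up with the usual mask trick. `offset` and `size`
 * stand in for caller-provided values.
 *
 *   uint64_t aligned_offset = (offset + WGPUBIND_BUFFER_ALIGNMENT - 1) & ~(uint64_t)(WGPUBIND_BUFFER_ALIGNMENT - 1);
 *   uint64_t aligned_size   = (size + WGPUCOPY_BUFFER_ALIGNMENT - 1) & ~(uint64_t)(WGPUCOPY_BUFFER_ALIGNMENT - 1);
 */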

WGPU_INLINE
struct WGPUInfrastructure wgpu_client_new(void)
WGPU_FUNC;

/**
 * # Safety
 *
 * This function is unsafe because improper use may lead to memory
 * problems. For example, a double-free may occur if the function is called
 * twice on the same raw pointer.
 */
WGPU_INLINE
void wgpu_client_delete(struct WGPUClient *aClient)
WGPU_FUNC;

/**
 * # Safety
 *
 * This function is unsafe as there is no guarantee that the given pointer is
 * valid for `id_length` elements.
 */
WGPU_INLINE
uintptr_t wgpu_client_make_adapter_ids(const struct WGPUClient *aClient,
                                       WGPUAdapterId *aIds,
                                       uintptr_t aIdLength)
WGPU_FUNC;
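
/*
 * Editorial example: a sketch of filling a small array of adapter ids. Per the
 * safety note, `aIds` must be valid for `aIdLength` elements; the uintptr_t
 * return value is presumably the number of ids actually written (an assumption
 * from the signature, not stated in this header). `client` stands in for a
 * `const struct WGPUClient *` set up elsewhere.
 *
 *   WGPUAdapterId ids[8];
 *   uintptr_t count = wgpu_client_make_adapter_ids(client, ids, 8);
 */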

WGPU_INLINE
void wgpu_client_kill_adapter_id(const struct WGPUClient *aClient,
                                 WGPUAdapterId aId)
WGPU_FUNC;

WGPU_INLINE
WGPUDeviceId wgpu_client_make_device_id(const struct WGPUClient *aClient,
                                        WGPUAdapterId aAdapterId)
WGPU_FUNC;

WGPU_INLINE
void wgpu_client_kill_device_id(const struct WGPUClient *aClient,
                                WGPUDeviceId aId)
WGPU_FUNC;

WGPU_INLINE
WGPUBufferId wgpu_client_make_buffer_id(const struct WGPUClient *aClient,
                                        WGPUDeviceId aDeviceId)
WGPU_FUNC;

WGPU_INLINE
WGPUBufferId wgpu_client_create_buffer(const struct WGPUClient *aClient,
                                       WGPUDeviceId aDeviceId,
                                       const struct WGPUBufferDescriptor *aDesc,
                                       WGPUByteBuf *aBb)
WGPU_FUNC;

WGPU_INLINE
void wgpu_client_kill_buffer_id(const struct WGPUClient *aClient,
                                WGPUBufferId aId)
WGPU_FUNC;

WGPU_INLINE
WGPUTextureId wgpu_client_create_texture(const struct WGPUClient *aClient,
                                         WGPUDeviceId aDeviceId,
                                         const struct WGPUTextureDescriptor *aDesc,
                                         WGPUByteBuf *aBb)
WGPU_FUNC;

WGPU_INLINE
void wgpu_client_kill_texture_id(const struct WGPUClient *aClient,
                                 WGPUTextureId aId)
WGPU_FUNC;

WGPU_INLINE
WGPUTextureViewId wgpu_client_create_texture_view(const struct WGPUClient *aClient,
                                                  WGPUDeviceId aDeviceId,
                                                  const struct WGPUTextureViewDescriptor *aDesc,
                                                  WGPUByteBuf *aBb)
WGPU_FUNC;

WGPU_INLINE
void wgpu_client_kill_texture_view_id(const struct WGPUClient *aClient,
                                      WGPUTextureViewId aId)
WGPU_FUNC;

WGPU_INLINE
WGPUSamplerId wgpu_client_create_sampler(const struct WGPUClient *aClient,
                                         WGPUDeviceId aDeviceId,
                                         const struct WGPUSamplerDescriptor *aDesc,
                                         WGPUByteBuf *aBb)
WGPU_FUNC;

WGPU_INLINE
void wgpu_client_kill_sampler_id(const struct WGPUClient *aClient,
                                 WGPUSamplerId aId)
WGPU_FUNC;

WGPU_INLINE
WGPUCommandEncoderId wgpu_client_create_command_encoder(const struct WGPUClient *aClient,
                                                        WGPUDeviceId aDeviceId,
                                                        const struct WGPUCommandEncoderDescriptor *aDesc,
                                                        WGPUByteBuf *aBb)
WGPU_FUNC;

WGPU_INLINE
void wgpu_client_kill_encoder_id(const struct WGPUClient *aClient,
                                 WGPUCommandEncoderId aId)
WGPU_FUNC;

WGPU_INLINE
struct WGPUComputePass *wgpu_command_encoder_begin_compute_pass(WGPUCommandEncoderId aEncoderId,
                                                                const struct WGPUComputePassDescriptor *aDesc)
WGPU_FUNC;

WGPU_INLINE
void wgpu_compute_pass_finish(const struct WGPUComputePass *aPass,
                              WGPUByteBuf *aOutput)
WGPU_FUNC;

WGPU_INLINE
void wgpu_compute_pass_destroy(struct WGPUComputePass *aPass)
WGPU_FUNC;

WGPU_INLINE
struct WGPURenderPass *wgpu_command_encoder_begin_render_pass(WGPUCommandEncoderId aEncoderId,
                                                              const struct WGPURenderPassDescriptor *aDesc)
WGPU_FUNC;

WGPU_INLINE
void wgpu_render_pass_finish(const struct WGPURenderPass *aPass,
                             WGPUByteBuf *aOutput)
WGPU_FUNC;

WGPU_INLINE
void wgpu_render_pass_destroy(struct WGPURenderPass *aPass)
WGPU_FUNC;

WGPU_INLINE
WGPUBindGroupLayoutId wgpu_client_make_bind_group_layout_id(const struct WGPUClient *aClient,
                                                            WGPUDeviceId aDeviceId)
WGPU_FUNC;

WGPU_INLINE
void wgpu_client_kill_bind_group_layout_id(const struct WGPUClient *aClient,
                                           WGPUBindGroupLayoutId aId)
WGPU_FUNC;

WGPU_INLINE
WGPUPipelineLayoutId wgpu_client_make_pipeline_layout_id(const struct WGPUClient *aClient,
                                                         WGPUDeviceId aDeviceId)
WGPU_FUNC;

WGPU_INLINE
void wgpu_client_kill_pipeline_layout_id(const struct WGPUClient *aClient,
                                         WGPUPipelineLayoutId aId)
WGPU_FUNC;

WGPU_INLINE
WGPUBindGroupId wgpu_client_make_bind_group_id(const struct WGPUClient *aClient,
                                               WGPUDeviceId aDeviceId)
WGPU_FUNC;

WGPU_INLINE
void wgpu_client_kill_bind_group_id(const struct WGPUClient *aClient,
                                    WGPUBindGroupId aId)
WGPU_FUNC;

WGPU_INLINE
WGPUShaderModuleId wgpu_client_create_shader_module(const struct WGPUClient *aClient,
                                                    WGPUDeviceId aDeviceId,
                                                    const struct WGPUShaderModuleDescriptor *aDesc,
                                                    WGPUByteBuf *aBb)
WGPU_FUNC;

WGPU_INLINE
void wgpu_client_kill_shader_module_id(const struct WGPUClient *aClient,
                                       WGPUShaderModuleId aId)
WGPU_FUNC;

WGPU_INLINE
WGPUComputePipelineId wgpu_client_create_compute_pipeline(const struct WGPUClient *aClient,
                                                          WGPUDeviceId aDeviceId,
                                                          const struct WGPUComputePipelineDescriptor *aDesc,
                                                          WGPUByteBuf *aBb)
WGPU_FUNC;

WGPU_INLINE
void wgpu_client_kill_compute_pipeline_id(const struct WGPUClient *aClient,
                                          WGPUComputePipelineId aId)
WGPU_FUNC;

WGPU_INLINE
WGPURenderPipelineId wgpu_client_create_render_pipeline(const struct WGPUClient *aClient,
                                                        WGPUDeviceId aDeviceId,
                                                        const struct WGPURenderPipelineDescriptor *aDesc,
                                                        WGPUByteBuf *aBb)
WGPU_FUNC;

WGPU_INLINE
void wgpu_client_kill_render_pipeline_id(const struct WGPUClient *aClient,
                                         WGPURenderPipelineId aId)
WGPU_FUNC;

WGPU_INLINE
struct WGPUGlobal *wgpu_server_new(struct WGPUIdentityRecyclerFactory aFactory)
WGPU_FUNC;

/**
 * # Safety
 *
 * This function is unsafe because improper use may lead to memory
 * problems. For example, a double-free may occur if the function is called
 * twice on the same raw pointer.
 */
WGPU_INLINE
void wgpu_server_delete(struct WGPUGlobal *aGlobal)
WGPU_FUNC;

WGPU_INLINE
void wgpu_server_poll_all_devices(const struct WGPUGlobal *aGlobal,
                                  bool aForceWait)
WGPU_FUNC;

/**
 * Request an adapter according to the specified options.
 * Provide the list of IDs to pick from.
 *
 * Returns the index in this list, or -1 if unable to pick.
 *
 * # Safety
 *
 * This function is unsafe as there is no guarantee that the given pointer is
 * valid for `id_length` elements.
 */
WGPU_INLINE
int8_t wgpu_server_instance_request_adapter(const struct WGPUGlobal *aGlobal,
                                            const WGPURequestAdapterOptions *aDesc,
                                            const WGPUAdapterId *aIds,
                                            uintptr_t aIdLength)
WGPU_FUNC;
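
/*
 * Editorial example: the documented contract is "pass a list of candidate ids,
 * get back an index into that list, or -1 if none can be picked".
 * `global`, `options`, `ids` and `id_count` stand in for values prepared
 * elsewhere; per the safety note, `ids` must be valid for `id_count` elements.
 *
 *   int8_t index = wgpu_server_instance_request_adapter(global, &options, ids, id_count);
 *   if (index >= 0) {
 *     WGPUAdapterId chosen = ids[index];
 *   }
 */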

WGPU_INLINE
void wgpu_server_adapter_request_device(const struct WGPUGlobal *aGlobal,
                                        WGPUAdapterId aSelfId,
                                        const struct WGPUDeviceDescriptor *aDesc,
                                        WGPUDeviceId aNewId)
WGPU_FUNC;

WGPU_INLINE
void wgpu_server_adapter_drop(const struct WGPUGlobal *aGlobal,
                              WGPUAdapterId aAdapterId)
WGPU_FUNC;

WGPU_INLINE
void wgpu_server_device_drop(const struct WGPUGlobal *aGlobal,
                             WGPUDeviceId aSelfId)
WGPU_FUNC;

WGPU_INLINE
void wgpu_server_device_create_buffer(const struct WGPUGlobal *aGlobal,
                                      WGPUDeviceId aSelfId,
                                      const struct WGPUBufferDescriptor *aDesc,
                                      WGPUBufferId aNewId)
WGPU_FUNC;

WGPU_INLINE
void wgpu_server_buffer_map(const struct WGPUGlobal *aGlobal,
                            WGPUBufferId aBufferId,
                            WGPUBufferAddress aStart,
                            WGPUBufferAddress aSize,
                            struct WGPUBufferMapOperation aOperation)
WGPU_FUNC;

/**
 * # Safety
 *
 * This function is unsafe as there is no guarantee that the given pointer is
 * valid for `size` elements.
 */
WGPU_INLINE
uint8_t *wgpu_server_buffer_get_mapped_range(const struct WGPUGlobal *aGlobal,
                                             WGPUBufferId aBufferId,
                                             WGPUBufferAddress aStart,
                                             WGPUOption_BufferSize aSize)
WGPU_FUNC;

WGPU_INLINE
void wgpu_server_buffer_unmap(const struct WGPUGlobal *aGlobal,
                              WGPUBufferId aBufferId)
WGPU_FUNC;

WGPU_INLINE
void wgpu_server_buffer_drop(const struct WGPUGlobal *aGlobal,
                             WGPUBufferId aSelfId)
WGPU_FUNC;

WGPU_INLINE
void wgpu_server_device_create_encoder(const struct WGPUGlobal *aGlobal,
                                       WGPUDeviceId aSelfId,
                                       const struct WGPUCommandEncoderDescriptor *aDesc,
                                       WGPUCommandEncoderId aNewId)
WGPU_FUNC;

WGPU_INLINE
void wgpu_server_encoder_finish(const struct WGPUGlobal *aGlobal,
                                WGPUCommandEncoderId aSelfId,
                                const struct WGPUCommandBufferDescriptor *aDesc)
WGPU_FUNC;

WGPU_INLINE
void wgpu_server_encoder_drop(const struct WGPUGlobal *aGlobal,
                              WGPUCommandEncoderId aSelfId)
WGPU_FUNC;

/**
 * # Safety
 *
 * This function is unsafe because improper use may lead to memory
 * problems. For example, a double-free may occur if the function is called
 * twice on the same command buffer id.
 */
WGPU_INLINE
void wgpu_server_command_buffer_drop(const struct WGPUGlobal *aGlobal,
                                     WGPUCommandBufferId aSelfId)
WGPU_FUNC;

WGPU_INLINE
void wgpu_server_encoder_copy_buffer_to_buffer(const struct WGPUGlobal *aGlobal,
                                               WGPUCommandEncoderId aSelfId,
                                               WGPUBufferId aSourceId,
                                               WGPUBufferAddress aSourceOffset,
                                               WGPUBufferId aDestinationId,
                                               WGPUBufferAddress aDestinationOffset,
                                               WGPUBufferAddress aSize)
WGPU_FUNC;

WGPU_INLINE
void wgpu_server_encoder_copy_texture_to_buffer(const struct WGPUGlobal *aGlobal,
                                                WGPUCommandEncoderId aSelfId,
                                                const WGPUTextureCopyView *aSource,
                                                const WGPUBufferCopyView *aDestination,
                                                const struct WGPUExtent3d *aSize)
WGPU_FUNC;

WGPU_INLINE
void wgpu_server_encoder_copy_buffer_to_texture(const struct WGPUGlobal *aGlobal,
                                                WGPUCommandEncoderId aSelfId,
                                                const WGPUBufferCopyView *aSource,
                                                const WGPUTextureCopyView *aDestination,
                                                const struct WGPUExtent3d *aSize)
WGPU_FUNC;

WGPU_INLINE
void wgpu_server_encoder_copy_texture_to_texture(const struct WGPUGlobal *aGlobal,
                                                 WGPUCommandEncoderId aSelfId,
                                                 const WGPUTextureCopyView *aSource,
                                                 const WGPUTextureCopyView *aDestination,
                                                 const struct WGPUExtent3d *aSize)
WGPU_FUNC;

/**
 * # Safety
 *
 * This function is unsafe as there is no guarantee that the given pointers are
 * valid for `color_attachments_length` and `command_length` elements,
 * respectively.
 */
WGPU_INLINE
void wgpu_server_encode_compute_pass(const struct WGPUGlobal *aGlobal,
                                     WGPUCommandEncoderId aSelfId,
                                     const WGPUByteBuf *aByteBuf)
WGPU_FUNC;

/**
 * # Safety
 *
 * This function is unsafe as there is no guarantee that the given pointers are
 * valid for `color_attachments_length` and `command_length` elements,
 * respectively.
 */
WGPU_INLINE
void wgpu_server_encode_render_pass(const struct WGPUGlobal *aGlobal,
                                    WGPUCommandEncoderId aSelfId,
                                    const struct WGPURenderPass *aPass)
WGPU_FUNC;

/**
 * # Safety
 *
 * This function is unsafe as there is no guarantee that the given pointer is
 * valid for `command_buffer_id_length` elements.
 */
WGPU_INLINE
void wgpu_server_queue_submit(const struct WGPUGlobal *aGlobal,
                              WGPUQueueId aSelfId,
                              const WGPUCommandBufferId *aCommandBufferIds,
                              uintptr_t aCommandBufferIdLength)
WGPU_FUNC;

/**
 * # Safety
 *
 * This function is unsafe as there is no guarantee that the given pointer is
 * valid for `data_length` elements.
 */
WGPU_INLINE
void wgpu_server_queue_write_buffer(const struct WGPUGlobal *aGlobal,
                                    WGPUQueueId aSelfId,
                                    WGPUBufferId aBufferId,
                                    WGPUBufferAddress aBufferOffset,
                                    const uint8_t *aData,
                                    uintptr_t aDataLength)
WGPU_FUNC;

/**
 * # Safety
 *
 * This function is unsafe as there is no guarantee that the given pointer is
 * valid for `data_length` elements.
 */
WGPU_INLINE
void wgpu_server_queue_write_texture(const struct WGPUGlobal *aGlobal,
                                     WGPUQueueId aSelfId,
                                     const WGPUTextureCopyView *aDestination,
                                     const uint8_t *aData,
                                     uintptr_t aDataLength,
                                     const struct WGPUTextureDataLayout *aLayout,
                                     const struct WGPUExtent3d *aExtent)
WGPU_FUNC;

/**
 * # Safety
 *
 * This function is unsafe as there is no guarantee that the given pointer is
 * valid for `entries_length` elements.
 */
WGPU_INLINE
void wgpu_server_device_create_bind_group_layout(const struct WGPUGlobal *aGlobal,
                                                 WGPUDeviceId aSelfId,
                                                 const struct WGPUBindGroupLayoutDescriptor *aDesc,
                                                 WGPUBindGroupLayoutId aNewId)
WGPU_FUNC;

WGPU_INLINE
void wgpu_server_bind_group_layout_drop(const struct WGPUGlobal *aGlobal,
                                        WGPUBindGroupLayoutId aSelfId)
WGPU_FUNC;

WGPU_INLINE
void wgpu_server_device_create_pipeline_layout(const struct WGPUGlobal *aGlobal,
                                               WGPUDeviceId aSelfId,
                                               const struct WGPUPipelineLayoutDescriptor *aDesc,
                                               WGPUPipelineLayoutId aNewId)
WGPU_FUNC;

WGPU_INLINE
void wgpu_server_pipeline_layout_drop(const struct WGPUGlobal *aGlobal,
                                      WGPUPipelineLayoutId aSelfId)
WGPU_FUNC;

/**
 * # Safety
 *
 * This function is unsafe as there is no guarantee that the given pointer is
 * valid for `entries_length` elements.
 */
WGPU_INLINE
void wgpu_server_device_create_bind_group(const struct WGPUGlobal *aGlobal,
                                          WGPUDeviceId aSelfId,
                                          const struct WGPUBindGroupDescriptor *aDesc,
                                          WGPUBindGroupId aNewId)
WGPU_FUNC;

WGPU_INLINE
void wgpu_server_bind_group_drop(const struct WGPUGlobal *aGlobal,
                                 WGPUBindGroupId aSelfId)
WGPU_FUNC;

WGPU_INLINE
void wgpu_server_shader_module_drop(const struct WGPUGlobal *aGlobal,
                                    WGPUShaderModuleId aSelfId)
WGPU_FUNC;

WGPU_INLINE
void wgpu_server_compute_pipeline_drop(const struct WGPUGlobal *aGlobal,
                                       WGPUComputePipelineId aSelfId)
WGPU_FUNC;

WGPU_INLINE
void wgpu_server_render_pipeline_drop(const struct WGPUGlobal *aGlobal,
                                      WGPURenderPipelineId aSelfId)
WGPU_FUNC;

WGPU_INLINE
void wgpu_server_device_create_texture(const struct WGPUGlobal *aGlobal,
                                       WGPUDeviceId aSelfId,
                                       const struct WGPUTextureDescriptor *aDesc,
                                       WGPUTextureId aNewId)
WGPU_FUNC;

WGPU_INLINE
void wgpu_server_texture_create_view(const struct WGPUGlobal *aGlobal,
                                     WGPUTextureId aSelfId,
                                     const struct WGPUTextureViewDescriptor *aDesc,
                                     WGPUTextureViewId aNewId)
WGPU_FUNC;

WGPU_INLINE
void wgpu_server_texture_drop(const struct WGPUGlobal *aGlobal,
                              WGPUTextureId aSelfId)
WGPU_FUNC;

WGPU_INLINE
void wgpu_server_texture_view_drop(const struct WGPUGlobal *aGlobal,
                                   WGPUTextureViewId aSelfId)
WGPU_FUNC;

WGPU_INLINE
void wgpu_server_device_create_sampler(const struct WGPUGlobal *aGlobal,
                                       WGPUDeviceId aSelfId,
                                       const struct WGPUSamplerDescriptor *aDesc,
                                       WGPUSamplerId aNewId)
WGPU_FUNC;

WGPU_INLINE
void wgpu_server_sampler_drop(const struct WGPUGlobal *aGlobal,
                              WGPUSamplerId aSelfId)
WGPU_FUNC;

/**
 * # Safety
 *
 * This function is unsafe as there is no guarantee that the given pointer is
 * valid for `offset_length` elements.
 */
WGPU_INLINE
void wgpu_render_bundle_set_bind_group(struct WGPURenderBundleEncoder *aBundle,
                                       uint32_t aIndex,
                                       WGPUBindGroupId aBindGroupId,
                                       const WGPUDynamicOffset *aOffsets,
                                       uintptr_t aOffsetLength)
WGPU_FUNC;

WGPU_INLINE
void wgpu_render_bundle_set_pipeline(struct WGPURenderBundleEncoder *aBundle,
                                     WGPURenderPipelineId aPipelineId)
WGPU_FUNC;

WGPU_INLINE
void wgpu_render_bundle_set_index_buffer(struct WGPURenderBundleEncoder *aBundle,
                                         WGPUBufferId aBufferId,
                                         WGPUBufferAddress aOffset,
                                         WGPUOption_BufferSize aSize)
WGPU_FUNC;

WGPU_INLINE
void wgpu_render_bundle_set_vertex_buffer(struct WGPURenderBundleEncoder *aBundle,
                                          uint32_t aSlot,
                                          WGPUBufferId aBufferId,
                                          WGPUBufferAddress aOffset,
                                          WGPUOption_BufferSize aSize)
WGPU_FUNC;

WGPU_INLINE
void wgpu_render_bundle_set_push_constants(struct WGPURenderBundleEncoder *aPass,
                                           WGPUShaderStage aStages,
                                           uint32_t aOffset,
                                           uint32_t aSizeBytes,
                                           const uint8_t *aData)
WGPU_FUNC;

WGPU_INLINE
void wgpu_render_bundle_draw(struct WGPURenderBundleEncoder *aBundle,
                             uint32_t aVertexCount,
                             uint32_t aInstanceCount,
                             uint32_t aFirstVertex,
                             uint32_t aFirstInstance)
WGPU_FUNC;

WGPU_INLINE
void wgpu_render_bundle_draw_indexed(struct WGPURenderBundleEncoder *aBundle,
                                     uint32_t aIndexCount,
                                     uint32_t aInstanceCount,
                                     uint32_t aFirstIndex,
                                     int32_t aBaseVertex,
                                     uint32_t aFirstInstance)
WGPU_FUNC;

WGPU_INLINE
void wgpu_render_bundle_draw_indirect(struct WGPURenderBundleEncoder *aBundle,
                                      WGPUBufferId aBufferId,
                                      WGPUBufferAddress aOffset)
WGPU_FUNC;

WGPU_INLINE
void wgpu_render_pass_bundle_indexed_indirect(struct WGPURenderBundleEncoder *aBundle,
                                              WGPUBufferId aBufferId,
                                              WGPUBufferAddress aOffset)
WGPU_FUNC;

WGPU_INLINE
void wgpu_render_bundle_push_debug_group(struct WGPURenderBundleEncoder *aBundle,
                                         WGPURawString aLabel)
WGPU_FUNC;

WGPU_INLINE
void wgpu_render_bundle_pop_debug_group(struct WGPURenderBundleEncoder *aBundle)
WGPU_FUNC;

WGPU_INLINE
void wgpu_render_bundle_insert_debug_marker(struct WGPURenderBundleEncoder *aBundle,
                                            WGPURawString aLabel)
WGPU_FUNC;

/**
 * # Safety
 *
 * This function is unsafe as there is no guarantee that the given pointer is
 * valid for `offset_length` elements.
 */
WGPU_INLINE
void wgpu_compute_pass_set_bind_group(struct WGPUComputePass *aPass,
                                      uint32_t aIndex,
                                      WGPUBindGroupId aBindGroupId,
                                      const WGPUDynamicOffset *aOffsets,
                                      uintptr_t aOffsetLength)
WGPU_FUNC;

WGPU_INLINE
void wgpu_compute_pass_set_pipeline(struct WGPUComputePass *aPass,
                                    WGPUComputePipelineId aPipelineId)
WGPU_FUNC;

WGPU_INLINE
void wgpu_compute_pass_set_push_constant(struct WGPUComputePass *aPass,
                                         uint32_t aOffset,
                                         uint32_t aSizeBytes,
                                         const uint8_t *aData)
WGPU_FUNC;

WGPU_INLINE
void wgpu_compute_pass_dispatch(struct WGPUComputePass *aPass,
                                uint32_t aGroupsX,
                                uint32_t aGroupsY,
                                uint32_t aGroupsZ)
WGPU_FUNC;

WGPU_INLINE
void wgpu_compute_pass_dispatch_indirect(struct WGPUComputePass *aPass,
                                         WGPUBufferId aBufferId,
                                         WGPUBufferAddress aOffset)
WGPU_FUNC;

WGPU_INLINE
void wgpu_compute_pass_push_debug_group(struct WGPUComputePass *aPass,
                                        WGPURawString aLabel,
                                        uint32_t aColor)
WGPU_FUNC;

WGPU_INLINE
void wgpu_compute_pass_pop_debug_group(struct WGPUComputePass *aPass)
WGPU_FUNC;

WGPU_INLINE
void wgpu_compute_pass_insert_debug_marker(struct WGPUComputePass *aPass,
                                           WGPURawString aLabel,
                                           uint32_t aColor)
WGPU_FUNC;

/**
 * # Safety
 *
 * This function is unsafe as there is no guarantee that the given pointer is
 * valid for `offset_length` elements.
 */
WGPU_INLINE
void wgpu_render_pass_set_bind_group(struct WGPURenderPass *aPass,
                                     uint32_t aIndex,
                                     WGPUBindGroupId aBindGroupId,
                                     const WGPUDynamicOffset *aOffsets,
                                     uintptr_t aOffsetLength)
WGPU_FUNC;

WGPU_INLINE
void wgpu_render_pass_set_pipeline(struct WGPURenderPass *aPass,
                                   WGPURenderPipelineId aPipelineId)
WGPU_FUNC;

WGPU_INLINE
void wgpu_render_pass_set_index_buffer(struct WGPURenderPass *aPass,
                                       WGPUBufferId aBufferId,
                                       WGPUBufferAddress aOffset,
                                       WGPUOption_BufferSize aSize)
WGPU_FUNC;

WGPU_INLINE
void wgpu_render_pass_set_vertex_buffer(struct WGPURenderPass *aPass,
                                        uint32_t aSlot,
                                        WGPUBufferId aBufferId,
                                        WGPUBufferAddress aOffset,
                                        WGPUOption_BufferSize aSize)
WGPU_FUNC;

WGPU_INLINE
void wgpu_render_pass_set_blend_color(struct WGPURenderPass *aPass,
                                      const struct WGPUColor *aColor)
WGPU_FUNC;

WGPU_INLINE
void wgpu_render_pass_set_stencil_reference(struct WGPURenderPass *aPass,
                                            uint32_t aValue)
WGPU_FUNC;

WGPU_INLINE
void wgpu_render_pass_set_viewport(struct WGPURenderPass *aPass,
                                   float aX,
                                   float aY,
                                   float aW,
                                   float aH,
                                   float aDepthMin,
                                   float aDepthMax)
WGPU_FUNC;

WGPU_INLINE
void wgpu_render_pass_set_scissor_rect(struct WGPURenderPass *aPass,
                                       uint32_t aX,
                                       uint32_t aY,
                                       uint32_t aW,
                                       uint32_t aH)
WGPU_FUNC;

WGPU_INLINE
void wgpu_render_pass_set_push_constants(struct WGPURenderPass *aPass,
                                         WGPUShaderStage aStages,
                                         uint32_t aOffset,
                                         uint32_t aSizeBytes,
                                         const uint8_t *aData)
WGPU_FUNC;

WGPU_INLINE
void wgpu_render_pass_draw(struct WGPURenderPass *aPass,
                           uint32_t aVertexCount,
                           uint32_t aInstanceCount,
                           uint32_t aFirstVertex,
                           uint32_t aFirstInstance)
WGPU_FUNC;

WGPU_INLINE
void wgpu_render_pass_draw_indexed(struct WGPURenderPass *aPass,
                                   uint32_t aIndexCount,
                                   uint32_t aInstanceCount,
                                   uint32_t aFirstIndex,
                                   int32_t aBaseVertex,
                                   uint32_t aFirstInstance)
WGPU_FUNC;

WGPU_INLINE
void wgpu_render_pass_draw_indirect(struct WGPURenderPass *aPass,
                                    WGPUBufferId aBufferId,
                                    WGPUBufferAddress aOffset)
WGPU_FUNC;

WGPU_INLINE
void wgpu_render_pass_draw_indexed_indirect(struct WGPURenderPass *aPass,
                                            WGPUBufferId aBufferId,
                                            WGPUBufferAddress aOffset)
WGPU_FUNC;

WGPU_INLINE
void wgpu_render_pass_multi_draw_indirect(struct WGPURenderPass *aPass,
                                          WGPUBufferId aBufferId,
                                          WGPUBufferAddress aOffset,
                                          uint32_t aCount)
WGPU_FUNC;

WGPU_INLINE
void wgpu_render_pass_multi_draw_indexed_indirect(struct WGPURenderPass *aPass,
                                                  WGPUBufferId aBufferId,
                                                  WGPUBufferAddress aOffset,
                                                  uint32_t aCount)
WGPU_FUNC;

WGPU_INLINE
void wgpu_render_pass_multi_draw_indirect_count(struct WGPURenderPass *aPass,
                                                WGPUBufferId aBufferId,
                                                WGPUBufferAddress aOffset,
                                                WGPUBufferId aCountBufferId,
                                                WGPUBufferAddress aCountBufferOffset,
                                                uint32_t aMaxCount)
WGPU_FUNC;

WGPU_INLINE
void wgpu_render_pass_multi_draw_indexed_indirect_count(struct WGPURenderPass *aPass,
                                                        WGPUBufferId aBufferId,
                                                        WGPUBufferAddress aOffset,
                                                        WGPUBufferId aCountBufferId,
                                                        WGPUBufferAddress aCountBufferOffset,
                                                        uint32_t aMaxCount)
WGPU_FUNC;

WGPU_INLINE
void wgpu_render_pass_push_debug_group(struct WGPURenderPass *aPass,
                                       WGPURawString aLabel,
                                       uint32_t aColor)
WGPU_FUNC;

WGPU_INLINE
void wgpu_render_pass_pop_debug_group(struct WGPURenderPass *aPass)
WGPU_FUNC;

WGPU_INLINE
void wgpu_render_pass_insert_debug_marker(struct WGPURenderPass *aPass,
                                          WGPURawString aLabel,
                                          uint32_t aColor)
WGPU_FUNC;