summaryrefslogtreecommitdiffstats
path: root/third_party/rust/wasm-smith
diff options
context:
space:
mode:
authorDaniel Baumann <daniel.baumann@progress-linux.org>2024-04-07 19:33:14 +0000
committerDaniel Baumann <daniel.baumann@progress-linux.org>2024-04-07 19:33:14 +0000
commit36d22d82aa202bb199967e9512281e9a53db42c9 (patch)
tree105e8c98ddea1c1e4784a60a5a6410fa416be2de /third_party/rust/wasm-smith
parentInitial commit. (diff)
downloadfirefox-esr-upstream.tar.xz
firefox-esr-upstream.zip
Adding upstream version 115.7.0esr.upstream/115.7.0esrupstream
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'third_party/rust/wasm-smith')
-rw-r--r--third_party/rust/wasm-smith/.cargo-checksum.json1
-rw-r--r--third_party/rust/wasm-smith/Cargo.toml72
-rw-r--r--third_party/rust/wasm-smith/LICENSE220
-rw-r--r--third_party/rust/wasm-smith/README.md101
-rw-r--r--third_party/rust/wasm-smith/benches/corpus.rs29
-rw-r--r--third_party/rust/wasm-smith/src/component.rs2232
-rw-r--r--third_party/rust/wasm-smith/src/component/encode.rs319
-rw-r--r--third_party/rust/wasm-smith/src/config.rs837
-rw-r--r--third_party/rust/wasm-smith/src/core.rs1741
-rw-r--r--third_party/rust/wasm-smith/src/core/code_builder.rs5355
-rw-r--r--third_party/rust/wasm-smith/src/core/code_builder/no_traps.rs644
-rw-r--r--third_party/rust/wasm-smith/src/core/encode.rs262
-rw-r--r--third_party/rust/wasm-smith/src/core/terminate.rs70
-rw-r--r--third_party/rust/wasm-smith/src/lib.rs193
-rw-r--r--third_party/rust/wasm-smith/tests/component.rs42
-rw-r--r--third_party/rust/wasm-smith/tests/core.rs318
16 files changed, 12436 insertions, 0 deletions
diff --git a/third_party/rust/wasm-smith/.cargo-checksum.json b/third_party/rust/wasm-smith/.cargo-checksum.json
new file mode 100644
index 0000000000..d0d11b7881
--- /dev/null
+++ b/third_party/rust/wasm-smith/.cargo-checksum.json
@@ -0,0 +1 @@
+{"files":{"Cargo.toml":"3fab85439f91b5b0dfc20abd90cabf4a9f29ab1c422ab1308851c344302d32b3","LICENSE":"268872b9816f90fd8e85db5a28d33f8150ebb8dd016653fb39ef1f94f2686bc5","README.md":"9202d01e78acf04e38e23e162a91c20ece8968f6172c87bfa6f18bf0b3f27d74","benches/corpus.rs":"2df29556be0799f0cb1f32c8d0ae5ba0c4b9815cf4d59a8b71744d926c0693a0","src/component.rs":"32f93aac210f70fbc3f870c400993f4e7ff780ca6e111453a4dc4545906887e8","src/component/encode.rs":"09eddb96b5b607a87673714208c6d43b247ec4870168661d27d2c3ce92c43afd","src/config.rs":"3d33dea22d53081504e13fea4b5c99898624f26ed03013a831bae0c9759823fc","src/core.rs":"40949add77c2b90e9f3fc0ce6862a7c83ac03d5dd9babde5abee536d6fe074e6","src/core/code_builder.rs":"60e407a758ff58aafdcf4eb4e9141aca2ab7c7fe09d6644bc6e31043f88d0d69","src/core/code_builder/no_traps.rs":"e595dbde06551f5f8b23e03cfac0634beacab08e6243c66d6ffda95079212b24","src/core/encode.rs":"b4cc82895e3c3afe26e2e62bbdd63c44e7c1f091133e49f0eaf7e7ec22400e40","src/core/terminate.rs":"d24af5206a13aee7d6a6ea900ccdf088c09d053c36026cf1607cc38c972b3ba9","src/lib.rs":"77ed926d64d325a73613f1c307db39c65c428a1eafae114033c73a60da586fd3","tests/component.rs":"54c69ebdda583207f9f0467a600ee0ca87fbee6b365e97ec3deff7b46bd6af06","tests/core.rs":"da9e27e7057a7cd9435666d4bd4d57cec03b0d5d74bad289373db7beb8a2e1a6"},"package":"549cb78be46f43ad6746402871336cb6a989127fb847e93eb6ba0817647485a6"} \ No newline at end of file
diff --git a/third_party/rust/wasm-smith/Cargo.toml b/third_party/rust/wasm-smith/Cargo.toml
new file mode 100644
index 0000000000..06606c0147
--- /dev/null
+++ b/third_party/rust/wasm-smith/Cargo.toml
@@ -0,0 +1,72 @@
+# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO
+#
+# When uploading crates to the registry Cargo will automatically
+# "normalize" Cargo.toml files for maximal compatibility
+# with all versions of Cargo and also rewrite `path` dependencies
+# to registry (e.g., crates.io) dependencies.
+#
+# If you are reading this file be aware that the original Cargo.toml
+# will likely look very different (and much more reasonable).
+# See Cargo.toml.orig for the original contents.
+
+[package]
+edition = "2021"
+name = "wasm-smith"
+version = "0.12.5"
+authors = ["Nick Fitzgerald <fitzgen@gmail.com>"]
+exclude = ["/benches/corpus"]
+description = "A WebAssembly test case generator"
+documentation = "https://docs.rs/wasm-smith"
+readme = "./README.md"
+categories = [
+ "command-line-utilities",
+ "development-tools",
+ "development-tools::testing",
+ "wasm",
+]
+license = "Apache-2.0 WITH LLVM-exception"
+repository = "https://github.com/bytecodealliance/wasm-tools/tree/main/crates/wasm-smith"
+
+[[bench]]
+name = "corpus"
+harness = false
+
+[dependencies.arbitrary]
+version = "1.1.0"
+features = ["derive"]
+
+[dependencies.flagset]
+version = "0.4"
+
+[dependencies.indexmap]
+version = "1.9.1"
+
+[dependencies.leb128]
+version = "0.2.4"
+
+[dependencies.serde]
+version = "1.0.137"
+features = ["derive"]
+optional = true
+
+[dependencies.wasm-encoder]
+version = "0.25.0"
+
+[dependencies.wasmparser]
+version = "0.102.0"
+
+[dev-dependencies.criterion]
+version = "0.3.3"
+
+[dev-dependencies.libfuzzer-sys]
+version = "0.4.0"
+
+[dev-dependencies.rand]
+version = "0.8.4"
+features = ["small_rng"]
+
+[features]
+_internal_cli = [
+ "serde",
+ "flagset/serde",
+]
diff --git a/third_party/rust/wasm-smith/LICENSE b/third_party/rust/wasm-smith/LICENSE
new file mode 100644
index 0000000000..f9d81955f4
--- /dev/null
+++ b/third_party/rust/wasm-smith/LICENSE
@@ -0,0 +1,220 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+
+
+--- LLVM Exceptions to the Apache 2.0 License ----
+
+As an exception, if, as a result of your compiling your source code, portions
+of this Software are embedded into an Object form of such source code, you
+may redistribute such embedded portions in such Object form without complying
+with the conditions of Sections 4(a), 4(b) and 4(d) of the License.
+
+In addition, if you combine or link compiled forms of this Software with
+software that is licensed under the GPLv2 ("Combined Software") and if a
+court of competent jurisdiction determines that the patent provision (Section
+3), the indemnity provision (Section 9) or other Section of the License
+conflicts with the conditions of the GPLv2, you may retroactively and
+prospectively choose to deem waived or otherwise exclude such Section(s) of
+the License, but only in their entirety and only with respect to the Combined
+Software.
+
diff --git a/third_party/rust/wasm-smith/README.md b/third_party/rust/wasm-smith/README.md
new file mode 100644
index 0000000000..720472ebb6
--- /dev/null
+++ b/third_party/rust/wasm-smith/README.md
@@ -0,0 +1,101 @@
+# `wasm-smith`
+
+**A WebAssembly test case generator.**
+
+[![](https://docs.rs/wasm-smith/badge.svg)](https://docs.rs/wasm-smith/)
+[![](https://img.shields.io/crates/v/wasm-smith.svg)](https://crates.io/crates/wasm-smith)
+[![](https://img.shields.io/crates/d/wasm-smith.svg)](https://crates.io/crates/wasm-smith)
+![Rust](https://github.com/fitzgen/wasm-smith/workflows/Rust/badge.svg)
+
+* [Features](#features)
+* [Usage](#usage)
+ * [With `cargo fuzz` and `libfuzzer-sys`](#with-cargo-fuzz-and-libfuzzer-sys)
+ * [As a Command Line Tool](#as-a-command-line-tool)
+
+## Features
+
+* **Always valid:** All generated Wasm modules pass validation. `wasm-smith`
+ gets past your wasm parser and validator, exercising the guts of your Wasm
+ compiler, runtime, or tool.
+
+* **Supports the full WebAssembly language:** Doesn't have blind spots or
+ unimplemented instructions.
+
+* **Implements the
+ [`Arbitrary`](https://docs.rs/arbitrary/*/arbitrary/trait.Arbitrary.html)
+ trait**: Easy to use with [`cargo
+ fuzz`](https://github.com/rust-fuzz/cargo-fuzz) and
+ [`libfuzzer-sys`](https://github.com/rust-fuzz/libfuzzer)!
+
+* **Deterministic:** Given the same input seed, always generates the same output
+ Wasm module, so you can always reproduce test failures.
+
+* **Plays nice with mutation-based fuzzers:** Small changes to the input tend to
+ produce small changes to the output Wasm module. Larger inputs tend to
+ generate larger Wasm modules.
+
+## Usage
+
+### With `cargo fuzz` and `libfuzzer-sys`
+
+First, use `cargo fuzz` to define a new fuzz target:
+
+```shell
+$ cargo fuzz add my_wasm_smith_fuzz_target
+```
+
+Next, add `wasm-smith` to your dependencies:
+
+```shell
+$ cargo add wasm-smith
+```
+
+Then, define your fuzz target so that it takes arbitrary `wasm_smith::Module`s
+as an argument, convert the module into serialized Wasm bytes via the `to_bytes`
+method, and then feed it into your system:
+
+```rust
+// fuzz/fuzz_targets/my_wasm_smith_fuzz_target.rs
+
+#![no_main]
+
+use libfuzzer_sys::fuzz_target;
+use wasm_smith::Module;
+
+fuzz_target!(|module: Module| {
+ let wasm_bytes = module.to_bytes();
+
+ // Your code here...
+});
+```
+
+Finally, start fuzzing:
+
+```shell
+$ cargo fuzz run my_wasm_smith_fuzz_target
+```
+
+> **Note:** Also check out [the `validate` fuzz
+> target](https://github.com/fitzgen/wasm-smith/blob/main/fuzz/fuzz_targets/validate.rs)
+> defined in this repository. Using the `wasmparser` crate, it checks that every
+> module generated by `wasm-smith` validates successfully.
+
+### As a Command Line Tool
+
+Install the CLI tool via `cargo`:
+
+```shell
+$ cargo install wasm-tools
+```
+
+Convert some arbitrary input into a valid Wasm module:
+
+```shell
+$ head -c 100 /dev/urandom | wasm-tools smith -o test.wasm
+```
+
+Finally, run your tool on the generated Wasm module:
+
+```shell
+$ my-wasm-tool test.wasm
+```
diff --git a/third_party/rust/wasm-smith/benches/corpus.rs b/third_party/rust/wasm-smith/benches/corpus.rs
new file mode 100644
index 0000000000..56206c69c4
--- /dev/null
+++ b/third_party/rust/wasm-smith/benches/corpus.rs
@@ -0,0 +1,29 @@
+use arbitrary::{Arbitrary, Unstructured};
+use criterion::{black_box, criterion_group, criterion_main, Criterion};
+use wasm_smith::Module;
+
+pub fn benchmark_corpus(c: &mut Criterion) {
+ let mut corpus = Vec::with_capacity(2000);
+ let entries = std::fs::read_dir("./benches/corpus").expect("failed to read dir");
+ for e in entries {
+ let e = e.expect("failed to read dir entry");
+ let seed = std::fs::read(e.path()).expect("failed to read seed file");
+ corpus.push(seed);
+ }
+
+ // Benchmark how long it takes to generate a module for every seed in our
+ // corpus (taken from the `validate` fuzz target).
+ c.bench_function("corpus", |b| {
+ b.iter(|| {
+ for seed in &corpus {
+ let seed = black_box(seed);
+ let mut u = Unstructured::new(seed);
+ let result = Module::arbitrary(&mut u);
+ let _ = black_box(result);
+ }
+ })
+ });
+}
+
+criterion_group!(benches, benchmark_corpus);
+criterion_main!(benches);
diff --git a/third_party/rust/wasm-smith/src/component.rs b/third_party/rust/wasm-smith/src/component.rs
new file mode 100644
index 0000000000..cb8acdc6bb
--- /dev/null
+++ b/third_party/rust/wasm-smith/src/component.rs
@@ -0,0 +1,2232 @@
+//! Generation of Wasm
+//! [components](https://github.com/WebAssembly/component-model).
+
+#![allow(unused_variables, dead_code)] // TODO FITZGEN
+
+use crate::{arbitrary_loop, Config, DefaultConfig};
+use arbitrary::{Arbitrary, Result, Unstructured};
+use std::collections::BTreeMap;
+use std::convert::TryFrom;
+use std::{
+ collections::{HashMap, HashSet},
+ marker,
+ rc::Rc,
+};
+use wasm_encoder::{ComponentTypeRef, ComponentValType, PrimitiveValType, TypeBounds, ValType};
+use wasmparser::types::KebabString;
+
+mod encode;
+
+/// A pseudo-random WebAssembly [component].
+///
+/// Construct instances of this type with [the `Arbitrary`
+/// trait](https://docs.rs/arbitrary/*/arbitrary/trait.Arbitrary.html).
+///
+/// [component]: https://github.com/WebAssembly/component-model/blob/ast-and-binary/design/MVP/Explainer.md
+///
+/// ## Configured Generated Components
+///
+/// This uses the [`DefaultConfig`][crate::DefaultConfig] configuration. If you
+/// want to customize the shape of generated components, define your own
+/// configuration type, implement the [`Config`][crate::Config] trait for it,
+/// and use [`ConfiguredComponent<YourConfigType>`][crate::ConfiguredComponent]
+/// instead of plain `Component`.
+#[derive(Debug)]
+pub struct Component {
+ sections: Vec<Section>,
+}
+
+/// A builder to create a component (and possibly a whole tree of nested
+/// components).
+///
+/// Maintains a stack of components we are currently building, as well as
+/// metadata about them. The split between `Component` and `ComponentBuilder` is
+/// that the builder contains metadata that is purely used when generating
+/// components and is unnecessary after we are done generating the structure of
+/// the components and only need to encode an already-generated component to
+/// bytes.
+#[derive(Debug)]
+struct ComponentBuilder {
+ config: Rc<dyn Config>,
+
+ // The set of core `valtype`s that we are configured to generate.
+ core_valtypes: Vec<ValType>,
+
+ // Stack of types scopes that are currently available.
+ //
+ // There is an entry in this stack for each component, but there can also be
+ // additional entries for module/component/instance types, each of which
+ // have their own scope.
+ //
+ // This stack is always non-empty and the last entry is always the current
+ // scope.
+ //
+ // When a particular scope can alias outer types, it can alias from any
+ // scope that is older than it (i.e. `types_scope[i]` can alias from
+ // `types_scope[j]` when `j <= i`).
+ types: Vec<TypesScope>,
+
+ // The set of components we are currently building and their associated
+ // metadata.
+ components: Vec<ComponentContext>,
+
+ // Whether we are in the final bits of generating this component and we just
+ // need to ensure that the minimum number of entities configured have all
+ // been generated. This changes the behavior of various
+ // `arbitrary_<section>` methods to always fill in their minimums.
+ fill_minimums: bool,
+
+ // Our maximums for these entities are applied across the whole component
+ // tree, not per-component.
+ total_components: usize,
+ total_modules: usize,
+ total_instances: usize,
+ total_values: usize,
+}
+
+#[derive(Debug, Clone)]
+enum ComponentOrCoreFuncType {
+ Component(Rc<FuncType>),
+ Core(Rc<crate::core::FuncType>),
+}
+
+impl ComponentOrCoreFuncType {
+ fn as_core(&self) -> &Rc<crate::core::FuncType> {
+ match self {
+ ComponentOrCoreFuncType::Core(t) => t,
+ ComponentOrCoreFuncType::Component(_) => panic!("not a core func type"),
+ }
+ }
+
+ fn as_component(&self) -> &Rc<FuncType> {
+ match self {
+ ComponentOrCoreFuncType::Core(_) => panic!("not a component func type"),
+ ComponentOrCoreFuncType::Component(t) => t,
+ }
+ }
+}
+
+#[derive(Debug, Clone)]
+enum ComponentOrCoreInstanceType {
+ Component(Rc<InstanceType>),
+ Core(BTreeMap<String, crate::core::EntityType>),
+}
+
+/// Metadata (e.g. contents of various index spaces) we keep track of on a
+/// per-component basis.
+#[derive(Debug)]
+struct ComponentContext {
+ // The actual component itself.
+ component: Component,
+
+ // The number of imports we have generated thus far.
+ num_imports: usize,
+
+ // The set of names of imports we've generated thus far.
+ import_names: HashSet<KebabString>,
+
+ // The set of URLs of imports we've generated thus far.
+ import_urls: HashSet<KebabString>,
+
+ // This component's function index space.
+ funcs: Vec<ComponentOrCoreFuncType>,
+
+ // Which entries in `funcs` are component functions?
+ component_funcs: Vec<u32>,
+
+ // Which entries in `component_funcs` are component functions that only use scalar
+ // types?
+ scalar_component_funcs: Vec<u32>,
+
+ // Which entries in `funcs` are core Wasm functions?
+ //
+ // Note that a component can't import core functions, so these entries will
+ // never point to a `Section::Import`.
+ core_funcs: Vec<u32>,
+
+ // This component's component index space.
+ //
+ // An indirect list of all directly-nested (not transitive) components
+ // inside this component.
+ //
+ // Each entry is of the form `(i, j)` where `component.sections[i]` is
+ // guaranteed to be either
+ //
+ // * a `Section::Component` and we are referencing the component defined in
+ // that section (in this case `j` must also be `0`, since a component
+ // section can only contain a single nested component), or
+ //
+ // * a `Section::Import` and we are referencing the `j`th import in that
+ // section, which is guaranteed to be a component import.
+ components: Vec<(usize, usize)>,
+
+ // This component's module index space.
+ //
+ // An indirect list of all directly-nested (not transitive) modules
+ // inside this component.
+ //
+ // Each entry is of the form `(i, j)` where `component.sections[i]` is
+ // guaranteed to be either
+ //
+ // * a `Section::Core` and we are referencing the module defined in that
+ // section (in this case `j` must also be `0`, since a core section can
+ // only contain a single nested module), or
+ //
+ // * a `Section::Import` and we are referencing the `j`th import in that
+ // section, which is guaranteed to be a module import.
+ modules: Vec<(usize, usize)>,
+
+ // This component's instance index space.
+ instances: Vec<ComponentOrCoreInstanceType>,
+
+ // This component's value index space.
+ values: Vec<ComponentValType>,
+}
+
+impl ComponentContext {
+ fn empty() -> Self {
+ ComponentContext {
+ component: Component::empty(),
+ num_imports: 0,
+ import_names: HashSet::default(),
+ import_urls: HashSet::default(),
+ funcs: vec![],
+ component_funcs: vec![],
+ scalar_component_funcs: vec![],
+ core_funcs: vec![],
+ components: vec![],
+ modules: vec![],
+ instances: vec![],
+ values: vec![],
+ }
+ }
+
+ fn num_modules(&self) -> usize {
+ self.modules.len()
+ }
+
+ fn num_components(&self) -> usize {
+ self.components.len()
+ }
+
+ fn num_instances(&self) -> usize {
+ self.instances.len()
+ }
+
+ fn num_funcs(&self) -> usize {
+ self.funcs.len()
+ }
+
+ fn num_values(&self) -> usize {
+ self.values.len()
+ }
+}
+
+#[derive(Debug, Default)]
+struct TypesScope {
+ // All core types in this scope, regardless of kind.
+ core_types: Vec<Rc<CoreType>>,
+
+ // The indices of all the entries in `core_types` that are core function types.
+ core_func_types: Vec<u32>,
+
+ // The indices of all the entries in `core_types` that are module types.
+ module_types: Vec<u32>,
+
+ // All component types in this index space, regardless of kind.
+ types: Vec<Rc<Type>>,
+
+ // The indices of all the entries in `types` that are defined value types.
+ defined_types: Vec<u32>,
+
+ // The indices of all the entries in `types` that are func types.
+ func_types: Vec<u32>,
+
+ // A map from function types to their indices in the types space.
+ func_type_to_indices: HashMap<Rc<FuncType>, Vec<u32>>,
+
+ // The indices of all the entries in `types` that are component types.
+ component_types: Vec<u32>,
+
+ // The indices of all the entries in `types` that are instance types.
+ instance_types: Vec<u32>,
+}
+
+impl TypesScope {
+ fn push(&mut self, ty: Rc<Type>) -> u32 {
+ let ty_idx = u32::try_from(self.types.len()).unwrap();
+
+ let kind_list = match &*ty {
+ Type::Defined(_) => &mut self.defined_types,
+ Type::Func(func_ty) => {
+ self.func_type_to_indices
+ .entry(func_ty.clone())
+ .or_default()
+ .push(ty_idx);
+ &mut self.func_types
+ }
+ Type::Component(_) => &mut self.component_types,
+ Type::Instance(_) => &mut self.instance_types,
+ };
+ kind_list.push(ty_idx);
+
+ self.types.push(ty);
+ ty_idx
+ }
+
+ fn push_core(&mut self, ty: Rc<CoreType>) -> u32 {
+ let ty_idx = u32::try_from(self.core_types.len()).unwrap();
+
+ let kind_list = match &*ty {
+ CoreType::Func(_) => &mut self.core_func_types,
+ CoreType::Module(_) => &mut self.module_types,
+ };
+ kind_list.push(ty_idx);
+
+ self.core_types.push(ty);
+ ty_idx
+ }
+
+ fn get(&self, index: u32) -> &Rc<Type> {
+ &self.types[index as usize]
+ }
+
+ fn get_core(&self, index: u32) -> &Rc<CoreType> {
+ &self.core_types[index as usize]
+ }
+
+ fn get_func(&self, index: u32) -> &Rc<FuncType> {
+ match &**self.get(index) {
+ Type::Func(f) => f,
+ _ => panic!("get_func on non-function type"),
+ }
+ }
+
+ fn can_ref_type(&self) -> bool {
+ // All component types and core module types may be referenced
+ !self.types.is_empty() || !self.module_types.is_empty()
+ }
+}
+
+impl<'a> Arbitrary<'a> for Component {
+ fn arbitrary(u: &mut Unstructured<'a>) -> Result<Self> {
+ Ok(ConfiguredComponent::<DefaultConfig>::arbitrary(u)?.component)
+ }
+}
+
+/// A pseudo-random generated Wasm component with custom configuration.
+///
+/// If you don't care about custom configuration, use
+/// [`Component`][crate::Component] instead.
+///
+/// For details on configuring, see the [`Config`][crate::Config] trait.
+#[derive(Debug)]
+pub struct ConfiguredComponent<C> {
+ /// The generated component, controlled by the configuration of `C` in the
+ /// `Arbitrary` implementation.
+ pub component: Component,
+ _marker: marker::PhantomData<C>,
+}
+
+impl<'a, C> Arbitrary<'a> for ConfiguredComponent<C>
+where
+ C: Config + Arbitrary<'a>,
+{
+ fn arbitrary(u: &mut Unstructured<'a>) -> Result<Self> {
+ let config = C::arbitrary(u)?;
+ let component = Component::new(config, u)?;
+ Ok(ConfiguredComponent {
+ component,
+ _marker: marker::PhantomData,
+ })
+ }
+}
+
+#[derive(Default)]
+struct EntityCounts {
+ globals: usize,
+ tables: usize,
+ memories: usize,
+ tags: usize,
+ funcs: usize,
+}
+
+impl Component {
+    /// Construct a new `Component` using the given configuration.
+    pub fn new(config: impl Config, u: &mut Unstructured) -> Result<Self> {
+        ComponentBuilder::new(Rc::new(config)).build(u)
+    }
+
+    /// A component with no sections at all.
+    fn empty() -> Self {
+        Component {
+            sections: Vec::new(),
+        }
+    }
+}
+
+/// The outcome of one generation step: either the current component was
+/// finalized, or building continues.
+#[must_use]
+enum Step {
+    /// The component on top of the builder's stack was completed.
+    Finished(Component),
+    /// More sections may still be added to the current component.
+    StillBuilding,
+}
+
+impl Step {
+    /// Assert that this step did not finish a component.
+    ///
+    /// # Panics
+    ///
+    /// Panics if the step is `Finished`.
+    fn unwrap_still_building(self) {
+        if let Step::Finished(_) = self {
+            panic!(
+                "`Step::unwrap_still_building` called on a `Step` that is not `StillBuilding`"
+            )
+        }
+    }
+}
+
+impl ComponentBuilder {
+    /// Create a builder with a single root component context and a single
+    /// root types scope.
+    fn new(config: Rc<dyn Config>) -> Self {
+        ComponentBuilder {
+            config,
+            core_valtypes: Vec::new(),
+            types: vec![Default::default()],
+            components: vec![ComponentContext::empty()],
+            fill_minimums: false,
+            total_modules: 0,
+            total_components: 0,
+            total_instances: 0,
+            total_values: 0,
+        }
+    }
+
+    /// Top-level generation loop: repeatedly choose a section kind to append
+    /// to the current component until the "finish" choice is taken for the
+    /// root component.
+    fn build(&mut self, u: &mut Unstructured) -> Result<Component> {
+        self.core_valtypes = crate::core::configured_valtypes(&*self.config);
+
+        // Rebuilt each iteration: the set of valid next steps given current
+        // entity counts and nesting depth.
+        let mut choices: Vec<fn(&mut ComponentBuilder, &mut Unstructured) -> Result<Step>> = vec![];
+
+        loop {
+            choices.clear();
+            choices.push(Self::finish_component);
+
+            // Only add any choice other than "finish what we've generated thus
+            // far" when there is more arbitrary fuzzer data for us to consume.
+            if !u.is_empty() {
+                choices.push(Self::arbitrary_custom_section);
+
+                // NB: we add each section as a choice even if we've already
+                // generated our maximum number of entities in that section so that
+                // we can exercise adding empty sections to the end of the module.
+                choices.push(Self::arbitrary_core_type_section);
+                choices.push(Self::arbitrary_type_section);
+                choices.push(Self::arbitrary_import_section);
+                choices.push(Self::arbitrary_canonical_section);
+
+                if self.total_modules < self.config.max_modules() {
+                    choices.push(Self::arbitrary_core_module_section);
+                }
+
+                // Nested components are bounded both by nesting depth and by
+                // total component count.
+                if self.components.len() < self.config.max_nesting_depth()
+                    && self.total_components < self.config.max_components()
+                {
+                    choices.push(Self::arbitrary_component_section);
+                }
+
+                // TODO FITZGEN
+                //
+                // choices.push(Self::arbitrary_instance_section);
+                // choices.push(Self::arbitrary_export_section);
+                // choices.push(Self::arbitrary_start_section);
+                // choices.push(Self::arbitrary_alias_section);
+            }
+
+            let f = u.choose(&choices)?;
+            match f(self, u)? {
+                Step::StillBuilding => {}
+                Step::Finished(component) => {
+                    if self.components.is_empty() {
+                        // If we just finished the root component, then return it.
+                        return Ok(component);
+                    } else {
+                        // Otherwise, add it as a nested component in the parent.
+                        self.push_section(Section::Component(component));
+                    }
+                }
+            }
+        }
+    }
+
+    /// Finish the component currently being built: top up any configured
+    /// minimums (types, imports, funcs), then pop its types scope and its
+    /// component context off the builder's stacks.
+    fn finish_component(&mut self, u: &mut Unstructured) -> Result<Step> {
+        // Ensure we've generated all of our minimums.
+        self.fill_minimums = true;
+        {
+            if self.current_type_scope().types.len() < self.config.min_types() {
+                self.arbitrary_type_section(u)?.unwrap_still_building();
+            }
+            if self.component().num_imports < self.config.min_imports() {
+                self.arbitrary_import_section(u)?.unwrap_still_building();
+            }
+            if self.component().funcs.len() < self.config.min_funcs() {
+                self.arbitrary_canonical_section(u)?.unwrap_still_building();
+            }
+        }
+        self.fill_minimums = false;
+
+        self.types
+            .pop()
+            .expect("should have a types scope for the component we are finishing");
+        Ok(Step::Finished(self.components.pop().unwrap().component))
+    }
+
+    /// Borrow the generation configuration as a trait object.
+    fn config(&self) -> &dyn Config {
+        self.config.as_ref()
+    }
+
+    /// The component currently being built (innermost on the stack).
+    fn component(&self) -> &ComponentContext {
+        self.components
+            .last()
+            .expect("there is always a component under construction")
+    }
+
+    /// Mutable access to the component currently being built.
+    fn component_mut(&mut self) -> &mut ComponentContext {
+        self.components
+            .last_mut()
+            .expect("there is always a component under construction")
+    }
+
+    /// The most recently pushed section of the current component, if any.
+    fn last_section(&self) -> Option<&Section> {
+        let sections = &self.component().component.sections;
+        sections.last()
+    }
+
+    /// Mutable access to the most recently pushed section, if any.
+    fn last_section_mut(&mut self) -> Option<&mut Section> {
+        let sections = &mut self.component_mut().component.sections;
+        sections.last_mut()
+    }
+
+    /// Append `section` to the component currently being built.
+    fn push_section(&mut self, section: Section) {
+        let sections = &mut self.component_mut().component.sections;
+        sections.push(section);
+    }
+
+    /// Return the last section, first creating one via `make_section` unless
+    /// the current last section already satisfies `predicate`. This lets
+    /// consecutive entities of the same kind share one section.
+    fn ensure_section(
+        &mut self,
+        mut predicate: impl FnMut(&Section) -> bool,
+        mut make_section: impl FnMut() -> Section,
+    ) -> &mut Section {
+        let reuse_last = matches!(self.last_section(), Some(sec) if predicate(sec));
+        if !reuse_last {
+            self.push_section(make_section());
+        }
+        self.last_section_mut().unwrap()
+    }
+
+    /// Generate an arbitrary custom section and append it.
+    fn arbitrary_custom_section(&mut self, u: &mut Unstructured) -> Result<Step> {
+        let custom = u.arbitrary()?;
+        self.push_section(Section::Custom(custom));
+        Ok(Step::StillBuilding)
+    }
+
+    /// Add `ty` to the current (or a freshly started) type section and
+    /// register it in the current types scope; returns its new type index.
+    fn push_type(&mut self, ty: Rc<Type>) -> u32 {
+        match self.ensure_section(
+            |s| matches!(s, Section::Type(_)),
+            || Section::Type(TypeSection { types: vec![] }),
+        ) {
+            Section::Type(TypeSection { types }) => {
+                // Record the type both in the encoded section and in the
+                // scope used for index bookkeeping.
+                types.push(ty.clone());
+                self.current_type_scope_mut().push(ty)
+            }
+            _ => unreachable!(),
+        }
+    }
+
+    /// Add `ty` to the current (or a freshly started) core type section and
+    /// register it in the current types scope; returns its new type index.
+    fn push_core_type(&mut self, ty: Rc<CoreType>) -> u32 {
+        match self.ensure_section(
+            |s| matches!(s, Section::CoreType(_)),
+            || Section::CoreType(CoreTypeSection { types: vec![] }),
+        ) {
+            Section::CoreType(CoreTypeSection { types }) => {
+                // Record the type both in the encoded section and in the
+                // scope used for index bookkeeping.
+                types.push(ty.clone());
+                self.current_type_scope_mut().push_core(ty)
+            }
+            _ => unreachable!(),
+        }
+    }
+
+    /// Start a core type section and fill it with an arbitrary number of
+    /// core types, respecting the configured min/max type counts.
+    fn arbitrary_core_type_section(&mut self, u: &mut Unstructured) -> Result<Step> {
+        self.push_section(Section::CoreType(CoreTypeSection { types: vec![] }));
+
+        // When finishing a component we may be called to top up the minimum
+        // number of types; otherwise no minimum applies.
+        let min = if self.fill_minimums {
+            self.config
+                .min_types()
+                .saturating_sub(self.current_type_scope().types.len())
+        } else {
+            0
+        };
+
+        let max = self.config.max_types() - self.current_type_scope().types.len();
+
+        arbitrary_loop(u, min, max, |u| {
+            // Each generated type gets a fresh fuel budget to bound its size.
+            let mut type_fuel = self.config.max_type_size();
+            let ty = self.arbitrary_core_type(u, &mut type_fuel)?;
+            self.push_core_type(ty);
+            Ok(true)
+        })?;
+
+        Ok(Step::StillBuilding)
+    }
+
+    /// Generate one arbitrary core type: either a core function type or a
+    /// core module type.
+    fn arbitrary_core_type(
+        &self,
+        u: &mut Unstructured,
+        type_fuel: &mut u32,
+    ) -> Result<Rc<CoreType>> {
+        *type_fuel = type_fuel.saturating_sub(1);
+        if *type_fuel == 0 {
+            // Out of fuel: fall back to the cheapest possible type, an empty
+            // module type, instead of recursing further.
+            return Ok(Rc::new(CoreType::Module(Rc::new(ModuleType::default()))));
+        }
+
+        let ty = match u.int_in_range::<u8>(0..=1)? {
+            0 => CoreType::Func(crate::core::arbitrary_func_type(
+                u,
+                &self.core_valtypes,
+                // Without multi-value, limit function types to one result.
+                if self.config.multi_value_enabled() {
+                    None
+                } else {
+                    Some(1)
+                },
+            )?),
+            1 => CoreType::Module(self.arbitrary_module_type(u, type_fuel)?),
+            _ => unreachable!(),
+        };
+        Ok(Rc::new(ty))
+    }
+
+    /// Start a component type section and fill it with an arbitrary number
+    /// of component types, respecting the configured min/max type counts.
+    fn arbitrary_type_section(&mut self, u: &mut Unstructured) -> Result<Step> {
+        self.push_section(Section::Type(TypeSection { types: vec![] }));
+
+        // When finishing a component we may be called to top up the minimum
+        // number of types; otherwise no minimum applies.
+        let min = if self.fill_minimums {
+            self.config
+                .min_types()
+                .saturating_sub(self.current_type_scope().types.len())
+        } else {
+            0
+        };
+
+        let max = self.config.max_types() - self.current_type_scope().types.len();
+
+        arbitrary_loop(u, min, max, |u| {
+            // Each generated type gets a fresh fuel budget to bound its size.
+            let mut type_fuel = self.config.max_type_size();
+            let ty = self.arbitrary_type(u, &mut type_fuel)?;
+            self.push_type(ty);
+            Ok(true)
+        })?;
+
+        Ok(Step::StillBuilding)
+    }
+
+    /// Generate a reference to some type in the current scope, or `None` when
+    /// nothing can be referenced.
+    ///
+    /// `for_import` / `for_type_def` control which entity-count limits apply:
+    /// inside a type definition (or a non-import use) the per-component
+    /// entity maximums are not enforced.
+    fn arbitrary_type_ref<'a>(
+        &self,
+        u: &mut Unstructured<'a>,
+        for_import: bool,
+        for_type_def: bool,
+    ) -> Result<Option<ComponentTypeRef>> {
+        let mut choices: Vec<fn(&Self, &mut Unstructured) -> Result<ComponentTypeRef>> = Vec::new();
+        let scope = self.current_type_scope();
+
+        if !scope.module_types.is_empty()
+            && (for_type_def || !for_import || self.total_modules < self.config.max_modules())
+        {
+            choices.push(|me, u| {
+                Ok(ComponentTypeRef::Module(
+                    *u.choose(&me.current_type_scope().module_types)?,
+                ))
+            });
+        }
+
+        // Types cannot be imported currently
+        if !for_import
+            && !scope.types.is_empty()
+            && (for_type_def || scope.types.len() < self.config.max_types())
+        {
+            choices.push(|me, u| {
+                Ok(ComponentTypeRef::Type(
+                    TypeBounds::Eq,
+                    u.int_in_range(
+                        0..=u32::try_from(me.current_type_scope().types.len() - 1).unwrap(),
+                    )?,
+                ))
+            });
+        }
+
+        // TODO: wasm-smith needs to ensure that every arbitrary value gets used exactly once.
+        // until that time, don't import values
+        // if for_type_def || !for_import || self.total_values < self.config.max_values() {
+        //     choices.push(|me, u| Ok(ComponentTypeRef::Value(me.arbitrary_component_val_type(u)?)));
+        // }
+
+        if !scope.func_types.is_empty()
+            && (for_type_def
+                || !for_import
+                || self.component().num_funcs() < self.config.max_funcs())
+        {
+            choices.push(|me, u| {
+                Ok(ComponentTypeRef::Func(
+                    *u.choose(&me.current_type_scope().func_types)?,
+                ))
+            });
+        }
+
+        if !scope.component_types.is_empty()
+            && (for_type_def || !for_import || self.total_components < self.config.max_components())
+        {
+            choices.push(|me, u| {
+                Ok(ComponentTypeRef::Component(
+                    *u.choose(&me.current_type_scope().component_types)?,
+                ))
+            });
+        }
+
+        if !scope.instance_types.is_empty()
+            && (for_type_def || !for_import || self.total_instances < self.config.max_instances())
+        {
+            choices.push(|me, u| {
+                Ok(ComponentTypeRef::Instance(
+                    *u.choose(&me.current_type_scope().instance_types)?,
+                ))
+            });
+        }
+
+        // Nothing referenceable under the current limits.
+        if choices.is_empty() {
+            return Ok(None);
+        }
+
+        let f = u.choose(&choices)?;
+        f(self, u).map(Option::Some)
+    }
+
+    /// Generate one arbitrary component type: a defined (value) type, a
+    /// function type, a component type, or an instance type.
+    fn arbitrary_type(&mut self, u: &mut Unstructured, type_fuel: &mut u32) -> Result<Rc<Type>> {
+        *type_fuel = type_fuel.saturating_sub(1);
+        if *type_fuel == 0 {
+            // Out of fuel: restrict to a defined type, which cannot recurse
+            // into nested component/instance types.
+            return Ok(Rc::new(Type::Defined(
+                self.arbitrary_defined_type(u, type_fuel)?,
+            )));
+        }
+
+        let ty = match u.int_in_range::<u8>(0..=3)? {
+            0 => Type::Defined(self.arbitrary_defined_type(u, type_fuel)?),
+            1 => Type::Func(self.arbitrary_func_type(u, type_fuel)?),
+            2 => Type::Component(self.arbitrary_component_type(u, type_fuel)?),
+            3 => Type::Instance(self.arbitrary_instance_type(u, type_fuel)?),
+            _ => unreachable!(),
+        };
+        Ok(Rc::new(ty))
+    }
+
+    /// Generate an arbitrary core module type: a list of imports, exports,
+    /// type definitions, and outer type aliases.
+    ///
+    /// Canonical-ABI exports (`memory`, `canonical_abi_realloc`,
+    /// `canonical_abi_free`) are special-cased with a 99% probability each,
+    /// because randomly generating them is vanishingly unlikely and later
+    /// generation depends on their presence.
+    fn arbitrary_module_type(
+        &self,
+        u: &mut Unstructured,
+        type_fuel: &mut u32,
+    ) -> Result<Rc<ModuleType>> {
+        let mut defs = vec![];
+        let mut has_memory = false;
+        let mut has_canonical_abi_realloc = false;
+        let mut has_canonical_abi_free = false;
+        // Core function types defined (or aliased) so far in this module type;
+        // indices into this list are the module-local type indices.
+        let mut types: Vec<Rc<crate::core::FuncType>> = vec![];
+        let mut imports = HashMap::new();
+        let mut exports = HashSet::new();
+        let mut counts = EntityCounts::default();
+
+        // Special case the canonical ABI functions since certain types can only
+        // be passed across the component boundary if they exist and
+        // randomly generating them is extremely unlikely.
+
+        // `memory`
+        if counts.memories < self.config.max_memories() && u.ratio::<u8>(99, 100)? {
+            defs.push(ModuleTypeDef::Export(
+                "memory".into(),
+                crate::core::EntityType::Memory(self.arbitrary_core_memory_type(u)?),
+            ));
+            exports.insert("memory".into());
+            counts.memories += 1;
+            has_memory = true;
+        }
+
+        // `canonical_abi_realloc`
+        if counts.funcs < self.config.max_funcs()
+            && types.len() < self.config.max_types()
+            && u.ratio::<u8>(99, 100)?
+        {
+            // (old_ptr, old_len, align, new_len) -> new_ptr
+            let realloc_ty = Rc::new(crate::core::FuncType {
+                params: vec![ValType::I32, ValType::I32, ValType::I32, ValType::I32],
+                results: vec![ValType::I32],
+            });
+            let ty_idx = u32::try_from(types.len()).unwrap();
+            types.push(realloc_ty.clone());
+            defs.push(ModuleTypeDef::TypeDef(crate::core::Type::Func(
+                realloc_ty.clone(),
+            )));
+            defs.push(ModuleTypeDef::Export(
+                "canonical_abi_realloc".into(),
+                crate::core::EntityType::Func(ty_idx, realloc_ty),
+            ));
+            exports.insert("canonical_abi_realloc".into());
+            counts.funcs += 1;
+            has_canonical_abi_realloc = true;
+        }
+
+        // `canonical_abi_free`
+        if counts.funcs < self.config.max_funcs()
+            && types.len() < self.config.max_types()
+            && u.ratio::<u8>(99, 100)?
+        {
+            // (ptr, len, align) -> ()
+            let free_ty = Rc::new(crate::core::FuncType {
+                params: vec![ValType::I32, ValType::I32, ValType::I32],
+                results: vec![],
+            });
+            let ty_idx = u32::try_from(types.len()).unwrap();
+            types.push(free_ty.clone());
+            defs.push(ModuleTypeDef::TypeDef(crate::core::Type::Func(
+                free_ty.clone(),
+            )));
+            defs.push(ModuleTypeDef::Export(
+                "canonical_abi_free".into(),
+                crate::core::EntityType::Func(ty_idx, free_ty),
+            ));
+            exports.insert("canonical_abi_free".into());
+            counts.funcs += 1;
+            has_canonical_abi_free = true;
+        }
+
+        // Scratch buffer reused by `arbitrary_core_entity_type` below.
+        let mut entity_choices: Vec<
+            fn(
+                &ComponentBuilder,
+                &mut Unstructured,
+                &mut EntityCounts,
+                &[Rc<crate::core::FuncType>],
+            ) -> Result<crate::core::EntityType>,
+        > = Vec::with_capacity(5);
+
+        arbitrary_loop(u, 0, 100, |u| {
+            *type_fuel = type_fuel.saturating_sub(1);
+            if *type_fuel == 0 {
+                return Ok(false);
+            }
+
+            // The upper bound of the choice below depends on what is
+            // currently possible: type definitions require room for more
+            // types, and aliases require a type to alias (local or in the
+            // enclosing scope).
+            let max_choice = if types.len() < self.config.max_types() {
+                // Check if the parent scope has core function types to alias
+                if !types.is_empty()
+                    || (!self.types.is_empty()
+                        && !self.types.last().unwrap().core_func_types.is_empty())
+                {
+                    // Imports, exports, types, and aliases
+                    3
+                } else {
+                    // Imports, exports, and types
+                    2
+                }
+            } else {
+                // Imports and exports
+                1
+            };
+
+            match u.int_in_range::<u8>(0..=max_choice)? {
+                // Import.
+                0 => {
+                    let module = crate::limited_string(100, u)?;
+                    let existing_module_imports = imports.entry(module.clone()).or_default();
+                    let field = crate::unique_string(100, existing_module_imports, u)?;
+                    let entity_type = match self.arbitrary_core_entity_type(
+                        u,
+                        &types,
+                        &mut entity_choices,
+                        &mut counts,
+                    )? {
+                        None => return Ok(false),
+                        Some(x) => x,
+                    };
+                    defs.push(ModuleTypeDef::Import(crate::core::Import {
+                        module,
+                        field,
+                        entity_type,
+                    }));
+                }
+
+                // Export.
+                1 => {
+                    let name = crate::unique_string(100, &mut exports, u)?;
+                    let entity_ty = match self.arbitrary_core_entity_type(
+                        u,
+                        &types,
+                        &mut entity_choices,
+                        &mut counts,
+                    )? {
+                        None => return Ok(false),
+                        Some(x) => x,
+                    };
+                    defs.push(ModuleTypeDef::Export(name, entity_ty));
+                }
+
+                // Type definition.
+                2 => {
+                    let ty = crate::core::arbitrary_func_type(
+                        u,
+                        &self.core_valtypes,
+                        if self.config.multi_value_enabled() {
+                            None
+                        } else {
+                            Some(1)
+                        },
+                    )?;
+                    types.push(ty.clone());
+                    defs.push(ModuleTypeDef::TypeDef(crate::core::Type::Func(ty)));
+                }
+
+                // Alias
+                3 => {
+                    let (count, index, kind) = self.arbitrary_outer_core_type_alias(u, &types)?;
+                    let ty = match &kind {
+                        CoreOuterAliasKind::Type(ty) => ty.clone(),
+                    };
+                    // An aliased type also becomes addressable by local index.
+                    types.push(ty);
+                    defs.push(ModuleTypeDef::OuterAlias {
+                        count,
+                        i: index,
+                        kind,
+                    });
+                }
+
+                _ => unreachable!(),
+            }
+
+            Ok(true)
+        })?;
+
+        Ok(Rc::new(ModuleType {
+            defs,
+            has_memory,
+            has_canonical_abi_realloc,
+            has_canonical_abi_free,
+        }))
+    }
+
+    /// Generate one core entity type (global, table, memory, tag, or func)
+    /// for a module type's import or export, or `None` when every entity
+    /// kind has hit its configured maximum.
+    ///
+    /// `types` are the module-local core function types available for func
+    /// and tag entities; `counts` tracks per-kind totals and is updated for
+    /// whichever kind is chosen. `choices` is a caller-provided scratch
+    /// buffer so the candidate list isn't reallocated on every call.
+    fn arbitrary_core_entity_type(
+        &self,
+        u: &mut Unstructured,
+        types: &[Rc<crate::core::FuncType>],
+        choices: &mut Vec<
+            fn(
+                &ComponentBuilder,
+                &mut Unstructured,
+                &mut EntityCounts,
+                &[Rc<crate::core::FuncType>],
+            ) -> Result<crate::core::EntityType>,
+        >,
+        counts: &mut EntityCounts,
+    ) -> Result<Option<crate::core::EntityType>> {
+        choices.clear();
+
+        if counts.globals < self.config.max_globals() {
+            choices.push(|c, u, counts, _types| {
+                counts.globals += 1;
+                Ok(crate::core::EntityType::Global(
+                    c.arbitrary_core_global_type(u)?,
+                ))
+            });
+        }
+
+        if counts.tables < self.config.max_tables() {
+            choices.push(|c, u, counts, _types| {
+                counts.tables += 1;
+                Ok(crate::core::EntityType::Table(
+                    c.arbitrary_core_table_type(u)?,
+                ))
+            });
+        }
+
+        if counts.memories < self.config.max_memories() {
+            choices.push(|c, u, counts, _types| {
+                counts.memories += 1;
+                Ok(crate::core::EntityType::Memory(
+                    c.arbitrary_core_memory_type(u)?,
+                ))
+            });
+        }
+
+        // Tags require at least one function type with no results.
+        if types.iter().any(|ty| ty.results.is_empty())
+            && self.config.exceptions_enabled()
+            && counts.tags < self.config.max_tags()
+        {
+            // Fix: the builder parameter was named `c` but never used,
+            // triggering an `unused_variables` warning; follow the `_`-prefix
+            // convention already used for `_types` above.
+            choices.push(|_c, u, counts, types| {
+                counts.tags += 1;
+                let tag_func_types = types
+                    .iter()
+                    .enumerate()
+                    .filter(|(_, ty)| ty.results.is_empty())
+                    .map(|(i, _)| u32::try_from(i).unwrap())
+                    .collect::<Vec<_>>();
+                Ok(crate::core::EntityType::Tag(
+                    crate::core::arbitrary_tag_type(u, &tag_func_types, |idx| {
+                        types[usize::try_from(idx).unwrap()].clone()
+                    })?,
+                ))
+            });
+        }
+
+        if !types.is_empty() && counts.funcs < self.config.max_funcs() {
+            // Fix: as above, `c` was unused here.
+            choices.push(|_c, u, counts, types| {
+                counts.funcs += 1;
+                let ty_idx = u.int_in_range(0..=u32::try_from(types.len() - 1).unwrap())?;
+                let ty = types[ty_idx as usize].clone();
+                Ok(crate::core::EntityType::Func(ty_idx, ty))
+            });
+        }
+
+        if choices.is_empty() {
+            return Ok(None);
+        }
+
+        let f = u.choose(choices)?;
+        let ty = f(self, u, counts, types)?;
+        Ok(Some(ty))
+    }
+
+    /// Pick one of the configured core value types.
+    fn arbitrary_core_valtype(&self, u: &mut Unstructured) -> Result<ValType> {
+        let ty = u.choose(&self.core_valtypes)?;
+        Ok(*ty)
+    }
+
+    /// Generate a core global type: an arbitrary value type plus mutability.
+    fn arbitrary_core_global_type(&self, u: &mut Unstructured) -> Result<crate::core::GlobalType> {
+        // Draw the value type before the mutability flag so the input bytes
+        // are consumed in the same order as before.
+        let val_type = self.arbitrary_core_valtype(u)?;
+        let mutable = u.arbitrary()?;
+        Ok(crate::core::GlobalType { val_type, mutable })
+    }
+
+    /// Generate a core table type within the configured limits.
+    fn arbitrary_core_table_type(&self, u: &mut Unstructured) -> Result<crate::core::TableType> {
+        crate::core::arbitrary_table_type(u, self.config())
+    }
+
+    /// Generate a core memory type within the configured limits.
+    fn arbitrary_core_memory_type(&self, u: &mut Unstructured) -> Result<crate::core::MemoryType> {
+        crate::core::arbitrary_memtype(u, self.config())
+    }
+
+    /// Run `f` with a fresh, nested types scope pushed onto the scope stack.
+    /// The scope is popped again afterwards regardless of whether `f`
+    /// succeeded or returned an error.
+    fn with_types_scope<T>(&mut self, f: impl FnOnce(&mut Self) -> Result<T>) -> Result<T> {
+        self.types.push(Default::default());
+        let result = f(self);
+        self.types.pop();
+        result
+    }
+
+    /// The innermost (current) types scope.
+    fn current_type_scope(&self) -> &TypesScope {
+        self.types.last().expect("there is always a current scope")
+    }
+
+    /// Mutable access to the innermost (current) types scope.
+    fn current_type_scope_mut(&mut self) -> &mut TypesScope {
+        self.types
+            .last_mut()
+            .expect("there is always a current scope")
+    }
+
+    /// The types scope `count` levels outward from the current one
+    /// (`count == 0` is the current scope).
+    fn outer_types_scope(&self, count: u32) -> &TypesScope {
+        let idx = self.types.len() - 1 - usize::try_from(count).unwrap();
+        &self.types[idx]
+    }
+
+    /// The `i`th component type in the scope `count` levels outward.
+    fn outer_type(&self, count: u32, i: u32) -> &Rc<Type> {
+        let scope = self.outer_types_scope(count);
+        &scope.types[usize::try_from(i).unwrap()]
+    }
+
+    /// Generate a component type: a sequence of imports plus the same
+    /// declarations an instance type may contain (types, exports, aliases),
+    /// generated inside a fresh nested types scope.
+    fn arbitrary_component_type(
+        &mut self,
+        u: &mut Unstructured,
+        type_fuel: &mut u32,
+    ) -> Result<Rc<ComponentType>> {
+        let mut defs = vec![];
+        let mut imports = HashSet::new();
+        let mut import_urls = HashSet::new();
+        let mut exports = HashSet::new();
+        let mut export_urls = HashSet::new();
+
+        self.with_types_scope(|me| {
+            arbitrary_loop(u, 0, 100, |u| {
+                *type_fuel = type_fuel.saturating_sub(1);
+                if *type_fuel == 0 {
+                    return Ok(false);
+                }
+
+                // With probability 1/4, try to generate an import declaration.
+                if me.current_type_scope().can_ref_type() && u.int_in_range::<u8>(0..=3)? == 0 {
+                    if let Some(ty) = me.arbitrary_type_ref(u, true, true)? {
+                        // Imports.
+                        let name = crate::unique_kebab_string(100, &mut imports, u)?;
+                        let url = if u.arbitrary()? {
+                            Some(crate::unique_url(100, &mut import_urls, u)?)
+                        } else {
+                            None
+                        };
+                        defs.push(ComponentTypeDef::Import(Import { name, url, ty }));
+                        return Ok(true);
+                    }
+
+                    // Can't reference an arbitrary type, fallback to another definition.
+                }
+
+                // Type definitions, exports, and aliases.
+                let def =
+                    me.arbitrary_instance_type_def(u, &mut exports, &mut export_urls, type_fuel)?;
+                defs.push(def.into());
+                Ok(true)
+            })
+        })?;
+
+        Ok(Rc::new(ComponentType { defs }))
+    }
+
+    /// Generate an instance type: a sequence of type definitions, exports,
+    /// and aliases, generated inside a fresh nested types scope.
+    fn arbitrary_instance_type(
+        &mut self,
+        u: &mut Unstructured,
+        type_fuel: &mut u32,
+    ) -> Result<Rc<InstanceType>> {
+        let mut defs = vec![];
+        let mut exports = HashSet::new();
+        let mut export_urls = HashSet::new();
+
+        self.with_types_scope(|me| {
+            arbitrary_loop(u, 0, 100, |u| {
+                *type_fuel = type_fuel.saturating_sub(1);
+                if *type_fuel == 0 {
+                    return Ok(false);
+                }
+
+                defs.push(me.arbitrary_instance_type_def(
+                    u,
+                    &mut exports,
+                    &mut export_urls,
+                    type_fuel,
+                )?);
+                Ok(true)
+            })
+        })?;
+
+        Ok(Rc::new(InstanceType { defs }))
+    }
+
+    /// Generate a single instance-type declaration: an export, an outer type
+    /// alias, a core type definition, or a component type definition.
+    ///
+    /// `exports` / `export_urls` track names already used so generated
+    /// export names and URLs stay unique.
+    fn arbitrary_instance_type_def(
+        &mut self,
+        u: &mut Unstructured,
+        exports: &mut HashSet<KebabString>,
+        export_urls: &mut HashSet<KebabString>,
+        type_fuel: &mut u32,
+    ) -> Result<InstanceTypeDecl> {
+        let mut choices: Vec<
+            fn(
+                &mut ComponentBuilder,
+                &mut HashSet<KebabString>,
+                &mut HashSet<KebabString>,
+                &mut Unstructured,
+                &mut u32,
+            ) -> Result<InstanceTypeDecl>,
+        > = Vec::with_capacity(3);
+
+        // Export.
+        if self.current_type_scope().can_ref_type() {
+            choices.push(|me, exports, export_urls, u, _type_fuel| {
+                let ty = me.arbitrary_type_ref(u, false, true)?.unwrap();
+                // Exporting a type introduces it into the current scope.
+                if let ComponentTypeRef::Type(_, idx) = ty {
+                    let ty = me.current_type_scope().get(idx).clone();
+                    me.current_type_scope_mut().push(ty);
+                }
+                Ok(InstanceTypeDecl::Export {
+                    name: crate::unique_kebab_string(100, exports, u)?,
+                    url: if u.arbitrary()? {
+                        Some(crate::unique_url(100, export_urls, u)?)
+                    } else {
+                        None
+                    },
+                    ty,
+                })
+            });
+        }
+
+        // Outer type alias.
+        if self
+            .types
+            .iter()
+            .any(|scope| !scope.types.is_empty() || !scope.core_types.is_empty())
+        {
+            choices.push(|me, _exports, _export_urls, u, _type_fuel| {
+                let alias = me.arbitrary_outer_type_alias(u)?;
+                // The aliased type becomes addressable in the current scope.
+                match &alias {
+                    Alias::Outer {
+                        kind: OuterAliasKind::Type(ty),
+                        ..
+                    } => me.current_type_scope_mut().push(ty.clone()),
+                    Alias::Outer {
+                        kind: OuterAliasKind::CoreType(ty),
+                        ..
+                    } => me.current_type_scope_mut().push_core(ty.clone()),
+                    _ => unreachable!(),
+                };
+                Ok(InstanceTypeDecl::Alias(alias))
+            });
+        }
+
+        // Core type definition.
+        choices.push(|me, _exports, _export_urls, u, type_fuel| {
+            let ty = me.arbitrary_core_type(u, type_fuel)?;
+            me.current_type_scope_mut().push_core(ty.clone());
+            Ok(InstanceTypeDecl::CoreType(ty))
+        });
+
+        // Type definition.
+        if self.types.len() < self.config.max_nesting_depth() {
+            choices.push(|me, _exports, _export_urls, u, type_fuel| {
+                let ty = me.arbitrary_type(u, type_fuel)?;
+                me.current_type_scope_mut().push(ty.clone());
+                Ok(InstanceTypeDecl::Type(ty))
+            });
+        }
+
+        let f = u.choose(&choices)?;
+        f(self, exports, export_urls, u, type_fuel)
+    }
+
+    /// Pick a core function type to alias: either one from the enclosing
+    /// types scope (outer count 1) or one of the module-local `local_types`
+    /// (outer count 0). Returns `(count, index, kind)`.
+    ///
+    /// Precondition: at least one candidate exists in either list.
+    fn arbitrary_outer_core_type_alias(
+        &self,
+        u: &mut Unstructured,
+        local_types: &[Rc<crate::core::FuncType>],
+    ) -> Result<(u32, u32, CoreOuterAliasKind)> {
+        let enclosing_type_len = if !self.types.is_empty() {
+            self.types.last().unwrap().core_func_types.len()
+        } else {
+            0
+        };
+
+        assert!(!local_types.is_empty() || enclosing_type_len > 0);
+
+        // Choose uniformly across both candidate lists, enclosing scope
+        // first.
+        let max = enclosing_type_len + local_types.len() - 1;
+        let i = u.int_in_range(0..=max)?;
+        let (count, index, ty) = if i < enclosing_type_len {
+            let enclosing = self.types.last().unwrap();
+            let index = enclosing.core_func_types[i];
+            (
+                1,
+                index,
+                match enclosing.get_core(index).as_ref() {
+                    CoreType::Func(ty) => ty.clone(),
+                    CoreType::Module(_) => unreachable!(),
+                },
+            )
+        } else if i - enclosing_type_len < local_types.len() {
+            let i = i - enclosing_type_len;
+            (0, u32::try_from(i).unwrap(), local_types[i].clone())
+        } else {
+            unreachable!()
+        };
+
+        Ok((count, index, CoreOuterAliasKind::Type(ty)))
+    }
+
+    /// Pick an outer alias to some component or core type in any enclosing
+    /// (or the current) types scope.
+    ///
+    /// Precondition: at least one scope on the stack has a type in it.
+    fn arbitrary_outer_type_alias(&self, u: &mut Unstructured) -> Result<Alias> {
+        // Pair each non-empty scope with its outward distance (`count`).
+        let non_empty_types_scopes: Vec<_> = self
+            .types
+            .iter()
+            .rev()
+            .enumerate()
+            .filter(|(_, scope)| !scope.types.is_empty() || !scope.core_types.is_empty())
+            .collect();
+        assert!(
+            !non_empty_types_scopes.is_empty(),
+            "precondition: there are non-empty types scopes"
+        );
+
+        let (count, scope) = u.choose(&non_empty_types_scopes)?;
+        let count = u32::try_from(*count).unwrap();
+        assert!(!scope.types.is_empty() || !scope.core_types.is_empty());
+
+        // Choose uniformly across the scope's component types followed by
+        // its core types.
+        let max_type_in_scope = scope.types.len() + scope.core_types.len() - 1;
+        let i = u.int_in_range(0..=max_type_in_scope)?;
+
+        let (i, kind) = if i < scope.types.len() {
+            let i = u32::try_from(i).unwrap();
+            (i, OuterAliasKind::Type(Rc::clone(scope.get(i))))
+        } else if i - scope.types.len() < scope.core_types.len() {
+            let i = u32::try_from(i - scope.types.len()).unwrap();
+            (i, OuterAliasKind::CoreType(Rc::clone(scope.get_core(i))))
+        } else {
+            unreachable!()
+        };
+
+        Ok(Alias::Outer { count, i, kind })
+    }
+
+    /// Generate a component function type: uniquely named parameters plus at
+    /// most one (possibly named) result.
+    fn arbitrary_func_type(
+        &self,
+        u: &mut Unstructured,
+        type_fuel: &mut u32,
+    ) -> Result<Rc<FuncType>> {
+        let mut params = Vec::new();
+        let mut results = Vec::new();
+        let mut names = HashSet::new();
+
+        // Note: parameters are currently limited to a maximum of 16
+        // because any additional parameters will require indirect access
+        // via a pointer argument; when this occurs, validation of any
+        // lowered function will fail because it will be missing a
+        // memory option (not yet implemented).
+        //
+        // When options are correctly specified on canonical functions,
+        // we should increase this maximum to test indirect parameter
+        // passing.
+        arbitrary_loop(u, 0, 16, |u| {
+            *type_fuel = type_fuel.saturating_sub(1);
+            if *type_fuel == 0 {
+                return Ok(false);
+            }
+
+            let name = crate::unique_kebab_string(100, &mut names, u)?;
+            let ty = self.arbitrary_component_val_type(u)?;
+
+            params.push((name, ty));
+
+            Ok(true)
+        })?;
+
+        // Parameter and result namespaces are independent; reuse the set.
+        names.clear();
+
+        // Likewise, the limit for results is 1 before the memory option is
+        // required. When the memory option is implemented, this restriction
+        // should be relaxed.
+        arbitrary_loop(u, 0, 1, |u| {
+            *type_fuel = type_fuel.saturating_sub(1);
+            if *type_fuel == 0 {
+                return Ok(false);
+            }
+
+            // If the result list is empty (i.e. first push), then arbitrarily give
+            // the result a name. Otherwise, all of the subsequent items must be named.
+            let name = if results.is_empty() {
+                // Most of the time we should have a single, unnamed result.
+                u.ratio::<u8>(10, 100)?
+                    .then(|| crate::unique_kebab_string(100, &mut names, u))
+                    .transpose()?
+            } else {
+                Some(crate::unique_kebab_string(100, &mut names, u)?)
+            };
+
+            let ty = self.arbitrary_component_val_type(u)?;
+
+            results.push((name, ty));
+
+            // There can be only one unnamed result.
+            if results.len() == 1 && results[0].0.is_none() {
+                return Ok(false);
+            }
+
+            Ok(true)
+        })?;
+
+        Ok(Rc::new(FuncType { params, results }))
+    }
+
+    /// Generate a component value type: either a primitive type or a
+    /// reference to one of the current scope's defined types (when any
+    /// exist).
+    fn arbitrary_component_val_type(&self, u: &mut Unstructured) -> Result<ComponentValType> {
+        // Only offer the "defined type" choice when the current scope has
+        // defined types to refer to.
+        let max_choices = if self.current_type_scope().defined_types.is_empty() {
+            0
+        } else {
+            1
+        };
+        match u.int_in_range(0..=max_choices)? {
+            0 => Ok(ComponentValType::Primitive(
+                self.arbitrary_primitive_val_type(u)?,
+            )),
+            1 => {
+                // Fix: previously the referenced `Rc<Type>` was also cloned
+                // into an unused local (`let ty = Rc::clone(...)`), which was
+                // dead work and triggered an unused-variable warning.
+                let index = *u.choose(&self.current_type_scope().defined_types)?;
+                Ok(ComponentValType::Type(index))
+            }
+            _ => unreachable!(),
+        }
+    }
+
+    /// Pick one of the thirteen primitive component value types, uniformly.
+    fn arbitrary_primitive_val_type(&self, u: &mut Unstructured) -> Result<PrimitiveValType> {
+        Ok(match u.int_in_range(0..=12)? {
+            0 => PrimitiveValType::Bool,
+            1 => PrimitiveValType::S8,
+            2 => PrimitiveValType::U8,
+            3 => PrimitiveValType::S16,
+            4 => PrimitiveValType::U16,
+            5 => PrimitiveValType::S32,
+            6 => PrimitiveValType::U32,
+            7 => PrimitiveValType::S64,
+            8 => PrimitiveValType::U64,
+            9 => PrimitiveValType::Float32,
+            10 => PrimitiveValType::Float64,
+            11 => PrimitiveValType::Char,
+            12 => PrimitiveValType::String,
+            _ => unreachable!(),
+        })
+    }
+
+    /// Generate a record type with uniquely named fields.
+    fn arbitrary_record_type(
+        &self,
+        u: &mut Unstructured,
+        type_fuel: &mut u32,
+    ) -> Result<RecordType> {
+        let mut fields = vec![];
+        let mut field_names = HashSet::new();
+        arbitrary_loop(u, 0, 100, |u| {
+            *type_fuel = type_fuel.saturating_sub(1);
+            if *type_fuel == 0 {
+                return Ok(false);
+            }
+
+            let name = crate::unique_kebab_string(100, &mut field_names, u)?;
+            let ty = self.arbitrary_component_val_type(u)?;
+
+            fields.push((name, ty));
+            Ok(true)
+        })?;
+        Ok(RecordType { fields })
+    }
+
+    /// Generate a variant type with at least one case. Each case has a
+    /// unique name, an optional payload type, and an optional `refines`
+    /// reference to an earlier case.
+    fn arbitrary_variant_type(
+        &self,
+        u: &mut Unstructured,
+        type_fuel: &mut u32,
+    ) -> Result<VariantType> {
+        let mut cases = vec![];
+        let mut case_names = HashSet::new();
+        arbitrary_loop(u, 1, 100, |u| {
+            *type_fuel = type_fuel.saturating_sub(1);
+            if *type_fuel == 0 {
+                return Ok(false);
+            }
+
+            let name = crate::unique_kebab_string(100, &mut case_names, u)?;
+
+            let ty = u
+                .arbitrary::<bool>()?
+                .then(|| self.arbitrary_component_val_type(u))
+                .transpose()?;
+
+            // `refines` may only point at an already-generated case.
+            let refines = if !cases.is_empty() && u.arbitrary()? {
+                let max_cases = u32::try_from(cases.len() - 1).unwrap();
+                Some(u.int_in_range(0..=max_cases)?)
+            } else {
+                None
+            };
+
+            cases.push((name, ty, refines));
+            Ok(true)
+        })?;
+
+        Ok(VariantType { cases })
+    }
+
+    /// Generate a list type with an arbitrary element type.
+    fn arbitrary_list_type(&self, u: &mut Unstructured) -> Result<ListType> {
+        let elem_ty = self.arbitrary_component_val_type(u)?;
+        Ok(ListType { elem_ty })
+    }
+
+    /// Generate a tuple type with arbitrarily many (possibly zero) fields.
+    fn arbitrary_tuple_type(&self, u: &mut Unstructured, type_fuel: &mut u32) -> Result<TupleType> {
+        let mut fields = vec![];
+        arbitrary_loop(u, 0, 100, |u| {
+            *type_fuel = type_fuel.saturating_sub(1);
+            if *type_fuel == 0 {
+                return Ok(false);
+            }
+
+            fields.push(self.arbitrary_component_val_type(u)?);
+            Ok(true)
+        })?;
+        Ok(TupleType { fields })
+    }
+
+    /// Generate a flags type with uniquely named flags.
+    fn arbitrary_flags_type(&self, u: &mut Unstructured, type_fuel: &mut u32) -> Result<FlagsType> {
+        let mut fields = vec![];
+        let mut field_names = HashSet::new();
+        arbitrary_loop(u, 0, 100, |u| {
+            *type_fuel = type_fuel.saturating_sub(1);
+            if *type_fuel == 0 {
+                return Ok(false);
+            }
+
+            fields.push(crate::unique_kebab_string(100, &mut field_names, u)?);
+            Ok(true)
+        })?;
+        Ok(FlagsType { fields })
+    }
+
+    /// Generate an enum type with at least one uniquely named variant.
+    fn arbitrary_enum_type(&self, u: &mut Unstructured, type_fuel: &mut u32) -> Result<EnumType> {
+        let mut variants = vec![];
+        let mut variant_names = HashSet::new();
+        arbitrary_loop(u, 1, 100, |u| {
+            *type_fuel = type_fuel.saturating_sub(1);
+            if *type_fuel == 0 {
+                return Ok(false);
+            }
+
+            variants.push(crate::unique_kebab_string(100, &mut variant_names, u)?);
+            Ok(true)
+        })?;
+        Ok(EnumType { variants })
+    }
+
+    /// Generate a union type with at least one variant type.
+    fn arbitrary_union_type(&self, u: &mut Unstructured, type_fuel: &mut u32) -> Result<UnionType> {
+        let mut variants = vec![];
+        arbitrary_loop(u, 1, 100, |u| {
+            *type_fuel = type_fuel.saturating_sub(1);
+            if *type_fuel == 0 {
+                return Ok(false);
+            }
+
+            variants.push(self.arbitrary_component_val_type(u)?);
+            Ok(true)
+        })?;
+        Ok(UnionType { variants })
+    }
+
+    /// Generate an `option` type with an arbitrary payload type.
+    fn arbitrary_option_type(&self, u: &mut Unstructured) -> Result<OptionType> {
+        let inner_ty = self.arbitrary_component_val_type(u)?;
+        Ok(OptionType { inner_ty })
+    }
+
+    /// Generate a `result` type; the `ok` and `err` payload types are each
+    /// independently present or absent.
+    fn arbitrary_result_type(&self, u: &mut Unstructured) -> Result<ResultType> {
+        // Draw the `ok` payload before the `err` payload so the input bytes
+        // are consumed in the same order as before.
+        let ok_ty = if u.arbitrary::<bool>()? {
+            Some(self.arbitrary_component_val_type(u)?)
+        } else {
+            None
+        };
+        let err_ty = if u.arbitrary::<bool>()? {
+            Some(self.arbitrary_component_val_type(u)?)
+        } else {
+            None
+        };
+        Ok(ResultType { ok_ty, err_ty })
+    }
+
+    /// Generate one arbitrary defined (value) type, choosing uniformly among
+    /// the ten defined-type constructors.
+    fn arbitrary_defined_type(
+        &self,
+        u: &mut Unstructured,
+        type_fuel: &mut u32,
+    ) -> Result<DefinedType> {
+        Ok(match u.int_in_range(0..=9)? {
+            0 => DefinedType::Primitive(self.arbitrary_primitive_val_type(u)?),
+            1 => DefinedType::Record(self.arbitrary_record_type(u, type_fuel)?),
+            2 => DefinedType::Variant(self.arbitrary_variant_type(u, type_fuel)?),
+            3 => DefinedType::List(self.arbitrary_list_type(u)?),
+            4 => DefinedType::Tuple(self.arbitrary_tuple_type(u, type_fuel)?),
+            5 => DefinedType::Flags(self.arbitrary_flags_type(u, type_fuel)?),
+            6 => DefinedType::Enum(self.arbitrary_enum_type(u, type_fuel)?),
+            7 => DefinedType::Union(self.arbitrary_union_type(u, type_fuel)?),
+            8 => DefinedType::Option(self.arbitrary_option_type(u)?),
+            9 => DefinedType::Result(self.arbitrary_result_type(u)?),
+            _ => unreachable!(),
+        })
+    }
+
+    /// Append an import to the current (or a freshly started) import section
+    /// and update all index spaces affected by the imported entity's kind.
+    fn push_import(&mut self, name: KebabString, url: Option<String>, ty: ComponentTypeRef) {
+        // `nth` is the import's position within its import section.
+        let nth = match self.ensure_section(
+            |sec| matches!(sec, Section::Import(_)),
+            || Section::Import(ImportSection { imports: vec![] }),
+        ) {
+            Section::Import(sec) => {
+                sec.imports.push(Import { name, url, ty });
+                sec.imports.len() - 1
+            }
+            _ => unreachable!(),
+        };
+        let section_index = self.component().component.sections.len() - 1;
+
+        // Record the imported entity in the appropriate index space(s).
+        match ty {
+            ComponentTypeRef::Module(_) => {
+                self.total_modules += 1;
+                self.component_mut().modules.push((section_index, nth));
+            }
+            ComponentTypeRef::Func(ty_index) => {
+                let func_ty = match self.current_type_scope().get(ty_index).as_ref() {
+                    Type::Func(ty) => ty.clone(),
+                    _ => unreachable!(),
+                };
+
+                // Scalar-typed functions are additionally tracked so they can
+                // be lowered without canonical-ABI memory options.
+                if func_ty.is_scalar() {
+                    let func_index = u32::try_from(self.component().component_funcs.len()).unwrap();
+                    self.component_mut().scalar_component_funcs.push(func_index);
+                }
+
+                let func_index = u32::try_from(self.component().funcs.len()).unwrap();
+                self.component_mut()
+                    .funcs
+                    .push(ComponentOrCoreFuncType::Component(func_ty));
+
+                self.component_mut().component_funcs.push(func_index);
+            }
+            ComponentTypeRef::Value(ty) => {
+                self.total_values += 1;
+                self.component_mut().values.push(ty);
+            }
+            ComponentTypeRef::Type(TypeBounds::Eq, ty_index) => {
+                // An imported type is re-introduced into the current scope.
+                let ty = self.current_type_scope().get(ty_index).clone();
+                self.current_type_scope_mut().push(ty);
+            }
+            ComponentTypeRef::Instance(ty_index) => {
+                let instance_ty = match self.current_type_scope().get(ty_index).as_ref() {
+                    Type::Instance(ty) => ty.clone(),
+                    _ => unreachable!(),
+                };
+
+                self.total_instances += 1;
+                self.component_mut()
+                    .instances
+                    .push(ComponentOrCoreInstanceType::Component(instance_ty));
+            }
+            ComponentTypeRef::Component(_) => {
+                self.total_components += 1;
+                self.component_mut().components.push((section_index, nth));
+            }
+        }
+    }
+
+ /// Look up the core function type of the `core_func_index`th core
+ /// function, resolving through the shared `funcs` index space.
+ fn core_function_type(&self, core_func_index: u32) -> &Rc<crate::core::FuncType> {
+ self.component().funcs[self.component().core_funcs[core_func_index as usize] as usize]
+ .as_core()
+ }
+
+ /// Look up the component function type of the `func_index`th component
+ /// function, resolving through the shared `funcs` index space.
+ fn component_function_type(&self, func_index: u32) -> &Rc<FuncType> {
+ self.component().funcs[self.component().component_funcs[func_index as usize] as usize]
+ .as_component()
+ }
+
+ /// Append `func` to the trailing canonical section (creating one if the
+ /// last section is not canonical) and register its type in the
+ /// appropriate function index spaces.
+ fn push_func(&mut self, func: Func) {
+ // `nth` is the function's index within the canonical section.
+ let nth = match self.component_mut().component.sections.last_mut() {
+ Some(Section::Canonical(CanonicalSection { funcs })) => funcs.len(),
+ _ => {
+ self.push_section(Section::Canonical(CanonicalSection { funcs: vec![] }));
+ 0
+ }
+ };
+ let section_index = self.component().component.sections.len() - 1;
+
+ // Index this function will occupy in the shared `funcs` space; must be
+ // computed before pushing the type below.
+ let func_index = u32::try_from(self.component().funcs.len()).unwrap();
+
+ let ty = match &func {
+ Func::CanonLift { func_ty, .. } => {
+ // Lifting produces a component function; scalar ones are also
+ // recorded in `scalar_component_funcs`.
+ let ty = Rc::clone(self.current_type_scope().get_func(*func_ty));
+ if ty.is_scalar() {
+ let func_index = u32::try_from(self.component().component_funcs.len()).unwrap();
+ self.component_mut().scalar_component_funcs.push(func_index);
+ }
+ self.component_mut().component_funcs.push(func_index);
+ ComponentOrCoreFuncType::Component(ty)
+ }
+ Func::CanonLower {
+ func_index: comp_func_index,
+ ..
+ } => {
+ // Lowering produces a core function whose type is the
+ // canonical-ABI translation of the component function's type.
+ let comp_func_ty = self.component_function_type(*comp_func_index);
+ let core_func_ty = canonical_abi_for(comp_func_ty);
+ self.component_mut().core_funcs.push(func_index);
+ ComponentOrCoreFuncType::Core(core_func_ty)
+ }
+ };
+
+ self.component_mut().funcs.push(ty);
+
+ match self.component_mut().component.sections.last_mut() {
+ Some(Section::Canonical(CanonicalSection { funcs })) => funcs.push(func),
+ _ => unreachable!(),
+ }
+ }
+
+ /// Generate an arbitrary import section, respecting the configured
+ /// min/max import counts.
+ fn arbitrary_import_section(&mut self, u: &mut Unstructured) -> Result<Step> {
+ self.push_section(Section::Import(ImportSection { imports: vec![] }));
+
+ let min = if self.fill_minimums {
+ self.config
+ .min_imports()
+ .saturating_sub(self.component().num_imports)
+ } else {
+ // Allow generating empty sections. We can always fill in the required
+ // minimum later.
+ 0
+ };
+ let max = self.config.max_imports() - self.component().num_imports;
+
+ crate::arbitrary_loop(u, min, max, |u| {
+ // A `None` type ref means no importable type was available; stop
+ // the loop in that case.
+ match self.arbitrary_type_ref(u, true, false)? {
+ Some(ty) => {
+ // Names must be unique kebab-case strings; URLs are optional.
+ let name =
+ crate::unique_kebab_string(100, &mut self.component_mut().import_names, u)?;
+ let url = if u.arbitrary()? {
+ Some(crate::unique_url(
+ 100,
+ &mut self.component_mut().import_urls,
+ u,
+ )?)
+ } else {
+ None
+ };
+ self.push_import(name, url, ty);
+ Ok(true)
+ }
+ None => Ok(false),
+ }
+ })?;
+
+ Ok(Step::StillBuilding)
+ }
+
+ /// Generate an arbitrary canonical-function section containing
+ /// `canon lift` / `canon lower` definitions over scalar functions only.
+ fn arbitrary_canonical_section(&mut self, u: &mut Unstructured) -> Result<Step> {
+ self.push_section(Section::Canonical(CanonicalSection { funcs: vec![] }));
+
+ let min = if self.fill_minimums {
+ self.config
+ .min_funcs()
+ .saturating_sub(self.component().funcs.len())
+ } else {
+ // Allow generating empty sections. We can always fill in the
+ // required minimum later.
+ 0
+ };
+ let max = self.config.max_funcs() - self.component().funcs.len();
+
+ // Candidate generators; rebuilt each iteration because the available
+ // choices depend on what has been generated so far.
+ let mut choices: Vec<fn(&mut Unstructured, &mut ComponentBuilder) -> Result<Option<Func>>> =
+ Vec::with_capacity(2);
+
+ crate::arbitrary_loop(u, min, max, |u| {
+ choices.clear();
+
+ // NB: We only lift/lower scalar component functions.
+ //
+ // If we generated lifting and lowering of compound value types,
+ // the probability of generating a corresponding Wasm module that
+ // generates valid instances of the compound value types would
+ // be vanishingly tiny (e.g. for `list<string>` we would have to
+ // generate a core Wasm module that correctly produces a pointer and
+ // length for a memory region that itself is a series of pointers
+ // and lengths of valid strings, as well as `canonical_abi_realloc`
+ // and `canonical_abi_free` functions that do the right thing).
+ //
+ // This is a pretty serious limitation of `wasm-smith`'s component
+ // types support, but it is one we are intentionally
+ // accepting. `wasm-smith` will focus on generating arbitrary
+ // component sections, structures, and import/export topologies; not
+ // component functions and core Wasm implementations of component
+ // functions. In the future, we intend to build a new, distinct test
+ // case generator specifically for exercising component functions
+ // and the canonical ABI. This new generator won't emit arbitrary
+ // component sections, structures, or import/export topologies, and
+ // will instead leave that to `wasm-smith`.
+
+ // Lowering: pick any existing scalar component function.
+ if !self.component().scalar_component_funcs.is_empty() {
+ choices.push(|u, c| {
+ let func_index = *u.choose(&c.component().scalar_component_funcs)?;
+ Ok(Some(Func::CanonLower {
+ // Scalar component functions don't use any canonical options.
+ options: vec![],
+ func_index,
+ }))
+ });
+ }
+
+ // Lifting: pick any existing core function and invent a scalar
+ // component function type it could canonically implement.
+ if !self.component().core_funcs.is_empty() {
+ choices.push(|u, c| {
+ let core_func_index = u.int_in_range(
+ 0..=u32::try_from(c.component().core_funcs.len() - 1).unwrap(),
+ )?;
+ let core_func_ty = c.core_function_type(core_func_index);
+ let comp_func_ty = inverse_scalar_canonical_abi_for(u, core_func_ty)?;
+
+ let func_ty = if let Some(indices) = c
+ .current_type_scope()
+ .func_type_to_indices
+ .get(&comp_func_ty)
+ {
+ // If we've already defined this component function type
+ // one or more times, then choose one of those
+ // definitions arbitrarily.
+ debug_assert!(!indices.is_empty());
+ *u.choose(indices)?
+ } else if c.current_type_scope().types.len() < c.config.max_types() {
+ // If we haven't already defined this component function
+ // type, and we haven't defined the configured maximum
+ // amount of types yet, then just define this type.
+ let ty = Rc::new(Type::Func(Rc::new(comp_func_ty)));
+ c.push_type(ty)
+ } else {
+ // Otherwise, give up on lifting this function.
+ return Ok(None);
+ };
+
+ Ok(Some(Func::CanonLift {
+ func_ty,
+ // Scalar functions don't use any canonical options.
+ options: vec![],
+ core_func_index,
+ }))
+ });
+ }
+
+ if choices.is_empty() {
+ return Ok(false);
+ }
+
+ let f = u.choose(&choices)?;
+ if let Some(func) = f(u, self)? {
+ self.push_func(func);
+ }
+
+ Ok(true)
+ })?;
+
+ Ok(Step::StillBuilding)
+ }
+
+ /// Generate an embedded core Wasm module (via the core-module
+ /// generator) and push it as a core-module section.
+ fn arbitrary_core_module_section(&mut self, u: &mut Unstructured) -> Result<Step> {
+ let config: Rc<dyn Config> = Rc::clone(&self.config);
+ // Duplicate imports are disallowed so the module can be used inside a
+ // component context.
+ let module = crate::core::Module::new_internal(
+ config,
+ u,
+ crate::core::DuplicateImportsBehavior::Disallowed,
+ )?;
+ self.push_section(Section::CoreModule(module));
+ self.total_modules += 1;
+ Ok(Step::StillBuilding)
+ }
+
+ /// Begin generating a nested component: push fresh type and component
+ /// scopes. (`u` is unused here; presumably kept so all section
+ /// generators share the same signature — TODO confirm.)
+ fn arbitrary_component_section(&mut self, u: &mut Unstructured) -> Result<Step> {
+ self.types.push(TypesScope::default());
+ self.components.push(ComponentContext::empty());
+ self.total_components += 1;
+ Ok(Step::StillBuilding)
+ }
+
+ // The following section generators are not implemented yet; each panics
+ // via `todo!()` if reached.
+
+ fn arbitrary_instance_section(&mut self, u: &mut Unstructured) -> Result<()> {
+ todo!()
+ }
+
+ fn arbitrary_export_section(&mut self, u: &mut Unstructured) -> Result<()> {
+ todo!()
+ }
+
+ fn arbitrary_start_section(&mut self, u: &mut Unstructured) -> Result<()> {
+ todo!()
+ }
+
+ fn arbitrary_alias_section(&mut self, u: &mut Unstructured) -> Result<()> {
+ todo!()
+ }
+}
+
+/// Compute the core Wasm function type that canonically represents the
+/// given scalar component function type (its canonical-ABI lowering).
+/// Panics via `unimplemented!` for non-scalar types (strings, named types).
+fn canonical_abi_for(func_ty: &FuncType) -> Rc<crate::core::FuncType> {
+ // Scalar component value types map 1:1 onto core value types.
+ let to_core_ty = |ty| match ty {
+ ComponentValType::Primitive(prim_ty) => match prim_ty {
+ PrimitiveValType::Char
+ | PrimitiveValType::Bool
+ | PrimitiveValType::S8
+ | PrimitiveValType::U8
+ | PrimitiveValType::S16
+ | PrimitiveValType::U16
+ | PrimitiveValType::S32
+ | PrimitiveValType::U32 => ValType::I32,
+ PrimitiveValType::S64 | PrimitiveValType::U64 => ValType::I64,
+ PrimitiveValType::Float32 => ValType::F32,
+ PrimitiveValType::Float64 => ValType::F64,
+ PrimitiveValType::String => {
+ unimplemented!("non-scalar types are not supported yet")
+ }
+ },
+ ComponentValType::Type(_) => unimplemented!("non-scalar types are not supported yet"),
+ };
+
+ Rc::new(crate::core::FuncType {
+ params: func_ty
+ .params
+ .iter()
+ .map(|(_, ty)| to_core_ty(*ty))
+ .collect(),
+ results: func_ty
+ .results
+ .iter()
+ .map(|(_, ty)| to_core_ty(*ty))
+ .collect(),
+ })
+}
+
+/// Invent an arbitrary scalar component function type whose canonical-ABI
+/// lowering is exactly `core_func_ty` (the inverse of `canonical_abi_for`,
+/// chosen arbitrarily among the valid preimages).
+fn inverse_scalar_canonical_abi_for(
+ u: &mut Unstructured,
+ core_func_ty: &crate::core::FuncType,
+) -> Result<FuncType> {
+ // For each core type, pick one of the scalar component types that
+ // lowers to it.
+ let from_core_ty = |u: &mut Unstructured, core_ty| match core_ty {
+ ValType::I32 => u
+ .choose(&[
+ ComponentValType::Primitive(PrimitiveValType::Char),
+ ComponentValType::Primitive(PrimitiveValType::Bool),
+ ComponentValType::Primitive(PrimitiveValType::S8),
+ ComponentValType::Primitive(PrimitiveValType::U8),
+ ComponentValType::Primitive(PrimitiveValType::S16),
+ ComponentValType::Primitive(PrimitiveValType::U16),
+ ComponentValType::Primitive(PrimitiveValType::S32),
+ ComponentValType::Primitive(PrimitiveValType::U32),
+ ])
+ .cloned(),
+ ValType::I64 => u
+ .choose(&[
+ ComponentValType::Primitive(PrimitiveValType::S64),
+ ComponentValType::Primitive(PrimitiveValType::U64),
+ ])
+ .cloned(),
+ ValType::F32 => Ok(ComponentValType::Primitive(PrimitiveValType::Float32)),
+ ValType::F64 => Ok(ComponentValType::Primitive(PrimitiveValType::Float64)),
+ ValType::V128 | ValType::Ref(_) => {
+ unreachable!("not used in canonical ABI")
+ }
+ };
+
+ // Parameter names must be unique within the function type.
+ let mut names = HashSet::default();
+ let mut params = vec![];
+
+ for core_ty in &core_func_ty.params {
+ params.push((
+ crate::unique_kebab_string(100, &mut names, u)?,
+ from_core_ty(u, *core_ty)?,
+ ));
+ }
+
+ names.clear();
+
+ // At most one (optionally named) result is supported.
+ let results = match core_func_ty.results.len() {
+ 0 => Vec::new(),
+ 1 => vec![(
+ if u.arbitrary()? {
+ Some(crate::unique_kebab_string(100, &mut names, u)?)
+ } else {
+ None
+ },
+ from_core_ty(u, core_func_ty.results[0])?,
+ )],
+ _ => unimplemented!("non-scalar types are not supported yet"),
+ };
+
+ Ok(FuncType { params, results })
+}
+
+/// A single top-level section of a generated component, mirroring the
+/// section kinds of the component binary format.
+#[derive(Debug)]
+enum Section {
+ Custom(CustomSection),
+ CoreModule(crate::Module),
+ CoreInstance(CoreInstanceSection),
+ CoreType(CoreTypeSection),
+ Component(Component),
+ Instance(InstanceSection),
+ Alias(AliasSection),
+ Type(TypeSection),
+ Canonical(CanonicalSection),
+ Start(StartSection),
+ Import(ImportSection),
+ Export(ExportSection),
+}
+
+/// An arbitrary custom section: a name plus opaque bytes.
+#[derive(Debug)]
+struct CustomSection {
+ name: String,
+ data: Vec<u8>,
+}
+
+impl<'a> Arbitrary<'a> for CustomSection {
+ fn arbitrary(u: &mut Unstructured<'a>) -> Result<Self> {
+ // Names are bounded to keep generated sections a reasonable size.
+ let name = crate::limited_string(1_000, u)?;
+ let data = u.arbitrary()?;
+ Ok(CustomSection { name, data })
+ }
+}
+
+/// A component type section: a list of shared type definitions.
+#[derive(Debug)]
+struct TypeSection {
+ types: Vec<Rc<Type>>,
+}
+
+/// A core type definition within a component (core func or module type).
+#[derive(Clone, Debug, PartialEq, Eq, Hash)]
+enum CoreType {
+ Func(Rc<crate::core::FuncType>),
+ Module(Rc<ModuleType>),
+}
+
+/// A core module type: a list of declarations plus flags recording whether
+/// the canonical-ABI-relevant exports (memory, realloc, free) are present.
+#[derive(Clone, Debug, PartialEq, Eq, Hash, Default)]
+struct ModuleType {
+ defs: Vec<ModuleTypeDef>,
+ has_memory: bool,
+ has_canonical_abi_realloc: bool,
+ has_canonical_abi_free: bool,
+}
+
+/// A single declaration inside a core module type.
+#[derive(Clone, Debug, PartialEq, Eq, Hash)]
+enum ModuleTypeDef {
+ TypeDef(crate::core::Type),
+ Import(crate::core::Import),
+ OuterAlias {
+ count: u32,
+ i: u32,
+ kind: CoreOuterAliasKind,
+ },
+ Export(String, crate::core::EntityType),
+}
+
+/// A component-level type definition.
+#[derive(Clone, Debug, PartialEq, Eq, Hash)]
+enum Type {
+ Defined(DefinedType),
+ Func(Rc<FuncType>),
+ Component(Rc<ComponentType>),
+ Instance(Rc<InstanceType>),
+}
+
+/// Kinds of exports that can be aliased out of a core instance.
+#[derive(Clone, Debug, PartialEq, Eq, Hash)]
+enum CoreInstanceExportAliasKind {
+ Func,
+ Table,
+ Memory,
+ Global,
+ Tag,
+}
+
+/// Kinds of outer aliases at the core level (currently only types).
+#[derive(Clone, Debug, PartialEq, Eq, Hash)]
+enum CoreOuterAliasKind {
+ Type(Rc<crate::core::FuncType>),
+}
+
+/// A component alias definition: an export of a (core) instance, or an
+/// outer definition `count` scopes up at index `i`.
+#[derive(Clone, Debug, PartialEq, Eq, Hash)]
+enum Alias {
+ InstanceExport {
+ instance: u32,
+ name: String,
+ kind: InstanceExportAliasKind,
+ },
+ CoreInstanceExport {
+ instance: u32,
+ name: String,
+ kind: CoreInstanceExportAliasKind,
+ },
+ Outer {
+ count: u32,
+ i: u32,
+ kind: OuterAliasKind,
+ },
+}
+
+/// Kinds of exports that can be aliased out of a component instance.
+#[derive(Clone, Debug, PartialEq, Eq, Hash)]
+enum InstanceExportAliasKind {
+ Module,
+ Component,
+ Instance,
+ Func,
+ Value,
+}
+
+/// Kinds of outer aliases at the component level.
+#[derive(Clone, Debug, PartialEq, Eq, Hash)]
+enum OuterAliasKind {
+ Module,
+ Component,
+ CoreType(Rc<CoreType>),
+ Type(Rc<Type>),
+}
+
+/// A component type: a list of declarations.
+#[derive(Clone, Debug, PartialEq, Eq, Hash)]
+struct ComponentType {
+ defs: Vec<ComponentTypeDef>,
+}
+
+/// A single declaration inside a component type.
+#[derive(Clone, Debug, PartialEq, Eq, Hash)]
+enum ComponentTypeDef {
+ CoreType(Rc<CoreType>),
+ Type(Rc<Type>),
+ Alias(Alias),
+ Import(Import),
+ Export {
+ name: KebabString,
+ url: Option<String>,
+ ty: ComponentTypeRef,
+ },
+}
+
+// Every instance-type declaration is also a valid component-type
+// declaration (component types additionally allow imports), so the
+// conversion is a straightforward variant-for-variant mapping.
+impl From<InstanceTypeDecl> for ComponentTypeDef {
+ fn from(def: InstanceTypeDecl) -> Self {
+ match def {
+ InstanceTypeDecl::CoreType(t) => Self::CoreType(t),
+ InstanceTypeDecl::Type(t) => Self::Type(t),
+ InstanceTypeDecl::Export { name, url, ty } => Self::Export { name, url, ty },
+ InstanceTypeDecl::Alias(a) => Self::Alias(a),
+ }
+ }
+}
+
+/// A component instance type: a list of declarations.
+#[derive(Clone, Debug, PartialEq, Eq, Hash)]
+struct InstanceType {
+ defs: Vec<InstanceTypeDecl>,
+}
+
+/// A single declaration inside an instance type (like `ComponentTypeDef`
+/// but without imports).
+#[derive(Clone, Debug, PartialEq, Eq, Hash)]
+enum InstanceTypeDecl {
+ CoreType(Rc<CoreType>),
+ Type(Rc<Type>),
+ Alias(Alias),
+ Export {
+ name: KebabString,
+ url: Option<String>,
+ ty: ComponentTypeRef,
+ },
+}
+
+/// A component function type: named params and optionally-named results.
+#[derive(Clone, Debug, PartialEq, Eq, Hash)]
+struct FuncType {
+ params: Vec<(KebabString, ComponentValType)>,
+ results: Vec<(Option<KebabString>, ComponentValType)>,
+}
+
+impl FuncType {
+ /// If this function has exactly one result and it is unnamed, return
+ /// its type; otherwise `None`. Used by the encoder to pick the
+ /// single-result encoding.
+ fn unnamed_result_ty(&self) -> Option<ComponentValType> {
+ if self.results.len() == 1 {
+ let (name, ty) = &self.results[0];
+ if name.is_none() {
+ return Some(*ty);
+ }
+ }
+ None
+ }
+
+ /// Whether every param is scalar and there is exactly one scalar
+ /// result — the shape this generator is able to lift/lower.
+ fn is_scalar(&self) -> bool {
+ self.params.iter().all(|(_, ty)| is_scalar(ty))
+ && self.results.len() == 1
+ && is_scalar(&self.results[0].1)
+ }
+}
+
+/// Whether `ty` is a scalar primitive: every primitive except `string`.
+/// Named (non-primitive) types are never considered scalar.
+fn is_scalar(ty: &ComponentValType) -> bool {
+ match ty {
+ ComponentValType::Primitive(prim) => match prim {
+ PrimitiveValType::Bool
+ | PrimitiveValType::S8
+ | PrimitiveValType::U8
+ | PrimitiveValType::S16
+ | PrimitiveValType::U16
+ | PrimitiveValType::S32
+ | PrimitiveValType::U32
+ | PrimitiveValType::S64
+ | PrimitiveValType::U64
+ | PrimitiveValType::Float32
+ | PrimitiveValType::Float64
+ | PrimitiveValType::Char => true,
+ PrimitiveValType::String => false,
+ },
+ ComponentValType::Type(_) => false,
+ }
+}
+
+/// A defined (value) type in the component type space, one variant per
+/// defined-type constructor of the component model.
+#[derive(Clone, Debug, PartialEq, Eq, Hash)]
+enum DefinedType {
+ Primitive(PrimitiveValType),
+ Record(RecordType),
+ Variant(VariantType),
+ List(ListType),
+ Tuple(TupleType),
+ Flags(FlagsType),
+ Enum(EnumType),
+ Union(UnionType),
+ Option(OptionType),
+ Result(ResultType),
+}
+
+/// A record: named fields.
+#[derive(Clone, Debug, PartialEq, Eq, Hash)]
+struct RecordType {
+ fields: Vec<(KebabString, ComponentValType)>,
+}
+
+/// A variant: named cases with optional payload type and optional
+/// "refines" case index.
+#[derive(Clone, Debug, PartialEq, Eq, Hash)]
+struct VariantType {
+ cases: Vec<(KebabString, Option<ComponentValType>, Option<u32>)>,
+}
+
+/// A list of a single element type.
+#[derive(Clone, Debug, PartialEq, Eq, Hash)]
+struct ListType {
+ elem_ty: ComponentValType,
+}
+
+/// A tuple: positional field types.
+#[derive(Clone, Debug, PartialEq, Eq, Hash)]
+struct TupleType {
+ fields: Vec<ComponentValType>,
+}
+
+/// A flags type: named bit fields.
+#[derive(Clone, Debug, PartialEq, Eq, Hash)]
+struct FlagsType {
+ fields: Vec<KebabString>,
+}
+
+/// An enum: named variants with no payloads.
+#[derive(Clone, Debug, PartialEq, Eq, Hash)]
+struct EnumType {
+ variants: Vec<KebabString>,
+}
+
+/// A union of value types.
+#[derive(Clone, Debug, PartialEq, Eq, Hash)]
+struct UnionType {
+ variants: Vec<ComponentValType>,
+}
+
+/// An `option<T>` type.
+#[derive(Clone, Debug, PartialEq, Eq, Hash)]
+struct OptionType {
+ inner_ty: ComponentValType,
+}
+
+/// A `result<ok, err>` type; either side may be absent.
+#[derive(Clone, Debug, PartialEq, Eq, Hash)]
+struct ResultType {
+ ok_ty: Option<ComponentValType>,
+ err_ty: Option<ComponentValType>,
+}
+
+/// An import section: a list of component imports.
+#[derive(Debug)]
+struct ImportSection {
+ imports: Vec<Import>,
+}
+
+/// A single component import.
+#[derive(Clone, Debug, PartialEq, Eq, Hash)]
+struct Import {
+ name: KebabString,
+ url: Option<String>,
+ ty: ComponentTypeRef,
+}
+
+/// A canonical-function section: lift/lower definitions.
+#[derive(Debug)]
+struct CanonicalSection {
+ funcs: Vec<Func>,
+}
+
+/// A canonical function definition: either lifting a core function to a
+/// component function type, or lowering a component function to core.
+#[derive(Debug)]
+enum Func {
+ CanonLift {
+ // Index of the component function type being lifted to.
+ func_ty: u32,
+ options: Vec<CanonOpt>,
+ core_func_index: u32,
+ },
+ CanonLower {
+ options: Vec<CanonOpt>,
+ func_index: u32,
+ },
+}
+
+/// Canonical ABI options attached to a lift/lower definition.
+#[derive(Debug)]
+enum CanonOpt {
+ StringUtf8,
+ StringUtf16,
+ StringLatin1Utf16,
+ Memory(u32),
+ Realloc(u32),
+ PostReturn(u32),
+}
+
+// The remaining section kinds are placeholders; their generators are
+// still `todo!()` (see the `arbitrary_*_section` methods above).
+
+#[derive(Debug)]
+struct InstanceSection {}
+
+#[derive(Debug)]
+struct ExportSection {}
+
+#[derive(Debug)]
+struct StartSection {}
+
+#[derive(Debug)]
+struct AliasSection {}
+
+#[derive(Debug)]
+struct CoreInstanceSection {}
+
+/// A core type section: a list of core type definitions.
+#[derive(Debug)]
+struct CoreTypeSection {
+ types: Vec<Rc<CoreType>>,
+}
diff --git a/third_party/rust/wasm-smith/src/component/encode.rs b/third_party/rust/wasm-smith/src/component/encode.rs
new file mode 100644
index 0000000000..f2bfe734fc
--- /dev/null
+++ b/third_party/rust/wasm-smith/src/component/encode.rs
@@ -0,0 +1,319 @@
+use super::*;
+use wasm_encoder::{ComponentExportKind, ComponentOuterAliasKind, ExportKind};
+use wasmparser::types::KebabStr;
+
+impl Component {
+ /// Encode this Wasm component into bytes.
+ pub fn to_bytes(&self) -> Vec<u8> {
+ self.encoded().finish()
+ }
+
+ /// Build the `wasm_encoder` representation by encoding each section in
+ /// order.
+ fn encoded(&self) -> wasm_encoder::Component {
+ let mut component = wasm_encoder::Component::new();
+ for section in &self.sections {
+ section.encode(&mut component);
+ }
+ component
+ }
+}
+
+impl Section {
+ /// Encode this section into `component`. Section kinds whose
+ /// generators are unimplemented panic via `todo!()`.
+ fn encode(&self, component: &mut wasm_encoder::Component) {
+ match self {
+ Self::Custom(sec) => sec.encode(component),
+ Self::CoreModule(module) => {
+ // Nested core modules are encoded to bytes first and embedded
+ // as a raw section with the core-module section id.
+ let bytes = module.to_bytes();
+ component.section(&wasm_encoder::RawSection {
+ id: wasm_encoder::ComponentSectionId::CoreModule as u8,
+ data: &bytes,
+ });
+ }
+ Self::CoreInstance(_) => todo!(),
+ Self::CoreType(sec) => sec.encode(component),
+ Self::Component(comp) => {
+ // Nested components are likewise embedded as raw bytes.
+ let bytes = comp.to_bytes();
+ component.section(&wasm_encoder::RawSection {
+ id: wasm_encoder::ComponentSectionId::Component as u8,
+ data: &bytes,
+ });
+ }
+ Self::Instance(_) => todo!(),
+ Self::Alias(_) => todo!(),
+ Self::Type(sec) => sec.encode(component),
+ Self::Canonical(sec) => sec.encode(component),
+ Self::Start(_) => todo!(),
+ Self::Import(sec) => sec.encode(component),
+ Self::Export(_) => todo!(),
+ }
+ }
+}
+
+impl CustomSection {
+ /// Encode as a custom section with the stored name and raw bytes.
+ fn encode(&self, component: &mut wasm_encoder::Component) {
+ component.section(&wasm_encoder::CustomSection {
+ name: &self.name,
+ data: &self.data,
+ });
+ }
+}
+
+impl TypeSection {
+ /// Encode each stored type into a component type section.
+ fn encode(&self, component: &mut wasm_encoder::Component) {
+ let mut sec = wasm_encoder::ComponentTypeSection::new();
+ for ty in &self.types {
+ ty.encode(sec.ty());
+ }
+ component.section(&sec);
+ }
+}
+
+impl ImportSection {
+ /// Encode each import; an absent URL is encoded as the empty string.
+ fn encode(&self, component: &mut wasm_encoder::Component) {
+ let mut sec = wasm_encoder::ComponentImportSection::new();
+ for imp in &self.imports {
+ sec.import(&imp.name, imp.url.as_deref().unwrap_or(""), imp.ty);
+ }
+ component.section(&sec);
+ }
+}
+
+impl CanonicalSection {
+ /// Encode each canonical lift/lower definition, translating our
+ /// `CanonOpt` options into `wasm_encoder::CanonicalOption`s.
+ fn encode(&self, component: &mut wasm_encoder::Component) {
+ let mut sec = wasm_encoder::CanonicalFunctionSection::new();
+ for func in &self.funcs {
+ match func {
+ Func::CanonLift {
+ func_ty,
+ options,
+ core_func_index,
+ } => {
+ let options = translate_canon_opt(options);
+ sec.lift(*core_func_index, *func_ty, options);
+ }
+ Func::CanonLower {
+ options,
+ func_index,
+ } => {
+ let options = translate_canon_opt(options);
+ sec.lower(*func_index, options);
+ }
+ }
+ }
+ component.section(&sec);
+ }
+}
+
+impl CoreTypeSection {
+ /// Encode each stored core type into a core type section.
+ fn encode(&self, component: &mut wasm_encoder::Component) {
+ let mut sec = wasm_encoder::CoreTypeSection::new();
+ for ty in &self.types {
+ ty.encode(sec.ty());
+ }
+ component.section(&sec);
+ }
+}
+
+impl CoreType {
+ /// Encode this core type (function or module type) via `enc`.
+ fn encode(&self, enc: wasm_encoder::CoreTypeEncoder<'_>) {
+ match self {
+ Self::Func(ty) => {
+ enc.function(ty.params.iter().copied(), ty.results.iter().copied());
+ }
+ Self::Module(mod_ty) => {
+ // Module types are encoded declaration by declaration.
+ let mut enc_mod_ty = wasm_encoder::ModuleType::new();
+ for def in &mod_ty.defs {
+ match def {
+ ModuleTypeDef::TypeDef(crate::core::Type::Func(func_ty)) => {
+ enc_mod_ty.ty().function(
+ func_ty.params.iter().copied(),
+ func_ty.results.iter().copied(),
+ );
+ }
+ ModuleTypeDef::OuterAlias { count, i, kind } => match kind {
+ CoreOuterAliasKind::Type(_) => {
+ enc_mod_ty.alias_outer_core_type(*count, *i);
+ }
+ },
+ ModuleTypeDef::Import(imp) => {
+ enc_mod_ty.import(
+ &imp.module,
+ &imp.field,
+ crate::core::encode::translate_entity_type(&imp.entity_type),
+ );
+ }
+ ModuleTypeDef::Export(name, ty) => {
+ enc_mod_ty.export(name, crate::core::encode::translate_entity_type(ty));
+ }
+ }
+ }
+ enc.module(&enc_mod_ty);
+ }
+ }
+ }
+}
+
+impl Type {
+ /// Encode this component type (defined, func, component, or instance
+ /// type) via `enc`.
+ fn encode(&self, enc: wasm_encoder::ComponentTypeEncoder<'_>) {
+ match self {
+ Self::Defined(ty) => {
+ ty.encode(enc.defined_type());
+ }
+ Self::Func(func_ty) => {
+ let mut f = enc.function();
+
+ f.params(func_ty.params.iter().map(|(name, ty)| (name.as_str(), *ty)));
+
+ // A single unnamed result uses the dedicated single-result
+ // encoding; otherwise all results are encoded with names
+ // (which must all be present in that case — hence `unwrap`).
+ if let Some(ty) = func_ty.unnamed_result_ty() {
+ f.result(ty);
+ } else {
+ f.results(
+ func_ty.results.iter().map(|(name, ty)| {
+ (name.as_deref().map(KebabStr::as_str).unwrap(), *ty)
+ }),
+ );
+ }
+ }
+ Self::Component(comp_ty) => {
+ let mut enc_comp_ty = wasm_encoder::ComponentType::new();
+ for def in &comp_ty.defs {
+ match def {
+ ComponentTypeDef::Import(imp) => {
+ enc_comp_ty.import(&imp.name, imp.url.as_deref().unwrap_or(""), imp.ty);
+ }
+ ComponentTypeDef::CoreType(ty) => {
+ ty.encode(enc_comp_ty.core_type());
+ }
+ ComponentTypeDef::Type(ty) => {
+ ty.encode(enc_comp_ty.ty());
+ }
+ ComponentTypeDef::Export { name, url, ty } => {
+ enc_comp_ty.export(name, url.as_deref().unwrap_or(""), *ty);
+ }
+ ComponentTypeDef::Alias(a) => {
+ enc_comp_ty.alias(translate_alias(a));
+ }
+ }
+ }
+ enc.component(&enc_comp_ty);
+ }
+ Self::Instance(inst_ty) => {
+ let mut enc_inst_ty = wasm_encoder::InstanceType::new();
+ for def in &inst_ty.defs {
+ match def {
+ InstanceTypeDecl::CoreType(ty) => {
+ ty.encode(enc_inst_ty.core_type());
+ }
+ InstanceTypeDecl::Type(ty) => {
+ ty.encode(enc_inst_ty.ty());
+ }
+ InstanceTypeDecl::Export { name, url, ty } => {
+ enc_inst_ty.export(name, url.as_deref().unwrap_or(""), *ty);
+ }
+ InstanceTypeDecl::Alias(a) => {
+ enc_inst_ty.alias(translate_alias(a));
+ }
+ }
+ }
+ enc.instance(&enc_inst_ty);
+ }
+ }
+ }
+}
+
+impl DefinedType {
+ /// Encode this defined type via `enc`, one encoder call per
+ /// defined-type constructor.
+ fn encode(&self, enc: wasm_encoder::ComponentDefinedTypeEncoder<'_>) {
+ match self {
+ Self::Primitive(ty) => enc.primitive(*ty),
+ Self::Record(ty) => {
+ enc.record(ty.fields.iter().map(|(name, ty)| (name.as_str(), *ty)));
+ }
+ Self::Variant(ty) => {
+ enc.variant(
+ ty.cases
+ .iter()
+ .map(|(name, ty, refines)| (name.as_str(), *ty, *refines)),
+ );
+ }
+ Self::List(ty) => {
+ enc.list(ty.elem_ty);
+ }
+ Self::Tuple(ty) => {
+ enc.tuple(ty.fields.iter().copied());
+ }
+ Self::Flags(ty) => {
+ enc.flags(ty.fields.iter().map(|f| f.as_str()));
+ }
+ Self::Enum(ty) => {
+ enc.enum_type(ty.variants.iter().map(|v| v.as_str()));
+ }
+ Self::Union(ty) => {
+ enc.union(ty.variants.iter().copied());
+ }
+ Self::Option(ty) => {
+ enc.option(ty.inner_ty);
+ }
+ Self::Result(ty) => {
+ enc.result(ty.ok_ty, ty.err_ty);
+ }
+ }
+ }
+}
+
+/// Translate our internal `CanonOpt`s into the corresponding
+/// `wasm_encoder::CanonicalOption`s, one for one.
+fn translate_canon_opt(options: &[CanonOpt]) -> Vec<wasm_encoder::CanonicalOption> {
+ options
+ .iter()
+ .map(|o| match o {
+ CanonOpt::StringUtf8 => wasm_encoder::CanonicalOption::UTF8,
+ CanonOpt::StringUtf16 => wasm_encoder::CanonicalOption::UTF16,
+ CanonOpt::StringLatin1Utf16 => wasm_encoder::CanonicalOption::CompactUTF16,
+ CanonOpt::Memory(idx) => wasm_encoder::CanonicalOption::Memory(*idx),
+ CanonOpt::Realloc(idx) => wasm_encoder::CanonicalOption::Realloc(*idx),
+ CanonOpt::PostReturn(idx) => wasm_encoder::CanonicalOption::PostReturn(*idx),
+ })
+ .collect()
+}
+
+/// Translate our internal `Alias` representation into the borrowed
+/// `wasm_encoder::Alias` form used for encoding.
+fn translate_alias(alias: &Alias) -> wasm_encoder::Alias<'_> {
+ match alias {
+ Alias::InstanceExport {
+ instance,
+ name,
+ kind,
+ } => wasm_encoder::Alias::InstanceExport {
+ instance: *instance,
+ name,
+ kind: match kind {
+ InstanceExportAliasKind::Module => ComponentExportKind::Module,
+ InstanceExportAliasKind::Component => ComponentExportKind::Component,
+ InstanceExportAliasKind::Instance => ComponentExportKind::Instance,
+ InstanceExportAliasKind::Func => ComponentExportKind::Func,
+ InstanceExportAliasKind::Value => ComponentExportKind::Value,
+ },
+ },
+ Alias::CoreInstanceExport {
+ instance,
+ name,
+ kind,
+ } => wasm_encoder::Alias::CoreInstanceExport {
+ instance: *instance,
+ name,
+ kind: match kind {
+ CoreInstanceExportAliasKind::Func => ExportKind::Func,
+ CoreInstanceExportAliasKind::Table => ExportKind::Table,
+ CoreInstanceExportAliasKind::Global => ExportKind::Global,
+ CoreInstanceExportAliasKind::Memory => ExportKind::Memory,
+ CoreInstanceExportAliasKind::Tag => ExportKind::Tag,
+ },
+ },
+ // Note: an `OuterAliasKind::Module` maps to `CoreModule` — at the
+ // component level, outer module aliases refer to core modules.
+ Alias::Outer { count, i, kind } => wasm_encoder::Alias::Outer {
+ count: *count,
+ index: *i,
+ kind: match kind {
+ OuterAliasKind::Module => ComponentOuterAliasKind::CoreModule,
+ OuterAliasKind::Component => ComponentOuterAliasKind::Component,
+ OuterAliasKind::Type(_) => ComponentOuterAliasKind::Type,
+ OuterAliasKind::CoreType(_) => ComponentOuterAliasKind::CoreType,
+ },
+ },
+ }
+}
diff --git a/third_party/rust/wasm-smith/src/config.rs b/third_party/rust/wasm-smith/src/config.rs
new file mode 100644
index 0000000000..0d3b39abe8
--- /dev/null
+++ b/third_party/rust/wasm-smith/src/config.rs
@@ -0,0 +1,837 @@
+//! Configuring the shape of generated Wasm modules.
+
+use crate::InstructionKinds;
+use arbitrary::{Arbitrary, Result, Unstructured};
+use std::borrow::Cow;
+
+/// Configuration for a generated module.
+///
+/// Don't care to configure your generated modules? Just use
+/// [`Module`][crate::Module], which internally uses
+/// [`DefaultConfig`][crate::DefaultConfig].
+///
+/// If you want to configure generated modules, then define a `MyConfig` type,
+/// implement this trait for it, and use
+/// [`ConfiguredModule<MyConfig>`][crate::ConfiguredModule] instead of `Module`.
+///
+/// Every trait method has a provided default implementation, so that you only
+/// need to override the methods for things you want to change away from the
+/// default.
+pub trait Config: 'static + std::fmt::Debug {
+ /// The minimum number of types to generate. Defaults to 0.
+ fn min_types(&self) -> usize {
+ 0
+ }
+
+ /// The maximum number of types to generate. Defaults to 100.
+ fn max_types(&self) -> usize {
+ 100
+ }
+
+ /// The minimum number of imports to generate. Defaults to 0.
+ ///
+ /// Note that if the sum of the maximum function[^1], table, global and
+ /// memory counts is less than the minimum number of imports, then it will
+ /// not be possible to satisfy all constraints (because imports count
+ /// against the limits for those element kinds). In that case, we strictly
+ /// follow the max-constraints, and can fail to satisfy this minimum number.
+ ///
+ /// [^1]: the maximum number of functions is also limited by the number of
+ /// function types arbitrarily chosen; strictly speaking, then, the
+ /// maximum number of imports that can be created due to
+ /// max-constraints is `sum(min(num_func_types, max_funcs), max_tables,
+ /// max_globals, max_memories)`.
+ fn min_imports(&self) -> usize {
+ 0
+ }
+
+ /// The maximum number of imports to generate. Defaults to 100.
+ fn max_imports(&self) -> usize {
+ 100
+ }
+
+ /// The minimum number of tags to generate. Defaults to 0.
+ fn min_tags(&self) -> usize {
+ 0
+ }
+
+ /// The maximum number of tags to generate. Defaults to 100.
+ fn max_tags(&self) -> usize {
+ 100
+ }
+
+ /// The imports that may be used when generating the module.
+ ///
+ /// Defaults to `None` which means that any arbitrary import can be generated.
+ ///
+ /// To only allow specific imports, override this method to return a WebAssembly module which
+ /// describes the imports allowed.
+ ///
+ /// Note that [`Self::min_imports`] is ignored when `available_imports` are enabled.
+ ///
+ /// # Panics
+ ///
+ /// The returned value must be a valid binary encoding of a WebAssembly module. `wasm-smith`
+ /// will panic if the module cannot be parsed.
+ ///
+ /// # Example
+ ///
+ /// An implementation of this method could use the `wat` crate to provide a human-readable and
+ /// maintainable description:
+ ///
+ /// ```rust
+ /// Some(wat::parse_str(r#"
+ /// (module
+ /// (import "env" "ping" (func (param i32)))
+ /// (import "env" "pong" (func (result i32)))
+ /// (import "env" "memory" (memory 1))
+ /// (import "env" "table" (table 1))
+ /// (import "env" "tag" (tag (param i32)))
+ /// )
+ /// "#))
+ /// # ;
+ /// ```
+ fn available_imports(&self) -> Option<Cow<'_, [u8]>> {
+ None
+ }
+
+ /// The minimum number of functions to generate. Defaults to 0. This
+ /// includes imported functions.
+ fn min_funcs(&self) -> usize {
+ 0
+ }
+
+ /// The maximum number of functions to generate. Defaults to 100. This
+ /// includes imported functions.
+ fn max_funcs(&self) -> usize {
+ 100
+ }
+
+ /// The minimum number of globals to generate. Defaults to 0. This includes
+ /// imported globals.
+ fn min_globals(&self) -> usize {
+ 0
+ }
+
+ /// The maximum number of globals to generate. Defaults to 100. This
+ /// includes imported globals.
+ fn max_globals(&self) -> usize {
+ 100
+ }
+
+ /// The minimum number of exports to generate. Defaults to 0.
+ fn min_exports(&self) -> usize {
+ 0
+ }
+
+ /// The maximum number of exports to generate. Defaults to 100.
+ fn max_exports(&self) -> usize {
+ 100
+ }
+
+ /// Export all WebAssembly objects in the module. This overrides
+ /// [`Config::min_exports`] and [`Config::max_exports`]. Defaults to false.
+ fn export_everything(&self) -> bool {
+ false
+ }
+
+ /// The minimum number of element segments to generate. Defaults to 0.
+ fn min_element_segments(&self) -> usize {
+ 0
+ }
+
+ /// The maximum number of element segments to generate. Defaults to 100.
+ fn max_element_segments(&self) -> usize {
+ 100
+ }
+
+ /// The minimum number of elements within a segment to generate. Defaults to
+ /// 0.
+ fn min_elements(&self) -> usize {
+ 0
+ }
+
+ /// The maximum number of elements within a segment to generate. Defaults to
+ /// 100.
+ fn max_elements(&self) -> usize {
+ 100
+ }
+
+ /// The minimum number of data segments to generate. Defaults to 0.
+ fn min_data_segments(&self) -> usize {
+ 0
+ }
+
+ /// The maximum number of data segments to generate. Defaults to 100.
+ fn max_data_segments(&self) -> usize {
+ 100
+ }
+
+ /// The maximum number of instructions to generate in a function
+ /// body. Defaults to 100.
+ ///
+ /// Note that some additional `end`s, `else`s, and `unreachable`s may be
+ /// appended to the function body to finish block scopes.
+ fn max_instructions(&self) -> usize {
+ 100
+ }
+
+ /// The minimum number of memories to use. Defaults to 0. This includes
+ /// imported memories.
+ fn min_memories(&self) -> u32 {
+ 0
+ }
+
+ /// The maximum number of memories to use. Defaults to 1. This includes
+ /// imported memories.
+ ///
+ /// Note that more than one memory is in the realm of the multi-memory wasm
+ /// proposal.
+ fn max_memories(&self) -> usize {
+ 1
+ }
+
+ /// The minimum number of tables to use. Defaults to 0. This includes
+ /// imported tables.
+ fn min_tables(&self) -> u32 {
+ 0
+ }
+
+ /// The maximum number of tables to use. Defaults to 1. This includes
+ /// imported tables.
+ ///
+ /// Note that more than one table is in the realm of the reference types
+ /// proposal.
+ fn max_tables(&self) -> usize {
+ 1
+ }
+
+ /// The maximum, in 64k Wasm pages, of any memory's initial or maximum size.
+ ///
+ /// Defaults to 2^16 = 65536 for 32-bit Wasm and 2^48 for 64-bit wasm.
+ fn max_memory_pages(&self, is_64: bool) -> u64 {
+ if is_64 {
+ 1 << 48
+ } else {
+ 1 << 16
+ }
+ }
+
+ /// Whether every Wasm memory must have a maximum size specified. Defaults
+ /// to `false`.
+ fn memory_max_size_required(&self) -> bool {
+ false
+ }
+
+ /// The maximum, in elements, of any table's initial or maximum size.
+ ///
+ /// Defaults to 1 million.
+ fn max_table_elements(&self) -> u32 {
+ 1_000_000
+ }
+
+ /// Whether every Wasm table must have a maximum size specified. Defaults
+ /// to `false`.
+ fn table_max_size_required(&self) -> bool {
+ false
+ }
+
+ /// The maximum number of instances to use. Defaults to 10. This includes
+ /// imported instances.
+ ///
+ /// Note that this is irrelevant unless module linking is enabled.
+ fn max_instances(&self) -> usize {
+ 10
+ }
+
+ /// The maximum number of modules to use. Defaults to 10. This includes
+ /// imported modules.
+ ///
+ /// Note that this is irrelevant unless component model support is enabled.
+ fn max_modules(&self) -> usize {
+ 10
+ }
+
+ /// The maximum number of components to use. Defaults to 10. This includes
+ /// imported components.
+ ///
+ /// Note that this is irrelevant unless component model support is enabled.
+ fn max_components(&self) -> usize {
+ 10
+ }
+
+ /// The maximum number of values to use. Defaults to 10. This includes
+ /// imported values.
+ ///
+ /// Note that this is irrelevant unless value model support is enabled.
+ fn max_values(&self) -> usize {
+ 10
+ }
+
+ /// Control the probability of generating memory offsets that are in bounds
+ /// vs. potentially out of bounds.
+ ///
+ /// Return a tuple `(a, b, c)` where
+ ///
+ /// * `a / (a+b+c)` is the probability of generating a memory offset within
+ /// `0..memory.min_size`, i.e. an offset that is definitely in bounds of a
+ /// non-empty memory. (Note that if a memory is zero-sized, however, no
+ /// offset will ever be in bounds.)
+ ///
+ /// * `b / (a+b+c)` is the probability of generating a memory offset within
+ /// `memory.min_size..memory.max_size`, i.e. an offset that is possibly in
+ /// bounds if the memory has been grown.
+ ///
+ /// * `c / (a+b+c)` is the probability of generating a memory offset within
+ /// the range `memory.max_size..`, i.e. an offset that is definitely out
+ /// of bounds.
+ ///
+ /// At least one of `a`, `b`, and `c` must be non-zero.
+ ///
+ /// If you want to always generate memory offsets that are definitely in
+ /// bounds of a non-zero-sized memory, for example, you could return `(1, 0,
+ /// 0)`.
+ ///
+ /// By default, returns `(75, 24, 1)`.
+ fn memory_offset_choices(&self) -> (u32, u32, u32) {
+ (75, 24, 1)
+ }
+
+ /// The minimum size, in bytes, of all leb-encoded integers. Defaults to 1.
+ ///
+ /// This is useful for ensuring that all leb-encoded integers are decoded as
+ /// such rather than as simply one byte. This will forcibly extend leb
+ /// integers with an over-long encoding in some locations if the size would
+ /// otherwise be smaller than number returned here.
+ fn min_uleb_size(&self) -> u8 {
+ 1
+ }
+
+ /// Determines whether the bulk memory proposal is enabled for generating
+ /// instructions.
+ ///
+ /// Defaults to `false`.
+ fn bulk_memory_enabled(&self) -> bool {
+ false
+ }
+
+ /// Determines whether the reference types proposal is enabled for
+ /// generating instructions.
+ ///
+ /// Defaults to `false`.
+ fn reference_types_enabled(&self) -> bool {
+ false
+ }
+
+ /// Determines whether the tail calls proposal is enabled for generating
+ /// instructions.
+ ///
+ /// Defaults to `false`.
+ fn tail_call_enabled(&self) -> bool {
+ false
+ }
+
+ /// Determines whether the SIMD proposal is enabled for
+ /// generating instructions.
+ ///
+ /// Defaults to `false`.
+ fn simd_enabled(&self) -> bool {
+ false
+ }
+
+ /// Determines whether the Relaxed SIMD proposal is enabled for
+ /// generating instructions.
+ ///
+ /// Defaults to `false`.
+ fn relaxed_simd_enabled(&self) -> bool {
+ false
+ }
+
+ /// Determines whether the exception-handling proposal is enabled for
+ /// generating instructions.
+ ///
+ /// Defaults to `false`.
+ fn exceptions_enabled(&self) -> bool {
+ false
+ }
+
+ /// Determines whether the multi-value results are enabled.
+ ///
+ /// Defaults to `true`.
+ fn multi_value_enabled(&self) -> bool {
+ true
+ }
+
+ /// Determines whether the nontrapping-float-to-int-conversions proposal is enabled.
+ ///
+ /// Defaults to `true`.
+ fn saturating_float_to_int_enabled(&self) -> bool {
+ true
+ }
+
+ /// Determines whether the sign-extension-ops proposal is enabled.
+ ///
+ /// Defaults to `true`.
+ fn sign_extension_ops_enabled(&self) -> bool {
+ true
+ }
+
+ /// Determines whether a `start` export may be included. Defaults to `true`.
+ fn allow_start_export(&self) -> bool {
+ true
+ }
+
+ /// Returns the maximal size of the `alias` section.
+ fn max_aliases(&self) -> usize {
+ 1_000
+ }
+
+ /// Returns the maximal nesting depth of modules with the module linking
+ /// proposal.
+ fn max_nesting_depth(&self) -> usize {
+ 10
+ }
+
+ /// Returns the maximal effective size of any type generated by wasm-smith.
+ ///
+ /// Note that this number is roughly in units of "how many types would be
+ /// needed to represent the recursive type". A function with 8 parameters
+ /// and 2 results would take 11 types (one for the type, 10 for
+ /// params/results). A module type with 2 imports and 3 exports would
+ /// take 6 (module + imports + exports) plus the size of each import/export
+ /// type. This is a somewhat rough measurement that is not intended to be
+ /// very precise.
+ fn max_type_size(&self) -> u32 {
+ 1_000
+ }
+
+ /// Returns whether 64-bit memories are allowed.
+ ///
+ /// Note that this is the gate for the memory64 proposal to WebAssembly.
+ fn memory64_enabled(&self) -> bool {
+ false
+ }
+
+ /// Returns whether NaN values are canonicalized after all f32/f64
+ /// operation.
+ ///
+ /// This can be useful when a generated wasm module is executed in multiple
+ /// runtimes which may produce different NaN values. This ensures that the
+ /// generated module will always use the same NaN representation for all
+ /// instructions which have visible side effects, for example writing floats
+ /// to memory or float-to-int bitcast instructions.
+ fn canonicalize_nans(&self) -> bool {
+ false
+ }
+
+ /// Returns the kinds of instructions allowed in the generated wasm
+ /// programs.
+ ///
+ /// The categories of instructions match the categories used by the
+ /// [WebAssembly
+ /// specification](https://webassembly.github.io/spec/core/syntax/instructions.html);
+ /// e.g., numeric, vector, control, memory, etc. Note that modifying this
+ /// setting is separate from the proposal flags; that is, if `simd_enabled()
+ /// == true` but `allowed_instructions()` does not include vector
+ /// instructions, the generated programs will not include these instructions
+ /// but could contain vector types.
+ fn allowed_instructions(&self) -> InstructionKinds {
+ InstructionKinds::all()
+ }
+
+ /// Returns whether we should generate custom sections or not.
+ ///
+ /// This is false by default.
+ fn generate_custom_sections(&self) -> bool {
+ false
+ }
+
+ /// Determines whether the threads proposal is enabled.
+ ///
+ /// The [threads proposal] involves shared linear memory, new atomic
+ /// instructions, and new `wait` and `notify` instructions.
+ ///
+ /// [threads proposal]: https://github.com/WebAssembly/threads/blob/master/proposals/threads/Overview.md
+ ///
+ /// Defaults to `false`.
+ fn threads_enabled(&self) -> bool {
+ false
+ }
+
+ /// Returns whether we should avoid generating code that will possibly trap.
+ ///
+ /// For some trapping instructions, this will emit extra instructions to
+ /// ensure they don't trap, while some instructions will simply be excluded.
+ /// In cases where we would run into a trap, we instead choose some
+ /// arbitrary non-trapping behavior. For example, if we detect that a Load
+ /// instruction would attempt to access out-of-bounds memory, we instead
+ /// pretend the load succeeded and push 0 onto the stack.
+ ///
+ /// One type of trap that we can't currently avoid is StackOverflow. Even
+ /// when `disallow_traps` is set to true, wasm-smith will eventually
+ /// generate a program that infinitely recurses, causing the call stack to
+ /// be exhausted.
+ ///
+ /// Defaults to `false`.
+ fn disallow_traps(&self) -> bool {
+ false
+ }
+}
+
+/// The default configuration.
+///
+/// Uses every provided default from the [`Config`] trait; the empty `impl`
+/// below overrides nothing.
+#[derive(Arbitrary, Debug, Default, Copy, Clone)]
+pub struct DefaultConfig;
+
+impl Config for DefaultConfig {}
+
+/// A module configuration that uses [swarm testing].
+///
+/// Dynamically -- but still deterministically, via its `Arbitrary`
+/// implementation -- chooses configuration options.
+///
+/// [swarm testing]: https://www.cs.utah.edu/~regehr/papers/swarm12.pdf
+///
+/// Note that we pick only *maximums*, not minimums, here because it is more
+/// complex to describe the domain of valid configs when minima are involved
+/// (`min <= max` for each variable) and minima are mostly used to ensure
+/// certain elements are present, but do not widen the range of generated Wasm
+/// modules.
+///
+/// Each field below backs the like-named method of the [`Config`] impl
+/// further down in this file (one naming exception: `sign_extension_enabled`
+/// backs `Config::sign_extension_ops_enabled`).
+#[derive(Clone, Debug)]
+#[allow(missing_docs)]
+pub struct SwarmConfig {
+ pub allow_start_export: bool,
+ pub available_imports: Option<Vec<u8>>,
+ pub bulk_memory_enabled: bool,
+ pub canonicalize_nans: bool,
+ pub disallow_traps: bool,
+ pub exceptions_enabled: bool,
+ pub export_everything: bool,
+ pub max_aliases: usize,
+ pub max_components: usize,
+ pub max_data_segments: usize,
+ pub max_element_segments: usize,
+ pub max_elements: usize,
+ pub max_exports: usize,
+ pub max_funcs: usize,
+ pub max_globals: usize,
+ pub max_imports: usize,
+ pub max_instances: usize,
+ pub max_instructions: usize,
+ pub max_memories: usize,
+ pub max_memory_pages: u64,
+ pub max_modules: usize,
+ pub max_nesting_depth: usize,
+ pub max_tables: usize,
+ pub max_tags: usize,
+ pub max_type_size: u32,
+ pub max_types: usize,
+ pub max_values: usize,
+ pub memory64_enabled: bool,
+ pub memory_max_size_required: bool,
+ pub memory_offset_choices: (u32, u32, u32),
+ pub min_data_segments: usize,
+ pub min_element_segments: usize,
+ pub min_elements: usize,
+ pub min_exports: usize,
+ pub min_funcs: usize,
+ pub min_globals: usize,
+ pub min_imports: usize,
+ pub min_memories: u32,
+ pub min_tables: u32,
+ pub min_tags: usize,
+ pub min_types: usize,
+ pub min_uleb_size: u8,
+ pub multi_value_enabled: bool,
+ pub reference_types_enabled: bool,
+ pub tail_call_enabled: bool,
+ pub relaxed_simd_enabled: bool,
+ pub saturating_float_to_int_enabled: bool,
+ pub sign_extension_enabled: bool,
+ pub simd_enabled: bool,
+ pub threads_enabled: bool,
+ pub allowed_instructions: InstructionKinds,
+ pub max_table_elements: u32,
+ pub table_max_size_required: bool,
+}
+
+impl<'a> Arbitrary<'a> for SwarmConfig {
+ /// Builds a swarm configuration from fuzzer-provided bytes: the first
+ /// group of fields is chosen pseudo-randomly from `u`, the second group
+ /// is pinned to fixed values (see comment below).
+ fn arbitrary(u: &mut Unstructured<'a>) -> Result<Self> {
+ // Upper bound shared by most of the randomly-chosen count limits.
+ const MAX_MAXIMUM: usize = 1000;
+
+ let reference_types_enabled: bool = u.arbitrary()?;
+ // More than one table requires the reference-types proposal.
+ let max_tables = if reference_types_enabled { 100 } else { 1 };
+
+ Ok(SwarmConfig {
+ max_types: u.int_in_range(0..=MAX_MAXIMUM)?,
+ max_imports: u.int_in_range(0..=MAX_MAXIMUM)?,
+ max_tags: u.int_in_range(0..=MAX_MAXIMUM)?,
+ max_funcs: u.int_in_range(0..=MAX_MAXIMUM)?,
+ max_globals: u.int_in_range(0..=MAX_MAXIMUM)?,
+ max_exports: u.int_in_range(0..=MAX_MAXIMUM)?,
+ max_element_segments: u.int_in_range(0..=MAX_MAXIMUM)?,
+ max_elements: u.int_in_range(0..=MAX_MAXIMUM)?,
+ max_data_segments: u.int_in_range(0..=MAX_MAXIMUM)?,
+ max_instructions: u.int_in_range(0..=MAX_MAXIMUM)?,
+ max_memories: u.int_in_range(0..=100)?,
+ max_tables,
+ // Unbounded here; the `Config` impl clamps this per 32/64-bit
+ // limits in `max_memory_pages` below.
+ max_memory_pages: u.arbitrary()?,
+ // NOTE(review): lower bound is 0 here even though the trait
+ // default documents a minimum of 1 — confirm 0 is intended.
+ min_uleb_size: u.int_in_range(0..=5)?,
+ // The reference-types proposal builds on bulk memory, so force
+ // bulk memory on whenever reference types are enabled.
+ bulk_memory_enabled: reference_types_enabled || u.arbitrary()?,
+ reference_types_enabled,
+ tail_call_enabled: u.arbitrary()?,
+ simd_enabled: u.arbitrary()?,
+ multi_value_enabled: u.arbitrary()?,
+ max_aliases: u.int_in_range(0..=MAX_MAXIMUM)?,
+ max_nesting_depth: u.int_in_range(0..=10)?,
+ saturating_float_to_int_enabled: u.arbitrary()?,
+ sign_extension_enabled: u.arbitrary()?,
+ allowed_instructions: {
+ use flagset::Flags;
+ // Independently flip a coin for each instruction category
+ // (the essence of swarm testing).
+ let mut allowed = Vec::new();
+ for kind in crate::core::InstructionKind::LIST {
+ if u.arbitrary()? {
+ allowed.push(*kind);
+ }
+ }
+ InstructionKinds::new(&allowed)
+ },
+ table_max_size_required: u.arbitrary()?,
+ max_table_elements: u.int_in_range(0..=1_000_000)?,
+
+ // These fields, unlike the ones above, are less useful to set.
+ // They either make weird inputs or are for features not widely
+ // implemented yet so they're turned off by default.
+ min_types: 0,
+ min_imports: 0,
+ min_tags: 0,
+ min_funcs: 0,
+ min_globals: 0,
+ min_exports: 0,
+ min_element_segments: 0,
+ min_elements: 0,
+ min_data_segments: 0,
+ min_memories: 0,
+ min_tables: 0,
+ memory_max_size_required: false,
+ max_instances: 0,
+ max_modules: 0,
+ max_components: 0,
+ max_values: 0,
+ memory_offset_choices: (75, 24, 1),
+ allow_start_export: true,
+ relaxed_simd_enabled: false,
+ exceptions_enabled: false,
+ memory64_enabled: false,
+ max_type_size: 1000,
+ canonicalize_nans: false,
+ available_imports: None,
+ threads_enabled: false,
+ export_everything: false,
+ disallow_traps: false,
+ })
+ }
+}
+
+// The `Config` impl for `SwarmConfig` simply forwards each trait method to
+// the like-named struct field; the only non-trivial accessor is
+// `max_memory_pages`, which additionally clamps to the 32/64-bit page limit.
+impl Config for SwarmConfig {
+ fn min_types(&self) -> usize {
+ self.min_types
+ }
+
+ fn max_types(&self) -> usize {
+ self.max_types
+ }
+
+ fn min_imports(&self) -> usize {
+ self.min_imports
+ }
+
+ fn max_imports(&self) -> usize {
+ self.max_imports
+ }
+
+ fn available_imports(&self) -> Option<Cow<'_, [u8]>> {
+ self.available_imports
+ .as_ref()
+ .map(|is| Cow::Borrowed(&is[..]))
+ }
+
+ fn min_funcs(&self) -> usize {
+ self.min_funcs
+ }
+
+ fn max_funcs(&self) -> usize {
+ self.max_funcs
+ }
+
+ fn min_globals(&self) -> usize {
+ self.min_globals
+ }
+
+ fn max_globals(&self) -> usize {
+ self.max_globals
+ }
+
+ fn min_exports(&self) -> usize {
+ self.min_exports
+ }
+
+ fn max_exports(&self) -> usize {
+ self.max_exports
+ }
+
+ fn export_everything(&self) -> bool {
+ self.export_everything
+ }
+
+ fn min_element_segments(&self) -> usize {
+ self.min_element_segments
+ }
+
+ fn max_element_segments(&self) -> usize {
+ self.max_element_segments
+ }
+
+ fn min_elements(&self) -> usize {
+ self.min_elements
+ }
+
+ fn max_elements(&self) -> usize {
+ self.max_elements
+ }
+
+ fn min_data_segments(&self) -> usize {
+ self.min_data_segments
+ }
+
+ fn max_data_segments(&self) -> usize {
+ self.max_data_segments
+ }
+
+ fn max_instructions(&self) -> usize {
+ self.max_instructions
+ }
+
+ fn min_memories(&self) -> u32 {
+ self.min_memories
+ }
+
+ fn max_memories(&self) -> usize {
+ self.max_memories
+ }
+
+ fn min_tables(&self) -> u32 {
+ self.min_tables
+ }
+
+ fn max_tables(&self) -> usize {
+ self.max_tables
+ }
+
+ // Clamp the arbitrary u64 from `SwarmConfig::arbitrary` to the spec's
+ // per-address-space page limits (2^48 for memory64, 2^16 otherwise).
+ fn max_memory_pages(&self, is_64: bool) -> u64 {
+ if is_64 {
+ self.max_memory_pages.min(1 << 48)
+ } else {
+ self.max_memory_pages.min(1 << 16)
+ }
+ }
+
+ fn memory_max_size_required(&self) -> bool {
+ self.memory_max_size_required
+ }
+
+ fn max_instances(&self) -> usize {
+ self.max_instances
+ }
+
+ fn max_modules(&self) -> usize {
+ self.max_modules
+ }
+
+ fn memory_offset_choices(&self) -> (u32, u32, u32) {
+ self.memory_offset_choices
+ }
+
+ fn min_uleb_size(&self) -> u8 {
+ self.min_uleb_size
+ }
+
+ fn bulk_memory_enabled(&self) -> bool {
+ self.bulk_memory_enabled
+ }
+
+ fn reference_types_enabled(&self) -> bool {
+ self.reference_types_enabled
+ }
+
+ fn tail_call_enabled(&self) -> bool {
+ self.tail_call_enabled
+ }
+
+ fn simd_enabled(&self) -> bool {
+ self.simd_enabled
+ }
+
+ fn relaxed_simd_enabled(&self) -> bool {
+ self.relaxed_simd_enabled
+ }
+
+ fn exceptions_enabled(&self) -> bool {
+ self.exceptions_enabled
+ }
+
+ fn multi_value_enabled(&self) -> bool {
+ self.multi_value_enabled
+ }
+
+ fn saturating_float_to_int_enabled(&self) -> bool {
+ self.saturating_float_to_int_enabled
+ }
+
+ // Note the field name differs from the method name here.
+ fn sign_extension_ops_enabled(&self) -> bool {
+ self.sign_extension_enabled
+ }
+
+ fn allow_start_export(&self) -> bool {
+ self.allow_start_export
+ }
+
+ fn max_aliases(&self) -> usize {
+ self.max_aliases
+ }
+
+ fn max_nesting_depth(&self) -> usize {
+ self.max_nesting_depth
+ }
+
+ fn max_type_size(&self) -> u32 {
+ self.max_type_size
+ }
+
+ fn memory64_enabled(&self) -> bool {
+ self.memory64_enabled
+ }
+
+ fn canonicalize_nans(&self) -> bool {
+ self.canonicalize_nans
+ }
+
+ fn threads_enabled(&self) -> bool {
+ self.threads_enabled
+ }
+
+ fn allowed_instructions(&self) -> InstructionKinds {
+ self.allowed_instructions
+ }
+
+ fn max_table_elements(&self) -> u32 {
+ self.max_table_elements
+ }
+
+ fn table_max_size_required(&self) -> bool {
+ self.table_max_size_required
+ }
+
+ fn disallow_traps(&self) -> bool {
+ self.disallow_traps
+ }
+}
diff --git a/third_party/rust/wasm-smith/src/core.rs b/third_party/rust/wasm-smith/src/core.rs
new file mode 100644
index 0000000000..1d3702179c
--- /dev/null
+++ b/third_party/rust/wasm-smith/src/core.rs
@@ -0,0 +1,1741 @@
+//! Generating arbitrary core Wasm modules.
+
+mod code_builder;
+pub(crate) mod encode;
+mod terminate;
+
+use crate::{arbitrary_loop, limited_string, unique_string, Config, DefaultConfig};
+use arbitrary::{Arbitrary, Result, Unstructured};
+use code_builder::CodeBuilderAllocations;
+use flagset::{flags, FlagSet};
+use std::collections::HashSet;
+use std::convert::TryFrom;
+use std::marker;
+use std::ops::Range;
+use std::rc::Rc;
+use std::str::{self, FromStr};
+use wasm_encoder::{BlockType, ConstExpr, ExportKind, HeapType, RefType, ValType};
+pub(crate) use wasm_encoder::{GlobalType, MemoryType, TableType};
+
+// NB: these constants are used to control the rate at which various events
+// occur. For more information see where these constants are used. Their values
+// are somewhat random in the sense that they're not scientifically determined
+// or anything like that, I just threw a bunch of random data at wasm-smith and
+// measured various rates of ooms/traps/etc and adjusted these so abnormal
+// events were ~1% of the time.
+const CHANCE_OFFSET_INBOUNDS: usize = 10; // bigger = fewer traps
+const CHANCE_SEGMENT_ON_EMPTY: usize = 10; // bigger = fewer traps
+const PCT_INBOUNDS: f64 = 0.995; // bigger = fewer traps
+
+// Shorthand for the encoder's instruction type with a `'static` payload.
+type Instruction = wasm_encoder::Instruction<'static>;
+
+/// A pseudo-random WebAssembly module.
+///
+/// Construct instances of this type with [the `Arbitrary`
+/// trait](https://docs.rs/arbitrary/*/arbitrary/trait.Arbitrary.html).
+///
+/// ## Configuring Generated Modules
+///
+/// This uses the [`DefaultConfig`][crate::DefaultConfig] configuration. If you
+/// want to customize the shape of generated modules, define your own
+/// configuration type, implement the [`Config`][crate::Config] trait for it,
+/// and use [`ConfiguredModule<YourConfigType>`][crate::ConfiguredModule]
+/// instead of plain `Module`.
+#[derive(Debug)]
+pub struct Module {
+ // The configuration governing generation, shared via `Rc` so clones of
+ // the handle are cheap.
+ config: Rc<dyn Config>,
+ // Whether two imports may share the same module/field names.
+ duplicate_imports_behavior: DuplicateImportsBehavior,
+ // The set of value types generation may draw from (see
+ // `configured_valtypes`).
+ valtypes: Vec<ValType>,
+
+ /// All types locally defined in this module (available in the type index
+ /// space).
+ types: Vec<Type>,
+
+ /// Whether we should encode a types section, even if `self.types` is empty.
+ should_encode_types: bool,
+
+ /// All of this module's imports. These don't have their own index space,
+ /// but instead introduce entries to each imported entity's associated index
+ /// space.
+ imports: Vec<Import>,
+
+ /// Whether we should encode an imports section, even if `self.imports` is
+ /// empty.
+ should_encode_imports: bool,
+
+ /// Indices within `types` that are function types.
+ func_types: Vec<u32>,
+
+ /// Number of imported items into this module.
+ num_imports: usize,
+
+ /// The number of tags defined in this module (not imported or
+ /// aliased).
+ num_defined_tags: usize,
+
+ /// The number of functions defined in this module (not imported or
+ /// aliased).
+ num_defined_funcs: usize,
+
+ /// The number of tables defined in this module (not imported or
+ /// aliased).
+ num_defined_tables: usize,
+
+ /// The number of memories defined in this module (not imported or
+ /// aliased).
+ num_defined_memories: usize,
+
+ /// The indexes and initialization expressions of globals defined in this
+ /// module.
+ defined_globals: Vec<(u32, GlobalInitExpr)>,
+
+ /// All tags available to this module, sorted by their index. The list
+ /// entry is the type of each tag.
+ tags: Vec<TagType>,
+
+ /// All functions available to this module, sorted by their index. The list
+ /// entry points to the index in this module where the function type is
+ /// defined (if available) and provides the type of the function.
+ funcs: Vec<(u32, Rc<FuncType>)>,
+
+ /// All tables available to this module, sorted by their index. The list
+ /// entry is the type of each table.
+ tables: Vec<TableType>,
+
+ /// All globals available to this module, sorted by their index. The list
+ /// entry is the type of each global.
+ globals: Vec<GlobalType>,
+
+ /// All memories available to this module, sorted by their index. The list
+ /// entry is the type of each memory.
+ memories: Vec<MemoryType>,
+
+ // Exports, start function, element segments, function bodies, and data
+ // segments gathered during generation.
+ exports: Vec<(String, ExportKind, u32)>,
+ start: Option<u32>,
+ elems: Vec<ElementSegment>,
+ code: Vec<Code>,
+ data: Vec<DataSegment>,
+
+ /// The predicted size of the effective type of this module, based on this
+ /// module's size of the types of imports/exports.
+ type_size: u32,
+}
+
+impl<'a> Arbitrary<'a> for Module {
+ // Delegate to `ConfiguredModule<DefaultConfig>` and unwrap the inner
+ // module, so `Module` generation always uses the default config.
+ fn arbitrary(u: &mut Unstructured<'a>) -> Result<Self> {
+ Ok(ConfiguredModule::<DefaultConfig>::arbitrary(u)?.module)
+ }
+}
+
+/// A pseudo-random generated WebAssembly file with custom configuration.
+///
+/// If you don't care about custom configuration, use [`Module`][crate::Module]
+/// instead.
+///
+/// For details on configuring, see the [`Config`][crate::Config] trait.
+#[derive(Debug)]
+pub struct ConfiguredModule<C> {
+ /// The generated module, controlled by the configuration of `C` in the
+ /// `Arbitrary` implementation.
+ pub module: Module,
+ // Zero-sized marker tying the type parameter `C` to this struct without
+ // storing a value of it.
+ _marker: marker::PhantomData<C>,
+}
+
+/// Whether generated modules may contain two imports with the same
+/// module/field name pair.
+#[derive(Debug, Clone, Copy, PartialEq, Eq)]
+pub(crate) enum DuplicateImportsBehavior {
+ Allowed,
+ Disallowed,
+}
+
+impl Module {
+ /// Returns a reference to the internal configuration.
+ pub fn config(&self) -> &dyn Config {
+ &*self.config
+ }
+
+ /// Creates a new `Module` with the specified `config` for
+ /// configuration and `Unstructured` for the DNA of this module.
+ pub fn new(config: impl Config, u: &mut Unstructured<'_>) -> Result<Self> {
+ Self::new_internal(Rc::new(config), u, DuplicateImportsBehavior::Allowed)
+ }
+
+ /// Crate-internal constructor that additionally controls whether
+ /// duplicate imports are permitted.
+ pub(crate) fn new_internal(
+ config: Rc<dyn Config>,
+ u: &mut Unstructured<'_>,
+ duplicate_imports_behavior: DuplicateImportsBehavior,
+ ) -> Result<Self> {
+ let mut module = Module::empty(config, duplicate_imports_behavior);
+ module.build(u, false)?;
+ Ok(module)
+ }
+
+ /// Creates a module with no contents at all, as the starting point for
+ /// `build` to fill in.
+ fn empty(config: Rc<dyn Config>, duplicate_imports_behavior: DuplicateImportsBehavior) -> Self {
+ Module {
+ config,
+ duplicate_imports_behavior,
+ valtypes: Vec::new(),
+ types: Vec::new(),
+ should_encode_types: false,
+ imports: Vec::new(),
+ should_encode_imports: false,
+ func_types: Vec::new(),
+ num_imports: 0,
+ num_defined_tags: 0,
+ num_defined_funcs: 0,
+ num_defined_tables: 0,
+ num_defined_memories: 0,
+ defined_globals: Vec::new(),
+ tags: Vec::new(),
+ funcs: Vec::new(),
+ tables: Vec::new(),
+ globals: Vec::new(),
+ memories: Vec::new(),
+ exports: Vec::new(),
+ start: None,
+ elems: Vec::new(),
+ code: Vec::new(),
+ data: Vec::new(),
+ type_size: 0,
+ }
+ }
+}
+
+impl<'a, C: Config + Arbitrary<'a>> Arbitrary<'a> for ConfiguredModule<C> {
+ // First draws an arbitrary config of type `C` from `u`, then generates
+ // the module itself from the remaining bytes.
+ fn arbitrary(u: &mut Unstructured<'a>) -> Result<Self> {
+ Ok(ConfiguredModule {
+ module: Module::new(C::arbitrary(u)?, u)?,
+ _marker: marker::PhantomData,
+ })
+ }
+}
+
+/// Same as [`Module`], but may be invalid.
+///
+/// This module generates function bodies differently than `Module` to try to
+/// better explore wasm decoders and such.
+#[derive(Debug)]
+pub struct MaybeInvalidModule {
+ module: Module,
+}
+
+impl MaybeInvalidModule {
+ /// Encode this Wasm module into bytes.
+ pub fn to_bytes(&self) -> Vec<u8> {
+ self.module.to_bytes()
+ }
+}
+
+impl<'a> Arbitrary<'a> for MaybeInvalidModule {
+ // Same pipeline as `Module`, but `build` is invoked with
+ // `allow_invalid = true`.
+ fn arbitrary(u: &mut Unstructured<'a>) -> Result<Self> {
+ let mut module = Module::empty(Rc::new(DefaultConfig), DuplicateImportsBehavior::Allowed);
+ module.build(u, true)?;
+ Ok(MaybeInvalidModule { module })
+ }
+}
+
+/// An entry in the module's type index space. Currently only function types
+/// are represented.
+#[derive(Clone, Debug, PartialEq, Eq, Hash)]
+pub(crate) enum Type {
+ Func(Rc<FuncType>),
+}
+
+/// A function signature.
+#[derive(Clone, Debug, PartialEq, Eq, Hash, PartialOrd, Ord)]
+pub(crate) struct FuncType {
+ /// Types of the parameter values.
+ pub(crate) params: Vec<ValType>,
+ /// Types of the result values.
+ pub(crate) results: Vec<ValType>,
+}
+
+/// An import of an entity provided externally or by a component.
+#[derive(Clone, Debug, PartialEq, Eq, Hash)]
+pub(crate) struct Import {
+ /// The name of the module providing this entity.
+ pub(crate) module: String,
+ /// The name of the entity.
+ pub(crate) field: String,
+ /// The type of this entity.
+ pub(crate) entity_type: EntityType,
+}
+
+/// Type of an entity.
+#[derive(Clone, Debug, PartialEq, Eq, Hash)]
+pub(crate) enum EntityType {
+ /// A global entity.
+ Global(GlobalType),
+ /// A table entity.
+ Table(TableType),
+ /// A memory entity.
+ Memory(MemoryType),
+ /// A tag entity.
+ Tag(TagType),
+ /// A function entity, carrying its type-index and the signature itself.
+ Func(u32, Rc<FuncType>),
+}
+
+/// Type of a tag.
+#[derive(Clone, Debug, PartialEq, Eq, Hash)]
+pub(crate) struct TagType {
+ /// Index of the function type.
+ func_type_idx: u32,
+ /// Type of the function.
+ func_type: Rc<FuncType>,
+}
+
+#[derive(Debug)]
+struct ElementSegment {
+ kind: ElementKind,
+ ty: RefType,
+ items: Elements,
+}
+
+#[derive(Debug)]
+enum ElementKind {
+ Passive,
+ Declared,
+ Active {
+ table: Option<u32>, // None == table 0 implicitly
+ offset: Offset,
+ },
+}
+
+#[derive(Debug)]
+enum Elements {
+ Functions(Vec<u32>),
+ Expressions(Vec<Option<u32>>),
+}
+
+#[derive(Debug)]
+struct Code {
+ locals: Vec<ValType>,
+ instructions: Instructions,
+}
+
+#[derive(Debug)]
+enum Instructions {
+ Generated(Vec<Instruction>),
+ Arbitrary(Vec<u8>),
+}
+
+#[derive(Debug)]
+struct DataSegment {
+ kind: DataSegmentKind,
+ init: Vec<u8>,
+}
+
+#[derive(Debug)]
+enum DataSegmentKind {
+ Passive,
+ Active { memory_index: u32, offset: Offset },
+}
+
+#[derive(Debug)]
+pub(crate) enum Offset {
+ Const32(i32),
+ Const64(i64),
+ Global(u32),
+}
+
+#[derive(Debug)]
+pub(crate) enum GlobalInitExpr {
+ FuncRef(u32),
+ ConstExpr(ConstExpr),
+}
+
+impl Module {
+    /// Populate this module with arbitrary-but-structured contents drawn
+    /// from `u`.
+    ///
+    /// Sections are filled in roughly in the order they appear in the binary
+    /// format. When `allow_invalid` is true, function bodies may be raw
+    /// arbitrary bytes rather than well-typed instruction sequences.
+    fn build(&mut self, u: &mut Unstructured, allow_invalid: bool) -> Result<()> {
+        self.valtypes = configured_valtypes(&*self.config);
+
+        // We attempt to figure out our available imports *before* creating the types section here,
+        // because the types for the imports are already well-known (specified by the user) and we
+        // must have those populated for all function/etc. imports, no matter what.
+        //
+        // This can affect the available capacity for types and such.
+        if self.arbitrary_imports_from_available(u)? {
+            self.arbitrary_types(u)?;
+        } else {
+            self.arbitrary_types(u)?;
+            self.arbitrary_imports(u)?;
+        }
+
+        // Even when empty, a section is occasionally emitted (based on an
+        // input byte) to exercise decoders on empty sections.
+        self.should_encode_types = !self.types.is_empty() || u.arbitrary()?;
+        self.should_encode_imports = !self.imports.is_empty() || u.arbitrary()?;
+
+        self.arbitrary_tags(u)?;
+        self.arbitrary_funcs(u)?;
+        self.arbitrary_tables(u)?;
+        self.arbitrary_memories(u)?;
+        self.arbitrary_globals(u)?;
+        self.arbitrary_exports(u)?;
+        self.arbitrary_start(u)?;
+        self.arbitrary_elems(u)?;
+        self.arbitrary_data(u)?;
+        self.arbitrary_code(u, allow_invalid)?;
+        Ok(())
+    }
+
+    /// Generate additional arbitrary type definitions, up to the configured
+    /// maximum.
+    fn arbitrary_types(&mut self, u: &mut Unstructured) -> Result<()> {
+        // NB: It isn't guaranteed that `self.types.is_empty()` because when
+        // available imports are configured, we may eagerly add specific types
+        // for the available imports before generating arbitrary types here.
+        let min = self.config.min_types().saturating_sub(self.types.len());
+        let max = self.config.max_types().saturating_sub(self.types.len());
+        arbitrary_loop(u, min, max, |u| {
+            let ty = self.arbitrary_type(u)?;
+            self.record_type(&ty);
+            self.types.push(ty);
+            Ok(true)
+        })?;
+        Ok(())
+    }
+
+    /// Record the index of the about-to-be-pushed type in the per-kind index
+    /// list (currently only function types exist).
+    fn record_type(&mut self, ty: &Type) {
+        let list = match &ty {
+            Type::Func(_) => &mut self.func_types,
+        };
+        list.push(self.types.len() as u32);
+    }
+
+    /// Generate a single arbitrary type definition.
+    fn arbitrary_type(&mut self, u: &mut Unstructured) -> Result<Type> {
+        Ok(Type::Func(self.arbitrary_func_type(u)?))
+    }
+
+    /// Generate an arbitrary function signature, capping the result count at
+    /// one unless the multi-value proposal is enabled.
+    fn arbitrary_func_type(&mut self, u: &mut Unstructured) -> Result<Rc<FuncType>> {
+        arbitrary_func_type(
+            u,
+            &self.valtypes,
+            if !self.config.multi_value_enabled() {
+                Some(1)
+            } else {
+                None
+            },
+        )
+    }
+
+    /// Whether another tag may be added: exceptions must be enabled, a
+    /// suitable (no-result) function type must exist, and the configured
+    /// maximum must not yet be reached.
+    fn can_add_local_or_import_tag(&self) -> bool {
+        self.config.exceptions_enabled()
+            && self.has_tag_func_types()
+            && self.tags.len() < self.config.max_tags()
+    }
+
+    /// Whether another function may be added (a signature must exist and the
+    /// configured maximum must not yet be reached).
+    fn can_add_local_or_import_func(&self) -> bool {
+        !self.func_types.is_empty() && self.funcs.len() < self.config.max_funcs()
+    }
+
+    /// Whether another table may be added.
+    fn can_add_local_or_import_table(&self) -> bool {
+        self.tables.len() < self.config.max_tables()
+    }
+
+    /// Whether another global may be added.
+    fn can_add_local_or_import_global(&self) -> bool {
+        self.globals.len() < self.config.max_globals()
+    }
+
+    /// Whether another memory may be added.
+    fn can_add_local_or_import_memory(&self) -> bool {
+        self.memories.len() < self.config.max_memories()
+    }
+
+    /// Generate arbitrary imports, respecting the per-kind maximums and the
+    /// overall type-size budget.
+    fn arbitrary_imports(&mut self, u: &mut Unstructured) -> Result<()> {
+        // Bail early if the type-size budget is already exhausted.
+        if self.config.max_type_size() < self.type_size {
+            return Ok(());
+        }
+
+        let mut import_strings = HashSet::new();
+        // Each choice generates one kind of importable entity type.
+        let mut choices: Vec<fn(&mut Unstructured, &mut Module) -> Result<EntityType>> =
+            Vec::with_capacity(5);
+        let min = self.config.min_imports().saturating_sub(self.num_imports);
+        let max = self.config.max_imports().saturating_sub(self.num_imports);
+        arbitrary_loop(u, min, max, |u| {
+            // Rebuild the choice list each iteration since adding an import
+            // may have exhausted one of the per-kind maximums.
+            choices.clear();
+            if self.can_add_local_or_import_tag() {
+                choices.push(|u, m| {
+                    let ty = m.arbitrary_tag_type(u)?;
+                    Ok(EntityType::Tag(ty))
+                });
+            }
+            if self.can_add_local_or_import_func() {
+                choices.push(|u, m| {
+                    let idx = *u.choose(&m.func_types)?;
+                    let ty = m.func_type(idx).clone();
+                    Ok(EntityType::Func(idx, ty))
+                });
+            }
+            if self.can_add_local_or_import_global() {
+                choices.push(|u, m| {
+                    let ty = m.arbitrary_global_type(u)?;
+                    Ok(EntityType::Global(ty))
+                });
+            }
+            if self.can_add_local_or_import_memory() {
+                choices.push(|u, m| {
+                    let ty = arbitrary_memtype(u, m.config())?;
+                    Ok(EntityType::Memory(ty))
+                });
+            }
+            if self.can_add_local_or_import_table() {
+                choices.push(|u, m| {
+                    let ty = arbitrary_table_type(u, m.config())?;
+                    Ok(EntityType::Table(ty))
+                });
+            }
+
+            if choices.is_empty() {
+                // We are out of choices. If we have not have reached the
+                // minimum yet, then we have no way to satisfy the constraint,
+                // but we follow max-constraints before the min-import
+                // constraint.
+                return Ok(false);
+            }
+
+            // Generate a type to import, but only actually add the item if the
+            // type size budget allows us to.
+            let f = u.choose(&choices)?;
+            let entity_type = f(u, self)?;
+            let budget = self.config.max_type_size() - self.type_size;
+            if entity_type.size() + 1 > budget {
+                return Ok(false);
+            }
+            self.type_size += entity_type.size() + 1;
+
+            // Generate an arbitrary module/name pair to name this import.
+            let mut import_pair = unique_import_strings(1_000, u)?;
+            if self.duplicate_imports_behavior == DuplicateImportsBehavior::Disallowed {
+                // De-duplicate by appending a counter to the field name until
+                // the pair is unique.
+                while import_strings.contains(&import_pair) {
+                    use std::fmt::Write;
+                    write!(&mut import_pair.1, "{}", import_strings.len()).unwrap();
+                }
+                import_strings.insert(import_pair.clone());
+            }
+            let (module, field) = import_pair;
+
+            // Once our name is determined, then we push the typed item into the
+            // appropriate namespace.
+            match &entity_type {
+                EntityType::Tag(ty) => self.tags.push(ty.clone()),
+                EntityType::Func(idx, ty) => self.funcs.push((*idx, ty.clone())),
+                EntityType::Global(ty) => self.globals.push(*ty),
+                EntityType::Table(ty) => self.tables.push(*ty),
+                EntityType::Memory(ty) => self.memories.push(*ty),
+            }
+
+            self.num_imports += 1;
+            self.imports.push(Import {
+                module,
+                field,
+                entity_type,
+            });
+            Ok(true)
+        })?;
+
+        Ok(())
+    }
+
+    /// Generate some arbitrary imports from the list of available imports.
+    ///
+    /// Returns `true` if there was a list of available imports configured. Otherwise `false` and
+    /// the caller should generate arbitrary imports.
+    fn arbitrary_imports_from_available(&mut self, u: &mut Unstructured) -> Result<bool> {
+        let example_module = if let Some(wasm) = self.config.available_imports() {
+            wasm
+        } else {
+            return Ok(false);
+        };
+
+        // First, parse the module-by-example to collect the types and imports.
+        //
+        // `available_types` will map from a signature index (which is the same as the index into
+        // this vector) as it appears in the parsed code, to the type itself as well as to the
+        // index in our newly generated module. Initially the option is `None` and will become a
+        // `Some` when we encounter an import that uses this signature in the next portion of this
+        // function. See also the `make_func_type` closure below.
+        let mut available_types = Vec::<(wasmparser::Type, Option<u32>)>::new();
+        let mut available_imports = Vec::<wasmparser::Import>::new();
+        for payload in wasmparser::Parser::new(0).parse_all(&example_module) {
+            match payload.expect("could not parse the available import payload") {
+                wasmparser::Payload::TypeSection(type_reader) => {
+                    for ty in type_reader {
+                        let ty = ty.expect("could not parse type section");
+                        available_types.push((ty, None));
+                    }
+                }
+                wasmparser::Payload::ImportSection(import_reader) => {
+                    for im in import_reader {
+                        let im = im.expect("could not read import");
+                        // We can immediately filter whether this is an import we want to
+                        // use.
+                        let use_import = u.arbitrary().unwrap_or(false);
+                        if !use_import {
+                            continue;
+                        }
+                        available_imports.push(im);
+                    }
+                }
+                _ => {}
+            }
+        }
+
+        // In this function we need to place imported function/tag types in the types section and
+        // generate import entries (which refer to said types) at the same time.
+        let max_types = self.config.max_types();
+        let multi_value_enabled = self.config.multi_value_enabled();
+        let mut new_imports = Vec::with_capacity(available_imports.len());
+        let first_type_index = self.types.len();
+        let mut new_types = Vec::<Type>::new();
+
+        // Returns the index to the translated type in the to-be type section, and the reference to
+        // the type itself.
+        let mut make_func_type = |parsed_sig_idx: u32| {
+            let serialized_sig_idx = match available_types.get_mut(parsed_sig_idx as usize) {
+                None => panic!("signature index refers to a type out of bounds"),
+                Some((_, Some(idx))) => *idx as usize,
+                Some((wasmparser::Type::Func(func_type), index_store)) => {
+                    let multi_value_required = func_type.results().len() > 1;
+                    let new_index = first_type_index + new_types.len();
+                    // Reject the type if it would overflow the type section or
+                    // needs a disabled proposal.
+                    if new_index >= max_types || (multi_value_required && !multi_value_enabled) {
+                        return None;
+                    }
+                    let func_type = Rc::new(FuncType {
+                        params: func_type
+                            .params()
+                            .iter()
+                            .map(|t| convert_type(*t))
+                            .collect(),
+                        results: func_type
+                            .results()
+                            .iter()
+                            .map(|t| convert_type(*t))
+                            .collect(),
+                    });
+                    index_store.replace(new_index as u32);
+                    new_types.push(Type::Func(Rc::clone(&func_type)));
+                    new_index
+                }
+            };
+            match &new_types[serialized_sig_idx - first_type_index] {
+                Type::Func(f) => Some((serialized_sig_idx as u32, Rc::clone(f))),
+            }
+        };
+
+        for import in available_imports {
+            let type_size_budget = self.config.max_type_size() - self.type_size;
+            let entity_type = match &import.ty {
+                wasmparser::TypeRef::Func(sig_idx) => {
+                    if self.funcs.len() >= self.config.max_funcs() {
+                        continue;
+                    } else if let Some((sig_idx, func_type)) = make_func_type(*sig_idx) {
+                        let entity = EntityType::Func(sig_idx as u32, Rc::clone(&func_type));
+                        // NOTE(review): unlike the table/memory/global branches
+                        // below, this path checks the budget but never adds
+                        // `entity.size()` to `self.type_size` — confirm whether
+                        // that is intentional.
+                        if type_size_budget < entity.size() {
+                            continue;
+                        }
+                        self.funcs.push((sig_idx, func_type));
+                        entity
+                    } else {
+                        continue;
+                    }
+                }
+
+                wasmparser::TypeRef::Tag(wasmparser::TagType { func_type_idx, .. }) => {
+                    let can_add_tag = self.tags.len() < self.config.max_tags();
+                    if !self.config.exceptions_enabled() || !can_add_tag {
+                        continue;
+                    } else if let Some((sig_idx, func_type)) = make_func_type(*func_type_idx) {
+                        let tag_type = TagType {
+                            func_type_idx: sig_idx,
+                            func_type,
+                        };
+                        let entity = EntityType::Tag(tag_type.clone());
+                        if type_size_budget < entity.size() {
+                            continue;
+                        }
+                        self.tags.push(tag_type);
+                        entity
+                    } else {
+                        continue;
+                    }
+                }
+
+                wasmparser::TypeRef::Table(table_ty) => {
+                    let table_ty = TableType {
+                        element_type: convert_reftype(table_ty.element_type),
+                        minimum: table_ty.initial,
+                        maximum: table_ty.maximum,
+                    };
+                    let entity = EntityType::Table(table_ty);
+                    let type_size = entity.size();
+                    if type_size_budget < type_size || !self.can_add_local_or_import_table() {
+                        continue;
+                    }
+                    self.type_size += type_size;
+                    self.tables.push(table_ty);
+                    entity
+                }
+
+                wasmparser::TypeRef::Memory(memory_ty) => {
+                    let memory_ty = MemoryType {
+                        minimum: memory_ty.initial,
+                        maximum: memory_ty.maximum,
+                        memory64: memory_ty.memory64,
+                        shared: memory_ty.shared,
+                    };
+                    let entity = EntityType::Memory(memory_ty);
+                    let type_size = entity.size();
+                    if type_size_budget < type_size || !self.can_add_local_or_import_memory() {
+                        continue;
+                    }
+                    self.type_size += type_size;
+                    self.memories.push(memory_ty);
+                    entity
+                }
+
+                wasmparser::TypeRef::Global(global_ty) => {
+                    let global_ty = GlobalType {
+                        val_type: convert_type(global_ty.content_type),
+                        mutable: global_ty.mutable,
+                    };
+                    let entity = EntityType::Global(global_ty);
+                    let type_size = entity.size();
+                    if type_size_budget < type_size || !self.can_add_local_or_import_global() {
+                        continue;
+                    }
+                    self.type_size += type_size;
+                    self.globals.push(global_ty);
+                    entity
+                }
+            };
+            new_imports.push(Import {
+                module: import.module.to_string(),
+                field: import.name.to_string(),
+                entity_type,
+            });
+            self.num_imports += 1;
+        }
+
+        // Finally, add the entities we just generated.
+        self.types.extend(new_types);
+        self.imports.extend(new_imports);
+
+        Ok(true)
+    }
+
+    /// Returns the type of the entity at `index` within the index space
+    /// identified by `kind`.
+    ///
+    /// For functions the function's *type index* is not tracked by this
+    /// helper, so a `u32::MAX` placeholder is used instead.
+    fn type_of(&self, kind: ExportKind, index: u32) -> EntityType {
+        match kind {
+            ExportKind::Global => EntityType::Global(self.globals[index as usize]),
+            ExportKind::Memory => EntityType::Memory(self.memories[index as usize]),
+            ExportKind::Table => EntityType::Table(self.tables[index as usize]),
+            ExportKind::Func => {
+                let (_idx, ty) = &self.funcs[index as usize];
+                // `u32::MAX` is the modern spelling of the soft-deprecated
+                // `u32::max_value()` the original used.
+                EntityType::Func(u32::MAX, ty.clone())
+            }
+            ExportKind::Tag => EntityType::Tag(self.tags[index as usize].clone()),
+        }
+    }
+
+    /// Look up a type definition by index.
+    fn ty(&self, idx: u32) -> &Type {
+        &self.types[idx as usize]
+    }
+
+    /// Iterate over `(type_index, signature)` pairs for all function types.
+    fn func_types(&self) -> impl Iterator<Item = (u32, &FuncType)> + '_ {
+        self.func_types
+            .iter()
+            .copied()
+            .map(move |type_i| (type_i, &**self.func_type(type_i)))
+    }
+
+    /// Look up a function signature by type index. Infallible since `Type`
+    /// currently only has a `Func` variant.
+    fn func_type(&self, idx: u32) -> &Rc<FuncType> {
+        match self.ty(idx) {
+            Type::Func(f) => f,
+        }
+    }
+
+    /// Iterate over `(tag_index, tag_type)` pairs for all tags.
+    fn tags(&self) -> impl Iterator<Item = (u32, &TagType)> + '_ {
+        self.tags
+            .iter()
+            .enumerate()
+            .map(move |(i, ty)| (i as u32, ty))
+    }
+
+    /// Iterate over `(func_index, signature)` pairs for all functions,
+    /// imported and defined alike.
+    fn funcs(&self) -> impl Iterator<Item = (u32, &Rc<FuncType>)> + '_ {
+        self.funcs
+            .iter()
+            .enumerate()
+            .map(move |(i, (_, ty))| (i as u32, ty))
+    }
+
+    /// Whether at least one function type is usable as a tag signature.
+    fn has_tag_func_types(&self) -> bool {
+        self.tag_func_types().next().is_some()
+    }
+
+    /// Type indices usable by tags, i.e. function types with no results.
+    fn tag_func_types(&self) -> impl Iterator<Item = u32> + '_ {
+        self.func_types
+            .iter()
+            .copied()
+            .filter(move |i| self.func_type(*i).results.is_empty())
+    }
+
+    /// Choose one of the configured value types.
+    fn arbitrary_valtype(&self, u: &mut Unstructured) -> Result<ValType> {
+        Ok(*u.choose(&self.valtypes)?)
+    }
+
+    /// Generate an arbitrary global type over the configured value types.
+    fn arbitrary_global_type(&self, u: &mut Unstructured) -> Result<GlobalType> {
+        Ok(GlobalType {
+            val_type: self.arbitrary_valtype(u)?,
+            mutable: u.arbitrary()?,
+        })
+    }
+
+    /// Generate an arbitrary tag type drawn from the function types that
+    /// have no results.
+    fn arbitrary_tag_type(&self, u: &mut Unstructured) -> Result<TagType> {
+        let candidate_func_types: Vec<_> = self.tag_func_types().collect();
+        arbitrary_tag_type(u, &candidate_func_types, |ty_idx| {
+            self.func_type(ty_idx).clone()
+        })
+    }
+
+    /// Generate locally-defined tags, if the exceptions proposal is enabled
+    /// and a suitable signature exists.
+    fn arbitrary_tags(&mut self, u: &mut Unstructured) -> Result<()> {
+        if !self.config.exceptions_enabled() || !self.has_tag_func_types() {
+            return Ok(());
+        }
+
+        arbitrary_loop(u, self.config.min_tags(), self.config.max_tags(), |u| {
+            if !self.can_add_local_or_import_tag() {
+                return Ok(false);
+            }
+            self.tags.push(self.arbitrary_tag_type(u)?);
+            self.num_defined_tags += 1;
+            Ok(true)
+        })
+    }
+
+    /// Generate locally-defined functions by picking existing function types
+    /// at random; bodies are generated later by `arbitrary_code`.
+    fn arbitrary_funcs(&mut self, u: &mut Unstructured) -> Result<()> {
+        if self.func_types.is_empty() {
+            return Ok(());
+        }
+
+        arbitrary_loop(u, self.config.min_funcs(), self.config.max_funcs(), |u| {
+            if !self.can_add_local_or_import_func() {
+                return Ok(false);
+            }
+            let max = self.func_types.len() - 1;
+            let ty = self.func_types[u.int_in_range(0..=max)?];
+            self.funcs.push((ty, self.func_type(ty).clone()));
+            self.num_defined_funcs += 1;
+            Ok(true)
+        })
+    }
+
+    /// Generate locally-defined tables within the configured min/max bounds.
+    fn arbitrary_tables(&mut self, u: &mut Unstructured) -> Result<()> {
+        arbitrary_loop(
+            u,
+            self.config.min_tables() as usize,
+            self.config.max_tables() as usize,
+            |u| {
+                if !self.can_add_local_or_import_table() {
+                    return Ok(false);
+                }
+                self.num_defined_tables += 1;
+                let ty = arbitrary_table_type(u, self.config())?;
+                self.tables.push(ty);
+                Ok(true)
+            },
+        )
+    }
+
+    /// Generate locally-defined memories within the configured min/max
+    /// bounds.
+    fn arbitrary_memories(&mut self, u: &mut Unstructured) -> Result<()> {
+        arbitrary_loop(
+            u,
+            self.config.min_memories() as usize,
+            self.config.max_memories() as usize,
+            |u| {
+                if !self.can_add_local_or_import_memory() {
+                    return Ok(false);
+                }
+                self.num_defined_memories += 1;
+                self.memories.push(arbitrary_memtype(u, self.config())?);
+                Ok(true)
+            },
+        )
+    }
+
+    /// Generate locally-defined globals along with their initializer
+    /// expressions.
+    fn arbitrary_globals(&mut self, u: &mut Unstructured) -> Result<()> {
+        // Each choice produces one possible initializer for a global of the
+        // given value type.
+        let mut choices: Vec<Box<dyn Fn(&mut Unstructured, ValType) -> Result<GlobalInitExpr>>> =
+            vec![];
+        // At this point `self.globals` only contains imported globals.
+        let num_imported_globals = self.globals.len();
+
+        arbitrary_loop(
+            u,
+            self.config.min_globals(),
+            self.config.max_globals(),
+            |u| {
+                if !self.can_add_local_or_import_global() {
+                    return Ok(false);
+                }
+
+                let ty = self.arbitrary_global_type(u)?;
+
+                choices.clear();
+                let num_funcs = self.funcs.len() as u32;
+                // Always-available choice: an arbitrary constant of the
+                // global's type (or a function reference / null for ref types).
+                choices.push(Box::new(move |u, ty| {
+                    Ok(GlobalInitExpr::ConstExpr(match ty {
+                        ValType::I32 => ConstExpr::i32_const(u.arbitrary()?),
+                        ValType::I64 => ConstExpr::i64_const(u.arbitrary()?),
+                        ValType::F32 => ConstExpr::f32_const(u.arbitrary()?),
+                        ValType::F64 => ConstExpr::f64_const(u.arbitrary()?),
+                        ValType::V128 => ConstExpr::v128_const(u.arbitrary()?),
+                        ValType::Ref(ty) => {
+                            assert!(ty.nullable);
+                            if ty.heap_type == HeapType::Func && num_funcs > 0 && u.arbitrary()? {
+                                let func = u.int_in_range(0..=num_funcs - 1)?;
+                                return Ok(GlobalInitExpr::FuncRef(func));
+                            }
+                            ConstExpr::ref_null(ty.heap_type)
+                        }
+                    }))
+                }));
+
+                // A defined global may also be initialized from an imported,
+                // immutable global of the same value type.
+                for (i, g) in self.globals[..num_imported_globals].iter().enumerate() {
+                    if !g.mutable && g.val_type == ty.val_type {
+                        choices.push(Box::new(move |_, _| {
+                            Ok(GlobalInitExpr::ConstExpr(ConstExpr::global_get(i as u32)))
+                        }));
+                    }
+                }
+
+                let f = u.choose(&choices)?;
+                let expr = f(u, ty.val_type)?;
+                let global_idx = self.globals.len() as u32;
+                self.globals.push(ty);
+                self.defined_globals.push((global_idx, expr));
+                Ok(true)
+            },
+        )
+    }
+
+    /// Generate exports, either everything (when configured) or an arbitrary
+    /// subset within the type-size budget.
+    fn arbitrary_exports(&mut self, u: &mut Unstructured) -> Result<()> {
+        if self.config.max_type_size() < self.type_size && !self.config.export_everything() {
+            return Ok(());
+        }
+
+        // Build up a list of candidates for each class of import
+        // (only four kinds are pushed here despite the capacity of 6).
+        let mut choices: Vec<Vec<(ExportKind, u32)>> = Vec::with_capacity(6);
+        choices.push(
+            (0..self.funcs.len())
+                .map(|i| (ExportKind::Func, i as u32))
+                .collect(),
+        );
+        choices.push(
+            (0..self.tables.len())
+                .map(|i| (ExportKind::Table, i as u32))
+                .collect(),
+        );
+        choices.push(
+            (0..self.memories.len())
+                .map(|i| (ExportKind::Memory, i as u32))
+                .collect(),
+        );
+        choices.push(
+            (0..self.globals.len())
+                .map(|i| (ExportKind::Global, i as u32))
+                .collect(),
+        );
+
+        let mut export_names = HashSet::new();
+
+        // If the configuration demands exporting everything, we do so here and
+        // early-return.
+        if self.config.export_everything() {
+            for choices_by_kind in choices {
+                for (kind, idx) in choices_by_kind {
+                    let name = unique_string(1_000, &mut export_names, u)?;
+                    self.add_arbitrary_export(name, kind, idx)?;
+                }
+            }
+            return Ok(());
+        }
+
+        arbitrary_loop(
+            u,
+            self.config.min_exports(),
+            self.config.max_exports(),
+            |u| {
+                // Remove all candidates for export whose type size exceeds our
+                // remaining budget for type size. Then also remove any classes
+                // of exports which no longer have any candidates.
+                //
+                // If there's nothing remaining after this, then we're done.
+                let max_size = self.config.max_type_size() - self.type_size;
+                for list in choices.iter_mut() {
+                    list.retain(|(kind, idx)| self.type_of(*kind, *idx).size() + 1 < max_size);
+                }
+                choices.retain(|list| !list.is_empty());
+                if choices.is_empty() {
+                    return Ok(false);
+                }
+
+                // Pick a name, then pick the export, and then we can record
+                // information about the chosen export.
+                let name = unique_string(1_000, &mut export_names, u)?;
+                let list = u.choose(&choices)?;
+                let (kind, idx) = *u.choose(list)?;
+                self.add_arbitrary_export(name, kind, idx)?;
+                Ok(true)
+            },
+        )
+    }
+
+    /// Record a single export, charging its type size against the budget;
+    /// errors if the budget would be exceeded.
+    fn add_arbitrary_export(&mut self, name: String, kind: ExportKind, idx: u32) -> Result<()> {
+        let ty = self.type_of(kind, idx);
+        self.type_size += 1 + ty.size();
+        if self.type_size <= self.config.max_type_size() {
+            self.exports.push((name, kind, idx));
+            Ok(())
+        } else {
+            // If our addition of exports takes us above the allowed number of
+            // types, we fail; this error code is not the most illustrative of
+            // the cause but is the best available from `arbitrary`.
+            Err(arbitrary::Error::IncorrectFormat)
+        }
+    }
+
+    /// Optionally pick a start function.
+    ///
+    /// Only functions with an empty signature (no parameters and no results)
+    /// are eligible, and a start section is only emitted when the
+    /// configuration allows it and the input bytes ask for one.
+    fn arbitrary_start(&mut self, u: &mut Unstructured) -> Result<()> {
+        if !self.config.allow_start_export() {
+            return Ok(());
+        }
+
+        // `len()` already yields `usize`; the previous `as usize` cast here
+        // was a no-op and has been removed.
+        let mut choices = Vec::with_capacity(self.funcs.len());
+
+        for (func_idx, ty) in self.funcs() {
+            if ty.params.is_empty() && ty.results.is_empty() {
+                choices.push(func_idx);
+            }
+        }
+
+        if !choices.is_empty() && u.arbitrary().unwrap_or(false) {
+            let f = *u.choose(&choices)?;
+            self.start = Some(f);
+        }
+
+        Ok(())
+    }
+
+    /// Generate element segments for the module's tables.
+    fn arbitrary_elems(&mut self, u: &mut Unstructured) -> Result<()> {
+        let func_max = self.funcs.len() as u32;
+
+        // Create a helper closure to choose an arbitrary offset.
+        let mut offset_global_choices = vec![];
+        if !self.config.disallow_traps() {
+            // Imported, immutable i32 globals may serve as segment offsets.
+            for (i, g) in self.globals[..self.globals.len() - self.defined_globals.len()]
+                .iter()
+                .enumerate()
+            {
+                if !g.mutable && g.val_type == ValType::I32 {
+                    offset_global_choices.push(i as u32);
+                }
+            }
+        }
+        let arbitrary_active_elem = |u: &mut Unstructured,
+                                     min_mem_size: u32,
+                                     table: Option<u32>,
+                                     disallow_traps: bool,
+                                     table_ty: &TableType| {
+            let (offset, max_size_hint) = if !offset_global_choices.is_empty() && u.arbitrary()? {
+                let g = u.choose(&offset_global_choices)?;
+                (Offset::Global(*g), None)
+            } else {
+                let max_mem_size = if disallow_traps {
+                    table_ty.minimum
+                } else {
+                    u32::MAX
+                };
+                let offset =
+                    arbitrary_offset(u, min_mem_size.into(), max_mem_size.into(), 0)? as u32;
+                // Bias towards in-bounds segments so instantiation usually
+                // succeeds.
+                let max_size_hint = if disallow_traps
+                    || (offset <= min_mem_size && u.int_in_range(0..=CHANCE_OFFSET_INBOUNDS)? != 0)
+                {
+                    Some(min_mem_size - offset)
+                } else {
+                    None
+                };
+                (Offset::Const32(offset as i32), max_size_hint)
+            };
+            Ok((ElementKind::Active { table, offset }, max_size_hint))
+        };
+
+        type GenElemSegment<'a> =
+            dyn Fn(&mut Unstructured) -> Result<(ElementKind, Option<u32>)> + 'a;
+        let mut funcrefs: Vec<Box<GenElemSegment>> = Vec::new();
+        let mut externrefs: Vec<Box<GenElemSegment>> = Vec::new();
+        let disallow_traps = self.config().disallow_traps();
+        for (i, ty) in self.tables.iter().enumerate() {
+            // If this table starts with no capacity then any non-empty element
+            // segment placed onto it will immediately trap, which isn't
+            // too interesting. If that's the case give it an unlikely chance
+            // of proceeding.
+            if ty.minimum == 0 && u.int_in_range(0..=CHANCE_SEGMENT_ON_EMPTY)? != 0 {
+                continue;
+            }
+
+            let dst = if ty.element_type == RefType::FUNCREF {
+                &mut funcrefs
+            } else {
+                &mut externrefs
+            };
+            let minimum = ty.minimum;
+            // If the first table is a funcref table then it's a candidate for
+            // the MVP encoding of element segments.
+            if i == 0 && ty.element_type == RefType::FUNCREF {
+                dst.push(Box::new(move |u| {
+                    arbitrary_active_elem(u, minimum, None, disallow_traps, ty)
+                }));
+            }
+            if self.config.bulk_memory_enabled() {
+                let idx = Some(i as u32);
+                dst.push(Box::new(move |u| {
+                    arbitrary_active_elem(u, minimum, idx, disallow_traps, ty)
+                }));
+            }
+        }
+
+        // Reference types allows us to create passive and declared element
+        // segments.
+        if self.config.reference_types_enabled() {
+            funcrefs.push(Box::new(|_| Ok((ElementKind::Passive, None))));
+            externrefs.push(Box::new(|_| Ok((ElementKind::Passive, None))));
+            funcrefs.push(Box::new(|_| Ok((ElementKind::Declared, None))));
+            externrefs.push(Box::new(|_| Ok((ElementKind::Declared, None))));
+        }
+
+        let mut choices = Vec::new();
+        if !funcrefs.is_empty() {
+            choices.push((&funcrefs, RefType::FUNCREF));
+        }
+        if !externrefs.is_empty() {
+            choices.push((&externrefs, RefType::EXTERNREF));
+        }
+
+        if choices.is_empty() {
+            return Ok(());
+        }
+        arbitrary_loop(
+            u,
+            self.config.min_element_segments(),
+            self.config.max_element_segments(),
+            |u| {
+                // Choose whether to generate a segment whose elements are initialized via
+                // expressions, or one whose elements are initialized via function indices.
+                let (kind_candidates, ty) = *u.choose(&choices)?;
+
+                // Select a kind for this segment now that we know the number of
+                // items the segment will hold.
+                let (kind, max_size_hint) = u.choose(kind_candidates)?(u)?;
+                let max = max_size_hint
+                    .map(|i| usize::try_from(i).unwrap())
+                    .unwrap_or_else(|| self.config.max_elements());
+
+                // Pick whether we're going to use expression elements or
+                // indices. Note that externrefs must use expressions,
+                // and functions without reference types must use indices.
+                let items = if ty == RefType::EXTERNREF
+                    || (self.config.reference_types_enabled() && u.arbitrary()?)
+                {
+                    let mut init = vec![];
+                    arbitrary_loop(u, self.config.min_elements(), max, |u| {
+                        init.push(
+                            if ty == RefType::EXTERNREF || func_max == 0 || u.arbitrary()? {
+                                None
+                            } else {
+                                Some(u.int_in_range(0..=func_max - 1)?)
+                            },
+                        );
+                        Ok(true)
+                    })?;
+                    Elements::Expressions(init)
+                } else {
+                    let mut init = vec![];
+                    if func_max > 0 {
+                        arbitrary_loop(u, self.config.min_elements(), max, |u| {
+                            let func_idx = u.int_in_range(0..=func_max - 1)?;
+                            init.push(func_idx);
+                            Ok(true)
+                        })?;
+                    }
+                    Elements::Functions(init)
+                };
+
+                self.elems.push(ElementSegment { kind, ty, items });
+                Ok(true)
+            },
+        )
+    }
+
+    /// Generate bodies for every locally-defined function.
+    fn arbitrary_code(&mut self, u: &mut Unstructured, allow_invalid: bool) -> Result<()> {
+        self.code.reserve(self.num_defined_funcs);
+        let mut allocs = CodeBuilderAllocations::new(self);
+        // Only the tail of `self.funcs` is locally defined; the front holds
+        // imported functions, which have no body.
+        for (_, ty) in self.funcs[self.funcs.len() - self.num_defined_funcs..].iter() {
+            let body = self.arbitrary_func_body(u, ty, &mut allocs, allow_invalid)?;
+            self.code.push(body);
+        }
+        Ok(())
+    }
+
+    /// Generate a single function body: its locals plus either well-typed
+    /// generated instructions or, when `allow_invalid` is set and the input
+    /// asks for it, completely arbitrary bytes.
+    fn arbitrary_func_body(
+        &self,
+        u: &mut Unstructured,
+        ty: &FuncType,
+        allocs: &mut CodeBuilderAllocations,
+        allow_invalid: bool,
+    ) -> Result<Code> {
+        let mut locals = self.arbitrary_locals(u)?;
+        let builder = allocs.builder(ty, &mut locals);
+        let instructions = if allow_invalid && u.arbitrary().unwrap_or(false) {
+            Instructions::Arbitrary(arbitrary_vec_u8(u)?)
+        } else {
+            Instructions::Generated(builder.arbitrary(u, self)?)
+        };
+
+        Ok(Code {
+            locals,
+            instructions,
+        })
+    }
+
+    /// Generate an arbitrary list of up to 100 local variable types.
+    fn arbitrary_locals(&self, u: &mut Unstructured) -> Result<Vec<ValType>> {
+        let mut ret = Vec::new();
+        arbitrary_loop(u, 0, 100, |u| {
+            ret.push(self.arbitrary_valtype(u)?);
+            Ok(true)
+        })?;
+        Ok(ret)
+    }
+
+    /// Generate data segments for the module's memories.
+    fn arbitrary_data(&mut self, u: &mut Unstructured) -> Result<()> {
+        // With bulk-memory we can generate passive data, otherwise if there are
+        // no memories we can't generate any data.
+        let memories = self.memories.len() as u32;
+        if memories == 0 && !self.config.bulk_memory_enabled() {
+            return Ok(());
+        }
+        let disallow_traps = self.config.disallow_traps();
+        // Offset generators for 32-bit memories; each takes the minimum
+        // memory size (in pages) and the segment length.
+        let mut choices32: Vec<Box<dyn Fn(&mut Unstructured, u64, usize) -> Result<Offset>>> =
+            vec![];
+        choices32.push(Box::new(|u, min_size, data_len| {
+            let min = u32::try_from(min_size.saturating_mul(64 * 1024))
+                .unwrap_or(u32::MAX)
+                .into();
+            let max = if disallow_traps { min } else { u32::MAX.into() };
+            Ok(Offset::Const32(
+                arbitrary_offset(u, min, max, data_len)? as i32
+            ))
+        }));
+        // Same, but for 64-bit memories.
+        let mut choices64: Vec<Box<dyn Fn(&mut Unstructured, u64, usize) -> Result<Offset>>> =
+            vec![];
+        choices64.push(Box::new(|u, min_size, data_len| {
+            let min = min_size.saturating_mul(64 * 1024);
+            let max = if disallow_traps { min } else { u64::MAX };
+            Ok(Offset::Const64(
+                arbitrary_offset(u, min, max, data_len)? as i64
+            ))
+        }));
+        if !self.config().disallow_traps() {
+            // Imported, immutable integer globals may serve as offsets too.
+            for (i, g) in self.globals[..self.globals.len() - self.defined_globals.len()]
+                .iter()
+                .enumerate()
+            {
+                if g.mutable {
+                    continue;
+                }
+                if g.val_type == ValType::I32 {
+                    choices32.push(Box::new(move |_, _, _| Ok(Offset::Global(i as u32))));
+                } else if g.val_type == ValType::I64 {
+                    choices64.push(Box::new(move |_, _, _| Ok(Offset::Global(i as u32))));
+                }
+            }
+        }
+
+        // Build a list of candidate memories that we'll add data initializers
+        // for. If a memory doesn't have an initial size then any initializers
+        // for that memory will trap instantiation, which isn't too
+        // interesting. Try to make this happen less often by making it less
+        // likely that a memory with 0 size will have a data segment.
+        let mut memories = Vec::new();
+        for (i, mem) in self.memories.iter().enumerate() {
+            if mem.minimum > 0 || u.int_in_range(0..=CHANCE_SEGMENT_ON_EMPTY)? == 0 {
+                memories.push(i as u32);
+            }
+        }
+
+        // With memories we can generate data segments, and with bulk memory we
+        // can generate passive segments. Without these though we can't create
+        // a valid module with data segments.
+        if memories.is_empty() && !self.config.bulk_memory_enabled() {
+            return Ok(());
+        }
+
+        arbitrary_loop(
+            u,
+            self.config.min_data_segments(),
+            self.config.max_data_segments(),
+            |u| {
+                let mut init: Vec<u8> = u.arbitrary()?;
+
+                // Passive data can only be generated if bulk memory is enabled.
+                // Otherwise if there are no memories we *only* generate passive
+                // data. Finally if all conditions are met we use an input byte to
+                // determine if it should be passive or active.
+                let kind = if self.config.bulk_memory_enabled()
+                    && (memories.is_empty() || u.arbitrary()?)
+                {
+                    DataSegmentKind::Passive
+                } else {
+                    let memory_index = *u.choose(&memories)?;
+                    let mem = &self.memories[memory_index as usize];
+                    let f = if mem.memory64 {
+                        u.choose(&choices64)?
+                    } else {
+                        u.choose(&choices32)?
+                    };
+                    let mut offset = f(u, mem.minimum, init.len())?;
+
+                    // If traps are disallowed then truncate the size of the
+                    // data segment to the minimum size of memory to guarantee
+                    // it will fit. Afterwards ensure that the offset of the
+                    // data segment is in-bounds by clamping it to the minimum
+                    // memory size minus the segment's length.
+                    if self.config.disallow_traps() {
+                        let max_size = (u64::MAX / 64 / 1024).min(mem.minimum) * 64 * 1024;
+                        init.truncate(max_size as usize);
+                        let max_offset = max_size - init.len() as u64;
+                        match &mut offset {
+                            Offset::Const32(x) => {
+                                *x = (*x as u64).min(max_offset) as i32;
+                            }
+                            Offset::Const64(x) => {
+                                *x = (*x as u64).min(max_offset) as i64;
+                            }
+                            Offset::Global(_) => unreachable!(),
+                        }
+                    }
+                    DataSegmentKind::Active {
+                        offset,
+                        memory_index,
+                    }
+                };
+                self.data.push(DataSegment { kind, init });
+                Ok(true)
+            },
+        )
+    }
+
+    /// Expand a block type into its `(params, results)` signature.
+    fn params_results(&self, ty: &BlockType) -> (Vec<ValType>, Vec<ValType>) {
+        match ty {
+            BlockType::Empty => (Vec::new(), Vec::new()),
+            BlockType::Result(t) => (Vec::new(), vec![*t]),
+            BlockType::FunctionType(ty_idx) => {
+                let func_ty = self.func_type(*ty_idx);
+                (func_ty.params.clone(), func_ty.results.clone())
+            }
+        }
+    }
+}
+
+/// Generate a `(minimum, maximum)` pair of 32-bit limits.
+///
+/// Delegates to the 64-bit helper and narrows the results back down; the
+/// conversions cannot fail because the inputs were 32-bit to begin with.
+pub(crate) fn arbitrary_limits32(
+    u: &mut Unstructured,
+    min_minimum: Option<u32>,
+    max_minimum: u32,
+    max_required: bool,
+    max_inbounds: u32,
+) -> Result<(u32, Option<u32>)> {
+    let (min64, max64) = arbitrary_limits64(
+        u,
+        min_minimum.map(u64::from),
+        u64::from(max_minimum),
+        max_required,
+        u64::from(max_inbounds),
+    )?;
+    let min = u32::try_from(min64).unwrap();
+    let max = max64.map(|m| u32::try_from(m).unwrap());
+    Ok((min, max))
+}
+
+/// Generate a `(minimum, maximum)` pair of 64-bit limits.
+///
+/// The minimum is biased towards staying within `max_inbounds` via
+/// `gradually_grow`; a maximum is generated only when required or when the
+/// input bytes ask for one.
+pub(crate) fn arbitrary_limits64(
+    u: &mut Unstructured,
+    min_minimum: Option<u64>,
+    max_minimum: u64,
+    max_required: bool,
+    max_inbounds: u64,
+) -> Result<(u64, Option<u64>)> {
+    let min = gradually_grow(u, min_minimum.unwrap_or(0), max_inbounds, max_minimum)?;
+    let max = if max_required || u.arbitrary().unwrap_or(false) {
+        Some(u.int_in_range(min..=max_minimum)?)
+    } else {
+        None
+    };
+    Ok((min, max))
+}
+
+/// Compute the list of value types permitted by `config`.
+///
+/// The four core numeric types are always present; `v128` and the reference
+/// types are appended only when the corresponding proposals are enabled.
+pub(crate) fn configured_valtypes(config: &dyn Config) -> Vec<ValType> {
+    let mut valtypes = vec![ValType::I32, ValType::I64, ValType::F32, ValType::F64];
+    if config.simd_enabled() {
+        valtypes.push(ValType::V128);
+    }
+    if config.reference_types_enabled() {
+        valtypes.extend([ValType::EXTERNREF, ValType::FUNCREF]);
+    }
+    valtypes
+}
+
+/// Generate an arbitrary function signature whose params/results are drawn
+/// from `valtypes`.
+///
+/// Up to 20 parameters are generated; results are capped at `max_results`
+/// when provided, and at 20 otherwise.
+pub(crate) fn arbitrary_func_type(
+    u: &mut Unstructured,
+    valtypes: &[ValType],
+    max_results: Option<usize>,
+) -> Result<Rc<FuncType>> {
+    let mut params = Vec::new();
+    let mut results = Vec::new();
+    arbitrary_loop(u, 0, 20, |u| {
+        params.push(arbitrary_valtype(u, valtypes)?);
+        Ok(true)
+    })?;
+    let result_cap = max_results.unwrap_or(20);
+    arbitrary_loop(u, 0, result_cap, |u| {
+        results.push(arbitrary_valtype(u, valtypes)?);
+        Ok(true)
+    })?;
+    Ok(Rc::new(FuncType { params, results }))
+}
+
+/// Uniformly pick one value type out of the allowed set.
+fn arbitrary_valtype(u: &mut Unstructured, valtypes: &[ValType]) -> Result<ValType> {
+    let chosen = u.choose(valtypes)?;
+    Ok(*chosen)
+}
+
+/// Generate an arbitrary table type respecting the given configuration.
+pub(crate) fn arbitrary_table_type(u: &mut Unstructured, config: &dyn Config) -> Result<TableType> {
+    // Favor modestly-sized tables: cap the "likely" element count well below
+    // the configured maximum so generated modules stay small on average.
+    let max_inbounds = 10_000;
+    // With traps disallowed, a zero-sized table would make most table
+    // operations trap, so force at least one element.
+    let min_elements = if config.disallow_traps() { Some(1) } else { None };
+    let max_elements = min_elements.unwrap_or(0).max(config.max_table_elements());
+    let (minimum, maximum) = arbitrary_limits32(
+        u,
+        min_elements,
+        max_elements,
+        config.table_max_size_required(),
+        max_inbounds.min(max_elements),
+    )?;
+    if config.disallow_traps() {
+        assert!(minimum > 0);
+    }
+    // Only pick externref tables when the reference-types proposal is on;
+    // otherwise MVP wasm permits funcref tables only.
+    let element_type = if config.reference_types_enabled() {
+        *u.choose(&[RefType::FUNCREF, RefType::EXTERNREF])?
+    } else {
+        RefType::FUNCREF
+    };
+    Ok(TableType {
+        element_type,
+        minimum,
+        maximum,
+    })
+}
+
+/// Generate an arbitrary memory type respecting the given configuration.
+pub(crate) fn arbitrary_memtype(u: &mut Unstructured, config: &dyn Config) -> Result<MemoryType> {
+    // Shared memories require the threads proposal, and even then are only
+    // generated about a quarter of the time.
+    let shared = config.threads_enabled() && u.ratio(1, 4)?;
+    // 64-bit memories are opt-in via the memory64 proposal.
+    let memory64 = config.memory64_enabled() && u.arbitrary()?;
+    // Split a ~16k-page "likely" budget across however many memories the
+    // config allows, so that memories <= 1gb in size are favored overall.
+    let max_inbounds = 16 * 1024 / u64::try_from(config.max_memories()).unwrap();
+    // With traps disallowed, a zero-page memory would make every load/store
+    // trap, so require at least one page.
+    let min_pages = if config.disallow_traps() { Some(1) } else { None };
+    let max_pages = min_pages
+        .unwrap_or(0)
+        .max(config.max_memory_pages(memory64));
+    // Shared memories must declare a maximum, hence `|| shared` below.
+    let (minimum, maximum) = arbitrary_limits64(
+        u,
+        min_pages,
+        max_pages,
+        config.memory_max_size_required() || shared,
+        max_inbounds.min(max_pages),
+    )?;
+    Ok(MemoryType {
+        minimum,
+        maximum,
+        memory64,
+        shared,
+    })
+}
+
+/// Generate an arbitrary tag type whose signature is one of the candidate
+/// function-type indices.
+///
+/// NOTE(review): assumes `candidate_func_types` is non-empty — the `- 1`
+/// below underflows otherwise; verify callers uphold this.
+pub(crate) fn arbitrary_tag_type(
+    u: &mut Unstructured,
+    candidate_func_types: &[u32],
+    get_func_type: impl FnOnce(u32) -> Rc<FuncType>,
+) -> Result<TagType> {
+    let last = candidate_func_types.len() - 1;
+    let func_type_idx = candidate_func_types[u.int_in_range(0..=last)?];
+    Ok(TagType {
+        func_type_idx,
+        func_type: get_func_type(func_type_idx),
+    })
+}
+
+/// This function generates a number between `min` and `max`, favoring values
+/// between `min` and `max_inbounds`.
+///
+/// The thinking behind this function is that it's used for things like offsets
+/// and minimum sizes which, when very large, can trivially make the wasm oom or
+/// abort with a trap. This isn't the most interesting thing to do so it tries
+/// to favor numbers in the `min..max_inbounds` range to avoid immediate ooms.
+fn gradually_grow(u: &mut Unstructured, min: u64, max_inbounds: u64, max: u64) -> Result<u64> {
+    // Degenerate range: there is only one possible answer.
+    if min == max {
+        return Ok(min);
+    }
+    let min = min as f64;
+    let max = max as f64;
+    let max_inbounds = max_inbounds as f64;
+    // Draw a full 32-bit value so the mapping input covers the whole
+    // `u32::MIN..u32::MAX` range uniformly.
+    let x = u.arbitrary::<u32>()?;
+    let x = f64::from(x);
+    let x = map_custom(
+        x,
+        f64::from(u32::MIN)..f64::from(u32::MAX),
+        min..max_inbounds,
+        min..max,
+    );
+    return Ok(x.round() as u64);
+
+    /// Map a value from within the input range to the output range(s).
+    ///
+    /// This will first map the input range into the `0..1` input range, and
+    /// then depending on the value it will either map it exponentially
+    /// (favoring small values) into the `output_inbounds` range or it will map
+    /// it into the `output` range.
+    fn map_custom(
+        value: f64,
+        input: Range<f64>,
+        output_inbounds: Range<f64>,
+        output: Range<f64>,
+    ) -> f64 {
+        // Precondition checks: `value` must lie within `input`, and the
+        // in-bounds range must be contained within the full output range.
+        assert!(!value.is_nan(), "{}", value);
+        assert!(value.is_finite(), "{}", value);
+        assert!(input.start < input.end, "{} < {}", input.start, input.end);
+        assert!(
+            output.start < output.end,
+            "{} < {}",
+            output.start,
+            output.end
+        );
+        assert!(value >= input.start, "{} >= {}", value, input.start);
+        assert!(value <= input.end, "{} <= {}", value, input.end);
+        assert!(
+            output.start <= output_inbounds.start,
+            "{} <= {}",
+            output.start,
+            output_inbounds.start
+        );
+        assert!(
+            output_inbounds.end <= output.end,
+            "{} <= {}",
+            output_inbounds.end,
+            output.end
+        );
+
+        // Normalize `value` into `0..1`, then split on PCT_INBOUNDS
+        // (a constant defined elsewhere in this module — see its definition
+        // for the exact split): below it, take the biased in-bounds path;
+        // above it, fall back to a plain linear map over the full range.
+        let x = map_linear(value, input, 0.0..1.0);
+        let result = if x < PCT_INBOUNDS {
+            if output_inbounds.start == output_inbounds.end {
+                output_inbounds.start
+            } else {
+                // Raising to the sixth power skews the distribution heavily
+                // toward the low end of the in-bounds range.
+                let unscaled = x * x * x * x * x * x;
+                map_linear(unscaled, 0.0..1.0, output_inbounds)
+            }
+        } else {
+            map_linear(x, 0.0..1.0, output.clone())
+        };
+
+        assert!(result >= output.start, "{} >= {}", result, output.start);
+        assert!(result <= output.end, "{} <= {}", result, output.end);
+        result
+    }
+
+    /// Map a value from within the input range linearly to the output range.
+    ///
+    /// For example, mapping `0.5` from the input range `0.0..1.0` to the output
+    /// range `1.0..3.0` produces `2.0`.
+    fn map_linear(
+        value: f64,
+        Range {
+            start: in_low,
+            end: in_high,
+        }: Range<f64>,
+        Range {
+            start: out_low,
+            end: out_high,
+        }: Range<f64>,
+    ) -> f64 {
+        assert!(!value.is_nan(), "{}", value);
+        assert!(value.is_finite(), "{}", value);
+        assert!(in_low < in_high, "{} < {}", in_low, in_high);
+        assert!(out_low < out_high, "{} < {}", out_low, out_high);
+        assert!(value >= in_low, "{} >= {}", value, in_low);
+        assert!(value <= in_high, "{} <= {}", value, in_high);
+
+        // Standard linear interpolation: out_low + slope * (value - in_low).
+        let dividend = out_high - out_low;
+        let divisor = in_high - in_low;
+        let slope = dividend / divisor;
+        let result = out_low + (slope * (value - in_low));
+
+        assert!(result >= out_low, "{} >= {}", result, out_low);
+        assert!(result <= out_high, "{} <= {}", result, out_high);
+        result
+    }
+}
+
+/// Selects a reasonable offset for an element or data segment. This favors
+/// having the segment be in-bounds, but it may still generate any offset.
+fn arbitrary_offset(
+    u: &mut Unstructured,
+    limit_min: u64,
+    limit_max: u64,
+    segment_size: usize,
+) -> Result<u64> {
+    let segment_len = u64::try_from(segment_size).unwrap();
+    if segment_len > limit_min {
+        // The segment can't fit within the minimum size no matter the
+        // offset, so don't bother biasing: pick anything in range.
+        u.int_in_range(0..=limit_max)
+    } else {
+        // Bias toward offsets that keep the whole segment in bounds.
+        gradually_grow(u, 0, limit_min - segment_len, limit_max)
+    }
+}
+
+/// Generate a (module, field) pair of import-name strings.
+///
+/// NOTE(review): nothing here enforces uniqueness across calls despite the
+/// name — presumably callers handle collisions; verify at call sites.
+fn unique_import_strings(max_size: usize, u: &mut Unstructured) -> Result<(String, String)> {
+    let module_name = limited_string(max_size, u)?;
+    let field_name = limited_string(max_size, u)?;
+    Ok((module_name, field_name))
+}
+
+/// Generate an arbitrary byte vector whose length is itself arbitrary.
+fn arbitrary_vec_u8(u: &mut Unstructured) -> Result<Vec<u8>> {
+    let len = u.arbitrary_len::<u8>()?;
+    let bytes = u.bytes(len)?;
+    Ok(bytes.to_vec())
+}
+
+/// Convert a wasmparser `ValType` into the equivalent `wasm_encoder::ValType`.
+fn convert_type(parsed_type: wasmparser::ValType) -> ValType {
+    match parsed_type {
+        wasmparser::ValType::I32 => ValType::I32,
+        wasmparser::ValType::I64 => ValType::I64,
+        wasmparser::ValType::F32 => ValType::F32,
+        wasmparser::ValType::F64 => ValType::F64,
+        wasmparser::ValType::V128 => ValType::V128,
+        wasmparser::ValType::Ref(r) => ValType::Ref(convert_reftype(r)),
+    }
+}
+
+/// Convert a wasmparser `RefType` into the equivalent `wasm_encoder::RefType`,
+/// preserving nullability and the heap type.
+fn convert_reftype(ty: wasmparser::RefType) -> RefType {
+    let heap_type = match ty.heap_type {
+        wasmparser::HeapType::Func => wasm_encoder::HeapType::Func,
+        wasmparser::HeapType::Extern => wasm_encoder::HeapType::Extern,
+        wasmparser::HeapType::TypedFunc(i) => wasm_encoder::HeapType::TypedFunc(i.into()),
+    };
+    wasm_encoder::RefType {
+        nullable: ty.nullable,
+        heap_type,
+    }
+}
+
+impl EntityType {
+    /// Relative size of this entity: one unit for every kind, plus one per
+    /// parameter and result for functions.
+    fn size(&self) -> u32 {
+        match self {
+            EntityType::Func(_, ty) => 1 + (ty.params.len() + ty.results.len()) as u32,
+            EntityType::Tag(_)
+            | EntityType::Global(_)
+            | EntityType::Table(_)
+            | EntityType::Memory(_) => 1,
+        }
+    }
+}
+
+// A helper structure used when generating module/instance types to limit the
+// amount of each kind of import created.
+#[derive(Default, Clone, Copy, PartialEq)]
+struct Entities {
+    // Running count of each kind of entity generated so far.
+    globals: usize,
+    memories: usize,
+    tables: usize,
+    funcs: usize,
+    tags: usize,
+}
+
+/// A container for the kinds of instructions that wasm-smith is allowed to
+/// emit.
+///
+/// # Example
+///
+/// ```
+/// # use wasm_smith::{InstructionKinds, InstructionKind};
+/// let kinds = InstructionKinds::new(&[InstructionKind::Numeric, InstructionKind::Memory]);
+/// assert!(kinds.contains(InstructionKind::Memory));
+/// ```
+#[derive(Clone, Copy, Debug, Default)]
+pub struct InstructionKinds(pub(crate) FlagSet<InstructionKind>);
+
+impl InstructionKinds {
+    /// Create a new container holding exactly the given kinds.
+    pub fn new(kinds: &[InstructionKind]) -> Self {
+        let mut set = FlagSet::default();
+        for kind in kinds {
+            set |= *kind;
+        }
+        Self(set)
+    }
+
+    /// Include all [InstructionKind]s.
+    pub fn all() -> Self {
+        Self(FlagSet::full())
+    }
+
+    /// Include no [InstructionKind]s.
+    pub fn none() -> Self {
+        Self(FlagSet::default())
+    }
+
+    /// Check if the [InstructionKind] is contained in this set.
+    #[inline]
+    pub fn contains(&self, kind: InstructionKind) -> bool {
+        self.0.contains(kind)
+    }
+}
+
+// `flags!` is presumably the macro from the crate providing `FlagSet` above
+// (verify against this file's imports); it generates a bitflag-capable enum
+// so kinds can be combined into an `InstructionKinds` set.
+flags! {
+    /// Enumerate the categories of instructions defined in the [WebAssembly
+    /// specification](https://webassembly.github.io/spec/core/syntax/instructions.html).
+    #[allow(missing_docs)]
+    #[cfg_attr(feature = "_internal_cli", derive(serde::Deserialize))]
+    pub enum InstructionKind: u16 {
+        Numeric,
+        Vector,
+        Reference,
+        Parametric,
+        Variable,
+        Table,
+        Memory,
+        Control,
+    }
+}
+
+impl FromStr for InstructionKind {
+    type Err = String;
+
+    /// Parse a case-insensitive instruction-kind name (e.g. "memory").
+    fn from_str(s: &str) -> std::result::Result<Self, Self::Err> {
+        let lowered = s.to_lowercase();
+        let kind = match lowered.as_str() {
+            "numeric" => InstructionKind::Numeric,
+            "vector" => InstructionKind::Vector,
+            "reference" => InstructionKind::Reference,
+            "parametric" => InstructionKind::Parametric,
+            "variable" => InstructionKind::Variable,
+            "table" => InstructionKind::Table,
+            "memory" => InstructionKind::Memory,
+            "control" => InstructionKind::Control,
+            // Report the caller's original spelling, not the lowercased copy.
+            _ => return Err(format!("unknown instruction kind: {}", s)),
+        };
+        Ok(kind)
+    }
+}
diff --git a/third_party/rust/wasm-smith/src/core/code_builder.rs b/third_party/rust/wasm-smith/src/core/code_builder.rs
new file mode 100644
index 0000000000..1ac01c1af7
--- /dev/null
+++ b/third_party/rust/wasm-smith/src/core/code_builder.rs
@@ -0,0 +1,5355 @@
+use super::{
+ Elements, FuncType, GlobalInitExpr, Instruction, InstructionKind::*, InstructionKinds, Module,
+ ValType,
+};
+use arbitrary::{Result, Unstructured};
+use std::collections::{BTreeMap, BTreeSet};
+use std::convert::TryFrom;
+use std::rc::Rc;
+use wasm_encoder::{BlockType, MemArg, RefType};
+mod no_traps;
+
+// Macro that expands a weighted table of `(predicate, generator, kind
+// [, cost])` entries into `NUM_OPTIONS` and the `choose_instruction`
+// dispatcher used by the code builder.
+macro_rules! instructions {
+    (
+        $(
+            ($predicate:expr, $generator_fn:ident, $instruction_kind:ident $(, $cost:tt)?),
+        )*
+    ) => {
+        // Total number of instruction entries, counted at expansion time.
+        static NUM_OPTIONS: usize = instructions!(
+            @count;
+            $( $generator_fn )*
+        );
+
+        // Returns the generator function for a randomly chosen, currently
+        // valid instruction, or `None` when no instruction is applicable.
+        fn choose_instruction(
+            u: &mut Unstructured<'_>,
+            module: &Module,
+            allowed_instructions: InstructionKinds,
+            builder: &mut CodeBuilder,
+        ) -> Option<
+            fn(&mut Unstructured<'_>, &Module, &mut CodeBuilder, &mut Vec<Instruction>) -> Result<()>
+        > {
+            builder.allocs.options.clear();
+            let mut cost = 0;
+            // Unroll the loop that checks whether each instruction is valid in
+            // the current context and, if it is valid, pushes it onto our
+            // options. Unrolling this loops lets us avoid dynamic calls through
+            // function pointers and, furthermore, each call site can be branch
+            // predicted and even inlined. This saved us about 30% of time in
+            // the `corpus` benchmark.
+            $(
+                let predicate: Option<fn(&Module, &mut CodeBuilder) -> bool> = $predicate;
+                if predicate.map_or(true, |f| f(module, builder))
+                    && allowed_instructions.contains($instruction_kind) {
+                    // Each entry records its cumulative weight prefix; an
+                    // entry's own weight is `1000 - cost` (so larger declared
+                    // costs make the instruction less likely).
+                    builder.allocs.options.push(($generator_fn, cost));
+                    cost += 1000 $(- $cost)?;
+                }
+            )*
+
+            // If there aren't actually any candidate instructions due to
+            // various filters in place then return `None` to indicate the
+            // situation.
+            if cost == 0 {
+                return None;
+            }
+
+            // Weighted selection: draw a point in the total weight and
+            // binary-search the cumulative prefixes to find which entry's
+            // weight interval contains it.
+            let i = u.int_in_range(0..=cost).ok()?;
+            let idx = builder
+                .allocs
+                .options
+                .binary_search_by_key(&i,|p| p.1)
+                .unwrap_or_else(|i| i - 1);
+            Some(builder.allocs.options[idx].0)
+        }
+    };
+
+    // Recursive helper arms that count the identifiers passed to `@count`.
+    ( @count; ) => {
+        0
+    };
+    ( @count; $x:ident $( $xs:ident )* ) => {
+        1 + instructions!( @count; $( $xs )* )
+    };
+}
+
+// The static set of options of instruction to generate that could be valid at
+// some given time. One entry per Wasm instruction.
+//
+// Each entry is made up of up to four parts:
+//
+// 1. A predicate for whether this is a valid choice, if any. `None` means that
+// the choice is always applicable.
+//
+// 2. The function to generate the instruction, given that we've made this
+// choice.
+//
+// 3. The `InstructionKind` the instruction belongs to; this allows filtering
+// out instructions by category.
+//
+// 4. An optional number used to weight how often this instruction is chosen.
+// Higher numbers are less likely to be chosen, and number specified must be
+// less than 1000.
+instructions! {
+ // Control instructions.
+ (Some(unreachable_valid), unreachable, Control, 990),
+ (None, nop, Control, 800),
+ (None, block, Control),
+ (None, r#loop, Control),
+ (Some(try_valid), r#try, Control),
+ (Some(delegate_valid), delegate, Control),
+ (Some(catch_valid), catch, Control),
+ (Some(catch_all_valid), catch_all, Control),
+ (Some(if_valid), r#if, Control),
+ (Some(else_valid), r#else, Control),
+ (Some(end_valid), end, Control),
+ (Some(br_valid), br, Control),
+ (Some(br_if_valid), br_if, Control),
+ (Some(br_table_valid), br_table, Control),
+ (Some(return_valid), r#return, Control, 900),
+ (Some(call_valid), call, Control),
+ (Some(call_indirect_valid), call_indirect, Control),
+ (Some(return_call_valid), return_call, Control),
+ (Some(return_call_indirect_valid), return_call_indirect, Control),
+ (Some(throw_valid), throw, Control, 850),
+ (Some(rethrow_valid), rethrow, Control),
+ // Parametric instructions.
+ (Some(drop_valid), drop, Parametric, 990),
+ (Some(select_valid), select, Parametric),
+ // Variable instructions.
+ (Some(local_get_valid), local_get, Variable),
+ (Some(local_set_valid), local_set, Variable),
+ (Some(local_set_valid), local_tee, Variable),
+ (Some(global_get_valid), global_get, Variable),
+ (Some(global_set_valid), global_set, Variable),
+ // Memory instructions.
+ (Some(have_memory_and_offset), i32_load, Memory),
+ (Some(have_memory_and_offset), i64_load, Memory),
+ (Some(have_memory_and_offset), f32_load, Memory),
+ (Some(have_memory_and_offset), f64_load, Memory),
+ (Some(have_memory_and_offset), i32_load_8_s, Memory),
+ (Some(have_memory_and_offset), i32_load_8_u, Memory),
+ (Some(have_memory_and_offset), i32_load_16_s, Memory),
+ (Some(have_memory_and_offset), i32_load_16_u, Memory),
+ (Some(have_memory_and_offset), i64_load_8_s, Memory),
+ (Some(have_memory_and_offset), i64_load_16_s, Memory),
+ (Some(have_memory_and_offset), i64_load_32_s, Memory),
+ (Some(have_memory_and_offset), i64_load_8_u, Memory),
+ (Some(have_memory_and_offset), i64_load_16_u, Memory),
+ (Some(have_memory_and_offset), i64_load_32_u, Memory),
+ (Some(i32_store_valid), i32_store, Memory),
+ (Some(i64_store_valid), i64_store, Memory),
+ (Some(f32_store_valid), f32_store, Memory),
+ (Some(f64_store_valid), f64_store, Memory),
+ (Some(i32_store_valid), i32_store_8, Memory),
+ (Some(i32_store_valid), i32_store_16, Memory),
+ (Some(i64_store_valid), i64_store_8, Memory),
+ (Some(i64_store_valid), i64_store_16, Memory),
+ (Some(i64_store_valid), i64_store_32, Memory),
+ (Some(have_memory), memory_size, Memory),
+ (Some(memory_grow_valid), memory_grow, Memory),
+ (Some(memory_init_valid), memory_init, Memory),
+ (Some(data_drop_valid), data_drop, Memory),
+ (Some(memory_copy_valid), memory_copy, Memory),
+ (Some(memory_fill_valid), memory_fill, Memory),
+ // Numeric instructions.
+ (None, i32_const, Numeric),
+ (None, i64_const, Numeric),
+ (None, f32_const, Numeric),
+ (None, f64_const, Numeric),
+ (Some(i32_on_stack), i32_eqz, Numeric),
+ (Some(i32_i32_on_stack), i32_eq, Numeric),
+ (Some(i32_i32_on_stack), i32_ne, Numeric),
+ (Some(i32_i32_on_stack), i32_lt_s, Numeric),
+ (Some(i32_i32_on_stack), i32_lt_u, Numeric),
+ (Some(i32_i32_on_stack), i32_gt_s, Numeric),
+ (Some(i32_i32_on_stack), i32_gt_u, Numeric),
+ (Some(i32_i32_on_stack), i32_le_s, Numeric),
+ (Some(i32_i32_on_stack), i32_le_u, Numeric),
+ (Some(i32_i32_on_stack), i32_ge_s, Numeric),
+ (Some(i32_i32_on_stack), i32_ge_u, Numeric),
+ (Some(i64_on_stack), i64_eqz, Numeric),
+ (Some(i64_i64_on_stack), i64_eq, Numeric),
+ (Some(i64_i64_on_stack), i64_ne, Numeric),
+ (Some(i64_i64_on_stack), i64_lt_s, Numeric),
+ (Some(i64_i64_on_stack), i64_lt_u, Numeric),
+ (Some(i64_i64_on_stack), i64_gt_s, Numeric),
+ (Some(i64_i64_on_stack), i64_gt_u, Numeric),
+ (Some(i64_i64_on_stack), i64_le_s, Numeric),
+ (Some(i64_i64_on_stack), i64_le_u, Numeric),
+ (Some(i64_i64_on_stack), i64_ge_s, Numeric),
+ (Some(i64_i64_on_stack), i64_ge_u, Numeric),
+ (Some(f32_f32_on_stack), f32_eq, Numeric),
+ (Some(f32_f32_on_stack), f32_ne, Numeric),
+ (Some(f32_f32_on_stack), f32_lt, Numeric),
+ (Some(f32_f32_on_stack), f32_gt, Numeric),
+ (Some(f32_f32_on_stack), f32_le, Numeric),
+ (Some(f32_f32_on_stack), f32_ge, Numeric),
+ (Some(f64_f64_on_stack), f64_eq, Numeric),
+ (Some(f64_f64_on_stack), f64_ne, Numeric),
+ (Some(f64_f64_on_stack), f64_lt, Numeric),
+ (Some(f64_f64_on_stack), f64_gt, Numeric),
+ (Some(f64_f64_on_stack), f64_le, Numeric),
+ (Some(f64_f64_on_stack), f64_ge, Numeric),
+ (Some(i32_on_stack), i32_clz, Numeric),
+ (Some(i32_on_stack), i32_ctz, Numeric),
+ (Some(i32_on_stack), i32_popcnt, Numeric),
+ (Some(i32_i32_on_stack), i32_add, Numeric),
+ (Some(i32_i32_on_stack), i32_sub, Numeric),
+ (Some(i32_i32_on_stack), i32_mul, Numeric),
+ (Some(i32_i32_on_stack), i32_div_s, Numeric),
+ (Some(i32_i32_on_stack), i32_div_u, Numeric),
+ (Some(i32_i32_on_stack), i32_rem_s, Numeric),
+ (Some(i32_i32_on_stack), i32_rem_u, Numeric),
+ (Some(i32_i32_on_stack), i32_and, Numeric),
+ (Some(i32_i32_on_stack), i32_or, Numeric),
+ (Some(i32_i32_on_stack), i32_xor, Numeric),
+ (Some(i32_i32_on_stack), i32_shl, Numeric),
+ (Some(i32_i32_on_stack), i32_shr_s, Numeric),
+ (Some(i32_i32_on_stack), i32_shr_u, Numeric),
+ (Some(i32_i32_on_stack), i32_rotl, Numeric),
+ (Some(i32_i32_on_stack), i32_rotr, Numeric),
+ (Some(i64_on_stack), i64_clz, Numeric),
+ (Some(i64_on_stack), i64_ctz, Numeric),
+ (Some(i64_on_stack), i64_popcnt, Numeric),
+ (Some(i64_i64_on_stack), i64_add, Numeric),
+ (Some(i64_i64_on_stack), i64_sub, Numeric),
+ (Some(i64_i64_on_stack), i64_mul, Numeric),
+ (Some(i64_i64_on_stack), i64_div_s, Numeric),
+ (Some(i64_i64_on_stack), i64_div_u, Numeric),
+ (Some(i64_i64_on_stack), i64_rem_s, Numeric),
+ (Some(i64_i64_on_stack), i64_rem_u, Numeric),
+ (Some(i64_i64_on_stack), i64_and, Numeric),
+ (Some(i64_i64_on_stack), i64_or, Numeric),
+ (Some(i64_i64_on_stack), i64_xor, Numeric),
+ (Some(i64_i64_on_stack), i64_shl, Numeric),
+ (Some(i64_i64_on_stack), i64_shr_s, Numeric),
+ (Some(i64_i64_on_stack), i64_shr_u, Numeric),
+ (Some(i64_i64_on_stack), i64_rotl, Numeric),
+ (Some(i64_i64_on_stack), i64_rotr, Numeric),
+ (Some(f32_on_stack), f32_abs, Numeric),
+ (Some(f32_on_stack), f32_neg, Numeric),
+ (Some(f32_on_stack), f32_ceil, Numeric),
+ (Some(f32_on_stack), f32_floor, Numeric),
+ (Some(f32_on_stack), f32_trunc, Numeric),
+ (Some(f32_on_stack), f32_nearest, Numeric),
+ (Some(f32_on_stack), f32_sqrt, Numeric),
+ (Some(f32_f32_on_stack), f32_add, Numeric),
+ (Some(f32_f32_on_stack), f32_sub, Numeric),
+ (Some(f32_f32_on_stack), f32_mul, Numeric),
+ (Some(f32_f32_on_stack), f32_div, Numeric),
+ (Some(f32_f32_on_stack), f32_min, Numeric),
+ (Some(f32_f32_on_stack), f32_max, Numeric),
+ (Some(f32_f32_on_stack), f32_copysign, Numeric),
+ (Some(f64_on_stack), f64_abs, Numeric),
+ (Some(f64_on_stack), f64_neg, Numeric),
+ (Some(f64_on_stack), f64_ceil, Numeric),
+ (Some(f64_on_stack), f64_floor, Numeric),
+ (Some(f64_on_stack), f64_trunc, Numeric),
+ (Some(f64_on_stack), f64_nearest, Numeric),
+ (Some(f64_on_stack), f64_sqrt, Numeric),
+ (Some(f64_f64_on_stack), f64_add, Numeric),
+ (Some(f64_f64_on_stack), f64_sub, Numeric),
+ (Some(f64_f64_on_stack), f64_mul, Numeric),
+ (Some(f64_f64_on_stack), f64_div, Numeric),
+ (Some(f64_f64_on_stack), f64_min, Numeric),
+ (Some(f64_f64_on_stack), f64_max, Numeric),
+ (Some(f64_f64_on_stack), f64_copysign, Numeric),
+ (Some(i64_on_stack), i32_wrap_i64, Numeric),
+ (Some(f32_on_stack), i32_trunc_f32_s, Numeric),
+ (Some(f32_on_stack), i32_trunc_f32_u, Numeric),
+ (Some(f64_on_stack), i32_trunc_f64_s, Numeric),
+ (Some(f64_on_stack), i32_trunc_f64_u, Numeric),
+ (Some(i32_on_stack), i64_extend_i32_s, Numeric),
+ (Some(i32_on_stack), i64_extend_i32_u, Numeric),
+ (Some(f32_on_stack), i64_trunc_f32_s, Numeric),
+ (Some(f32_on_stack), i64_trunc_f32_u, Numeric),
+ (Some(f64_on_stack), i64_trunc_f64_s, Numeric),
+ (Some(f64_on_stack), i64_trunc_f64_u, Numeric),
+ (Some(i32_on_stack), f32_convert_i32_s, Numeric),
+ (Some(i32_on_stack), f32_convert_i32_u, Numeric),
+ (Some(i64_on_stack), f32_convert_i64_s, Numeric),
+ (Some(i64_on_stack), f32_convert_i64_u, Numeric),
+ (Some(f64_on_stack), f32_demote_f64, Numeric),
+ (Some(i32_on_stack), f64_convert_i32_s, Numeric),
+ (Some(i32_on_stack), f64_convert_i32_u, Numeric),
+ (Some(i64_on_stack), f64_convert_i64_s, Numeric),
+ (Some(i64_on_stack), f64_convert_i64_u, Numeric),
+ (Some(f32_on_stack), f64_promote_f32, Numeric),
+ (Some(f32_on_stack), i32_reinterpret_f32, Numeric),
+ (Some(f64_on_stack), i64_reinterpret_f64, Numeric),
+ (Some(i32_on_stack), f32_reinterpret_i32, Numeric),
+ (Some(i64_on_stack), f64_reinterpret_i64, Numeric),
+ (Some(extendable_i32_on_stack), i32_extend_8_s, Numeric),
+ (Some(extendable_i32_on_stack), i32_extend_16_s, Numeric),
+ (Some(extendable_i64_on_stack), i64_extend_8_s, Numeric),
+ (Some(extendable_i64_on_stack), i64_extend_16_s, Numeric),
+ (Some(extendable_i64_on_stack), i64_extend_32_s, Numeric),
+ (Some(nontrapping_f32_on_stack), i32_trunc_sat_f32_s, Numeric),
+ (Some(nontrapping_f32_on_stack), i32_trunc_sat_f32_u, Numeric),
+ (Some(nontrapping_f64_on_stack), i32_trunc_sat_f64_s, Numeric),
+ (Some(nontrapping_f64_on_stack), i32_trunc_sat_f64_u, Numeric),
+ (Some(nontrapping_f32_on_stack), i64_trunc_sat_f32_s, Numeric),
+ (Some(nontrapping_f32_on_stack), i64_trunc_sat_f32_u, Numeric),
+ (Some(nontrapping_f64_on_stack), i64_trunc_sat_f64_s, Numeric),
+ (Some(nontrapping_f64_on_stack), i64_trunc_sat_f64_u, Numeric),
+ // reference types proposal
+ (Some(ref_null_valid), ref_null, Reference),
+ (Some(ref_func_valid), ref_func, Reference),
+ (Some(ref_is_null_valid), ref_is_null, Reference),
+ (Some(table_fill_valid), table_fill, Reference),
+ (Some(table_set_valid), table_set, Reference),
+ (Some(table_get_valid), table_get, Reference),
+ (Some(table_size_valid), table_size, Reference),
+ (Some(table_grow_valid), table_grow, Reference),
+ (Some(table_copy_valid), table_copy, Reference),
+ (Some(table_init_valid), table_init, Reference),
+ (Some(elem_drop_valid), elem_drop, Reference),
+ // SIMD instructions.
+ (Some(simd_have_memory_and_offset), v128_load, Vector),
+ (Some(simd_have_memory_and_offset), v128_load8x8s, Vector),
+ (Some(simd_have_memory_and_offset), v128_load8x8u, Vector),
+ (Some(simd_have_memory_and_offset), v128_load16x4s, Vector),
+ (Some(simd_have_memory_and_offset), v128_load16x4u, Vector),
+ (Some(simd_have_memory_and_offset), v128_load32x2s, Vector),
+ (Some(simd_have_memory_and_offset), v128_load32x2u, Vector),
+ (Some(simd_have_memory_and_offset), v128_load8_splat, Vector),
+ (Some(simd_have_memory_and_offset), v128_load16_splat, Vector),
+ (Some(simd_have_memory_and_offset), v128_load32_splat, Vector),
+ (Some(simd_have_memory_and_offset), v128_load64_splat, Vector),
+ (Some(simd_have_memory_and_offset), v128_load32_zero, Vector),
+ (Some(simd_have_memory_and_offset), v128_load64_zero, Vector),
+ (Some(simd_v128_store_valid), v128_store, Vector),
+ (Some(simd_load_lane_valid), v128_load8_lane, Vector),
+ (Some(simd_load_lane_valid), v128_load16_lane, Vector),
+ (Some(simd_load_lane_valid), v128_load32_lane, Vector),
+ (Some(simd_load_lane_valid), v128_load64_lane, Vector),
+ (Some(simd_store_lane_valid), v128_store8_lane, Vector),
+ (Some(simd_store_lane_valid), v128_store16_lane, Vector),
+ (Some(simd_store_lane_valid), v128_store32_lane, Vector),
+ (Some(simd_store_lane_valid), v128_store64_lane, Vector),
+ (Some(simd_enabled), v128_const, Vector),
+ (Some(simd_v128_v128_on_stack), i8x16_shuffle, Vector),
+ (Some(simd_v128_on_stack), i8x16_extract_lane_s, Vector),
+ (Some(simd_v128_on_stack), i8x16_extract_lane_u, Vector),
+ (Some(simd_v128_i32_on_stack), i8x16_replace_lane, Vector),
+ (Some(simd_v128_on_stack), i16x8_extract_lane_s, Vector),
+ (Some(simd_v128_on_stack), i16x8_extract_lane_u, Vector),
+ (Some(simd_v128_i32_on_stack), i16x8_replace_lane, Vector),
+ (Some(simd_v128_on_stack), i32x4_extract_lane, Vector),
+ (Some(simd_v128_i32_on_stack), i32x4_replace_lane, Vector),
+ (Some(simd_v128_on_stack), i64x2_extract_lane, Vector),
+ (Some(simd_v128_i64_on_stack), i64x2_replace_lane, Vector),
+ (Some(simd_v128_on_stack), f32x4_extract_lane, Vector),
+ (Some(simd_v128_f32_on_stack), f32x4_replace_lane, Vector),
+ (Some(simd_v128_on_stack), f64x2_extract_lane, Vector),
+ (Some(simd_v128_f64_on_stack), f64x2_replace_lane, Vector),
+ (Some(simd_i32_on_stack), i8x16_splat, Vector),
+ (Some(simd_i32_on_stack), i16x8_splat, Vector),
+ (Some(simd_i32_on_stack), i32x4_splat, Vector),
+ (Some(simd_i64_on_stack), i64x2_splat, Vector),
+ (Some(simd_f32_on_stack), f32x4_splat, Vector),
+ (Some(simd_f64_on_stack), f64x2_splat, Vector),
+ (Some(simd_v128_v128_on_stack), i8x16_swizzle, Vector),
+ (Some(simd_v128_v128_on_stack_relaxed), i8x16_relaxed_swizzle, Vector),
+ (Some(simd_v128_v128_v128_on_stack), v128_bitselect, Vector),
+ (Some(simd_v128_v128_v128_on_stack_relaxed), i8x16_relaxed_laneselect, Vector),
+ (Some(simd_v128_v128_v128_on_stack_relaxed), i16x8_relaxed_laneselect, Vector),
+ (Some(simd_v128_v128_v128_on_stack_relaxed), i32x4_relaxed_laneselect, Vector),
+ (Some(simd_v128_v128_v128_on_stack_relaxed), i64x2_relaxed_laneselect, Vector),
+ (Some(simd_v128_v128_on_stack), i8x16_eq, Vector),
+ (Some(simd_v128_v128_on_stack), i8x16_ne, Vector),
+ (Some(simd_v128_v128_on_stack), i8x16_lt_s, Vector),
+ (Some(simd_v128_v128_on_stack), i8x16_lt_u, Vector),
+ (Some(simd_v128_v128_on_stack), i8x16_gt_s, Vector),
+ (Some(simd_v128_v128_on_stack), i8x16_gt_u, Vector),
+ (Some(simd_v128_v128_on_stack), i8x16_le_s, Vector),
+ (Some(simd_v128_v128_on_stack), i8x16_le_u, Vector),
+ (Some(simd_v128_v128_on_stack), i8x16_ge_s, Vector),
+ (Some(simd_v128_v128_on_stack), i8x16_ge_u, Vector),
+ (Some(simd_v128_v128_on_stack), i16x8_eq, Vector),
+ (Some(simd_v128_v128_on_stack), i16x8_ne, Vector),
+ (Some(simd_v128_v128_on_stack), i16x8_lt_s, Vector),
+ (Some(simd_v128_v128_on_stack), i16x8_lt_u, Vector),
+ (Some(simd_v128_v128_on_stack), i16x8_gt_s, Vector),
+ (Some(simd_v128_v128_on_stack), i16x8_gt_u, Vector),
+ (Some(simd_v128_v128_on_stack), i16x8_le_s, Vector),
+ (Some(simd_v128_v128_on_stack), i16x8_le_u, Vector),
+ (Some(simd_v128_v128_on_stack), i16x8_ge_s, Vector),
+ (Some(simd_v128_v128_on_stack), i16x8_ge_u, Vector),
+ (Some(simd_v128_v128_on_stack), i32x4_eq, Vector),
+ (Some(simd_v128_v128_on_stack), i32x4_ne, Vector),
+ (Some(simd_v128_v128_on_stack), i32x4_lt_s, Vector),
+ (Some(simd_v128_v128_on_stack), i32x4_lt_u, Vector),
+ (Some(simd_v128_v128_on_stack), i32x4_gt_s, Vector),
+ (Some(simd_v128_v128_on_stack), i32x4_gt_u, Vector),
+ (Some(simd_v128_v128_on_stack), i32x4_le_s, Vector),
+ (Some(simd_v128_v128_on_stack), i32x4_le_u, Vector),
+ (Some(simd_v128_v128_on_stack), i32x4_ge_s, Vector),
+ (Some(simd_v128_v128_on_stack), i32x4_ge_u, Vector),
+ (Some(simd_v128_v128_on_stack), i64x2_eq, Vector),
+ (Some(simd_v128_v128_on_stack), i64x2_ne, Vector),
+ (Some(simd_v128_v128_on_stack), i64x2_lt_s, Vector),
+ (Some(simd_v128_v128_on_stack), i64x2_gt_s, Vector),
+ (Some(simd_v128_v128_on_stack), i64x2_le_s, Vector),
+ (Some(simd_v128_v128_on_stack), i64x2_ge_s, Vector),
+ (Some(simd_v128_v128_on_stack), f32x4_eq, Vector),
+ (Some(simd_v128_v128_on_stack), f32x4_ne, Vector),
+ (Some(simd_v128_v128_on_stack), f32x4_lt, Vector),
+ (Some(simd_v128_v128_on_stack), f32x4_gt, Vector),
+ (Some(simd_v128_v128_on_stack), f32x4_le, Vector),
+ (Some(simd_v128_v128_on_stack), f32x4_ge, Vector),
+ (Some(simd_v128_v128_on_stack), f64x2_eq, Vector),
+ (Some(simd_v128_v128_on_stack), f64x2_ne, Vector),
+ (Some(simd_v128_v128_on_stack), f64x2_lt, Vector),
+ (Some(simd_v128_v128_on_stack), f64x2_gt, Vector),
+ (Some(simd_v128_v128_on_stack), f64x2_le, Vector),
+ (Some(simd_v128_v128_on_stack), f64x2_ge, Vector),
+ (Some(simd_v128_on_stack), v128_not, Vector),
+ (Some(simd_v128_v128_on_stack), v128_and, Vector),
+ (Some(simd_v128_v128_on_stack), v128_and_not, Vector),
+ (Some(simd_v128_v128_on_stack), v128_or, Vector),
+ (Some(simd_v128_v128_on_stack), v128_xor, Vector),
+ (Some(simd_v128_v128_on_stack), v128_any_true, Vector),
+ (Some(simd_v128_on_stack), i8x16_abs, Vector),
+ (Some(simd_v128_on_stack), i8x16_neg, Vector),
+ (Some(simd_v128_on_stack), i8x16_popcnt, Vector),
+ (Some(simd_v128_on_stack), i8x16_all_true, Vector),
+ (Some(simd_v128_on_stack), i8x16_bitmask, Vector),
+ (Some(simd_v128_v128_on_stack), i8x16_narrow_i16x8s, Vector),
+ (Some(simd_v128_v128_on_stack), i8x16_narrow_i16x8u, Vector),
+ (Some(simd_v128_i32_on_stack), i8x16_shl, Vector),
+ (Some(simd_v128_i32_on_stack), i8x16_shr_s, Vector),
+ (Some(simd_v128_i32_on_stack), i8x16_shr_u, Vector),
+ (Some(simd_v128_v128_on_stack), i8x16_add, Vector),
+ (Some(simd_v128_v128_on_stack), i8x16_add_sat_s, Vector),
+ (Some(simd_v128_v128_on_stack), i8x16_add_sat_u, Vector),
+ (Some(simd_v128_v128_on_stack), i8x16_sub, Vector),
+ (Some(simd_v128_v128_on_stack), i8x16_sub_sat_s, Vector),
+ (Some(simd_v128_v128_on_stack), i8x16_sub_sat_u, Vector),
+ (Some(simd_v128_v128_on_stack), i8x16_min_s, Vector),
+ (Some(simd_v128_v128_on_stack), i8x16_min_u, Vector),
+ (Some(simd_v128_v128_on_stack), i8x16_max_s, Vector),
+ (Some(simd_v128_v128_on_stack), i8x16_max_u, Vector),
+ (Some(simd_v128_v128_on_stack), i8x16_avgr_u, Vector),
+ (Some(simd_v128_on_stack), i16x8_extadd_pairwise_i8x16s, Vector),
+ (Some(simd_v128_on_stack), i16x8_extadd_pairwise_i8x16u, Vector),
+ (Some(simd_v128_on_stack), i16x8_abs, Vector),
+ (Some(simd_v128_on_stack), i16x8_neg, Vector),
+ (Some(simd_v128_v128_on_stack), i16x8q15_mulr_sat_s, Vector),
+ (Some(simd_v128_on_stack), i16x8_all_true, Vector),
+ (Some(simd_v128_on_stack), i16x8_bitmask, Vector),
+ (Some(simd_v128_v128_on_stack), i16x8_narrow_i32x4s, Vector),
+ (Some(simd_v128_v128_on_stack), i16x8_narrow_i32x4u, Vector),
+ (Some(simd_v128_on_stack), i16x8_extend_low_i8x16s, Vector),
+ (Some(simd_v128_on_stack), i16x8_extend_high_i8x16s, Vector),
+ (Some(simd_v128_on_stack), i16x8_extend_low_i8x16u, Vector),
+ (Some(simd_v128_on_stack), i16x8_extend_high_i8x16u, Vector),
+ (Some(simd_v128_i32_on_stack), i16x8_shl, Vector),
+ (Some(simd_v128_i32_on_stack), i16x8_shr_s, Vector),
+ (Some(simd_v128_i32_on_stack), i16x8_shr_u, Vector),
+ (Some(simd_v128_v128_on_stack), i16x8_add, Vector),
+ (Some(simd_v128_v128_on_stack), i16x8_add_sat_s, Vector),
+ (Some(simd_v128_v128_on_stack), i16x8_add_sat_u, Vector),
+ (Some(simd_v128_v128_on_stack), i16x8_sub, Vector),
+ (Some(simd_v128_v128_on_stack), i16x8_sub_sat_s, Vector),
+ (Some(simd_v128_v128_on_stack), i16x8_sub_sat_u, Vector),
+ (Some(simd_v128_v128_on_stack), i16x8_mul, Vector),
+ (Some(simd_v128_v128_on_stack), i16x8_min_s, Vector),
+ (Some(simd_v128_v128_on_stack), i16x8_min_u, Vector),
+ (Some(simd_v128_v128_on_stack), i16x8_max_s, Vector),
+ (Some(simd_v128_v128_on_stack), i16x8_max_u, Vector),
+ (Some(simd_v128_v128_on_stack), i16x8_avgr_u, Vector),
+ (Some(simd_v128_v128_on_stack), i16x8_extmul_low_i8x16s, Vector),
+ (Some(simd_v128_v128_on_stack), i16x8_extmul_high_i8x16s, Vector),
+ (Some(simd_v128_v128_on_stack), i16x8_extmul_low_i8x16u, Vector),
+ (Some(simd_v128_v128_on_stack), i16x8_extmul_high_i8x16u, Vector),
+ (Some(simd_v128_on_stack), i32x4_extadd_pairwise_i16x8s, Vector),
+ (Some(simd_v128_on_stack), i32x4_extadd_pairwise_i16x8u, Vector),
+ (Some(simd_v128_on_stack), i32x4_abs, Vector),
+ (Some(simd_v128_on_stack), i32x4_neg, Vector),
+ (Some(simd_v128_on_stack), i32x4_all_true, Vector),
+ (Some(simd_v128_on_stack), i32x4_bitmask, Vector),
+ (Some(simd_v128_on_stack), i32x4_extend_low_i16x8s, Vector),
+ (Some(simd_v128_on_stack), i32x4_extend_high_i16x8s, Vector),
+ (Some(simd_v128_on_stack), i32x4_extend_low_i16x8u, Vector),
+ (Some(simd_v128_on_stack), i32x4_extend_high_i16x8u, Vector),
+ (Some(simd_v128_i32_on_stack), i32x4_shl, Vector),
+ (Some(simd_v128_i32_on_stack), i32x4_shr_s, Vector),
+ (Some(simd_v128_i32_on_stack), i32x4_shr_u, Vector),
+ (Some(simd_v128_v128_on_stack), i32x4_add, Vector),
+ (Some(simd_v128_v128_on_stack), i32x4_sub, Vector),
+ (Some(simd_v128_v128_on_stack), i32x4_mul, Vector),
+ (Some(simd_v128_v128_on_stack), i32x4_min_s, Vector),
+ (Some(simd_v128_v128_on_stack), i32x4_min_u, Vector),
+ (Some(simd_v128_v128_on_stack), i32x4_max_s, Vector),
+ (Some(simd_v128_v128_on_stack), i32x4_max_u, Vector),
+ (Some(simd_v128_v128_on_stack), i32x4_dot_i16x8s, Vector),
+ (Some(simd_v128_v128_on_stack), i32x4_extmul_low_i16x8s, Vector),
+ (Some(simd_v128_v128_on_stack), i32x4_extmul_high_i16x8s, Vector),
+ (Some(simd_v128_v128_on_stack), i32x4_extmul_low_i16x8u, Vector),
+ (Some(simd_v128_v128_on_stack), i32x4_extmul_high_i16x8u, Vector),
+ (Some(simd_v128_on_stack), i64x2_abs, Vector),
+ (Some(simd_v128_on_stack), i64x2_neg, Vector),
+ (Some(simd_v128_on_stack), i64x2_all_true, Vector),
+ (Some(simd_v128_on_stack), i64x2_bitmask, Vector),
+ (Some(simd_v128_on_stack), i64x2_extend_low_i32x4s, Vector),
+ (Some(simd_v128_on_stack), i64x2_extend_high_i32x4s, Vector),
+ (Some(simd_v128_on_stack), i64x2_extend_low_i32x4u, Vector),
+ (Some(simd_v128_on_stack), i64x2_extend_high_i32x4u, Vector),
+ (Some(simd_v128_i32_on_stack), i64x2_shl, Vector),
+ (Some(simd_v128_i32_on_stack), i64x2_shr_s, Vector),
+ (Some(simd_v128_i32_on_stack), i64x2_shr_u, Vector),
+ (Some(simd_v128_v128_on_stack), i64x2_add, Vector),
+ (Some(simd_v128_v128_on_stack), i64x2_sub, Vector),
+ (Some(simd_v128_v128_on_stack), i64x2_mul, Vector),
+ (Some(simd_v128_v128_on_stack), i64x2_extmul_low_i32x4s, Vector),
+ (Some(simd_v128_v128_on_stack), i64x2_extmul_high_i32x4s, Vector),
+ (Some(simd_v128_v128_on_stack), i64x2_extmul_low_i32x4u, Vector),
+ (Some(simd_v128_v128_on_stack), i64x2_extmul_high_i32x4u, Vector),
+ (Some(simd_v128_on_stack), f32x4_ceil, Vector),
+ (Some(simd_v128_on_stack), f32x4_floor, Vector),
+ (Some(simd_v128_on_stack), f32x4_trunc, Vector),
+ (Some(simd_v128_on_stack), f32x4_nearest, Vector),
+ (Some(simd_v128_on_stack), f32x4_abs, Vector),
+ (Some(simd_v128_on_stack), f32x4_neg, Vector),
+ (Some(simd_v128_on_stack), f32x4_sqrt, Vector),
+ (Some(simd_v128_v128_on_stack), f32x4_add, Vector),
+ (Some(simd_v128_v128_on_stack), f32x4_sub, Vector),
+ (Some(simd_v128_v128_on_stack), f32x4_mul, Vector),
+ (Some(simd_v128_v128_on_stack), f32x4_div, Vector),
+ (Some(simd_v128_v128_on_stack), f32x4_min, Vector),
+ (Some(simd_v128_v128_on_stack), f32x4_max, Vector),
+ (Some(simd_v128_v128_on_stack), f32x4p_min, Vector),
+ (Some(simd_v128_v128_on_stack), f32x4p_max, Vector),
+ (Some(simd_v128_on_stack), f64x2_ceil, Vector),
+ (Some(simd_v128_on_stack), f64x2_floor, Vector),
+ (Some(simd_v128_on_stack), f64x2_trunc, Vector),
+ (Some(simd_v128_on_stack), f64x2_nearest, Vector),
+ (Some(simd_v128_on_stack), f64x2_abs, Vector),
+ (Some(simd_v128_on_stack), f64x2_neg, Vector),
+ (Some(simd_v128_on_stack), f64x2_sqrt, Vector),
+ (Some(simd_v128_v128_on_stack), f64x2_add, Vector),
+ (Some(simd_v128_v128_on_stack), f64x2_sub, Vector),
+ (Some(simd_v128_v128_on_stack), f64x2_mul, Vector),
+ (Some(simd_v128_v128_on_stack), f64x2_div, Vector),
+ (Some(simd_v128_v128_on_stack), f64x2_min, Vector),
+ (Some(simd_v128_v128_on_stack), f64x2_max, Vector),
+ (Some(simd_v128_v128_on_stack), f64x2p_min, Vector),
+ (Some(simd_v128_v128_on_stack), f64x2p_max, Vector),
+ (Some(simd_v128_on_stack), i32x4_trunc_sat_f32x4s, Vector),
+ (Some(simd_v128_on_stack), i32x4_trunc_sat_f32x4u, Vector),
+ (Some(simd_v128_on_stack), f32x4_convert_i32x4s, Vector),
+ (Some(simd_v128_on_stack), f32x4_convert_i32x4u, Vector),
+ (Some(simd_v128_on_stack), i32x4_trunc_sat_f64x2s_zero, Vector),
+ (Some(simd_v128_on_stack), i32x4_trunc_sat_f64x2u_zero, Vector),
+ (Some(simd_v128_on_stack), f64x2_convert_low_i32x4s, Vector),
+ (Some(simd_v128_on_stack), f64x2_convert_low_i32x4u, Vector),
+ (Some(simd_v128_on_stack), f32x4_demote_f64x2_zero, Vector),
+ (Some(simd_v128_on_stack), f64x2_promote_low_f32x4, Vector),
+ (Some(simd_v128_on_stack_relaxed), i32x4_relaxed_trunc_f32x4s, Vector),
+ (Some(simd_v128_on_stack_relaxed), i32x4_relaxed_trunc_f32x4u, Vector),
+ (Some(simd_v128_on_stack_relaxed), i32x4_relaxed_trunc_f64x2s_zero, Vector),
+ (Some(simd_v128_on_stack_relaxed), i32x4_relaxed_trunc_f64x2u_zero, Vector),
+ (Some(simd_v128_v128_v128_on_stack_relaxed), f32x4_relaxed_madd, Vector),
+ (Some(simd_v128_v128_v128_on_stack_relaxed), f32x4_relaxed_nmadd, Vector),
+ (Some(simd_v128_v128_v128_on_stack_relaxed), f64x2_relaxed_madd, Vector),
+ (Some(simd_v128_v128_v128_on_stack_relaxed), f64x2_relaxed_nmadd, Vector),
+ (Some(simd_v128_v128_on_stack_relaxed), f32x4_relaxed_min, Vector),
+ (Some(simd_v128_v128_on_stack_relaxed), f32x4_relaxed_max, Vector),
+ (Some(simd_v128_v128_on_stack_relaxed), f64x2_relaxed_min, Vector),
+ (Some(simd_v128_v128_on_stack_relaxed), f64x2_relaxed_max, Vector),
+ (Some(simd_v128_v128_on_stack_relaxed), i16x8_relaxed_q15mulr_s, Vector),
+ (Some(simd_v128_v128_on_stack_relaxed), i16x8_relaxed_dot_i8x16_i7x16_s, Vector),
+ (Some(simd_v128_v128_v128_on_stack_relaxed), i32x4_relaxed_dot_i8x16_i7x16_add_s, Vector),
+}
+
+/// Scratch state and per-module caches that are reused while generating
+/// every function body in a module, so each function doesn't have to
+/// reallocate its working vectors and lookup maps from scratch.
+pub(crate) struct CodeBuilderAllocations {
+    // The control labels in scope right now.
+    controls: Vec<Control>,
+
+    // The types on the operand stack right now. A `None` entry is an
+    // operand of unknown type which matches any expected type (see
+    // `CodeBuilder::types_on_stack`).
+    operands: Vec<Option<ValType>>,
+
+    // Dynamic set of options of instruction we can generate that are known to
+    // be valid right now.
+    options: Vec<(
+        fn(&mut Unstructured, &Module, &mut CodeBuilder, &mut Vec<Instruction>) -> Result<()>,
+        u32,
+    )>,
+
+    // Cached information about the module that we're generating functions for,
+    // used to speed up validity checks. The mutable globals map is a map of the
+    // type of global to the global indices which have that type (and they're
+    // all mutable).
+    mutable_globals: BTreeMap<ValType, Vec<u32>>,
+
+    // Like mutable globals above this is a map from function types to the list
+    // of functions that have that function type.
+    functions: BTreeMap<Rc<FuncType>, Vec<u32>>,
+
+    // Like functions above this is a map from tag types to the list of tags
+    // have that tag type.
+    tags: BTreeMap<Vec<ValType>, Vec<u32>>,
+
+    // Tables in this module which have a funcref element type.
+    funcref_tables: Vec<u32>,
+
+    // Functions that are referenced in the module through globals and segments.
+    referenced_functions: Vec<u32>,
+
+    // Flag that indicates if any element segments have the same type as any
+    // table
+    table_init_possible: bool,
+
+    // Lists of memory indices which are either 32-bit or 64-bit. This is used
+    // for faster lookup in validating instructions to know which memories have
+    // which types. For example if there are no 64-bit memories then we
+    // shouldn't ever look for i64 on the stack for `i32.load`.
+    memory32: Vec<u32>,
+    memory64: Vec<u32>,
+}
+
+/// Builder for a single function body, borrowing the module-wide
+/// `CodeBuilderAllocations` scratch space for the duration of that one
+/// function's generation.
+pub(crate) struct CodeBuilder<'a> {
+    func_ty: &'a FuncType,
+    locals: &'a mut Vec<ValType>,
+    allocs: &'a mut CodeBuilderAllocations,
+
+    // Temporary locals injected and used by nan canonicalization. Note that
+    // this list of extra locals is appended to `self.locals` at the end of code
+    // generation, and it's kept separate here to avoid using these locals in
+    // `local.get` and similar instructions.
+    extra_locals: Vec<ValType>,
+    // Lazily-allocated indices of the scratch local of each type used by
+    // `canonicalize_nan`, cached so the same local is reused.
+    f32_scratch: Option<usize>,
+    f64_scratch: Option<usize>,
+    v128_scratch: Option<usize>,
+}
+
+/// A control frame.
+#[derive(Debug, Clone)]
+struct Control {
+    /// Which construct (`block`/`if`/`loop`/`try`/…) opened this frame.
+    kind: ControlKind,
+    /// Value types that must be on the stack when entering this control frame.
+    params: Vec<ValType>,
+    /// Value types that are left on the stack when exiting this control frame.
+    results: Vec<ValType>,
+    /// How far down the operand stack instructions inside this control frame
+    /// can reach.
+    height: usize,
+}
+
+impl Control {
+    /// The types a branch to this label expects on the stack: a loop's
+    /// branch target takes the loop's parameters, while every other kind
+    /// of frame's branch target takes its results.
+    fn label_types(&self) -> &[ValType] {
+        match self.kind {
+            ControlKind::Loop => &self.params,
+            _ => &self.results,
+        }
+    }
+}
+
+/// The kind of construct that opened a `Control` frame; validity checks
+/// such as `end_valid`, `else_valid`, and `catch_valid` key off this to
+/// decide which instructions may close or extend the frame.
+#[derive(Clone, Copy, Debug, PartialEq, Eq)]
+enum ControlKind {
+    Block,
+    If,
+    Loop,
+    Try,
+    Catch,
+    CatchAll,
+}
+
+/// Which floating-point shape a NaN-canonicalization sequence operates
+/// on: scalar `f32`/`f64`, or the corresponding 128-bit SIMD lane shapes.
+enum Float {
+    F32,
+    F64,
+    F32x4,
+    F64x2,
+}
+
+impl CodeBuilderAllocations {
+    /// Pre-compute the per-module lookup caches (mutable globals by type,
+    /// functions by signature, tags by parameter list, funcref tables,
+    /// referenced functions, and 32/64-bit memory indices) used to speed
+    /// up instruction-validity checks during code generation.
+    pub(crate) fn new(module: &Module) -> Self {
+        // Group mutable globals by their value type.
+        let mut mutable_globals = BTreeMap::new();
+        for (i, global) in module.globals.iter().enumerate() {
+            if global.mutable {
+                mutable_globals
+                    .entry(global.val_type)
+                    .or_insert(Vec::new())
+                    .push(i as u32);
+            }
+        }
+
+        // Group tags by the parameter types of their function type.
+        let mut tags = BTreeMap::new();
+        for (idx, tag_type) in module.tags() {
+            tags.entry(tag_type.func_type.params.to_vec())
+                .or_insert(Vec::new())
+                .push(idx);
+        }
+
+        // Group functions by their full signature.
+        let mut functions = BTreeMap::new();
+        for (idx, func) in module.funcs() {
+            functions
+                .entry(func.clone())
+                .or_insert(Vec::new())
+                .push(idx);
+        }
+
+        let mut funcref_tables = Vec::new();
+        let mut table_tys = Vec::new();
+        for (i, table) in module.tables.iter().enumerate() {
+            table_tys.push(table.element_type);
+            if table.element_type == RefType::FUNCREF {
+                funcref_tables.push(i as u32);
+            }
+        }
+
+        // Collect every function index referenced from global initializers
+        // or element segments; a BTreeSet deduplicates and sorts them.
+        let mut referenced_functions = BTreeSet::new();
+        for (_, expr) in module.defined_globals.iter() {
+            if let GlobalInitExpr::FuncRef(i) = *expr {
+                referenced_functions.insert(i);
+            }
+        }
+        for g in module.elems.iter() {
+            match &g.items {
+                Elements::Expressions(e) => {
+                    // `None` entries (null refs) don't reference a function.
+                    let iter = e.iter().filter_map(|i| *i);
+                    referenced_functions.extend(iter);
+                }
+                Elements::Functions(e) => {
+                    referenced_functions.extend(e.iter().cloned());
+                }
+            }
+        }
+
+        // `table.init` can only validate if some element segment's type
+        // matches some table's element type.
+        let table_init_possible = module.elems.iter().any(|e| table_tys.contains(&e.ty));
+
+        let mut memory32 = Vec::new();
+        let mut memory64 = Vec::new();
+        for (i, mem) in module.memories.iter().enumerate() {
+            if mem.memory64 {
+                memory64.push(i as u32);
+            } else {
+                memory32.push(i as u32);
+            }
+        }
+
+        CodeBuilderAllocations {
+            controls: Vec::with_capacity(4),
+            operands: Vec::with_capacity(16),
+            options: Vec::with_capacity(NUM_OPTIONS),
+            functions,
+            tags,
+            mutable_globals,
+            funcref_tables,
+            referenced_functions: referenced_functions.into_iter().collect(),
+            table_init_possible,
+            memory32,
+            memory64,
+        }
+    }
+
+    /// Reset the per-function state (control frames, operand stack,
+    /// instruction options) and hand out a `CodeBuilder` for one function
+    /// of type `func_ty` that borrows these allocations.
+    pub(crate) fn builder<'a>(
+        &'a mut self,
+        func_ty: &'a FuncType,
+        locals: &'a mut Vec<ValType>,
+    ) -> CodeBuilder<'a> {
+        self.controls.clear();
+        // The outermost frame represents the function itself: branching to
+        // it returns the function's results.
+        self.controls.push(Control {
+            kind: ControlKind::Block,
+            params: vec![],
+            results: func_ty.results.to_vec(),
+            height: 0,
+        });
+
+        self.operands.clear();
+        self.options.clear();
+
+        CodeBuilder {
+            func_ty,
+            locals,
+            allocs: self,
+            extra_locals: Vec::new(),
+            f32_scratch: None,
+            f64_scratch: None,
+            v128_scratch: None,
+        }
+    }
+}
+
+impl CodeBuilder<'_> {
+    /// Get the operands that are in-scope within the current control frame.
+    fn operands(&self) -> &[Option<ValType>] {
+        let height = self.allocs.controls.last().map_or(0, |c| c.height);
+        &self.allocs.operands[height..]
+    }
+
+    /// Pop `to_pop` from the operand stack. Callers must already have
+    /// verified (e.g. via `types_on_stack`) that those types are present.
+    fn pop_operands(&mut self, to_pop: &[ValType]) {
+        debug_assert!(self.types_on_stack(to_pop));
+        self.allocs
+            .operands
+            .truncate(self.allocs.operands.len() - to_pop.len());
+    }
+
+    /// Push `to_push` onto the operand stack as known (`Some`) types.
+    fn push_operands(&mut self, to_push: &[ValType]) {
+        self.allocs
+            .operands
+            .extend(to_push.iter().copied().map(Some));
+    }
+
+    /// Are the types needed to branch to `to_check` currently on the stack?
+    fn label_types_on_stack(&self, to_check: &Control) -> bool {
+        self.types_on_stack(to_check.label_types())
+    }
+
+    /// Is `ty` on top of the operand stack? An unknown (`None`) operand
+    /// matches any type.
+    fn type_on_stack(&self, ty: ValType) -> bool {
+        match self.operands().last() {
+            None => false,
+            Some(None) => true,
+            Some(Some(x)) => *x == ty,
+        }
+    }
+
+    /// Are `types` on top of the operand stack, in order? Unknown (`None`)
+    /// operands match any expected type.
+    fn types_on_stack(&self, types: &[ValType]) -> bool {
+        self.operands().len() >= types.len()
+            && self
+                .operands()
+                .iter()
+                .rev()
+                .zip(types.iter().rev())
+                .all(|(a, b)| match (a, b) {
+                    (None, _) => true,
+                    (Some(x), y) => x == y,
+                })
+    }
+
+    /// Pick an arbitrary block type for a new control frame: empty, a
+    /// single arbitrary result, or — when multi-value is enabled — any
+    /// function type whose parameters are already on the stack.
+    #[inline(never)]
+    fn arbitrary_block_type(&self, u: &mut Unstructured, module: &Module) -> Result<BlockType> {
+        let mut options: Vec<Box<dyn Fn(&mut Unstructured) -> Result<BlockType>>> = vec![
+            Box::new(|_| Ok(BlockType::Empty)),
+            Box::new(|u| Ok(BlockType::Result(module.arbitrary_valtype(u)?))),
+        ];
+        if module.config.multi_value_enabled() {
+            for (i, ty) in module.func_types() {
+                if self.types_on_stack(&ty.params) {
+                    options.push(Box::new(move |_| Ok(BlockType::FunctionType(i as u32))));
+                }
+            }
+        }
+        let f = u.choose(&options)?;
+        f(u)
+    }
+
+    /// Generate an arbitrary-but-valid instruction sequence for this
+    /// function, consuming fuzz input from `u` until the input runs dry,
+    /// the configured instruction limit is hit, or a random stop byte
+    /// (roughly a 1-in-256 chance per step) ends generation early.
+    pub(crate) fn arbitrary(
+        mut self,
+        u: &mut Unstructured,
+        module: &Module,
+    ) -> Result<Vec<Instruction>> {
+        let max_instructions = module.config.max_instructions();
+        let allowed_instructions = module.config.allowed_instructions();
+        let mut instructions = vec![];
+
+        while !self.allocs.controls.is_empty() {
+            let keep_going = instructions.len() < max_instructions
+                && u.arbitrary().map_or(false, |b: u8| b != 0);
+            if !keep_going {
+                self.end_active_control_frames(
+                    u,
+                    &mut instructions,
+                    module.config.disallow_traps(),
+                );
+                break;
+            }
+
+            match choose_instruction(u, module, allowed_instructions, &mut self) {
+                Some(f) => {
+                    f(u, module, &mut self, &mut instructions)?;
+                }
+                // Choosing an instruction can fail because there is not enough
+                // underlying data, so we really cannot generate any more
+                // instructions. In this case we swallow that error and instead
+                // just terminate our wasm function's frames.
+                None => {
+                    self.end_active_control_frames(
+                        u,
+                        &mut instructions,
+                        module.config.disallow_traps(),
+                    );
+                    break;
+                }
+            }
+
+            // If the configuration for this module requests nan
+            // canonicalization then perform that here based on whether or not
+            // the previous instruction needs canonicalization. Note that this
+            // is based off Cranelift's pass for nan canonicalization for which
+            // instructions to canonicalize, but the general idea is most
+            // floating-point operations.
+            if module.config.canonicalize_nans() {
+                match instructions.last().unwrap() {
+                    Instruction::F32Ceil
+                    | Instruction::F32Floor
+                    | Instruction::F32Nearest
+                    | Instruction::F32Sqrt
+                    | Instruction::F32Trunc
+                    | Instruction::F32Div
+                    | Instruction::F32Max
+                    | Instruction::F32Min
+                    | Instruction::F32Mul
+                    | Instruction::F32Sub
+                    | Instruction::F32Add => self.canonicalize_nan(Float::F32, &mut instructions),
+                    Instruction::F64Ceil
+                    | Instruction::F64Floor
+                    | Instruction::F64Nearest
+                    | Instruction::F64Sqrt
+                    | Instruction::F64Trunc
+                    | Instruction::F64Div
+                    | Instruction::F64Max
+                    | Instruction::F64Min
+                    | Instruction::F64Mul
+                    | Instruction::F64Sub
+                    | Instruction::F64Add => self.canonicalize_nan(Float::F64, &mut instructions),
+                    Instruction::F32x4Ceil
+                    | Instruction::F32x4Floor
+                    | Instruction::F32x4Nearest
+                    | Instruction::F32x4Sqrt
+                    | Instruction::F32x4Trunc
+                    | Instruction::F32x4Div
+                    | Instruction::F32x4Max
+                    | Instruction::F32x4Min
+                    | Instruction::F32x4Mul
+                    | Instruction::F32x4Sub
+                    | Instruction::F32x4Add => {
+                        self.canonicalize_nan(Float::F32x4, &mut instructions)
+                    }
+                    Instruction::F64x2Ceil
+                    | Instruction::F64x2Floor
+                    | Instruction::F64x2Nearest
+                    | Instruction::F64x2Sqrt
+                    | Instruction::F64x2Trunc
+                    | Instruction::F64x2Div
+                    | Instruction::F64x2Max
+                    | Instruction::F64x2Min
+                    | Instruction::F64x2Mul
+                    | Instruction::F64x2Sub
+                    | Instruction::F64x2Add => {
+                        self.canonicalize_nan(Float::F64x2, &mut instructions)
+                    }
+                    _ => {}
+                }
+            }
+        }
+
+        // Append the nan-canonicalization scratch locals (if any) to the
+        // function's real locals now that generation is done.
+        self.locals.extend(self.extra_locals.drain(..));
+
+        Ok(instructions)
+    }
+
+    /// Replace a possibly-NaN value on top of the stack with a canonical
+    /// NaN bit pattern (leaving non-NaN values untouched), so engines'
+    /// differing NaN payloads can't be observed by the generated module.
+    fn canonicalize_nan(&mut self, ty: Float, ins: &mut Vec<Instruction>) {
+        // We'll need to temporarily save the top of the stack into a local, so
+        // figure out that local here. Note that this tries to use the same
+        // local if canonicalization happens more than once in a function.
+        let (local, val_ty) = match ty {
+            Float::F32 => (&mut self.f32_scratch, ValType::F32),
+            Float::F64 => (&mut self.f64_scratch, ValType::F64),
+            Float::F32x4 | Float::F64x2 => (&mut self.v128_scratch, ValType::V128),
+        };
+        let local = match *local {
+            Some(i) => i as u32,
+            None => self.alloc_local(val_ty),
+        };
+
+        // Save the previous instruction's result into a local. This also leaves
+        // a value on the stack as `val1` for the `select` instruction.
+        ins.push(Instruction::LocalTee(local));
+
+        // The `val2` value input to the `select` below, our nan pattern.
+        //
+        // The nan patterns here are chosen to be a canonical representation
+        // which is still NaN but the wasm will always produce the same bits of
+        // a nan so if the wasm takes a look at the nan inside it'll always see
+        // the same representation.
+        const CANON_32BIT_NAN: u32 = 0b01111111110000000000000000000000;
+        const CANON_64BIT_NAN: u64 =
+            0b0111111111111000000000000000000000000000000000000000000000000000;
+        ins.push(match ty {
+            Float::F32 => Instruction::F32Const(f32::from_bits(CANON_32BIT_NAN)),
+            Float::F64 => Instruction::F64Const(f64::from_bits(CANON_64BIT_NAN)),
+            Float::F32x4 => {
+                // Splat the 32-bit pattern across all four lanes.
+                let nan = CANON_32BIT_NAN as i128;
+                let nan = nan | (nan << 32) | (nan << 64) | (nan << 96);
+                Instruction::V128Const(nan)
+            }
+            Float::F64x2 => {
+                // Splat the 64-bit pattern across both lanes.
+                let nan = CANON_64BIT_NAN as i128;
+                let nan = nan | (nan << 64);
+                Instruction::V128Const(nan)
+            }
+        });
+
+        // the condition of the `select`, which is the float's equality test
+        // with itself.
+        ins.push(Instruction::LocalGet(local));
+        ins.push(Instruction::LocalGet(local));
+        ins.push(match ty {
+            Float::F32 => Instruction::F32Eq,
+            Float::F64 => Instruction::F64Eq,
+            Float::F32x4 => Instruction::F32x4Eq,
+            Float::F64x2 => Instruction::F64x2Eq,
+        });
+
+        // Select the result. If the condition is nonzero (aka the float is
+        // equal to itself) it picks `val1`, otherwise if zero (aka the float
+        // is nan) it picks `val2`.
+        ins.push(match ty {
+            Float::F32 | Float::F64 => Instruction::Select,
+            Float::F32x4 | Float::F64x2 => Instruction::V128Bitselect,
+        });
+    }
+
+    /// Allocate a fresh scratch local of type `ty`, returning its index
+    /// in the function's combined params + locals + extra-locals space.
+    fn alloc_local(&mut self, ty: ValType) -> u32 {
+        let val = self.locals.len() + self.func_ty.params.len() + self.extra_locals.len();
+        self.extra_locals.push(ty);
+        u32::try_from(val).unwrap()
+    }
+
+    /// Close every control frame still open, synthesizing whatever stack
+    /// values (and `else` arms for non-stack-neutral `if`s) are needed for
+    /// each label to validate.
+    fn end_active_control_frames(
+        &mut self,
+        u: &mut Unstructured<'_>,
+        instructions: &mut Vec<Instruction>,
+        disallow_traps: bool,
+    ) {
+        while !self.allocs.controls.is_empty() {
+            // Ensure that this label is valid by placing the right types onto
+            // the operand stack for the end of the label.
+            self.guarantee_label_results(u, instructions, disallow_traps);
+
+            // Remove the label and clear the operand stack since the label has
+            // been removed.
+            let label = self.allocs.controls.pop().unwrap();
+            self.allocs.operands.truncate(label.height);
+
+            // If this is an `if` that is not stack neutral, then it
+            // must have an `else`. Generate synthetic results here in the same
+            // manner we did above.
+            if label.kind == ControlKind::If && label.params != label.results {
+                instructions.push(Instruction::Else);
+                self.allocs.controls.push(label.clone());
+                self.allocs
+                    .operands
+                    .extend(label.params.into_iter().map(Some));
+                self.guarantee_label_results(u, instructions, disallow_traps);
+                self.allocs.controls.pop();
+                self.allocs.operands.truncate(label.height);
+            }
+
+            // The last control frame for the function return does not
+            // need an `end` instruction.
+            if !self.allocs.controls.is_empty() {
+                instructions.push(Instruction::End);
+            }
+
+            // Place the results of the label onto the operand stack for use
+            // after the label.
+            self.allocs
+                .operands
+                .extend(label.results.into_iter().map(Some));
+        }
+    }
+
+    /// Modifies the instruction stream to guarantee that the current control
+    /// label's results are on the stack and ready for the control label to return.
+    fn guarantee_label_results(
+        &mut self,
+        u: &mut Unstructured<'_>,
+        instructions: &mut Vec<Instruction>,
+        disallow_traps: bool,
+    ) {
+        let mut operands = self.operands();
+        let label = self.allocs.controls.last().unwrap();
+
+        // Already done, yay!
+        if label.results.len() == operands.len() && self.types_on_stack(&label.results) {
+            return;
+        }
+
+        // Generating an unreachable instruction is always a valid way to
+        // generate any types for a label, but it's not too interesting, so
+        // don't favor it.
+        if u.arbitrary::<u16>().unwrap_or(0) == 1 && !disallow_traps {
+            instructions.push(Instruction::Unreachable);
+            return;
+        }
+
+        // Arbitrarily massage the stack to get the expected results. First we
+        // drop all extraneous results to we're only dealing with those we want
+        // to deal with. Afterwards we start at the bottom of the stack and move
+        // up, figuring out what matches and what doesn't. As soon as something
+        // doesn't match we throw out that and everything else remaining,
+        // filling in results with dummy values.
+        while operands.len() > label.results.len() {
+            instructions.push(Instruction::Drop);
+            operands = &operands[..operands.len() - 1];
+        }
+        for (i, expected) in label.results.iter().enumerate() {
+            if let Some(actual) = operands.get(i) {
+                if Some(*expected) == *actual {
+                    continue;
+                }
+                // Mismatch: drop this operand and everything above it, then
+                // fall through to synthesize the expected value.
+                for _ in operands[i..].iter() {
+                    instructions.push(Instruction::Drop);
+                }
+                operands = &[];
+            }
+            instructions.push(arbitrary_val(*expected, u));
+        }
+    }
+}
+
+/// Produce a constant instruction of type `ty` with an arbitrary value,
+/// falling back to zero/null when the fuzz input is exhausted.
+fn arbitrary_val(ty: ValType, u: &mut Unstructured<'_>) -> Instruction {
+    match ty {
+        ValType::I32 => Instruction::I32Const(u.arbitrary().unwrap_or(0)),
+        ValType::I64 => Instruction::I64Const(u.arbitrary().unwrap_or(0)),
+        ValType::F32 => Instruction::F32Const(u.arbitrary().unwrap_or(0.0)),
+        ValType::F64 => Instruction::F64Const(u.arbitrary().unwrap_or(0.0)),
+        ValType::V128 => Instruction::V128Const(u.arbitrary().unwrap_or(0)),
+        ValType::Ref(ty) => {
+            // Only nullable reference types can be synthesized via ref.null.
+            assert!(ty.nullable);
+            Instruction::RefNull(ty.heap_type)
+        }
+    }
+}
+
+/// `unreachable` traps, so it's only valid when traps are allowed.
+#[inline]
+fn unreachable_valid(module: &Module, _: &mut CodeBuilder) -> bool {
+    !module.config.disallow_traps()
+}
+
+/// Emit a single `unreachable` instruction; no stack bookkeeping is
+/// needed since it's valid with any operand stack.
+fn unreachable(
+    _u: &mut Unstructured,
+    _module: &Module,
+    _builder: &mut CodeBuilder,
+    instructions: &mut Vec<Instruction>,
+) -> Result<()> {
+    instructions.extend([Instruction::Unreachable]);
+    Ok(())
+}
+
+/// Emit a single `nop` instruction; it neither reads nor writes the
+/// operand stack, so no bookkeeping is required.
+fn nop(
+    _u: &mut Unstructured,
+    _module: &Module,
+    _builder: &mut CodeBuilder,
+    instructions: &mut Vec<Instruction>,
+) -> Result<()> {
+    instructions.extend([Instruction::Nop]);
+    Ok(())
+}
+
+/// Open a `block` control frame with an arbitrary block type whose
+/// parameters are taken from the current operand stack.
+fn block(
+    u: &mut Unstructured,
+    module: &Module,
+    builder: &mut CodeBuilder,
+    instructions: &mut Vec<Instruction>,
+) -> Result<()> {
+    let block_ty = builder.arbitrary_block_type(u, module)?;
+    let (params, results) = module.params_results(&block_ty);
+    // The block's parameters move into the new frame, so the frame floor
+    // sits beneath them on the operand stack.
+    let height = builder.allocs.operands.len() - params.len();
+    builder.allocs.controls.push(Control {
+        kind: ControlKind::Block,
+        params,
+        results,
+        height,
+    });
+    instructions.push(Instruction::Block(block_ty));
+    Ok(())
+}
+
+/// `try` may only be emitted when the exceptions proposal is enabled.
+#[inline]
+fn try_valid(module: &Module, _: &mut CodeBuilder) -> bool {
+    module.config.exceptions_enabled()
+}
+
+/// Open a `try` control frame with an arbitrary block type whose
+/// parameters are taken from the current operand stack.
+fn r#try(
+    u: &mut Unstructured,
+    module: &Module,
+    builder: &mut CodeBuilder,
+    instructions: &mut Vec<Instruction>,
+) -> Result<()> {
+    let block_ty = builder.arbitrary_block_type(u, module)?;
+    let (params, results) = module.params_results(&block_ty);
+    // As with `block`, the frame floor sits beneath the consumed params.
+    let height = builder.allocs.operands.len() - params.len();
+    builder.allocs.controls.push(Control {
+        kind: ControlKind::Try,
+        params,
+        results,
+        height,
+    });
+    instructions.push(Instruction::Try(block_ty));
+    Ok(())
+}
+
+/// `delegate` closes a `try` frame, so it's valid exactly when the
+/// innermost frame is a `try` that `end` could also close.
+#[inline]
+fn delegate_valid(module: &Module, builder: &mut CodeBuilder) -> bool {
+    let control_kind = builder.allocs.controls.last().unwrap().kind;
+    // delegate is only valid if end could be used in a try control frame
+    module.config.exceptions_enabled()
+        && control_kind == ControlKind::Try
+        && end_valid(module, builder)
+}
+
+/// Close the innermost `try` frame with a `delegate` targeting an
+/// arbitrary outer control frame. Only reachable when `delegate_valid`
+/// returned true (innermost frame is a `try` closable by `end`).
+fn delegate(
+    u: &mut Unstructured,
+    _: &Module,
+    builder: &mut CodeBuilder,
+    instructions: &mut Vec<Instruction>,
+) -> Result<()> {
+    // There will always be at least the function's return frame and try
+    // control frame if we are emitting delegate.
+    //
+    // Note: `Vec::len` here rather than `iter().count()` — same value,
+    // O(1) instead of walking the iterator (clippy::iter_count).
+    let n = builder.allocs.controls.len();
+    debug_assert!(n >= 2);
+    // Delegate must target an outer control from the try block, and is
+    // encoded with relative depth from the outer control
+    let target_relative_from_last = u.int_in_range(1..=n - 1)?;
+    let target_relative_from_outer = target_relative_from_last - 1;
+    // Delegate ends the try block
+    builder.allocs.controls.pop();
+    instructions.push(Instruction::Delegate(target_relative_from_outer as u32));
+    Ok(())
+}
+
+/// `catch` is valid when the innermost frame is a `try` or `catch` (not
+/// `catch_all`) that `end` could close, and at least one tag exists to
+/// catch.
+#[inline]
+fn catch_valid(module: &Module, builder: &mut CodeBuilder) -> bool {
+    let control_kind = builder.allocs.controls.last().unwrap().kind;
+    // catch is only valid if end could be used in a try or catch (not
+    // catch_all) control frame. There must also be a tag that we can catch.
+    module.config.exceptions_enabled()
+        && (control_kind == ControlKind::Try || control_kind == ControlKind::Catch)
+        && end_valid(module, builder)
+        // Idiomatic emptiness check (clippy::len_zero).
+        && !module.tags.is_empty()
+}
+
+/// Turn the innermost `try`/`catch` frame into a `catch` frame for an
+/// arbitrary tag, swapping the frame's results for the tag's parameters
+/// on the operand stack.
+fn catch(
+    u: &mut Unstructured,
+    module: &Module,
+    builder: &mut CodeBuilder,
+    instructions: &mut Vec<Instruction>,
+) -> Result<()> {
+    let tag_idx = u.int_in_range(0..=(module.tags.len() - 1))?;
+    let tag_type = &module.tags[tag_idx];
+    let control = builder.allocs.controls.pop().unwrap();
+    // Pop the results for the previous try or catch
+    builder.pop_operands(&control.results);
+    // Push the params of the tag we're catching
+    builder.push_operands(&tag_type.func_type.params);
+    builder.allocs.controls.push(Control {
+        kind: ControlKind::Catch,
+        ..control
+    });
+    instructions.push(Instruction::Catch(tag_idx as u32));
+    Ok(())
+}
+
+/// `catch_all` is valid when the innermost frame is a `try` or `catch`
+/// (a frame may have at most one `catch_all`) that `end` could close.
+#[inline]
+fn catch_all_valid(module: &Module, builder: &mut CodeBuilder) -> bool {
+    let control_kind = builder.allocs.controls.last().unwrap().kind;
+    // catch_all is only valid if end could be used in a try or catch (not
+    // catch_all) control frame.
+    module.config.exceptions_enabled()
+        && (control_kind == ControlKind::Try || control_kind == ControlKind::Catch)
+        && end_valid(module, builder)
+}
+
+/// Turn the innermost `try`/`catch` frame into a `catch_all` frame.
+/// Unlike `catch`, no tag parameters are pushed — `catch_all` receives
+/// no exception payload.
+fn catch_all(
+    _: &mut Unstructured,
+    _: &Module,
+    builder: &mut CodeBuilder,
+    instructions: &mut Vec<Instruction>,
+) -> Result<()> {
+    let control = builder.allocs.controls.pop().unwrap();
+    // Pop the results for the previous try or catch
+    builder.pop_operands(&control.results);
+    builder.allocs.controls.push(Control {
+        kind: ControlKind::CatchAll,
+        ..control
+    });
+    instructions.push(Instruction::CatchAll);
+    Ok(())
+}
+
+/// Open a `loop` control frame with an arbitrary block type whose
+/// parameters are taken from the current operand stack. Note that a
+/// branch to a loop frame takes the loop's *params* (see
+/// `Control::label_types`).
+fn r#loop(
+    u: &mut Unstructured,
+    module: &Module,
+    builder: &mut CodeBuilder,
+    instructions: &mut Vec<Instruction>,
+) -> Result<()> {
+    let block_ty = builder.arbitrary_block_type(u, module)?;
+    let (params, results) = module.params_results(&block_ty);
+    let height = builder.allocs.operands.len() - params.len();
+    builder.allocs.controls.push(Control {
+        kind: ControlKind::Loop,
+        params,
+        results,
+        height,
+    });
+    instructions.push(Instruction::Loop(block_ty));
+    Ok(())
+}
+
+/// `if` needs an i32 condition on top of the stack.
+#[inline]
+fn if_valid(_: &Module, builder: &mut CodeBuilder) -> bool {
+    builder.type_on_stack(ValType::I32)
+}
+
+/// Pop the i32 condition and open an `if` control frame with an
+/// arbitrary block type whose parameters are taken from the stack.
+fn r#if(
+    u: &mut Unstructured,
+    module: &Module,
+    builder: &mut CodeBuilder,
+    instructions: &mut Vec<Instruction>,
+) -> Result<()> {
+    builder.pop_operands(&[ValType::I32]);
+
+    let block_ty = builder.arbitrary_block_type(u, module)?;
+    let (params, results) = module.params_results(&block_ty);
+    let height = builder.allocs.operands.len() - params.len();
+    builder.allocs.controls.push(Control {
+        kind: ControlKind::If,
+        params,
+        results,
+        height,
+    });
+    instructions.push(Instruction::If(block_ty));
+    Ok(())
+}
+
+/// `else` is valid only when the innermost frame is an `if` whose
+/// results are exactly what's on the operand stack right now.
+#[inline]
+fn else_valid(_: &Module, builder: &mut CodeBuilder) -> bool {
+    let last_control = builder.allocs.controls.last().unwrap();
+    last_control.kind == ControlKind::If
+        && builder.operands().len() == last_control.results.len()
+        && builder.types_on_stack(&last_control.results)
+}
+
+/// Emit `else`: swap the `if` frame's results for its params on the
+/// operand stack, and re-push the frame as a plain `Block` so that
+/// `else_valid` can't emit a second `else` for it.
+fn r#else(
+    _: &mut Unstructured,
+    _: &Module,
+    builder: &mut CodeBuilder,
+    instructions: &mut Vec<Instruction>,
+) -> Result<()> {
+    let control = builder.allocs.controls.pop().unwrap();
+    builder.pop_operands(&control.results);
+    builder.push_operands(&control.params);
+    builder.allocs.controls.push(Control {
+        kind: ControlKind::Block,
+        ..control
+    });
+    instructions.push(Instruction::Else);
+    Ok(())
+}
+
+/// `end` is valid when there's a closable frame (not the function's own
+/// outermost frame) whose results are exactly on the stack, and the
+/// frame isn't an `if` that still owes an `else`.
+#[inline]
+fn end_valid(_: &Module, builder: &mut CodeBuilder) -> bool {
+    // Note: first control frame is the function return's control frame, which
+    // does not have an associated `end`.
+    if builder.allocs.controls.len() <= 1 {
+        return false;
+    }
+    let control = builder.allocs.controls.last().unwrap();
+    builder.operands().len() == control.results.len()
+        && builder.types_on_stack(&control.results)
+        // `if`s that don't leave the stack as they found it must have an
+        // `else`.
+        && !(control.kind == ControlKind::If && control.params != control.results)
+}
+
+/// Emit `end`, closing the innermost control frame. The frame's results
+/// are already on the stack (guaranteed by `end_valid`), so the operand
+/// stack needs no adjustment.
+fn end(
+    _u: &mut Unstructured,
+    _module: &Module,
+    builder: &mut CodeBuilder,
+    instructions: &mut Vec<Instruction>,
+) -> Result<()> {
+    instructions.push(Instruction::End);
+    builder.allocs.controls.pop();
+    Ok(())
+}
+
+/// `br` is valid when at least one in-scope label's branch types are on
+/// the operand stack.
+#[inline]
+fn br_valid(_: &Module, builder: &mut CodeBuilder) -> bool {
+    builder
+        .allocs
+        .controls
+        .iter()
+        .any(|l| builder.label_types_on_stack(l))
+}
+
+/// Emit a `br` to an arbitrary label whose branch types are currently
+/// on the stack, popping those types (the branch consumes them).
+fn br(
+    u: &mut Unstructured,
+    _: &Module,
+    builder: &mut CodeBuilder,
+    instructions: &mut Vec<Instruction>,
+) -> Result<()> {
+    // Count the candidate labels, then pick the i-th one counting from
+    // the innermost frame (relative depth encoding).
+    let n = builder
+        .allocs
+        .controls
+        .iter()
+        .filter(|l| builder.label_types_on_stack(l))
+        .count();
+    debug_assert!(n > 0);
+    let i = u.int_in_range(0..=n - 1)?;
+    let (target, _) = builder
+        .allocs
+        .controls
+        .iter()
+        .rev()
+        .enumerate()
+        .filter(|(_, l)| builder.label_types_on_stack(l))
+        .nth(i)
+        .unwrap();
+    let control = &builder.allocs.controls[builder.allocs.controls.len() - 1 - target];
+    let tys = control.label_types().to_vec();
+    builder.pop_operands(&tys);
+    instructions.push(Instruction::Br(target as u32));
+    Ok(())
+}
+
+/// `br_if` needs an i32 condition on top of the stack *and*, beneath
+/// it, the branch types of some label. The condition is temporarily
+/// popped so the label check looks underneath it, then restored.
+#[inline]
+fn br_if_valid(_: &Module, builder: &mut CodeBuilder) -> bool {
+    if !builder.type_on_stack(ValType::I32) {
+        return false;
+    }
+    let ty = builder.allocs.operands.pop().unwrap();
+    let is_valid = builder
+        .allocs
+        .controls
+        .iter()
+        .any(|l| builder.label_types_on_stack(l));
+    builder.allocs.operands.push(ty);
+    is_valid
+}
+
+/// Emit a `br_if` to an arbitrary label whose branch types are on the
+/// stack. Only the i32 condition is popped — `br_if` leaves the label
+/// types on the stack for the fall-through path.
+fn br_if(
+    u: &mut Unstructured,
+    _: &Module,
+    builder: &mut CodeBuilder,
+    instructions: &mut Vec<Instruction>,
+) -> Result<()> {
+    builder.pop_operands(&[ValType::I32]);
+
+    let n = builder
+        .allocs
+        .controls
+        .iter()
+        .filter(|l| builder.label_types_on_stack(l))
+        .count();
+    debug_assert!(n > 0);
+    let i = u.int_in_range(0..=n - 1)?;
+    let (target, _) = builder
+        .allocs
+        .controls
+        .iter()
+        .rev()
+        .enumerate()
+        .filter(|(_, l)| builder.label_types_on_stack(l))
+        .nth(i)
+        .unwrap();
+    instructions.push(Instruction::BrIf(target as u32));
+    Ok(())
+}
+
+/// `br_table` needs an i32 selector on top of the stack and, beneath
+/// it, a valid `br` target. As in `br_if_valid`, the selector is
+/// temporarily popped for the check and then restored.
+#[inline]
+fn br_table_valid(module: &Module, builder: &mut CodeBuilder) -> bool {
+    if !builder.type_on_stack(ValType::I32) {
+        return false;
+    }
+    let ty = builder.allocs.operands.pop().unwrap();
+    let is_valid = br_valid(module, builder);
+    builder.allocs.operands.push(ty);
+    is_valid
+}
+
+/// Emit a `br_table`: pick an arbitrary default target whose branch
+/// types are on the stack, then include as table entries every label
+/// with identical branch types (all targets must agree), popping the
+/// selector and the label types.
+fn br_table(
+    u: &mut Unstructured,
+    _: &Module,
+    builder: &mut CodeBuilder,
+    instructions: &mut Vec<Instruction>,
+) -> Result<()> {
+    builder.pop_operands(&[ValType::I32]);
+
+    let n = builder
+        .allocs
+        .controls
+        .iter()
+        .filter(|l| builder.label_types_on_stack(l))
+        .count();
+    debug_assert!(n > 0);
+
+    let i = u.int_in_range(0..=n - 1)?;
+    let (default_target, _) = builder
+        .allocs
+        .controls
+        .iter()
+        .rev()
+        .enumerate()
+        .filter(|(_, l)| builder.label_types_on_stack(l))
+        .nth(i)
+        .unwrap();
+    let control = &builder.allocs.controls[builder.allocs.controls.len() - 1 - default_target];
+
+    // Every table entry must have the same label types as the default.
+    let targets = builder
+        .allocs
+        .controls
+        .iter()
+        .rev()
+        .enumerate()
+        .filter(|(_, l)| l.label_types() == control.label_types())
+        .map(|(t, _)| t as u32)
+        .collect();
+
+    let tys = control.label_types().to_vec();
+    builder.pop_operands(&tys);
+
+    instructions.push(Instruction::BrTable(targets, default_target as u32));
+    Ok(())
+}
+
+#[inline]
+fn return_valid(_: &Module, builder: &mut CodeBuilder) -> bool {
+ builder.label_types_on_stack(&builder.allocs.controls[0])
+}
+
+fn r#return(
+ _: &mut Unstructured,
+ _: &Module,
+ builder: &mut CodeBuilder,
+ instructions: &mut Vec<Instruction>,
+) -> Result<()> {
+ let results = builder.allocs.controls[0].results.clone();
+ builder.pop_operands(&results);
+ instructions.push(Instruction::Return);
+ Ok(())
+}
+
+#[inline]
+fn call_valid(_: &Module, builder: &mut CodeBuilder) -> bool {
+ builder
+ .allocs
+ .functions
+ .keys()
+ .any(|func_ty| builder.types_on_stack(&func_ty.params))
+}
+
// Emit a `call` to a randomly chosen function whose parameter types are on
// the operand stack.
fn call(
    u: &mut Unstructured,
    module: &Module,
    builder: &mut CodeBuilder,
    instructions: &mut Vec<Instruction>,
) -> Result<()> {
    // `allocs.functions` maps each signature to the indices of functions with
    // that signature; flatten the callable ones into a single candidate list.
    let candidates = builder
        .allocs
        .functions
        .iter()
        .filter(|(func_ty, _)| builder.types_on_stack(&func_ty.params))
        .flat_map(|(_, v)| v.iter().copied())
        .collect::<Vec<_>>();
    assert!(candidates.len() > 0);
    let i = u.int_in_range(0..=candidates.len() - 1)?;
    let (func_idx, ty) = module.funcs().nth(candidates[i] as usize).unwrap();
    // Model the call's stack effect: params consumed, results produced.
    builder.pop_operands(&ty.params);
    builder.push_operands(&ty.results);
    instructions.push(Instruction::Call(func_idx as u32));
    Ok(())
}
+
#[inline]
fn call_indirect_valid(module: &Module, builder: &mut CodeBuilder) -> bool {
    // Need at least one funcref table plus an `i32` table index on the stack.
    if builder.allocs.funcref_tables.is_empty() || !builder.type_on_stack(ValType::I32) {
        return false;
    }
    if module.config.disallow_traps() {
        // We have no way to reflect, at run time, on a `funcref` in
        // the `i`th slot in a table and dynamically avoid trapping
        // `call_indirect`s. Therefore, we can't emit *any*
        // `call_indirect` instructions if we want to avoid traps.
        return false;
    }
    // Temporarily pop the table index so we can check whether some function
    // type's parameters sit beneath it; restore the stack before returning.
    let ty = builder.allocs.operands.pop().unwrap();
    let is_valid = module
        .func_types()
        .any(|(_, ty)| builder.types_on_stack(&ty.params));
    builder.allocs.operands.push(ty);
    is_valid
}
+
// Emit a `call_indirect` through a randomly chosen funcref table, using a
// randomly chosen signature whose parameters are on the stack.
fn call_indirect(
    u: &mut Unstructured,
    module: &Module,
    builder: &mut CodeBuilder,
    instructions: &mut Vec<Instruction>,
) -> Result<()> {
    // Pop the i32 table index.
    builder.pop_operands(&[ValType::I32]);

    let choices = module
        .func_types()
        .filter(|(_, ty)| builder.types_on_stack(&ty.params))
        .collect::<Vec<_>>();
    let (type_idx, ty) = u.choose(&choices)?;
    // Model the stack effect of the chosen signature.
    builder.pop_operands(&ty.params);
    builder.push_operands(&ty.results);
    let table = *u.choose(&builder.allocs.funcref_tables)?;
    instructions.push(Instruction::CallIndirect {
        ty: *type_idx as u32,
        table,
    });
    Ok(())
}
+
#[inline]
fn return_call_valid(module: &Module, builder: &mut CodeBuilder) -> bool {
    if !module.config.tail_call_enabled() {
        return false;
    }

    // Beyond a plain call's requirements, a tail call needs the callee's
    // results to exactly match the current function's results (frame 0 on
    // the control stack is the function itself).
    builder.allocs.functions.keys().any(|func_ty| {
        builder.types_on_stack(&func_ty.params)
            && builder.allocs.controls[0].label_types() == &func_ty.results
    })
}
+
// Emit a `return_call` (tail call) to a randomly chosen function that takes
// the params currently on the stack and returns exactly what the current
// function returns.
fn return_call(
    u: &mut Unstructured,
    module: &Module,
    builder: &mut CodeBuilder,
    instructions: &mut Vec<Instruction>,
) -> Result<()> {
    // Flatten the per-signature function-index lists into one candidate list.
    let candidates = builder
        .allocs
        .functions
        .iter()
        .filter(|(func_ty, _)| {
            builder.types_on_stack(&func_ty.params)
                && builder.allocs.controls[0].label_types() == &func_ty.results
        })
        .flat_map(|(_, v)| v.iter().copied())
        .collect::<Vec<_>>();
    assert!(candidates.len() > 0);
    let i = u.int_in_range(0..=candidates.len() - 1)?;
    let (func_idx, ty) = module.funcs().nth(candidates[i] as usize).unwrap();
    // Model the stack effect as for a normal call.
    builder.pop_operands(&ty.params);
    builder.push_operands(&ty.results);
    instructions.push(Instruction::ReturnCall(func_idx as u32));
    Ok(())
}
+
#[inline]
fn return_call_indirect_valid(module: &Module, builder: &mut CodeBuilder) -> bool {
    // Requires the tail-call proposal, a funcref table, and an `i32` table
    // index on top of the stack.
    if !module.config.tail_call_enabled()
        || builder.allocs.funcref_tables.is_empty()
        || !builder.type_on_stack(ValType::I32)
    {
        return false;
    }

    if module.config.disallow_traps() {
        // See comment in `call_indirect_valid`; same applies here.
        return false;
    }

    // Temporarily pop the table index, then look for a signature that both
    // matches the stack and returns the current function's results; restore
    // the stack before returning.
    let ty = builder.allocs.operands.pop().unwrap();
    let is_valid = module.func_types().any(|(_, ty)| {
        builder.types_on_stack(&ty.params)
            && builder.allocs.controls[0].label_types() == &ty.results
    });
    builder.allocs.operands.push(ty);
    is_valid
}
+
// Emit a `return_call_indirect` (indirect tail call) with a randomly chosen
// signature that matches both the stack and the current function's results.
fn return_call_indirect(
    u: &mut Unstructured,
    module: &Module,
    builder: &mut CodeBuilder,
    instructions: &mut Vec<Instruction>,
) -> Result<()> {
    // Pop the i32 table index.
    builder.pop_operands(&[ValType::I32]);

    let choices = module
        .func_types()
        .filter(|(_, ty)| {
            builder.types_on_stack(&ty.params)
                && builder.allocs.controls[0].label_types() == &ty.results
        })
        .collect::<Vec<_>>();
    let (type_idx, ty) = u.choose(&choices)?;
    // Model the stack effect of the chosen signature.
    builder.pop_operands(&ty.params);
    builder.push_operands(&ty.results);
    let table = *u.choose(&builder.allocs.funcref_tables)?;
    instructions.push(Instruction::ReturnCallIndirect {
        ty: *type_idx as u32,
        table,
    });
    Ok(())
}
+
+#[inline]
+fn throw_valid(module: &Module, builder: &mut CodeBuilder) -> bool {
+ module.config.exceptions_enabled()
+ && builder
+ .allocs
+ .tags
+ .keys()
+ .any(|k| builder.types_on_stack(k))
+}
+
// Emit a `throw` of a randomly chosen tag whose parameter types are on the
// operand stack.
fn throw(
    u: &mut Unstructured,
    module: &Module,
    builder: &mut CodeBuilder,
    instructions: &mut Vec<Instruction>,
) -> Result<()> {
    // `allocs.tags` maps parameter-type lists to tag indices; flatten the
    // usable ones into a single candidate list.
    let candidates = builder
        .allocs
        .tags
        .iter()
        .filter(|(k, _)| builder.types_on_stack(k))
        .flat_map(|(_, v)| v.iter().copied())
        .collect::<Vec<_>>();
    assert!(candidates.len() > 0);
    let i = u.int_in_range(0..=candidates.len() - 1)?;
    let (tag_idx, tag_type) = module.tags().nth(candidates[i] as usize).unwrap();
    // Tags have no results, throwing cannot return
    assert!(tag_type.func_type.results.len() == 0);
    builder.pop_operands(&tag_type.func_type.params);
    instructions.push(Instruction::Throw(tag_idx as u32));
    Ok(())
}
+
+#[inline]
+fn rethrow_valid(module: &Module, builder: &mut CodeBuilder) -> bool {
+ // There must be a catch or catch_all control on the stack
+ module.config.exceptions_enabled()
+ && builder
+ .allocs
+ .controls
+ .iter()
+ .any(|l| l.kind == ControlKind::Catch || l.kind == ControlKind::CatchAll)
+}
+
// Emit a `rethrow` targeting a randomly chosen enclosing `catch`/`catch_all`
// block; the relative label index identifies whose exception to re-raise.
fn rethrow(
    u: &mut Unstructured,
    _: &Module,
    builder: &mut CodeBuilder,
    instructions: &mut Vec<Instruction>,
) -> Result<()> {
    // Count the catch frames so we can pick one uniformly.
    let n = builder
        .allocs
        .controls
        .iter()
        .filter(|l| l.kind == ControlKind::Catch || l.kind == ControlKind::CatchAll)
        .count();
    debug_assert!(n > 0);
    let i = u.int_in_range(0..=n - 1)?;
    // `rev()` + `enumerate()` yields relative label indices, innermost first.
    let (target, _) = builder
        .allocs
        .controls
        .iter()
        .rev()
        .enumerate()
        .filter(|(_, l)| l.kind == ControlKind::Catch || l.kind == ControlKind::CatchAll)
        .nth(i)
        .unwrap();
    instructions.push(Instruction::Rethrow(target as u32));
    Ok(())
}
+
+#[inline]
+fn drop_valid(_: &Module, builder: &mut CodeBuilder) -> bool {
+ !builder.operands().is_empty()
+}
+
+fn drop(
+ _: &mut Unstructured,
+ _: &Module,
+ builder: &mut CodeBuilder,
+ instructions: &mut Vec<Instruction>,
+) -> Result<()> {
+ builder.allocs.operands.pop();
+ instructions.push(Instruction::Drop);
+ Ok(())
+}
+
#[inline]
fn select_valid(_: &Module, builder: &mut CodeBuilder) -> bool {
    // `select` needs an i32 condition on top plus two more operands below it.
    if !(builder.operands().len() >= 3 && builder.type_on_stack(ValType::I32)) {
        return false;
    }
    // The two selected-from operands must have equal types; a `None` operand
    // type (unknown, e.g. from unreachable code) unifies with anything.
    let t = builder.operands()[builder.operands().len() - 2];
    let u = builder.operands()[builder.operands().len() - 3];
    t.is_none() || u.is_none() || t == u
}
+
// Emit a `select`, using the explicitly-typed form when the operand type is
// a reference type (as the reference-types proposal requires).
fn select(
    _: &mut Unstructured,
    _: &Module,
    builder: &mut CodeBuilder,
    instructions: &mut Vec<Instruction>,
) -> Result<()> {
    // Pop the i32 condition and both candidate values, then push back the
    // unified result type: `or` picks whichever of the two types is known.
    builder.allocs.operands.pop();
    let t = builder.allocs.operands.pop().unwrap();
    let u = builder.allocs.operands.pop().unwrap();
    let ty = t.or(u);
    builder.allocs.operands.push(ty);
    match ty {
        // Reference types need `select (result t)`.
        Some(ty @ ValType::Ref(_)) => instructions.push(Instruction::TypedSelect(ty)),
        // Numeric/vector types — and a fully unknown type — use plain select.
        Some(ValType::I32) | Some(ValType::I64) | Some(ValType::F32) | Some(ValType::F64)
        | Some(ValType::V128) | None => instructions.push(Instruction::Select),
    }
    Ok(())
}
+
+#[inline]
+fn local_get_valid(_: &Module, builder: &mut CodeBuilder) -> bool {
+ !builder.func_ty.params.is_empty() || !builder.locals.is_empty()
+}
+
// Emit a `local.get` of a randomly chosen parameter or local, pushing its
// value type onto the operand stack.
fn local_get(
    u: &mut Unstructured,
    _: &Module,
    builder: &mut CodeBuilder,
    instructions: &mut Vec<Instruction>,
) -> Result<()> {
    // Wasm's local index space is parameters first, then declared locals.
    let num_params = builder.func_ty.params.len();
    let n = num_params + builder.locals.len();
    debug_assert!(n > 0);
    let i = u.int_in_range(0..=n - 1)?;
    builder.allocs.operands.push(Some(if i < num_params {
        builder.func_ty.params[i]
    } else {
        builder.locals[i - num_params]
    }));
    instructions.push(Instruction::LocalGet(i as u32));
    Ok(())
}
+
+#[inline]
+fn local_set_valid(_: &Module, builder: &mut CodeBuilder) -> bool {
+ builder
+ .func_ty
+ .params
+ .iter()
+ .chain(builder.locals.iter())
+ .any(|ty| builder.type_on_stack(*ty))
+}
+
// Emit a `local.set` to a randomly chosen parameter or local whose type
// matches the top of the operand stack.
fn local_set(
    u: &mut Unstructured,
    _: &Module,
    builder: &mut CodeBuilder,
    instructions: &mut Vec<Instruction>,
) -> Result<()> {
    // Count the settable locals (params first, then locals — matching wasm's
    // local index space) so we can pick one uniformly.
    let n = builder
        .func_ty
        .params
        .iter()
        .chain(builder.locals.iter())
        .filter(|ty| builder.type_on_stack(**ty))
        .count();
    debug_assert!(n > 0);
    let i = u.int_in_range(0..=n - 1)?;
    // Map the choice back to its index in the local index space.
    let (j, _) = builder
        .func_ty
        .params
        .iter()
        .chain(builder.locals.iter())
        .enumerate()
        .filter(|(_, ty)| builder.type_on_stack(**ty))
        .nth(i)
        .unwrap();
    // `local.set` consumes the stored value.
    builder.allocs.operands.pop();
    instructions.push(Instruction::LocalSet(j as u32));
    Ok(())
}
+
// Emit a `local.tee` to a randomly chosen parameter or local whose type
// matches the top of the stack. Unlike `local.set`, the value is left on the
// stack, so nothing is popped from the type model. (Validity is shared with
// `local_set_valid`, since the requirements are identical.)
fn local_tee(
    u: &mut Unstructured,
    _: &Module,
    builder: &mut CodeBuilder,
    instructions: &mut Vec<Instruction>,
) -> Result<()> {
    // Count the matching locals (params first, then locals).
    let n = builder
        .func_ty
        .params
        .iter()
        .chain(builder.locals.iter())
        .filter(|ty| builder.type_on_stack(**ty))
        .count();
    debug_assert!(n > 0);
    let i = u.int_in_range(0..=n - 1)?;
    // Map the choice back to its index in the local index space.
    let (j, _) = builder
        .func_ty
        .params
        .iter()
        .chain(builder.locals.iter())
        .enumerate()
        .filter(|(_, ty)| builder.type_on_stack(**ty))
        .nth(i)
        .unwrap();
    instructions.push(Instruction::LocalTee(j as u32));
    Ok(())
}
+
+#[inline]
+fn global_get_valid(module: &Module, _: &mut CodeBuilder) -> bool {
+ module.globals.len() > 0
+}
+
+fn global_get(
+ u: &mut Unstructured,
+ module: &Module,
+ builder: &mut CodeBuilder,
+ instructions: &mut Vec<Instruction>,
+) -> Result<()> {
+ debug_assert!(module.globals.len() > 0);
+ let global_idx = u.int_in_range(0..=module.globals.len() - 1)?;
+ builder
+ .allocs
+ .operands
+ .push(Some(module.globals[global_idx].val_type));
+ instructions.push(Instruction::GlobalGet(global_idx as u32));
+ Ok(())
+}
+
+#[inline]
+fn global_set_valid(_: &Module, builder: &mut CodeBuilder) -> bool {
+ builder
+ .allocs
+ .mutable_globals
+ .iter()
+ .any(|(ty, _)| builder.type_on_stack(*ty))
+}
+
// Emit a `global.set` to a randomly chosen mutable global whose type matches
// the top of the operand stack.
fn global_set(
    u: &mut Unstructured,
    _: &Module,
    builder: &mut CodeBuilder,
    instructions: &mut Vec<Instruction>,
) -> Result<()> {
    // `mutable_globals` maps a value type to the indices of the mutable
    // globals of that type; take the first type that matches the stack and
    // choose among its globals.
    let candidates = builder
        .allocs
        .mutable_globals
        .iter()
        .find(|(ty, _)| builder.type_on_stack(**ty))
        .unwrap()
        .1;
    let i = u.int_in_range(0..=candidates.len() - 1)?;
    // `global.set` consumes the stored value.
    builder.allocs.operands.pop();
    instructions.push(Instruction::GlobalSet(candidates[i]));
    Ok(())
}
+
+#[inline]
+fn have_memory(module: &Module, _: &mut CodeBuilder) -> bool {
+ module.memories.len() > 0
+}
+
+#[inline]
+fn have_memory_and_offset(_module: &Module, builder: &mut CodeBuilder) -> bool {
+ (builder.allocs.memory32.len() > 0 && builder.type_on_stack(ValType::I32))
+ || (builder.allocs.memory64.len() > 0 && builder.type_on_stack(ValType::I64))
+}
+
+#[inline]
+fn have_data(module: &Module, _: &mut CodeBuilder) -> bool {
+ module.data.len() > 0
+}
+
// Full-width scalar `load` helpers. Each chooses a memory argument via
// `mem_arg` (the slice presumably lists candidate log2 alignments, capped at
// the access's natural size, and `mem_arg` evidently handles the address
// operand — not visible here; TODO confirm against `mem_arg`), pushes the
// loaded value's type, and routes through `no_traps::load` when the config
// forbids trapping.

// i32.load: pushes an i32; alignments up to 2^2 = 4 bytes.
fn i32_load(
    u: &mut Unstructured,
    module: &Module,
    builder: &mut CodeBuilder,
    instructions: &mut Vec<Instruction>,
) -> Result<()> {
    let memarg = mem_arg(u, module, builder, &[0, 1, 2])?;
    builder.allocs.operands.push(Some(ValType::I32));
    if module.config.disallow_traps() {
        no_traps::load(Instruction::I32Load(memarg), module, builder, instructions);
    } else {
        instructions.push(Instruction::I32Load(memarg));
    }
    Ok(())
}

// i64.load: pushes an i64; alignments up to 2^3 = 8 bytes.
fn i64_load(
    u: &mut Unstructured,
    module: &Module,
    builder: &mut CodeBuilder,
    instructions: &mut Vec<Instruction>,
) -> Result<()> {
    let memarg = mem_arg(u, module, builder, &[0, 1, 2, 3])?;
    builder.allocs.operands.push(Some(ValType::I64));
    if module.config.disallow_traps() {
        no_traps::load(Instruction::I64Load(memarg), module, builder, instructions);
    } else {
        instructions.push(Instruction::I64Load(memarg));
    }
    Ok(())
}

// f32.load: pushes an f32; alignments up to 4 bytes.
fn f32_load(
    u: &mut Unstructured,
    module: &Module,
    builder: &mut CodeBuilder,
    instructions: &mut Vec<Instruction>,
) -> Result<()> {
    let memarg = mem_arg(u, module, builder, &[0, 1, 2])?;
    builder.allocs.operands.push(Some(ValType::F32));
    if module.config.disallow_traps() {
        no_traps::load(Instruction::F32Load(memarg), module, builder, instructions);
    } else {
        instructions.push(Instruction::F32Load(memarg));
    }
    Ok(())
}

// f64.load: pushes an f64; alignments up to 8 bytes.
fn f64_load(
    u: &mut Unstructured,
    module: &Module,
    builder: &mut CodeBuilder,
    instructions: &mut Vec<Instruction>,
) -> Result<()> {
    let memarg = mem_arg(u, module, builder, &[0, 1, 2, 3])?;
    builder.allocs.operands.push(Some(ValType::F64));
    if module.config.disallow_traps() {
        no_traps::load(Instruction::F64Load(memarg), module, builder, instructions);
    } else {
        instructions.push(Instruction::F64Load(memarg));
    }
    Ok(())
}
+
// Narrow i32 loads: load 1 or 2 bytes and sign-/zero-extend to i32. Same
// shape as the full-width load helpers above; the narrower access size
// restricts the candidate alignments passed to `mem_arg`.

// i32.load8_s: sign-extending 1-byte load; byte alignment only.
fn i32_load_8_s(
    u: &mut Unstructured,
    module: &Module,
    builder: &mut CodeBuilder,
    instructions: &mut Vec<Instruction>,
) -> Result<()> {
    let memarg = mem_arg(u, module, builder, &[0])?;
    builder.allocs.operands.push(Some(ValType::I32));
    if module.config.disallow_traps() {
        no_traps::load(
            Instruction::I32Load8S(memarg),
            module,
            builder,
            instructions,
        );
    } else {
        instructions.push(Instruction::I32Load8S(memarg));
    }
    Ok(())
}

// i32.load8_u: zero-extending 1-byte load.
fn i32_load_8_u(
    u: &mut Unstructured,
    module: &Module,
    builder: &mut CodeBuilder,
    instructions: &mut Vec<Instruction>,
) -> Result<()> {
    let memarg = mem_arg(u, module, builder, &[0])?;
    builder.allocs.operands.push(Some(ValType::I32));
    if module.config.disallow_traps() {
        no_traps::load(
            Instruction::I32Load8U(memarg),
            module,
            builder,
            instructions,
        );
    } else {
        instructions.push(Instruction::I32Load8U(memarg));
    }
    Ok(())
}

// i32.load16_s: sign-extending 2-byte load.
fn i32_load_16_s(
    u: &mut Unstructured,
    module: &Module,
    builder: &mut CodeBuilder,
    instructions: &mut Vec<Instruction>,
) -> Result<()> {
    let memarg = mem_arg(u, module, builder, &[0, 1])?;
    builder.allocs.operands.push(Some(ValType::I32));
    if module.config.disallow_traps() {
        no_traps::load(
            Instruction::I32Load16S(memarg),
            module,
            builder,
            instructions,
        );
    } else {
        instructions.push(Instruction::I32Load16S(memarg));
    }
    Ok(())
}

// i32.load16_u: zero-extending 2-byte load.
fn i32_load_16_u(
    u: &mut Unstructured,
    module: &Module,
    builder: &mut CodeBuilder,
    instructions: &mut Vec<Instruction>,
) -> Result<()> {
    let memarg = mem_arg(u, module, builder, &[0, 1])?;
    builder.allocs.operands.push(Some(ValType::I32));
    if module.config.disallow_traps() {
        no_traps::load(
            Instruction::I32Load16U(memarg),
            module,
            builder,
            instructions,
        );
    } else {
        instructions.push(Instruction::I32Load16U(memarg));
    }
    Ok(())
}
+
// Narrow i64 loads: load 1, 2, or 4 bytes and sign-/zero-extend to i64.
// Same shape as the i32 variants above.

// i64.load8_s: sign-extending 1-byte load.
fn i64_load_8_s(
    u: &mut Unstructured,
    module: &Module,
    builder: &mut CodeBuilder,
    instructions: &mut Vec<Instruction>,
) -> Result<()> {
    let memarg = mem_arg(u, module, builder, &[0])?;
    builder.allocs.operands.push(Some(ValType::I64));
    if module.config.disallow_traps() {
        no_traps::load(
            Instruction::I64Load8S(memarg),
            module,
            builder,
            instructions,
        );
    } else {
        instructions.push(Instruction::I64Load8S(memarg));
    }
    Ok(())
}

// i64.load16_s: sign-extending 2-byte load.
fn i64_load_16_s(
    u: &mut Unstructured,
    module: &Module,
    builder: &mut CodeBuilder,
    instructions: &mut Vec<Instruction>,
) -> Result<()> {
    let memarg = mem_arg(u, module, builder, &[0, 1])?;
    builder.allocs.operands.push(Some(ValType::I64));
    if module.config.disallow_traps() {
        no_traps::load(
            Instruction::I64Load16S(memarg),
            module,
            builder,
            instructions,
        );
    } else {
        instructions.push(Instruction::I64Load16S(memarg));
    }
    Ok(())
}

// i64.load32_s: sign-extending 4-byte load.
fn i64_load_32_s(
    u: &mut Unstructured,
    module: &Module,
    builder: &mut CodeBuilder,
    instructions: &mut Vec<Instruction>,
) -> Result<()> {
    let memarg = mem_arg(u, module, builder, &[0, 1, 2])?;
    builder.allocs.operands.push(Some(ValType::I64));
    if module.config.disallow_traps() {
        no_traps::load(
            Instruction::I64Load32S(memarg),
            module,
            builder,
            instructions,
        );
    } else {
        instructions.push(Instruction::I64Load32S(memarg));
    }
    Ok(())
}

// i64.load8_u: zero-extending 1-byte load.
fn i64_load_8_u(
    u: &mut Unstructured,
    module: &Module,
    builder: &mut CodeBuilder,
    instructions: &mut Vec<Instruction>,
) -> Result<()> {
    let memarg = mem_arg(u, module, builder, &[0])?;
    builder.allocs.operands.push(Some(ValType::I64));
    if module.config.disallow_traps() {
        no_traps::load(
            Instruction::I64Load8U(memarg),
            module,
            builder,
            instructions,
        );
    } else {
        instructions.push(Instruction::I64Load8U(memarg));
    }
    Ok(())
}

// i64.load16_u: zero-extending 2-byte load.
fn i64_load_16_u(
    u: &mut Unstructured,
    module: &Module,
    builder: &mut CodeBuilder,
    instructions: &mut Vec<Instruction>,
) -> Result<()> {
    let memarg = mem_arg(u, module, builder, &[0, 1])?;
    builder.allocs.operands.push(Some(ValType::I64));
    if module.config.disallow_traps() {
        no_traps::load(
            Instruction::I64Load16U(memarg),
            module,
            builder,
            instructions,
        );
    } else {
        instructions.push(Instruction::I64Load16U(memarg));
    }
    Ok(())
}

// i64.load32_u: zero-extending 4-byte load.
fn i64_load_32_u(
    u: &mut Unstructured,
    module: &Module,
    builder: &mut CodeBuilder,
    instructions: &mut Vec<Instruction>,
) -> Result<()> {
    let memarg = mem_arg(u, module, builder, &[0, 1, 2])?;
    builder.allocs.operands.push(Some(ValType::I64));
    if module.config.disallow_traps() {
        no_traps::load(
            Instruction::I64Load32U(memarg),
            module,
            builder,
            instructions,
        );
    } else {
        instructions.push(Instruction::I64Load32U(memarg));
    }
    Ok(())
}
+
+#[inline]
+fn store_valid(_module: &Module, builder: &mut CodeBuilder, f: impl Fn() -> ValType) -> bool {
+ (builder.allocs.memory32.len() > 0 && builder.types_on_stack(&[ValType::I32, f()]))
+ || (builder.allocs.memory64.len() > 0 && builder.types_on_stack(&[ValType::I64, f()]))
+}
+
// Integer store helpers: pop the value, choose a memory argument via
// `mem_arg` (the slice presumably lists candidate log2 alignments), and
// route through `no_traps::store` when traps are disallowed.

#[inline]
fn i32_store_valid(module: &Module, builder: &mut CodeBuilder) -> bool {
    store_valid(module, builder, || ValType::I32)
}

// i32.store: `[addr, i32] -> []`.
fn i32_store(
    u: &mut Unstructured,
    module: &Module,
    builder: &mut CodeBuilder,
    instructions: &mut Vec<Instruction>,
) -> Result<()> {
    builder.pop_operands(&[ValType::I32]);
    let memarg = mem_arg(u, module, builder, &[0, 1, 2])?;
    if module.config.disallow_traps() {
        no_traps::store(Instruction::I32Store(memarg), module, builder, instructions);
    } else {
        instructions.push(Instruction::I32Store(memarg));
    }
    Ok(())
}

#[inline]
fn i64_store_valid(module: &Module, builder: &mut CodeBuilder) -> bool {
    store_valid(module, builder, || ValType::I64)
}

// i64.store: `[addr, i64] -> []`.
fn i64_store(
    u: &mut Unstructured,
    module: &Module,
    builder: &mut CodeBuilder,
    instructions: &mut Vec<Instruction>,
) -> Result<()> {
    builder.pop_operands(&[ValType::I64]);
    let memarg = mem_arg(u, module, builder, &[0, 1, 2, 3])?;
    if module.config.disallow_traps() {
        no_traps::store(Instruction::I64Store(memarg), module, builder, instructions);
    } else {
        instructions.push(Instruction::I64Store(memarg));
    }
    Ok(())
}
+
// Floating-point store helpers; identical shape to the integer stores above.

#[inline]
fn f32_store_valid(module: &Module, builder: &mut CodeBuilder) -> bool {
    store_valid(module, builder, || ValType::F32)
}

// f32.store: `[addr, f32] -> []`.
fn f32_store(
    u: &mut Unstructured,
    module: &Module,
    builder: &mut CodeBuilder,
    instructions: &mut Vec<Instruction>,
) -> Result<()> {
    builder.pop_operands(&[ValType::F32]);
    let memarg = mem_arg(u, module, builder, &[0, 1, 2])?;
    if module.config.disallow_traps() {
        no_traps::store(Instruction::F32Store(memarg), module, builder, instructions);
    } else {
        instructions.push(Instruction::F32Store(memarg));
    }
    Ok(())
}

#[inline]
fn f64_store_valid(module: &Module, builder: &mut CodeBuilder) -> bool {
    store_valid(module, builder, || ValType::F64)
}

// f64.store: `[addr, f64] -> []`.
fn f64_store(
    u: &mut Unstructured,
    module: &Module,
    builder: &mut CodeBuilder,
    instructions: &mut Vec<Instruction>,
) -> Result<()> {
    builder.pop_operands(&[ValType::F64]);
    let memarg = mem_arg(u, module, builder, &[0, 1, 2, 3])?;
    if module.config.disallow_traps() {
        no_traps::store(Instruction::F64Store(memarg), module, builder, instructions);
    } else {
        instructions.push(Instruction::F64Store(memarg));
    }
    Ok(())
}
+
// Narrow store helpers: store only the low 1, 2, or 4 bytes of the popped
// integer. These share validity with the corresponding full-width store
// (`i32_store_valid`/`i64_store_valid`) since the operand types match.

// i32.store8: store the low byte of an i32.
fn i32_store_8(
    u: &mut Unstructured,
    module: &Module,
    builder: &mut CodeBuilder,
    instructions: &mut Vec<Instruction>,
) -> Result<()> {
    builder.pop_operands(&[ValType::I32]);
    let memarg = mem_arg(u, module, builder, &[0])?;
    if module.config.disallow_traps() {
        no_traps::store(
            Instruction::I32Store8(memarg),
            module,
            builder,
            instructions,
        );
    } else {
        instructions.push(Instruction::I32Store8(memarg));
    }
    Ok(())
}

// i32.store16: store the low two bytes of an i32.
fn i32_store_16(
    u: &mut Unstructured,
    module: &Module,
    builder: &mut CodeBuilder,
    instructions: &mut Vec<Instruction>,
) -> Result<()> {
    builder.pop_operands(&[ValType::I32]);
    let memarg = mem_arg(u, module, builder, &[0, 1])?;
    if module.config.disallow_traps() {
        no_traps::store(
            Instruction::I32Store16(memarg),
            module,
            builder,
            instructions,
        );
    } else {
        instructions.push(Instruction::I32Store16(memarg));
    }
    Ok(())
}

// i64.store8: store the low byte of an i64.
fn i64_store_8(
    u: &mut Unstructured,
    module: &Module,
    builder: &mut CodeBuilder,
    instructions: &mut Vec<Instruction>,
) -> Result<()> {
    builder.pop_operands(&[ValType::I64]);
    let memarg = mem_arg(u, module, builder, &[0])?;
    if module.config.disallow_traps() {
        no_traps::store(
            Instruction::I64Store8(memarg),
            module,
            builder,
            instructions,
        );
    } else {
        instructions.push(Instruction::I64Store8(memarg));
    }
    Ok(())
}

// i64.store16: store the low two bytes of an i64.
fn i64_store_16(
    u: &mut Unstructured,
    module: &Module,
    builder: &mut CodeBuilder,
    instructions: &mut Vec<Instruction>,
) -> Result<()> {
    builder.pop_operands(&[ValType::I64]);
    let memarg = mem_arg(u, module, builder, &[0, 1])?;
    if module.config.disallow_traps() {
        no_traps::store(
            Instruction::I64Store16(memarg),
            module,
            builder,
            instructions,
        );
    } else {
        instructions.push(Instruction::I64Store16(memarg));
    }
    Ok(())
}

// i64.store32: store the low four bytes of an i64.
fn i64_store_32(
    u: &mut Unstructured,
    module: &Module,
    builder: &mut CodeBuilder,
    instructions: &mut Vec<Instruction>,
) -> Result<()> {
    builder.pop_operands(&[ValType::I64]);
    let memarg = mem_arg(u, module, builder, &[0, 1, 2])?;
    if module.config.disallow_traps() {
        no_traps::store(
            Instruction::I64Store32(memarg),
            module,
            builder,
            instructions,
        );
    } else {
        instructions.push(Instruction::I64Store32(memarg));
    }
    Ok(())
}
+
// Emit a `memory.size` for a randomly chosen memory; pushes an i64 for
// 64-bit memories and an i32 otherwise.
fn memory_size(
    u: &mut Unstructured,
    module: &Module,
    builder: &mut CodeBuilder,
    instructions: &mut Vec<Instruction>,
) -> Result<()> {
    let i = u.int_in_range(0..=module.memories.len() - 1)?;
    // The result type follows the memory's index width.
    let ty = if module.memories[i].memory64 {
        ValType::I64
    } else {
        ValType::I32
    };
    builder.push_operands(&[ty]);
    instructions.push(Instruction::MemorySize(i as u32));
    Ok(())
}
+
+#[inline]
+fn memory_grow_valid(_module: &Module, builder: &mut CodeBuilder) -> bool {
+ (builder.allocs.memory32.len() > 0 && builder.type_on_stack(ValType::I32))
+ || (builder.allocs.memory64.len() > 0 && builder.type_on_stack(ValType::I64))
+}
+
// Emit a `memory.grow`; the page-delta operand's width (i32 vs i64) decides
// whether a 32-bit or 64-bit memory is grown.
fn memory_grow(
    u: &mut Unstructured,
    _module: &Module,
    builder: &mut CodeBuilder,
    instructions: &mut Vec<Instruction>,
) -> Result<()> {
    // Prefer the 32-bit form when an i32 is on top; otherwise the validity
    // check guarantees an i64 and a 64-bit memory are available.
    let ty = if builder.type_on_stack(ValType::I32) {
        ValType::I32
    } else {
        ValType::I64
    };
    let index = memory_index(u, builder, ty)?;
    // `memory.grow` consumes the delta and produces the old size, same type.
    builder.pop_operands(&[ty]);
    builder.push_operands(&[ty]);
    instructions.push(Instruction::MemoryGrow(index));
    Ok(())
}
+
#[inline]
fn memory_init_valid(module: &Module, builder: &mut CodeBuilder) -> bool {
    // `memory.init` needs bulk memory, a data segment, and `[dst, src, len]`
    // on the stack where `dst` matches the target memory's index width and
    // `src`/`len` are always i32s.
    module.config.bulk_memory_enabled()
        && have_data(module, builder)
        && !module.config.disallow_traps() // Non-trapping memory init not yet implemented
        && (builder.allocs.memory32.len() > 0
            && builder.types_on_stack(&[ValType::I32, ValType::I32, ValType::I32])
            || (builder.allocs.memory64.len() > 0
                && builder.types_on_stack(&[ValType::I64, ValType::I32, ValType::I32])))
}
+
// Emit a `memory.init` from a random data segment into a random memory.
fn memory_init(
    u: &mut Unstructured,
    module: &Module,
    builder: &mut CodeBuilder,
    instructions: &mut Vec<Instruction>,
) -> Result<()> {
    // Pop the i32 `len` and `src` operands first; the width of the remaining
    // `dst` operand tells us which kind of memory to target.
    builder.pop_operands(&[ValType::I32, ValType::I32]);
    let ty = if builder.type_on_stack(ValType::I32) {
        ValType::I32
    } else {
        ValType::I64
    };
    let mem = memory_index(u, builder, ty)?;
    let data_index = data_index(u, module)?;
    // Pop the `dst` address.
    builder.pop_operands(&[ty]);
    instructions.push(Instruction::MemoryInit { mem, data_index });
    Ok(())
}
+
#[inline]
fn memory_fill_valid(module: &Module, builder: &mut CodeBuilder) -> bool {
    // `memory.fill` needs bulk memory and `[dst, val, len]` on the stack,
    // where `dst` and `len` match the memory's index width and `val` is an
    // i32 byte value.
    module.config.bulk_memory_enabled()
        && !module.config.disallow_traps() // Non-trapping memory fill generation not yet implemented
        && (builder.allocs.memory32.len() > 0
            && builder.types_on_stack(&[ValType::I32, ValType::I32, ValType::I32])
            || (builder.allocs.memory64.len() > 0
                && builder.types_on_stack(&[ValType::I64, ValType::I32, ValType::I64])))
}
+
// Emit a `memory.fill` for a random memory compatible with the operand
// widths on the stack (`[dst, val, len]`; see `memory_fill_valid`).
fn memory_fill(
    u: &mut Unstructured,
    _module: &Module,
    builder: &mut CodeBuilder,
    instructions: &mut Vec<Instruction>,
) -> Result<()> {
    // The top operand (`len`) has the same width as the memory's indices.
    let ty = if builder.type_on_stack(ValType::I32) {
        ValType::I32
    } else {
        ValType::I64
    };
    let mem = memory_index(u, builder, ty)?;
    // Pop `dst`, `val`, and `len`.
    builder.pop_operands(&[ty, ValType::I32, ty]);
    instructions.push(Instruction::MemoryFill(mem));
    Ok(())
}
+
#[inline]
fn memory_copy_valid(module: &Module, builder: &mut CodeBuilder) -> bool {
    if !module.config.bulk_memory_enabled() {
        return false;
    }

    // The non-trapping case for memory copy has not yet been implemented,
    // so we are excluding them for now
    if module.config.disallow_traps() {
        return false;
    }

    // The stack is `[dst, src, len]`: `dst` matches the destination memory's
    // index width, `src` the source's, and `len` the narrower of the two.
    // Check each of the four width combinations in turn.

    // 64-bit -> 64-bit copy.
    if builder.types_on_stack(&[ValType::I64, ValType::I64, ValType::I64])
        && builder.allocs.memory64.len() > 0
    {
        return true;
    }
    // 32-bit -> 32-bit copy.
    if builder.types_on_stack(&[ValType::I32, ValType::I32, ValType::I32])
        && builder.allocs.memory32.len() > 0
    {
        return true;
    }
    // 32-bit source -> 64-bit destination.
    if builder.types_on_stack(&[ValType::I64, ValType::I32, ValType::I32])
        && builder.allocs.memory32.len() > 0
        && builder.allocs.memory64.len() > 0
    {
        return true;
    }
    // 64-bit source -> 32-bit destination.
    if builder.types_on_stack(&[ValType::I32, ValType::I64, ValType::I32])
        && builder.allocs.memory32.len() > 0
        && builder.allocs.memory64.len() > 0
    {
        return true;
    }
    false
}
+
// Emit a `memory.copy` between randomly chosen memories compatible with the
// `[dst, src, len]` operand widths on the stack (see `memory_copy_valid`).
fn memory_copy(
    u: &mut Unstructured,
    _module: &Module,
    builder: &mut CodeBuilder,
    instructions: &mut Vec<Instruction>,
) -> Result<()> {
    let (src_mem, dst_mem) = if builder.types_on_stack(&[ValType::I64, ValType::I64, ValType::I64])
    {
        // 64-bit -> 64-bit.
        builder.pop_operands(&[ValType::I64, ValType::I64, ValType::I64]);
        (
            memory_index(u, builder, ValType::I64)?,
            memory_index(u, builder, ValType::I64)?,
        )
    } else if builder.types_on_stack(&[ValType::I32, ValType::I32, ValType::I32]) {
        // 32-bit -> 32-bit.
        builder.pop_operands(&[ValType::I32, ValType::I32, ValType::I32]);
        (
            memory_index(u, builder, ValType::I32)?,
            memory_index(u, builder, ValType::I32)?,
        )
    } else if builder.types_on_stack(&[ValType::I64, ValType::I32, ValType::I32]) {
        // 32-bit source -> 64-bit destination.
        builder.pop_operands(&[ValType::I64, ValType::I32, ValType::I32]);
        (
            memory_index(u, builder, ValType::I32)?,
            memory_index(u, builder, ValType::I64)?,
        )
    } else if builder.types_on_stack(&[ValType::I32, ValType::I64, ValType::I32]) {
        // 64-bit source -> 32-bit destination.
        builder.pop_operands(&[ValType::I32, ValType::I64, ValType::I32]);
        (
            memory_index(u, builder, ValType::I64)?,
            memory_index(u, builder, ValType::I32)?,
        )
    } else {
        // `memory_copy_valid` guarantees one of the cases above matched.
        unreachable!()
    };
    instructions.push(Instruction::MemoryCopy { dst_mem, src_mem });
    Ok(())
}
+
+#[inline]
+fn data_drop_valid(module: &Module, builder: &mut CodeBuilder) -> bool {
+ have_data(module, builder) && module.config.bulk_memory_enabled()
+}
+
+fn data_drop(
+ u: &mut Unstructured,
+ module: &Module,
+ _builder: &mut CodeBuilder,
+ instructions: &mut Vec<Instruction>,
+) -> Result<()> {
+ instructions.push(Instruction::DataDrop(data_index(u, module)?));
+ Ok(())
+}
+
// `*.const` emitters: materialize an arbitrary constant of each scalar type
// (drawn from the fuzzer input) and push it onto the operand stack.

fn i32_const(
    u: &mut Unstructured,
    _: &Module,
    builder: &mut CodeBuilder,
    instructions: &mut Vec<Instruction>,
) -> Result<()> {
    let x = u.arbitrary()?;
    builder.push_operands(&[ValType::I32]);
    instructions.push(Instruction::I32Const(x));
    Ok(())
}

fn i64_const(
    u: &mut Unstructured,
    _: &Module,
    builder: &mut CodeBuilder,
    instructions: &mut Vec<Instruction>,
) -> Result<()> {
    let x = u.arbitrary()?;
    builder.push_operands(&[ValType::I64]);
    instructions.push(Instruction::I64Const(x));
    Ok(())
}

fn f32_const(
    u: &mut Unstructured,
    _: &Module,
    builder: &mut CodeBuilder,
    instructions: &mut Vec<Instruction>,
) -> Result<()> {
    let x = u.arbitrary()?;
    builder.push_operands(&[ValType::F32]);
    instructions.push(Instruction::F32Const(x));
    Ok(())
}

fn f64_const(
    u: &mut Unstructured,
    _: &Module,
    builder: &mut CodeBuilder,
    instructions: &mut Vec<Instruction>,
) -> Result<()> {
    let x = u.arbitrary()?;
    builder.push_operands(&[ValType::F64]);
    instructions.push(Instruction::F64Const(x));
    Ok(())
}
+
+#[inline]
+fn i32_on_stack(_: &Module, builder: &mut CodeBuilder) -> bool {
+ builder.type_on_stack(ValType::I32)
+}
+
// i32.eqz: `[i32] -> [i32]` (pushes 1 if the operand is zero, else 0).
fn i32_eqz(
    _: &mut Unstructured,
    _: &Module,
    builder: &mut CodeBuilder,
    instructions: &mut Vec<Instruction>,
) -> Result<()> {
    builder.pop_operands(&[ValType::I32]);
    builder.push_operands(&[ValType::I32]);
    instructions.push(Instruction::I32Eqz);
    Ok(())
}
+
+#[inline]
+fn i32_i32_on_stack(_: &Module, builder: &mut CodeBuilder) -> bool {
+ builder.types_on_stack(&[ValType::I32, ValType::I32])
+}
+
// i32 comparison operators. Every one of these is `[i32, i32] -> [i32]`:
// pop two i32 operands, push the boolean result as an i32.

fn i32_eq(
    _: &mut Unstructured,
    _: &Module,
    builder: &mut CodeBuilder,
    instructions: &mut Vec<Instruction>,
) -> Result<()> {
    builder.pop_operands(&[ValType::I32, ValType::I32]);
    builder.push_operands(&[ValType::I32]);
    instructions.push(Instruction::I32Eq);
    Ok(())
}

fn i32_ne(
    _: &mut Unstructured,
    _: &Module,
    builder: &mut CodeBuilder,
    instructions: &mut Vec<Instruction>,
) -> Result<()> {
    builder.pop_operands(&[ValType::I32, ValType::I32]);
    builder.push_operands(&[ValType::I32]);
    instructions.push(Instruction::I32Ne);
    Ok(())
}

fn i32_lt_s(
    _: &mut Unstructured,
    _: &Module,
    builder: &mut CodeBuilder,
    instructions: &mut Vec<Instruction>,
) -> Result<()> {
    builder.pop_operands(&[ValType::I32, ValType::I32]);
    builder.push_operands(&[ValType::I32]);
    instructions.push(Instruction::I32LtS);
    Ok(())
}

fn i32_lt_u(
    _: &mut Unstructured,
    _: &Module,
    builder: &mut CodeBuilder,
    instructions: &mut Vec<Instruction>,
) -> Result<()> {
    builder.pop_operands(&[ValType::I32, ValType::I32]);
    builder.push_operands(&[ValType::I32]);
    instructions.push(Instruction::I32LtU);
    Ok(())
}

fn i32_gt_s(
    _: &mut Unstructured,
    _: &Module,
    builder: &mut CodeBuilder,
    instructions: &mut Vec<Instruction>,
) -> Result<()> {
    builder.pop_operands(&[ValType::I32, ValType::I32]);
    builder.push_operands(&[ValType::I32]);
    instructions.push(Instruction::I32GtS);
    Ok(())
}

fn i32_gt_u(
    _: &mut Unstructured,
    _: &Module,
    builder: &mut CodeBuilder,
    instructions: &mut Vec<Instruction>,
) -> Result<()> {
    builder.pop_operands(&[ValType::I32, ValType::I32]);
    builder.push_operands(&[ValType::I32]);
    instructions.push(Instruction::I32GtU);
    Ok(())
}

fn i32_le_s(
    _: &mut Unstructured,
    _: &Module,
    builder: &mut CodeBuilder,
    instructions: &mut Vec<Instruction>,
) -> Result<()> {
    builder.pop_operands(&[ValType::I32, ValType::I32]);
    builder.push_operands(&[ValType::I32]);
    instructions.push(Instruction::I32LeS);
    Ok(())
}

fn i32_le_u(
    _: &mut Unstructured,
    _: &Module,
    builder: &mut CodeBuilder,
    instructions: &mut Vec<Instruction>,
) -> Result<()> {
    builder.pop_operands(&[ValType::I32, ValType::I32]);
    builder.push_operands(&[ValType::I32]);
    instructions.push(Instruction::I32LeU);
    Ok(())
}

fn i32_ge_s(
    _: &mut Unstructured,
    _: &Module,
    builder: &mut CodeBuilder,
    instructions: &mut Vec<Instruction>,
) -> Result<()> {
    builder.pop_operands(&[ValType::I32, ValType::I32]);
    builder.push_operands(&[ValType::I32]);
    instructions.push(Instruction::I32GeS);
    Ok(())
}

fn i32_ge_u(
    _: &mut Unstructured,
    _: &Module,
    builder: &mut CodeBuilder,
    instructions: &mut Vec<Instruction>,
) -> Result<()> {
    builder.pop_operands(&[ValType::I32, ValType::I32]);
    builder.push_operands(&[ValType::I32]);
    instructions.push(Instruction::I32GeU);
    Ok(())
}
+
+#[inline]
+fn i64_on_stack(_: &Module, builder: &mut CodeBuilder) -> bool {
+ builder.types_on_stack(&[ValType::I64])
+}
+
+fn i64_eqz(
+ _: &mut Unstructured,
+ _: &Module,
+ builder: &mut CodeBuilder,
+ instructions: &mut Vec<Instruction>,
+) -> Result<()> {
+ builder.pop_operands(&[ValType::I64]);
+ builder.push_operands(&[ValType::I32]);
+ instructions.push(Instruction::I64Eqz);
+ Ok(())
+}
+
+#[inline]
+fn i64_i64_on_stack(_: &Module, builder: &mut CodeBuilder) -> bool {
+ builder.types_on_stack(&[ValType::I64, ValType::I64])
+}
+
+fn i64_eq(
+ _: &mut Unstructured,
+ _: &Module,
+ builder: &mut CodeBuilder,
+ instructions: &mut Vec<Instruction>,
+) -> Result<()> {
+ builder.pop_operands(&[ValType::I64, ValType::I64]);
+ builder.push_operands(&[ValType::I32]);
+ instructions.push(Instruction::I64Eq);
+ Ok(())
+}
+
+fn i64_ne(
+ _: &mut Unstructured,
+ _: &Module,
+ builder: &mut CodeBuilder,
+ instructions: &mut Vec<Instruction>,
+) -> Result<()> {
+ builder.pop_operands(&[ValType::I64, ValType::I64]);
+ builder.push_operands(&[ValType::I32]);
+ instructions.push(Instruction::I64Ne);
+ Ok(())
+}
+
+fn i64_lt_s(
+ _: &mut Unstructured,
+ _: &Module,
+ builder: &mut CodeBuilder,
+ instructions: &mut Vec<Instruction>,
+) -> Result<()> {
+ builder.pop_operands(&[ValType::I64, ValType::I64]);
+ builder.push_operands(&[ValType::I32]);
+ instructions.push(Instruction::I64LtS);
+ Ok(())
+}
+
+fn i64_lt_u(
+ _: &mut Unstructured,
+ _: &Module,
+ builder: &mut CodeBuilder,
+ instructions: &mut Vec<Instruction>,
+) -> Result<()> {
+ builder.pop_operands(&[ValType::I64, ValType::I64]);
+ builder.push_operands(&[ValType::I32]);
+ instructions.push(Instruction::I64LtU);
+ Ok(())
+}
+
+fn i64_gt_s(
+ _: &mut Unstructured,
+ _: &Module,
+ builder: &mut CodeBuilder,
+ instructions: &mut Vec<Instruction>,
+) -> Result<()> {
+ builder.pop_operands(&[ValType::I64, ValType::I64]);
+ builder.push_operands(&[ValType::I32]);
+ instructions.push(Instruction::I64GtS);
+ Ok(())
+}
+
+fn i64_gt_u(
+ _: &mut Unstructured,
+ _: &Module,
+ builder: &mut CodeBuilder,
+ instructions: &mut Vec<Instruction>,
+) -> Result<()> {
+ builder.pop_operands(&[ValType::I64, ValType::I64]);
+ builder.push_operands(&[ValType::I32]);
+ instructions.push(Instruction::I64GtU);
+ Ok(())
+}
+
+fn i64_le_s(
+ _: &mut Unstructured,
+ _: &Module,
+ builder: &mut CodeBuilder,
+ instructions: &mut Vec<Instruction>,
+) -> Result<()> {
+ builder.pop_operands(&[ValType::I64, ValType::I64]);
+ builder.push_operands(&[ValType::I32]);
+ instructions.push(Instruction::I64LeS);
+ Ok(())
+}
+
+fn i64_le_u(
+ _: &mut Unstructured,
+ _: &Module,
+ builder: &mut CodeBuilder,
+ instructions: &mut Vec<Instruction>,
+) -> Result<()> {
+ builder.pop_operands(&[ValType::I64, ValType::I64]);
+ builder.push_operands(&[ValType::I32]);
+ instructions.push(Instruction::I64LeU);
+ Ok(())
+}
+
+fn i64_ge_s(
+ _: &mut Unstructured,
+ _: &Module,
+ builder: &mut CodeBuilder,
+ instructions: &mut Vec<Instruction>,
+) -> Result<()> {
+ builder.pop_operands(&[ValType::I64, ValType::I64]);
+ builder.push_operands(&[ValType::I32]);
+ instructions.push(Instruction::I64GeS);
+ Ok(())
+}
+
+fn i64_ge_u(
+ _: &mut Unstructured,
+ _: &Module,
+ builder: &mut CodeBuilder,
+ instructions: &mut Vec<Instruction>,
+) -> Result<()> {
+ builder.pop_operands(&[ValType::I64, ValType::I64]);
+ builder.push_operands(&[ValType::I32]);
+ instructions.push(Instruction::I64GeU);
+ Ok(())
+}
+
+fn f32_f32_on_stack(_: &Module, builder: &mut CodeBuilder) -> bool {
+ builder.types_on_stack(&[ValType::F32, ValType::F32])
+}
+
// ---- `f32` comparison operators ----
//
// Same pattern as the integer comparisons: pop two `f32` operands from the
// simulated stack, push the `i32` result, emit the instruction. Float
// comparisons cannot trap (NaN operands simply compare false/unordered).

fn f32_eq(
    _: &mut Unstructured,
    _: &Module,
    builder: &mut CodeBuilder,
    instructions: &mut Vec<Instruction>,
) -> Result<()> {
    builder.pop_operands(&[ValType::F32, ValType::F32]);
    builder.push_operands(&[ValType::I32]);
    instructions.push(Instruction::F32Eq);
    Ok(())
}

fn f32_ne(
    _: &mut Unstructured,
    _: &Module,
    builder: &mut CodeBuilder,
    instructions: &mut Vec<Instruction>,
) -> Result<()> {
    builder.pop_operands(&[ValType::F32, ValType::F32]);
    builder.push_operands(&[ValType::I32]);
    instructions.push(Instruction::F32Ne);
    Ok(())
}

fn f32_lt(
    _: &mut Unstructured,
    _: &Module,
    builder: &mut CodeBuilder,
    instructions: &mut Vec<Instruction>,
) -> Result<()> {
    builder.pop_operands(&[ValType::F32, ValType::F32]);
    builder.push_operands(&[ValType::I32]);
    instructions.push(Instruction::F32Lt);
    Ok(())
}

fn f32_gt(
    _: &mut Unstructured,
    _: &Module,
    builder: &mut CodeBuilder,
    instructions: &mut Vec<Instruction>,
) -> Result<()> {
    builder.pop_operands(&[ValType::F32, ValType::F32]);
    builder.push_operands(&[ValType::I32]);
    instructions.push(Instruction::F32Gt);
    Ok(())
}

fn f32_le(
    _: &mut Unstructured,
    _: &Module,
    builder: &mut CodeBuilder,
    instructions: &mut Vec<Instruction>,
) -> Result<()> {
    builder.pop_operands(&[ValType::F32, ValType::F32]);
    builder.push_operands(&[ValType::I32]);
    instructions.push(Instruction::F32Le);
    Ok(())
}

fn f32_ge(
    _: &mut Unstructured,
    _: &Module,
    builder: &mut CodeBuilder,
    instructions: &mut Vec<Instruction>,
) -> Result<()> {
    builder.pop_operands(&[ValType::F32, ValType::F32]);
    builder.push_operands(&[ValType::I32]);
    instructions.push(Instruction::F32Ge);
    Ok(())
}
+
+fn f64_f64_on_stack(_: &Module, builder: &mut CodeBuilder) -> bool {
+ builder.types_on_stack(&[ValType::F64, ValType::F64])
+}
+
// ---- `f64` comparison operators and non-trapping `i32` arithmetic ----
//
// The `f64` comparisons pop two `f64` operands and push the `i32` result.
// The `i32` unary (clz/ctz/popcnt) and binary (add/sub/mul) handlers that
// follow cannot trap, so they emit the instruction unconditionally.

fn f64_eq(
    _: &mut Unstructured,
    _: &Module,
    builder: &mut CodeBuilder,
    instructions: &mut Vec<Instruction>,
) -> Result<()> {
    builder.pop_operands(&[ValType::F64, ValType::F64]);
    builder.push_operands(&[ValType::I32]);
    instructions.push(Instruction::F64Eq);
    Ok(())
}

fn f64_ne(
    _: &mut Unstructured,
    _: &Module,
    builder: &mut CodeBuilder,
    instructions: &mut Vec<Instruction>,
) -> Result<()> {
    builder.pop_operands(&[ValType::F64, ValType::F64]);
    builder.push_operands(&[ValType::I32]);
    instructions.push(Instruction::F64Ne);
    Ok(())
}

fn f64_lt(
    _: &mut Unstructured,
    _: &Module,
    builder: &mut CodeBuilder,
    instructions: &mut Vec<Instruction>,
) -> Result<()> {
    builder.pop_operands(&[ValType::F64, ValType::F64]);
    builder.push_operands(&[ValType::I32]);
    instructions.push(Instruction::F64Lt);
    Ok(())
}

fn f64_gt(
    _: &mut Unstructured,
    _: &Module,
    builder: &mut CodeBuilder,
    instructions: &mut Vec<Instruction>,
) -> Result<()> {
    builder.pop_operands(&[ValType::F64, ValType::F64]);
    builder.push_operands(&[ValType::I32]);
    instructions.push(Instruction::F64Gt);
    Ok(())
}

fn f64_le(
    _: &mut Unstructured,
    _: &Module,
    builder: &mut CodeBuilder,
    instructions: &mut Vec<Instruction>,
) -> Result<()> {
    builder.pop_operands(&[ValType::F64, ValType::F64]);
    builder.push_operands(&[ValType::I32]);
    instructions.push(Instruction::F64Le);
    Ok(())
}

fn f64_ge(
    _: &mut Unstructured,
    _: &Module,
    builder: &mut CodeBuilder,
    instructions: &mut Vec<Instruction>,
) -> Result<()> {
    builder.pop_operands(&[ValType::F64, ValType::F64]);
    builder.push_operands(&[ValType::I32]);
    instructions.push(Instruction::F64Ge);
    Ok(())
}

fn i32_clz(
    _: &mut Unstructured,
    _: &Module,
    builder: &mut CodeBuilder,
    instructions: &mut Vec<Instruction>,
) -> Result<()> {
    builder.pop_operands(&[ValType::I32]);
    builder.push_operands(&[ValType::I32]);
    instructions.push(Instruction::I32Clz);
    Ok(())
}

fn i32_ctz(
    _: &mut Unstructured,
    _: &Module,
    builder: &mut CodeBuilder,
    instructions: &mut Vec<Instruction>,
) -> Result<()> {
    builder.pop_operands(&[ValType::I32]);
    builder.push_operands(&[ValType::I32]);
    instructions.push(Instruction::I32Ctz);
    Ok(())
}

fn i32_popcnt(
    _: &mut Unstructured,
    _: &Module,
    builder: &mut CodeBuilder,
    instructions: &mut Vec<Instruction>,
) -> Result<()> {
    builder.pop_operands(&[ValType::I32]);
    builder.push_operands(&[ValType::I32]);
    instructions.push(Instruction::I32Popcnt);
    Ok(())
}

fn i32_add(
    _: &mut Unstructured,
    _: &Module,
    builder: &mut CodeBuilder,
    instructions: &mut Vec<Instruction>,
) -> Result<()> {
    builder.pop_operands(&[ValType::I32, ValType::I32]);
    builder.push_operands(&[ValType::I32]);
    instructions.push(Instruction::I32Add);
    Ok(())
}

fn i32_sub(
    _: &mut Unstructured,
    _: &Module,
    builder: &mut CodeBuilder,
    instructions: &mut Vec<Instruction>,
) -> Result<()> {
    builder.pop_operands(&[ValType::I32, ValType::I32]);
    builder.push_operands(&[ValType::I32]);
    instructions.push(Instruction::I32Sub);
    Ok(())
}

fn i32_mul(
    _: &mut Unstructured,
    _: &Module,
    builder: &mut CodeBuilder,
    instructions: &mut Vec<Instruction>,
) -> Result<()> {
    builder.pop_operands(&[ValType::I32, ValType::I32]);
    builder.push_operands(&[ValType::I32]);
    instructions.push(Instruction::I32Mul);
    Ok(())
}
+
// ---- `i32` division and remainder ----
//
// These instructions can trap (divide by zero; for the signed forms also
// `i32::MIN / -1` overflow). When the configuration forbids traps, the
// `no_traps` helpers wrap the raw instruction in a guard sequence that
// avoids the trapping operand combinations; otherwise the instruction is
// emitted as-is. Either way the net type effect is pop two `i32`, push one.

fn i32_div_s(
    _: &mut Unstructured,
    module: &Module,
    builder: &mut CodeBuilder,
    instructions: &mut Vec<Instruction>,
) -> Result<()> {
    builder.pop_operands(&[ValType::I32, ValType::I32]);
    builder.push_operands(&[ValType::I32]);
    if module.config.disallow_traps() {
        no_traps::signed_div_rem(Instruction::I32DivS, builder, instructions);
    } else {
        instructions.push(Instruction::I32DivS);
    }
    Ok(())
}

fn i32_div_u(
    _: &mut Unstructured,
    module: &Module,
    builder: &mut CodeBuilder,
    instructions: &mut Vec<Instruction>,
) -> Result<()> {
    builder.pop_operands(&[ValType::I32, ValType::I32]);
    builder.push_operands(&[ValType::I32]);
    if module.config.disallow_traps() {
        no_traps::unsigned_div_rem(Instruction::I32DivU, builder, instructions);
    } else {
        instructions.push(Instruction::I32DivU);
    }
    Ok(())
}

fn i32_rem_s(
    _: &mut Unstructured,
    module: &Module,
    builder: &mut CodeBuilder,
    instructions: &mut Vec<Instruction>,
) -> Result<()> {
    builder.pop_operands(&[ValType::I32, ValType::I32]);
    builder.push_operands(&[ValType::I32]);
    if module.config.disallow_traps() {
        no_traps::signed_div_rem(Instruction::I32RemS, builder, instructions);
    } else {
        instructions.push(Instruction::I32RemS);
    }
    Ok(())
}

fn i32_rem_u(
    _: &mut Unstructured,
    module: &Module,
    builder: &mut CodeBuilder,
    instructions: &mut Vec<Instruction>,
) -> Result<()> {
    builder.pop_operands(&[ValType::I32, ValType::I32]);
    builder.push_operands(&[ValType::I32]);
    if module.config.disallow_traps() {
        no_traps::unsigned_div_rem(Instruction::I32RemU, builder, instructions);
    } else {
        instructions.push(Instruction::I32RemU);
    }
    Ok(())
}
+
// ---- `i32` bitwise/shift operators and non-trapping `i64` arithmetic ----
//
// None of these instructions can trap (wasm shift/rotate amounts are taken
// modulo the bit width), so each handler just models the type effect on the
// simulated stack and emits the instruction.

fn i32_and(
    _: &mut Unstructured,
    _: &Module,
    builder: &mut CodeBuilder,
    instructions: &mut Vec<Instruction>,
) -> Result<()> {
    builder.pop_operands(&[ValType::I32, ValType::I32]);
    builder.push_operands(&[ValType::I32]);
    instructions.push(Instruction::I32And);
    Ok(())
}

fn i32_or(
    _: &mut Unstructured,
    _: &Module,
    builder: &mut CodeBuilder,
    instructions: &mut Vec<Instruction>,
) -> Result<()> {
    builder.pop_operands(&[ValType::I32, ValType::I32]);
    builder.push_operands(&[ValType::I32]);
    instructions.push(Instruction::I32Or);
    Ok(())
}

fn i32_xor(
    _: &mut Unstructured,
    _: &Module,
    builder: &mut CodeBuilder,
    instructions: &mut Vec<Instruction>,
) -> Result<()> {
    builder.pop_operands(&[ValType::I32, ValType::I32]);
    builder.push_operands(&[ValType::I32]);
    instructions.push(Instruction::I32Xor);
    Ok(())
}

fn i32_shl(
    _: &mut Unstructured,
    _: &Module,
    builder: &mut CodeBuilder,
    instructions: &mut Vec<Instruction>,
) -> Result<()> {
    builder.pop_operands(&[ValType::I32, ValType::I32]);
    builder.push_operands(&[ValType::I32]);
    instructions.push(Instruction::I32Shl);
    Ok(())
}

fn i32_shr_s(
    _: &mut Unstructured,
    _: &Module,
    builder: &mut CodeBuilder,
    instructions: &mut Vec<Instruction>,
) -> Result<()> {
    builder.pop_operands(&[ValType::I32, ValType::I32]);
    builder.push_operands(&[ValType::I32]);
    instructions.push(Instruction::I32ShrS);
    Ok(())
}

fn i32_shr_u(
    _: &mut Unstructured,
    _: &Module,
    builder: &mut CodeBuilder,
    instructions: &mut Vec<Instruction>,
) -> Result<()> {
    builder.pop_operands(&[ValType::I32, ValType::I32]);
    builder.push_operands(&[ValType::I32]);
    instructions.push(Instruction::I32ShrU);
    Ok(())
}

fn i32_rotl(
    _: &mut Unstructured,
    _: &Module,
    builder: &mut CodeBuilder,
    instructions: &mut Vec<Instruction>,
) -> Result<()> {
    builder.pop_operands(&[ValType::I32, ValType::I32]);
    builder.push_operands(&[ValType::I32]);
    instructions.push(Instruction::I32Rotl);
    Ok(())
}

fn i32_rotr(
    _: &mut Unstructured,
    _: &Module,
    builder: &mut CodeBuilder,
    instructions: &mut Vec<Instruction>,
) -> Result<()> {
    builder.pop_operands(&[ValType::I32, ValType::I32]);
    builder.push_operands(&[ValType::I32]);
    instructions.push(Instruction::I32Rotr);
    Ok(())
}

fn i64_clz(
    _: &mut Unstructured,
    _: &Module,
    builder: &mut CodeBuilder,
    instructions: &mut Vec<Instruction>,
) -> Result<()> {
    builder.pop_operands(&[ValType::I64]);
    builder.push_operands(&[ValType::I64]);
    instructions.push(Instruction::I64Clz);
    Ok(())
}

fn i64_ctz(
    _: &mut Unstructured,
    _: &Module,
    builder: &mut CodeBuilder,
    instructions: &mut Vec<Instruction>,
) -> Result<()> {
    builder.pop_operands(&[ValType::I64]);
    builder.push_operands(&[ValType::I64]);
    instructions.push(Instruction::I64Ctz);
    Ok(())
}

fn i64_popcnt(
    _: &mut Unstructured,
    _: &Module,
    builder: &mut CodeBuilder,
    instructions: &mut Vec<Instruction>,
) -> Result<()> {
    builder.pop_operands(&[ValType::I64]);
    builder.push_operands(&[ValType::I64]);
    instructions.push(Instruction::I64Popcnt);
    Ok(())
}

fn i64_add(
    _: &mut Unstructured,
    _: &Module,
    builder: &mut CodeBuilder,
    instructions: &mut Vec<Instruction>,
) -> Result<()> {
    builder.pop_operands(&[ValType::I64, ValType::I64]);
    builder.push_operands(&[ValType::I64]);
    instructions.push(Instruction::I64Add);
    Ok(())
}

fn i64_sub(
    _: &mut Unstructured,
    _: &Module,
    builder: &mut CodeBuilder,
    instructions: &mut Vec<Instruction>,
) -> Result<()> {
    builder.pop_operands(&[ValType::I64, ValType::I64]);
    builder.push_operands(&[ValType::I64]);
    instructions.push(Instruction::I64Sub);
    Ok(())
}

fn i64_mul(
    _: &mut Unstructured,
    _: &Module,
    builder: &mut CodeBuilder,
    instructions: &mut Vec<Instruction>,
) -> Result<()> {
    builder.pop_operands(&[ValType::I64, ValType::I64]);
    builder.push_operands(&[ValType::I64]);
    instructions.push(Instruction::I64Mul);
    Ok(())
}
+
// ---- `i64` division and remainder ----
//
// Same trap-avoidance scheme as the `i32` forms: divide-by-zero (and signed
// `i64::MIN / -1` overflow) can trap, so under `disallow_traps` the
// `no_traps` helpers emit a guarded sequence instead of the raw instruction.

fn i64_div_s(
    _: &mut Unstructured,
    module: &Module,
    builder: &mut CodeBuilder,
    instructions: &mut Vec<Instruction>,
) -> Result<()> {
    builder.pop_operands(&[ValType::I64, ValType::I64]);
    builder.push_operands(&[ValType::I64]);
    if module.config.disallow_traps() {
        no_traps::signed_div_rem(Instruction::I64DivS, builder, instructions);
    } else {
        instructions.push(Instruction::I64DivS);
    }
    Ok(())
}

fn i64_div_u(
    _: &mut Unstructured,
    module: &Module,
    builder: &mut CodeBuilder,
    instructions: &mut Vec<Instruction>,
) -> Result<()> {
    builder.pop_operands(&[ValType::I64, ValType::I64]);
    builder.push_operands(&[ValType::I64]);
    if module.config.disallow_traps() {
        no_traps::unsigned_div_rem(Instruction::I64DivU, builder, instructions);
    } else {
        instructions.push(Instruction::I64DivU);
    }
    Ok(())
}

fn i64_rem_s(
    _: &mut Unstructured,
    module: &Module,
    builder: &mut CodeBuilder,
    instructions: &mut Vec<Instruction>,
) -> Result<()> {
    builder.pop_operands(&[ValType::I64, ValType::I64]);
    builder.push_operands(&[ValType::I64]);
    if module.config.disallow_traps() {
        no_traps::signed_div_rem(Instruction::I64RemS, builder, instructions);
    } else {
        instructions.push(Instruction::I64RemS);
    }
    Ok(())
}

fn i64_rem_u(
    _: &mut Unstructured,
    module: &Module,
    builder: &mut CodeBuilder,
    instructions: &mut Vec<Instruction>,
) -> Result<()> {
    builder.pop_operands(&[ValType::I64, ValType::I64]);
    builder.push_operands(&[ValType::I64]);
    if module.config.disallow_traps() {
        no_traps::unsigned_div_rem(Instruction::I64RemU, builder, instructions);
    } else {
        instructions.push(Instruction::I64RemU);
    }
    Ok(())
}
+
// ---- `i64` bitwise/shift operators ----
//
// Non-trapping: pop two `i64`, push one, emit the instruction.

fn i64_and(
    _: &mut Unstructured,
    _: &Module,
    builder: &mut CodeBuilder,
    instructions: &mut Vec<Instruction>,
) -> Result<()> {
    builder.pop_operands(&[ValType::I64, ValType::I64]);
    builder.push_operands(&[ValType::I64]);
    instructions.push(Instruction::I64And);
    Ok(())
}

fn i64_or(
    _: &mut Unstructured,
    _: &Module,
    builder: &mut CodeBuilder,
    instructions: &mut Vec<Instruction>,
) -> Result<()> {
    builder.pop_operands(&[ValType::I64, ValType::I64]);
    builder.push_operands(&[ValType::I64]);
    instructions.push(Instruction::I64Or);
    Ok(())
}

fn i64_xor(
    _: &mut Unstructured,
    _: &Module,
    builder: &mut CodeBuilder,
    instructions: &mut Vec<Instruction>,
) -> Result<()> {
    builder.pop_operands(&[ValType::I64, ValType::I64]);
    builder.push_operands(&[ValType::I64]);
    instructions.push(Instruction::I64Xor);
    Ok(())
}

fn i64_shl(
    _: &mut Unstructured,
    _: &Module,
    builder: &mut CodeBuilder,
    instructions: &mut Vec<Instruction>,
) -> Result<()> {
    builder.pop_operands(&[ValType::I64, ValType::I64]);
    builder.push_operands(&[ValType::I64]);
    instructions.push(Instruction::I64Shl);
    Ok(())
}

fn i64_shr_s(
    _: &mut Unstructured,
    _: &Module,
    builder: &mut CodeBuilder,
    instructions: &mut Vec<Instruction>,
) -> Result<()> {
    builder.pop_operands(&[ValType::I64, ValType::I64]);
    builder.push_operands(&[ValType::I64]);
    instructions.push(Instruction::I64ShrS);
    Ok(())
}

fn i64_shr_u(
    _: &mut Unstructured,
    _: &Module,
    builder: &mut CodeBuilder,
    instructions: &mut Vec<Instruction>,
) -> Result<()> {
    builder.pop_operands(&[ValType::I64, ValType::I64]);
    builder.push_operands(&[ValType::I64]);
    instructions.push(Instruction::I64ShrU);
    Ok(())
}

fn i64_rotl(
    _: &mut Unstructured,
    _: &Module,
    builder: &mut CodeBuilder,
    instructions: &mut Vec<Instruction>,
) -> Result<()> {
    builder.pop_operands(&[ValType::I64, ValType::I64]);
    builder.push_operands(&[ValType::I64]);
    instructions.push(Instruction::I64Rotl);
    Ok(())
}

fn i64_rotr(
    _: &mut Unstructured,
    _: &Module,
    builder: &mut CodeBuilder,
    instructions: &mut Vec<Instruction>,
) -> Result<()> {
    builder.pop_operands(&[ValType::I64, ValType::I64]);
    builder.push_operands(&[ValType::I64]);
    instructions.push(Instruction::I64Rotr);
    Ok(())
}

// True when an `f32` is on top of the simulated operand stack.
#[inline]
fn f32_on_stack(_: &Module, builder: &mut CodeBuilder) -> bool {
    builder.types_on_stack(&[ValType::F32])
}
+
// ---- Floating-point arithmetic ----
//
// Unary operators pop one float and push one of the same type; binary
// operators pop two and push one. Float arithmetic never traps in wasm
// (division by zero and invalid operations produce infinities/NaNs), so no
// `disallow_traps` handling is needed in this section.

fn f32_abs(
    _: &mut Unstructured,
    _: &Module,
    builder: &mut CodeBuilder,
    instructions: &mut Vec<Instruction>,
) -> Result<()> {
    builder.pop_operands(&[ValType::F32]);
    builder.push_operands(&[ValType::F32]);
    instructions.push(Instruction::F32Abs);
    Ok(())
}

fn f32_neg(
    _: &mut Unstructured,
    _: &Module,
    builder: &mut CodeBuilder,
    instructions: &mut Vec<Instruction>,
) -> Result<()> {
    builder.pop_operands(&[ValType::F32]);
    builder.push_operands(&[ValType::F32]);
    instructions.push(Instruction::F32Neg);
    Ok(())
}

fn f32_ceil(
    _: &mut Unstructured,
    _: &Module,
    builder: &mut CodeBuilder,
    instructions: &mut Vec<Instruction>,
) -> Result<()> {
    builder.pop_operands(&[ValType::F32]);
    builder.push_operands(&[ValType::F32]);
    instructions.push(Instruction::F32Ceil);
    Ok(())
}

fn f32_floor(
    _: &mut Unstructured,
    _: &Module,
    builder: &mut CodeBuilder,
    instructions: &mut Vec<Instruction>,
) -> Result<()> {
    builder.pop_operands(&[ValType::F32]);
    builder.push_operands(&[ValType::F32]);
    instructions.push(Instruction::F32Floor);
    Ok(())
}

fn f32_trunc(
    _: &mut Unstructured,
    _: &Module,
    builder: &mut CodeBuilder,
    instructions: &mut Vec<Instruction>,
) -> Result<()> {
    builder.pop_operands(&[ValType::F32]);
    builder.push_operands(&[ValType::F32]);
    instructions.push(Instruction::F32Trunc);
    Ok(())
}

fn f32_nearest(
    _: &mut Unstructured,
    _: &Module,
    builder: &mut CodeBuilder,
    instructions: &mut Vec<Instruction>,
) -> Result<()> {
    builder.pop_operands(&[ValType::F32]);
    builder.push_operands(&[ValType::F32]);
    instructions.push(Instruction::F32Nearest);
    Ok(())
}

fn f32_sqrt(
    _: &mut Unstructured,
    _: &Module,
    builder: &mut CodeBuilder,
    instructions: &mut Vec<Instruction>,
) -> Result<()> {
    builder.pop_operands(&[ValType::F32]);
    builder.push_operands(&[ValType::F32]);
    instructions.push(Instruction::F32Sqrt);
    Ok(())
}

fn f32_add(
    _: &mut Unstructured,
    _: &Module,
    builder: &mut CodeBuilder,
    instructions: &mut Vec<Instruction>,
) -> Result<()> {
    builder.pop_operands(&[ValType::F32, ValType::F32]);
    builder.push_operands(&[ValType::F32]);
    instructions.push(Instruction::F32Add);
    Ok(())
}

fn f32_sub(
    _: &mut Unstructured,
    _: &Module,
    builder: &mut CodeBuilder,
    instructions: &mut Vec<Instruction>,
) -> Result<()> {
    builder.pop_operands(&[ValType::F32, ValType::F32]);
    builder.push_operands(&[ValType::F32]);
    instructions.push(Instruction::F32Sub);
    Ok(())
}

fn f32_mul(
    _: &mut Unstructured,
    _: &Module,
    builder: &mut CodeBuilder,
    instructions: &mut Vec<Instruction>,
) -> Result<()> {
    builder.pop_operands(&[ValType::F32, ValType::F32]);
    builder.push_operands(&[ValType::F32]);
    instructions.push(Instruction::F32Mul);
    Ok(())
}

fn f32_div(
    _: &mut Unstructured,
    _: &Module,
    builder: &mut CodeBuilder,
    instructions: &mut Vec<Instruction>,
) -> Result<()> {
    builder.pop_operands(&[ValType::F32, ValType::F32]);
    builder.push_operands(&[ValType::F32]);
    instructions.push(Instruction::F32Div);
    Ok(())
}

fn f32_min(
    _: &mut Unstructured,
    _: &Module,
    builder: &mut CodeBuilder,
    instructions: &mut Vec<Instruction>,
) -> Result<()> {
    builder.pop_operands(&[ValType::F32, ValType::F32]);
    builder.push_operands(&[ValType::F32]);
    instructions.push(Instruction::F32Min);
    Ok(())
}

fn f32_max(
    _: &mut Unstructured,
    _: &Module,
    builder: &mut CodeBuilder,
    instructions: &mut Vec<Instruction>,
) -> Result<()> {
    builder.pop_operands(&[ValType::F32, ValType::F32]);
    builder.push_operands(&[ValType::F32]);
    instructions.push(Instruction::F32Max);
    Ok(())
}

fn f32_copysign(
    _: &mut Unstructured,
    _: &Module,
    builder: &mut CodeBuilder,
    instructions: &mut Vec<Instruction>,
) -> Result<()> {
    builder.pop_operands(&[ValType::F32, ValType::F32]);
    builder.push_operands(&[ValType::F32]);
    instructions.push(Instruction::F32Copysign);
    Ok(())
}

// True when an `f64` is on top of the simulated operand stack.
#[inline]
fn f64_on_stack(_: &Module, builder: &mut CodeBuilder) -> bool {
    builder.types_on_stack(&[ValType::F64])
}

fn f64_abs(
    _: &mut Unstructured,
    _: &Module,
    builder: &mut CodeBuilder,
    instructions: &mut Vec<Instruction>,
) -> Result<()> {
    builder.pop_operands(&[ValType::F64]);
    builder.push_operands(&[ValType::F64]);
    instructions.push(Instruction::F64Abs);
    Ok(())
}

fn f64_neg(
    _: &mut Unstructured,
    _: &Module,
    builder: &mut CodeBuilder,
    instructions: &mut Vec<Instruction>,
) -> Result<()> {
    builder.pop_operands(&[ValType::F64]);
    builder.push_operands(&[ValType::F64]);
    instructions.push(Instruction::F64Neg);
    Ok(())
}

fn f64_ceil(
    _: &mut Unstructured,
    _: &Module,
    builder: &mut CodeBuilder,
    instructions: &mut Vec<Instruction>,
) -> Result<()> {
    builder.pop_operands(&[ValType::F64]);
    builder.push_operands(&[ValType::F64]);
    instructions.push(Instruction::F64Ceil);
    Ok(())
}

fn f64_floor(
    _: &mut Unstructured,
    _: &Module,
    builder: &mut CodeBuilder,
    instructions: &mut Vec<Instruction>,
) -> Result<()> {
    builder.pop_operands(&[ValType::F64]);
    builder.push_operands(&[ValType::F64]);
    instructions.push(Instruction::F64Floor);
    Ok(())
}

fn f64_trunc(
    _: &mut Unstructured,
    _: &Module,
    builder: &mut CodeBuilder,
    instructions: &mut Vec<Instruction>,
) -> Result<()> {
    builder.pop_operands(&[ValType::F64]);
    builder.push_operands(&[ValType::F64]);
    instructions.push(Instruction::F64Trunc);
    Ok(())
}

fn f64_nearest(
    _: &mut Unstructured,
    _: &Module,
    builder: &mut CodeBuilder,
    instructions: &mut Vec<Instruction>,
) -> Result<()> {
    builder.pop_operands(&[ValType::F64]);
    builder.push_operands(&[ValType::F64]);
    instructions.push(Instruction::F64Nearest);
    Ok(())
}

fn f64_sqrt(
    _: &mut Unstructured,
    _: &Module,
    builder: &mut CodeBuilder,
    instructions: &mut Vec<Instruction>,
) -> Result<()> {
    builder.pop_operands(&[ValType::F64]);
    builder.push_operands(&[ValType::F64]);
    instructions.push(Instruction::F64Sqrt);
    Ok(())
}

fn f64_add(
    _: &mut Unstructured,
    _: &Module,
    builder: &mut CodeBuilder,
    instructions: &mut Vec<Instruction>,
) -> Result<()> {
    builder.pop_operands(&[ValType::F64, ValType::F64]);
    builder.push_operands(&[ValType::F64]);
    instructions.push(Instruction::F64Add);
    Ok(())
}

fn f64_sub(
    _: &mut Unstructured,
    _: &Module,
    builder: &mut CodeBuilder,
    instructions: &mut Vec<Instruction>,
) -> Result<()> {
    builder.pop_operands(&[ValType::F64, ValType::F64]);
    builder.push_operands(&[ValType::F64]);
    instructions.push(Instruction::F64Sub);
    Ok(())
}

fn f64_mul(
    _: &mut Unstructured,
    _: &Module,
    builder: &mut CodeBuilder,
    instructions: &mut Vec<Instruction>,
) -> Result<()> {
    builder.pop_operands(&[ValType::F64, ValType::F64]);
    builder.push_operands(&[ValType::F64]);
    instructions.push(Instruction::F64Mul);
    Ok(())
}

fn f64_div(
    _: &mut Unstructured,
    _: &Module,
    builder: &mut CodeBuilder,
    instructions: &mut Vec<Instruction>,
) -> Result<()> {
    builder.pop_operands(&[ValType::F64, ValType::F64]);
    builder.push_operands(&[ValType::F64]);
    instructions.push(Instruction::F64Div);
    Ok(())
}

fn f64_min(
    _: &mut Unstructured,
    _: &Module,
    builder: &mut CodeBuilder,
    instructions: &mut Vec<Instruction>,
) -> Result<()> {
    builder.pop_operands(&[ValType::F64, ValType::F64]);
    builder.push_operands(&[ValType::F64]);
    instructions.push(Instruction::F64Min);
    Ok(())
}

fn f64_max(
    _: &mut Unstructured,
    _: &Module,
    builder: &mut CodeBuilder,
    instructions: &mut Vec<Instruction>,
) -> Result<()> {
    builder.pop_operands(&[ValType::F64, ValType::F64]);
    builder.push_operands(&[ValType::F64]);
    instructions.push(Instruction::F64Max);
    Ok(())
}

fn f64_copysign(
    _: &mut Unstructured,
    _: &Module,
    builder: &mut CodeBuilder,
    instructions: &mut Vec<Instruction>,
) -> Result<()> {
    builder.pop_operands(&[ValType::F64, ValType::F64]);
    builder.push_operands(&[ValType::F64]);
    instructions.push(Instruction::F64Copysign);
    Ok(())
}
+
+fn i32_wrap_i64(
+ _: &mut Unstructured,
+ _: &Module,
+ builder: &mut CodeBuilder,
+ instructions: &mut Vec<Instruction>,
+) -> Result<()> {
+ builder.pop_operands(&[ValType::I64]);
+ builder.push_operands(&[ValType::I32]);
+ instructions.push(Instruction::I32WrapI64);
+ Ok(())
+}
+
// ---- float -> i32 truncation ----
//
// `i32.trunc_f32_*` traps on NaN and out-of-range inputs. Under
// `disallow_traps`, `no_traps::trunc` wraps the instruction in a guard
// sequence that avoids the trapping inputs.

// Gate used when the saturating (non-trapping) float-to-int proposal is
// required: an `f32` must be on the stack AND the feature must be enabled.
fn nontrapping_f32_on_stack(module: &Module, builder: &mut CodeBuilder) -> bool {
    module.config.saturating_float_to_int_enabled() && f32_on_stack(module, builder)
}

fn i32_trunc_f32_s(
    _: &mut Unstructured,
    module: &Module,
    builder: &mut CodeBuilder,
    instructions: &mut Vec<Instruction>,
) -> Result<()> {
    builder.pop_operands(&[ValType::F32]);
    builder.push_operands(&[ValType::I32]);
    if module.config.disallow_traps() {
        no_traps::trunc(Instruction::I32TruncF32S, builder, instructions);
    } else {
        instructions.push(Instruction::I32TruncF32S);
    }
    Ok(())
}

fn i32_trunc_f32_u(
    _: &mut Unstructured,
    module: &Module,
    builder: &mut CodeBuilder,
    instructions: &mut Vec<Instruction>,
) -> Result<()> {
    builder.pop_operands(&[ValType::F32]);
    builder.push_operands(&[ValType::I32]);
    if module.config.disallow_traps() {
        no_traps::trunc(Instruction::I32TruncF32U, builder, instructions);
    } else {
        instructions.push(Instruction::I32TruncF32U);
    }
    Ok(())
}
+
// Gate used when the saturating (non-trapping) float-to-int proposal is
// required: an `f64` must be on the stack AND the feature must be enabled.
fn nontrapping_f64_on_stack(module: &Module, builder: &mut CodeBuilder) -> bool {
    module.config.saturating_float_to_int_enabled() && f64_on_stack(module, builder)
}

// `i32.trunc_f64_*` traps on NaN/out-of-range inputs; guarded via
// `no_traps::trunc` when traps are disallowed.
fn i32_trunc_f64_s(
    _: &mut Unstructured,
    module: &Module,
    builder: &mut CodeBuilder,
    instructions: &mut Vec<Instruction>,
) -> Result<()> {
    builder.pop_operands(&[ValType::F64]);
    builder.push_operands(&[ValType::I32]);
    if module.config.disallow_traps() {
        no_traps::trunc(Instruction::I32TruncF64S, builder, instructions);
    } else {
        instructions.push(Instruction::I32TruncF64S);
    }
    Ok(())
}

fn i32_trunc_f64_u(
    _: &mut Unstructured,
    module: &Module,
    builder: &mut CodeBuilder,
    instructions: &mut Vec<Instruction>,
) -> Result<()> {
    builder.pop_operands(&[ValType::F64]);
    builder.push_operands(&[ValType::I32]);
    if module.config.disallow_traps() {
        no_traps::trunc(Instruction::I32TruncF64U, builder, instructions);
    } else {
        instructions.push(Instruction::I32TruncF64U);
    }
    Ok(())
}
+
// `i64.extend_i32_{s,u}`: trap-free widening conversions; replace the `i32`
// on the simulated stack with an `i64`.
fn i64_extend_i32_s(
    _: &mut Unstructured,
    _: &Module,
    builder: &mut CodeBuilder,
    instructions: &mut Vec<Instruction>,
) -> Result<()> {
    builder.pop_operands(&[ValType::I32]);
    builder.push_operands(&[ValType::I64]);
    instructions.push(Instruction::I64ExtendI32S);
    Ok(())
}

fn i64_extend_i32_u(
    _: &mut Unstructured,
    _: &Module,
    builder: &mut CodeBuilder,
    instructions: &mut Vec<Instruction>,
) -> Result<()> {
    builder.pop_operands(&[ValType::I32]);
    builder.push_operands(&[ValType::I64]);
    instructions.push(Instruction::I64ExtendI32U);
    Ok(())
}
+
// ---- float -> i64 truncation ----
//
// Like the `i32` forms, `i64.trunc_*` traps on NaN and out-of-range inputs,
// so `no_traps::trunc` guards the instruction when traps are disallowed.

fn i64_trunc_f32_s(
    _: &mut Unstructured,
    module: &Module,
    builder: &mut CodeBuilder,
    instructions: &mut Vec<Instruction>,
) -> Result<()> {
    builder.pop_operands(&[ValType::F32]);
    builder.push_operands(&[ValType::I64]);
    if module.config.disallow_traps() {
        no_traps::trunc(Instruction::I64TruncF32S, builder, instructions);
    } else {
        instructions.push(Instruction::I64TruncF32S);
    }
    Ok(())
}

fn i64_trunc_f32_u(
    _: &mut Unstructured,
    module: &Module,
    builder: &mut CodeBuilder,
    instructions: &mut Vec<Instruction>,
) -> Result<()> {
    builder.pop_operands(&[ValType::F32]);
    builder.push_operands(&[ValType::I64]);
    if module.config.disallow_traps() {
        no_traps::trunc(Instruction::I64TruncF32U, builder, instructions);
    } else {
        instructions.push(Instruction::I64TruncF32U);
    }
    Ok(())
}

fn i64_trunc_f64_s(
    _: &mut Unstructured,
    module: &Module,
    builder: &mut CodeBuilder,
    instructions: &mut Vec<Instruction>,
) -> Result<()> {
    builder.pop_operands(&[ValType::F64]);
    builder.push_operands(&[ValType::I64]);
    if module.config.disallow_traps() {
        no_traps::trunc(Instruction::I64TruncF64S, builder, instructions);
    } else {
        instructions.push(Instruction::I64TruncF64S);
    }
    Ok(())
}

fn i64_trunc_f64_u(
    _: &mut Unstructured,
    module: &Module,
    builder: &mut CodeBuilder,
    instructions: &mut Vec<Instruction>,
) -> Result<()> {
    builder.pop_operands(&[ValType::F64]);
    builder.push_operands(&[ValType::I64]);
    if module.config.disallow_traps() {
        no_traps::trunc(Instruction::I64TruncF64U, builder, instructions);
    } else {
        instructions.push(Instruction::I64TruncF64U);
    }
    Ok(())
}
+
+// Integer -> float conversions and float width changes (demote/promote).
+// These are total conversions, so they need no trap handling.
+fn f32_convert_i32_s(
+    _: &mut Unstructured,
+    _: &Module,
+    builder: &mut CodeBuilder,
+    instructions: &mut Vec<Instruction>,
+) -> Result<()> {
+    builder.pop_operands(&[ValType::I32]);
+    builder.push_operands(&[ValType::F32]);
+    instructions.push(Instruction::F32ConvertI32S);
+    Ok(())
+}
+
+fn f32_convert_i32_u(
+    _: &mut Unstructured,
+    _: &Module,
+    builder: &mut CodeBuilder,
+    instructions: &mut Vec<Instruction>,
+) -> Result<()> {
+    builder.pop_operands(&[ValType::I32]);
+    builder.push_operands(&[ValType::F32]);
+    instructions.push(Instruction::F32ConvertI32U);
+    Ok(())
+}
+
+fn f32_convert_i64_s(
+    _: &mut Unstructured,
+    _: &Module,
+    builder: &mut CodeBuilder,
+    instructions: &mut Vec<Instruction>,
+) -> Result<()> {
+    builder.pop_operands(&[ValType::I64]);
+    builder.push_operands(&[ValType::F32]);
+    instructions.push(Instruction::F32ConvertI64S);
+    Ok(())
+}
+
+fn f32_convert_i64_u(
+    _: &mut Unstructured,
+    _: &Module,
+    builder: &mut CodeBuilder,
+    instructions: &mut Vec<Instruction>,
+) -> Result<()> {
+    builder.pop_operands(&[ValType::I64]);
+    builder.push_operands(&[ValType::F32]);
+    instructions.push(Instruction::F32ConvertI64U);
+    Ok(())
+}
+
+fn f32_demote_f64(
+    _: &mut Unstructured,
+    _: &Module,
+    builder: &mut CodeBuilder,
+    instructions: &mut Vec<Instruction>,
+) -> Result<()> {
+    builder.pop_operands(&[ValType::F64]);
+    builder.push_operands(&[ValType::F32]);
+    instructions.push(Instruction::F32DemoteF64);
+    Ok(())
+}
+
+fn f64_convert_i32_s(
+    _: &mut Unstructured,
+    _: &Module,
+    builder: &mut CodeBuilder,
+    instructions: &mut Vec<Instruction>,
+) -> Result<()> {
+    builder.pop_operands(&[ValType::I32]);
+    builder.push_operands(&[ValType::F64]);
+    instructions.push(Instruction::F64ConvertI32S);
+    Ok(())
+}
+
+fn f64_convert_i32_u(
+    _: &mut Unstructured,
+    _: &Module,
+    builder: &mut CodeBuilder,
+    instructions: &mut Vec<Instruction>,
+) -> Result<()> {
+    builder.pop_operands(&[ValType::I32]);
+    builder.push_operands(&[ValType::F64]);
+    instructions.push(Instruction::F64ConvertI32U);
+    Ok(())
+}
+
+fn f64_convert_i64_s(
+    _: &mut Unstructured,
+    _: &Module,
+    builder: &mut CodeBuilder,
+    instructions: &mut Vec<Instruction>,
+) -> Result<()> {
+    builder.pop_operands(&[ValType::I64]);
+    builder.push_operands(&[ValType::F64]);
+    instructions.push(Instruction::F64ConvertI64S);
+    Ok(())
+}
+
+fn f64_convert_i64_u(
+    _: &mut Unstructured,
+    _: &Module,
+    builder: &mut CodeBuilder,
+    instructions: &mut Vec<Instruction>,
+) -> Result<()> {
+    builder.pop_operands(&[ValType::I64]);
+    builder.push_operands(&[ValType::F64]);
+    instructions.push(Instruction::F64ConvertI64U);
+    Ok(())
+}
+
+fn f64_promote_f32(
+    _: &mut Unstructured,
+    _: &Module,
+    builder: &mut CodeBuilder,
+    instructions: &mut Vec<Instruction>,
+) -> Result<()> {
+    builder.pop_operands(&[ValType::F32]);
+    builder.push_operands(&[ValType::F64]);
+    instructions.push(Instruction::F64PromoteF32);
+    Ok(())
+}
+
+// Bit-pattern reinterpretations between same-width integer and float types.
+// No bits are changed, only the static type, so these can never trap.
+fn i32_reinterpret_f32(
+    _: &mut Unstructured,
+    _: &Module,
+    builder: &mut CodeBuilder,
+    instructions: &mut Vec<Instruction>,
+) -> Result<()> {
+    builder.pop_operands(&[ValType::F32]);
+    builder.push_operands(&[ValType::I32]);
+    instructions.push(Instruction::I32ReinterpretF32);
+    Ok(())
+}
+
+fn i64_reinterpret_f64(
+    _: &mut Unstructured,
+    _: &Module,
+    builder: &mut CodeBuilder,
+    instructions: &mut Vec<Instruction>,
+) -> Result<()> {
+    builder.pop_operands(&[ValType::F64]);
+    builder.push_operands(&[ValType::I64]);
+    instructions.push(Instruction::I64ReinterpretF64);
+    Ok(())
+}
+
+fn f32_reinterpret_i32(
+    _: &mut Unstructured,
+    _: &Module,
+    builder: &mut CodeBuilder,
+    instructions: &mut Vec<Instruction>,
+) -> Result<()> {
+    builder.pop_operands(&[ValType::I32]);
+    builder.push_operands(&[ValType::F32]);
+    instructions.push(Instruction::F32ReinterpretI32);
+    Ok(())
+}
+
+fn f64_reinterpret_i64(
+    _: &mut Unstructured,
+    _: &Module,
+    builder: &mut CodeBuilder,
+    instructions: &mut Vec<Instruction>,
+) -> Result<()> {
+    builder.pop_operands(&[ValType::I64]);
+    builder.push_operands(&[ValType::F64]);
+    instructions.push(Instruction::F64ReinterpretI64);
+    Ok(())
+}
+
+/// Whether an `i32.extend8_s`/`i32.extend16_s` may be generated: requires
+/// the sign-extension-ops proposal and an i32 on the operand stack.
+fn extendable_i32_on_stack(module: &Module, builder: &mut CodeBuilder) -> bool {
+    module.config.sign_extension_ops_enabled() && i32_on_stack(module, builder)
+}
+
+// In-place i32 sign extensions from the sign-extension-ops proposal.
+fn i32_extend_8_s(
+    _: &mut Unstructured,
+    _: &Module,
+    builder: &mut CodeBuilder,
+    instructions: &mut Vec<Instruction>,
+) -> Result<()> {
+    builder.pop_operands(&[ValType::I32]);
+    builder.push_operands(&[ValType::I32]);
+    instructions.push(Instruction::I32Extend8S);
+    Ok(())
+}
+
+fn i32_extend_16_s(
+    _: &mut Unstructured,
+    _: &Module,
+    builder: &mut CodeBuilder,
+    instructions: &mut Vec<Instruction>,
+) -> Result<()> {
+    builder.pop_operands(&[ValType::I32]);
+    builder.push_operands(&[ValType::I32]);
+    instructions.push(Instruction::I32Extend16S);
+    Ok(())
+}
+
+/// Whether an `i64.extend{8,16,32}_s` may be generated: requires the
+/// sign-extension-ops proposal and an i64 on the operand stack.
+fn extendable_i64_on_stack(module: &Module, builder: &mut CodeBuilder) -> bool {
+    module.config.sign_extension_ops_enabled() && i64_on_stack(module, builder)
+}
+
+// In-place i64 sign extensions from the sign-extension-ops proposal.
+fn i64_extend_8_s(
+    _: &mut Unstructured,
+    _: &Module,
+    builder: &mut CodeBuilder,
+    instructions: &mut Vec<Instruction>,
+) -> Result<()> {
+    builder.pop_operands(&[ValType::I64]);
+    builder.push_operands(&[ValType::I64]);
+    instructions.push(Instruction::I64Extend8S);
+    Ok(())
+}
+
+fn i64_extend_16_s(
+    _: &mut Unstructured,
+    _: &Module,
+    builder: &mut CodeBuilder,
+    instructions: &mut Vec<Instruction>,
+) -> Result<()> {
+    builder.pop_operands(&[ValType::I64]);
+    builder.push_operands(&[ValType::I64]);
+    instructions.push(Instruction::I64Extend16S);
+    Ok(())
+}
+
+fn i64_extend_32_s(
+    _: &mut Unstructured,
+    _: &Module,
+    builder: &mut CodeBuilder,
+    instructions: &mut Vec<Instruction>,
+) -> Result<()> {
+    builder.pop_operands(&[ValType::I64]);
+    builder.push_operands(&[ValType::I64]);
+    instructions.push(Instruction::I64Extend32S);
+    Ok(())
+}
+
+// Saturating truncations from the nontrapping-float-to-int proposal. These
+// saturate on overflow and map NaN to zero, so they never trap and need no
+// `no_traps` treatment.
+fn i32_trunc_sat_f32_s(
+    _: &mut Unstructured,
+    _: &Module,
+    builder: &mut CodeBuilder,
+    instructions: &mut Vec<Instruction>,
+) -> Result<()> {
+    builder.pop_operands(&[ValType::F32]);
+    builder.push_operands(&[ValType::I32]);
+    instructions.push(Instruction::I32TruncSatF32S);
+    Ok(())
+}
+
+fn i32_trunc_sat_f32_u(
+    _: &mut Unstructured,
+    _: &Module,
+    builder: &mut CodeBuilder,
+    instructions: &mut Vec<Instruction>,
+) -> Result<()> {
+    builder.pop_operands(&[ValType::F32]);
+    builder.push_operands(&[ValType::I32]);
+    instructions.push(Instruction::I32TruncSatF32U);
+    Ok(())
+}
+
+fn i32_trunc_sat_f64_s(
+    _: &mut Unstructured,
+    _: &Module,
+    builder: &mut CodeBuilder,
+    instructions: &mut Vec<Instruction>,
+) -> Result<()> {
+    builder.pop_operands(&[ValType::F64]);
+    builder.push_operands(&[ValType::I32]);
+    instructions.push(Instruction::I32TruncSatF64S);
+    Ok(())
+}
+
+fn i32_trunc_sat_f64_u(
+    _: &mut Unstructured,
+    _: &Module,
+    builder: &mut CodeBuilder,
+    instructions: &mut Vec<Instruction>,
+) -> Result<()> {
+    builder.pop_operands(&[ValType::F64]);
+    builder.push_operands(&[ValType::I32]);
+    instructions.push(Instruction::I32TruncSatF64U);
+    Ok(())
+}
+
+fn i64_trunc_sat_f32_s(
+    _: &mut Unstructured,
+    _: &Module,
+    builder: &mut CodeBuilder,
+    instructions: &mut Vec<Instruction>,
+) -> Result<()> {
+    builder.pop_operands(&[ValType::F32]);
+    builder.push_operands(&[ValType::I64]);
+    instructions.push(Instruction::I64TruncSatF32S);
+    Ok(())
+}
+
+fn i64_trunc_sat_f32_u(
+    _: &mut Unstructured,
+    _: &Module,
+    builder: &mut CodeBuilder,
+    instructions: &mut Vec<Instruction>,
+) -> Result<()> {
+    builder.pop_operands(&[ValType::F32]);
+    builder.push_operands(&[ValType::I64]);
+    instructions.push(Instruction::I64TruncSatF32U);
+    Ok(())
+}
+
+fn i64_trunc_sat_f64_s(
+    _: &mut Unstructured,
+    _: &Module,
+    builder: &mut CodeBuilder,
+    instructions: &mut Vec<Instruction>,
+) -> Result<()> {
+    builder.pop_operands(&[ValType::F64]);
+    builder.push_operands(&[ValType::I64]);
+    instructions.push(Instruction::I64TruncSatF64S);
+    Ok(())
+}
+
+fn i64_trunc_sat_f64_u(
+    _: &mut Unstructured,
+    _: &Module,
+    builder: &mut CodeBuilder,
+    instructions: &mut Vec<Instruction>,
+) -> Result<()> {
+    builder.pop_operands(&[ValType::F64]);
+    builder.push_operands(&[ValType::I64]);
+    instructions.push(Instruction::I64TruncSatF64U);
+    Ok(())
+}
+
+/// Generate an arbitrary static offset for a load/store on `memory_index`.
+///
+/// The configured `memory_offset_choices` weights `(a, b, c)` choose between
+/// three ranges: likely-in-bounds offsets (`0..=min`), offsets up to the
+/// declared maximum (`min..=max`), and offsets past the maximum
+/// (`max..=true_max`).
+fn memory_offset(u: &mut Unstructured, module: &Module, memory_index: u32) -> Result<u64> {
+    let (a, b, c) = module.config.memory_offset_choices();
+    assert!(a + b + c != 0);
+
+    let memory_type = &module.memories[memory_index as usize];
+    // 65536 is the wasm page size; limits are declared in pages.
+    let min = memory_type.minimum.saturating_mul(65536);
+    let max = memory_type
+        .maximum
+        .map(|max| max.saturating_mul(65536))
+        .unwrap_or(u64::MAX);
+
+    let (min, max, true_max) = match (memory_type.memory64, module.config.disallow_traps()) {
+        (true, false) => {
+            // 64-bit memories can use the limits calculated above as-is
+            (min, max, u64::MAX)
+        }
+        (false, false) => {
+            // 32-bit memories can't represent a full 4gb offset, so if that's the
+            // min/max sizes then we need to switch them to `u32::MAX`.
+            (
+                u64::from(u32::try_from(min).unwrap_or(u32::MAX)),
+                u64::from(u32::try_from(max).unwrap_or(u32::MAX)),
+                u64::from(u32::MAX),
+            )
+        }
+        // The logic for non-trapping versions of load/store involves pushing
+        // the offset + load/store size onto the stack as either an i32 or i64
+        // value. So even though offsets can normally be as high as u32 or u64,
+        // we need to limit them to lower in order for our non-trapping logic to
+        // work. 16 is the number of bytes of the largest load type (V128).
+        (true, true) => {
+            let no_trap_max = (i64::MAX - 16) as u64;
+            (min.min(no_trap_max), no_trap_max, no_trap_max)
+        }
+        (false, true) => {
+            let no_trap_max = (i32::MAX - 16) as u64;
+            (min.min(no_trap_max), no_trap_max, no_trap_max)
+        }
+    };
+
+    // Weighted pick of one of the three ranges described above.
+    let choice = u.int_in_range(0..=a + b + c - 1)?;
+    if choice < a {
+        u.int_in_range(0..=min)
+    } else if choice < a + b {
+        u.int_in_range(min..=max)
+    } else {
+        u.int_in_range(max..=true_max)
+    }
+}
+
+/// Build a `MemArg` for a load/store: pops the address operand, chooses a
+/// memory whose index type matches it, then picks an offset and alignment.
+fn mem_arg(
+    u: &mut Unstructured,
+    module: &Module,
+    builder: &mut CodeBuilder,
+    alignments: &[u32],
+) -> Result<MemArg> {
+    // The address on top of the stack is an i32 for 32-bit memories and an
+    // i64 for 64-bit memories; use it to pick a matching memory.
+    let index_ty = if builder.type_on_stack(ValType::I32) {
+        ValType::I32
+    } else {
+        ValType::I64
+    };
+    builder.pop_operands(&[index_ty]);
+    let memory_index = memory_index(u, builder, index_ty)?;
+    let offset = memory_offset(u, module, memory_index)?;
+    let align = *u.choose(alignments)?;
+    Ok(MemArg {
+        memory_index,
+        offset,
+        align,
+    })
+}
+
+/// Choose an arbitrary memory with the given address (index) type.
+fn memory_index(u: &mut Unstructured, builder: &CodeBuilder, ty: ValType) -> Result<u32> {
+    // 32-bit and 64-bit memories are tracked in separate pools.
+    let pool = if ty == ValType::I32 {
+        &builder.allocs.memory32
+    } else {
+        &builder.allocs.memory64
+    };
+    Ok(*u.choose(pool)?)
+}
+
+/// Choose an arbitrary data-segment index; at least one segment must exist.
+fn data_index(u: &mut Unstructured, module: &Module) -> Result<u32> {
+    let count = module.data.len() as u32;
+    assert!(count > 0);
+    match count {
+        // With a single segment there is no choice to make, and no input
+        // bytes need to be consumed.
+        1 => Ok(0),
+        n => u.int_in_range(0..=n - 1),
+    }
+}
+
+#[inline]
+fn ref_null_valid(module: &Module, _: &mut CodeBuilder) -> bool {
+    // `ref.null` has no operands, so it only needs the reference-types
+    // proposal to be enabled.
+    module.config.reference_types_enabled()
+}
+
+/// Generate `ref.null` for an arbitrarily chosen reference type.
+fn ref_null(
+    u: &mut Unstructured,
+    _: &Module,
+    builder: &mut CodeBuilder,
+    instructions: &mut Vec<Instruction>,
+) -> Result<()> {
+    let ref_ty = *u.choose(&[RefType::EXTERNREF, RefType::FUNCREF])?;
+    instructions.push(Instruction::RefNull(ref_ty.heap_type));
+    builder.push_operands(&[ref_ty.into()]);
+    Ok(())
+}
+
+#[inline]
+fn ref_func_valid(module: &Module, builder: &mut CodeBuilder) -> bool {
+    // `ref.func` needs the reference-types proposal plus at least one
+    // function recorded as referenceable in this builder's allocations.
+    // (`!is_empty()` is the idiomatic form of the old `len() > 0`.)
+    module.config.reference_types_enabled() && !builder.allocs.referenced_functions.is_empty()
+}
+
+/// Generate `ref.func` for one of the functions declared referenceable.
+fn ref_func(
+    u: &mut Unstructured,
+    _: &Module,
+    builder: &mut CodeBuilder,
+    instructions: &mut Vec<Instruction>,
+) -> Result<()> {
+    let func_idx = *u.choose(&builder.allocs.referenced_functions)?;
+    instructions.push(Instruction::RefFunc(func_idx));
+    builder.push_operands(&[ValType::FUNCREF]);
+    Ok(())
+}
+
+#[inline]
+fn ref_is_null_valid(module: &Module, builder: &mut CodeBuilder) -> bool {
+    // Needs the reference-types proposal and either kind of reference value
+    // on top of the operand stack.
+    module.config.reference_types_enabled()
+        && (builder.type_on_stack(ValType::EXTERNREF) || builder.type_on_stack(ValType::FUNCREF))
+}
+
+/// Generate `ref.is_null`: pops a reference, pushes an i32.
+fn ref_is_null(
+    _: &mut Unstructured,
+    _: &Module,
+    builder: &mut CodeBuilder,
+    instructions: &mut Vec<Instruction>,
+) -> Result<()> {
+    // Either reference type may be on top of the stack; discard it.
+    pop_reference_type(builder);
+    instructions.push(Instruction::RefIsNull);
+    builder.push_operands(&[ValType::I32]);
+    Ok(())
+}
+
+#[inline]
+fn table_fill_valid(module: &Module, builder: &mut CodeBuilder) -> bool {
+    // `table.fill` needs reference types + bulk memory, a stack shaped
+    // `[i32 len, ref value, i32 start]`, and a table of the value's type.
+    module.config.reference_types_enabled()
+        && module.config.bulk_memory_enabled()
+        && !module.config.disallow_traps() // Non-trapping table fill generation not yet implemented
+        && [ValType::EXTERNREF, ValType::FUNCREF].iter().any(|ty| {
+            builder.types_on_stack(&[ValType::I32, *ty, ValType::I32])
+                && module.tables.iter().any(|t| *ty == t.element_type.into())
+        })
+}
+
+/// Generate `table.fill`: pops (top first) a length, a reference value, and
+/// a start index, then fills a table of the matching element type.
+fn table_fill(
+    u: &mut Unstructured,
+    module: &Module,
+    builder: &mut CodeBuilder,
+    instructions: &mut Vec<Instruction>,
+) -> Result<()> {
+    builder.pop_operands(&[ValType::I32]);
+    let elem_ty = pop_reference_type(builder);
+    builder.pop_operands(&[ValType::I32]);
+    let table_idx = table_index(elem_ty, u, module)?;
+    instructions.push(Instruction::TableFill(table_idx));
+    Ok(())
+}
+
+#[inline]
+fn table_set_valid(module: &Module, builder: &mut CodeBuilder) -> bool {
+    // `table.set` needs reference types, a stack shaped `[ref value, i32
+    // index]`, and a table whose element type matches the value.
+    module.config.reference_types_enabled()
+        && !module.config.disallow_traps() // Non-trapping table.set generation not yet implemented
+        && [ValType::EXTERNREF, ValType::FUNCREF].iter().any(|ty| {
+            builder.types_on_stack(&[ValType::I32, *ty])
+                && module.tables.iter().any(|t| *ty == t.element_type.into())
+        })
+}
+
+/// Generate `table.set`: pops (top first) a reference value and an i32
+/// index, storing the value into a table of the matching element type.
+fn table_set(
+    u: &mut Unstructured,
+    module: &Module,
+    builder: &mut CodeBuilder,
+    instructions: &mut Vec<Instruction>,
+) -> Result<()> {
+    let elem_ty = pop_reference_type(builder);
+    builder.pop_operands(&[ValType::I32]);
+    let table_idx = table_index(elem_ty, u, module)?;
+    instructions.push(Instruction::TableSet(table_idx));
+    Ok(())
+}
+
+#[inline]
+fn table_get_valid(module: &Module, builder: &mut CodeBuilder) -> bool {
+    // `table.get` needs reference types, an i32 index on the stack, and at
+    // least one table. (`!is_empty()` replaces the non-idiomatic
+    // `len() > 0`, matching clippy's `len_zero` lint.)
+    module.config.reference_types_enabled()
+        && !module.config.disallow_traps() // Non-trapping table.get generation not yet implemented
+        && builder.type_on_stack(ValType::I32)
+        && !module.tables.is_empty()
+}
+
+/// Generate `table.get`: pops an i32 index and pushes an element of the
+/// chosen table's element type.
+fn table_get(
+    u: &mut Unstructured,
+    module: &Module,
+    builder: &mut CodeBuilder,
+    instructions: &mut Vec<Instruction>,
+) -> Result<()> {
+    builder.pop_operands(&[ValType::I32]);
+    let table = u.int_in_range(0..=module.tables.len() - 1)?;
+    let elem_ty = module.tables[table].element_type;
+    builder.push_operands(&[elem_ty.into()]);
+    instructions.push(Instruction::TableGet(table as u32));
+    Ok(())
+}
+
+#[inline]
+fn table_size_valid(module: &Module, _: &mut CodeBuilder) -> bool {
+    // `table.size` pops nothing, so it only needs reference types and at
+    // least one table. (`!is_empty()` replaces the non-idiomatic
+    // `len() > 0`.)
+    module.config.reference_types_enabled() && !module.tables.is_empty()
+}
+
+/// Generate `table.size` for an arbitrary table; pushes an i32.
+fn table_size(
+    u: &mut Unstructured,
+    module: &Module,
+    builder: &mut CodeBuilder,
+    instructions: &mut Vec<Instruction>,
+) -> Result<()> {
+    let last = module.tables.len() - 1;
+    let table = u.int_in_range(0..=last)? as u32;
+    builder.push_operands(&[ValType::I32]);
+    instructions.push(Instruction::TableSize(table));
+    Ok(())
+}
+
+#[inline]
+fn table_grow_valid(module: &Module, builder: &mut CodeBuilder) -> bool {
+    // `table.grow` needs reference types, a stack shaped `[i32 delta, ref
+    // init value]`, and a table whose element type matches the init value.
+    module.config.reference_types_enabled()
+        && [ValType::EXTERNREF, ValType::FUNCREF].iter().any(|ty| {
+            builder.types_on_stack(&[*ty, ValType::I32])
+                && module.tables.iter().any(|t| *ty == t.element_type.into())
+        })
+}
+
+/// Generate `table.grow`: pops (top first) an i32 delta and a reference
+/// init value, then pushes the i32 result.
+fn table_grow(
+    u: &mut Unstructured,
+    module: &Module,
+    builder: &mut CodeBuilder,
+    instructions: &mut Vec<Instruction>,
+) -> Result<()> {
+    builder.pop_operands(&[ValType::I32]);
+    let elem_ty = pop_reference_type(builder);
+    let table_idx = table_index(elem_ty, u, module)?;
+    builder.push_operands(&[ValType::I32]);
+    instructions.push(Instruction::TableGrow(table_idx));
+    Ok(())
+}
+
+#[inline]
+fn table_copy_valid(module: &Module, builder: &mut CodeBuilder) -> bool {
+    // `table.copy` needs reference types, at least one table, and three i32
+    // operands (dst, src, len) on the stack. (`!is_empty()` replaces the
+    // non-idiomatic `len() > 0`.)
+    module.config.reference_types_enabled()
+        && !module.config.disallow_traps() // Non-trapping table.copy generation not yet implemented
+        && !module.tables.is_empty()
+        && builder.types_on_stack(&[ValType::I32, ValType::I32, ValType::I32])
+}
+
+/// Generate `table.copy`: pops three i32s and copies between two tables of
+/// the same element type.
+fn table_copy(
+    u: &mut Unstructured,
+    module: &Module,
+    builder: &mut CodeBuilder,
+    instructions: &mut Vec<Instruction>,
+) -> Result<()> {
+    builder.pop_operands(&[ValType::I32, ValType::I32, ValType::I32]);
+    let src = u.int_in_range(0..=module.tables.len() - 1)? as u32;
+    // The destination must share the source table's element type.
+    let elem_ty = module.tables[src as usize].element_type;
+    let dst = table_index(elem_ty, u, module)?;
+    instructions.push(Instruction::TableCopy {
+        src_table: src,
+        dst_table: dst,
+    });
+    Ok(())
+}
+
+#[inline]
+fn table_init_valid(module: &Module, builder: &mut CodeBuilder) -> bool {
+    // `table.init` needs reference types, a compatible (table, element
+    // segment) pair to exist, and three i32 operands on the stack.
+    module.config.reference_types_enabled()
+        && !module.config.disallow_traps() // Non-trapping table.init generation not yet implemented.
+        && builder.allocs.table_init_possible
+        && builder.types_on_stack(&[ValType::I32, ValType::I32, ValType::I32])
+}
+
+/// Generate `table.init`: pops three i32s and initializes a table from a
+/// type-compatible element segment.
+fn table_init(
+    u: &mut Unstructured,
+    module: &Module,
+    builder: &mut CodeBuilder,
+    instructions: &mut Vec<Instruction>,
+) -> Result<()> {
+    builder.pop_operands(&[ValType::I32, ValType::I32, ValType::I32]);
+    // Collect every element segment whose type matches at least one table.
+    let mut candidates = Vec::new();
+    for (i, elem) in module.elems.iter().enumerate() {
+        if module.tables.iter().any(|t| t.element_type == elem.ty) {
+            candidates.push(i);
+        }
+    }
+    let segment = *u.choose(&candidates)?;
+    let table = table_index(module.elems[segment].ty, u, module)?;
+    instructions.push(Instruction::TableInit {
+        elem_index: segment as u32,
+        table,
+    });
+    Ok(())
+}
+
+#[inline]
+fn elem_drop_valid(module: &Module, _builder: &mut CodeBuilder) -> bool {
+    // `elem.drop` pops nothing, so it only needs reference types and at
+    // least one element segment. (`!is_empty()` replaces the non-idiomatic
+    // `len() > 0`.)
+    module.config.reference_types_enabled() && !module.elems.is_empty()
+}
+
+/// Generate `elem.drop` for an arbitrary element segment.
+fn elem_drop(
+    u: &mut Unstructured,
+    module: &Module,
+    _builder: &mut CodeBuilder,
+    instructions: &mut Vec<Instruction>,
+) -> Result<()> {
+    let last = module.elems.len() - 1;
+    let segment = u.int_in_range(0..=last)? as u32;
+    instructions.push(Instruction::ElemDrop(segment));
+    Ok(())
+}
+
+/// Pop whichever reference type sits on top of the operand stack and return
+/// which one it was.
+fn pop_reference_type(builder: &mut CodeBuilder) -> RefType {
+    let ty = if builder.type_on_stack(ValType::EXTERNREF) {
+        RefType::EXTERNREF
+    } else {
+        RefType::FUNCREF
+    };
+    builder.pop_operands(&[ty.into()]);
+    ty
+}
+
+/// Choose an arbitrary table whose element type is `ty`.
+fn table_index(ty: RefType, u: &mut Unstructured, module: &Module) -> Result<u32> {
+    let mut matching = Vec::new();
+    for (i, table) in module.tables.iter().enumerate() {
+        if table.element_type == ty {
+            matching.push(i as u32);
+        }
+    }
+    Ok(*u.choose(&matching)?)
+}
+
+/// Choose a SIMD lane index in `0..number_of_lanes`.
+fn lane_index(u: &mut Unstructured, number_of_lanes: u8) -> Result<u8> {
+    u.int_in_range(0..=(number_of_lanes - 1))
+}
+
+// Validity predicates for SIMD instructions consuming one, two, or three
+// v128 operands. All are disabled when traps are disallowed (non-trapping
+// SIMD generation is not implemented); `_relaxed` variants require the
+// relaxed-simd proposal instead of plain simd.
+#[inline]
+fn simd_v128_on_stack(module: &Module, builder: &mut CodeBuilder) -> bool {
+    !module.config.disallow_traps()
+        && module.config.simd_enabled()
+        && builder.types_on_stack(&[ValType::V128])
+}
+
+#[inline]
+fn simd_v128_on_stack_relaxed(module: &Module, builder: &mut CodeBuilder) -> bool {
+    !module.config.disallow_traps()
+        && module.config.relaxed_simd_enabled()
+        && builder.types_on_stack(&[ValType::V128])
+}
+
+#[inline]
+fn simd_v128_v128_on_stack(module: &Module, builder: &mut CodeBuilder) -> bool {
+    !module.config.disallow_traps()
+        && module.config.simd_enabled()
+        && builder.types_on_stack(&[ValType::V128, ValType::V128])
+}
+
+#[inline]
+fn simd_v128_v128_on_stack_relaxed(module: &Module, builder: &mut CodeBuilder) -> bool {
+    !module.config.disallow_traps()
+        && module.config.relaxed_simd_enabled()
+        && builder.types_on_stack(&[ValType::V128, ValType::V128])
+}
+
+#[inline]
+fn simd_v128_v128_v128_on_stack(module: &Module, builder: &mut CodeBuilder) -> bool {
+    !module.config.disallow_traps()
+        && module.config.simd_enabled()
+        && builder.types_on_stack(&[ValType::V128, ValType::V128, ValType::V128])
+}
+
+#[inline]
+fn simd_v128_v128_v128_on_stack_relaxed(module: &Module, builder: &mut CodeBuilder) -> bool {
+    !module.config.disallow_traps()
+        && module.config.relaxed_simd_enabled()
+        && builder.types_on_stack(&[ValType::V128, ValType::V128, ValType::V128])
+}
+
+// Validity predicates for SIMD instructions taking a v128 plus one scalar
+// operand (replace-lane, shifts), or a single scalar (splats).
+#[inline]
+fn simd_v128_i32_on_stack(module: &Module, builder: &mut CodeBuilder) -> bool {
+    !module.config.disallow_traps()
+        && module.config.simd_enabled()
+        && builder.types_on_stack(&[ValType::V128, ValType::I32])
+}
+
+#[inline]
+fn simd_v128_i64_on_stack(module: &Module, builder: &mut CodeBuilder) -> bool {
+    !module.config.disallow_traps()
+        && module.config.simd_enabled()
+        && builder.types_on_stack(&[ValType::V128, ValType::I64])
+}
+
+#[inline]
+fn simd_v128_f32_on_stack(module: &Module, builder: &mut CodeBuilder) -> bool {
+    !module.config.disallow_traps()
+        && module.config.simd_enabled()
+        && builder.types_on_stack(&[ValType::V128, ValType::F32])
+}
+
+#[inline]
+fn simd_v128_f64_on_stack(module: &Module, builder: &mut CodeBuilder) -> bool {
+    !module.config.disallow_traps()
+        && module.config.simd_enabled()
+        && builder.types_on_stack(&[ValType::V128, ValType::F64])
+}
+
+#[inline]
+fn simd_i32_on_stack(module: &Module, builder: &mut CodeBuilder) -> bool {
+    !module.config.disallow_traps()
+        && module.config.simd_enabled()
+        && builder.type_on_stack(ValType::I32)
+}
+
+#[inline]
+fn simd_i64_on_stack(module: &Module, builder: &mut CodeBuilder) -> bool {
+    !module.config.disallow_traps()
+        && module.config.simd_enabled()
+        && builder.type_on_stack(ValType::I64)
+}
+
+#[inline]
+fn simd_f32_on_stack(module: &Module, builder: &mut CodeBuilder) -> bool {
+    !module.config.disallow_traps()
+        && module.config.simd_enabled()
+        && builder.type_on_stack(ValType::F32)
+}
+
+#[inline]
+fn simd_f64_on_stack(module: &Module, builder: &mut CodeBuilder) -> bool {
+    !module.config.disallow_traps()
+        && module.config.simd_enabled()
+        && builder.type_on_stack(ValType::F64)
+}
+
+// Validity predicates for SIMD memory instructions.
+#[inline]
+fn simd_have_memory_and_offset(module: &Module, builder: &mut CodeBuilder) -> bool {
+    !module.config.disallow_traps()
+        && module.config.simd_enabled()
+        && have_memory_and_offset(module, builder)
+}
+
+// NOTE(review): this has the same body as `simd_v128_store_valid` below;
+// the two names exist to document the different call sites.
+#[inline]
+fn simd_have_memory_and_offset_and_v128(module: &Module, builder: &mut CodeBuilder) -> bool {
+    !module.config.disallow_traps()
+        && module.config.simd_enabled()
+        && store_valid(module, builder, || ValType::V128)
+}
+
+#[inline]
+fn simd_load_lane_valid(module: &Module, builder: &mut CodeBuilder) -> bool {
+    // The SIMD non-trapping case is not yet implemented.
+    !module.config.disallow_traps() && simd_have_memory_and_offset_and_v128(module, builder)
+}
+
+#[inline]
+fn simd_v128_store_valid(module: &Module, builder: &mut CodeBuilder) -> bool {
+    !module.config.disallow_traps()
+        && module.config.simd_enabled()
+        && store_valid(module, builder, || ValType::V128)
+}
+
+#[inline]
+fn simd_store_lane_valid(module: &Module, builder: &mut CodeBuilder) -> bool {
+    // The SIMD non-trapping case is not yet implemented.
+    !module.config.disallow_traps() && simd_v128_store_valid(module, builder)
+}
+
+// Unlike the predicates above, this one has no stack or trap requirements;
+// it gates operand-free SIMD generators such as constants.
+#[inline]
+fn simd_enabled(module: &Module, _: &mut CodeBuilder) -> bool {
+    module.config.simd_enabled()
+}
+
+// Generates a v128 load generator function: it consumes the address operand
+// (via `mem_arg`), pushes a v128, and routes through `no_traps::load` when
+// traps are disallowed.
+macro_rules! simd_load {
+    ($instruction:ident, $generator_fn_name:ident, $alignments:expr) => {
+        fn $generator_fn_name(
+            u: &mut Unstructured,
+            module: &Module,
+            builder: &mut CodeBuilder,
+            instructions: &mut Vec<Instruction>,
+        ) -> Result<()> {
+            let memarg = mem_arg(u, module, builder, $alignments)?;
+            builder.push_operands(&[ValType::V128]);
+            if module.config.disallow_traps() {
+                no_traps::load(
+                    Instruction::$instruction(memarg),
+                    module,
+                    builder,
+                    instructions,
+                );
+            } else {
+                instructions.push(Instruction::$instruction(memarg));
+            }
+            Ok(())
+        }
+    };
+}
+
+// One generator per SIMD load shape; the slice lists the legal alignment
+// exponents for that access width.
+simd_load!(V128Load, v128_load, &[0, 1, 2, 3, 4]);
+simd_load!(V128Load8x8S, v128_load8x8s, &[0, 1, 2, 3]);
+simd_load!(V128Load8x8U, v128_load8x8u, &[0, 1, 2, 3]);
+simd_load!(V128Load16x4S, v128_load16x4s, &[0, 1, 2, 3]);
+simd_load!(V128Load16x4U, v128_load16x4u, &[0, 1, 2, 3]);
+simd_load!(V128Load32x2S, v128_load32x2s, &[0, 1, 2, 3]);
+simd_load!(V128Load32x2U, v128_load32x2u, &[0, 1, 2, 3]);
+simd_load!(V128Load8Splat, v128_load8_splat, &[0]);
+simd_load!(V128Load16Splat, v128_load16_splat, &[0, 1]);
+simd_load!(V128Load32Splat, v128_load32_splat, &[0, 1, 2]);
+simd_load!(V128Load64Splat, v128_load64_splat, &[0, 1, 2, 3]);
+simd_load!(V128Load32Zero, v128_load32_zero, &[0, 1, 2]);
+simd_load!(V128Load64Zero, v128_load64_zero, &[0, 1, 2, 3]);
+
+/// Generate `v128.store`: pops the v128 value and the address operand,
+/// routing through `no_traps::store` when traps are disallowed.
+fn v128_store(
+    u: &mut Unstructured,
+    module: &Module,
+    builder: &mut CodeBuilder,
+    instructions: &mut Vec<Instruction>,
+) -> Result<()> {
+    builder.pop_operands(&[ValType::V128]);
+    let memarg = mem_arg(u, module, builder, &[0, 1, 2, 3, 4])?;
+    let store = Instruction::V128Store(memarg);
+    if module.config.disallow_traps() {
+        no_traps::store(store, module, builder, instructions);
+    } else {
+        instructions.push(store);
+    }
+    Ok(())
+}
+
+// Generates a lane-wise v128 load generator: pops the destination v128 and
+// the address, then pushes the updated v128. There is no `no_traps` path
+// here; `simd_load_lane_valid` rejects these when traps are disallowed.
+macro_rules! simd_load_lane {
+    ($instruction:ident, $generator_fn_name:ident, $alignments:expr, $number_of_lanes:expr) => {
+        fn $generator_fn_name(
+            u: &mut Unstructured,
+            module: &Module,
+            builder: &mut CodeBuilder,
+            instructions: &mut Vec<Instruction>,
+        ) -> Result<()> {
+            builder.pop_operands(&[ValType::V128]);
+            let memarg = mem_arg(u, module, builder, $alignments)?;
+            builder.push_operands(&[ValType::V128]);
+            instructions.push(Instruction::$instruction {
+                memarg,
+                lane: lane_index(u, $number_of_lanes)?,
+            });
+            Ok(())
+        }
+    };
+}
+
+simd_load_lane!(V128Load8Lane, v128_load8_lane, &[0], 16);
+simd_load_lane!(V128Load16Lane, v128_load16_lane, &[0, 1], 8);
+simd_load_lane!(V128Load32Lane, v128_load32_lane, &[0, 1, 2], 4);
+simd_load_lane!(V128Load64Lane, v128_load64_lane, &[0, 1, 2, 3], 2);
+
+// Generates a lane-wise v128 store generator: pops the v128 source and the
+// address. As with loads, `simd_store_lane_valid` rejects these when traps
+// are disallowed, so no `no_traps` path is needed.
+macro_rules! simd_store_lane {
+    ($instruction:ident, $generator_fn_name:ident, $alignments:expr, $number_of_lanes:expr) => {
+        fn $generator_fn_name(
+            u: &mut Unstructured,
+            module: &Module,
+            builder: &mut CodeBuilder,
+            instructions: &mut Vec<Instruction>,
+        ) -> Result<()> {
+            builder.pop_operands(&[ValType::V128]);
+            let memarg = mem_arg(u, module, builder, $alignments)?;
+            instructions.push(Instruction::$instruction {
+                memarg,
+                lane: lane_index(u, $number_of_lanes)?,
+            });
+            Ok(())
+        }
+    };
+}
+
+simd_store_lane!(V128Store8Lane, v128_store8_lane, &[0], 16);
+simd_store_lane!(V128Store16Lane, v128_store16_lane, &[0, 1], 8);
+simd_store_lane!(V128Store32Lane, v128_store32_lane, &[0, 1, 2], 4);
+simd_store_lane!(V128Store64Lane, v128_store64_lane, &[0, 1, 2, 3], 2);
+
+/// Generate `v128.const` with an arbitrary 128-bit immediate.
+fn v128_const(
+    u: &mut Unstructured,
+    _: &Module,
+    builder: &mut CodeBuilder,
+    instructions: &mut Vec<Instruction>,
+) -> Result<()> {
+    let bits = i128::from_le_bytes(u.arbitrary()?);
+    builder.push_operands(&[ValType::V128]);
+    instructions.push(Instruction::V128Const(bits));
+    Ok(())
+}
+
+/// Generate `i8x16.shuffle`: pops two v128s and pushes the shuffled v128.
+fn i8x16_shuffle(
+    u: &mut Unstructured,
+    _: &Module,
+    builder: &mut CodeBuilder,
+    instructions: &mut Vec<Instruction>,
+) -> Result<()> {
+    builder.pop_operands(&[ValType::V128, ValType::V128]);
+    builder.push_operands(&[ValType::V128]);
+    let mut lanes = [0; 16];
+    // Each lane selects one of the 32 input lanes across both operands.
+    for lane in lanes.iter_mut() {
+        *lane = u.int_in_range(0..=31)?;
+    }
+    instructions.push(Instruction::I8x16Shuffle(lanes));
+    Ok(())
+}
+
+// Generates an extract-lane or replace-lane generator with the given
+// operand/result types and lane count.
+macro_rules! simd_lane_access {
+    ($instruction:ident, $generator_fn_name:ident, $in_types:expr => $out_types:expr, $number_of_lanes:expr) => {
+        fn $generator_fn_name(
+            u: &mut Unstructured,
+            _: &Module,
+            builder: &mut CodeBuilder,
+            instructions: &mut Vec<Instruction>,
+        ) -> Result<()> {
+            builder.pop_operands($in_types);
+            builder.push_operands($out_types);
+            instructions.push(Instruction::$instruction(lane_index(u, $number_of_lanes)?));
+            Ok(())
+        }
+    };
+}
+
+simd_lane_access!(I8x16ExtractLaneS, i8x16_extract_lane_s, &[ValType::V128] => &[ValType::I32], 16);
+simd_lane_access!(I8x16ExtractLaneU, i8x16_extract_lane_u, &[ValType::V128] => &[ValType::I32], 16);
+simd_lane_access!(I8x16ReplaceLane, i8x16_replace_lane, &[ValType::V128, ValType::I32] => &[ValType::V128], 16);
+simd_lane_access!(I16x8ExtractLaneS, i16x8_extract_lane_s, &[ValType::V128] => &[ValType::I32], 8);
+simd_lane_access!(I16x8ExtractLaneU, i16x8_extract_lane_u, &[ValType::V128] => &[ValType::I32], 8);
+simd_lane_access!(I16x8ReplaceLane, i16x8_replace_lane, &[ValType::V128, ValType::I32] => &[ValType::V128], 8);
+simd_lane_access!(I32x4ExtractLane, i32x4_extract_lane, &[ValType::V128] => &[ValType::I32], 4);
+simd_lane_access!(I32x4ReplaceLane, i32x4_replace_lane, &[ValType::V128, ValType::I32] => &[ValType::V128], 4);
+simd_lane_access!(I64x2ExtractLane, i64x2_extract_lane, &[ValType::V128] => &[ValType::I64], 2);
+simd_lane_access!(I64x2ReplaceLane, i64x2_replace_lane, &[ValType::V128, ValType::I64] => &[ValType::V128], 2);
+simd_lane_access!(F32x4ExtractLane, f32x4_extract_lane, &[ValType::V128] => &[ValType::F32], 4);
+simd_lane_access!(F32x4ReplaceLane, f32x4_replace_lane, &[ValType::V128, ValType::F32] => &[ValType::V128], 4);
+simd_lane_access!(F64x2ExtractLane, f64x2_extract_lane, &[ValType::V128] => &[ValType::F64], 2);
+simd_lane_access!(F64x2ReplaceLane, f64x2_replace_lane, &[ValType::V128, ValType::F64] => &[ValType::V128], 2);
+
+// Generator macros for operand-shaped SIMD instruction families.
+
+// Binary op: (v128, v128) -> v128.
+macro_rules! simd_binop {
+    ($instruction:ident, $generator_fn_name:ident) => {
+        fn $generator_fn_name(
+            _: &mut Unstructured,
+            _: &Module,
+            builder: &mut CodeBuilder,
+            instructions: &mut Vec<Instruction>,
+        ) -> Result<()> {
+            builder.pop_operands(&[ValType::V128, ValType::V128]);
+            builder.push_operands(&[ValType::V128]);
+            instructions.push(Instruction::$instruction);
+            Ok(())
+        }
+    };
+}
+
+// Unary op: defaults to v128 -> v128, but an explicit `$in -> $out` form
+// also covers splats and other cross-type single-operand instructions.
+macro_rules! simd_unop {
+    ($instruction:ident, $generator_fn_name:ident) => {
+        simd_unop!($instruction, $generator_fn_name, V128 -> V128);
+    };
+
+    ($instruction:ident, $generator_fn_name:ident, $in_type:ident -> $out_type:ident) => {
+        fn $generator_fn_name(
+            _: &mut Unstructured,
+            _: &Module,
+            builder: &mut CodeBuilder,
+            instructions: &mut Vec<Instruction>, ) -> Result<()> {
+            builder.pop_operands(&[ValType::$in_type]);
+            builder.push_operands(&[ValType::$out_type]);
+            instructions.push(Instruction::$instruction);
+            Ok(())
+        }
+    };
+}
+
+// Ternary op: (v128, v128, v128) -> v128.
+macro_rules! simd_ternop {
+    ($instruction:ident, $generator_fn_name:ident) => {
+        fn $generator_fn_name(
+            _: &mut Unstructured,
+            _: &Module,
+            builder: &mut CodeBuilder,
+            instructions: &mut Vec<Instruction>,
+        ) -> Result<()> {
+            builder.pop_operands(&[ValType::V128, ValType::V128, ValType::V128]);
+            builder.push_operands(&[ValType::V128]);
+            instructions.push(Instruction::$instruction);
+            Ok(())
+        }
+    };
+}
+
+// Shift op: (v128, i32 shift amount) -> v128.
+macro_rules! simd_shift {
+    ($instruction:ident, $generator_fn_name:ident) => {
+        fn $generator_fn_name(
+            _: &mut Unstructured,
+            _: &Module,
+            builder: &mut CodeBuilder,
+            instructions: &mut Vec<Instruction>,
+        ) -> Result<()> {
+            builder.pop_operands(&[ValType::V128, ValType::I32]);
+            builder.push_operands(&[ValType::V128]);
+            instructions.push(Instruction::$instruction);
+            Ok(())
+        }
+    };
+}
+
+simd_unop!(I8x16Splat, i8x16_splat, I32 -> V128);
+simd_unop!(I16x8Splat, i16x8_splat, I32 -> V128);
+simd_unop!(I32x4Splat, i32x4_splat, I32 -> V128);
+simd_unop!(I64x2Splat, i64x2_splat, I64 -> V128);
+simd_unop!(F32x4Splat, f32x4_splat, F32 -> V128);
+simd_unop!(F64x2Splat, f64x2_splat, F64 -> V128);
+simd_binop!(I8x16Swizzle, i8x16_swizzle);
+simd_binop!(I8x16Eq, i8x16_eq);
+simd_binop!(I8x16Ne, i8x16_ne);
+simd_binop!(I8x16LtS, i8x16_lt_s);
+simd_binop!(I8x16LtU, i8x16_lt_u);
+simd_binop!(I8x16GtS, i8x16_gt_s);
+simd_binop!(I8x16GtU, i8x16_gt_u);
+simd_binop!(I8x16LeS, i8x16_le_s);
+simd_binop!(I8x16LeU, i8x16_le_u);
+simd_binop!(I8x16GeS, i8x16_ge_s);
+simd_binop!(I8x16GeU, i8x16_ge_u);
+simd_binop!(I16x8Eq, i16x8_eq);
+simd_binop!(I16x8Ne, i16x8_ne);
+simd_binop!(I16x8LtS, i16x8_lt_s);
+simd_binop!(I16x8LtU, i16x8_lt_u);
+simd_binop!(I16x8GtS, i16x8_gt_s);
+simd_binop!(I16x8GtU, i16x8_gt_u);
+simd_binop!(I16x8LeS, i16x8_le_s);
+simd_binop!(I16x8LeU, i16x8_le_u);
+simd_binop!(I16x8GeS, i16x8_ge_s);
+simd_binop!(I16x8GeU, i16x8_ge_u);
+simd_binop!(I32x4Eq, i32x4_eq);
+simd_binop!(I32x4Ne, i32x4_ne);
+simd_binop!(I32x4LtS, i32x4_lt_s);
+simd_binop!(I32x4LtU, i32x4_lt_u);
+simd_binop!(I32x4GtS, i32x4_gt_s);
+simd_binop!(I32x4GtU, i32x4_gt_u);
+simd_binop!(I32x4LeS, i32x4_le_s);
+simd_binop!(I32x4LeU, i32x4_le_u);
+simd_binop!(I32x4GeS, i32x4_ge_s);
+simd_binop!(I32x4GeU, i32x4_ge_u);
+simd_binop!(I64x2Eq, i64x2_eq);
+simd_binop!(I64x2Ne, i64x2_ne);
+simd_binop!(I64x2LtS, i64x2_lt_s);
+simd_binop!(I64x2GtS, i64x2_gt_s);
+simd_binop!(I64x2LeS, i64x2_le_s);
+simd_binop!(I64x2GeS, i64x2_ge_s);
+simd_binop!(F32x4Eq, f32x4_eq);
+simd_binop!(F32x4Ne, f32x4_ne);
+simd_binop!(F32x4Lt, f32x4_lt);
+simd_binop!(F32x4Gt, f32x4_gt);
+simd_binop!(F32x4Le, f32x4_le);
+simd_binop!(F32x4Ge, f32x4_ge);
+simd_binop!(F64x2Eq, f64x2_eq);
+simd_binop!(F64x2Ne, f64x2_ne);
+simd_binop!(F64x2Lt, f64x2_lt);
+simd_binop!(F64x2Gt, f64x2_gt);
+simd_binop!(F64x2Le, f64x2_le);
+simd_binop!(F64x2Ge, f64x2_ge);
+simd_unop!(V128Not, v128_not);
+simd_binop!(V128And, v128_and);
+simd_binop!(V128AndNot, v128_and_not);
+simd_binop!(V128Or, v128_or);
+simd_binop!(V128Xor, v128_xor);
+simd_unop!(V128AnyTrue, v128_any_true, V128 -> I32);
+simd_unop!(I8x16Abs, i8x16_abs);
+simd_unop!(I8x16Neg, i8x16_neg);
+simd_unop!(I8x16Popcnt, i8x16_popcnt);
+simd_unop!(I8x16AllTrue, i8x16_all_true, V128 -> I32);
+simd_unop!(I8x16Bitmask, i8x16_bitmask, V128 -> I32);
+simd_binop!(I8x16NarrowI16x8S, i8x16_narrow_i16x8s);
+simd_binop!(I8x16NarrowI16x8U, i8x16_narrow_i16x8u);
+simd_shift!(I8x16Shl, i8x16_shl);
+simd_shift!(I8x16ShrS, i8x16_shr_s);
+simd_shift!(I8x16ShrU, i8x16_shr_u);
+simd_binop!(I8x16Add, i8x16_add);
+simd_binop!(I8x16AddSatS, i8x16_add_sat_s);
+simd_binop!(I8x16AddSatU, i8x16_add_sat_u);
+simd_binop!(I8x16Sub, i8x16_sub);
+simd_binop!(I8x16SubSatS, i8x16_sub_sat_s);
+simd_binop!(I8x16SubSatU, i8x16_sub_sat_u);
+simd_binop!(I8x16MinS, i8x16_min_s);
+simd_binop!(I8x16MinU, i8x16_min_u);
+simd_binop!(I8x16MaxS, i8x16_max_s);
+simd_binop!(I8x16MaxU, i8x16_max_u);
+simd_binop!(I8x16AvgrU, i8x16_avgr_u);
+simd_unop!(I16x8ExtAddPairwiseI8x16S, i16x8_extadd_pairwise_i8x16s);
+simd_unop!(I16x8ExtAddPairwiseI8x16U, i16x8_extadd_pairwise_i8x16u);
+simd_unop!(I16x8Abs, i16x8_abs);
+simd_unop!(I16x8Neg, i16x8_neg);
+simd_binop!(I16x8Q15MulrSatS, i16x8q15_mulr_sat_s);
+simd_unop!(I16x8AllTrue, i16x8_all_true, V128 -> I32);
+simd_unop!(I16x8Bitmask, i16x8_bitmask, V128 -> I32);
+simd_binop!(I16x8NarrowI32x4S, i16x8_narrow_i32x4s);
+simd_binop!(I16x8NarrowI32x4U, i16x8_narrow_i32x4u);
+simd_unop!(I16x8ExtendLowI8x16S, i16x8_extend_low_i8x16s);
+simd_unop!(I16x8ExtendHighI8x16S, i16x8_extend_high_i8x16s);
+simd_unop!(I16x8ExtendLowI8x16U, i16x8_extend_low_i8x16u);
+simd_unop!(I16x8ExtendHighI8x16U, i16x8_extend_high_i8x16u);
+simd_shift!(I16x8Shl, i16x8_shl);
+simd_shift!(I16x8ShrS, i16x8_shr_s);
+simd_shift!(I16x8ShrU, i16x8_shr_u);
+simd_binop!(I16x8Add, i16x8_add);
+simd_binop!(I16x8AddSatS, i16x8_add_sat_s);
+simd_binop!(I16x8AddSatU, i16x8_add_sat_u);
+simd_binop!(I16x8Sub, i16x8_sub);
+simd_binop!(I16x8SubSatS, i16x8_sub_sat_s);
+simd_binop!(I16x8SubSatU, i16x8_sub_sat_u);
+simd_binop!(I16x8Mul, i16x8_mul);
+simd_binop!(I16x8MinS, i16x8_min_s);
+simd_binop!(I16x8MinU, i16x8_min_u);
+simd_binop!(I16x8MaxS, i16x8_max_s);
+simd_binop!(I16x8MaxU, i16x8_max_u);
+simd_binop!(I16x8AvgrU, i16x8_avgr_u);
+simd_binop!(I16x8ExtMulLowI8x16S, i16x8_extmul_low_i8x16s);
+simd_binop!(I16x8ExtMulHighI8x16S, i16x8_extmul_high_i8x16s);
+simd_binop!(I16x8ExtMulLowI8x16U, i16x8_extmul_low_i8x16u);
+simd_binop!(I16x8ExtMulHighI8x16U, i16x8_extmul_high_i8x16u);
+simd_unop!(I32x4ExtAddPairwiseI16x8S, i32x4_extadd_pairwise_i16x8s);
+simd_unop!(I32x4ExtAddPairwiseI16x8U, i32x4_extadd_pairwise_i16x8u);
+simd_unop!(I32x4Abs, i32x4_abs);
+simd_unop!(I32x4Neg, i32x4_neg);
+simd_unop!(I32x4AllTrue, i32x4_all_true, V128 -> I32);
+simd_unop!(I32x4Bitmask, i32x4_bitmask, V128 -> I32);
+simd_unop!(I32x4ExtendLowI16x8S, i32x4_extend_low_i16x8s);
+simd_unop!(I32x4ExtendHighI16x8S, i32x4_extend_high_i16x8s);
+simd_unop!(I32x4ExtendLowI16x8U, i32x4_extend_low_i16x8u);
+simd_unop!(I32x4ExtendHighI16x8U, i32x4_extend_high_i16x8u);
+simd_shift!(I32x4Shl, i32x4_shl);
+simd_shift!(I32x4ShrS, i32x4_shr_s);
+simd_shift!(I32x4ShrU, i32x4_shr_u);
+simd_binop!(I32x4Add, i32x4_add);
+simd_binop!(I32x4Sub, i32x4_sub);
+simd_binop!(I32x4Mul, i32x4_mul);
+simd_binop!(I32x4MinS, i32x4_min_s);
+simd_binop!(I32x4MinU, i32x4_min_u);
+simd_binop!(I32x4MaxS, i32x4_max_s);
+simd_binop!(I32x4MaxU, i32x4_max_u);
+simd_binop!(I32x4DotI16x8S, i32x4_dot_i16x8s);
+simd_binop!(I32x4ExtMulLowI16x8S, i32x4_extmul_low_i16x8s);
+simd_binop!(I32x4ExtMulHighI16x8S, i32x4_extmul_high_i16x8s);
+simd_binop!(I32x4ExtMulLowI16x8U, i32x4_extmul_low_i16x8u);
+simd_binop!(I32x4ExtMulHighI16x8U, i32x4_extmul_high_i16x8u);
+simd_unop!(I64x2Abs, i64x2_abs);
+simd_unop!(I64x2Neg, i64x2_neg);
+simd_unop!(I64x2AllTrue, i64x2_all_true, V128 -> I32);
+simd_unop!(I64x2Bitmask, i64x2_bitmask, V128 -> I32);
+simd_unop!(I64x2ExtendLowI32x4S, i64x2_extend_low_i32x4s);
+simd_unop!(I64x2ExtendHighI32x4S, i64x2_extend_high_i32x4s);
+simd_unop!(I64x2ExtendLowI32x4U, i64x2_extend_low_i32x4u);
+simd_unop!(I64x2ExtendHighI32x4U, i64x2_extend_high_i32x4u);
+simd_shift!(I64x2Shl, i64x2_shl);
+simd_shift!(I64x2ShrS, i64x2_shr_s);
+simd_shift!(I64x2ShrU, i64x2_shr_u);
+simd_binop!(I64x2Add, i64x2_add);
+simd_binop!(I64x2Sub, i64x2_sub);
+simd_binop!(I64x2Mul, i64x2_mul);
+simd_binop!(I64x2ExtMulLowI32x4S, i64x2_extmul_low_i32x4s);
+simd_binop!(I64x2ExtMulHighI32x4S, i64x2_extmul_high_i32x4s);
+simd_binop!(I64x2ExtMulLowI32x4U, i64x2_extmul_low_i32x4u);
+simd_binop!(I64x2ExtMulHighI32x4U, i64x2_extmul_high_i32x4u);
+simd_unop!(F32x4Ceil, f32x4_ceil);
+simd_unop!(F32x4Floor, f32x4_floor);
+simd_unop!(F32x4Trunc, f32x4_trunc);
+simd_unop!(F32x4Nearest, f32x4_nearest);
+simd_unop!(F32x4Abs, f32x4_abs);
+simd_unop!(F32x4Neg, f32x4_neg);
+simd_unop!(F32x4Sqrt, f32x4_sqrt);
+simd_binop!(F32x4Add, f32x4_add);
+simd_binop!(F32x4Sub, f32x4_sub);
+simd_binop!(F32x4Mul, f32x4_mul);
+simd_binop!(F32x4Div, f32x4_div);
+simd_binop!(F32x4Min, f32x4_min);
+simd_binop!(F32x4Max, f32x4_max);
+simd_binop!(F32x4PMin, f32x4p_min);
+simd_binop!(F32x4PMax, f32x4p_max);
+simd_unop!(F64x2Ceil, f64x2_ceil);
+simd_unop!(F64x2Floor, f64x2_floor);
+simd_unop!(F64x2Trunc, f64x2_trunc);
+simd_unop!(F64x2Nearest, f64x2_nearest);
+simd_unop!(F64x2Abs, f64x2_abs);
+simd_unop!(F64x2Neg, f64x2_neg);
+simd_unop!(F64x2Sqrt, f64x2_sqrt);
+simd_binop!(F64x2Add, f64x2_add);
+simd_binop!(F64x2Sub, f64x2_sub);
+simd_binop!(F64x2Mul, f64x2_mul);
+simd_binop!(F64x2Div, f64x2_div);
+simd_binop!(F64x2Min, f64x2_min);
+simd_binop!(F64x2Max, f64x2_max);
+simd_binop!(F64x2PMin, f64x2p_min);
+simd_binop!(F64x2PMax, f64x2p_max);
+simd_unop!(I32x4TruncSatF32x4S, i32x4_trunc_sat_f32x4s);
+simd_unop!(I32x4TruncSatF32x4U, i32x4_trunc_sat_f32x4u);
+simd_unop!(F32x4ConvertI32x4S, f32x4_convert_i32x4s);
+simd_unop!(F32x4ConvertI32x4U, f32x4_convert_i32x4u);
+simd_unop!(I32x4TruncSatF64x2SZero, i32x4_trunc_sat_f64x2s_zero);
+simd_unop!(I32x4TruncSatF64x2UZero, i32x4_trunc_sat_f64x2u_zero);
+simd_unop!(F64x2ConvertLowI32x4S, f64x2_convert_low_i32x4s);
+simd_unop!(F64x2ConvertLowI32x4U, f64x2_convert_low_i32x4u);
+simd_unop!(F32x4DemoteF64x2Zero, f32x4_demote_f64x2_zero);
+simd_unop!(F64x2PromoteLowF32x4, f64x2_promote_low_f32x4);
+simd_ternop!(V128Bitselect, v128_bitselect);
+simd_binop!(I8x16RelaxedSwizzle, i8x16_relaxed_swizzle);
+simd_unop!(I32x4RelaxedTruncF32x4S, i32x4_relaxed_trunc_f32x4s);
+simd_unop!(I32x4RelaxedTruncF32x4U, i32x4_relaxed_trunc_f32x4u);
+simd_unop!(I32x4RelaxedTruncF64x2SZero, i32x4_relaxed_trunc_f64x2s_zero);
+simd_unop!(I32x4RelaxedTruncF64x2UZero, i32x4_relaxed_trunc_f64x2u_zero);
+simd_ternop!(F32x4RelaxedMadd, f32x4_relaxed_madd);
+simd_ternop!(F32x4RelaxedNmadd, f32x4_relaxed_nmadd);
+simd_ternop!(F64x2RelaxedMadd, f64x2_relaxed_madd);
+simd_ternop!(F64x2RelaxedNmadd, f64x2_relaxed_nmadd);
+simd_ternop!(I8x16RelaxedLaneselect, i8x16_relaxed_laneselect);
+simd_ternop!(I16x8RelaxedLaneselect, i16x8_relaxed_laneselect);
+simd_ternop!(I32x4RelaxedLaneselect, i32x4_relaxed_laneselect);
+simd_ternop!(I64x2RelaxedLaneselect, i64x2_relaxed_laneselect);
+simd_binop!(F32x4RelaxedMin, f32x4_relaxed_min);
+simd_binop!(F32x4RelaxedMax, f32x4_relaxed_max);
+simd_binop!(F64x2RelaxedMin, f64x2_relaxed_min);
+simd_binop!(F64x2RelaxedMax, f64x2_relaxed_max);
+simd_binop!(I16x8RelaxedQ15mulrS, i16x8_relaxed_q15mulr_s);
+simd_binop!(I16x8RelaxedDotI8x16I7x16S, i16x8_relaxed_dot_i8x16_i7x16_s);
+simd_ternop!(
+ I32x4RelaxedDotI8x16I7x16AddS,
+ i32x4_relaxed_dot_i8x16_i7x16_add_s
+);
diff --git a/third_party/rust/wasm-smith/src/core/code_builder/no_traps.rs b/third_party/rust/wasm-smith/src/core/code_builder/no_traps.rs
new file mode 100644
index 0000000000..a1232b6922
--- /dev/null
+++ b/third_party/rust/wasm-smith/src/core/code_builder/no_traps.rs
@@ -0,0 +1,644 @@
+use crate::core::*;
+use wasm_encoder::{BlockType, Instruction, ValType};
+
+use super::CodeBuilder;
+
+// For loads, we dynamically check whether the load will
+// trap, and if it will then we generate a dummy value to
+// use instead.
+pub(crate) fn load<'a>(
+ inst: Instruction<'a>,
+ module: &Module,
+ builder: &mut CodeBuilder,
+ insts: &mut Vec<Instruction<'a>>,
+) {
+ let memarg = get_memarg(&inst);
+ let memory = &module.memories[memarg.memory_index as usize];
+ let address_type = if memory.memory64 {
+ ValType::I64
+ } else {
+ ValType::I32
+ };
+ // Add a temporary local to hold this load's address.
+ let address_local = builder.alloc_local(address_type);
+
+ // Add a temporary local to hold the result of this load.
+ let load_type = type_of_memory_access(&inst);
+ let result_local = builder.alloc_local(load_type);
+
+ // [address:address_type]
+ insts.push(Instruction::LocalSet(address_local));
+ // []
+ insts.push(Instruction::Block(wasm_encoder::BlockType::Empty));
+ {
+ // []
+ insts.push(Instruction::Block(wasm_encoder::BlockType::Empty));
+ {
+ // []
+ insts.push(Instruction::MemorySize(memarg.memory_index));
+ // [mem_size_in_pages:address_type]
+ insts.push(int_const_inst(address_type, 65_536));
+ // [mem_size_in_pages:address_type wasm_page_size:address_type]
+ insts.push(int_mul_inst(address_type));
+ // [mem_size_in_bytes:address_type]
+ insts.push(int_const_inst(
+ address_type,
+ (memarg.offset + size_of_type_in_memory(load_type)) as i64,
+ ));
+ // [mem_size_in_bytes:address_type offset_and_size:address_type]
+ insts.push(Instruction::LocalGet(address_local));
+ // [mem_size_in_bytes:address_type offset_and_size:address_type address:address_type]
+ insts.push(int_add_inst(address_type));
+ // [mem_size_in_bytes:address_type highest_byte_accessed:address_type]
+ insts.push(int_le_u_inst(address_type));
+ // [load_will_trap:i32]
+ insts.push(Instruction::BrIf(0));
+ // []
+ insts.push(Instruction::LocalGet(address_local));
+ // [address:address_type]
+ insts.push(int_const_inst(address_type, 0));
+ // [address:address_type 0:address_type]
+ insts.push(int_le_s_inst(address_type));
+ // [load_will_trap:i32]
+ insts.push(Instruction::BrIf(0));
+ // []
+ insts.push(Instruction::LocalGet(address_local));
+ // [address:address_type]
+ insts.push(inst);
+ // [result:load_type]
+ insts.push(Instruction::LocalSet(result_local));
+ // []
+ insts.push(Instruction::Br(1));
+ // <unreachable>
+ }
+ // []
+ insts.push(Instruction::End);
+ // []
+ insts.push(dummy_value_inst(load_type));
+ // [dummy_value:load_type]
+ insts.push(Instruction::LocalSet(result_local));
+ // []
+ }
+ // []
+ insts.push(Instruction::End);
+ // []
+ insts.push(Instruction::LocalGet(result_local));
+ // [result:load_type]
+}
+
+// Stores are similar to loads: we check whether the store
+// will trap, and if it will then we just drop the value.
+pub(crate) fn store<'a>(
+ inst: Instruction<'a>,
+ module: &Module,
+ builder: &mut CodeBuilder,
+ insts: &mut Vec<Instruction<'a>>,
+) {
+ let memarg = get_memarg(&inst);
+ let memory = &module.memories[memarg.memory_index as usize];
+ let address_type = if memory.memory64 {
+ ValType::I64
+ } else {
+ ValType::I32
+ };
+
+ // Add a temporary local to hold this store's address.
+ let address_local = builder.alloc_local(address_type);
+
+ // Add a temporary local to hold the value to store.
+ let store_type = type_of_memory_access(&inst);
+ let value_local = builder.alloc_local(store_type);
+
+ // [address:address_type value:store_type]
+ insts.push(Instruction::LocalSet(value_local));
+ // [address:address_type]
+ insts.push(Instruction::LocalSet(address_local));
+ // []
+ insts.push(Instruction::MemorySize(memarg.memory_index));
+ // [mem_size_in_pages:address_type]
+ insts.push(int_const_inst(address_type, 65_536));
+ // [mem_size_in_pages:address_type wasm_page_size:address_type]
+ insts.push(int_mul_inst(address_type));
+ // [mem_size_in_bytes:address_type]
+ insts.push(int_const_inst(
+ address_type,
+ (memarg.offset + size_of_type_in_memory(store_type)) as i64,
+ ));
+ // [mem_size_in_bytes:address_type offset_and_size:address_type]
+ insts.push(Instruction::LocalGet(address_local));
+ // [mem_size_in_bytes:address_type offset_and_size:address_type address:address_type]
+ insts.push(int_add_inst(address_type));
+ // [mem_size_in_bytes:address_type highest_byte_accessed:address_type]
+ insts.push(int_le_u_inst(address_type));
+ // [store_will_trap:i32]
+ insts.push(Instruction::If(BlockType::Empty));
+ insts.push(Instruction::Else);
+ {
+ // []
+ insts.push(Instruction::LocalGet(address_local));
+ // [address:address_type]
+ insts.push(int_const_inst(address_type, 0));
+ // [address:address_type 0:address_type]
+ insts.push(int_le_s_inst(address_type));
+ // [load_will_trap:i32]
+ insts.push(Instruction::If(BlockType::Empty));
+ insts.push(Instruction::Else);
+ {
+ // []
+ insts.push(Instruction::LocalGet(address_local));
+ // [address:address_type]
+ insts.push(Instruction::LocalGet(value_local));
+ // [address:address_type value:store_type]
+ insts.push(inst);
+ // []
+ }
+ insts.push(Instruction::End);
+ }
+ // []
+ insts.push(Instruction::End);
+}
+
+// Unsigned integer division and remainder will trap when
+// the divisor is 0. To avoid the trap, we will set any 0
+// divisors to 1 prior to the operation.
+//
+// The code below is equivalent to this expression:
+//
+// local.set $temp_divisor
+// (select (i32.eqz (local.get $temp_divisor) (i32.const 1) (local.get $temp_divisor))
+pub(crate) fn unsigned_div_rem<'a>(
+ inst: Instruction<'a>,
+ builder: &mut CodeBuilder,
+ insts: &mut Vec<Instruction<'a>>,
+) {
+ let op_type = type_of_integer_operation(&inst);
+ let temp_divisor = builder.alloc_local(op_type);
+
+ // [dividend:op_type divisor:op_type]
+ insts.push(Instruction::LocalSet(temp_divisor));
+ // [dividend:op_type]
+ insts.push(int_const_inst(op_type, 1));
+ // [dividend:op_type 1:op_type]
+ insts.push(Instruction::LocalGet(temp_divisor));
+ // [dividend:op_type 1:op_type divisor:op_type]
+ insts.push(Instruction::LocalGet(temp_divisor));
+ // [dividend:op_type 1:op_type divisor:op_type divisor:op_type]
+ insts.push(eqz_inst(op_type));
+ // [dividend:op_type 1:op_type divisor:op_type is_zero:i32]
+ insts.push(Instruction::Select);
+ // [dividend:op_type divisor:op_type]
+ insts.push(inst);
+ // [result:op_type]
+}
+
+pub(crate) fn trunc<'a>(
+ inst: Instruction<'a>,
+ builder: &mut CodeBuilder,
+ insts: &mut Vec<Instruction<'a>>,
+) {
+ // If NaN or ±inf, replace with dummy value. Our method of checking for NaN
+ // is to use `ne` because NaN is the only value that is not equal to itself
+ let conv_type = type_of_float_conversion(&inst);
+ let temp_float = builder.alloc_local(conv_type);
+ // [input:conv_type]
+ insts.push(Instruction::LocalTee(temp_float));
+ // [input:conv_type]
+ insts.push(Instruction::LocalGet(temp_float));
+ // [input:conv_type input:conv_type]
+ insts.push(ne_inst(conv_type));
+ // [is_nan:i32]
+ insts.push(Instruction::LocalGet(temp_float));
+ // [is_nan:i32 input:conv_type]
+ insts.push(flt_inf_const_inst(conv_type));
+ // [is_nan:i32 input:conv_type inf:conv_type]
+ insts.push(eq_inst(conv_type));
+ // [is_nan:i32 is_inf:i32]
+ insts.push(Instruction::LocalGet(temp_float));
+ // [is_nan:i32 is_inf:i32 input:conv_type]
+ insts.push(flt_neg_inf_const_inst(conv_type));
+ // [is_nan:i32 is_inf:i32 input:conv_type neg_inf:conv_type]
+ insts.push(eq_inst(conv_type));
+ // [is_nan:i32 is_inf:i32 is_neg_inf:i32]
+ insts.push(Instruction::I32Or);
+ // [is_nan:i32 is_±inf:i32]
+ insts.push(Instruction::I32Or);
+ // [is_nan_or_inf:i32]
+ insts.push(Instruction::If(BlockType::Empty));
+ {
+ // []
+ insts.push(dummy_value_inst(conv_type));
+ // [0:conv_type]
+ insts.push(Instruction::LocalSet(temp_float));
+ // []
+ }
+ insts.push(Instruction::End);
+ // []
+ insts.push(Instruction::LocalGet(temp_float));
+ // [input_or_0:conv_type]
+
+ // first ensure that it is >= the min value of our target type
+ insts.push(min_input_const_for_trunc(&inst));
+ // [input_or_0:conv_type min_value_of_target_type:conv_type]
+ insts.push(flt_lt_inst(conv_type));
+ // [input_lt_min:i32]
+ insts.push(Instruction::If(BlockType::Empty));
+ {
+ // []
+ insts.push(min_input_const_for_trunc(&inst));
+ // [min_value_of_target_type:conv_type]
+ insts.push(Instruction::LocalSet(temp_float));
+ }
+ insts.push(Instruction::End);
+ // []
+ insts.push(Instruction::LocalGet(temp_float));
+ // [coerced_input:conv_type]
+
+ // next ensure that it is <= the max value of our target type
+ insts.push(max_input_const_for_trunc(&inst));
+ // [input_or_0:conv_type max_value_of_target_type:conv_type]
+ insts.push(flt_gt_inst(conv_type));
+ // [input_gt_min:i32]
+ insts.push(Instruction::If(BlockType::Empty));
+ {
+ // []
+ insts.push(max_input_const_for_trunc(&inst));
+ // [max_value_of_target_type:conv_type]
+ insts.push(Instruction::LocalSet(temp_float));
+ }
+ insts.push(Instruction::End);
+ // []
+ insts.push(Instruction::LocalGet(temp_float));
+ // [coerced_input:conv_type]
+ insts.push(inst);
+}
+
+// Signed division and remainder will trap in the following instances:
+// - The divisor is 0
+// - The result of the division is 2^(n-1)
+pub(crate) fn signed_div_rem<'a>(
+ inst: Instruction<'a>,
+ builder: &mut CodeBuilder,
+ insts: &mut Vec<Instruction<'a>>,
+) {
+ // If divisor is 0, replace with 1
+ let op_type = type_of_integer_operation(&inst);
+ let temp_divisor = builder.alloc_local(op_type);
+ // [dividend:op_type divisor:op_type]
+ insts.push(Instruction::LocalSet(temp_divisor));
+ // [dividend:op_type]
+ insts.push(int_const_inst(op_type, 1));
+ // [dividend:op_type 1:op_type]
+ insts.push(Instruction::LocalGet(temp_divisor));
+ // [dividend:op_type 1:op_type divisor:op_type]
+ insts.push(Instruction::LocalGet(temp_divisor));
+ // [dividend:op_type 1:op_type divisor:op_type divisor:op_type]
+ insts.push(eqz_inst(op_type));
+ // [dividend:op_type 1:op_type divisor:op_type is_zero:i32]
+ insts.push(Instruction::Select);
+ // [dividend:op_type divisor:op_type]
+ // If dividend and divisor are -int.max and -1, replace
+ // divisor with 1.
+ let temp_dividend = builder.alloc_local(op_type);
+ insts.push(Instruction::LocalSet(temp_divisor));
+ // [dividend:op_type]
+ insts.push(Instruction::LocalSet(temp_dividend));
+ // []
+ insts.push(Instruction::Block(wasm_encoder::BlockType::Empty));
+ {
+ insts.push(Instruction::Block(wasm_encoder::BlockType::Empty));
+ {
+ // []
+ insts.push(Instruction::LocalGet(temp_dividend));
+ // [dividend:op_type]
+ insts.push(Instruction::LocalGet(temp_divisor));
+ // [dividend:op_type divisor:op_type]
+ insts.push(Instruction::LocalSet(temp_divisor));
+ // [dividend:op_type]
+ insts.push(Instruction::LocalTee(temp_dividend));
+ // [dividend:op_type]
+ insts.push(int_min_const_inst(op_type));
+ // [dividend:op_type int_min:op_type]
+ insts.push(ne_inst(op_type));
+ // [not_int_min:i32]
+ insts.push(Instruction::BrIf(0));
+ // []
+ insts.push(Instruction::LocalGet(temp_divisor));
+ // [divisor:op_type]
+ insts.push(int_const_inst(op_type, -1));
+ // [divisor:op_type -1:op_type]
+ insts.push(ne_inst(op_type));
+ // [not_neg_one:i32]
+ insts.push(Instruction::BrIf(0));
+ // []
+ insts.push(int_const_inst(op_type, 1));
+ // [divisor:op_type]
+ insts.push(Instruction::LocalSet(temp_divisor));
+ // []
+ insts.push(Instruction::Br(1));
+ }
+ // []
+ insts.push(Instruction::End);
+ }
+ // []
+ insts.push(Instruction::End);
+ // []
+ insts.push(Instruction::LocalGet(temp_dividend));
+ // [dividend:op_type]
+ insts.push(Instruction::LocalGet(temp_divisor));
+ // [dividend:op_type divisor:op_type]
+ insts.push(inst);
+}
+
+fn get_memarg(inst: &Instruction) -> wasm_encoder::MemArg {
+ match *inst {
+ Instruction::I32Load(memarg)
+ | Instruction::I64Load(memarg)
+ | Instruction::F32Load(memarg)
+ | Instruction::F64Load(memarg)
+ | Instruction::I32Load8S(memarg)
+ | Instruction::I32Load8U(memarg)
+ | Instruction::I32Load16S(memarg)
+ | Instruction::I32Load16U(memarg)
+ | Instruction::I64Load8S(memarg)
+ | Instruction::I64Load8U(memarg)
+ | Instruction::I64Load16S(memarg)
+ | Instruction::I64Load16U(memarg)
+ | Instruction::I64Load32S(memarg)
+ | Instruction::I64Load32U(memarg)
+ | Instruction::V128Load(memarg)
+ | Instruction::V128Load8x8S(memarg)
+ | Instruction::V128Load8x8U(memarg)
+ | Instruction::V128Load16x4S(memarg)
+ | Instruction::V128Load16x4U(memarg)
+ | Instruction::V128Load32x2S(memarg)
+ | Instruction::V128Load32x2U(memarg)
+ | Instruction::V128Load8Splat(memarg)
+ | Instruction::V128Load16Splat(memarg)
+ | Instruction::V128Load32Splat(memarg)
+ | Instruction::V128Load64Splat(memarg)
+ | Instruction::V128Load32Zero(memarg)
+ | Instruction::V128Load64Zero(memarg)
+ | Instruction::I32Store(memarg)
+ | Instruction::I64Store(memarg)
+ | Instruction::F32Store(memarg)
+ | Instruction::F64Store(memarg)
+ | Instruction::I32Store8(memarg)
+ | Instruction::I32Store16(memarg)
+ | Instruction::I64Store8(memarg)
+ | Instruction::I64Store16(memarg)
+ | Instruction::I64Store32(memarg)
+ | Instruction::V128Store(memarg) => memarg,
+ _ => unreachable!(),
+ }
+}
+
+fn dummy_value_inst<'a>(ty: ValType) -> Instruction<'a> {
+ match ty {
+ ValType::I32 => Instruction::I32Const(0),
+ ValType::I64 => Instruction::I64Const(0),
+ ValType::F32 => Instruction::F32Const(0.0),
+ ValType::F64 => Instruction::F64Const(0.0),
+ ValType::V128 => Instruction::V128Const(0),
+ ValType::Ref(ty) => {
+ assert!(ty.nullable);
+ Instruction::RefNull(ty.heap_type)
+ }
+ }
+}
+
+fn eq_inst<'a>(ty: ValType) -> Instruction<'a> {
+ match ty {
+ ValType::F32 => Instruction::F32Eq,
+ ValType::F64 => Instruction::F64Eq,
+ ValType::I32 => Instruction::I32Eq,
+ ValType::I64 => Instruction::I64Eq,
+ _ => panic!("not a numeric type"),
+ }
+}
+
+fn eqz_inst<'a>(ty: ValType) -> Instruction<'a> {
+ match ty {
+ ValType::I32 => Instruction::I32Eqz,
+ ValType::I64 => Instruction::I64Eqz,
+ _ => panic!("not an integer type"),
+ }
+}
+
+fn type_of_integer_operation(inst: &Instruction) -> ValType {
+ match inst {
+ Instruction::I32DivU
+ | Instruction::I32DivS
+ | Instruction::I32RemU
+ | Instruction::I32RemS => ValType::I32,
+ Instruction::I64RemU
+ | Instruction::I64DivU
+ | Instruction::I64DivS
+ | Instruction::I64RemS => ValType::I64,
+ _ => panic!("not integer division or remainder"),
+ }
+}
+
+fn type_of_float_conversion(inst: &Instruction) -> ValType {
+ match inst {
+ Instruction::I32TruncF32S
+ | Instruction::I32TruncF32U
+ | Instruction::I64TruncF32S
+ | Instruction::I64TruncF32U => ValType::F32,
+ Instruction::I32TruncF64S
+ | Instruction::I32TruncF64U
+ | Instruction::I64TruncF64S
+ | Instruction::I64TruncF64U => ValType::F64,
+ _ => panic!("not a float -> integer conversion"),
+ }
+}
+
+fn min_input_const_for_trunc<'a>(inst: &Instruction) -> Instruction<'a> {
+ // This is the minimum float value that is representable as an i64
+ let min_f64 = -9_223_372_036_854_775_000f64;
+ let min_f32 = -9_223_372_000_000_000_000f32;
+
+ // This is the minimum float value that is representable as as i32
+ let min_f32_as_i32 = -2_147_483_500f32;
+ match inst {
+ Instruction::I32TruncF32S => Instruction::F32Const(min_f32_as_i32),
+ Instruction::I32TruncF32U => Instruction::F32Const(0.0),
+ Instruction::I64TruncF32S => Instruction::F32Const(min_f32),
+ Instruction::I64TruncF32U => Instruction::F32Const(0.0),
+ Instruction::I32TruncF64S => Instruction::F64Const(i32::MIN as f64),
+ Instruction::I32TruncF64U => Instruction::F64Const(0.0),
+ Instruction::I64TruncF64S => Instruction::F64Const(min_f64),
+ Instruction::I64TruncF64U => Instruction::F64Const(0.0),
+ _ => panic!("not a trunc instruction"),
+ }
+}
+
+fn max_input_const_for_trunc<'a>(inst: &Instruction) -> Instruction<'a> {
+ // This is the maximum float value that is representable as as i64
+ let max_f64_as_i64 = 9_223_372_036_854_775_000f64;
+ let max_f32_as_i64 = 9_223_371_500_000_000_000f32;
+
+ // This is the maximum float value that is representable as as i32
+ let max_f32_as_i32 = 2_147_483_500f32;
+ match inst {
+ Instruction::I32TruncF32S | Instruction::I32TruncF32U => {
+ Instruction::F32Const(max_f32_as_i32)
+ }
+ Instruction::I64TruncF32S | Instruction::I64TruncF32U => {
+ Instruction::F32Const(max_f32_as_i64)
+ }
+ Instruction::I32TruncF64S | Instruction::I32TruncF64U => {
+ Instruction::F64Const(i32::MAX as f64)
+ }
+ Instruction::I64TruncF64S | Instruction::I64TruncF64U => {
+ Instruction::F64Const(max_f64_as_i64)
+ }
+ _ => panic!("not a trunc instruction"),
+ }
+}
+
+fn type_of_memory_access(inst: &Instruction) -> ValType {
+ match inst {
+ Instruction::I32Load(_)
+ | Instruction::I32Load8S(_)
+ | Instruction::I32Load8U(_)
+ | Instruction::I32Load16S(_)
+ | Instruction::I32Load16U(_)
+ | Instruction::I32Store(_)
+ | Instruction::I32Store8(_)
+ | Instruction::I32Store16(_) => ValType::I32,
+
+ Instruction::I64Load(_)
+ | Instruction::I64Load8S(_)
+ | Instruction::I64Load8U(_)
+ | Instruction::I64Load16S(_)
+ | Instruction::I64Load16U(_)
+ | Instruction::I64Load32S(_)
+ | Instruction::I64Load32U(_)
+ | Instruction::I64Store(_)
+ | Instruction::I64Store8(_)
+ | Instruction::I64Store16(_)
+ | Instruction::I64Store32(_) => ValType::I64,
+
+ Instruction::F32Load(_) | Instruction::F32Store(_) => ValType::F32,
+
+ Instruction::F64Load(_) | Instruction::F64Store(_) => ValType::F64,
+
+ Instruction::V128Load { .. }
+ | Instruction::V128Load8x8S { .. }
+ | Instruction::V128Load8x8U { .. }
+ | Instruction::V128Load16x4S { .. }
+ | Instruction::V128Load16x4U { .. }
+ | Instruction::V128Load32x2S { .. }
+ | Instruction::V128Load32x2U { .. }
+ | Instruction::V128Load8Splat { .. }
+ | Instruction::V128Load16Splat { .. }
+ | Instruction::V128Load32Splat { .. }
+ | Instruction::V128Load64Splat { .. }
+ | Instruction::V128Load32Zero { .. }
+ | Instruction::V128Load64Zero { .. }
+ | Instruction::V128Store { .. } => ValType::V128,
+
+ _ => panic!("not a memory access instruction"),
+ }
+}
+
+fn int_min_const_inst<'a>(ty: ValType) -> Instruction<'a> {
+ match ty {
+ ValType::I32 => Instruction::I32Const(i32::MIN),
+ ValType::I64 => Instruction::I64Const(i64::MIN),
+ _ => panic!("not an int type"),
+ }
+}
+
+fn int_const_inst<'a>(ty: ValType, x: i64) -> Instruction<'a> {
+ match ty {
+ ValType::I32 => Instruction::I32Const(x as i32),
+ ValType::I64 => Instruction::I64Const(x),
+ _ => panic!("not an int type"),
+ }
+}
+
+fn int_mul_inst<'a>(ty: ValType) -> Instruction<'a> {
+ match ty {
+ ValType::I32 => Instruction::I32Mul,
+ ValType::I64 => Instruction::I64Mul,
+ _ => panic!("not an int type"),
+ }
+}
+
+fn int_add_inst<'a>(ty: ValType) -> Instruction<'a> {
+ match ty {
+ ValType::I32 => Instruction::I32Add,
+ ValType::I64 => Instruction::I64Add,
+ _ => panic!("not an int type"),
+ }
+}
+
+fn int_le_u_inst<'a>(ty: ValType) -> Instruction<'a> {
+ match ty {
+ ValType::I32 => Instruction::I32LeU,
+ ValType::I64 => Instruction::I64LeU,
+ _ => panic!("not an int type"),
+ }
+}
+
+fn int_le_s_inst<'a>(ty: ValType) -> Instruction<'a> {
+ match ty {
+ ValType::I32 => Instruction::I32LeS,
+ ValType::I64 => Instruction::I64LeS,
+ _ => panic!("not an int type"),
+ }
+}
+
+fn ne_inst<'a>(ty: ValType) -> Instruction<'a> {
+ match ty {
+ ValType::I32 => Instruction::I32Ne,
+ ValType::I64 => Instruction::I64Ne,
+ ValType::F32 => Instruction::F32Ne,
+ ValType::F64 => Instruction::F64Ne,
+ _ => panic!("not a numeric type"),
+ }
+}
+
+fn flt_lt_inst<'a>(ty: ValType) -> Instruction<'a> {
+ match ty {
+ ValType::F32 => Instruction::F32Lt,
+ ValType::F64 => Instruction::F64Lt,
+ _ => panic!("not a float type"),
+ }
+}
+
+fn flt_gt_inst<'a>(ty: ValType) -> Instruction<'a> {
+ match ty {
+ ValType::F32 => Instruction::F32Gt,
+ ValType::F64 => Instruction::F64Gt,
+ _ => panic!("not a float type"),
+ }
+}
+
+fn flt_inf_const_inst<'a>(ty: ValType) -> Instruction<'a> {
+ match ty {
+ ValType::F32 => Instruction::F32Const(f32::INFINITY),
+ ValType::F64 => Instruction::F64Const(f64::INFINITY),
+ _ => panic!("not a float type"),
+ }
+}
+
+fn flt_neg_inf_const_inst<'a>(ty: ValType) -> Instruction<'a> {
+ match ty {
+ ValType::F32 => Instruction::F32Const(f32::NEG_INFINITY),
+ ValType::F64 => Instruction::F64Const(f64::NEG_INFINITY),
+ _ => panic!("not a float type"),
+ }
+}
+
+fn size_of_type_in_memory(ty: ValType) -> u64 {
+ match ty {
+ ValType::I32 => 4,
+ ValType::I64 => 8,
+ ValType::F32 => 4,
+ ValType::F64 => 8,
+ ValType::V128 => 16,
+ ValType::Ref(_) => panic!("not a memory type"),
+ }
+}
diff --git a/third_party/rust/wasm-smith/src/core/encode.rs b/third_party/rust/wasm-smith/src/core/encode.rs
new file mode 100644
index 0000000000..33cb7c2817
--- /dev/null
+++ b/third_party/rust/wasm-smith/src/core/encode.rs
@@ -0,0 +1,262 @@
+use super::*;
+use std::convert::TryFrom;
+
+impl Module {
+    /// Encode this Wasm module into bytes.
+    pub fn to_bytes(&self) -> Vec<u8> {
+        self.encoded().finish()
+    }
+
+    /// Build the `wasm_encoder::Module`, emitting each section in the order
+    /// required by the Wasm binary format (types, imports, functions, tables,
+    /// memories, tags, globals, exports, start, elements, data count, code,
+    /// data).
+    fn encoded(&self) -> wasm_encoder::Module {
+        let mut module = wasm_encoder::Module::new();
+
+        self.encode_types(&mut module);
+        self.encode_imports(&mut module);
+        self.encode_funcs(&mut module);
+        self.encode_tables(&mut module);
+        self.encode_memories(&mut module);
+        self.encode_tags(&mut module);
+        self.encode_globals(&mut module);
+        self.encode_exports(&mut module);
+        self.encode_start(&mut module);
+        self.encode_elems(&mut module);
+        self.encode_data_count(&mut module);
+        self.encode_code(&mut module);
+        self.encode_data(&mut module);
+
+        module
+    }
+
+    /// Emit the type section. Skipped entirely when `should_encode_types`
+    /// is unset (presumably so the generator can produce modules without a
+    /// type section — TODO confirm against the generator logic).
+    fn encode_types(&self, module: &mut wasm_encoder::Module) {
+        if !self.should_encode_types {
+            return;
+        }
+
+        let mut section = wasm_encoder::TypeSection::new();
+        for ty in &self.types {
+            match ty {
+                Type::Func(ty) => {
+                    section.function(ty.params.iter().cloned(), ty.results.iter().cloned());
+                }
+            }
+        }
+        module.section(&section);
+    }
+
+    /// Emit the import section, translating each recorded import's entity
+    /// type into its `wasm_encoder` equivalent. Gated on
+    /// `should_encode_imports` like the type section above.
+    fn encode_imports(&self, module: &mut wasm_encoder::Module) {
+        if !self.should_encode_imports {
+            return;
+        }
+
+        let mut section = wasm_encoder::ImportSection::new();
+        for im in &self.imports {
+            section.import(
+                &im.module,
+                &im.field,
+                translate_entity_type(&im.entity_type),
+            );
+        }
+        module.section(&section);
+    }
+
+    /// Emit the tag section for locally-defined tags. Defined (non-imported)
+    /// tags occupy the tail of `self.tags`, so only that suffix is encoded.
+    /// All tags are exception tags.
+    fn encode_tags(&self, module: &mut wasm_encoder::Module) {
+        if self.num_defined_tags == 0 {
+            return;
+        }
+        let mut tags = wasm_encoder::TagSection::new();
+        for tag in self.tags[self.tags.len() - self.num_defined_tags..].iter() {
+            tags.tag(wasm_encoder::TagType {
+                kind: wasm_encoder::TagKind::Exception,
+                func_type_idx: tag.func_type_idx,
+            });
+        }
+        module.section(&tags);
+    }
+
+    /// Emit the function section (type indices only; bodies go in the code
+    /// section). As with tags, defined functions are the tail of the index
+    /// space after imported ones.
+    fn encode_funcs(&self, module: &mut wasm_encoder::Module) {
+        if self.num_defined_funcs == 0 {
+            return;
+        }
+        let mut funcs = wasm_encoder::FunctionSection::new();
+        for (ty, _) in self.funcs[self.funcs.len() - self.num_defined_funcs..].iter() {
+            funcs.function(*ty);
+        }
+        module.section(&funcs);
+    }
+
+    /// Emit the table section for locally-defined tables (tail of the table
+    /// index space after imports).
+    fn encode_tables(&self, module: &mut wasm_encoder::Module) {
+        if self.num_defined_tables == 0 {
+            return;
+        }
+        let mut tables = wasm_encoder::TableSection::new();
+        for t in self.tables[self.tables.len() - self.num_defined_tables..].iter() {
+            tables.table(*t);
+        }
+        module.section(&tables);
+    }
+
+    /// Emit the memory section for locally-defined memories (tail of the
+    /// memory index space after imports).
+    fn encode_memories(&self, module: &mut wasm_encoder::Module) {
+        if self.num_defined_memories == 0 {
+            return;
+        }
+        let mut mems = wasm_encoder::MemorySection::new();
+        for m in self.memories[self.memories.len() - self.num_defined_memories..].iter() {
+            mems.memory(*m);
+        }
+        module.section(&mems);
+    }
+
+    /// Emit the global section. `defined_globals` pairs an index into
+    /// `self.globals` (the type) with its initializer, which is either an
+    /// arbitrary constant expression or a `ref.func` of a known function.
+    fn encode_globals(&self, module: &mut wasm_encoder::Module) {
+        if self.globals.is_empty() {
+            return;
+        }
+        let mut globals = wasm_encoder::GlobalSection::new();
+        for (idx, expr) in &self.defined_globals {
+            let ty = &self.globals[*idx as usize];
+            match expr {
+                GlobalInitExpr::ConstExpr(expr) => globals.global(*ty, expr),
+                GlobalInitExpr::FuncRef(func) => globals.global(*ty, &ConstExpr::ref_func(*func)),
+            };
+        }
+        module.section(&globals);
+    }
+
+    /// Emit the export section from the recorded (name, kind, index) triples.
+    fn encode_exports(&self, module: &mut wasm_encoder::Module) {
+        if self.exports.is_empty() {
+            return;
+        }
+        let mut exports = wasm_encoder::ExportSection::new();
+        for (name, kind, idx) in &self.exports {
+            exports.export(name, *kind, *idx);
+        }
+        module.section(&exports);
+    }
+
+    /// Emit the start section if a start function was chosen.
+    fn encode_start(&self, module: &mut wasm_encoder::Module) {
+        if let Some(f) = self.start {
+            module.section(&wasm_encoder::StartSection { function_index: f });
+        }
+    }
+
+    /// Emit the element section. Each segment's items are either constant
+    /// expressions (`ref.func` / `ref.null`) or bare function indices, and
+    /// each segment is active (with a table and offset), passive, or
+    /// declared.
+    fn encode_elems(&self, module: &mut wasm_encoder::Module) {
+        if self.elems.is_empty() {
+            return;
+        }
+        let mut elems = wasm_encoder::ElementSection::new();
+        // Scratch buffer for expression-style items, reused across segments.
+        let mut exps = vec![];
+        for el in &self.elems {
+            let elements = match &el.items {
+                Elements::Expressions(es) => {
+                    exps.clear();
+                    exps.extend(es.iter().map(|e| {
+                        // TODO(nagisa): generate global.get of imported ref globals too.
+                        match e {
+                            // `Some(i)` is a reference to function `i`; only
+                            // funcref element types carry function refs here.
+                            Some(i) => match el.ty {
+                                RefType::FUNCREF => wasm_encoder::ConstExpr::ref_func(*i),
+                                _ => unreachable!(),
+                            },
+                            // `None` encodes a null reference of the
+                            // segment's element type.
+                            None => wasm_encoder::ConstExpr::ref_null(el.ty.heap_type),
+                        }
+                    }));
+                    wasm_encoder::Elements::Expressions(&exps)
+                }
+                Elements::Functions(fs) => wasm_encoder::Elements::Functions(fs),
+            };
+            match &el.kind {
+                ElementKind::Active { table, offset } => {
+                    // Offsets are 32-bit or 64-bit constants, or a global.get.
+                    let offset = match *offset {
+                        Offset::Const32(n) => ConstExpr::i32_const(n),
+                        Offset::Const64(n) => ConstExpr::i64_const(n),
+                        Offset::Global(g) => ConstExpr::global_get(g),
+                    };
+                    elems.active(*table, &offset, el.ty, elements);
+                }
+                ElementKind::Passive => {
+                    elems.passive(el.ty, elements);
+                }
+                ElementKind::Declared => {
+                    elems.declared(el.ty, elements);
+                }
+            }
+        }
+        module.section(&elems);
+    }
+
+    /// Emit the data count section when the bulk-memory proposal is enabled
+    /// and there is at least one data segment.
+    fn encode_data_count(&self, module: &mut wasm_encoder::Module) {
+        // Without bulk memory there's no need for a data count section,
+        if !self.config.bulk_memory_enabled() {
+            return;
+        }
+        // ... and also if there's no data no need for a data count section.
+        if self.data.is_empty() {
+            return;
+        }
+        module.section(&wasm_encoder::DataCountSection {
+            count: u32::try_from(self.data.len()).unwrap(),
+        });
+    }
+
+    /// Emit the code section. Bodies are either instruction lists we
+    /// generated (terminated with an explicit `end`) or raw arbitrary bytes.
+    fn encode_code(&self, module: &mut wasm_encoder::Module) {
+        if self.code.is_empty() {
+            return;
+        }
+        let mut code = wasm_encoder::CodeSection::new();
+        for c in &self.code {
+            // Skip the run-length encoding because it is a little
+            // annoying to compute; use a length of one for every local.
+            let mut func = wasm_encoder::Function::new(c.locals.iter().map(|l| (1, *l)));
+            match &c.instructions {
+                Instructions::Generated(instrs) => {
+                    for instr in instrs {
+                        func.instruction(instr);
+                    }
+                    func.instruction(&wasm_encoder::Instruction::End);
+                }
+                Instructions::Arbitrary(body) => {
+                    func.raw(body.iter().copied());
+                }
+            }
+            code.function(&func);
+        }
+        module.section(&code);
+    }
+
+    /// Emit the data section. Active segments carry a target memory and an
+    /// offset expression; passive segments carry only their bytes.
+    fn encode_data(&self, module: &mut wasm_encoder::Module) {
+        if self.data.is_empty() {
+            return;
+        }
+        let mut data = wasm_encoder::DataSection::new();
+        for seg in &self.data {
+            match &seg.kind {
+                DataSegmentKind::Active {
+                    memory_index,
+                    offset,
+                } => {
+                    let offset = match *offset {
+                        Offset::Const32(n) => ConstExpr::i32_const(n),
+                        Offset::Const64(n) => ConstExpr::i64_const(n),
+                        Offset::Global(g) => ConstExpr::global_get(g),
+                    };
+                    data.active(*memory_index, &offset, seg.init.iter().copied());
+                }
+                DataSegmentKind::Passive => {
+                    data.passive(seg.init.iter().copied());
+                }
+            }
+        }
+        module.section(&data);
+    }
+}
+
+/// Translate this crate's internal `EntityType` into the corresponding
+/// `wasm_encoder::EntityType` for use in the import section.
+///
+/// Tags are always encoded as exception tags; table/memory/global types
+/// convert via their `From` impls.
+pub(crate) fn translate_entity_type(ty: &EntityType) -> wasm_encoder::EntityType {
+    match ty {
+        EntityType::Tag(t) => wasm_encoder::EntityType::Tag(wasm_encoder::TagType {
+            kind: wasm_encoder::TagKind::Exception,
+            func_type_idx: t.func_type_idx,
+        }),
+        // The second tuple field of `EntityType::Func` is unused here (only
+        // the type index matters for encoding).
+        EntityType::Func(f, _) => wasm_encoder::EntityType::Function(*f),
+        EntityType::Table(ty) => (*ty).into(),
+        EntityType::Memory(m) => (*m).into(),
+        EntityType::Global(g) => (*g).into(),
+    }
+}
diff --git a/third_party/rust/wasm-smith/src/core/terminate.rs b/third_party/rust/wasm-smith/src/core/terminate.rs
new file mode 100644
index 0000000000..adcfeed54f
--- /dev/null
+++ b/third_party/rust/wasm-smith/src/core/terminate.rs
@@ -0,0 +1,70 @@
+use super::*;
+use std::mem;
+use wasm_encoder::BlockType;
+
+impl Module {
+    /// Ensure that all of this Wasm module's functions will terminate when
+    /// executed.
+    ///
+    /// This adds a new mutable, exported global to the module to keep track of
+    /// how much "fuel" is left. Fuel is decremented at the head of each loop
+    /// and function. When fuel reaches zero, a trap is raised.
+    ///
+    /// The index of the fuel global is returned, so that you may control how
+    /// much fuel the module is given.
+    ///
+    /// NOTE(review): this method only defines the global (in `globals` /
+    /// `defined_globals`); no export entry is pushed here — confirm the
+    /// export mentioned above is produced elsewhere.
+    pub fn ensure_termination(&mut self, default_fuel: u32) -> u32 {
+        // The new global is appended after all existing globals, so its
+        // index is the current size of the global index space.
+        let fuel_global = self.globals.len() as u32;
+        self.globals.push(GlobalType {
+            val_type: ValType::I32,
+            mutable: true,
+        });
+        self.defined_globals.push((
+            fuel_global,
+            GlobalInitExpr::ConstExpr(ConstExpr::i32_const(default_fuel as i32)),
+        ));
+
+        for code in &mut self.code {
+            // Instruction sequence appended wherever fuel must be checked.
+            let check_fuel = |insts: &mut Vec<Instruction>| {
+                // if fuel == 0 { trap }
+                insts.push(Instruction::GlobalGet(fuel_global));
+                insts.push(Instruction::I32Eqz);
+                insts.push(Instruction::If(BlockType::Empty));
+                insts.push(Instruction::Unreachable);
+                insts.push(Instruction::End);
+
+                // fuel -= 1
+                insts.push(Instruction::GlobalGet(fuel_global));
+                insts.push(Instruction::I32Const(1));
+                insts.push(Instruction::I32Sub);
+                insts.push(Instruction::GlobalSet(fuel_global));
+            };
+
+            let instrs = match &mut code.instructions {
+                Instructions::Generated(list) => list,
+                // only present on modules contained within
+                // `MaybeInvalidModule`, which doesn't expose its internal
+                // `Module`.
+                Instructions::Arbitrary(_) => unreachable!(),
+            };
+            let mut new_insts = Vec::with_capacity(instrs.len() * 2);
+
+            // Check fuel at the start of functions to deal with infinite
+            // recursion.
+            check_fuel(&mut new_insts);
+
+            // `mem::take` moves the old instruction list out and leaves an
+            // empty Vec behind (idiomatic replacement for
+            // `mem::replace(instrs, vec![])`).
+            for inst in mem::take(instrs) {
+                let is_loop = matches!(&inst, Instruction::Loop(_));
+                new_insts.push(inst);
+
+                // Check fuel at loop heads to deal with infinite loops.
+                if is_loop {
+                    check_fuel(&mut new_insts);
+                }
+            }
+
+            *instrs = new_insts;
+        }
+
+        fuel_global
+    }
+}
diff --git a/third_party/rust/wasm-smith/src/lib.rs b/third_party/rust/wasm-smith/src/lib.rs
new file mode 100644
index 0000000000..63db1ad93c
--- /dev/null
+++ b/third_party/rust/wasm-smith/src/lib.rs
@@ -0,0 +1,193 @@
+//! A WebAssembly test case generator.
+//!
+//! ## Usage
+//!
+//! First, use [`cargo fuzz`](https://github.com/rust-fuzz/cargo-fuzz) to define
+//! a new fuzz target:
+//!
+//! ```shell
+//! $ cargo fuzz add my_wasm_smith_fuzz_target
+//! ```
+//!
+//! Next, add `wasm-smith` to your dependencies:
+//!
+//! ```shell
+//! $ cargo add wasm-smith
+//! ```
+//!
+//! Then, define your fuzz target so that it takes arbitrary
+//! `wasm_smith::Module`s as an argument, convert the module into serialized
+//! Wasm bytes via the `to_bytes` method, and then feed it into your system:
+//!
+//! ```no_run
+//! // fuzz/fuzz_targets/my_wasm_smith_fuzz_target.rs
+//!
+//! #![no_main]
+//!
+//! use libfuzzer_sys::fuzz_target;
+//! use wasm_smith::Module;
+//!
+//! fuzz_target!(|module: Module| {
+//! let wasm_bytes = module.to_bytes();
+//!
+//! // Your code here...
+//! });
+//! ```
+//!
+//! Finally, start fuzzing:
+//!
+//! ```shell
+//! $ cargo fuzz run my_wasm_smith_fuzz_target
+//! ```
+//!
+//! > **Note:** For a real world example, also check out [the `validate` fuzz
+//! > target](https://github.com/fitzgen/wasm-smith/blob/main/fuzz/fuzz_targets/validate.rs)
+//! > defined in this repository. Using the `wasmparser` crate, it checks that
+//! > every module generated by `wasm-smith` validates successfully.
+//!
+//! ## Design
+//!
+//! The design and implementation strategy of wasm-smith is outlined in
+//! [this article](https://fitzgeraldnick.com/2020/08/24/writing-a-test-case-generator.html).
+
+#![deny(missing_docs, missing_debug_implementations)]
+// Needed for the `instructions!` macro in `src/code_builder.rs`.
+#![recursion_limit = "512"]
+
+mod component;
+mod config;
+mod core;
+
+pub use crate::core::{
+ ConfiguredModule, InstructionKind, InstructionKinds, MaybeInvalidModule, Module,
+};
+use arbitrary::{Result, Unstructured};
+pub use component::{Component, ConfiguredComponent};
+pub use config::{Config, DefaultConfig, SwarmConfig};
+use std::{collections::HashSet, fmt::Write, str};
+use wasmparser::types::{KebabStr, KebabString};
+
+/// Do something an arbitrary number of times.
+///
+/// The callback can return `false` to exit the loop early.
+///
+/// The first `min` iterations are mandatory: if `f` asks to stop during
+/// them the whole operation fails with `IncorrectFormat`. After that, each
+/// extra iteration (up to `max`) is gated on an arbitrary boolean drawn
+/// from `u`, so exhausted input naturally ends the loop.
+pub(crate) fn arbitrary_loop<'a>(
+    u: &mut Unstructured<'a>,
+    min: usize,
+    max: usize,
+    mut f: impl FnMut(&mut Unstructured<'a>) -> Result<bool>,
+) -> Result<()> {
+    assert!(max >= min);
+    for _ in 0..min {
+        if !f(u)? {
+            return Err(arbitrary::Error::IncorrectFormat);
+        }
+    }
+    for _ in 0..(max - min) {
+        // Out of input data defaults to "stop looping".
+        let keep_going = u.arbitrary().unwrap_or(false);
+        if !keep_going {
+            break;
+        }
+
+        if !f(u)? {
+            break;
+        }
+    }
+
+    Ok(())
+}
+
+// Mirror what happens in `Arbitrary for String`, but do so with a clamped size.
+//
+// Peeks up to `max_size` bytes from `u` and returns the longest valid UTF-8
+// prefix of them, consuming exactly the bytes returned.
+pub(crate) fn limited_str<'a>(max_size: usize, u: &mut Unstructured<'a>) -> Result<&'a str> {
+    let size = u.arbitrary_len::<u8>()?;
+    let size = std::cmp::min(size, max_size);
+    // `peek_bytes(size)` cannot fail here because `size` is bounded by the
+    // remaining input length via `arbitrary_len`.
+    match str::from_utf8(u.peek_bytes(size).unwrap()) {
+        Ok(s) => {
+            // All peeked bytes were valid UTF-8; consume them.
+            u.bytes(size).unwrap();
+            Ok(s)
+        }
+        Err(e) => {
+            // Consume only the valid UTF-8 prefix.
+            let i = e.valid_up_to();
+            let valid = u.bytes(i).unwrap();
+            // SAFETY: `valid` is exactly the prefix `from_utf8` reported as
+            // valid (`valid_up_to`), so it is guaranteed well-formed UTF-8.
+            let s = unsafe {
+                debug_assert!(str::from_utf8(valid).is_ok());
+                str::from_utf8_unchecked(valid)
+            };
+            Ok(s)
+        }
+    }
+}
+
+/// Owned-`String` convenience wrapper around [`limited_str`].
+pub(crate) fn limited_string(max_size: usize, u: &mut Unstructured) -> Result<String> {
+    limited_str(max_size, u).map(String::from)
+}
+
+/// Produce an arbitrary string (clamped to `max_size`) that is not already
+/// in `names`, disambiguating collisions by appending the current set size,
+/// then record it in `names`.
+pub(crate) fn unique_string(
+    max_size: usize,
+    names: &mut HashSet<String>,
+    u: &mut Unstructured,
+) -> Result<String> {
+    let mut candidate = limited_string(max_size, u)?;
+    loop {
+        if !names.contains(&candidate) {
+            break;
+        }
+        // Suffix with the set size until the name is fresh.
+        write!(&mut candidate, "{}", names.len()).unwrap();
+    }
+    names.insert(candidate.clone());
+    Ok(candidate)
+}
+
+/// Generate an arbitrary kebab-case string (as defined by `wasmparser`'s
+/// `KebabString`) that is unique within `names`, then record it there.
+///
+/// Kebab strings are `-`-separated words where each word must start with a
+/// lowercase letter; the `require_alpha` flag tracks when the next character
+/// must be alphabetic (at the start and after every dash).
+pub(crate) fn unique_kebab_string(
+    max_size: usize,
+    names: &mut HashSet<KebabString>,
+    u: &mut Unstructured,
+) -> Result<KebabString> {
+    let size = std::cmp::min(u.arbitrary_len::<u8>()?, max_size);
+    let mut name = String::with_capacity(size);
+    let mut require_alpha = true;
+    for _ in 0..size {
+        // 0..=36 maps onto the kebab alphabet: 0-25 -> 'a'..'z',
+        // 26-35 -> '0'..'9', 36 -> '-'.
+        name.push(match u.int_in_range::<u8>(0..=36)? {
+            x if (0..26).contains(&x) => {
+                require_alpha = false;
+                (b'a' + x) as char
+            }
+            x if (26..36).contains(&x) => {
+                if require_alpha {
+                    // A digit is not allowed to start a word; substitute a
+                    // letter instead.
+                    require_alpha = false;
+                    (b'a' + (x - 26)) as char
+                } else {
+                    (b'0' + (x - 26)) as char
+                }
+            }
+            x if x == 36 => {
+                if require_alpha {
+                    // A dash is not allowed here (would create an empty
+                    // word); substitute a letter instead.
+                    require_alpha = false;
+                    'a'
+                } else {
+                    // Start a new word; its first char must be alphabetic.
+                    require_alpha = true;
+                    '-'
+                }
+            }
+            _ => unreachable!(),
+        });
+    }
+
+    // Kebab strings may not be empty or end with a dash; pad with a letter.
+    if name.is_empty() || name.ends_with('-') {
+        name.push('a');
+    }
+
+    // Appending digits keeps the string valid kebab case (digits may only
+    // be disallowed at word starts), so uniqueness suffixing is safe here.
+    while names.contains(KebabStr::new(&name).unwrap()) {
+        write!(&mut name, "{}", names.len()).unwrap();
+    }
+
+    let name = KebabString::new(name).unwrap();
+    names.insert(name.clone());
+
+    Ok(name)
+}
+
+/// Generate a unique URL by appending a fresh kebab-case path segment to a
+/// fixed example host. Uniqueness is inherited from [`unique_kebab_string`].
+pub(crate) fn unique_url(
+    max_size: usize,
+    names: &mut HashSet<KebabString>,
+    u: &mut Unstructured,
+) -> Result<String> {
+    unique_kebab_string(max_size, names, u).map(|path| format!("https://example.com/{path}"))
+}
diff --git a/third_party/rust/wasm-smith/tests/component.rs b/third_party/rust/wasm-smith/tests/component.rs
new file mode 100644
index 0000000000..fe5d253f66
--- /dev/null
+++ b/third_party/rust/wasm-smith/tests/component.rs
@@ -0,0 +1,42 @@
+use arbitrary::{Arbitrary, Unstructured};
+use rand::{rngs::SmallRng, RngCore, SeedableRng};
+use wasm_smith::Component;
+
+#[test]
+/// Generate many arbitrary components from random bytes and assert that
+/// every successfully-generated one passes `wasmparser` validation with the
+/// component-model feature enabled.
+fn smoke_test_component() {
+    const NUM_RUNS: usize = 4096;
+
+    // Fixed seed keeps the test deterministic across runs.
+    let mut rng = SmallRng::seed_from_u64(0);
+    let mut buf = vec![0; 1024];
+    let mut ok_count = 0;
+
+    for _ in 0..NUM_RUNS {
+        rng.fill_bytes(&mut buf);
+        let u = Unstructured::new(&buf);
+        // Not every byte string yields a component; validate only the ones
+        // that do, counting them for the success-rate report below.
+        if let Ok(component) = Component::arbitrary_take_rest(u) {
+            ok_count += 1;
+            let component = component.to_bytes();
+
+            let mut validator =
+                wasmparser::Validator::new_with_features(wasmparser::WasmFeatures {
+                    component_model: true,
+                    ..Default::default()
+                });
+            if let Err(e) = validator.validate_all(&component) {
+                // Persist the failing binary for post-mortem debugging.
+                std::fs::write("component.wasm", &component).unwrap();
+                panic!(
+                    "generated component should be valid; failing binary written \
+                     to `component.wasm`. Error: {}",
+                    e
+                );
+            }
+        }
+    }
+
+    println!(
+        "Generated {} / {} ({:.02}%) arbitrary components okay",
+        ok_count,
+        NUM_RUNS,
+        ok_count as f64 / NUM_RUNS as f64 * 100.0
+    );
+}
diff --git a/third_party/rust/wasm-smith/tests/core.rs b/third_party/rust/wasm-smith/tests/core.rs
new file mode 100644
index 0000000000..b177d8eea8
--- /dev/null
+++ b/third_party/rust/wasm-smith/tests/core.rs
@@ -0,0 +1,318 @@
+use arbitrary::{Arbitrary, Unstructured};
+use rand::{rngs::SmallRng, RngCore, SeedableRng};
+use std::collections::HashMap;
+use wasm_smith::{Config, ConfiguredModule, Module, SwarmConfig};
+use wasmparser::{Parser, TypeRef, ValType, Validator, WasmFeatures};
+
+#[test]
+/// Generate arbitrary modules from random bytes and check that every one
+/// that generation accepts also validates under the default feature set.
+fn smoke_test_module() {
+    // Fixed seed keeps the test deterministic.
+    let mut rng = SmallRng::seed_from_u64(0);
+    let mut buf = vec![0; 2048];
+    for _ in 0..1024 {
+        rng.fill_bytes(&mut buf);
+        let u = Unstructured::new(&buf);
+        if let Ok(module) = Module::arbitrary_take_rest(u) {
+            let wasm_bytes = module.to_bytes();
+
+            let mut validator = Validator::new_with_features(wasm_features());
+            validate(&mut validator, &wasm_bytes);
+        }
+    }
+}
+
+#[test]
+/// Same as `smoke_test_module`, but additionally rewrites each module with
+/// `ensure_termination` (fuel = 10) and checks that the instrumented module
+/// still validates.
+fn smoke_test_ensure_termination() {
+    let mut rng = SmallRng::seed_from_u64(0);
+    let mut buf = vec![0; 2048];
+    for _ in 0..1024 {
+        rng.fill_bytes(&mut buf);
+        let u = Unstructured::new(&buf);
+        if let Ok(mut module) = Module::arbitrary_take_rest(u) {
+            module.ensure_termination(10);
+            let wasm_bytes = module.to_bytes();
+
+            let mut validator = Validator::new_with_features(wasm_features());
+            validate(&mut validator, &wasm_bytes);
+        }
+    }
+}
+
+#[test]
+/// Generate modules under an arbitrary `SwarmConfig` (random feature/limit
+/// combinations) and check they all validate.
+fn smoke_test_swarm_config() {
+    let mut rng = SmallRng::seed_from_u64(0);
+    let mut buf = vec![0; 2048];
+    for _ in 0..1024 {
+        rng.fill_bytes(&mut buf);
+        let u = Unstructured::new(&buf);
+        if let Ok(module) = ConfiguredModule::<SwarmConfig>::arbitrary_take_rest(u) {
+            let module = module.module;
+            let wasm_bytes = module.to_bytes();
+
+            let mut validator = Validator::new_with_features(wasm_features());
+            validate(&mut validator, &wasm_bytes);
+        }
+    }
+}
+
+#[test]
+/// With multi-value generation disabled in the config, the resulting module
+/// must validate even when the validator also has multi-value turned off —
+/// i.e. the generator honors the knob.
+fn multi_value_disabled() {
+    let mut rng = SmallRng::seed_from_u64(42);
+    let mut buf = vec![0; 2048];
+    for _ in 0..10 {
+        rng.fill_bytes(&mut buf);
+        let mut u = Unstructured::new(&buf);
+        let mut cfg = SwarmConfig::arbitrary(&mut u).unwrap();
+        cfg.multi_value_enabled = false;
+        if let Ok(module) = Module::new(cfg, &mut u) {
+            let wasm_bytes = module.to_bytes();
+            let mut features = wasm_features();
+            // Validator must reject multi-value, proving none was emitted.
+            features.multi_value = false;
+            let mut validator = Validator::new_with_features(features);
+            validate(&mut validator, &wasm_bytes);
+        }
+    }
+}
+
+#[test]
+/// Disable every post-MVP proposal in the config and check that generated
+/// modules validate under the correspondingly restricted feature set —
+/// i.e. wasm-smith can still target WebAssembly 1.0.
+fn smoke_can_smith_valid_webassembly_one_point_oh() {
+    let mut rng = SmallRng::seed_from_u64(42);
+    let mut buf = vec![0; 10240];
+    for _ in 0..100 {
+        rng.fill_bytes(&mut buf);
+        let mut u = Unstructured::new(&buf);
+        let mut cfg = SwarmConfig::arbitrary(&mut u).unwrap();
+        // Turn off everything that postdates wasm-core-1.
+        cfg.sign_extension_enabled = false;
+        cfg.saturating_float_to_int_enabled = false;
+        cfg.reference_types_enabled = false;
+        cfg.multi_value_enabled = false;
+        cfg.bulk_memory_enabled = false;
+        cfg.simd_enabled = false;
+        cfg.relaxed_simd_enabled = false;
+        cfg.exceptions_enabled = false;
+        cfg.memory64_enabled = false;
+        cfg.max_memories = 1;
+        cfg.max_tables = 1;
+        let features = parser_features_from_config(&cfg);
+        if let Ok(module) = Module::new(cfg, &mut u) {
+            let wasm_bytes = module.to_bytes();
+            // This table should set to `true` only features specified in wasm-core-1 spec.
+            let mut validator = Validator::new_with_features(features);
+            validate(&mut validator, &wasm_bytes);
+        }
+    }
+}
+
+#[test]
+/// With `available_imports` configured, every import a generated module
+/// declares must come from the configured list with a matching type, each
+/// import must appear at most once, and — across all runs — every available
+/// import is eventually used while some runs use only a subset.
+fn smoke_test_imports_config() {
+    // Number of runs whose module imported only a strict subset.
+    let mut n_partial = 0;
+    // (module, field) -> ever seen imported, accumulated across all runs.
+    let mut global_imports_seen = HashMap::<_, bool>::new();
+    let mut rng = SmallRng::seed_from_u64(11);
+    let mut buf = vec![0; 512];
+    for _ in 0..1024 {
+        rng.fill_bytes(&mut buf);
+
+        let mut u = Unstructured::new(&buf);
+        let (config, available) = import_config(&mut u);
+        let features = parser_features_from_config(&config);
+
+        if let Ok(module) = Module::new(config, &mut u) {
+            let wasm_bytes = module.to_bytes();
+            let mut validator = Validator::new_with_features(features);
+            validate(&mut validator, &wasm_bytes);
+            // (module, field) -> (seen this run?, expected kind).
+            let mut imports_seen = available
+                .iter()
+                .map(|(m, f, t)| ((*m, *f), (false, t)))
+                .collect::<HashMap<_, _>>();
+            let mut sig_types = Vec::new();
+
+            for payload in Parser::new(0).parse_all(&wasm_bytes) {
+                let payload = payload.unwrap();
+                if let wasmparser::Payload::TypeSection(rdr) = payload {
+                    // Gather the signature types to later check function types against.
+                    for ty in rdr {
+                        match ty.unwrap() {
+                            wasmparser::Type::Func(ft) => sig_types.push(ft),
+                        }
+                    }
+                } else if let wasmparser::Payload::ImportSection(rdr) = payload {
+                    // Read out imports, checking that they all are within the list of expected
+                    // imports (i.e. we don't generate arbitrary ones), and that we handle the
+                    // logic correctly (i.e. signature types are as expected)
+                    for import in rdr {
+                        let import = import.unwrap();
+                        use AvailableImportKind as I;
+                        let entry = imports_seen.get_mut(&(import.module, import.name));
+                        match (entry, &import.ty) {
+                            // Already marked seen: the generator duplicated it.
+                            (Some((true, _)), _) => panic!("duplicate import of {:?}", import),
+                            (Some((seen, I::Memory)), TypeRef::Memory(_)) => *seen = true,
+                            (Some((seen, I::Global(t))), TypeRef::Global(gt))
+                                if *t == gt.content_type =>
+                            {
+                                *seen = true
+                            }
+                            (Some((seen, I::Table(t))), TypeRef::Table(tt))
+                                if *t == ValType::Ref(tt.element_type) =>
+                            {
+                                *seen = true
+                            }
+                            // Function imports are checked against the type
+                            // section gathered above.
+                            (Some((seen, I::Func(p, r))), TypeRef::Func(sig_idx))
+                                if sig_types[*sig_idx as usize].params() == *p
+                                    && sig_types[*sig_idx as usize].results() == *r =>
+                            {
+                                *seen = true
+                            }
+                            // Tag imports must have matching params and no results.
+                            (
+                                Some((seen, I::Tag(p))),
+                                TypeRef::Tag(wasmparser::TagType { func_type_idx, .. }),
+                            ) if sig_types[*func_type_idx as usize].params() == *p
+                                && sig_types[*func_type_idx as usize].results().is_empty() =>
+                            {
+                                *seen = true
+                            }
+                            (Some((_, expected)), _) => panic!(
+                                "import {:?} type mismatch, expected: {:?}",
+                                import, expected
+                            ),
+                            (None, _) => panic!("import of an unknown entity: {:?}", import),
+                        }
+                    }
+                }
+            }
+
+            // Verify that we have seen both instances with partial imports (i.e. we don't always
+            // just copy over all the imports from the example module) and also that we eventually
+            // observe all of the imports being used (i.e. selection is reasonably random)
+            for (m, f, _) in &available[..] {
+                let seen = imports_seen[&(*m, *f)];
+                let global_seen = global_imports_seen
+                    .entry((m.to_string(), f.to_string()))
+                    .or_default();
+                *global_seen |= seen.0;
+            }
+            if !imports_seen.values().all(|v| v.0) {
+                n_partial += 1;
+            }
+        }
+    }
+    assert!(global_imports_seen.values().all(|v| *v));
+    assert!(n_partial > 0);
+}
+
+#[test]
+/// Generate modules with `disallow_traps` set and check they still validate.
+/// (Only validity is checked here, not the absence of traps at runtime.)
+fn smoke_test_no_trapping_mode() {
+    let mut rng = SmallRng::seed_from_u64(0);
+    let mut buf = vec![0; 2048];
+    for _ in 0..1024 {
+        rng.fill_bytes(&mut buf);
+        let mut u = Unstructured::new(&buf);
+        let mut cfg = SwarmConfig::arbitrary(&mut u).unwrap();
+        cfg.disallow_traps = true;
+        if let Ok(module) = Module::new(cfg, &mut u) {
+            let wasm_bytes = module.to_bytes();
+            let mut validator = Validator::new_with_features(wasm_features());
+            validate(&mut validator, &wasm_bytes);
+        }
+    }
+}
+
+/// Validator feature set used by the smoke tests: the `wasmparser` defaults
+/// plus the proposals wasm-smith may exercise by default.
+fn wasm_features() -> WasmFeatures {
+    let mut features = WasmFeatures::default();
+    features.multi_memory = true;
+    features.relaxed_simd = true;
+    features.memory64 = true;
+    features.exceptions = true;
+    features.tail_call = true;
+    features
+}
+
+/// The kind (and type information) of each import the test module in
+/// `import_config` makes available, used to check generated imports against
+/// expectations.
+#[derive(Debug)]
+enum AvailableImportKind {
+    /// Function import: (params, results).
+    Func(&'static [ValType], &'static [ValType]),
+    /// Exception tag import: params of its function type (no results).
+    Tag(&'static [ValType]),
+    /// Global import: its content type.
+    Global(ValType),
+    /// Table import: its element type.
+    Table(ValType),
+    /// Memory import.
+    Memory,
+}
+
+/// Build a `SwarmConfig` whose `available_imports` is a fixed example module
+/// (given as wat below), and return it together with the expected
+/// (module, field, kind) triples describing that module's imports.
+///
+/// The `available` list and the wat text must stay in sync — the
+/// `smoke_test_imports_config` test checks generated imports against both.
+fn import_config(
+    u: &mut Unstructured,
+) -> (
+    SwarmConfig,
+    Vec<(&'static str, &'static str, AvailableImportKind)>,
+) {
+    let mut config = SwarmConfig::arbitrary(u).expect("arbitrary swarm");
+    // Randomize exceptions so tag imports are sometimes possible.
+    config.exceptions_enabled = u.arbitrary().expect("exceptions enabled for swarm");
+    let available = {
+        use {AvailableImportKind::*, ValType::*};
+        vec![
+            ("env", "pi", Func(&[I32], &[])),
+            ("env", "pi2", Func(&[I32], &[])),
+            ("env", "pipi2", Func(&[I32, I32], &[])),
+            ("env", "po", Func(&[], &[I32])),
+            ("env", "pipo", Func(&[I32], &[I32])),
+            ("env", "popo", Func(&[], &[I32, I32])),
+            ("env", "mem", Memory),
+            ("env", "tbl", Table(ValType::FUNCREF)),
+            ("vars", "g", Global(I64)),
+            ("tags", "tag1", Tag(&[I32])),
+        ]
+    };
+    config.available_imports = Some(
+        wat::parse_str(
+            r#"
+            (module
+                (import "env" "pi" (func (param i32)))
+                (import "env" "pi2" (func (param i32)))
+                (import "env" "pipi2" (func (param i32 i32)))
+                (import "env" "po" (func (result i32)))
+                (import "env" "pipo" (func (param i32) (result i32)))
+                (import "env" "popo" (func (result i32 i32)))
+                (import "env" "mem" (memory 1 16))
+                (import "env" "tbl" (table 1 16 funcref))
+                (import "vars" "g" (global i64))
+                (import "tags" "tag1" (tag (param i32)))
+            )
+            "#,
+        )
+        .unwrap()
+        .into(),
+    );
+    (config, available)
+}
+
+/// Translate a wasm-smith [`Config`] into the equivalent `wasmparser`
+/// feature set, so validation accepts exactly what generation may emit.
+fn parser_features_from_config(config: &impl Config) -> WasmFeatures {
+    WasmFeatures {
+        // Always-on baseline features.
+        mutable_global: true,
+        floats: true,
+        // Mirrored directly from the generator's configuration.
+        saturating_float_to_int: config.saturating_float_to_int_enabled(),
+        sign_extension: config.sign_extension_ops_enabled(),
+        reference_types: config.reference_types_enabled(),
+        multi_value: config.multi_value_enabled(),
+        bulk_memory: config.bulk_memory_enabled(),
+        simd: config.simd_enabled(),
+        relaxed_simd: config.relaxed_simd_enabled(),
+        multi_memory: config.max_memories() > 1,
+        exceptions: config.exceptions_enabled(),
+        memory64: config.memory64_enabled(),
+        tail_call: config.tail_call_enabled(),
+        // Proposals wasm-smith never emits.
+        threads: false,
+        extended_const: false,
+        component_model: false,
+        function_references: false,
+        memory_control: false,
+    }
+}
+
+/// Validate `bytes` with the given validator; on failure, dump the binary
+/// (`test.wasm`) and, if printable, its text form (`test.wat`) to disk for
+/// debugging, then panic with the validation error.
+fn validate(validator: &mut Validator, bytes: &[u8]) {
+    if let Err(err) = validator.validate_all(bytes) {
+        // Best-effort dumps; failures to write are ignored.
+        drop(std::fs::write("test.wasm", &bytes));
+        if let Ok(text) = wasmprinter::print_bytes(bytes) {
+            drop(std::fs::write("test.wat", &text));
+        }
+        panic!("wasm failed to validate {:?}", err);
+    }
+}