author     Daniel Baumann <daniel.baumann@progress-linux.org>   2024-04-19 00:47:55 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org>   2024-04-19 00:47:55 +0000
commit     26a029d407be480d791972afb5975cf62c9360a6 (patch)
tree       f435a8308119effd964b339f76abb83a57c29483 /third_party/rust/wasm-smith/src
parent     Initial commit. (diff)
Adding upstream version 124.0.1. (tag: upstream/124.0.1)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'third_party/rust/wasm-smith/src')
-rw-r--r--  third_party/rust/wasm-smith/src/component.rs                  | 2178
-rw-r--r--  third_party/rust/wasm-smith/src/component/encode.rs           |  321
-rw-r--r--  third_party/rust/wasm-smith/src/config.rs                     |  622
-rw-r--r--  third_party/rust/wasm-smith/src/core.rs                       | 2404
-rw-r--r--  third_party/rust/wasm-smith/src/core/code_builder.rs          | 7058
-rw-r--r--  third_party/rust/wasm-smith/src/core/code_builder/no_traps.rs |  644
-rw-r--r--  third_party/rust/wasm-smith/src/core/encode.rs                |  299
-rw-r--r--  third_party/rust/wasm-smith/src/core/terminate.rs             |   70
-rw-r--r--  third_party/rust/wasm-smith/src/lib.rs                        |  192
9 files changed, 13788 insertions, 0 deletions
diff --git a/third_party/rust/wasm-smith/src/component.rs b/third_party/rust/wasm-smith/src/component.rs
new file mode 100644
index 0000000000..e18373b4f4
--- /dev/null
+++ b/third_party/rust/wasm-smith/src/component.rs
@@ -0,0 +1,2178 @@
+//! Generation of Wasm
+//! [components](https://github.com/WebAssembly/component-model).
+
+// FIXME(#1000): component support in `wasm-smith` is a work in progress.
+#![allow(unused_variables, dead_code)]
+
+use crate::{arbitrary_loop, Config};
+use arbitrary::{Arbitrary, Result, Unstructured};
+use std::collections::BTreeMap;
+use std::convert::TryFrom;
+use std::{
+ collections::{HashMap, HashSet},
+ rc::Rc,
+};
+use wasm_encoder::{ComponentTypeRef, ComponentValType, PrimitiveValType, TypeBounds, ValType};
+
+mod encode;
+
+/// A pseudo-random WebAssembly [component].
+///
+/// Construct instances of this type with [the `Arbitrary`
+/// trait](https://docs.rs/arbitrary/*/arbitrary/trait.Arbitrary.html).
+///
+/// [component]: https://github.com/WebAssembly/component-model/blob/ast-and-binary/design/MVP/Explainer.md
+///
+/// ## Configuring Generated Components
+///
+/// The `Arbitrary` implementation uses the [`Config::default()`][crate::Config]
+/// configuration. If you want to customize the shape of generated components,
+/// create your own [`Config`][crate::Config] instance and pass it to
+/// [`Component::new`][crate::Component::new].
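+///
+/// ## Example
+///
+/// A minimal sketch of both construction paths; the fixed bytes below stand
+/// in for real fuzzer input:
+///
+/// ```no_run
+/// use arbitrary::{Arbitrary, Unstructured};
+/// use wasm_smith::{Component, Config};
+///
+/// # fn main() -> arbitrary::Result<()> {
+/// let data = [0, 1, 2, 3, 4, 5, 6, 7];
+///
+/// // Default configuration via the `Arbitrary` implementation...
+/// let mut u = Unstructured::new(&data);
+/// let component = Component::arbitrary(&mut u)?;
+///
+/// // ...or a custom configuration via `Component::new`.
+/// let mut u = Unstructured::new(&data);
+/// let component = Component::new(Config::default(), &mut u)?;
+///
+/// // Either way, the result encodes to raw Wasm bytes.
+/// let wasm = component.to_bytes();
+/// # Ok(())
+/// # }
+/// ```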
+#[derive(Debug)]
+pub struct Component {
+ sections: Vec<Section>,
+}
+
+/// A builder to create a component (and possibly a whole tree of nested
+/// components).
+///
+/// Maintains a stack of components we are currently building, as well as
+/// metadata about them. The split between `Component` and `ComponentBuilder`
+/// is that the builder holds metadata used only while generating components;
+/// once the structure has been generated and all that remains is encoding an
+/// already-generated component to bytes, that metadata is unnecessary.
+#[derive(Debug)]
+struct ComponentBuilder {
+ config: Config,
+
+ // The set of core `valtype`s that we are configured to generate.
+ core_valtypes: Vec<ValType>,
+
+ // Stack of types scopes that are currently available.
+ //
+ // There is an entry in this stack for each component, but there can also be
+ // additional entries for module/component/instance types, each of which
+ // has its own scope.
+ //
+ // This stack is always non-empty and the last entry is always the current
+ // scope.
+ //
+ // When a particular scope can alias outer types, it can alias from any
+ // scope that is older than it (i.e. `types_scope[i]` can alias from
+ // `types_scope[j]` when `j <= i`).
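+ //
+ // For example, with a scope stack of `[root component, nested component,
+ // instance type]`, the instance type's scope may alias types defined in
+ // the nested component's scope or in the root component's scope.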
+ types: Vec<TypesScope>,
+
+ // The set of components we are currently building and their associated
+ // metadata.
+ components: Vec<ComponentContext>,
+
+ // Whether we are in the final stage of generating this component, where we
+ // just need to ensure that the configured minimum numbers of entities have
+ // all been generated. This changes the behavior of various
+ // `arbitrary_<section>` methods to always fill in their minimums.
+ fill_minimums: bool,
+
+ // Our maximums for these entities are applied across the whole component
+ // tree, not per-component.
+ total_components: usize,
+ total_modules: usize,
+ total_instances: usize,
+ total_values: usize,
+}
+
+#[derive(Debug, Clone)]
+enum ComponentOrCoreFuncType {
+ Component(Rc<FuncType>),
+ Core(Rc<crate::core::FuncType>),
+}
+
+impl ComponentOrCoreFuncType {
+ fn as_core(&self) -> &Rc<crate::core::FuncType> {
+ match self {
+ ComponentOrCoreFuncType::Core(t) => t,
+ ComponentOrCoreFuncType::Component(_) => panic!("not a core func type"),
+ }
+ }
+
+ fn as_component(&self) -> &Rc<FuncType> {
+ match self {
+ ComponentOrCoreFuncType::Core(_) => panic!("not a component func type"),
+ ComponentOrCoreFuncType::Component(t) => t,
+ }
+ }
+}
+
+#[derive(Debug, Clone)]
+enum ComponentOrCoreInstanceType {
+ Component(Rc<InstanceType>),
+ Core(BTreeMap<String, crate::core::EntityType>),
+}
+
+/// Metadata (e.g. contents of various index spaces) we keep track of on a
+/// per-component basis.
+#[derive(Debug)]
+struct ComponentContext {
+ // The actual component itself.
+ component: Component,
+
+ // The number of imports we have generated thus far.
+ num_imports: usize,
+
+ // The set of names of imports we've generated thus far.
+ import_names: HashSet<String>,
+
+ // The set of URLs of imports we've generated thus far.
+ import_urls: HashSet<String>,
+
+ // This component's function index space.
+ funcs: Vec<ComponentOrCoreFuncType>,
+
+ // Which entries in `funcs` are component functions?
+ component_funcs: Vec<u32>,
+
+ // Which entries in `component_funcs` are component functions that only use scalar
+ // types?
+ scalar_component_funcs: Vec<u32>,
+
+ // Which entries in `funcs` are core Wasm functions?
+ //
+ // Note that a component can't import core functions, so these entries will
+ // never point to a `Section::Import`.
+ core_funcs: Vec<u32>,
+
+ // This component's component index space.
+ //
+ // An indirect list of all directly-nested (not transitive) components
+ // inside this component.
+ //
+ // Each entry is of the form `(i, j)` where `component.sections[i]` is
+ // guaranteed to be either
+ //
+ // * a `Section::Component` and we are referencing the component defined in
+ // that section (in this case `j` must also be `0`, since a component
+ // section can only contain a single nested component), or
+ //
+ // * a `Section::Import` and we are referencing the `j`th import in that
+ // section, which is guaranteed to be a component import.
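+ //
+ // For example, an entry `(3, 2)` would mean "the third import of the
+ // import section at `component.sections[3]`" (illustrative indices only).
+ // The same encoding is used for `modules` below.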
+ components: Vec<(usize, usize)>,
+
+ // This component's module index space.
+ //
+ // An indirect list of all directly-nested (not transitive) modules
+ // inside this component.
+ //
+ // Each entry is of the form `(i, j)` where `component.sections[i]` is
+ // guaranteed to be either
+ //
+ // * a `Section::Core` and we are referencing the module defined in that
+ // section (in this case `j` must also be `0`, since a core section can
+ // only contain a single nested module), or
+ //
+ // * a `Section::Import` and we are referencing the `j`th import in that
+ // section, which is guaranteed to be a module import.
+ modules: Vec<(usize, usize)>,
+
+ // This component's instance index space.
+ instances: Vec<ComponentOrCoreInstanceType>,
+
+ // This component's value index space.
+ values: Vec<ComponentValType>,
+}
+
+impl ComponentContext {
+ fn empty() -> Self {
+ ComponentContext {
+ component: Component::empty(),
+ num_imports: 0,
+ import_names: HashSet::default(),
+ import_urls: HashSet::default(),
+ funcs: vec![],
+ component_funcs: vec![],
+ scalar_component_funcs: vec![],
+ core_funcs: vec![],
+ components: vec![],
+ modules: vec![],
+ instances: vec![],
+ values: vec![],
+ }
+ }
+
+ fn num_modules(&self) -> usize {
+ self.modules.len()
+ }
+
+ fn num_components(&self) -> usize {
+ self.components.len()
+ }
+
+ fn num_instances(&self) -> usize {
+ self.instances.len()
+ }
+
+ fn num_funcs(&self) -> usize {
+ self.funcs.len()
+ }
+
+ fn num_values(&self) -> usize {
+ self.values.len()
+ }
+}
+
+#[derive(Debug, Default)]
+struct TypesScope {
+ // All core types in this scope, regardless of kind.
+ core_types: Vec<Rc<CoreType>>,
+
+ // The indices of all the entries in `core_types` that are core function types.
+ core_func_types: Vec<u32>,
+
+ // The indices of all the entries in `core_types` that are module types.
+ module_types: Vec<u32>,
+
+ // All component types in this index space, regardless of kind.
+ types: Vec<Rc<Type>>,
+
+ // The indices of all the entries in `types` that are defined value types.
+ defined_types: Vec<u32>,
+
+ // The indices of all the entries in `types` that are func types.
+ func_types: Vec<u32>,
+
+ // A map from function types to their indices in the types space.
+ func_type_to_indices: HashMap<Rc<FuncType>, Vec<u32>>,
+
+ // The indices of all the entries in `types` that are component types.
+ component_types: Vec<u32>,
+
+ // The indices of all the entries in `types` that are instance types.
+ instance_types: Vec<u32>,
+}
+
+impl TypesScope {
+ fn push(&mut self, ty: Rc<Type>) -> u32 {
+ let ty_idx = u32::try_from(self.types.len()).unwrap();
+
+ let kind_list = match &*ty {
+ Type::Defined(_) => &mut self.defined_types,
+ Type::Func(func_ty) => {
+ self.func_type_to_indices
+ .entry(func_ty.clone())
+ .or_default()
+ .push(ty_idx);
+ &mut self.func_types
+ }
+ Type::Component(_) => &mut self.component_types,
+ Type::Instance(_) => &mut self.instance_types,
+ };
+ kind_list.push(ty_idx);
+
+ self.types.push(ty);
+ ty_idx
+ }
+
+ fn push_core(&mut self, ty: Rc<CoreType>) -> u32 {
+ let ty_idx = u32::try_from(self.core_types.len()).unwrap();
+
+ let kind_list = match &*ty {
+ CoreType::Func(_) => &mut self.core_func_types,
+ CoreType::Module(_) => &mut self.module_types,
+ };
+ kind_list.push(ty_idx);
+
+ self.core_types.push(ty);
+ ty_idx
+ }
+
+ fn get(&self, index: u32) -> &Rc<Type> {
+ &self.types[index as usize]
+ }
+
+ fn get_core(&self, index: u32) -> &Rc<CoreType> {
+ &self.core_types[index as usize]
+ }
+
+ fn get_func(&self, index: u32) -> &Rc<FuncType> {
+ match &**self.get(index) {
+ Type::Func(f) => f,
+ _ => panic!("get_func on non-function type"),
+ }
+ }
+
+ fn can_ref_type(&self) -> bool {
+ // All component types and core module types may be referenced
+ !self.types.is_empty() || !self.module_types.is_empty()
+ }
+}
+
+impl<'a> Arbitrary<'a> for Component {
+ fn arbitrary(u: &mut Unstructured<'a>) -> Result<Self> {
+ Component::new(Config::default(), u)
+ }
+}
+
+#[derive(Default)]
+struct EntityCounts {
+ globals: usize,
+ tables: usize,
+ memories: usize,
+ tags: usize,
+ funcs: usize,
+}
+
+impl Component {
+ /// Construct a new `Component` using the given configuration.
+ pub fn new(config: Config, u: &mut Unstructured) -> Result<Self> {
+ let mut builder = ComponentBuilder::new(config);
+ builder.build(u)
+ }
+
+ fn empty() -> Self {
+ Component { sections: vec![] }
+ }
+}
+
+#[must_use]
+enum Step {
+ Finished(Component),
+ StillBuilding,
+}
+
+impl Step {
+ fn unwrap_still_building(self) {
+ match self {
+ Step::Finished(_) => panic!(
+ "`Step::unwrap_still_building` called on a `Step` that is not `StillBuilding`"
+ ),
+ Step::StillBuilding => {}
+ }
+ }
+}
+
+impl ComponentBuilder {
+ fn new(config: Config) -> Self {
+ ComponentBuilder {
+ config,
+ core_valtypes: vec![],
+ types: vec![Default::default()],
+ components: vec![ComponentContext::empty()],
+ fill_minimums: false,
+ total_components: 0,
+ total_modules: 0,
+ total_instances: 0,
+ total_values: 0,
+ }
+ }
+
+ fn build(&mut self, u: &mut Unstructured) -> Result<Component> {
+ self.core_valtypes = crate::core::configured_valtypes(&self.config);
+
+ let mut choices: Vec<fn(&mut ComponentBuilder, &mut Unstructured) -> Result<Step>> = vec![];
+
+ loop {
+ choices.clear();
+ choices.push(Self::finish_component);
+
+ // Only add any choice other than "finish what we've generated thus
+ // far" when there is more arbitrary fuzzer data for us to consume.
+ if !u.is_empty() {
+ choices.push(Self::arbitrary_custom_section);
+
+ // NB: we add each section as a choice even if we've already
+ // generated our maximum number of entities in that section so that
+ // we can exercise adding empty sections to the end of the component.
+ choices.push(Self::arbitrary_core_type_section);
+ choices.push(Self::arbitrary_type_section);
+ choices.push(Self::arbitrary_import_section);
+ choices.push(Self::arbitrary_canonical_section);
+
+ if self.total_modules < self.config.max_modules {
+ choices.push(Self::arbitrary_core_module_section);
+ }
+
+ if self.components.len() < self.config.max_nesting_depth
+ && self.total_components < self.config.max_components
+ {
+ choices.push(Self::arbitrary_component_section);
+ }
+
+ // FIXME(#1000)
+ //
+ // choices.push(Self::arbitrary_instance_section);
+ // choices.push(Self::arbitrary_export_section);
+ // choices.push(Self::arbitrary_start_section);
+ // choices.push(Self::arbitrary_alias_section);
+ }
+
+ let f = u.choose(&choices)?;
+ match f(self, u)? {
+ Step::StillBuilding => {}
+ Step::Finished(component) => {
+ if self.components.is_empty() {
+ // If we just finished the root component, then return it.
+ return Ok(component);
+ } else {
+ // Otherwise, add it as a nested component in the parent.
+ self.push_section(Section::Component(component));
+ }
+ }
+ }
+ }
+ }
+
+ fn finish_component(&mut self, u: &mut Unstructured) -> Result<Step> {
+ // Ensure we've generated all of our minimums.
+ self.fill_minimums = true;
+ {
+ if self.current_type_scope().types.len() < self.config.min_types {
+ self.arbitrary_type_section(u)?.unwrap_still_building();
+ }
+ if self.component().num_imports < self.config.min_imports {
+ self.arbitrary_import_section(u)?.unwrap_still_building();
+ }
+ if self.component().funcs.len() < self.config.min_funcs {
+ self.arbitrary_canonical_section(u)?.unwrap_still_building();
+ }
+ }
+ self.fill_minimums = false;
+
+ self.types
+ .pop()
+ .expect("should have a types scope for the component we are finishing");
+ Ok(Step::Finished(self.components.pop().unwrap().component))
+ }
+
+ fn component(&self) -> &ComponentContext {
+ self.components.last().unwrap()
+ }
+
+ fn component_mut(&mut self) -> &mut ComponentContext {
+ self.components.last_mut().unwrap()
+ }
+
+ fn last_section(&self) -> Option<&Section> {
+ self.component().component.sections.last()
+ }
+
+ fn last_section_mut(&mut self) -> Option<&mut Section> {
+ self.component_mut().component.sections.last_mut()
+ }
+
+ fn push_section(&mut self, section: Section) {
+ self.component_mut().component.sections.push(section);
+ }
+
+ fn ensure_section(
+ &mut self,
+ mut predicate: impl FnMut(&Section) -> bool,
+ mut make_section: impl FnMut() -> Section,
+ ) -> &mut Section {
+ match self.last_section() {
+ Some(sec) if predicate(sec) => {}
+ _ => self.push_section(make_section()),
+ }
+ self.last_section_mut().unwrap()
+ }
+
+ fn arbitrary_custom_section(&mut self, u: &mut Unstructured) -> Result<Step> {
+ self.push_section(Section::Custom(u.arbitrary()?));
+ Ok(Step::StillBuilding)
+ }
+
+ fn push_type(&mut self, ty: Rc<Type>) -> u32 {
+ match self.ensure_section(
+ |s| matches!(s, Section::Type(_)),
+ || Section::Type(TypeSection { types: vec![] }),
+ ) {
+ Section::Type(TypeSection { types }) => {
+ types.push(ty.clone());
+ self.current_type_scope_mut().push(ty)
+ }
+ _ => unreachable!(),
+ }
+ }
+
+ fn push_core_type(&mut self, ty: Rc<CoreType>) -> u32 {
+ match self.ensure_section(
+ |s| matches!(s, Section::CoreType(_)),
+ || Section::CoreType(CoreTypeSection { types: vec![] }),
+ ) {
+ Section::CoreType(CoreTypeSection { types }) => {
+ types.push(ty.clone());
+ self.current_type_scope_mut().push_core(ty)
+ }
+ _ => unreachable!(),
+ }
+ }
+
+ fn arbitrary_core_type_section(&mut self, u: &mut Unstructured) -> Result<Step> {
+ self.push_section(Section::CoreType(CoreTypeSection { types: vec![] }));
+
+ let min = if self.fill_minimums {
+ self.config
+ .min_types
+ .saturating_sub(self.current_type_scope().types.len())
+ } else {
+ 0
+ };
+
+ let max = self.config.max_types - self.current_type_scope().types.len();
+
+ arbitrary_loop(u, min, max, |u| {
+ let mut type_fuel = self.config.max_type_size;
+ let ty = self.arbitrary_core_type(u, &mut type_fuel)?;
+ self.push_core_type(ty);
+ Ok(true)
+ })?;
+
+ Ok(Step::StillBuilding)
+ }
+
+ fn arbitrary_core_type(
+ &self,
+ u: &mut Unstructured,
+ type_fuel: &mut u32,
+ ) -> Result<Rc<CoreType>> {
+ *type_fuel = type_fuel.saturating_sub(1);
+ if *type_fuel == 0 {
+ return Ok(Rc::new(CoreType::Module(Rc::new(ModuleType::default()))));
+ }
+
+ let ty = match u.int_in_range::<u8>(0..=1)? {
+ 0 => CoreType::Func(crate::core::arbitrary_func_type(
+ u,
+ &self.config,
+ &self.core_valtypes,
+ if self.config.multi_value_enabled {
+ None
+ } else {
+ Some(1)
+ },
+ 0,
+ )?),
+ 1 => CoreType::Module(self.arbitrary_module_type(u, type_fuel)?),
+ _ => unreachable!(),
+ };
+ Ok(Rc::new(ty))
+ }
+
+ fn arbitrary_type_section(&mut self, u: &mut Unstructured) -> Result<Step> {
+ self.push_section(Section::Type(TypeSection { types: vec![] }));
+
+ let min = if self.fill_minimums {
+ self.config
+ .min_types
+ .saturating_sub(self.current_type_scope().types.len())
+ } else {
+ 0
+ };
+
+ let max = self.config.max_types - self.current_type_scope().types.len();
+
+ arbitrary_loop(u, min, max, |u| {
+ let mut type_fuel = self.config.max_type_size;
+ let ty = self.arbitrary_type(u, &mut type_fuel)?;
+ self.push_type(ty);
+ Ok(true)
+ })?;
+
+ Ok(Step::StillBuilding)
+ }
+
+ fn arbitrary_type_ref<'a>(
+ &self,
+ u: &mut Unstructured<'a>,
+ for_import: bool,
+ for_type_def: bool,
+ ) -> Result<Option<ComponentTypeRef>> {
+ let mut choices: Vec<fn(&Self, &mut Unstructured) -> Result<ComponentTypeRef>> = Vec::new();
+ let scope = self.current_type_scope();
+
+ if !scope.module_types.is_empty()
+ && (for_type_def || !for_import || self.total_modules < self.config.max_modules)
+ {
+ choices.push(|me, u| {
+ Ok(ComponentTypeRef::Module(
+ *u.choose(&me.current_type_scope().module_types)?,
+ ))
+ });
+ }
+
+ // Types cannot be imported currently
+ if !for_import
+ && !scope.types.is_empty()
+ && (for_type_def || scope.types.len() < self.config.max_types)
+ {
+ choices.push(|me, u| {
+ Ok(ComponentTypeRef::Type(TypeBounds::Eq(u.int_in_range(
+ 0..=u32::try_from(me.current_type_scope().types.len() - 1).unwrap(),
+ )?)))
+ });
+ }
+
+ // TODO: wasm-smith needs to ensure that every arbitrary value gets used
+ // exactly once. Until that time, don't import values.
+ // if for_type_def || !for_import || self.total_values < self.config.max_values() {
+ // choices.push(|me, u| Ok(ComponentTypeRef::Value(me.arbitrary_component_val_type(u)?)));
+ // }
+
+ if !scope.func_types.is_empty()
+ && (for_type_def || !for_import || self.component().num_funcs() < self.config.max_funcs)
+ {
+ choices.push(|me, u| {
+ Ok(ComponentTypeRef::Func(
+ *u.choose(&me.current_type_scope().func_types)?,
+ ))
+ });
+ }
+
+ if !scope.component_types.is_empty()
+ && (for_type_def || !for_import || self.total_components < self.config.max_components)
+ {
+ choices.push(|me, u| {
+ Ok(ComponentTypeRef::Component(
+ *u.choose(&me.current_type_scope().component_types)?,
+ ))
+ });
+ }
+
+ if !scope.instance_types.is_empty()
+ && (for_type_def || !for_import || self.total_instances < self.config.max_instances)
+ {
+ choices.push(|me, u| {
+ Ok(ComponentTypeRef::Instance(
+ *u.choose(&me.current_type_scope().instance_types)?,
+ ))
+ });
+ }
+
+ if choices.is_empty() {
+ return Ok(None);
+ }
+
+ let f = u.choose(&choices)?;
+ f(self, u).map(Option::Some)
+ }
+
+ fn arbitrary_type(&mut self, u: &mut Unstructured, type_fuel: &mut u32) -> Result<Rc<Type>> {
+ *type_fuel = type_fuel.saturating_sub(1);
+ if *type_fuel == 0 {
+ return Ok(Rc::new(Type::Defined(
+ self.arbitrary_defined_type(u, type_fuel)?,
+ )));
+ }
+
+ let ty = match u.int_in_range::<u8>(0..=3)? {
+ 0 => Type::Defined(self.arbitrary_defined_type(u, type_fuel)?),
+ 1 => Type::Func(self.arbitrary_func_type(u, type_fuel)?),
+ 2 => Type::Component(self.arbitrary_component_type(u, type_fuel)?),
+ 3 => Type::Instance(self.arbitrary_instance_type(u, type_fuel)?),
+ _ => unreachable!(),
+ };
+ Ok(Rc::new(ty))
+ }
+
+ fn arbitrary_module_type(
+ &self,
+ u: &mut Unstructured,
+ type_fuel: &mut u32,
+ ) -> Result<Rc<ModuleType>> {
+ let mut defs = vec![];
+ let mut has_memory = false;
+ let mut has_canonical_abi_realloc = false;
+ let mut has_canonical_abi_free = false;
+ let mut types: Vec<Rc<crate::core::FuncType>> = vec![];
+ let mut imports = HashMap::new();
+ let mut exports = HashSet::new();
+ let mut counts = EntityCounts::default();
+
+ // Special-case the canonical ABI functions: certain types can only be
+ // passed across the component boundary when these functions exist, and
+ // randomly generating them is extremely unlikely.
+
+ // `memory`
+ if counts.memories < self.config.max_memories && u.ratio::<u8>(99, 100)? {
+ defs.push(ModuleTypeDef::Export(
+ "memory".into(),
+ crate::core::EntityType::Memory(self.arbitrary_core_memory_type(u)?),
+ ));
+ exports.insert("memory".into());
+ counts.memories += 1;
+ has_memory = true;
+ }
+
+ // `canonical_abi_realloc`
+ if counts.funcs < self.config.max_funcs
+ && types.len() < self.config.max_types
+ && u.ratio::<u8>(99, 100)?
+ {
+ let realloc_ty = Rc::new(crate::core::FuncType {
+ params: vec![ValType::I32, ValType::I32, ValType::I32, ValType::I32],
+ results: vec![ValType::I32],
+ });
+ let ty_idx = u32::try_from(types.len()).unwrap();
+ types.push(realloc_ty.clone());
+ defs.push(ModuleTypeDef::TypeDef(crate::core::CompositeType::Func(
+ realloc_ty.clone(),
+ )));
+ defs.push(ModuleTypeDef::Export(
+ "canonical_abi_realloc".into(),
+ crate::core::EntityType::Func(ty_idx, realloc_ty),
+ ));
+ exports.insert("canonical_abi_realloc".into());
+ counts.funcs += 1;
+ has_canonical_abi_realloc = true;
+ }
+
+ // `canonical_abi_free`
+ if counts.funcs < self.config.max_funcs
+ && types.len() < self.config.max_types
+ && u.ratio::<u8>(99, 100)?
+ {
+ let free_ty = Rc::new(crate::core::FuncType {
+ params: vec![ValType::I32, ValType::I32, ValType::I32],
+ results: vec![],
+ });
+ let ty_idx = u32::try_from(types.len()).unwrap();
+ types.push(free_ty.clone());
+ defs.push(ModuleTypeDef::TypeDef(crate::core::CompositeType::Func(
+ free_ty.clone(),
+ )));
+ defs.push(ModuleTypeDef::Export(
+ "canonical_abi_free".into(),
+ crate::core::EntityType::Func(ty_idx, free_ty),
+ ));
+ exports.insert("canonical_abi_free".into());
+ counts.funcs += 1;
+ has_canonical_abi_free = true;
+ }
+
+ let mut entity_choices: Vec<
+ fn(
+ &ComponentBuilder,
+ &mut Unstructured,
+ &mut EntityCounts,
+ &[Rc<crate::core::FuncType>],
+ ) -> Result<crate::core::EntityType>,
+ > = Vec::with_capacity(5);
+
+ arbitrary_loop(u, 0, 100, |u| {
+ *type_fuel = type_fuel.saturating_sub(1);
+ if *type_fuel == 0 {
+ return Ok(false);
+ }
+
+ let max_choice = if types.len() < self.config.max_types {
+ // Check if the parent scope has core function types to alias
+ if !types.is_empty()
+ || (!self.types.is_empty()
+ && !self.types.last().unwrap().core_func_types.is_empty())
+ {
+ // Imports, exports, types, and aliases
+ 3
+ } else {
+ // Imports, exports, and types
+ 2
+ }
+ } else {
+ // Imports and exports
+ 1
+ };
+
+ match u.int_in_range::<u8>(0..=max_choice)? {
+ // Import.
+ 0 => {
+ let module = crate::limited_string(100, u)?;
+ let existing_module_imports = imports.entry(module.clone()).or_default();
+ let field = crate::unique_string(100, existing_module_imports, u)?;
+ let entity_type = match self.arbitrary_core_entity_type(
+ u,
+ &types,
+ &mut entity_choices,
+ &mut counts,
+ )? {
+ None => return Ok(false),
+ Some(x) => x,
+ };
+ defs.push(ModuleTypeDef::Import(crate::core::Import {
+ module,
+ field,
+ entity_type,
+ }));
+ }
+
+ // Export.
+ 1 => {
+ let name = crate::unique_string(100, &mut exports, u)?;
+ let entity_ty = match self.arbitrary_core_entity_type(
+ u,
+ &types,
+ &mut entity_choices,
+ &mut counts,
+ )? {
+ None => return Ok(false),
+ Some(x) => x,
+ };
+ defs.push(ModuleTypeDef::Export(name, entity_ty));
+ }
+
+ // Type definition.
+ 2 => {
+ let ty = crate::core::arbitrary_func_type(
+ u,
+ &self.config,
+ &self.core_valtypes,
+ if self.config.multi_value_enabled {
+ None
+ } else {
+ Some(1)
+ },
+ 0,
+ )?;
+ types.push(ty.clone());
+ defs.push(ModuleTypeDef::TypeDef(crate::core::CompositeType::Func(ty)));
+ }
+
+ // Alias
+ 3 => {
+ let (count, index, kind) = self.arbitrary_outer_core_type_alias(u, &types)?;
+ let ty = match &kind {
+ CoreOuterAliasKind::Type(ty) => ty.clone(),
+ };
+ types.push(ty);
+ defs.push(ModuleTypeDef::OuterAlias {
+ count,
+ i: index,
+ kind,
+ });
+ }
+
+ _ => unreachable!(),
+ }
+
+ Ok(true)
+ })?;
+
+ Ok(Rc::new(ModuleType {
+ defs,
+ has_memory,
+ has_canonical_abi_realloc,
+ has_canonical_abi_free,
+ }))
+ }
+
+ fn arbitrary_core_entity_type(
+ &self,
+ u: &mut Unstructured,
+ types: &[Rc<crate::core::FuncType>],
+ choices: &mut Vec<
+ fn(
+ &ComponentBuilder,
+ &mut Unstructured,
+ &mut EntityCounts,
+ &[Rc<crate::core::FuncType>],
+ ) -> Result<crate::core::EntityType>,
+ >,
+ counts: &mut EntityCounts,
+ ) -> Result<Option<crate::core::EntityType>> {
+ choices.clear();
+
+ if counts.globals < self.config.max_globals {
+ choices.push(|c, u, counts, _types| {
+ counts.globals += 1;
+ Ok(crate::core::EntityType::Global(
+ c.arbitrary_core_global_type(u)?,
+ ))
+ });
+ }
+
+ if counts.tables < self.config.max_tables {
+ choices.push(|c, u, counts, _types| {
+ counts.tables += 1;
+ Ok(crate::core::EntityType::Table(
+ c.arbitrary_core_table_type(u)?,
+ ))
+ });
+ }
+
+ if counts.memories < self.config.max_memories {
+ choices.push(|c, u, counts, _types| {
+ counts.memories += 1;
+ Ok(crate::core::EntityType::Memory(
+ c.arbitrary_core_memory_type(u)?,
+ ))
+ });
+ }
+
+ if types.iter().any(|ty| ty.results.is_empty())
+ && self.config.exceptions_enabled
+ && counts.tags < self.config.max_tags
+ {
+ choices.push(|c, u, counts, types| {
+ counts.tags += 1;
+ let tag_func_types = types
+ .iter()
+ .enumerate()
+ .filter(|(_, ty)| ty.results.is_empty())
+ .map(|(i, _)| u32::try_from(i).unwrap())
+ .collect::<Vec<_>>();
+ Ok(crate::core::EntityType::Tag(
+ crate::core::arbitrary_tag_type(u, &tag_func_types, |idx| {
+ types[usize::try_from(idx).unwrap()].clone()
+ })?,
+ ))
+ });
+ }
+
+ if !types.is_empty() && counts.funcs < self.config.max_funcs {
+ choices.push(|c, u, counts, types| {
+ counts.funcs += 1;
+ let ty_idx = u.int_in_range(0..=u32::try_from(types.len() - 1).unwrap())?;
+ let ty = types[ty_idx as usize].clone();
+ Ok(crate::core::EntityType::Func(ty_idx, ty))
+ });
+ }
+
+ if choices.is_empty() {
+ return Ok(None);
+ }
+
+ let f = u.choose(choices)?;
+ let ty = f(self, u, counts, types)?;
+ Ok(Some(ty))
+ }
+
+ fn arbitrary_core_valtype(&self, u: &mut Unstructured) -> Result<ValType> {
+ Ok(*u.choose(&self.core_valtypes)?)
+ }
+
+ fn arbitrary_core_global_type(&self, u: &mut Unstructured) -> Result<crate::core::GlobalType> {
+ Ok(crate::core::GlobalType {
+ val_type: self.arbitrary_core_valtype(u)?,
+ mutable: u.arbitrary()?,
+ })
+ }
+
+ fn arbitrary_core_table_type(&self, u: &mut Unstructured) -> Result<crate::core::TableType> {
+ crate::core::arbitrary_table_type(u, &self.config)
+ }
+
+ fn arbitrary_core_memory_type(&self, u: &mut Unstructured) -> Result<crate::core::MemoryType> {
+ crate::core::arbitrary_memtype(u, &self.config)
+ }
+
+ fn with_types_scope<T>(&mut self, f: impl FnOnce(&mut Self) -> Result<T>) -> Result<T> {
+ self.types.push(Default::default());
+ let result = f(self);
+ self.types.pop();
+ result
+ }
+
+ fn current_type_scope(&self) -> &TypesScope {
+ self.types.last().unwrap()
+ }
+
+ fn current_type_scope_mut(&mut self) -> &mut TypesScope {
+ self.types.last_mut().unwrap()
+ }
+
+ fn outer_types_scope(&self, count: u32) -> &TypesScope {
+ &self.types[self.types.len() - 1 - usize::try_from(count).unwrap()]
+ }
+
+ fn outer_type(&self, count: u32, i: u32) -> &Rc<Type> {
+ &self.outer_types_scope(count).types[usize::try_from(i).unwrap()]
+ }
+
+ fn arbitrary_component_type(
+ &mut self,
+ u: &mut Unstructured,
+ type_fuel: &mut u32,
+ ) -> Result<Rc<ComponentType>> {
+ let mut defs = vec![];
+ let mut imports = HashSet::new();
+ let mut import_urls = HashSet::new();
+ let mut exports = HashSet::new();
+ let mut export_urls = HashSet::new();
+
+ self.with_types_scope(|me| {
+ arbitrary_loop(u, 0, 100, |u| {
+ *type_fuel = type_fuel.saturating_sub(1);
+ if *type_fuel == 0 {
+ return Ok(false);
+ }
+
+ if me.current_type_scope().can_ref_type() && u.int_in_range::<u8>(0..=3)? == 0 {
+ if let Some(ty) = me.arbitrary_type_ref(u, true, true)? {
+ // Imports.
+ let name = crate::unique_kebab_string(100, &mut imports, u)?;
+ let url = if u.arbitrary()? {
+ Some(crate::unique_url(100, &mut import_urls, u)?)
+ } else {
+ None
+ };
+ defs.push(ComponentTypeDef::Import(Import { name, url, ty }));
+ return Ok(true);
+ }
+
+ // Can't reference an arbitrary type; fall back to another definition.
+ }
+
+ // Type definitions, exports, and aliases.
+ let def =
+ me.arbitrary_instance_type_def(u, &mut exports, &mut export_urls, type_fuel)?;
+ defs.push(def.into());
+ Ok(true)
+ })
+ })?;
+
+ Ok(Rc::new(ComponentType { defs }))
+ }
+
+ fn arbitrary_instance_type(
+ &mut self,
+ u: &mut Unstructured,
+ type_fuel: &mut u32,
+ ) -> Result<Rc<InstanceType>> {
+ let mut defs = vec![];
+ let mut exports = HashSet::new();
+ let mut export_urls = HashSet::new();
+
+ self.with_types_scope(|me| {
+ arbitrary_loop(u, 0, 100, |u| {
+ *type_fuel = type_fuel.saturating_sub(1);
+ if *type_fuel == 0 {
+ return Ok(false);
+ }
+
+ defs.push(me.arbitrary_instance_type_def(
+ u,
+ &mut exports,
+ &mut export_urls,
+ type_fuel,
+ )?);
+ Ok(true)
+ })
+ })?;
+
+ Ok(Rc::new(InstanceType { defs }))
+ }
+
+ fn arbitrary_instance_type_def(
+ &mut self,
+ u: &mut Unstructured,
+ exports: &mut HashSet<String>,
+ export_urls: &mut HashSet<String>,
+ type_fuel: &mut u32,
+ ) -> Result<InstanceTypeDecl> {
+ let mut choices: Vec<
+ fn(
+ &mut ComponentBuilder,
+ &mut HashSet<String>,
+ &mut HashSet<String>,
+ &mut Unstructured,
+ &mut u32,
+ ) -> Result<InstanceTypeDecl>,
+ > = Vec::with_capacity(3);
+
+ // Export.
+ if self.current_type_scope().can_ref_type() {
+ choices.push(|me, exports, export_urls, u, _type_fuel| {
+ let ty = me.arbitrary_type_ref(u, false, true)?.unwrap();
+ if let ComponentTypeRef::Type(TypeBounds::Eq(idx)) = ty {
+ let ty = me.current_type_scope().get(idx).clone();
+ me.current_type_scope_mut().push(ty);
+ }
+ Ok(InstanceTypeDecl::Export {
+ name: crate::unique_kebab_string(100, exports, u)?,
+ url: if u.arbitrary()? {
+ Some(crate::unique_url(100, export_urls, u)?)
+ } else {
+ None
+ },
+ ty,
+ })
+ });
+ }
+
+ // Outer type alias.
+ if self
+ .types
+ .iter()
+ .any(|scope| !scope.types.is_empty() || !scope.core_types.is_empty())
+ {
+ choices.push(|me, _exports, _export_urls, u, _type_fuel| {
+ let alias = me.arbitrary_outer_type_alias(u)?;
+ match &alias {
+ Alias::Outer {
+ kind: OuterAliasKind::Type(ty),
+ ..
+ } => me.current_type_scope_mut().push(ty.clone()),
+ Alias::Outer {
+ kind: OuterAliasKind::CoreType(ty),
+ ..
+ } => me.current_type_scope_mut().push_core(ty.clone()),
+ _ => unreachable!(),
+ };
+ Ok(InstanceTypeDecl::Alias(alias))
+ });
+ }
+
+ // Core type definition.
+ choices.push(|me, _exports, _export_urls, u, type_fuel| {
+ let ty = me.arbitrary_core_type(u, type_fuel)?;
+ me.current_type_scope_mut().push_core(ty.clone());
+ Ok(InstanceTypeDecl::CoreType(ty))
+ });
+
+ // Type definition.
+ if self.types.len() < self.config.max_nesting_depth {
+ choices.push(|me, _exports, _export_urls, u, type_fuel| {
+ let ty = me.arbitrary_type(u, type_fuel)?;
+ me.current_type_scope_mut().push(ty.clone());
+ Ok(InstanceTypeDecl::Type(ty))
+ });
+ }
+
+ let f = u.choose(&choices)?;
+ f(self, exports, export_urls, u, type_fuel)
+ }
+
+ fn arbitrary_outer_core_type_alias(
+ &self,
+ u: &mut Unstructured,
+ local_types: &[Rc<crate::core::FuncType>],
+ ) -> Result<(u32, u32, CoreOuterAliasKind)> {
+ let enclosing_type_len = if !self.types.is_empty() {
+ self.types.last().unwrap().core_func_types.len()
+ } else {
+ 0
+ };
+
+ assert!(!local_types.is_empty() || enclosing_type_len > 0);
+
+ let max = enclosing_type_len + local_types.len() - 1;
+ let i = u.int_in_range(0..=max)?;
+ let (count, index, ty) = if i < enclosing_type_len {
+ let enclosing = self.types.last().unwrap();
+ let index = enclosing.core_func_types[i];
+ (
+ 1,
+ index,
+ match enclosing.get_core(index).as_ref() {
+ CoreType::Func(ty) => ty.clone(),
+ CoreType::Module(_) => unreachable!(),
+ },
+ )
+ } else if i - enclosing_type_len < local_types.len() {
+ let i = i - enclosing_type_len;
+ (0, u32::try_from(i).unwrap(), local_types[i].clone())
+ } else {
+ unreachable!()
+ };
+
+ Ok((count, index, CoreOuterAliasKind::Type(ty)))
+ }
+
+ fn arbitrary_outer_type_alias(&self, u: &mut Unstructured) -> Result<Alias> {
+ let non_empty_types_scopes: Vec<_> = self
+ .types
+ .iter()
+ .rev()
+ .enumerate()
+ .filter(|(_, scope)| !scope.types.is_empty() || !scope.core_types.is_empty())
+ .collect();
+ assert!(
+ !non_empty_types_scopes.is_empty(),
+ "precondition: there are non-empty types scopes"
+ );
+
+ let (count, scope) = u.choose(&non_empty_types_scopes)?;
+ let count = u32::try_from(*count).unwrap();
+ assert!(!scope.types.is_empty() || !scope.core_types.is_empty());
+
+ let max_type_in_scope = scope.types.len() + scope.core_types.len() - 1;
+ let i = u.int_in_range(0..=max_type_in_scope)?;
+
+ let (i, kind) = if i < scope.types.len() {
+ let i = u32::try_from(i).unwrap();
+ (i, OuterAliasKind::Type(Rc::clone(scope.get(i))))
+ } else if i - scope.types.len() < scope.core_types.len() {
+ let i = u32::try_from(i - scope.types.len()).unwrap();
+ (i, OuterAliasKind::CoreType(Rc::clone(scope.get_core(i))))
+ } else {
+ unreachable!()
+ };
+
+ Ok(Alias::Outer { count, i, kind })
+ }
+
+ fn arbitrary_func_type(
+ &self,
+ u: &mut Unstructured,
+ type_fuel: &mut u32,
+ ) -> Result<Rc<FuncType>> {
+ let mut params = Vec::new();
+ let mut results = Vec::new();
+ let mut names = HashSet::new();
+
+ // Note: parameters are currently limited to a maximum of 16
+ // because any additional parameters will require indirect access
+ // via a pointer argument; when this occurs, validation of any
+ // lowered function will fail because it will be missing a
+ // memory option (not yet implemented).
+ //
+ // When options are correctly specified on canonical functions,
+ // we should increase this maximum to test indirect parameter
+ // passing.
+ arbitrary_loop(u, 0, 16, |u| {
+ *type_fuel = type_fuel.saturating_sub(1);
+ if *type_fuel == 0 {
+ return Ok(false);
+ }
+
+ let name = crate::unique_kebab_string(100, &mut names, u)?;
+ let ty = self.arbitrary_component_val_type(u)?;
+
+ params.push((name, ty));
+
+ Ok(true)
+ })?;
+
+ names.clear();
+
+ // Likewise, the limit for results is 1 before the memory option is
+ // required. When the memory option is implemented, this restriction
+ // should be relaxed.
+ arbitrary_loop(u, 0, 1, |u| {
+ *type_fuel = type_fuel.saturating_sub(1);
+ if *type_fuel == 0 {
+ return Ok(false);
+ }
+
+ // If the result list is empty (i.e. this is the first result), then
+ // arbitrarily give the result a name. Otherwise, all of the subsequent
+ // results must be named.
+ let name = if results.is_empty() {
+ // Most of the time we should have a single, unnamed result.
+ u.ratio::<u8>(10, 100)?
+ .then(|| crate::unique_kebab_string(100, &mut names, u))
+ .transpose()?
+ } else {
+ Some(crate::unique_kebab_string(100, &mut names, u)?)
+ };
+
+ let ty = self.arbitrary_component_val_type(u)?;
+
+ results.push((name, ty));
+
+ // There can be only one unnamed result.
+ if results.len() == 1 && results[0].0.is_none() {
+ return Ok(false);
+ }
+
+ Ok(true)
+ })?;
+
+ Ok(Rc::new(FuncType { params, results }))
+ }
+
+ fn arbitrary_component_val_type(&self, u: &mut Unstructured) -> Result<ComponentValType> {
+ let max_choices = if self.current_type_scope().defined_types.is_empty() {
+ 0
+ } else {
+ 1
+ };
+ match u.int_in_range(0..=max_choices)? {
+ 0 => Ok(ComponentValType::Primitive(
+ self.arbitrary_primitive_val_type(u)?,
+ )),
+ 1 => {
+ let index = *u.choose(&self.current_type_scope().defined_types)?;
+ let ty = Rc::clone(self.current_type_scope().get(index));
+ Ok(ComponentValType::Type(index))
+ }
+ _ => unreachable!(),
+ }
+ }
+
+ fn arbitrary_primitive_val_type(&self, u: &mut Unstructured) -> Result<PrimitiveValType> {
+ match u.int_in_range(0..=12)? {
+ 0 => Ok(PrimitiveValType::Bool),
+ 1 => Ok(PrimitiveValType::S8),
+ 2 => Ok(PrimitiveValType::U8),
+ 3 => Ok(PrimitiveValType::S16),
+ 4 => Ok(PrimitiveValType::U16),
+ 5 => Ok(PrimitiveValType::S32),
+ 6 => Ok(PrimitiveValType::U32),
+ 7 => Ok(PrimitiveValType::S64),
+ 8 => Ok(PrimitiveValType::U64),
+ 9 => Ok(PrimitiveValType::Float32),
+ 10 => Ok(PrimitiveValType::Float64),
+ 11 => Ok(PrimitiveValType::Char),
+ 12 => Ok(PrimitiveValType::String),
+ _ => unreachable!(),
+ }
+ }
+
+ fn arbitrary_record_type(
+ &self,
+ u: &mut Unstructured,
+ type_fuel: &mut u32,
+ ) -> Result<RecordType> {
+ let mut fields = vec![];
+ let mut field_names = HashSet::new();
+ arbitrary_loop(u, 0, 100, |u| {
+ *type_fuel = type_fuel.saturating_sub(1);
+ if *type_fuel == 0 {
+ return Ok(false);
+ }
+
+ let name = crate::unique_kebab_string(100, &mut field_names, u)?;
+ let ty = self.arbitrary_component_val_type(u)?;
+
+ fields.push((name, ty));
+ Ok(true)
+ })?;
+ Ok(RecordType { fields })
+ }
+
+ fn arbitrary_variant_type(
+ &self,
+ u: &mut Unstructured,
+ type_fuel: &mut u32,
+ ) -> Result<VariantType> {
+ let mut cases = vec![];
+ let mut case_names = HashSet::new();
+ arbitrary_loop(u, 1, 100, |u| {
+ *type_fuel = type_fuel.saturating_sub(1);
+ if *type_fuel == 0 {
+ return Ok(false);
+ }
+
+ let name = crate::unique_kebab_string(100, &mut case_names, u)?;
+
+ let ty = u
+ .arbitrary::<bool>()?
+ .then(|| self.arbitrary_component_val_type(u))
+ .transpose()?;
+
+ let refines = if !cases.is_empty() && u.arbitrary()? {
+ let max_cases = u32::try_from(cases.len() - 1).unwrap();
+ Some(u.int_in_range(0..=max_cases)?)
+ } else {
+ None
+ };
+
+ cases.push((name, ty, refines));
+ Ok(true)
+ })?;
+
+ Ok(VariantType { cases })
+ }
+
+ fn arbitrary_list_type(&self, u: &mut Unstructured) -> Result<ListType> {
+ Ok(ListType {
+ elem_ty: self.arbitrary_component_val_type(u)?,
+ })
+ }
+
+ fn arbitrary_tuple_type(&self, u: &mut Unstructured, type_fuel: &mut u32) -> Result<TupleType> {
+ let mut fields = vec![];
+ arbitrary_loop(u, 0, 100, |u| {
+ *type_fuel = type_fuel.saturating_sub(1);
+ if *type_fuel == 0 {
+ return Ok(false);
+ }
+
+ fields.push(self.arbitrary_component_val_type(u)?);
+ Ok(true)
+ })?;
+ Ok(TupleType { fields })
+ }
+
+ fn arbitrary_flags_type(&self, u: &mut Unstructured, type_fuel: &mut u32) -> Result<FlagsType> {
+ let mut fields = vec![];
+ let mut field_names = HashSet::new();
+ arbitrary_loop(u, 0, 100, |u| {
+ *type_fuel = type_fuel.saturating_sub(1);
+ if *type_fuel == 0 {
+ return Ok(false);
+ }
+
+ fields.push(crate::unique_kebab_string(100, &mut field_names, u)?);
+ Ok(true)
+ })?;
+ Ok(FlagsType { fields })
+ }
+
+ fn arbitrary_enum_type(&self, u: &mut Unstructured, type_fuel: &mut u32) -> Result<EnumType> {
+ let mut variants = vec![];
+ let mut variant_names = HashSet::new();
+ arbitrary_loop(u, 1, 100, |u| {
+ *type_fuel = type_fuel.saturating_sub(1);
+ if *type_fuel == 0 {
+ return Ok(false);
+ }
+
+ variants.push(crate::unique_kebab_string(100, &mut variant_names, u)?);
+ Ok(true)
+ })?;
+ Ok(EnumType { variants })
+ }
+
+ fn arbitrary_option_type(&self, u: &mut Unstructured) -> Result<OptionType> {
+ Ok(OptionType {
+ inner_ty: self.arbitrary_component_val_type(u)?,
+ })
+ }
+
+ fn arbitrary_result_type(&self, u: &mut Unstructured) -> Result<ResultType> {
+ Ok(ResultType {
+ ok_ty: u
+ .arbitrary::<bool>()?
+ .then(|| self.arbitrary_component_val_type(u))
+ .transpose()?,
+ err_ty: u
+ .arbitrary::<bool>()?
+ .then(|| self.arbitrary_component_val_type(u))
+ .transpose()?,
+ })
+ }
+
+ fn arbitrary_defined_type(
+ &self,
+ u: &mut Unstructured,
+ type_fuel: &mut u32,
+ ) -> Result<DefinedType> {
+ match u.int_in_range(0..=8)? {
+ 0 => Ok(DefinedType::Primitive(
+ self.arbitrary_primitive_val_type(u)?,
+ )),
+ 1 => Ok(DefinedType::Record(
+ self.arbitrary_record_type(u, type_fuel)?,
+ )),
+ 2 => Ok(DefinedType::Variant(
+ self.arbitrary_variant_type(u, type_fuel)?,
+ )),
+ 3 => Ok(DefinedType::List(self.arbitrary_list_type(u)?)),
+ 4 => Ok(DefinedType::Tuple(self.arbitrary_tuple_type(u, type_fuel)?)),
+ 5 => Ok(DefinedType::Flags(self.arbitrary_flags_type(u, type_fuel)?)),
+ 6 => Ok(DefinedType::Enum(self.arbitrary_enum_type(u, type_fuel)?)),
+ 7 => Ok(DefinedType::Option(self.arbitrary_option_type(u)?)),
+ 8 => Ok(DefinedType::Result(self.arbitrary_result_type(u)?)),
+ _ => unreachable!(),
+ }
+ }
+
+ fn push_import(&mut self, name: String, url: Option<String>, ty: ComponentTypeRef) {
+ let nth = match self.ensure_section(
+ |sec| matches!(sec, Section::Import(_)),
+ || Section::Import(ImportSection { imports: vec![] }),
+ ) {
+ Section::Import(sec) => {
+ sec.imports.push(Import { name, url, ty });
+ sec.imports.len() - 1
+ }
+ _ => unreachable!(),
+ };
+ let section_index = self.component().component.sections.len() - 1;
+
+ match ty {
+ ComponentTypeRef::Module(_) => {
+ self.total_modules += 1;
+ self.component_mut().modules.push((section_index, nth));
+ }
+ ComponentTypeRef::Func(ty_index) => {
+ let func_ty = match self.current_type_scope().get(ty_index).as_ref() {
+ Type::Func(ty) => ty.clone(),
+ _ => unreachable!(),
+ };
+
+ if func_ty.is_scalar() {
+ let func_index = u32::try_from(self.component().component_funcs.len()).unwrap();
+ self.component_mut().scalar_component_funcs.push(func_index);
+ }
+
+ let func_index = u32::try_from(self.component().funcs.len()).unwrap();
+ self.component_mut()
+ .funcs
+ .push(ComponentOrCoreFuncType::Component(func_ty));
+
+ self.component_mut().component_funcs.push(func_index);
+ }
+ ComponentTypeRef::Value(ty) => {
+ self.total_values += 1;
+ self.component_mut().values.push(ty);
+ }
+ ComponentTypeRef::Type(TypeBounds::Eq(ty_index)) => {
+ let ty = self.current_type_scope().get(ty_index).clone();
+ self.current_type_scope_mut().push(ty);
+ }
+ ComponentTypeRef::Type(TypeBounds::SubResource) => {
+ unimplemented!()
+ }
+ ComponentTypeRef::Instance(ty_index) => {
+ let instance_ty = match self.current_type_scope().get(ty_index).as_ref() {
+ Type::Instance(ty) => ty.clone(),
+ _ => unreachable!(),
+ };
+
+ self.total_instances += 1;
+ self.component_mut()
+ .instances
+ .push(ComponentOrCoreInstanceType::Component(instance_ty));
+ }
+ ComponentTypeRef::Component(_) => {
+ self.total_components += 1;
+ self.component_mut().components.push((section_index, nth));
+ }
+ }
+ }
+
+ fn core_function_type(&self, core_func_index: u32) -> &Rc<crate::core::FuncType> {
+ self.component().funcs[self.component().core_funcs[core_func_index as usize] as usize]
+ .as_core()
+ }
+
+ fn component_function_type(&self, func_index: u32) -> &Rc<FuncType> {
+ self.component().funcs[self.component().component_funcs[func_index as usize] as usize]
+ .as_component()
+ }
+
+ fn push_func(&mut self, func: Func) {
+ let nth = match self.component_mut().component.sections.last_mut() {
+ Some(Section::Canonical(CanonicalSection { funcs })) => funcs.len(),
+ _ => {
+ self.push_section(Section::Canonical(CanonicalSection { funcs: vec![] }));
+ 0
+ }
+ };
+ let section_index = self.component().component.sections.len() - 1;
+
+ let func_index = u32::try_from(self.component().funcs.len()).unwrap();
+
+ let ty = match &func {
+ Func::CanonLift { func_ty, .. } => {
+ let ty = Rc::clone(self.current_type_scope().get_func(*func_ty));
+ if ty.is_scalar() {
+ let func_index = u32::try_from(self.component().component_funcs.len()).unwrap();
+ self.component_mut().scalar_component_funcs.push(func_index);
+ }
+ self.component_mut().component_funcs.push(func_index);
+ ComponentOrCoreFuncType::Component(ty)
+ }
+ Func::CanonLower {
+ func_index: comp_func_index,
+ ..
+ } => {
+ let comp_func_ty = self.component_function_type(*comp_func_index);
+ let core_func_ty = canonical_abi_for(comp_func_ty);
+ self.component_mut().core_funcs.push(func_index);
+ ComponentOrCoreFuncType::Core(core_func_ty)
+ }
+ };
+
+ self.component_mut().funcs.push(ty);
+
+ match self.component_mut().component.sections.last_mut() {
+ Some(Section::Canonical(CanonicalSection { funcs })) => funcs.push(func),
+ _ => unreachable!(),
+ }
+ }
+
+ fn arbitrary_import_section(&mut self, u: &mut Unstructured) -> Result<Step> {
+ self.push_section(Section::Import(ImportSection { imports: vec![] }));
+
+ let min = if self.fill_minimums {
+ self.config
+ .min_imports
+ .saturating_sub(self.component().num_imports)
+ } else {
+ // Allow generating empty sections. We can always fill in the required
+ // minimum later.
+ 0
+ };
+ let max = self.config.max_imports - self.component().num_imports;
+
+ crate::arbitrary_loop(u, min, max, |u| {
+ match self.arbitrary_type_ref(u, true, false)? {
+ Some(ty) => {
+ let name =
+ crate::unique_kebab_string(100, &mut self.component_mut().import_names, u)?;
+ let url = if u.arbitrary()? {
+ Some(crate::unique_url(
+ 100,
+ &mut self.component_mut().import_urls,
+ u,
+ )?)
+ } else {
+ None
+ };
+ self.push_import(name, url, ty);
+ Ok(true)
+ }
+ None => Ok(false),
+ }
+ })?;
+
+ Ok(Step::StillBuilding)
+ }
+
+ fn arbitrary_canonical_section(&mut self, u: &mut Unstructured) -> Result<Step> {
+ self.push_section(Section::Canonical(CanonicalSection { funcs: vec![] }));
+
+ let min = if self.fill_minimums {
+ self.config
+ .min_funcs
+ .saturating_sub(self.component().funcs.len())
+ } else {
+ // Allow generating empty sections. We can always fill in the
+ // required minimum later.
+ 0
+ };
+ let max = self.config.max_funcs - self.component().funcs.len();
+
+ let mut choices: Vec<fn(&mut Unstructured, &mut ComponentBuilder) -> Result<Option<Func>>> =
+ Vec::with_capacity(2);
+
+ crate::arbitrary_loop(u, min, max, |u| {
+ choices.clear();
+
+ // NB: We only lift/lower scalar component functions.
+ //
+ // If we generated lifting and lowering of compound value types,
+ // the probability of generating a corresponding Wasm module that
+ // generates valid instances of the compound value types would
+ // be vanishingly tiny (e.g. for `list<string>` we would have to
+ // generate a core Wasm module that correctly produces a pointer and
+ // length for a memory region that itself is a series of pointers
+ // and lengths of valid strings, as well as `canonical_abi_realloc`
+ // and `canonical_abi_free` functions that do the right thing).
+ //
+ // This is a pretty serious limitation of `wasm-smith`'s component
+ // types support, but it is one we are intentionally
+ // accepting. `wasm-smith` will focus on generating arbitrary
+ // component sections, structures, and import/export topologies; not
+ // component functions and core Wasm implementations of component
+ // functions. In the future, we intend to build a new, distinct test
+ // case generator specifically for exercising component functions
+ // and the canonical ABI. This new generator won't emit arbitrary
+ // component sections, structures, or import/export topologies, and
+ // will instead leave that to `wasm-smith`.
+
+ if !self.component().scalar_component_funcs.is_empty() {
+ choices.push(|u, c| {
+ let func_index = *u.choose(&c.component().scalar_component_funcs)?;
+ Ok(Some(Func::CanonLower {
+ // Scalar component functions don't use any canonical options.
+ options: vec![],
+ func_index,
+ }))
+ });
+ }
+
+ if !self.component().core_funcs.is_empty() {
+ choices.push(|u, c| {
+ let core_func_index = u.int_in_range(
+ 0..=u32::try_from(c.component().core_funcs.len() - 1).unwrap(),
+ )?;
+ let core_func_ty = c.core_function_type(core_func_index);
+ let comp_func_ty = inverse_scalar_canonical_abi_for(u, core_func_ty)?;
+
+ let func_ty = if let Some(indices) = c
+ .current_type_scope()
+ .func_type_to_indices
+ .get(&comp_func_ty)
+ {
+ // If we've already defined this component function type
+ // one or more times, then choose one of those
+ // definitions arbitrarily.
+ debug_assert!(!indices.is_empty());
+ *u.choose(indices)?
+ } else if c.current_type_scope().types.len() < c.config.max_types {
+ // If we haven't already defined this component function
+ // type, and we haven't defined the configured maximum
+ // amount of types yet, then just define this type.
+ let ty = Rc::new(Type::Func(Rc::new(comp_func_ty)));
+ c.push_type(ty)
+ } else {
+ // Otherwise, give up on lifting this function.
+ return Ok(None);
+ };
+
+ Ok(Some(Func::CanonLift {
+ func_ty,
+ // Scalar functions don't use any canonical options.
+ options: vec![],
+ core_func_index,
+ }))
+ });
+ }
+
+ if choices.is_empty() {
+ return Ok(false);
+ }
+
+ let f = u.choose(&choices)?;
+ if let Some(func) = f(u, self)? {
+ self.push_func(func);
+ }
+
+ Ok(true)
+ })?;
+
+ Ok(Step::StillBuilding)
+ }
+
+ fn arbitrary_core_module_section(&mut self, u: &mut Unstructured) -> Result<Step> {
+ let module = crate::core::Module::new_internal(
+ self.config.clone(),
+ u,
+ crate::core::DuplicateImportsBehavior::Disallowed,
+ )?;
+ self.push_section(Section::CoreModule(module));
+ self.total_modules += 1;
+ Ok(Step::StillBuilding)
+ }
+
+ fn arbitrary_component_section(&mut self, u: &mut Unstructured) -> Result<Step> {
+ self.types.push(TypesScope::default());
+ self.components.push(ComponentContext::empty());
+ self.total_components += 1;
+ Ok(Step::StillBuilding)
+ }
+
+ fn arbitrary_instance_section(&mut self, u: &mut Unstructured) -> Result<()> {
+ todo!()
+ }
+
+ fn arbitrary_export_section(&mut self, u: &mut Unstructured) -> Result<()> {
+ todo!()
+ }
+
+ fn arbitrary_start_section(&mut self, u: &mut Unstructured) -> Result<()> {
+ todo!()
+ }
+
+ fn arbitrary_alias_section(&mut self, u: &mut Unstructured) -> Result<()> {
+ todo!()
+ }
+}
+
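+// For example (a sketch of the scalar-only flattening implemented below), a
+// component function type such as
+//
+//     (param "x" bool) (param "y" u64) -> (result float32)
+//
+// flattens to the core signature `(i32, i64) -> (f32)`: every scalar
+// component value lowers to exactly one core value.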
+fn canonical_abi_for(func_ty: &FuncType) -> Rc<crate::core::FuncType> {
+ let to_core_ty = |ty| match ty {
+ ComponentValType::Primitive(prim_ty) => match prim_ty {
+ PrimitiveValType::Char
+ | PrimitiveValType::Bool
+ | PrimitiveValType::S8
+ | PrimitiveValType::U8
+ | PrimitiveValType::S16
+ | PrimitiveValType::U16
+ | PrimitiveValType::S32
+ | PrimitiveValType::U32 => ValType::I32,
+ PrimitiveValType::S64 | PrimitiveValType::U64 => ValType::I64,
+ PrimitiveValType::Float32 => ValType::F32,
+ PrimitiveValType::Float64 => ValType::F64,
+ PrimitiveValType::String => {
+ unimplemented!("non-scalar types are not supported yet")
+ }
+ },
+ ComponentValType::Type(_) => unimplemented!("non-scalar types are not supported yet"),
+ };
+
+ Rc::new(crate::core::FuncType {
+ params: func_ty
+ .params
+ .iter()
+ .map(|(_, ty)| to_core_ty(*ty))
+ .collect(),
+ results: func_ty
+ .results
+ .iter()
+ .map(|(_, ty)| to_core_ty(*ty))
+ .collect(),
+ })
+}
+
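+// The inverse mapping is one-to-many: a core `i32`, for example, may lift to
+// any of the scalar component types that flatten to `i32` (`bool`, `char`,
+// `s8` through `u32`), so the concrete choice is drawn from the fuzzer's
+// input. Parameter names are freshly generated kebab-case strings.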
+fn inverse_scalar_canonical_abi_for(
+ u: &mut Unstructured,
+ core_func_ty: &crate::core::FuncType,
+) -> Result<FuncType> {
+ let from_core_ty = |u: &mut Unstructured, core_ty| match core_ty {
+ ValType::I32 => u
+ .choose(&[
+ ComponentValType::Primitive(PrimitiveValType::Char),
+ ComponentValType::Primitive(PrimitiveValType::Bool),
+ ComponentValType::Primitive(PrimitiveValType::S8),
+ ComponentValType::Primitive(PrimitiveValType::U8),
+ ComponentValType::Primitive(PrimitiveValType::S16),
+ ComponentValType::Primitive(PrimitiveValType::U16),
+ ComponentValType::Primitive(PrimitiveValType::S32),
+ ComponentValType::Primitive(PrimitiveValType::U32),
+ ])
+ .cloned(),
+ ValType::I64 => u
+ .choose(&[
+ ComponentValType::Primitive(PrimitiveValType::S64),
+ ComponentValType::Primitive(PrimitiveValType::U64),
+ ])
+ .cloned(),
+ ValType::F32 => Ok(ComponentValType::Primitive(PrimitiveValType::Float32)),
+ ValType::F64 => Ok(ComponentValType::Primitive(PrimitiveValType::Float64)),
+ ValType::V128 | ValType::Ref(_) => {
+ unreachable!("not used in canonical ABI")
+ }
+ };
+
+ let mut names = HashSet::default();
+ let mut params = vec![];
+
+ for core_ty in &core_func_ty.params {
+ params.push((
+ crate::unique_kebab_string(100, &mut names, u)?,
+ from_core_ty(u, *core_ty)?,
+ ));
+ }
+
+ names.clear();
+
+ let results = match core_func_ty.results.len() {
+ 0 => Vec::new(),
+ 1 => vec![(
+ if u.arbitrary()? {
+ Some(crate::unique_kebab_string(100, &mut names, u)?)
+ } else {
+ None
+ },
+ from_core_ty(u, core_func_ty.results[0])?,
+ )],
+ _ => unimplemented!("non-scalar types are not supported yet"),
+ };
+
+ Ok(FuncType { params, results })
+}
+
+#[derive(Debug)]
+enum Section {
+ Custom(CustomSection),
+ CoreModule(crate::Module),
+ CoreInstance(CoreInstanceSection),
+ CoreType(CoreTypeSection),
+ Component(Component),
+ Instance(InstanceSection),
+ Alias(AliasSection),
+ Type(TypeSection),
+ Canonical(CanonicalSection),
+ Start(StartSection),
+ Import(ImportSection),
+ Export(ExportSection),
+}
+
+#[derive(Debug)]
+struct CustomSection {
+ name: String,
+ data: Vec<u8>,
+}
+
+impl<'a> Arbitrary<'a> for CustomSection {
+ fn arbitrary(u: &mut Unstructured<'a>) -> Result<Self> {
+ let name = crate::limited_string(1_000, u)?;
+ let data = u.arbitrary()?;
+ Ok(CustomSection { name, data })
+ }
+}
+
+#[derive(Debug)]
+struct TypeSection {
+ types: Vec<Rc<Type>>,
+}
+
+#[derive(Clone, Debug, PartialEq, Eq, Hash)]
+enum CoreType {
+ Func(Rc<crate::core::FuncType>),
+ Module(Rc<ModuleType>),
+}
+
+#[derive(Clone, Debug, PartialEq, Eq, Hash, Default)]
+struct ModuleType {
+ defs: Vec<ModuleTypeDef>,
+ has_memory: bool,
+ has_canonical_abi_realloc: bool,
+ has_canonical_abi_free: bool,
+}
+
+#[derive(Clone, Debug, PartialEq, Eq, Hash)]
+enum ModuleTypeDef {
+ TypeDef(crate::core::CompositeType),
+ Import(crate::core::Import),
+ OuterAlias {
+ count: u32,
+ i: u32,
+ kind: CoreOuterAliasKind,
+ },
+ Export(String, crate::core::EntityType),
+}
+
+#[derive(Clone, Debug, PartialEq, Eq, Hash)]
+enum Type {
+ Defined(DefinedType),
+ Func(Rc<FuncType>),
+ Component(Rc<ComponentType>),
+ Instance(Rc<InstanceType>),
+}
+
+#[derive(Clone, Debug, PartialEq, Eq, Hash)]
+enum CoreInstanceExportAliasKind {
+ Func,
+ Table,
+ Memory,
+ Global,
+ Tag,
+}
+
+#[derive(Clone, Debug, PartialEq, Eq, Hash)]
+enum CoreOuterAliasKind {
+ Type(Rc<crate::core::FuncType>),
+}
+
+#[derive(Clone, Debug, PartialEq, Eq, Hash)]
+enum Alias {
+ InstanceExport {
+ instance: u32,
+ name: String,
+ kind: InstanceExportAliasKind,
+ },
+ CoreInstanceExport {
+ instance: u32,
+ name: String,
+ kind: CoreInstanceExportAliasKind,
+ },
+ Outer {
+ count: u32,
+ i: u32,
+ kind: OuterAliasKind,
+ },
+}
+
+#[derive(Clone, Debug, PartialEq, Eq, Hash)]
+enum InstanceExportAliasKind {
+ Module,
+ Component,
+ Instance,
+ Func,
+ Value,
+}
+
+#[derive(Clone, Debug, PartialEq, Eq, Hash)]
+enum OuterAliasKind {
+ Module,
+ Component,
+ CoreType(Rc<CoreType>),
+ Type(Rc<Type>),
+}
+
+#[derive(Clone, Debug, PartialEq, Eq, Hash)]
+struct ComponentType {
+ defs: Vec<ComponentTypeDef>,
+}
+
+#[derive(Clone, Debug, PartialEq, Eq, Hash)]
+enum ComponentTypeDef {
+ CoreType(Rc<CoreType>),
+ Type(Rc<Type>),
+ Alias(Alias),
+ Import(Import),
+ Export {
+ name: String,
+ url: Option<String>,
+ ty: ComponentTypeRef,
+ },
+}
+
+impl From<InstanceTypeDecl> for ComponentTypeDef {
+ fn from(def: InstanceTypeDecl) -> Self {
+ match def {
+ InstanceTypeDecl::CoreType(t) => Self::CoreType(t),
+ InstanceTypeDecl::Type(t) => Self::Type(t),
+ InstanceTypeDecl::Export { name, url, ty } => Self::Export { name, url, ty },
+ InstanceTypeDecl::Alias(a) => Self::Alias(a),
+ }
+ }
+}
+
+#[derive(Clone, Debug, PartialEq, Eq, Hash)]
+struct InstanceType {
+ defs: Vec<InstanceTypeDecl>,
+}
+
+#[derive(Clone, Debug, PartialEq, Eq, Hash)]
+enum InstanceTypeDecl {
+ CoreType(Rc<CoreType>),
+ Type(Rc<Type>),
+ Alias(Alias),
+ Export {
+ name: String,
+ url: Option<String>,
+ ty: ComponentTypeRef,
+ },
+}
+
+#[derive(Clone, Debug, PartialEq, Eq, Hash)]
+struct FuncType {
+ params: Vec<(String, ComponentValType)>,
+ results: Vec<(Option<String>, ComponentValType)>,
+}
+
+impl FuncType {
+ fn unnamed_result_ty(&self) -> Option<ComponentValType> {
+ if self.results.len() == 1 {
+ let (name, ty) = &self.results[0];
+ if name.is_none() {
+ return Some(*ty);
+ }
+ }
+ None
+ }
+
+ fn is_scalar(&self) -> bool {
+ self.params.iter().all(|(_, ty)| is_scalar(ty))
+ && self.results.len() == 1
+ && is_scalar(&self.results[0].1)
+ }
+}
+
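+/// Whether `ty` is a scalar primitive (numeric, `bool`, or `char`); `string`
+/// and user-defined types are never treated as scalar here.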
+fn is_scalar(ty: &ComponentValType) -> bool {
+ match ty {
+ ComponentValType::Primitive(prim) => match prim {
+ PrimitiveValType::Bool
+ | PrimitiveValType::S8
+ | PrimitiveValType::U8
+ | PrimitiveValType::S16
+ | PrimitiveValType::U16
+ | PrimitiveValType::S32
+ | PrimitiveValType::U32
+ | PrimitiveValType::S64
+ | PrimitiveValType::U64
+ | PrimitiveValType::Float32
+ | PrimitiveValType::Float64
+ | PrimitiveValType::Char => true,
+ PrimitiveValType::String => false,
+ },
+ ComponentValType::Type(_) => false,
+ }
+}
+
+#[derive(Clone, Debug, PartialEq, Eq, Hash)]
+enum DefinedType {
+ Primitive(PrimitiveValType),
+ Record(RecordType),
+ Variant(VariantType),
+ List(ListType),
+ Tuple(TupleType),
+ Flags(FlagsType),
+ Enum(EnumType),
+ Option(OptionType),
+ Result(ResultType),
+}
+
+#[derive(Clone, Debug, PartialEq, Eq, Hash)]
+struct RecordType {
+ fields: Vec<(String, ComponentValType)>,
+}
+
+#[derive(Clone, Debug, PartialEq, Eq, Hash)]
+struct VariantType {
+ cases: Vec<(String, Option<ComponentValType>, Option<u32>)>,
+}
+
+#[derive(Clone, Debug, PartialEq, Eq, Hash)]
+struct ListType {
+ elem_ty: ComponentValType,
+}
+
+#[derive(Clone, Debug, PartialEq, Eq, Hash)]
+struct TupleType {
+ fields: Vec<ComponentValType>,
+}
+
+#[derive(Clone, Debug, PartialEq, Eq, Hash)]
+struct FlagsType {
+ fields: Vec<String>,
+}
+
+#[derive(Clone, Debug, PartialEq, Eq, Hash)]
+struct EnumType {
+ variants: Vec<String>,
+}
+
+#[derive(Clone, Debug, PartialEq, Eq, Hash)]
+struct OptionType {
+ inner_ty: ComponentValType,
+}
+
+#[derive(Clone, Debug, PartialEq, Eq, Hash)]
+struct ResultType {
+ ok_ty: Option<ComponentValType>,
+ err_ty: Option<ComponentValType>,
+}
+
+#[derive(Debug)]
+struct ImportSection {
+ imports: Vec<Import>,
+}
+
+#[derive(Clone, Debug, PartialEq, Eq, Hash)]
+struct Import {
+ name: String,
+ url: Option<String>,
+ ty: ComponentTypeRef,
+}
+
+#[derive(Debug)]
+struct CanonicalSection {
+ funcs: Vec<Func>,
+}
+
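+/// A canonical function definition: `CanonLift` wraps a core function (by
+/// index) into a component-level function of type `func_ty`, while
+/// `CanonLower` wraps a component-level function into a core function.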
+#[derive(Debug)]
+enum Func {
+ CanonLift {
+ func_ty: u32,
+ options: Vec<CanonOpt>,
+ core_func_index: u32,
+ },
+ CanonLower {
+ options: Vec<CanonOpt>,
+ func_index: u32,
+ },
+}
+
+#[derive(Debug)]
+enum CanonOpt {
+ StringUtf8,
+ StringUtf16,
+ StringLatin1Utf16,
+ Memory(u32),
+ Realloc(u32),
+ PostReturn(u32),
+}
+
+#[derive(Debug)]
+struct InstanceSection {}
+
+#[derive(Debug)]
+struct ExportSection {}
+
+#[derive(Debug)]
+struct StartSection {}
+
+#[derive(Debug)]
+struct AliasSection {}
+
+#[derive(Debug)]
+struct CoreInstanceSection {}
+
+#[derive(Debug)]
+struct CoreTypeSection {
+ types: Vec<Rc<CoreType>>,
+}
diff --git a/third_party/rust/wasm-smith/src/component/encode.rs b/third_party/rust/wasm-smith/src/component/encode.rs
new file mode 100644
index 0000000000..7963ecfae5
--- /dev/null
+++ b/third_party/rust/wasm-smith/src/component/encode.rs
@@ -0,0 +1,321 @@
+use std::borrow::Cow;
+
+use super::*;
+use wasm_encoder::{ComponentExportKind, ComponentOuterAliasKind, ExportKind};
+
+impl Component {
+ /// Encode this Wasm component into bytes.
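+ ///
+ /// # Example
+ ///
+ /// An illustrative sketch (not run as a doctest) of generating a random
+ /// component from raw input bytes and encoding it:
+ ///
+ /// ```ignore
+ /// use arbitrary::{Arbitrary, Unstructured};
+ /// use wasm_smith::Component;
+ ///
+ /// let mut u = Unstructured::new(&[0, 1, 2, 3]);
+ /// let component = Component::arbitrary(&mut u).unwrap();
+ /// let wasm_bytes: Vec<u8> = component.to_bytes();
+ /// ```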
+ pub fn to_bytes(&self) -> Vec<u8> {
+ self.encoded().finish()
+ }
+
+ fn encoded(&self) -> wasm_encoder::Component {
+ let mut component = wasm_encoder::Component::new();
+ for section in &self.sections {
+ section.encode(&mut component);
+ }
+ component
+ }
+}
+
+impl Section {
+ fn encode(&self, component: &mut wasm_encoder::Component) {
+ match self {
+ Self::Custom(sec) => sec.encode(component),
+ Self::CoreModule(module) => {
+ let bytes = module.to_bytes();
+ component.section(&wasm_encoder::RawSection {
+ id: wasm_encoder::ComponentSectionId::CoreModule as u8,
+ data: &bytes,
+ });
+ }
+ Self::CoreInstance(_) => todo!(),
+ Self::CoreType(sec) => sec.encode(component),
+ Self::Component(comp) => {
+ let bytes = comp.to_bytes();
+ component.section(&wasm_encoder::RawSection {
+ id: wasm_encoder::ComponentSectionId::Component as u8,
+ data: &bytes,
+ });
+ }
+ Self::Instance(_) => todo!(),
+ Self::Alias(_) => todo!(),
+ Self::Type(sec) => sec.encode(component),
+ Self::Canonical(sec) => sec.encode(component),
+ Self::Start(_) => todo!(),
+ Self::Import(sec) => sec.encode(component),
+ Self::Export(_) => todo!(),
+ }
+ }
+}
+
+impl CustomSection {
+ fn encode(&self, component: &mut wasm_encoder::Component) {
+ component.section(&wasm_encoder::CustomSection {
+ name: (&self.name).into(),
+ data: Cow::Borrowed(&self.data),
+ });
+ }
+}
+
+impl TypeSection {
+ fn encode(&self, component: &mut wasm_encoder::Component) {
+ let mut sec = wasm_encoder::ComponentTypeSection::new();
+ for ty in &self.types {
+ ty.encode(sec.ty());
+ }
+ component.section(&sec);
+ }
+}
+
+impl ImportSection {
+ fn encode(&self, component: &mut wasm_encoder::Component) {
+ let mut sec = wasm_encoder::ComponentImportSection::new();
+ for imp in &self.imports {
+ sec.import(&imp.name, imp.ty);
+ }
+ component.section(&sec);
+ }
+}
+
+impl CanonicalSection {
+ fn encode(&self, component: &mut wasm_encoder::Component) {
+ let mut sec = wasm_encoder::CanonicalFunctionSection::new();
+ for func in &self.funcs {
+ match func {
+ Func::CanonLift {
+ func_ty,
+ options,
+ core_func_index,
+ } => {
+ let options = translate_canon_opt(options);
+ sec.lift(*core_func_index, *func_ty, options);
+ }
+ Func::CanonLower {
+ options,
+ func_index,
+ } => {
+ let options = translate_canon_opt(options);
+ sec.lower(*func_index, options);
+ }
+ }
+ }
+ component.section(&sec);
+ }
+}
+
+impl CoreTypeSection {
+ fn encode(&self, component: &mut wasm_encoder::Component) {
+ let mut sec = wasm_encoder::CoreTypeSection::new();
+ for ty in &self.types {
+ ty.encode(sec.ty());
+ }
+ component.section(&sec);
+ }
+}
+
+impl CoreType {
+ fn encode(&self, enc: wasm_encoder::CoreTypeEncoder<'_>) {
+ match self {
+ Self::Func(ty) => {
+ enc.function(ty.params.iter().copied(), ty.results.iter().copied());
+ }
+ Self::Module(mod_ty) => {
+ let mut enc_mod_ty = wasm_encoder::ModuleType::new();
+ for def in &mod_ty.defs {
+ match def {
+ ModuleTypeDef::TypeDef(crate::core::CompositeType::Func(func_ty)) => {
+ enc_mod_ty.ty().function(
+ func_ty.params.iter().copied(),
+ func_ty.results.iter().copied(),
+ );
+ }
+ ModuleTypeDef::TypeDef(_) => {
+ unimplemented!("non-func types in a component's module type")
+ }
+ ModuleTypeDef::OuterAlias { count, i, kind } => match kind {
+ CoreOuterAliasKind::Type(_) => {
+ enc_mod_ty.alias_outer_core_type(*count, *i);
+ }
+ },
+ ModuleTypeDef::Import(imp) => {
+ enc_mod_ty.import(
+ &imp.module,
+ &imp.field,
+ crate::core::encode::translate_entity_type(&imp.entity_type),
+ );
+ }
+ ModuleTypeDef::Export(name, ty) => {
+ enc_mod_ty.export(name, crate::core::encode::translate_entity_type(ty));
+ }
+ }
+ }
+ enc.module(&enc_mod_ty);
+ }
+ }
+ }
+}
+
+impl Type {
+ fn encode(&self, enc: wasm_encoder::ComponentTypeEncoder<'_>) {
+ match self {
+ Self::Defined(ty) => {
+ ty.encode(enc.defined_type());
+ }
+ Self::Func(func_ty) => {
+ let mut f = enc.function();
+
+ f.params(func_ty.params.iter().map(|(name, ty)| (name.as_str(), *ty)));
+
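+ // A single unnamed result uses the compact `result` encoding; any
+ // other shape is expected to consist entirely of named results,
+ // hence the `unwrap` below.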
+ if let Some(ty) = func_ty.unnamed_result_ty() {
+ f.result(ty);
+ } else {
+ f.results(
+ func_ty
+ .results
+ .iter()
+ .map(|(name, ty)| (name.as_deref().unwrap(), *ty)),
+ );
+ }
+ }
+ Self::Component(comp_ty) => {
+ let mut enc_comp_ty = wasm_encoder::ComponentType::new();
+ for def in &comp_ty.defs {
+ match def {
+ ComponentTypeDef::Import(imp) => {
+ enc_comp_ty.import(&imp.name, imp.ty);
+ }
+ ComponentTypeDef::CoreType(ty) => {
+ ty.encode(enc_comp_ty.core_type());
+ }
+ ComponentTypeDef::Type(ty) => {
+ ty.encode(enc_comp_ty.ty());
+ }
+ ComponentTypeDef::Export { name, url: _, ty } => {
+ enc_comp_ty.export(name, *ty);
+ }
+ ComponentTypeDef::Alias(a) => {
+ enc_comp_ty.alias(translate_alias(a));
+ }
+ }
+ }
+ enc.component(&enc_comp_ty);
+ }
+ Self::Instance(inst_ty) => {
+ let mut enc_inst_ty = wasm_encoder::InstanceType::new();
+ for def in &inst_ty.defs {
+ match def {
+ InstanceTypeDecl::CoreType(ty) => {
+ ty.encode(enc_inst_ty.core_type());
+ }
+ InstanceTypeDecl::Type(ty) => {
+ ty.encode(enc_inst_ty.ty());
+ }
+ InstanceTypeDecl::Export { name, url: _, ty } => {
+ enc_inst_ty.export(name, *ty);
+ }
+ InstanceTypeDecl::Alias(a) => {
+ enc_inst_ty.alias(translate_alias(a));
+ }
+ }
+ }
+ enc.instance(&enc_inst_ty);
+ }
+ }
+ }
+}
+
+impl DefinedType {
+ fn encode(&self, enc: wasm_encoder::ComponentDefinedTypeEncoder<'_>) {
+ match self {
+ Self::Primitive(ty) => enc.primitive(*ty),
+ Self::Record(ty) => {
+ enc.record(ty.fields.iter().map(|(name, ty)| (name.as_str(), *ty)));
+ }
+ Self::Variant(ty) => {
+ enc.variant(
+ ty.cases
+ .iter()
+ .map(|(name, ty, refines)| (name.as_str(), *ty, *refines)),
+ );
+ }
+ Self::List(ty) => {
+ enc.list(ty.elem_ty);
+ }
+ Self::Tuple(ty) => {
+ enc.tuple(ty.fields.iter().copied());
+ }
+ Self::Flags(ty) => {
+ enc.flags(ty.fields.iter().map(|f| f.as_str()));
+ }
+ Self::Enum(ty) => {
+ enc.enum_type(ty.variants.iter().map(|v| v.as_str()));
+ }
+ Self::Option(ty) => {
+ enc.option(ty.inner_ty);
+ }
+ Self::Result(ty) => {
+ enc.result(ty.ok_ty, ty.err_ty);
+ }
+ }
+ }
+}
+
+fn translate_canon_opt(options: &[CanonOpt]) -> Vec<wasm_encoder::CanonicalOption> {
+ options
+ .iter()
+ .map(|o| match o {
+ CanonOpt::StringUtf8 => wasm_encoder::CanonicalOption::UTF8,
+ CanonOpt::StringUtf16 => wasm_encoder::CanonicalOption::UTF16,
+ CanonOpt::StringLatin1Utf16 => wasm_encoder::CanonicalOption::CompactUTF16,
+ CanonOpt::Memory(idx) => wasm_encoder::CanonicalOption::Memory(*idx),
+ CanonOpt::Realloc(idx) => wasm_encoder::CanonicalOption::Realloc(*idx),
+ CanonOpt::PostReturn(idx) => wasm_encoder::CanonicalOption::PostReturn(*idx),
+ })
+ .collect()
+}
+
+fn translate_alias(alias: &Alias) -> wasm_encoder::Alias<'_> {
+ match alias {
+ Alias::InstanceExport {
+ instance,
+ name,
+ kind,
+ } => wasm_encoder::Alias::InstanceExport {
+ instance: *instance,
+ name,
+ kind: match kind {
+ InstanceExportAliasKind::Module => ComponentExportKind::Module,
+ InstanceExportAliasKind::Component => ComponentExportKind::Component,
+ InstanceExportAliasKind::Instance => ComponentExportKind::Instance,
+ InstanceExportAliasKind::Func => ComponentExportKind::Func,
+ InstanceExportAliasKind::Value => ComponentExportKind::Value,
+ },
+ },
+ Alias::CoreInstanceExport {
+ instance,
+ name,
+ kind,
+ } => wasm_encoder::Alias::CoreInstanceExport {
+ instance: *instance,
+ name,
+ kind: match kind {
+ CoreInstanceExportAliasKind::Func => ExportKind::Func,
+ CoreInstanceExportAliasKind::Table => ExportKind::Table,
+ CoreInstanceExportAliasKind::Global => ExportKind::Global,
+ CoreInstanceExportAliasKind::Memory => ExportKind::Memory,
+ CoreInstanceExportAliasKind::Tag => ExportKind::Tag,
+ },
+ },
+ Alias::Outer { count, i, kind } => wasm_encoder::Alias::Outer {
+ count: *count,
+ index: *i,
+ kind: match kind {
+ OuterAliasKind::Module => ComponentOuterAliasKind::CoreModule,
+ OuterAliasKind::Component => ComponentOuterAliasKind::Component,
+ OuterAliasKind::Type(_) => ComponentOuterAliasKind::Type,
+ OuterAliasKind::CoreType(_) => ComponentOuterAliasKind::CoreType,
+ },
+ },
+ }
+}
diff --git a/third_party/rust/wasm-smith/src/config.rs b/third_party/rust/wasm-smith/src/config.rs
new file mode 100644
index 0000000000..b183530e4c
--- /dev/null
+++ b/third_party/rust/wasm-smith/src/config.rs
@@ -0,0 +1,622 @@
+//! Configuring the shape of generated Wasm modules.
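+//!
+//! A minimal sketch of overriding a couple of options and generating a
+//! module from raw input bytes (an all-zero seed simply produces a small,
+//! boring module):
+//!
+//! ```rust
+//! use arbitrary::Unstructured;
+//! use wasm_smith::{Config, Module};
+//!
+//! let mut config = Config::default();
+//! config.simd_enabled = true; // allow SIMD types and instructions
+//! config.max_memories = 2; // more than one memory implies multi-memory
+//!
+//! let mut u = Unstructured::new(&[0u8; 64]);
+//! let module = Module::new(config, &mut u).unwrap();
+//! let wasm_bytes = module.to_bytes();
+//! ```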
+
+use crate::InstructionKinds;
+use arbitrary::{Arbitrary, Result, Unstructured};
+
+macro_rules! define_config {
+ (
+ $(#[$attr:meta])*
+ pub struct Config {
+ $(
+ $(#[$field_attr:meta])*
+ pub $field:ident : $field_ty:ty = $default:expr,
+ )*
+ }
+ ) => {
+ $(#[$attr])*
+ pub struct Config {
+ /// The imports that may be used when generating the module.
+ ///
+ /// Defaults to `None` which means that any arbitrary import can be
+ /// generated.
+ ///
+ /// To only allow specific imports, set this field to the encoding of
+ /// a WebAssembly module which describes the allowed imports.
+ ///
+ /// Note that [`Self::min_imports`] is ignored when
+ /// `available_imports` is set.
+ ///
+ /// The value must be a valid binary encoding of a
+ /// WebAssembly module. `wasm-smith` will panic if the module cannot
+ /// be parsed.
+ ///
+ /// # Example
+ ///
+ /// An implementation of this method could use the `wat` crate to
+ /// provide a human-readable and maintainable description:
+ ///
+ /// ```rust
+ /// Some(wat::parse_str(r#"
+ /// (module
+ /// (import "env" "ping" (func (param i32)))
+ /// (import "env" "pong" (func (result i32)))
+ /// (import "env" "memory" (memory 1))
+ /// (import "env" "table" (table 1))
+ /// (import "env" "tag" (tag (param i32)))
+ /// )
+ /// "#))
+ /// # ;
+ /// ```
+ pub available_imports: Option<Vec<u8>>,
+
+ $(
+ $(#[$field_attr])*
+ pub $field: $field_ty,
+ )*
+ }
+
+ impl Default for Config {
+ fn default() -> Config {
+ Config {
+ available_imports: None,
+
+ $(
+ $field: $default,
+ )*
+ }
+ }
+ }
+
+ #[cfg(feature = "_internal_cli")]
+ #[doc(hidden)]
+ #[derive(Clone, Debug, Default, clap::Parser, serde_derive::Deserialize)]
+ #[serde(rename_all = "kebab-case")]
+ pub struct InternalOptionalConfig {
+ /// The imports that may be used when generating the module.
+ ///
+ /// When unspecified, any arbitrary import can be generated.
+ ///
+ /// To only allow specific imports, provide the path to a
+ /// WebAssembly module which describes the allowed imports.
+ ///
+ /// Note that [`Self::min_imports`] is ignored when
+ /// `available_imports` is set.
+ ///
+ /// The file must parse (in text or binary form) to a valid
+ /// WebAssembly module; building the configuration fails if it
+ /// cannot be parsed.
+ #[cfg_attr(feature = "clap", clap(long))]
+ available_imports: Option<std::path::PathBuf>,
+
+ $(
+ $(#[$field_attr])*
+ #[cfg_attr(feature = "clap", clap(long))]
+ pub $field: Option<$field_ty>,
+ )*
+ }
+
+ #[cfg(feature = "_internal_cli")]
+ impl InternalOptionalConfig {
+ pub fn or(self, other: Self) -> Self {
+ Self {
+ available_imports: self.available_imports.or(other.available_imports),
+
+ $(
+ $field: self.$field.or(other.$field),
+ )*
+ }
+ }
+ }
+
+ #[cfg(feature = "_internal_cli")]
+ impl TryFrom<InternalOptionalConfig> for Config {
+ type Error = anyhow::Error;
+ fn try_from(config: InternalOptionalConfig) -> anyhow::Result<Config> {
+ let default = Config::default();
+ Ok(Config {
+ available_imports: if let Some(file) = config
+ .available_imports
+ .as_ref() {
+ Some(wat::parse_file(file)?)
+ } else {
+ None
+ },
+
+ $(
+ $field: config.$field.unwrap_or(default.$field),
+ )*
+ })
+ }
+ }
+ }
+}
+
+define_config! {
+ /// Configuration for a generated module.
+ ///
+ /// Don't care to configure your generated modules? Just use
+ /// [`Module::arbitrary`][crate::Module], which internally uses the default
+ /// configuration.
+ ///
+ /// Want control over the shape of the module that gets generated? Create a
+ /// `Config` and then pass it to [`Module::new`][crate::Module::new].
+ ///
+ /// # Swarm Testing
+ ///
+ /// You can use the `Arbitrary for Config` implementation for [swarm
+ /// testing]. This will dynamically -- but still deterministically -- choose
+ /// configuration options for you.
+ ///
+ /// [swarm testing]: https://www.cs.utah.edu/~regehr/papers/swarm12.pdf
+ ///
+ /// Note that we pick only *maximums*, not minimums, here because it is more
+ /// complex to describe the domain of valid configs when minima are involved
+ /// (`min <= max` for each variable) and minima are mostly used to ensure
+ /// certain elements are present, but do not widen the range of generated
+ /// Wasm modules.
+ #[derive(Clone, Debug)]
+ pub struct Config {
+ /// Determines whether a `start` export may be included. Defaults to `true`.
+ pub allow_start_export: bool = true,
+
+ /// The kinds of instructions allowed in the generated wasm
+ /// programs. Defaults to all.
+ ///
+ /// The categories of instructions match the categories used by the
+ /// [WebAssembly
+ /// specification](https://webassembly.github.io/spec/core/syntax/instructions.html);
+ /// e.g., numeric, vector, control, memory, etc.
+ ///
+ /// Note that this setting is separate from the proposal flags; that
+ /// is, if `simd_enabled` is `true` but `allowed_instructions` does
+ /// not include vector instructions, the generated programs will not
+ /// include these instructions but could still contain vector types.
+ pub allowed_instructions: InstructionKinds = InstructionKinds::all(),
+
+ /// Determines whether the bulk memory proposal is enabled for
+ /// generating instructions.
+ ///
+ /// Defaults to `false`.
+ pub bulk_memory_enabled: bool = false,
+
+ /// Returns whether NaN values are canonicalized after all f32/f64
+ /// operations. Defaults to `false`.
+ ///
+ /// This can be useful when a generated wasm module is executed in
+ /// multiple runtimes which may produce different NaN values. This
+ /// ensures that the generated module will always use the same NaN
+ /// representation for all instructions which have visible side effects,
+ /// for example writing floats to memory or float-to-int bitcast
+ /// instructions.
+ pub canonicalize_nans: bool = false,
+
+ /// Returns whether we should avoid generating code that will possibly
+ /// trap.
+ ///
+ /// For some trapping instructions, this will emit extra instructions to
+ /// ensure they don't trap, while some instructions will simply be
+ /// excluded. In cases where we would run into a trap, we instead
+ /// choose some arbitrary non-trapping behavior. For example, if we
+ /// detect that a Load instruction would attempt to access out-of-bounds
+ /// memory, we instead pretend the load succeeded and push 0 onto the
+ /// stack.
+ ///
+ /// One type of trap that we can't currently avoid is
+ /// StackOverflow. Even when `disallow_traps` is set to true, wasm-smith
+ /// will eventually generate a program that infinitely recurses, causing
+ /// the call stack to be exhausted.
+ ///
+ /// Defaults to `false`.
+ pub disallow_traps: bool = false,
+
+ /// Determines whether the exception-handling proposal is enabled for
+ /// generating instructions.
+ ///
+ /// Defaults to `false`.
+ pub exceptions_enabled: bool = false,
+
+ /// Export all WebAssembly objects in the module. Defaults to false.
+ ///
+ /// This overrides [`Config::min_exports`] and [`Config::max_exports`].
+ pub export_everything: bool = false,
+
+ /// Determines whether the GC proposal is enabled when generating a Wasm
+ /// module.
+ ///
+ /// Defaults to `false`.
+ pub gc_enabled: bool = false,
+
+ /// Returns whether we should generate custom sections or not. Defaults
+ /// to false.
+ pub generate_custom_sections: bool = false,
+
+ /// Returns the maximal size of the `alias` section. Defaults to 1000.
+ pub max_aliases: usize = 1000,
+
+ /// The maximum number of components to use. Defaults to 10.
+ ///
+ /// This includes imported components.
+ ///
+ /// Note that this is only relevant for components.
+ pub max_components: usize = 10,
+
+ /// The maximum number of data segments to generate. Defaults to 100.
+ pub max_data_segments: usize = 100,
+
+ /// The maximum number of element segments to generate. Defaults to 100.
+ pub max_element_segments: usize = 100,
+
+ /// The maximum number of elements within a segment to
+ /// generate. Defaults to 100.
+ pub max_elements: usize = 100,
+
+ /// The maximum number of exports to generate. Defaults to 100.
+ pub max_exports: usize = 100,
+
+ /// The maximum number of functions to generate. Defaults to 100. This
+ /// includes imported functions.
+ pub max_funcs: usize = 100,
+
+ /// The maximum number of globals to generate. Defaults to 100. This
+ /// includes imported globals.
+ pub max_globals: usize = 100,
+
+ /// The maximum number of imports to generate. Defaults to 100.
+ pub max_imports: usize = 100,
+
+ /// The maximum number of instances to use. Defaults to 10.
+ ///
+ /// This includes imported instances.
+ ///
+ /// Note that this is only relevant for components.
+ pub max_instances: usize = 10,
+
+ /// The maximum number of instructions to generate in a function
+ /// body. Defaults to 100.
+ ///
+ /// Note that some additional `end`s, `else`s, and `unreachable`s may be
+ /// appended to the function body to finish block scopes.
+ pub max_instructions: usize = 100,
+
+ /// The maximum number of memories to use. Defaults to 1.
+ ///
+ /// This includes imported memories.
+ ///
+ /// Note that more than one memory is in the realm of the multi-memory
+ /// wasm proposal.
+ pub max_memories: usize = 1,
+
+ /// The maximum, in 64k Wasm pages, of any 32-bit memory's initial or
+ /// maximum size.
+ ///
+ /// Defaults to 2^16.
+ pub max_memory32_pages: u64 = 1 << 16,
+
+ /// The maximum, in 64k Wasm pages, of any 64-bit memory's initial or
+ /// maximum size.
+ ///
+ /// Defaults to 2^48.
+ pub max_memory64_pages: u64 = 1 << 48,
+
+ /// The maximum number of modules to use. Defaults to 10.
+ ///
+ /// This includes imported modules.
+ ///
+ /// Note that this is only relevant for components.
+ pub max_modules: usize = 10,
+
+ /// Returns the maximal nesting depth of modules generated with the
+ /// component model proposal. Defaults to 10.
+ pub max_nesting_depth: usize = 10,
+
+ /// The maximum, in elements, of any table's initial or maximum
+ /// size. Defaults to 1 million.
+ pub max_table_elements: u32 = 1_000_000,
+
+ /// The maximum number of tables to use. Defaults to 1.
+ ///
+ /// This includes imported tables.
+ ///
+ /// Note that more than one table is in the realm of the reference types
+ /// proposal.
+ pub max_tables: usize = 1,
+
+ /// The maximum number of tags to generate. Defaults to 100.
+ pub max_tags: usize = 100,
+
+ /// Returns the maximal effective size of any type generated by
+ /// wasm-smith.
+ ///
+ /// Note that this number is roughly in units of "how many types would
+ /// be needed to represent the recursive type". A function with 8
+ /// parameters and 2 results would take 11 types (one for the type, 10
+ /// for params/results). A module type with 2 imports and 3 exports
+ /// would take 6 (module + imports + exports) plus the size of each
+ /// import/export type. This is a somewhat rough measurement that is not
+ /// intended to be very precise.
+ ///
+ /// Defaults to 1000.
+ pub max_type_size: u32 = 1000,
+
+ /// The maximum number of types to generate. Defaults to 100.
+ pub max_types: usize = 100,
+
+ /// The maximum number of values to use. Defaults to 10.
+ ///
+ /// This includes imported values.
+ ///
+ /// Note that this is irrelevant unless value model support is enabled.
+ pub max_values: usize = 10,
+
+ /// Returns whether 64-bit memories are allowed. Defaults to false.
+ ///
+ /// Note that this is the gate for the memory64 proposal to WebAssembly.
+ pub memory64_enabled: bool = false,
+
+ /// Whether every Wasm memory must have a maximum size
+ /// specified. Defaults to `false`.
+ pub memory_max_size_required: bool = false,
+
+ /// Control the probability of generating memory offsets that are in
+ /// bounds vs. potentially out of bounds.
+ ///
+ /// See the `MemoryOffsetChoices` struct for details.
+ pub memory_offset_choices: MemoryOffsetChoices = MemoryOffsetChoices::default(),
+
+ /// The minimum number of data segments to generate. Defaults to 0.
+ pub min_data_segments: usize = 0,
+
+ /// The minimum number of element segments to generate. Defaults to 0.
+ pub min_element_segments: usize = 0,
+
+ /// The minimum number of elements within a segment to
+ /// generate. Defaults to 0.
+ pub min_elements: usize = 0,
+
+ /// The minimum number of exports to generate. Defaults to 0.
+ pub min_exports: usize = 0,
+
+ /// The minimum number of functions to generate. Defaults to 0.
+ ///
+ /// This includes imported functions.
+ pub min_funcs: usize = 0,
+
+ /// The minimum number of globals to generate. Defaults to 0.
+ ///
+ /// This includes imported globals.
+ pub min_globals: usize = 0,
+
+ /// The minimum number of imports to generate. Defaults to 0.
+ ///
+ /// Note that if the sum of the maximum function[^1], table, global and
+ /// memory counts is less than the minimum number of imports, then it
+ /// will not be possible to satisfy all constraints (because imports
+ /// count against the limits for those element kinds). In that case, we
+ /// strictly follow the max-constraints, and can fail to satisfy this
+ /// minimum number.
+ ///
+ /// [^1]: the maximum number of functions is also limited by the number
+ /// of function types arbitrarily chosen; strictly speaking, then, the
+ /// maximum number of imports that can be created due to max-constraints
+ /// is `sum(min(num_func_types, max_funcs), max_tables, max_globals,
+ /// max_memories)`.
+ pub min_imports: usize = 0,
+
+ /// The minimum number of memories to use. Defaults to 0.
+ ///
+ /// This includes imported memories.
+ pub min_memories: u32 = 0,
+
+ /// The minimum number of tables to use. Defaults to 0.
+ ///
+ /// This includes imported tables.
+ pub min_tables: u32 = 0,
+
+ /// The minimum number of tags to generate. Defaults to 0.
+ pub min_tags: usize = 0,
+
+ /// The minimum number of types to generate. Defaults to 0.
+ pub min_types: usize = 0,
+
+ /// The minimum size, in bytes, of all leb-encoded integers. Defaults to
+ /// 1.
+ ///
+ /// This is useful for ensuring that all leb-encoded integers are
+ /// decoded as such rather than as simply one byte. This will forcibly
+ /// extend leb integers with an over-long encoding in some locations if
+ /// the size would otherwise be smaller than the number configured here.
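+ ///
+ /// For example, with a minimum size of 2, the value `3` (normally the
+ /// single byte `0x03`) is emitted as the two bytes `0x83 0x00`.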
+ pub min_uleb_size: u8 = 1,
+
+ /// Determines whether the multi-value results are enabled.
+ ///
+ /// Defaults to `true`.
+ pub multi_value_enabled: bool = true,
+
+ /// Determines whether the reference types proposal is enabled for
+ /// generating instructions.
+ ///
+ /// Defaults to `false`.
+ pub reference_types_enabled: bool = false,
+
+ /// Determines whether the Relaxed SIMD proposal is enabled for
+ /// generating instructions.
+ ///
+ /// Defaults to `false`.
+ pub relaxed_simd_enabled: bool = false,
+
+ /// Determines whether the nontrapping-float-to-int-conversions proposal
+ /// is enabled.
+ ///
+ /// Defaults to `true`.
+ pub saturating_float_to_int_enabled: bool = true,
+
+ /// Determines whether the sign-extension-ops proposal is enabled.
+ ///
+ /// Defaults to `true`.
+ pub sign_extension_ops_enabled: bool = true,
+
+ /// Determines whether the SIMD proposal is enabled for generating
+ /// instructions.
+ ///
+ /// Defaults to `false`.
+ pub simd_enabled: bool = false,
+
+ /// Determines whether the tail calls proposal is enabled for generating
+ /// instructions.
+ ///
+ /// Defaults to `false`.
+ pub tail_call_enabled: bool = false,
+
+ /// Whether every Wasm table must have a maximum size
+ /// specified. Defaults to `false`.
+ pub table_max_size_required: bool = false,
+
+ /// Determines whether the threads proposal is enabled.
+ ///
+ /// The [threads proposal] involves shared linear memory, new atomic
+ /// instructions, and new `wait` and `notify` instructions.
+ ///
+ /// [threads proposal]: https://github.com/WebAssembly/threads/blob/master/proposals/threads/Overview.md
+ ///
+ /// Defaults to `false`.
+ pub threads_enabled: bool = false,
+ }
+}
+
+/// This is a tuple `(a, b, c)` where
+///
+/// * `a / (a+b+c)` is the probability of generating a memory offset within
+/// `0..memory.min_size`, i.e. an offset that is definitely in bounds of a
+/// non-empty memory. (Note that if a memory is zero-sized, however, no offset
+/// will ever be in bounds.)
+///
+/// * `b / (a+b+c)` is the probability of generating a memory offset within
+/// `memory.min_size..memory.max_size`, i.e. an offset that is possibly in
+/// bounds if the memory has been grown.
+///
+/// * `c / (a+b+c)` is the probability of generating a memory offset within the
+/// range `memory.max_size..`, i.e. an offset that is definitely out of
+/// bounds.
+///
+/// At least one of `a`, `b`, and `c` must be non-zero.
+///
+/// If you want to always generate memory offsets that are definitely in bounds
+/// of a non-zero-sized memory, for example, you could use `(1, 0, 0)`.
+///
+/// The default is `(90, 9, 1)`.
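+///
+/// With the default `(90, 9, 1)`, for example, a generated offset is
+/// definitely in bounds 90% of the time, possibly in bounds (if the memory
+/// has been grown) 9% of the time, and definitely out of bounds 1% of the
+/// time.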
+#[derive(Clone, Debug)]
+#[cfg_attr(feature = "serde_derive", derive(serde_derive::Deserialize))]
+pub struct MemoryOffsetChoices(pub u32, pub u32, pub u32);
+
+impl Default for MemoryOffsetChoices {
+ fn default() -> Self {
+ MemoryOffsetChoices(90, 9, 1)
+ }
+}
+
+#[cfg(feature = "_internal_cli")]
+impl std::str::FromStr for MemoryOffsetChoices {
+ type Err = String;
+ fn from_str(s: &str) -> Result<Self, Self::Err> {
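+ // Parse exactly three comma-separated `u32`s, e.g. "90,9,1".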
+ use std::str::FromStr;
+ let mut parts = s.split(",");
+ let a = parts
+ .next()
+ .ok_or_else(|| "need 3 comma separated values".to_string())?;
+ let a = <u32 as FromStr>::from_str(a).map_err(|e| e.to_string())?;
+ let b = parts
+ .next()
+ .ok_or_else(|| "need 3 comma separated values".to_string())?;
+ let b = <u32 as FromStr>::from_str(b).map_err(|e| e.to_string())?;
+ let c = parts
+ .next()
+ .ok_or_else(|| "need 3 comma separated values".to_string())?;
+ let c = <u32 as FromStr>::from_str(c).map_err(|e| e.to_string())?;
+ if parts.next().is_some() {
+ return Err("found more than 3 comma separated values".to_string());
+ }
+ Ok(MemoryOffsetChoices(a, b, c))
+ }
+}
+
+impl<'a> Arbitrary<'a> for Config {
+ fn arbitrary(u: &mut Unstructured<'a>) -> Result<Self> {
+ const MAX_MAXIMUM: usize = 1000;
+
+ let reference_types_enabled: bool = u.arbitrary()?;
+ let max_tables = if reference_types_enabled { 100 } else { 1 };
+
+ Ok(Config {
+ max_types: u.int_in_range(0..=MAX_MAXIMUM)?,
+ max_imports: u.int_in_range(0..=MAX_MAXIMUM)?,
+ max_tags: u.int_in_range(0..=MAX_MAXIMUM)?,
+ max_funcs: u.int_in_range(0..=MAX_MAXIMUM)?,
+ max_globals: u.int_in_range(0..=MAX_MAXIMUM)?,
+ max_exports: u.int_in_range(0..=MAX_MAXIMUM)?,
+ max_element_segments: u.int_in_range(0..=MAX_MAXIMUM)?,
+ max_elements: u.int_in_range(0..=MAX_MAXIMUM)?,
+ max_data_segments: u.int_in_range(0..=MAX_MAXIMUM)?,
+ max_instructions: u.int_in_range(0..=MAX_MAXIMUM)?,
+ max_memories: u.int_in_range(0..=100)?,
+ max_tables,
+ max_memory32_pages: u.int_in_range(0..=1 << 16)?,
+ max_memory64_pages: u.int_in_range(0..=1 << 48)?,
+ min_uleb_size: u.int_in_range(0..=5)?,
+ bulk_memory_enabled: u.arbitrary()?,
+ reference_types_enabled,
+ simd_enabled: u.arbitrary()?,
+ multi_value_enabled: u.arbitrary()?,
+ max_aliases: u.int_in_range(0..=MAX_MAXIMUM)?,
+ max_nesting_depth: u.int_in_range(0..=10)?,
+ saturating_float_to_int_enabled: u.arbitrary()?,
+ sign_extension_ops_enabled: u.arbitrary()?,
+ allowed_instructions: {
+ use flagset::Flags;
+ let mut allowed = Vec::new();
+ for kind in crate::core::InstructionKind::LIST {
+ if u.arbitrary()? {
+ allowed.push(*kind);
+ }
+ }
+ InstructionKinds::new(&allowed)
+ },
+ table_max_size_required: u.arbitrary()?,
+ max_table_elements: u.int_in_range(0..=1_000_000)?,
+
+ // These fields, unlike the ones above, are less useful to set.
+ // They either make weird inputs or are for features not widely
+ // implemented yet so they're turned off by default.
+ min_types: 0,
+ min_imports: 0,
+ min_tags: 0,
+ min_funcs: 0,
+ min_globals: 0,
+ min_exports: 0,
+ min_element_segments: 0,
+ min_elements: 0,
+ min_data_segments: 0,
+ min_memories: 0,
+ min_tables: 0,
+ memory_max_size_required: false,
+ max_instances: 0,
+ max_modules: 0,
+ max_components: 0,
+ max_values: 0,
+ memory_offset_choices: MemoryOffsetChoices::default(),
+ allow_start_export: true,
+ relaxed_simd_enabled: false,
+ exceptions_enabled: false,
+ memory64_enabled: false,
+ max_type_size: 1000,
+ canonicalize_nans: false,
+ available_imports: None,
+ threads_enabled: false,
+ export_everything: false,
+ disallow_traps: false,
+ tail_call_enabled: false,
+ gc_enabled: false,
+ generate_custom_sections: false,
+ })
+ }
+}
diff --git a/third_party/rust/wasm-smith/src/core.rs b/third_party/rust/wasm-smith/src/core.rs
new file mode 100644
index 0000000000..6a2836ae0e
--- /dev/null
+++ b/third_party/rust/wasm-smith/src/core.rs
@@ -0,0 +1,2404 @@
+//! Generating arbitrary core Wasm modules.
+
+mod code_builder;
+pub(crate) mod encode;
+mod terminate;
+
+use crate::{arbitrary_loop, limited_string, unique_string, Config};
+use arbitrary::{Arbitrary, Result, Unstructured};
+use code_builder::CodeBuilderAllocations;
+use flagset::{flags, FlagSet};
+use std::collections::{HashMap, HashSet};
+use std::convert::TryFrom;
+use std::ops::Range;
+use std::rc::Rc;
+use std::str::{self, FromStr};
+use wasm_encoder::{
+ ArrayType, BlockType, ConstExpr, ExportKind, FieldType, HeapType, RefType, StorageType,
+ StructType, ValType,
+};
+pub(crate) use wasm_encoder::{GlobalType, MemoryType, TableType};
+
+// NB: these constants are used to control the rate at which various events
+// occur. For more information see where these constants are used. Their values
+// are somewhat random in the sense that they're not scientifically determined
+// or anything like that, I just threw a bunch of random data at wasm-smith and
+// measured various rates of ooms/traps/etc and adjusted these so abnormal
+// events were ~1% of the time.
+const CHANCE_OFFSET_INBOUNDS: usize = 10; // bigger = fewer traps
+const CHANCE_SEGMENT_ON_EMPTY: usize = 10; // bigger = fewer traps
+const PCT_INBOUNDS: f64 = 0.995; // bigger = fewer traps
+
+type Instruction = wasm_encoder::Instruction<'static>;
+
+/// A pseudo-random WebAssembly module.
+///
+/// Construct instances of this type (with default configuration) with [the
+/// `Arbitrary`
+/// trait](https://docs.rs/arbitrary/*/arbitrary/trait.Arbitrary.html).
+///
+/// ## Configuring Generated Modules
+///
+/// To configure the shape of generated modules, create a
+/// [`Config`][crate::Config] and then call [`Module::new`][crate::Module::new]
+/// with it.
+#[derive(Debug)]
+pub struct Module {
+ config: Config,
+ duplicate_imports_behavior: DuplicateImportsBehavior,
+ valtypes: Vec<ValType>,
+
+ /// All types locally defined in this module (available in the type index
+ /// space).
+ types: Vec<SubType>,
+
+ /// Non-overlapping ranges within `types` that belong to the same rec
+ /// group. All of `types` is covered by these ranges. When GC is not
+ /// enabled, these are all single-element ranges.
+ rec_groups: Vec<Range<usize>>,
+
+ /// A map from a super type to all of its sub types.
+ super_to_sub_types: HashMap<u32, Vec<u32>>,
+
+ /// Indices within `types` that are not final types.
+ can_subtype: Vec<u32>,
+
+ /// Whether we should encode a types section, even if `self.types` is empty.
+ should_encode_types: bool,
+
+ /// All of this module's imports. These don't have their own index space,
+ /// but instead introduce entries to each imported entity's associated index
+ /// space.
+ imports: Vec<Import>,
+
+ /// Whether we should encode an imports section, even if `self.imports` is
+ /// empty.
+ should_encode_imports: bool,
+
+ /// Indices within `types` that are array types.
+ array_types: Vec<u32>,
+
+ /// Indices within `types` that are function types.
+ func_types: Vec<u32>,
+
+ /// Indices within `types` that are struct types.
+ struct_types: Vec<u32>,
+
+ /// Number of imported items into this module.
+ num_imports: usize,
+
+ /// The number of tags defined in this module (not imported or
+ /// aliased).
+ num_defined_tags: usize,
+
+ /// The number of functions defined in this module (not imported or
+ /// aliased).
+ num_defined_funcs: usize,
+
+ /// The number of tables defined in this module (not imported or
+ /// aliased).
+ num_defined_tables: usize,
+
+ /// The number of memories defined in this module (not imported or
+ /// aliased).
+ num_defined_memories: usize,
+
+ /// The indexes and initialization expressions of globals defined in this
+ /// module.
+ defined_globals: Vec<(u32, GlobalInitExpr)>,
+
+ /// All tags available to this module, sorted by their index. The list
+ /// entry is the type of each tag.
+ tags: Vec<TagType>,
+
+ /// All functions available to this module, sorted by their index. The list
+ /// entry points to the index in this module where the function type is
+ /// defined (if available) and provides the type of the function.
+ funcs: Vec<(u32, Rc<FuncType>)>,
+
+ /// All tables available to this module, sorted by their index. The list
+ /// entry is the type of each table.
+ tables: Vec<TableType>,
+
+ /// All globals available to this module, sorted by their index. The list
+ /// entry is the type of each global.
+ globals: Vec<GlobalType>,
+
+ /// All memories available to this module, sorted by their index. The list
+ /// entry is the type of each memory.
+ memories: Vec<MemoryType>,
+
+ exports: Vec<(String, ExportKind, u32)>,
+ start: Option<u32>,
+ elems: Vec<ElementSegment>,
+ code: Vec<Code>,
+ data: Vec<DataSegment>,
+
+ /// The predicted size of the effective type of this module, based on
+ /// the sizes of the types of this module's imports and exports.
+ type_size: u32,
+
+ /// Names currently exported from this module.
+ export_names: HashSet<String>,
+}
+
+impl<'a> Arbitrary<'a> for Module {
+ fn arbitrary(u: &mut Unstructured<'a>) -> Result<Self> {
+ Module::new(Config::default(), u)
+ }
+}
+
+#[derive(Debug, Clone, Copy, PartialEq, Eq)]
+pub(crate) enum DuplicateImportsBehavior {
+ Allowed,
+ Disallowed,
+}
+
+impl Module {
+ /// Returns a reference to the internal configuration.
+ pub fn config(&self) -> &Config {
+ &self.config
+ }
+
+ /// Creates a new `Module` with the given `config`, drawing all of its
+ /// pseudo-random choices (its "DNA") from `u`.
+ pub fn new(config: Config, u: &mut Unstructured<'_>) -> Result<Self> {
+ Self::new_internal(config, u, DuplicateImportsBehavior::Allowed)
+ }
+
+ pub(crate) fn new_internal(
+ config: Config,
+ u: &mut Unstructured<'_>,
+ duplicate_imports_behavior: DuplicateImportsBehavior,
+ ) -> Result<Self> {
+ let mut module = Module::empty(config, duplicate_imports_behavior);
+ module.build(u, false)?;
+ Ok(module)
+ }
+
+ fn empty(config: Config, duplicate_imports_behavior: DuplicateImportsBehavior) -> Self {
+ Module {
+ config,
+ duplicate_imports_behavior,
+ valtypes: Vec::new(),
+ types: Vec::new(),
+ rec_groups: Vec::new(),
+ can_subtype: Vec::new(),
+ super_to_sub_types: HashMap::new(),
+ should_encode_types: false,
+ imports: Vec::new(),
+ should_encode_imports: false,
+ array_types: Vec::new(),
+ func_types: Vec::new(),
+ struct_types: Vec::new(),
+ num_imports: 0,
+ num_defined_tags: 0,
+ num_defined_funcs: 0,
+ num_defined_tables: 0,
+ num_defined_memories: 0,
+ defined_globals: Vec::new(),
+ tags: Vec::new(),
+ funcs: Vec::new(),
+ tables: Vec::new(),
+ globals: Vec::new(),
+ memories: Vec::new(),
+ exports: Vec::new(),
+ start: None,
+ elems: Vec::new(),
+ code: Vec::new(),
+ data: Vec::new(),
+ type_size: 0,
+ export_names: HashSet::new(),
+ }
+ }
+}
+
+/// Same as [`Module`], but may be invalid.
+///
+/// This module generates function bodies differently than `Module` to try to
+/// better explore wasm decoders and such.
+#[derive(Debug)]
+pub struct MaybeInvalidModule {
+ module: Module,
+}
+
+impl MaybeInvalidModule {
+ /// Encode this Wasm module into bytes.
+ pub fn to_bytes(&self) -> Vec<u8> {
+ self.module.to_bytes()
+ }
+}
+
+impl<'a> Arbitrary<'a> for MaybeInvalidModule {
+ fn arbitrary(u: &mut Unstructured<'a>) -> Result<Self> {
+ let mut module = Module::empty(Config::default(), DuplicateImportsBehavior::Allowed);
+ module.build(u, true)?;
+ Ok(MaybeInvalidModule { module })
+ }
+}
+
+#[derive(Clone, Debug, PartialEq, Eq, Hash)]
+pub(crate) struct RecGroup {
+ pub(crate) types: Vec<SubType>,
+}
+
+#[derive(Clone, Debug, PartialEq, Eq, Hash)]
+pub(crate) struct SubType {
+ pub(crate) is_final: bool,
+ pub(crate) supertype: Option<u32>,
+ pub(crate) composite_type: CompositeType,
+}
+
+impl SubType {
+ fn unwrap_struct(&self) -> &StructType {
+ self.composite_type.unwrap_struct()
+ }
+
+ fn unwrap_func(&self) -> &Rc<FuncType> {
+ self.composite_type.unwrap_func()
+ }
+
+ fn unwrap_array(&self) -> &ArrayType {
+ self.composite_type.unwrap_array()
+ }
+}
+
+#[derive(Clone, Debug, PartialEq, Eq, Hash)]
+pub(crate) enum CompositeType {
+ Array(ArrayType),
+ Func(Rc<FuncType>),
+ Struct(StructType),
+}
+
+impl CompositeType {
+ fn unwrap_struct(&self) -> &StructType {
+ match self {
+ CompositeType::Struct(s) => s,
+ _ => panic!("not a struct"),
+ }
+ }
+
+ fn unwrap_func(&self) -> &Rc<FuncType> {
+ match self {
+ CompositeType::Func(f) => f,
+ _ => panic!("not a func"),
+ }
+ }
+
+ fn unwrap_array(&self) -> &ArrayType {
+ match self {
+ CompositeType::Array(a) => a,
+ _ => panic!("not an array"),
+ }
+ }
+}
+
+/// A function signature.
+#[derive(Clone, Debug, PartialEq, Eq, Hash, PartialOrd, Ord)]
+pub(crate) struct FuncType {
+ /// Types of the parameter values.
+ pub(crate) params: Vec<ValType>,
+ /// Types of the result values.
+ pub(crate) results: Vec<ValType>,
+}
+
+/// An import of an entity provided externally or by a component.
+#[derive(Clone, Debug, PartialEq, Eq, Hash)]
+pub(crate) struct Import {
+ /// The name of the module providing this entity.
+ pub(crate) module: String,
+ /// The name of the entity.
+ pub(crate) field: String,
+ /// The type of this entity.
+ pub(crate) entity_type: EntityType,
+}
+
+/// Type of an entity.
+#[derive(Clone, Debug, PartialEq, Eq, Hash)]
+pub(crate) enum EntityType {
+ /// A global entity.
+ Global(GlobalType),
+ /// A table entity.
+ Table(TableType),
+ /// A memory entity.
+ Memory(MemoryType),
+ /// A tag entity.
+ Tag(TagType),
+ /// A function entity.
+ Func(u32, Rc<FuncType>),
+}
+
+/// Type of a tag.
+#[derive(Clone, Debug, PartialEq, Eq, Hash)]
+pub(crate) struct TagType {
+ /// Index of the function type.
+ func_type_idx: u32,
+ /// Type of the function.
+ func_type: Rc<FuncType>,
+}
+
+#[derive(Debug)]
+struct ElementSegment {
+ kind: ElementKind,
+ ty: RefType,
+ items: Elements,
+}
+
+#[derive(Debug)]
+enum ElementKind {
+ Passive,
+ Declared,
+ Active {
+ table: Option<u32>, // None == table 0 implicitly
+ offset: Offset,
+ },
+}
+
+#[derive(Debug)]
+enum Elements {
+ Functions(Vec<u32>),
+ Expressions(Vec<Option<u32>>),
+}
+
+#[derive(Debug)]
+struct Code {
+ locals: Vec<ValType>,
+ instructions: Instructions,
+}
+
+#[derive(Debug)]
+enum Instructions {
+ Generated(Vec<Instruction>),
+ Arbitrary(Vec<u8>),
+}
+
+#[derive(Debug)]
+struct DataSegment {
+ kind: DataSegmentKind,
+ init: Vec<u8>,
+}
+
+#[derive(Debug)]
+enum DataSegmentKind {
+ Passive,
+ Active { memory_index: u32, offset: Offset },
+}
+
+#[derive(Debug)]
+pub(crate) enum Offset {
+ Const32(i32),
+ Const64(i64),
+ Global(u32),
+}
+
+#[derive(Debug)]
+pub(crate) enum GlobalInitExpr {
+ FuncRef(u32),
+ ConstExpr(ConstExpr),
+}
+
+impl Module {
+ fn build(&mut self, u: &mut Unstructured, allow_invalid: bool) -> Result<()> {
+ self.valtypes = configured_valtypes(&self.config);
+
+ // We attempt to figure out our available imports *before* creating the types section here,
+ // because the types for the imports are already well-known (specified by the user) and we
+ // must have those populated for all function/etc. imports, no matter what.
+ //
+ // This can affect the available capacity for types and such.
+ if self.arbitrary_imports_from_available(u)? {
+ self.arbitrary_types(u)?;
+ } else {
+ self.arbitrary_types(u)?;
+ self.arbitrary_imports(u)?;
+ }
+
+ self.should_encode_types = !self.types.is_empty() || u.arbitrary()?;
+ self.should_encode_imports = !self.imports.is_empty() || u.arbitrary()?;
+
+ self.arbitrary_tags(u)?;
+ self.arbitrary_funcs(u)?;
+ self.arbitrary_tables(u)?;
+ self.arbitrary_memories(u)?;
+ self.arbitrary_globals(u)?;
+ self.arbitrary_exports(u)?;
+ self.arbitrary_start(u)?;
+ self.arbitrary_elems(u)?;
+ self.arbitrary_data(u)?;
+ self.arbitrary_code(u, allow_invalid)?;
+ Ok(())
+ }
+
+ #[inline]
+ fn val_type_is_sub_type(&self, a: ValType, b: ValType) -> bool {
+ match (a, b) {
+ (a, b) if a == b => true,
+ (ValType::Ref(a), ValType::Ref(b)) => self.ref_type_is_sub_type(a, b),
+ _ => false,
+ }
+ }
+
+ /// Is `a` a subtype of `b`?
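+ ///
+ /// For example, `(ref $t)` is a subtype of `(ref null $t)` (a
+ /// non-nullable reference may flow wherever a nullable one is
+ /// accepted), but not the other way around.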
+ fn ref_type_is_sub_type(&self, a: RefType, b: RefType) -> bool {
+ if a == b {
+ return true;
+ }
+
+ if a.nullable && !b.nullable {
+ return false;
+ }
+
+ self.heap_type_is_sub_type(a.heap_type, b.heap_type)
+ }
+
+ fn heap_type_is_sub_type(&self, a: HeapType, b: HeapType) -> bool {
+ use HeapType as HT;
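+ // The abstract subtype hierarchies encoded by the arms below:
+ // none <: i31/struct/array <: eq <: any, noextern <: extern, and
+ // nofunc <: func; a concrete type is a subtype of the abstract type
+ // of its composite kind (func, array, or struct).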
+ match (a, b) {
+ (a, b) if a == b => true,
+
+ (HT::Eq | HT::I31 | HT::Struct | HT::Array | HT::None, HT::Any) => true,
+ (HT::I31 | HT::Struct | HT::Array | HT::None, HT::Eq) => true,
+ (HT::NoExtern, HT::Extern) => true,
+ (HT::NoFunc, HT::Func) => true,
+ (HT::None, HT::I31 | HT::Array | HT::Struct) => true,
+
+ (HT::Concrete(a), HT::Eq | HT::Any) => matches!(
+ self.ty(a).composite_type,
+ CompositeType::Array(_) | CompositeType::Struct(_)
+ ),
+
+ (HT::Concrete(a), HT::Struct) => {
+ matches!(self.ty(a).composite_type, CompositeType::Struct(_))
+ }
+
+ (HT::Concrete(a), HT::Array) => {
+ matches!(self.ty(a).composite_type, CompositeType::Array(_))
+ }
+
+ (HT::Concrete(a), HT::Func) => {
+ matches!(self.ty(a).composite_type, CompositeType::Func(_))
+ }
+
+ (HT::Concrete(mut a), HT::Concrete(b)) => loop {
+ if a == b {
+ return true;
+ }
+ if let Some(supertype) = self.ty(a).supertype {
+ a = supertype;
+ } else {
+ return false;
+ }
+ },
+
+ (HT::None, HT::Concrete(b)) => matches!(
+ self.ty(b).composite_type,
+ CompositeType::Array(_) | CompositeType::Struct(_)
+ ),
+
+ (HT::NoFunc, HT::Concrete(b)) => {
+ matches!(self.ty(b).composite_type, CompositeType::Func(_))
+ }
+
+ // Nothing else matches. (Avoid full wildcard matches so that
+ // adding/modifying variants is easier in the future.)
+ (HT::Concrete(_), _)
+ | (HT::Func, _)
+ | (HT::Extern, _)
+ | (HT::Any, _)
+ | (HT::None, _)
+ | (HT::NoExtern, _)
+ | (HT::NoFunc, _)
+ | (HT::Eq, _)
+ | (HT::Struct, _)
+ | (HT::Array, _)
+ | (HT::I31, _) => false,
+
+ // TODO: `exn` probably will be its own type hierarchy and will
+ // probably get `noexn` as well.
+ (HT::Exn, _) => false,
+ }
+ }
+
+ fn arbitrary_types(&mut self, u: &mut Unstructured) -> Result<()> {
+ assert!(self.config.min_types <= self.config.max_types);
+ while self.types.len() < self.config.min_types {
+ self.arbitrary_rec_group(u)?;
+ }
+ while self.types.len() < self.config.max_types {
+ let keep_going = u.arbitrary().unwrap_or(false);
+ if !keep_going {
+ break;
+ }
+ self.arbitrary_rec_group(u)?;
+ }
+ Ok(())
+ }
+
+ fn add_type(&mut self, ty: SubType) -> u32 {
+ let index = u32::try_from(self.types.len()).unwrap();
+
+ if let Some(supertype) = ty.supertype {
+ self.super_to_sub_types
+ .entry(supertype)
+ .or_default()
+ .push(index);
+ }
+
+ let list = match &ty.composite_type {
+ CompositeType::Array(_) => &mut self.array_types,
+ CompositeType::Func(_) => &mut self.func_types,
+ CompositeType::Struct(_) => &mut self.struct_types,
+ };
+ list.push(index);
+
+ if !ty.is_final {
+ self.can_subtype.push(index);
+ }
+
+ self.types.push(ty);
+ index
+ }
+
+ fn arbitrary_rec_group(&mut self, u: &mut Unstructured) -> Result<()> {
+ let rec_group_start = self.types.len();
+
+ if self.config.gc_enabled {
+ // With small probability, clone an existing rec group.
+ if self.clonable_rec_groups().next().is_some() && u.ratio(1, u8::MAX)? {
+ return self.clone_rec_group(u);
+ }
+
+ // Otherwise, create a new rec group with multiple types inside.
+ let max_rec_group_size = self.config.max_types - self.types.len();
+ let rec_group_size = u.int_in_range(0..=max_rec_group_size)?;
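+ // Types in a rec group may forward-reference later members of the
+ // same group, so references are allowed up to the end of the group
+ // being generated, not just to previously-created types.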
+ let type_ref_limit = u32::try_from(self.types.len() + rec_group_size).unwrap();
+ for _ in 0..rec_group_size {
+ let ty = self.arbitrary_sub_type(u, type_ref_limit)?;
+ self.add_type(ty);
+ }
+ } else {
+ let type_ref_limit = u32::try_from(self.types.len()).unwrap();
+ let ty = self.arbitrary_sub_type(u, type_ref_limit)?;
+ self.add_type(ty);
+ }
+
+ self.rec_groups.push(rec_group_start..self.types.len());
+ Ok(())
+ }
+
+ /// Returns an iterator of rec groups that we could currently clone while
+ /// still staying within the max types limit.
+ fn clonable_rec_groups(&self) -> impl Iterator<Item = Range<usize>> + '_ {
+ self.rec_groups
+ .iter()
+ .filter(|r| r.end - r.start <= self.config.max_types.saturating_sub(self.types.len()))
+ .cloned()
+ }
+
+ fn clone_rec_group(&mut self, u: &mut Unstructured) -> Result<()> {
+ // NB: this does *not* guarantee that the cloned rec group will
+ // canonicalize the same as the original rec group and be
+ // deduplicated. That would require a second pass over the cloned types
+ // to rewrite references within the original rec group to be references
+ // into the new rec group. That might make sense to do one day, but for
+ // now we don't do it. That also means that we can't mark the new types
+ // as "subtypes" of the old types and vice versa.
+ let candidates: Vec<_> = self.clonable_rec_groups().collect();
+ let group = u.choose(&candidates)?.clone();
+ let new_rec_group_start = self.types.len();
+ for index in group {
+ let orig_ty_index = u32::try_from(index).unwrap();
+ let ty = self.ty(orig_ty_index).clone();
+ self.add_type(ty);
+ }
+ self.rec_groups.push(new_rec_group_start..self.types.len());
+ Ok(())
+ }
+
+ fn arbitrary_sub_type(
+ &mut self,
+ u: &mut Unstructured,
+ // NB: Types can be referenced up to (but not including)
+ // `type_ref_limit`. It is an exclusive bound to avoid an option: to
+ // disallow any reference to a concrete type (e.g. `(ref $type)`) you
+ // can simply pass `0` here.
+ type_ref_limit: u32,
+ ) -> Result<SubType> {
+ if !self.config.gc_enabled {
+ debug_assert_eq!(type_ref_limit, u32::try_from(self.types.len()).unwrap());
+ return Ok(SubType {
+ is_final: true,
+ supertype: None,
+ composite_type: CompositeType::Func(self.arbitrary_func_type(u, type_ref_limit)?),
+ });
+ }
+
+ if !self.can_subtype.is_empty() && u.ratio(1, 32_u8)? {
+ self.arbitrary_sub_type_of_super_type(u, type_ref_limit)
+ } else {
+ Ok(SubType {
+ is_final: u.arbitrary()?,
+ supertype: None,
+ composite_type: self.arbitrary_composite_type(u, type_ref_limit)?,
+ })
+ }
+ }
+
+ fn arbitrary_sub_type_of_super_type(
+ &mut self,
+ u: &mut Unstructured,
+ type_ref_limit: u32,
+ ) -> Result<SubType> {
+ let supertype = *u.choose(&self.can_subtype)?;
+ let mut composite_type = self.types[usize::try_from(supertype).unwrap()]
+ .composite_type
+ .clone();
+ match &mut composite_type {
+ CompositeType::Array(a) => {
+ a.0 = self.arbitrary_matching_field_type(u, a.0)?;
+ }
+ CompositeType::Func(f) => {
+ *f = self.arbitrary_matching_func_type(u, f)?;
+ }
+ CompositeType::Struct(s) => {
+ *s = self.arbitrary_matching_struct_type(u, s, type_ref_limit)?;
+ }
+ }
+ Ok(SubType {
+ is_final: u.arbitrary()?,
+ supertype: Some(supertype),
+ composite_type,
+ })
+ }
+
+ fn arbitrary_matching_struct_type(
+ &mut self,
+ u: &mut Unstructured,
+ ty: &StructType,
+ type_ref_limit: u32,
+ ) -> Result<StructType> {
+ let len_extra_fields = u.int_in_range(0..=5)?;
+ let mut fields = Vec::with_capacity(ty.fields.len() + len_extra_fields);
+ for field in ty.fields.iter() {
+ fields.push(self.arbitrary_matching_field_type(u, *field)?);
+ }
+ for _ in 0..len_extra_fields {
+ fields.push(self.arbitrary_field_type(u, type_ref_limit)?);
+ }
+ Ok(StructType {
+ fields: fields.into_boxed_slice(),
+ })
+ }
+
+ fn arbitrary_matching_field_type(
+ &mut self,
+ u: &mut Unstructured,
+ ty: FieldType,
+ ) -> Result<FieldType> {
+ Ok(FieldType {
+ element_type: self.arbitrary_matching_storage_type(u, ty.element_type)?,
+ mutable: if ty.mutable { u.arbitrary()? } else { false },
+ })
+ }
+
+ fn arbitrary_matching_storage_type(
+ &mut self,
+ u: &mut Unstructured,
+ ty: StorageType,
+ ) -> Result<StorageType> {
+ match ty {
+ StorageType::I8 => Ok(StorageType::I8),
+ StorageType::I16 => Ok(StorageType::I16),
+ StorageType::Val(ty) => Ok(StorageType::Val(self.arbitrary_matching_val_type(u, ty)?)),
+ }
+ }
+
+ fn arbitrary_matching_val_type(
+ &mut self,
+ u: &mut Unstructured,
+ ty: ValType,
+ ) -> Result<ValType> {
+ match ty {
+ ValType::I32 => Ok(ValType::I32),
+ ValType::I64 => Ok(ValType::I64),
+ ValType::F32 => Ok(ValType::F32),
+ ValType::F64 => Ok(ValType::F64),
+ ValType::V128 => Ok(ValType::V128),
+ ValType::Ref(ty) => Ok(ValType::Ref(self.arbitrary_matching_ref_type(u, ty)?)),
+ }
+ }
+
+ fn arbitrary_matching_ref_type(&self, u: &mut Unstructured, ty: RefType) -> Result<RefType> {
+ Ok(RefType {
+ nullable: ty.nullable,
+ heap_type: self.arbitrary_matching_heap_type(u, ty.heap_type)?,
+ })
+ }
+
+ fn arbitrary_matching_heap_type(&self, u: &mut Unstructured, ty: HeapType) -> Result<HeapType> {
+ if !self.config.gc_enabled {
+ return Ok(ty);
+ }
+ use HeapType as HT;
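+ // Collect the heap types that match (are subtypes of) `ty`, then
+ // choose among them; `ty` itself is always a valid choice.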
+ let mut choices = vec![ty];
+ match ty {
+ HT::Any => {
+ choices.extend([HT::Eq, HT::Struct, HT::Array, HT::I31, HT::None]);
+ choices.extend(self.array_types.iter().copied().map(HT::Concrete));
+ choices.extend(self.struct_types.iter().copied().map(HT::Concrete));
+ }
+ HT::Eq => {
+ choices.extend([HT::Struct, HT::Array, HT::I31, HT::None]);
+ choices.extend(self.array_types.iter().copied().map(HT::Concrete));
+ choices.extend(self.struct_types.iter().copied().map(HT::Concrete));
+ }
+ HT::Struct => {
+ choices.extend([HT::Struct, HT::None]);
+ choices.extend(self.struct_types.iter().copied().map(HT::Concrete));
+ }
+ HT::Array => {
+ choices.extend([HT::Array, HT::None]);
+ choices.extend(self.array_types.iter().copied().map(HT::Concrete));
+ }
+ HT::I31 => {
+ choices.push(HT::None);
+ }
+ HT::Concrete(idx) => {
+ if let Some(subs) = self.super_to_sub_types.get(&idx) {
+ choices.extend(subs.iter().copied().map(HT::Concrete));
+ }
+ match self
+ .types
+ .get(usize::try_from(idx).unwrap())
+ .map(|ty| &ty.composite_type)
+ {
+ Some(CompositeType::Array(_)) | Some(CompositeType::Struct(_)) => {
+ choices.push(HT::None)
+ }
+ Some(CompositeType::Func(_)) => choices.push(HT::NoFunc),
+ None => {
+ // The referenced type might be part of this same rec
+ // group we are currently generating, but not generated
+ // yet. In this case, leave `choices` as it is, and we
+ // will just end up choosing the original type again
+ // down below, which is fine.
+ }
+ }
+ }
+ HT::Func => {
+ choices.extend(self.func_types.iter().copied().map(HT::Concrete));
+ choices.push(HT::NoFunc);
+ }
+ HT::Extern => {
+ choices.push(HT::NoExtern);
+ }
+ HT::Exn | HT::None | HT::NoExtern | HT::NoFunc => {}
+ }
+ Ok(*u.choose(&choices)?)
+ }
+
+ fn arbitrary_matching_func_type(
+ &mut self,
+ u: &mut Unstructured,
+ ty: &FuncType,
+ ) -> Result<Rc<FuncType>> {
+ // Note: parameters are contravariant, results are covariant. See
+ // https://github.com/bytecodealliance/wasm-tools/blob/0616ef196a183cf137ee06b4a5993b7d590088bf/crates/wasmparser/src/readers/core/types/matches.rs#L137-L174
+ // for details.
+ let mut params = Vec::with_capacity(ty.params.len());
+ for param in &ty.params {
+ params.push(self.arbitrary_super_type_of_val_type(u, *param)?);
+ }
+ let mut results = Vec::with_capacity(ty.results.len());
+ for result in &ty.results {
+ results.push(self.arbitrary_matching_val_type(u, *result)?);
+ }
+ Ok(Rc::new(FuncType { params, results }))
+ }
+
+ fn arbitrary_super_type_of_val_type(
+ &mut self,
+ u: &mut Unstructured,
+ ty: ValType,
+ ) -> Result<ValType> {
+ match ty {
+ ValType::I32 => Ok(ValType::I32),
+ ValType::I64 => Ok(ValType::I64),
+ ValType::F32 => Ok(ValType::F32),
+ ValType::F64 => Ok(ValType::F64),
+ ValType::V128 => Ok(ValType::V128),
+ ValType::Ref(ty) => Ok(ValType::Ref(self.arbitrary_super_type_of_ref_type(u, ty)?)),
+ }
+ }
+
+ fn arbitrary_super_type_of_ref_type(
+ &self,
+ u: &mut Unstructured,
+ ty: RefType,
+ ) -> Result<RefType> {
+ Ok(RefType {
+ // TODO: For now, only allow nullable reference
+ // types. Eventually we should support non-nullable reference types,
+ // but this means that we will also need to recognize when it is
+ // impossible to create an instance of the reference (e.g. `(ref
+ // nofunc)` has no instances, and self-referential types that
+ // contain a non-null self-reference are also impossible to create).
+ nullable: true,
+ heap_type: self.arbitrary_super_type_of_heap_type(u, ty.heap_type)?,
+ })
+ }
+
+ fn arbitrary_super_type_of_heap_type(
+ &self,
+ u: &mut Unstructured,
+ ty: HeapType,
+ ) -> Result<HeapType> {
+ if !self.config.gc_enabled {
+ return Ok(ty);
+ }
+ use HeapType as HT;
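+ // Collect the heap types that `ty` matches (its supertypes), then
+ // choose among them; `ty` itself is always a valid choice.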
+ let mut choices = vec![ty];
+ match ty {
+ HT::None => {
+ choices.extend([HT::Any, HT::Eq, HT::Struct, HT::Array, HT::I31]);
+ choices.extend(self.array_types.iter().copied().map(HT::Concrete));
+ choices.extend(self.struct_types.iter().copied().map(HT::Concrete));
+ }
+ HT::NoExtern => {
+ choices.push(HT::Extern);
+ }
+ HT::NoFunc => {
+ choices.extend(self.func_types.iter().copied().map(HT::Concrete));
+ choices.push(HT::Func);
+ }
+ HT::Concrete(mut idx) => {
+ match &self
+ .types
+ .get(usize::try_from(idx).unwrap())
+ .map(|ty| &ty.composite_type)
+ {
+ Some(CompositeType::Array(_)) => {
+ choices.extend([HT::Any, HT::Eq, HT::Array]);
+ }
+ Some(CompositeType::Func(_)) => {
+ choices.push(HT::Func);
+ }
+ Some(CompositeType::Struct(_)) => {
+ choices.extend([HT::Any, HT::Eq, HT::Struct]);
+ }
+ None => {
+ // Same as in `arbitrary_matching_heap_type`: this was a
+ // forward reference to a concrete type that is part of
+ // this same rec group we are generating right now, and
+ // therefore we haven't generated that type yet. Just
+ // leave `choices` as it is and we will choose the
+ // original type again down below.
+ }
+ }
+ while let Some(supertype) = self
+ .types
+ .get(usize::try_from(idx).unwrap())
+ .and_then(|ty| ty.supertype)
+ {
+ choices.push(HT::Concrete(supertype));
+ idx = supertype;
+ }
+ }
+ HT::Struct | HT::Array | HT::I31 => {
+ choices.extend([HT::Any, HT::Eq]);
+ }
+ HT::Eq => {
+ choices.push(HT::Any);
+ }
+ HT::Exn | HT::Any | HT::Func | HT::Extern => {}
+ }
+ Ok(*u.choose(&choices)?)
+ }
+
+ fn arbitrary_composite_type(
+ &mut self,
+ u: &mut Unstructured,
+ type_ref_limit: u32,
+ ) -> Result<CompositeType> {
+ if !self.config.gc_enabled {
+ return Ok(CompositeType::Func(
+ self.arbitrary_func_type(u, type_ref_limit)?,
+ ));
+ }
+
+ match u.int_in_range(0..=2)? {
+ 0 => Ok(CompositeType::Array(ArrayType(
+ self.arbitrary_field_type(u, type_ref_limit)?,
+ ))),
+ 1 => Ok(CompositeType::Func(
+ self.arbitrary_func_type(u, type_ref_limit)?,
+ )),
+ 2 => Ok(CompositeType::Struct(
+ self.arbitrary_struct_type(u, type_ref_limit)?,
+ )),
+ _ => unreachable!(),
+ }
+ }
+
+ fn arbitrary_struct_type(
+ &mut self,
+ u: &mut Unstructured,
+ type_ref_limit: u32,
+ ) -> Result<StructType> {
+ let len = u.int_in_range(0..=20)?;
+ let mut fields = Vec::with_capacity(len);
+ for _ in 0..len {
+ fields.push(self.arbitrary_field_type(u, type_ref_limit)?);
+ }
+ Ok(StructType {
+ fields: fields.into_boxed_slice(),
+ })
+ }
+
+ fn arbitrary_field_type(
+ &mut self,
+ u: &mut Unstructured,
+ type_ref_limit: u32,
+ ) -> Result<FieldType> {
+ Ok(FieldType {
+ element_type: self.arbitrary_storage_type(u, type_ref_limit)?,
+ mutable: u.arbitrary()?,
+ })
+ }
+
+ fn arbitrary_storage_type(
+ &mut self,
+ u: &mut Unstructured,
+ type_ref_limit: u32,
+ ) -> Result<StorageType> {
+ match u.int_in_range(0..=2)? {
+ 0 => Ok(StorageType::I8),
+ 1 => Ok(StorageType::I16),
+ 2 => Ok(StorageType::Val(self.arbitrary_valtype(u, type_ref_limit)?)),
+ _ => unreachable!(),
+ }
+ }
+
+ fn arbitrary_ref_type(&self, u: &mut Unstructured) -> Result<RefType> {
+ Ok(RefType {
+ nullable: true,
+ heap_type: self.arbitrary_heap_type(u)?,
+ })
+ }
+
+ fn arbitrary_heap_type(&self, u: &mut Unstructured) -> Result<HeapType> {
+ assert!(self.config.reference_types_enabled);
+
+ if self.config.gc_enabled && !self.types.is_empty() && u.arbitrary()? {
+ let type_ref_limit = u32::try_from(self.types.len()).unwrap();
+ let idx = u.int_in_range(0..=type_ref_limit - 1)?;
+ return Ok(HeapType::Concrete(idx));
+ }
+
+ let mut choices = vec![HeapType::Func, HeapType::Extern];
+ if self.config.exceptions_enabled {
+ choices.push(HeapType::Exn);
+ }
+ if self.config.gc_enabled {
+ choices.extend(
+ [
+ HeapType::Any,
+ HeapType::None,
+ HeapType::NoExtern,
+ HeapType::NoFunc,
+ HeapType::Eq,
+ HeapType::Struct,
+ HeapType::Array,
+ HeapType::I31,
+ ]
+ .iter()
+ .copied(),
+ );
+ }
+ u.choose(&choices).copied()
+ }
+
+ fn arbitrary_func_type(
+ &mut self,
+ u: &mut Unstructured,
+ type_ref_limit: u32,
+ ) -> Result<Rc<FuncType>> {
+ arbitrary_func_type(
+ u,
+ &self.config,
+ &self.valtypes,
+ if !self.config.multi_value_enabled {
+ Some(1)
+ } else {
+ None
+ },
+ type_ref_limit,
+ )
+ }
+
+ fn can_add_local_or_import_tag(&self) -> bool {
+ self.config.exceptions_enabled
+ && self.has_tag_func_types()
+ && self.tags.len() < self.config.max_tags
+ }
+
+ fn can_add_local_or_import_func(&self) -> bool {
+ !self.func_types.is_empty() && self.funcs.len() < self.config.max_funcs
+ }
+
+ fn can_add_local_or_import_table(&self) -> bool {
+ self.tables.len() < self.config.max_tables
+ }
+
+ fn can_add_local_or_import_global(&self) -> bool {
+ self.globals.len() < self.config.max_globals
+ }
+
+ fn can_add_local_or_import_memory(&self) -> bool {
+ self.memories.len() < self.config.max_memories
+ }
+
+ fn arbitrary_imports(&mut self, u: &mut Unstructured) -> Result<()> {
+ if self.config.max_type_size < self.type_size {
+ return Ok(());
+ }
+
+ let mut import_strings = HashSet::new();
+ let mut choices: Vec<fn(&mut Unstructured, &mut Module) -> Result<EntityType>> =
+ Vec::with_capacity(5);
+ let min = self.config.min_imports.saturating_sub(self.num_imports);
+ let max = self.config.max_imports.saturating_sub(self.num_imports);
+ arbitrary_loop(u, min, max, |u| {
+ choices.clear();
+ if self.can_add_local_or_import_tag() {
+ choices.push(|u, m| {
+ let ty = m.arbitrary_tag_type(u)?;
+ Ok(EntityType::Tag(ty))
+ });
+ }
+ if self.can_add_local_or_import_func() {
+ choices.push(|u, m| {
+ let idx = *u.choose(&m.func_types)?;
+ let ty = m.func_type(idx).clone();
+ Ok(EntityType::Func(idx, ty))
+ });
+ }
+ if self.can_add_local_or_import_global() {
+ choices.push(|u, m| {
+ let ty = m.arbitrary_global_type(u)?;
+ Ok(EntityType::Global(ty))
+ });
+ }
+ if self.can_add_local_or_import_memory() {
+ choices.push(|u, m| {
+ let ty = arbitrary_memtype(u, m.config())?;
+ Ok(EntityType::Memory(ty))
+ });
+ }
+ if self.can_add_local_or_import_table() {
+ choices.push(|u, m| {
+ let ty = arbitrary_table_type(u, m.config())?;
+ Ok(EntityType::Table(ty))
+ });
+ }
+
+ if choices.is_empty() {
+ // We are out of choices. If we have not yet reached the
+ // minimum, then we have no way to satisfy that constraint,
+ // but max-constraints take precedence over the min-import
+ // constraint.
+ return Ok(false);
+ }
+
+ // Generate a type to import, but only actually add the item if the
+ // type size budget allows us to.
+ let f = u.choose(&choices)?;
+ let entity_type = f(u, self)?;
+ let budget = self.config.max_type_size - self.type_size;
+ if entity_type.size() + 1 > budget {
+ return Ok(false);
+ }
+ self.type_size += entity_type.size() + 1;
+
+ // Generate an arbitrary module/name pair to name this import.
+ let mut import_pair = unique_import_strings(1_000, u)?;
+ if self.duplicate_imports_behavior == DuplicateImportsBehavior::Disallowed {
+ while import_strings.contains(&import_pair) {
+ use std::fmt::Write;
+ write!(&mut import_pair.1, "{}", import_strings.len()).unwrap();
+ }
+ import_strings.insert(import_pair.clone());
+ }
+ let (module, field) = import_pair;
+
+ // Once our name is determined, then we push the typed item into the
+ // appropriate namespace.
+ match &entity_type {
+ EntityType::Tag(ty) => self.tags.push(ty.clone()),
+ EntityType::Func(idx, ty) => self.funcs.push((*idx, ty.clone())),
+ EntityType::Global(ty) => self.globals.push(*ty),
+ EntityType::Table(ty) => self.tables.push(*ty),
+ EntityType::Memory(ty) => self.memories.push(*ty),
+ }
+
+ self.num_imports += 1;
+ self.imports.push(Import {
+ module,
+ field,
+ entity_type,
+ });
+ Ok(true)
+ })?;
+
+ Ok(())
+ }
+
+ /// Generate some arbitrary imports from the list of available imports.
+ ///
+ /// Returns `true` if there was a list of available imports
+ /// configured. Otherwise `false` and the caller should generate arbitrary
+ /// imports.
+ fn arbitrary_imports_from_available(&mut self, u: &mut Unstructured) -> Result<bool> {
+ let example_module = if let Some(wasm) = self.config.available_imports.take() {
+ wasm
+ } else {
+ return Ok(false);
+ };
+
+ #[cfg(feature = "wasmparser")]
+ {
+ self._arbitrary_imports_from_available(u, &example_module)?;
+ Ok(true)
+ }
+ #[cfg(not(feature = "wasmparser"))]
+ {
+ let _ = (example_module, u);
+ panic!("support for `available_imports` was disabled at compile time");
+ }
+ }
+
+ #[cfg(feature = "wasmparser")]
+ fn _arbitrary_imports_from_available(
+ &mut self,
+ u: &mut Unstructured,
+ example_module: &[u8],
+ ) -> Result<()> {
+ // First, parse the module-by-example to collect the types and imports.
+ //
+ // `available_types` will map from a signature index (which is the same as the index into
+ // this vector) as it appears in the parsed code, to the type itself as well as to the
+ // index in our newly generated module. Initially the option is `None` and will become a
+ // `Some` when we encounter an import that uses this signature in the next portion of this
+ // function. See also the `make_func_type` closure below.
+ let mut available_types = Vec::new();
+ let mut available_imports = Vec::<wasmparser::Import>::new();
+ for payload in wasmparser::Parser::new(0).parse_all(&example_module) {
+ match payload.expect("could not parse the available import payload") {
+ wasmparser::Payload::TypeSection(type_reader) => {
+ for ty in type_reader.into_iter_err_on_gc_types() {
+ let ty = ty.expect("could not parse type section");
+ available_types.push((ty, None));
+ }
+ }
+ wasmparser::Payload::ImportSection(import_reader) => {
+ for im in import_reader {
+ let im = im.expect("could not read import");
+ // We can immediately filter whether this is an import we want to
+ // use.
+ let use_import = u.arbitrary().unwrap_or(false);
+ if !use_import {
+ continue;
+ }
+ available_imports.push(im);
+ }
+ }
+ _ => {}
+ }
+ }
+
+ // In this function we need to place imported function/tag types in the types section and
+ // generate import entries (which refer to said types) at the same time.
+ let max_types = self.config.max_types;
+ let multi_value_enabled = self.config.multi_value_enabled;
+ let mut new_imports = Vec::with_capacity(available_imports.len());
+ let first_type_index = self.types.len();
+ let mut new_types = Vec::<SubType>::new();
+
+ // Returns the index of the translated type in the to-be-generated type
+ // section, along with a reference to the type itself.
+ let mut make_func_type = |parsed_sig_idx: u32| {
+ let serialized_sig_idx = match available_types.get_mut(parsed_sig_idx as usize) {
+ None => panic!("signature index refers to a type out of bounds"),
+ Some((_, Some(idx))) => *idx as usize,
+ Some((func_type, index_store)) => {
+ let multi_value_required = func_type.results().len() > 1;
+ let new_index = first_type_index + new_types.len();
+ if new_index >= max_types || (multi_value_required && !multi_value_enabled) {
+ return None;
+ }
+ let func_type = Rc::new(FuncType {
+ params: func_type
+ .params()
+ .iter()
+ .map(|t| (*t).try_into().unwrap())
+ .collect(),
+ results: func_type
+ .results()
+ .iter()
+ .map(|t| (*t).try_into().unwrap())
+ .collect(),
+ });
+ index_store.replace(new_index as u32);
+ new_types.push(SubType {
+ is_final: true,
+ supertype: None,
+ composite_type: CompositeType::Func(Rc::clone(&func_type)),
+ });
+ new_index
+ }
+ };
+ match &new_types[serialized_sig_idx - first_type_index].composite_type {
+ CompositeType::Func(f) => Some((serialized_sig_idx as u32, Rc::clone(f))),
+ _ => unimplemented!(),
+ }
+ };
+
+ for import in available_imports {
+ let type_size_budget = self.config.max_type_size - self.type_size;
+ let entity_type = match &import.ty {
+ wasmparser::TypeRef::Func(sig_idx) => {
+ if self.funcs.len() >= self.config.max_funcs {
+ continue;
+ } else if let Some((sig_idx, func_type)) = make_func_type(*sig_idx) {
+ let entity = EntityType::Func(sig_idx as u32, Rc::clone(&func_type));
+ if type_size_budget < entity.size() {
+ continue;
+ }
+ self.funcs.push((sig_idx, func_type));
+ entity
+ } else {
+ continue;
+ }
+ }
+
+ wasmparser::TypeRef::Tag(wasmparser::TagType { func_type_idx, .. }) => {
+ let can_add_tag = self.tags.len() < self.config.max_tags;
+ if !self.config.exceptions_enabled || !can_add_tag {
+ continue;
+ } else if let Some((sig_idx, func_type)) = make_func_type(*func_type_idx) {
+ let tag_type = TagType {
+ func_type_idx: sig_idx,
+ func_type,
+ };
+ let entity = EntityType::Tag(tag_type.clone());
+ if type_size_budget < entity.size() {
+ continue;
+ }
+ self.tags.push(tag_type);
+ entity
+ } else {
+ continue;
+ }
+ }
+
+ wasmparser::TypeRef::Table(table_ty) => {
+ let table_ty = TableType::try_from(*table_ty).unwrap();
+ let entity = EntityType::Table(table_ty);
+ let type_size = entity.size();
+ if type_size_budget < type_size || !self.can_add_local_or_import_table() {
+ continue;
+ }
+ self.type_size += type_size;
+ self.tables.push(table_ty);
+ entity
+ }
+
+ wasmparser::TypeRef::Memory(memory_ty) => {
+ let memory_ty = MemoryType::try_from(*memory_ty).unwrap();
+ let entity = EntityType::Memory(memory_ty);
+ let type_size = entity.size();
+ if type_size_budget < type_size || !self.can_add_local_or_import_memory() {
+ continue;
+ }
+ self.type_size += type_size;
+ self.memories.push(memory_ty);
+ entity
+ }
+
+ wasmparser::TypeRef::Global(global_ty) => {
+ let global_ty = (*global_ty).try_into().unwrap();
+ let entity = EntityType::Global(global_ty);
+ let type_size = entity.size();
+ if type_size_budget < type_size || !self.can_add_local_or_import_global() {
+ continue;
+ }
+ self.type_size += type_size;
+ self.globals.push(global_ty);
+ entity
+ }
+ };
+ new_imports.push(Import {
+ module: import.module.to_string(),
+ field: import.name.to_string(),
+ entity_type,
+ });
+ self.num_imports += 1;
+ }
+
+ // Finally, add the entities we just generated.
+ for ty in new_types {
+ self.rec_groups.push(self.types.len()..self.types.len() + 1);
+ self.add_type(ty);
+ }
+ self.imports.extend(new_imports);
+
+ Ok(())
+ }
+
+ fn type_of(&self, kind: ExportKind, index: u32) -> EntityType {
+ match kind {
+ ExportKind::Global => EntityType::Global(self.globals[index as usize]),
+ ExportKind::Memory => EntityType::Memory(self.memories[index as usize]),
+ ExportKind::Table => EntityType::Table(self.tables[index as usize]),
+ ExportKind::Func => {
+ let (_idx, ty) = &self.funcs[index as usize];
+ EntityType::Func(u32::max_value(), ty.clone())
+ }
+ ExportKind::Tag => EntityType::Tag(self.tags[index as usize].clone()),
+ }
+ }
+
+ fn ty(&self, idx: u32) -> &SubType {
+ &self.types[idx as usize]
+ }
+
+ fn func_types(&self) -> impl Iterator<Item = (u32, &FuncType)> + '_ {
+ self.func_types
+ .iter()
+ .copied()
+ .map(move |type_i| (type_i, &**self.func_type(type_i)))
+ }
+
+ fn func_type(&self, idx: u32) -> &Rc<FuncType> {
+ match &self.ty(idx).composite_type {
+ CompositeType::Func(f) => f,
+ _ => panic!("types[{idx}] is not a func type"),
+ }
+ }
+
+ fn tags(&self) -> impl Iterator<Item = (u32, &TagType)> + '_ {
+ self.tags
+ .iter()
+ .enumerate()
+ .map(move |(i, ty)| (i as u32, ty))
+ }
+
+ fn funcs(&self) -> impl Iterator<Item = (u32, &Rc<FuncType>)> + '_ {
+ self.funcs
+ .iter()
+ .enumerate()
+ .map(move |(i, (_, ty))| (i as u32, ty))
+ }
+
+ fn has_tag_func_types(&self) -> bool {
+ self.tag_func_types().next().is_some()
+ }
+
+ fn tag_func_types(&self) -> impl Iterator<Item = u32> + '_ {
+ self.func_types
+ .iter()
+ .copied()
+ .filter(move |i| self.func_type(*i).results.is_empty())
+ }
+
+ fn arbitrary_valtype(&self, u: &mut Unstructured, type_ref_limit: u32) -> Result<ValType> {
+ arbitrary_valtype(u, &self.config, &self.valtypes, type_ref_limit)
+ }
+
+ fn arbitrary_global_type(&self, u: &mut Unstructured) -> Result<GlobalType> {
+ Ok(GlobalType {
+ val_type: self.arbitrary_valtype(u, u32::try_from(self.types.len()).unwrap())?,
+ mutable: u.arbitrary()?,
+ })
+ }
+
+ fn arbitrary_tag_type(&self, u: &mut Unstructured) -> Result<TagType> {
+ let candidate_func_types: Vec<_> = self.tag_func_types().collect();
+ arbitrary_tag_type(u, &candidate_func_types, |ty_idx| {
+ self.func_type(ty_idx).clone()
+ })
+ }
+
+ fn arbitrary_tags(&mut self, u: &mut Unstructured) -> Result<()> {
+ if !self.config.exceptions_enabled || !self.has_tag_func_types() {
+ return Ok(());
+ }
+
+ arbitrary_loop(u, self.config.min_tags, self.config.max_tags, |u| {
+ if !self.can_add_local_or_import_tag() {
+ return Ok(false);
+ }
+ self.tags.push(self.arbitrary_tag_type(u)?);
+ self.num_defined_tags += 1;
+ Ok(true)
+ })
+ }
+
+ fn arbitrary_funcs(&mut self, u: &mut Unstructured) -> Result<()> {
+ if self.func_types.is_empty() {
+ return Ok(());
+ }
+
+ arbitrary_loop(u, self.config.min_funcs, self.config.max_funcs, |u| {
+ if !self.can_add_local_or_import_func() {
+ return Ok(false);
+ }
+ let max = self.func_types.len() - 1;
+ let ty = self.func_types[u.int_in_range(0..=max)?];
+ self.funcs.push((ty, self.func_type(ty).clone()));
+ self.num_defined_funcs += 1;
+ Ok(true)
+ })
+ }
+
+ fn arbitrary_tables(&mut self, u: &mut Unstructured) -> Result<()> {
+ arbitrary_loop(
+ u,
+ self.config.min_tables as usize,
+ self.config.max_tables as usize,
+ |u| {
+ if !self.can_add_local_or_import_table() {
+ return Ok(false);
+ }
+ self.num_defined_tables += 1;
+ let ty = arbitrary_table_type(u, self.config())?;
+ self.tables.push(ty);
+ Ok(true)
+ },
+ )
+ }
+
+ fn arbitrary_memories(&mut self, u: &mut Unstructured) -> Result<()> {
+ arbitrary_loop(
+ u,
+ self.config.min_memories as usize,
+ self.config.max_memories as usize,
+ |u| {
+ if !self.can_add_local_or_import_memory() {
+ return Ok(false);
+ }
+ self.num_defined_memories += 1;
+ self.memories.push(arbitrary_memtype(u, self.config())?);
+ Ok(true)
+ },
+ )
+ }
+
+ fn arbitrary_globals(&mut self, u: &mut Unstructured) -> Result<()> {
+ let mut choices: Vec<Box<dyn Fn(&mut Unstructured, ValType) -> Result<GlobalInitExpr>>> =
+ vec![];
+ let num_imported_globals = self.globals.len();
+
+ arbitrary_loop(u, self.config.min_globals, self.config.max_globals, |u| {
+ if !self.can_add_local_or_import_global() {
+ return Ok(false);
+ }
+
+ let ty = self.arbitrary_global_type(u)?;
+
+ choices.clear();
+ let num_funcs = self.funcs.len() as u32;
+ choices.push(Box::new(move |u, ty| {
+ Ok(GlobalInitExpr::ConstExpr(match ty {
+ ValType::I32 => ConstExpr::i32_const(u.arbitrary()?),
+ ValType::I64 => ConstExpr::i64_const(u.arbitrary()?),
+ ValType::F32 => ConstExpr::f32_const(u.arbitrary()?),
+ ValType::F64 => ConstExpr::f64_const(u.arbitrary()?),
+ ValType::V128 => ConstExpr::v128_const(u.arbitrary()?),
+ ValType::Ref(ty) => {
+ assert!(ty.nullable);
+ if ty.heap_type == HeapType::Func && num_funcs > 0 && u.arbitrary()? {
+ let func = u.int_in_range(0..=num_funcs - 1)?;
+ return Ok(GlobalInitExpr::FuncRef(func));
+ }
+ ConstExpr::ref_null(ty.heap_type)
+ }
+ }))
+ }));
+
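+ // Imported immutable globals of a matching type are also valid
+ // initializers: core wasm allows `global.get` of an imported
+ // immutable global inside a constant expression.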
+ for (i, g) in self.globals[..num_imported_globals].iter().enumerate() {
+ if !g.mutable && g.val_type == ty.val_type {
+ choices.push(Box::new(move |_, _| {
+ Ok(GlobalInitExpr::ConstExpr(ConstExpr::global_get(i as u32)))
+ }));
+ }
+ }
+
+ let f = u.choose(&choices)?;
+ let expr = f(u, ty.val_type)?;
+ let global_idx = self.globals.len() as u32;
+ self.globals.push(ty);
+ self.defined_globals.push((global_idx, expr));
+ Ok(true)
+ })
+ }
+
+ fn arbitrary_exports(&mut self, u: &mut Unstructured) -> Result<()> {
+ if self.config.max_type_size < self.type_size && !self.config.export_everything {
+ return Ok(());
+ }
+
+ // Build up a list of candidates for each class of import
+ let mut choices: Vec<Vec<(ExportKind, u32)>> = Vec::with_capacity(6);
+ choices.push(
+ (0..self.funcs.len())
+ .map(|i| (ExportKind::Func, i as u32))
+ .collect(),
+ );
+ choices.push(
+ (0..self.tables.len())
+ .map(|i| (ExportKind::Table, i as u32))
+ .collect(),
+ );
+ choices.push(
+ (0..self.memories.len())
+ .map(|i| (ExportKind::Memory, i as u32))
+ .collect(),
+ );
+ choices.push(
+ (0..self.globals.len())
+ .map(|i| (ExportKind::Global, i as u32))
+ .collect(),
+ );
+
+ // If the configuration demands exporting everything, we do so here and
+ // early-return.
+ if self.config.export_everything {
+ for choices_by_kind in choices {
+ for (kind, idx) in choices_by_kind {
+ let name = unique_string(1_000, &mut self.export_names, u)?;
+ self.add_arbitrary_export(name, kind, idx)?;
+ }
+ }
+ return Ok(());
+ }
+
+ arbitrary_loop(u, self.config.min_exports, self.config.max_exports, |u| {
+ // Remove all candidates for export whose type size exceeds our
+ // remaining budget for type size. Then also remove any classes
+ // of exports which no longer have any candidates.
+ //
+ // If there's nothing remaining after this, then we're done.
+ let max_size = self.config.max_type_size - self.type_size;
+ for list in choices.iter_mut() {
+ list.retain(|(kind, idx)| self.type_of(*kind, *idx).size() + 1 < max_size);
+ }
+ choices.retain(|list| !list.is_empty());
+ if choices.is_empty() {
+ return Ok(false);
+ }
+
+ // Pick a name, then pick the export, and then we can record
+ // information about the chosen export.
+ let name = unique_string(1_000, &mut self.export_names, u)?;
+ let list = u.choose(&choices)?;
+ let (kind, idx) = *u.choose(list)?;
+ self.add_arbitrary_export(name, kind, idx)?;
+ Ok(true)
+ })
+ }
+
+ fn add_arbitrary_export(&mut self, name: String, kind: ExportKind, idx: u32) -> Result<()> {
+ let ty = self.type_of(kind, idx);
+ self.type_size += 1 + ty.size();
+ if self.type_size <= self.config.max_type_size {
+ self.exports.push((name, kind, idx));
+ Ok(())
+ } else {
+ // If our addition of exports takes us above the allowed number of
+ // types, we fail; this error code is not the most illustrative of
+ // the cause but is the best available from `arbitrary`.
+ Err(arbitrary::Error::IncorrectFormat)
+ }
+ }
+
+ fn arbitrary_start(&mut self, u: &mut Unstructured) -> Result<()> {
+ if !self.config.allow_start_export {
+ return Ok(());
+ }
+
+ let mut choices = Vec::with_capacity(self.funcs.len() as usize);
+
+ for (func_idx, ty) in self.funcs() {
+ if ty.params.is_empty() && ty.results.is_empty() {
+ choices.push(func_idx);
+ }
+ }
+
+ if !choices.is_empty() && u.arbitrary().unwrap_or(false) {
+ let f = *u.choose(&choices)?;
+ self.start = Some(f);
+ }
+
+ Ok(())
+ }
+
+ fn arbitrary_elems(&mut self, u: &mut Unstructured) -> Result<()> {
+ let func_max = self.funcs.len() as u32;
+
+ // Create a helper closure to choose an arbitrary offset.
+ let mut offset_global_choices = vec![];
+ if !self.config.disallow_traps {
+ for (i, g) in self.globals[..self.globals.len() - self.defined_globals.len()]
+ .iter()
+ .enumerate()
+ {
+ if !g.mutable && g.val_type == ValType::I32 {
+ offset_global_choices.push(i as u32);
+ }
+ }
+ }
+ let arbitrary_active_elem = |u: &mut Unstructured,
+ min_mem_size: u32,
+ table: Option<u32>,
+ disallow_traps: bool,
+ table_ty: &TableType| {
+ let (offset, max_size_hint) = if !offset_global_choices.is_empty() && u.arbitrary()? {
+ let g = u.choose(&offset_global_choices)?;
+ (Offset::Global(*g), None)
+ } else {
+ let max_mem_size = if disallow_traps {
+ table_ty.minimum
+ } else {
+ u32::MAX
+ };
+ let offset =
+ arbitrary_offset(u, min_mem_size.into(), max_mem_size.into(), 0)? as u32;
+ let max_size_hint = if disallow_traps
+ || (offset <= min_mem_size && u.int_in_range(0..=CHANCE_OFFSET_INBOUNDS)? != 0)
+ {
+ Some(min_mem_size - offset)
+ } else {
+ None
+ };
+ (Offset::Const32(offset as i32), max_size_hint)
+ };
+ Ok((ElementKind::Active { table, offset }, max_size_hint))
+ };
+
+ type GenElemSegment<'a> =
+ dyn Fn(&mut Unstructured) -> Result<(ElementKind, Option<u32>)> + 'a;
+ let mut funcrefs: Vec<Box<GenElemSegment>> = Vec::new();
+ let mut externrefs: Vec<Box<GenElemSegment>> = Vec::new();
+ let disallow_traps = self.config.disallow_traps;
+ for (i, ty) in self.tables.iter().enumerate() {
+ // If this table starts with no capacity then any non-empty element
+ // segment placed onto it will immediately trap, which isn't very
+ // interesting. If that's the case, give it only an unlikely
+ // chance of proceeding.
+ if ty.minimum == 0 && u.int_in_range(0..=CHANCE_SEGMENT_ON_EMPTY)? != 0 {
+ continue;
+ }
+
+ let dst = if ty.element_type == RefType::FUNCREF {
+ &mut funcrefs
+ } else {
+ &mut externrefs
+ };
+ let minimum = ty.minimum;
+ // If the first table is a funcref table then it's a candidate for
+ // the MVP encoding of element segments.
+ if i == 0 && ty.element_type == RefType::FUNCREF {
+ dst.push(Box::new(move |u| {
+ arbitrary_active_elem(u, minimum, None, disallow_traps, ty)
+ }));
+ }
+ if self.config.bulk_memory_enabled {
+ let idx = Some(i as u32);
+ dst.push(Box::new(move |u| {
+ arbitrary_active_elem(u, minimum, idx, disallow_traps, ty)
+ }));
+ }
+ }
+
+ // Bulk memory enables passive/declared segments for funcrefs, and
+ // reference types additionally enables the segments for externrefs.
+ if self.config.bulk_memory_enabled {
+ funcrefs.push(Box::new(|_| Ok((ElementKind::Passive, None))));
+ funcrefs.push(Box::new(|_| Ok((ElementKind::Declared, None))));
+ if self.config.reference_types_enabled {
+ externrefs.push(Box::new(|_| Ok((ElementKind::Passive, None))));
+ externrefs.push(Box::new(|_| Ok((ElementKind::Declared, None))));
+ }
+ }
+
+ let mut choices = Vec::new();
+ if !funcrefs.is_empty() {
+ choices.push((&funcrefs, RefType::FUNCREF));
+ }
+ if !externrefs.is_empty() {
+ choices.push((&externrefs, RefType::EXTERNREF));
+ }
+
+ if choices.is_empty() {
+ return Ok(());
+ }
+ arbitrary_loop(
+ u,
+ self.config.min_element_segments,
+ self.config.max_element_segments,
+ |u| {
+ // Choose which reference type (funcref or externref) this
+ // segment's elements will have, from the candidates built above.
+ let (kind_candidates, ty) = *u.choose(&choices)?;
+
+ // Select a kind for this segment; this also yields an optional
+ // hint bounding how many items the segment may hold.
+ let (kind, max_size_hint) = u.choose(kind_candidates)?(u)?;
+ let max = max_size_hint
+ .map(|i| usize::try_from(i).unwrap())
+ .unwrap_or_else(|| self.config.max_elements);
+
+ // Pick whether we're going to use expression elements or
+ // indices. Note that externrefs must use expressions,
+ // and funcref elements must use indices when reference types are disabled.
+ let items = if ty == RefType::EXTERNREF
+ || (self.config.reference_types_enabled && u.arbitrary()?)
+ {
+ let mut init = vec![];
+ arbitrary_loop(u, self.config.min_elements, max, |u| {
+ init.push(
+ if ty == RefType::EXTERNREF || func_max == 0 || u.arbitrary()? {
+ None
+ } else {
+ Some(u.int_in_range(0..=func_max - 1)?)
+ },
+ );
+ Ok(true)
+ })?;
+ Elements::Expressions(init)
+ } else {
+ let mut init = vec![];
+ if func_max > 0 {
+ arbitrary_loop(u, self.config.min_elements, max, |u| {
+ let func_idx = u.int_in_range(0..=func_max - 1)?;
+ init.push(func_idx);
+ Ok(true)
+ })?;
+ }
+ Elements::Functions(init)
+ };
+
+ self.elems.push(ElementSegment { kind, ty, items });
+ Ok(true)
+ },
+ )
+ }
+
+ fn arbitrary_code(&mut self, u: &mut Unstructured, allow_invalid: bool) -> Result<()> {
+ self.code.reserve(self.num_defined_funcs);
+ let mut allocs = CodeBuilderAllocations::new(self);
+ for (_, ty) in self.funcs[self.funcs.len() - self.num_defined_funcs..].iter() {
+ let body = self.arbitrary_func_body(u, ty, &mut allocs, allow_invalid)?;
+ self.code.push(body);
+ }
+ allocs.finish(u, self)?;
+ Ok(())
+ }
+
+ fn arbitrary_func_body(
+ &self,
+ u: &mut Unstructured,
+ ty: &FuncType,
+ allocs: &mut CodeBuilderAllocations,
+ allow_invalid: bool,
+ ) -> Result<Code> {
+ let mut locals = self.arbitrary_locals(u)?;
+ let builder = allocs.builder(ty, &mut locals);
+ let instructions = if allow_invalid && u.arbitrary().unwrap_or(false) {
+ Instructions::Arbitrary(arbitrary_vec_u8(u)?)
+ } else {
+ Instructions::Generated(builder.arbitrary(u, self)?)
+ };
+
+ Ok(Code {
+ locals,
+ instructions,
+ })
+ }
+
+ fn arbitrary_locals(&self, u: &mut Unstructured) -> Result<Vec<ValType>> {
+ let mut ret = Vec::new();
+ arbitrary_loop(u, 0, 100, |u| {
+ ret.push(self.arbitrary_valtype(u, u32::try_from(self.types.len()).unwrap())?);
+ Ok(true)
+ })?;
+ Ok(ret)
+ }
+
+ fn arbitrary_data(&mut self, u: &mut Unstructured) -> Result<()> {
+ // With bulk memory we can generate passive data; otherwise, if there
+ // are no memories, we can't generate any data at all.
+ let memories = self.memories.len() as u32;
+ if memories == 0 && !self.config.bulk_memory_enabled {
+ return Ok(());
+ }
+ let disallow_traps = self.config.disallow_traps;
+ let mut choices32: Vec<Box<dyn Fn(&mut Unstructured, u64, usize) -> Result<Offset>>> =
+ vec![];
+ choices32.push(Box::new(|u, min_size, data_len| {
+ let min = u32::try_from(min_size.saturating_mul(64 * 1024))
+ .unwrap_or(u32::MAX)
+ .into();
+ let max = if disallow_traps { min } else { u32::MAX.into() };
+ Ok(Offset::Const32(
+ arbitrary_offset(u, min, max, data_len)? as i32
+ ))
+ }));
+ let mut choices64: Vec<Box<dyn Fn(&mut Unstructured, u64, usize) -> Result<Offset>>> =
+ vec![];
+ choices64.push(Box::new(|u, min_size, data_len| {
+ let min = min_size.saturating_mul(64 * 1024);
+ let max = if disallow_traps { min } else { u64::MAX };
+ Ok(Offset::Const64(
+ arbitrary_offset(u, min, max, data_len)? as i64
+ ))
+ }));
+ if !self.config.disallow_traps {
+ for (i, g) in self.globals[..self.globals.len() - self.defined_globals.len()]
+ .iter()
+ .enumerate()
+ {
+ if g.mutable {
+ continue;
+ }
+ if g.val_type == ValType::I32 {
+ choices32.push(Box::new(move |_, _, _| Ok(Offset::Global(i as u32))));
+ } else if g.val_type == ValType::I64 {
+ choices64.push(Box::new(move |_, _, _| Ok(Offset::Global(i as u32))));
+ }
+ }
+ }
+
+ // Build a list of candidate memories that we'll add data initializers
+ // for. If a memory doesn't have an initial size then any initializers
+ // for that memory will trap instantiation, which isn't too
+ // interesting. Try to make this happen less often by making it less
+ // likely that a memory with 0 size will have a data segment.
+ let mut memories = Vec::new();
+ for (i, mem) in self.memories.iter().enumerate() {
+ if mem.minimum > 0 || u.int_in_range(0..=CHANCE_SEGMENT_ON_EMPTY)? == 0 {
+ memories.push(i as u32);
+ }
+ }
+
+ // With memories we can generate data segments, and with bulk memory we
+ // can generate passive segments. Without these though we can't create
+ // a valid module with data segments.
+ if memories.is_empty() && !self.config.bulk_memory_enabled {
+ return Ok(());
+ }
+
+ arbitrary_loop(
+ u,
+ self.config.min_data_segments,
+ self.config.max_data_segments,
+ |u| {
+ let mut init: Vec<u8> = u.arbitrary()?;
+
+ // Passive data can only be generated if bulk memory is enabled.
+ // Additionally, if there are no memories, passive data is the
+ // *only* kind we can generate. When both kinds are possible we
+ // use an input byte to determine whether the segment should be
+ // passive or active.
+ let kind =
+ if self.config.bulk_memory_enabled && (memories.is_empty() || u.arbitrary()?) {
+ DataSegmentKind::Passive
+ } else {
+ let memory_index = *u.choose(&memories)?;
+ let mem = &self.memories[memory_index as usize];
+ let f = if mem.memory64 {
+ u.choose(&choices64)?
+ } else {
+ u.choose(&choices32)?
+ };
+ let mut offset = f(u, mem.minimum, init.len())?;
+
+ // If traps are disallowed then truncate the size of the
+ // data segment to the minimum size of memory to guarantee
+ // it will fit. Afterwards ensure that the offset of the
+ // data segment is in-bounds by clamping it to the largest
+ // offset that keeps the whole segment within that size.
+ if self.config.disallow_traps {
+ let max_size = (u64::MAX / 64 / 1024).min(mem.minimum) * 64 * 1024;
+ init.truncate(max_size as usize);
+ let max_offset = max_size - init.len() as u64;
+ match &mut offset {
+ Offset::Const32(x) => {
+ *x = (*x as u64).min(max_offset) as i32;
+ }
+ Offset::Const64(x) => {
+ *x = (*x as u64).min(max_offset) as i64;
+ }
+ Offset::Global(_) => unreachable!(),
+ }
+ }
+ DataSegmentKind::Active {
+ offset,
+ memory_index,
+ }
+ };
+ self.data.push(DataSegment { kind, init });
+ Ok(true)
+ },
+ )
+ }
+
+ fn params_results(&self, ty: &BlockType) -> (Vec<ValType>, Vec<ValType>) {
+ match ty {
+ BlockType::Empty => (vec![], vec![]),
+ BlockType::Result(t) => (vec![], vec![*t]),
+ BlockType::FunctionType(ty) => {
+ let ty = self.func_type(*ty);
+ (ty.params.to_vec(), ty.results.to_vec())
+ }
+ }
+ }
+}
+
+pub(crate) fn arbitrary_limits32(
+ u: &mut Unstructured,
+ min_minimum: Option<u32>,
+ max_minimum: u32,
+ max_required: bool,
+ max_inbounds: u32,
+) -> Result<(u32, Option<u32>)> {
+ let (min, max) = arbitrary_limits64(
+ u,
+ min_minimum.map(Into::into),
+ max_minimum.into(),
+ max_required,
+ max_inbounds.into(),
+ )?;
+ Ok((
+ u32::try_from(min).unwrap(),
+ max.map(|i| u32::try_from(i).unwrap()),
+ ))
+}
+
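+// For example, `arbitrary_limits64(u, None, 100, false, 10)` usually draws a
+// minimum in `0..=10` (though it may be as large as 100) and, roughly half the
+// time, also draws a maximum in `min..=100`.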
+pub(crate) fn arbitrary_limits64(
+ u: &mut Unstructured,
+ min_minimum: Option<u64>,
+ max_minimum: u64,
+ max_required: bool,
+ max_inbounds: u64,
+) -> Result<(u64, Option<u64>)> {
+ let min = gradually_grow(u, min_minimum.unwrap_or(0), max_inbounds, max_minimum)?;
+ let max = if max_required || u.arbitrary().unwrap_or(false) {
+ Some(u.int_in_range(min..=max_minimum)?)
+ } else {
+ None
+ };
+ Ok((min, max))
+}
+
+pub(crate) fn configured_valtypes(config: &Config) -> Vec<ValType> {
+ let mut valtypes = Vec::with_capacity(25);
+ valtypes.push(ValType::I32);
+ valtypes.push(ValType::I64);
+ valtypes.push(ValType::F32);
+ valtypes.push(ValType::F64);
+ if config.simd_enabled {
+ valtypes.push(ValType::V128);
+ }
+ if config.gc_enabled {
+ for nullable in [
+ // TODO: For now, only create nullable reference
+ // types. Eventually we should support non-nullable reference types,
+ // but this means that we will also need to recognize when it is
+ // impossible to create an instance of the reference (e.g. `(ref
+ // nofunc)` has no instances, and self-referential types that
+ // contain a non-null self-reference are also impossible to create).
+ true,
+ ] {
+ for heap_type in [
+ HeapType::Any,
+ HeapType::Eq,
+ HeapType::I31,
+ HeapType::Array,
+ HeapType::Struct,
+ HeapType::None,
+ HeapType::Func,
+ HeapType::NoFunc,
+ HeapType::Extern,
+ HeapType::NoExtern,
+ ] {
+ valtypes.push(ValType::Ref(RefType {
+ nullable,
+ heap_type,
+ }));
+ }
+ }
+ } else if config.reference_types_enabled {
+ valtypes.push(ValType::EXTERNREF);
+ valtypes.push(ValType::FUNCREF);
+ }
+ valtypes
+}
+
+pub(crate) fn arbitrary_func_type(
+ u: &mut Unstructured,
+ config: &Config,
+ valtypes: &[ValType],
+ max_results: Option<usize>,
+ type_ref_limit: u32,
+) -> Result<Rc<FuncType>> {
+ let mut params = vec![];
+ let mut results = vec![];
+ arbitrary_loop(u, 0, 20, |u| {
+ params.push(arbitrary_valtype(u, config, valtypes, type_ref_limit)?);
+ Ok(true)
+ })?;
+ arbitrary_loop(u, 0, max_results.unwrap_or(20), |u| {
+ results.push(arbitrary_valtype(u, config, valtypes, type_ref_limit)?);
+ Ok(true)
+ })?;
+ Ok(Rc::new(FuncType { params, results }))
+}
+
+fn arbitrary_valtype(
+ u: &mut Unstructured,
+ config: &Config,
+ valtypes: &[ValType],
+ type_ref_limit: u32,
+) -> Result<ValType> {
+ if config.gc_enabled && type_ref_limit > 0 && u.ratio(1, 20)? {
+ Ok(ValType::Ref(RefType {
+ // TODO: For now, only create nullable reference
+ // types. Eventually we should support non-nullable reference types,
+ // but this means that we will also need to recognize when it is
+ // impossible to create an instance of the reference (e.g. `(ref
+ // nofunc)` has no instances, and self-referential types that
+ // contain a non-null self-reference are also impossible to create).
+ nullable: true,
+ heap_type: HeapType::Concrete(u.int_in_range(0..=type_ref_limit - 1)?),
+ }))
+ } else {
+ Ok(*u.choose(valtypes)?)
+ }
+}
+
+pub(crate) fn arbitrary_table_type(u: &mut Unstructured, config: &Config) -> Result<TableType> {
+ // We don't want to generate tables that are too large on average, so
+ // keep the "inbounds" limit here a bit smaller.
+ let max_inbounds = 10_000;
+ let min_elements = if config.disallow_traps { Some(1) } else { None };
+ let max_elements = min_elements.unwrap_or(0).max(config.max_table_elements);
+ let (minimum, maximum) = arbitrary_limits32(
+ u,
+ min_elements,
+ max_elements,
+ config.table_max_size_required,
+ max_inbounds.min(max_elements),
+ )?;
+ if config.disallow_traps {
+ assert!(minimum > 0);
+ }
+ Ok(TableType {
+ element_type: if config.reference_types_enabled {
+ *u.choose(&[RefType::FUNCREF, RefType::EXTERNREF])?
+ } else {
+ RefType::FUNCREF
+ },
+ minimum,
+ maximum,
+ })
+}
+
+pub(crate) fn arbitrary_memtype(u: &mut Unstructured, config: &Config) -> Result<MemoryType> {
+ // When threads are enabled, we only want to generate shared memories about
+ // 25% of the time.
+ let shared = config.threads_enabled && u.ratio(1, 4)?;
+ // We want to favor memories <= 1 GiB in size, so we allocate at most
+ // 16k pages (64 KiB each), divided by the maximum number of memories.
+ let memory64 = config.memory64_enabled && u.arbitrary()?;
+ let max_inbounds = 16 * 1024 / u64::try_from(config.max_memories).unwrap();
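+ // For example, with a single allowed memory this is 16384 pages, i.e.
+ // the favored in-bounds sizes stay at or below 1 GiB.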
+ let min_pages = if config.disallow_traps { Some(1) } else { None };
+ let max_pages = min_pages.unwrap_or(0).max(if memory64 {
+ config.max_memory64_pages
+ } else {
+ config.max_memory32_pages
+ });
+ let (minimum, maximum) = arbitrary_limits64(
+ u,
+ min_pages,
+ max_pages,
+ config.memory_max_size_required || shared,
+ max_inbounds.min(max_pages),
+ )?;
+ Ok(MemoryType {
+ minimum,
+ maximum,
+ memory64,
+ shared,
+ })
+}
+
+pub(crate) fn arbitrary_tag_type(
+ u: &mut Unstructured,
+ candidate_func_types: &[u32],
+ get_func_type: impl FnOnce(u32) -> Rc<FuncType>,
+) -> Result<TagType> {
+ let max = candidate_func_types.len() - 1;
+ let ty = candidate_func_types[u.int_in_range(0..=max)?];
+ Ok(TagType {
+ func_type_idx: ty,
+ func_type: get_func_type(ty),
+ })
+}
+
+/// This function generates a number between `min` and `max`, favoring values
+/// between `min` and `max_inbounds`.
+///
+/// The thinking behind this function is that it's used for things like offsets
+/// and minimum sizes which, when very large, can trivially make the wasm oom or
+/// abort with a trap. That isn't the most interesting behavior to exercise, so
+/// this function favors numbers in the `min..max_inbounds` range to avoid
+/// immediate ooms.
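+///
+/// For example, with `min = 0`, `max_inbounds = 16`, and `max = 65536`, the
+/// bulk of draws (the fraction set by `PCT_INBOUNDS`) land in `0..=16`, while
+/// the rest may land anywhere up to `65536`.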
+fn gradually_grow(u: &mut Unstructured, min: u64, max_inbounds: u64, max: u64) -> Result<u64> {
+ if min == max {
+ return Ok(min);
+ }
+ let min = min as f64;
+ let max = max as f64;
+ let max_inbounds = max_inbounds as f64;
+ let x = u.arbitrary::<u32>()?;
+ let x = f64::from(x);
+ let x = map_custom(
+ x,
+ f64::from(u32::MIN)..f64::from(u32::MAX),
+ min..max_inbounds,
+ min..max,
+ );
+ return Ok(x.round() as u64);
+
+ /// Map a value from within the input range to the output range(s).
+ ///
+ /// This will first map the input range into the `0..1` input range, and
+ /// then depending on the value it will either map it exponentially
+ /// (favoring small values) into the `output_inbounds` range or it will map
+ /// it into the `output` range.
+ fn map_custom(
+ value: f64,
+ input: Range<f64>,
+ output_inbounds: Range<f64>,
+ output: Range<f64>,
+ ) -> f64 {
+ assert!(!value.is_nan(), "{}", value);
+ assert!(value.is_finite(), "{}", value);
+ assert!(input.start < input.end, "{} < {}", input.start, input.end);
+ assert!(
+ output.start < output.end,
+ "{} < {}",
+ output.start,
+ output.end
+ );
+ assert!(value >= input.start, "{} >= {}", value, input.start);
+ assert!(value <= input.end, "{} <= {}", value, input.end);
+ assert!(
+ output.start <= output_inbounds.start,
+ "{} <= {}",
+ output.start,
+ output_inbounds.start
+ );
+ assert!(
+ output_inbounds.end <= output.end,
+ "{} <= {}",
+ output_inbounds.end,
+ output.end
+ );
+
+ let x = map_linear(value, input, 0.0..1.0);
+ let result = if x < PCT_INBOUNDS {
+ if output_inbounds.start == output_inbounds.end {
+ output_inbounds.start
+ } else {
+ let unscaled = x * x * x * x * x * x;
+ map_linear(unscaled, 0.0..1.0, output_inbounds)
+ }
+ } else {
+ map_linear(x, 0.0..1.0, output.clone())
+ };
+
+ assert!(result >= output.start, "{} >= {}", result, output.start);
+ assert!(result <= output.end, "{} <= {}", result, output.end);
+ result
+ }
+
+ /// Map a value from within the input range linearly to the output range.
+ ///
+ /// For example, mapping `0.5` from the input range `0.0..1.0` to the output
+ /// range `1.0..3.0` produces `2.0`.
+ fn map_linear(
+ value: f64,
+ Range {
+ start: in_low,
+ end: in_high,
+ }: Range<f64>,
+ Range {
+ start: out_low,
+ end: out_high,
+ }: Range<f64>,
+ ) -> f64 {
+ assert!(!value.is_nan(), "{}", value);
+ assert!(value.is_finite(), "{}", value);
+ assert!(in_low < in_high, "{} < {}", in_low, in_high);
+ assert!(out_low < out_high, "{} < {}", out_low, out_high);
+ assert!(value >= in_low, "{} >= {}", value, in_low);
+ assert!(value <= in_high, "{} <= {}", value, in_high);
+
+ let dividend = out_high - out_low;
+ let divisor = in_high - in_low;
+ let slope = dividend / divisor;
+ let result = out_low + (slope * (value - in_low));
+
+ assert!(result >= out_low, "{} >= {}", result, out_low);
+ assert!(result <= out_high, "{} <= {}", result, out_high);
+ result
+ }
+}
+
+/// Selects a reasonable offset for an element or data segment. This favors
+/// keeping the segment in-bounds, but it may still generate
+/// any offset.
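+///
+/// For example, a 10-byte data segment targeting a memory whose minimum size
+/// is one 64 KiB page will usually be placed at an offset in `0..=65526`, so
+/// the whole segment stays in bounds.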
+fn arbitrary_offset(
+ u: &mut Unstructured,
+ limit_min: u64,
+ limit_max: u64,
+ segment_size: usize,
+) -> Result<u64> {
+ let size = u64::try_from(segment_size).unwrap();
+
+ // If the segment is too big for the whole memory, just give it any
+ // offset.
+ if size > limit_min {
+ u.int_in_range(0..=limit_max)
+ } else {
+ gradually_grow(u, 0, limit_min - size, limit_max)
+ }
+}
+
+fn unique_import_strings(max_size: usize, u: &mut Unstructured) -> Result<(String, String)> {
+ let module = limited_string(max_size, u)?;
+ let field = limited_string(max_size, u)?;
+ Ok((module, field))
+}
+
+fn arbitrary_vec_u8(u: &mut Unstructured) -> Result<Vec<u8>> {
+ let size = u.arbitrary_len::<u8>()?;
+ Ok(u.bytes(size)?.to_vec())
+}
+
+impl EntityType {
+ fn size(&self) -> u32 {
+ match self {
+ EntityType::Tag(_)
+ | EntityType::Global(_)
+ | EntityType::Table(_)
+ | EntityType::Memory(_) => 1,
+ EntityType::Func(_, ty) => 1 + (ty.params.len() + ty.results.len()) as u32,
+ }
+ }
+}
+
+// A helper structure used when generating module/instance types to limit the
+// amount of each kind of import created.
+#[derive(Default, Clone, Copy, PartialEq)]
+struct Entities {
+ globals: usize,
+ memories: usize,
+ tables: usize,
+ funcs: usize,
+ tags: usize,
+}
+
+/// A container for the kinds of instructions that wasm-smith is allowed to
+/// emit.
+///
+/// # Example
+///
+/// ```
+/// # use wasm_smith::{InstructionKinds, InstructionKind};
+/// let kinds = InstructionKinds::new(&[InstructionKind::Numeric, InstructionKind::Memory]);
+/// assert!(kinds.contains(InstructionKind::Memory));
+/// ```
+#[derive(Clone, Copy, Debug, Default)]
+#[cfg_attr(feature = "serde_derive", derive(serde_derive::Deserialize))]
+pub struct InstructionKinds(pub(crate) FlagSet<InstructionKind>);
+
+impl InstructionKinds {
+ /// Create a new container.
+ pub fn new(kinds: &[InstructionKind]) -> Self {
+ Self(kinds.iter().fold(FlagSet::default(), |ks, k| ks | *k))
+ }
+
+ /// Include all [InstructionKind]s.
+ pub fn all() -> Self {
+ Self(FlagSet::full())
+ }
+
+ /// Include no [InstructionKind]s.
+ pub fn none() -> Self {
+ Self(FlagSet::default())
+ }
+
+ /// Check if the [InstructionKind] is contained in this set.
+ #[inline]
+ pub fn contains(&self, kind: InstructionKind) -> bool {
+ self.0.contains(kind)
+ }
+}
+
+flags! {
+ /// Enumerate the categories of instructions defined in the [WebAssembly
+ /// specification](https://webassembly.github.io/spec/core/syntax/instructions.html).
+ #[allow(missing_docs)]
+ #[cfg_attr(feature = "_internal_cli", derive(serde_derive::Deserialize))]
+ pub enum InstructionKind: u16 {
+ Numeric,
+ Vector,
+ Reference,
+ Parametric,
+ Variable,
+ Table,
+ Memory,
+ Control,
+ Aggregate,
+ }
+}
+
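+// Parsing a comma-separated, case-insensitive list is also supported; e.g.
+// `"numeric,memory".parse::<InstructionKinds>()` yields the same set as
+// `InstructionKinds::new(&[InstructionKind::Numeric, InstructionKind::Memory])`.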
+impl FromStr for InstructionKinds {
+ type Err = String;
+ fn from_str(s: &str) -> std::prelude::v1::Result<Self, Self::Err> {
+ let mut kinds = vec![];
+ for part in s.split(",") {
+ let kind = InstructionKind::from_str(part)?;
+ kinds.push(kind);
+ }
+ Ok(InstructionKinds::new(&kinds))
+ }
+}
+
+impl FromStr for InstructionKind {
+ type Err = String;
+ fn from_str(s: &str) -> std::result::Result<Self, Self::Err> {
+ match s.to_lowercase().as_str() {
+ "numeric" => Ok(InstructionKind::Numeric),
+ "vector" => Ok(InstructionKind::Vector),
+ "reference" => Ok(InstructionKind::Reference),
+ "parametric" => Ok(InstructionKind::Parametric),
+ "variable" => Ok(InstructionKind::Variable),
+ "table" => Ok(InstructionKind::Table),
+ "memory" => Ok(InstructionKind::Memory),
+ "control" => Ok(InstructionKind::Control),
+ _ => Err(format!("unknown instruction kind: {}", s)),
+ }
+ }
+}
diff --git a/third_party/rust/wasm-smith/src/core/code_builder.rs b/third_party/rust/wasm-smith/src/core/code_builder.rs
new file mode 100644
index 0000000000..1bc09008ad
--- /dev/null
+++ b/third_party/rust/wasm-smith/src/core/code_builder.rs
@@ -0,0 +1,7058 @@
+use super::{
+ CompositeType, Elements, FuncType, GlobalInitExpr, Instruction, InstructionKind::*,
+ InstructionKinds, Module, ValType,
+};
+use crate::{unique_string, MemoryOffsetChoices};
+use arbitrary::{Result, Unstructured};
+use std::collections::{BTreeMap, BTreeSet};
+use std::convert::TryFrom;
+use std::rc::Rc;
+use wasm_encoder::{
+ ArrayType, BlockType, Catch, ConstExpr, ExportKind, FieldType, GlobalType, HeapType, MemArg,
+ RefType, StorageType, StructType,
+};
+mod no_traps;
+
+macro_rules! instructions {
+ (
+ $(
+ ($predicate:expr, $generator_fn:ident, $instruction_kind:ident $(, $cost:tt)?),
+ )*
+ ) => {
+ static NUM_OPTIONS: usize = instructions!(
+ @count;
+ $( $generator_fn )*
+ );
+
+ fn choose_instruction(
+ u: &mut Unstructured<'_>,
+ module: &Module,
+ allowed_instructions: InstructionKinds,
+ builder: &mut CodeBuilder,
+ ) -> Option<
+ fn(&mut Unstructured<'_>, &Module, &mut CodeBuilder, &mut Vec<Instruction>) -> Result<()>
+ > {
+ builder.allocs.options.clear();
+ let mut cost = 0;
+ // Unroll the loop that checks whether each instruction is valid in
+ // the current context and, if it is valid, pushes it onto our
+ // options. Unrolling this loop lets us avoid dynamic calls through
+ // function pointers and, furthermore, each call site can be branch
+ // predicted and even inlined. This saved us about 30% of time in
+ // the `corpus` benchmark.
+ $(
+ let predicate: Option<fn(&Module, &mut CodeBuilder) -> bool> = $predicate;
+ if predicate.map_or(true, |f| f(module, builder))
+ && allowed_instructions.contains($instruction_kind) {
+ builder.allocs.options.push(($generator_fn, cost));
+ cost += 1000 $(- $cost)?;
+ }
+ )*
+
+ // If there aren't actually any candidate instructions due to
+ // various filters in place then return `None` to indicate the
+ // situation.
+ if cost == 0 {
+ return None;
+ }
+
+ let i = u.int_in_range(0..=cost).ok()?;
+ let idx = builder
+ .allocs
+ .options
+ .binary_search_by_key(&i, |p| p.1)
+ .unwrap_or_else(|i| i - 1);
+ Some(builder.allocs.options[idx].0)
+ }
+ };
+
+ ( @count; ) => {
+ 0
+ };
+ ( @count; $x:ident $( $xs:ident )* ) => {
+ 1 + instructions!( @count; $( $xs )* )
+ };
+}
+
+// The static set of candidate instructions that could be valid to generate at
+// some given point. One entry per Wasm instruction.
+//
+// Each entry is made up of up to four parts:
+//
+// 1. A predicate for whether this is a valid choice, if any. `None` means that
+// the choice is always applicable.
+//
+// 2. The function to generate the instruction, given that we've made this
+// choice.
+//
+// 3. The `InstructionKind` the instruction belongs to; this allows filtering
+// out instructions by category.
+//
+// 4. An optional number used to weight how often this instruction is chosen.
+// Higher numbers make the instruction less likely to be chosen, and the
+// number specified must be less than 1000.
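+//
+// For example, an entry weighted 990 adds only 10 to the running cost total
+// in `choose_instruction`, so the uniform draw over `0..=cost` selects it far
+// less often than an unweighted entry, which adds the full 1000.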
+instructions! {
+ // Control instructions.
+ (Some(unreachable_valid), unreachable, Control, 990),
+ (None, nop, Control, 800),
+ (None, block, Control),
+ (None, r#loop, Control),
+ (Some(try_table_valid), try_table, Control),
+ (Some(if_valid), r#if, Control),
+ (Some(else_valid), r#else, Control),
+ (Some(end_valid), end, Control),
+ (Some(br_valid), br, Control),
+ (Some(br_if_valid), br_if, Control),
+ (Some(br_table_valid), br_table, Control),
+ (Some(return_valid), r#return, Control, 900),
+ (Some(call_valid), call, Control),
+ (Some(call_ref_valid), call_ref, Control),
+ (Some(call_indirect_valid), call_indirect, Control),
+ (Some(return_call_valid), return_call, Control),
+ (Some(return_call_ref_valid), return_call_ref, Control),
+ (Some(return_call_indirect_valid), return_call_indirect, Control),
+ (Some(throw_valid), throw, Control, 850),
+ (Some(throw_ref_valid), throw_ref, Control, 850),
+ (Some(br_on_null_valid), br_on_null, Control),
+ (Some(br_on_non_null_valid), br_on_non_null, Control),
+ (Some(br_on_cast_valid), br_on_cast, Control),
+ (Some(br_on_cast_fail_valid), br_on_cast_fail, Control),
+ // Parametric instructions.
+ (Some(drop_valid), drop, Parametric, 990),
+ (Some(select_valid), select, Parametric),
+ // Variable instructions.
+ (Some(local_get_valid), local_get, Variable),
+ (Some(local_set_valid), local_set, Variable),
+ (Some(local_set_valid), local_tee, Variable),
+ (Some(global_get_valid), global_get, Variable),
+ (Some(global_set_valid), global_set, Variable),
+ // Memory instructions.
+ (Some(have_memory_and_offset), i32_load, Memory),
+ (Some(have_memory_and_offset), i64_load, Memory),
+ (Some(have_memory_and_offset), f32_load, Memory),
+ (Some(have_memory_and_offset), f64_load, Memory),
+ (Some(have_memory_and_offset), i32_load_8_s, Memory),
+ (Some(have_memory_and_offset), i32_load_8_u, Memory),
+ (Some(have_memory_and_offset), i32_load_16_s, Memory),
+ (Some(have_memory_and_offset), i32_load_16_u, Memory),
+ (Some(have_memory_and_offset), i64_load_8_s, Memory),
+ (Some(have_memory_and_offset), i64_load_16_s, Memory),
+ (Some(have_memory_and_offset), i64_load_32_s, Memory),
+ (Some(have_memory_and_offset), i64_load_8_u, Memory),
+ (Some(have_memory_and_offset), i64_load_16_u, Memory),
+ (Some(have_memory_and_offset), i64_load_32_u, Memory),
+ (Some(i32_store_valid), i32_store, Memory),
+ (Some(i64_store_valid), i64_store, Memory),
+ (Some(f32_store_valid), f32_store, Memory),
+ (Some(f64_store_valid), f64_store, Memory),
+ (Some(i32_store_valid), i32_store_8, Memory),
+ (Some(i32_store_valid), i32_store_16, Memory),
+ (Some(i64_store_valid), i64_store_8, Memory),
+ (Some(i64_store_valid), i64_store_16, Memory),
+ (Some(i64_store_valid), i64_store_32, Memory),
+ (Some(have_memory), memory_size, Memory),
+ (Some(memory_grow_valid), memory_grow, Memory),
+ (Some(memory_init_valid), memory_init, Memory),
+ (Some(data_drop_valid), data_drop, Memory),
+ (Some(memory_copy_valid), memory_copy, Memory),
+ (Some(memory_fill_valid), memory_fill, Memory),
+ // Numeric instructions.
+ (None, i32_const, Numeric),
+ (None, i64_const, Numeric),
+ (None, f32_const, Numeric),
+ (None, f64_const, Numeric),
+ (Some(i32_on_stack), i32_eqz, Numeric),
+ (Some(i32_i32_on_stack), i32_eq, Numeric),
+ (Some(i32_i32_on_stack), i32_ne, Numeric),
+ (Some(i32_i32_on_stack), i32_lt_s, Numeric),
+ (Some(i32_i32_on_stack), i32_lt_u, Numeric),
+ (Some(i32_i32_on_stack), i32_gt_s, Numeric),
+ (Some(i32_i32_on_stack), i32_gt_u, Numeric),
+ (Some(i32_i32_on_stack), i32_le_s, Numeric),
+ (Some(i32_i32_on_stack), i32_le_u, Numeric),
+ (Some(i32_i32_on_stack), i32_ge_s, Numeric),
+ (Some(i32_i32_on_stack), i32_ge_u, Numeric),
+ (Some(i64_on_stack), i64_eqz, Numeric),
+ (Some(i64_i64_on_stack), i64_eq, Numeric),
+ (Some(i64_i64_on_stack), i64_ne, Numeric),
+ (Some(i64_i64_on_stack), i64_lt_s, Numeric),
+ (Some(i64_i64_on_stack), i64_lt_u, Numeric),
+ (Some(i64_i64_on_stack), i64_gt_s, Numeric),
+ (Some(i64_i64_on_stack), i64_gt_u, Numeric),
+ (Some(i64_i64_on_stack), i64_le_s, Numeric),
+ (Some(i64_i64_on_stack), i64_le_u, Numeric),
+ (Some(i64_i64_on_stack), i64_ge_s, Numeric),
+ (Some(i64_i64_on_stack), i64_ge_u, Numeric),
+ (Some(f32_f32_on_stack), f32_eq, Numeric),
+ (Some(f32_f32_on_stack), f32_ne, Numeric),
+ (Some(f32_f32_on_stack), f32_lt, Numeric),
+ (Some(f32_f32_on_stack), f32_gt, Numeric),
+ (Some(f32_f32_on_stack), f32_le, Numeric),
+ (Some(f32_f32_on_stack), f32_ge, Numeric),
+ (Some(f64_f64_on_stack), f64_eq, Numeric),
+ (Some(f64_f64_on_stack), f64_ne, Numeric),
+ (Some(f64_f64_on_stack), f64_lt, Numeric),
+ (Some(f64_f64_on_stack), f64_gt, Numeric),
+ (Some(f64_f64_on_stack), f64_le, Numeric),
+ (Some(f64_f64_on_stack), f64_ge, Numeric),
+ (Some(i32_on_stack), i32_clz, Numeric),
+ (Some(i32_on_stack), i32_ctz, Numeric),
+ (Some(i32_on_stack), i32_popcnt, Numeric),
+ (Some(i32_i32_on_stack), i32_add, Numeric),
+ (Some(i32_i32_on_stack), i32_sub, Numeric),
+ (Some(i32_i32_on_stack), i32_mul, Numeric),
+ (Some(i32_i32_on_stack), i32_div_s, Numeric),
+ (Some(i32_i32_on_stack), i32_div_u, Numeric),
+ (Some(i32_i32_on_stack), i32_rem_s, Numeric),
+ (Some(i32_i32_on_stack), i32_rem_u, Numeric),
+ (Some(i32_i32_on_stack), i32_and, Numeric),
+ (Some(i32_i32_on_stack), i32_or, Numeric),
+ (Some(i32_i32_on_stack), i32_xor, Numeric),
+ (Some(i32_i32_on_stack), i32_shl, Numeric),
+ (Some(i32_i32_on_stack), i32_shr_s, Numeric),
+ (Some(i32_i32_on_stack), i32_shr_u, Numeric),
+ (Some(i32_i32_on_stack), i32_rotl, Numeric),
+ (Some(i32_i32_on_stack), i32_rotr, Numeric),
+ (Some(i64_on_stack), i64_clz, Numeric),
+ (Some(i64_on_stack), i64_ctz, Numeric),
+ (Some(i64_on_stack), i64_popcnt, Numeric),
+ (Some(i64_i64_on_stack), i64_add, Numeric),
+ (Some(i64_i64_on_stack), i64_sub, Numeric),
+ (Some(i64_i64_on_stack), i64_mul, Numeric),
+ (Some(i64_i64_on_stack), i64_div_s, Numeric),
+ (Some(i64_i64_on_stack), i64_div_u, Numeric),
+ (Some(i64_i64_on_stack), i64_rem_s, Numeric),
+ (Some(i64_i64_on_stack), i64_rem_u, Numeric),
+ (Some(i64_i64_on_stack), i64_and, Numeric),
+ (Some(i64_i64_on_stack), i64_or, Numeric),
+ (Some(i64_i64_on_stack), i64_xor, Numeric),
+ (Some(i64_i64_on_stack), i64_shl, Numeric),
+ (Some(i64_i64_on_stack), i64_shr_s, Numeric),
+ (Some(i64_i64_on_stack), i64_shr_u, Numeric),
+ (Some(i64_i64_on_stack), i64_rotl, Numeric),
+ (Some(i64_i64_on_stack), i64_rotr, Numeric),
+ (Some(f32_on_stack), f32_abs, Numeric),
+ (Some(f32_on_stack), f32_neg, Numeric),
+ (Some(f32_on_stack), f32_ceil, Numeric),
+ (Some(f32_on_stack), f32_floor, Numeric),
+ (Some(f32_on_stack), f32_trunc, Numeric),
+ (Some(f32_on_stack), f32_nearest, Numeric),
+ (Some(f32_on_stack), f32_sqrt, Numeric),
+ (Some(f32_f32_on_stack), f32_add, Numeric),
+ (Some(f32_f32_on_stack), f32_sub, Numeric),
+ (Some(f32_f32_on_stack), f32_mul, Numeric),
+ (Some(f32_f32_on_stack), f32_div, Numeric),
+ (Some(f32_f32_on_stack), f32_min, Numeric),
+ (Some(f32_f32_on_stack), f32_max, Numeric),
+ (Some(f32_f32_on_stack), f32_copysign, Numeric),
+ (Some(f64_on_stack), f64_abs, Numeric),
+ (Some(f64_on_stack), f64_neg, Numeric),
+ (Some(f64_on_stack), f64_ceil, Numeric),
+ (Some(f64_on_stack), f64_floor, Numeric),
+ (Some(f64_on_stack), f64_trunc, Numeric),
+ (Some(f64_on_stack), f64_nearest, Numeric),
+ (Some(f64_on_stack), f64_sqrt, Numeric),
+ (Some(f64_f64_on_stack), f64_add, Numeric),
+ (Some(f64_f64_on_stack), f64_sub, Numeric),
+ (Some(f64_f64_on_stack), f64_mul, Numeric),
+ (Some(f64_f64_on_stack), f64_div, Numeric),
+ (Some(f64_f64_on_stack), f64_min, Numeric),
+ (Some(f64_f64_on_stack), f64_max, Numeric),
+ (Some(f64_f64_on_stack), f64_copysign, Numeric),
+ (Some(i64_on_stack), i32_wrap_i64, Numeric),
+ (Some(f32_on_stack), i32_trunc_f32_s, Numeric),
+ (Some(f32_on_stack), i32_trunc_f32_u, Numeric),
+ (Some(f64_on_stack), i32_trunc_f64_s, Numeric),
+ (Some(f64_on_stack), i32_trunc_f64_u, Numeric),
+ (Some(i32_on_stack), i64_extend_i32_s, Numeric),
+ (Some(i32_on_stack), i64_extend_i32_u, Numeric),
+ (Some(f32_on_stack), i64_trunc_f32_s, Numeric),
+ (Some(f32_on_stack), i64_trunc_f32_u, Numeric),
+ (Some(f64_on_stack), i64_trunc_f64_s, Numeric),
+ (Some(f64_on_stack), i64_trunc_f64_u, Numeric),
+ (Some(i32_on_stack), f32_convert_i32_s, Numeric),
+ (Some(i32_on_stack), f32_convert_i32_u, Numeric),
+ (Some(i64_on_stack), f32_convert_i64_s, Numeric),
+ (Some(i64_on_stack), f32_convert_i64_u, Numeric),
+ (Some(f64_on_stack), f32_demote_f64, Numeric),
+ (Some(i32_on_stack), f64_convert_i32_s, Numeric),
+ (Some(i32_on_stack), f64_convert_i32_u, Numeric),
+ (Some(i64_on_stack), f64_convert_i64_s, Numeric),
+ (Some(i64_on_stack), f64_convert_i64_u, Numeric),
+ (Some(f32_on_stack), f64_promote_f32, Numeric),
+ (Some(f32_on_stack), i32_reinterpret_f32, Numeric),
+ (Some(f64_on_stack), i64_reinterpret_f64, Numeric),
+ (Some(i32_on_stack), f32_reinterpret_i32, Numeric),
+ (Some(i64_on_stack), f64_reinterpret_i64, Numeric),
+ (Some(extendable_i32_on_stack), i32_extend_8_s, Numeric),
+ (Some(extendable_i32_on_stack), i32_extend_16_s, Numeric),
+ (Some(extendable_i64_on_stack), i64_extend_8_s, Numeric),
+ (Some(extendable_i64_on_stack), i64_extend_16_s, Numeric),
+ (Some(extendable_i64_on_stack), i64_extend_32_s, Numeric),
+ (Some(nontrapping_f32_on_stack), i32_trunc_sat_f32_s, Numeric),
+ (Some(nontrapping_f32_on_stack), i32_trunc_sat_f32_u, Numeric),
+ (Some(nontrapping_f64_on_stack), i32_trunc_sat_f64_s, Numeric),
+ (Some(nontrapping_f64_on_stack), i32_trunc_sat_f64_u, Numeric),
+ (Some(nontrapping_f32_on_stack), i64_trunc_sat_f32_s, Numeric),
+ (Some(nontrapping_f32_on_stack), i64_trunc_sat_f32_u, Numeric),
+ (Some(nontrapping_f64_on_stack), i64_trunc_sat_f64_s, Numeric),
+ (Some(nontrapping_f64_on_stack), i64_trunc_sat_f64_u, Numeric),
+ // Reference instructions.
+ (Some(ref_null_valid), ref_null, Reference),
+ (Some(ref_func_valid), ref_func, Reference),
+ (Some(ref_as_non_null_valid), ref_as_non_null, Reference),
+ (Some(ref_eq_valid), ref_eq, Reference),
+ (Some(ref_test_valid), ref_test, Reference),
+ (Some(ref_cast_valid), ref_cast, Reference),
+ (Some(ref_is_null_valid), ref_is_null, Reference),
+ (Some(table_fill_valid), table_fill, Reference),
+ (Some(table_set_valid), table_set, Reference),
+ (Some(table_get_valid), table_get, Reference),
+ (Some(table_size_valid), table_size, Reference),
+ (Some(table_grow_valid), table_grow, Reference),
+ (Some(table_copy_valid), table_copy, Reference),
+ (Some(table_init_valid), table_init, Reference),
+ (Some(elem_drop_valid), elem_drop, Reference),
+ // Aggregate instructions.
+ (Some(struct_new_valid), struct_new, Aggregate),
+ (Some(struct_new_default_valid), struct_new_default, Aggregate),
+ (Some(struct_get_valid), struct_get, Aggregate),
+ (Some(struct_set_valid), struct_set, Aggregate),
+ (Some(array_new_valid), array_new, Aggregate),
+ (Some(array_new_fixed_valid), array_new_fixed, Aggregate),
+ (Some(array_new_default_valid), array_new_default, Aggregate),
+ (Some(array_new_data_valid), array_new_data, Aggregate),
+ (Some(array_new_elem_valid), array_new_elem, Aggregate),
+ (Some(array_get_valid), array_get, Aggregate),
+ (Some(array_set_valid), array_set, Aggregate),
+ (Some(array_len_valid), array_len, Aggregate),
+ (Some(array_fill_valid), array_fill, Aggregate),
+ (Some(array_copy_valid), array_copy, Aggregate),
+ (Some(array_init_data_valid), array_init_data, Aggregate),
+ (Some(array_init_elem_valid), array_init_elem, Aggregate),
+ (Some(ref_i31_valid), ref_i31, Aggregate),
+ (Some(i31_get_valid), i31_get, Aggregate),
+ (Some(any_convert_extern_valid), any_convert_extern, Aggregate),
+ (Some(extern_convert_any_valid), extern_convert_any, Aggregate),
+ // SIMD instructions.
+ (Some(simd_have_memory_and_offset), v128_load, Vector),
+ (Some(simd_have_memory_and_offset), v128_load8x8s, Vector),
+ (Some(simd_have_memory_and_offset), v128_load8x8u, Vector),
+ (Some(simd_have_memory_and_offset), v128_load16x4s, Vector),
+ (Some(simd_have_memory_and_offset), v128_load16x4u, Vector),
+ (Some(simd_have_memory_and_offset), v128_load32x2s, Vector),
+ (Some(simd_have_memory_and_offset), v128_load32x2u, Vector),
+ (Some(simd_have_memory_and_offset), v128_load8_splat, Vector),
+ (Some(simd_have_memory_and_offset), v128_load16_splat, Vector),
+ (Some(simd_have_memory_and_offset), v128_load32_splat, Vector),
+ (Some(simd_have_memory_and_offset), v128_load64_splat, Vector),
+ (Some(simd_have_memory_and_offset), v128_load32_zero, Vector),
+ (Some(simd_have_memory_and_offset), v128_load64_zero, Vector),
+ (Some(simd_v128_store_valid), v128_store, Vector),
+ (Some(simd_load_lane_valid), v128_load8_lane, Vector),
+ (Some(simd_load_lane_valid), v128_load16_lane, Vector),
+ (Some(simd_load_lane_valid), v128_load32_lane, Vector),
+ (Some(simd_load_lane_valid), v128_load64_lane, Vector),
+ (Some(simd_store_lane_valid), v128_store8_lane, Vector),
+ (Some(simd_store_lane_valid), v128_store16_lane, Vector),
+ (Some(simd_store_lane_valid), v128_store32_lane, Vector),
+ (Some(simd_store_lane_valid), v128_store64_lane, Vector),
+ (Some(simd_enabled), v128_const, Vector),
+ (Some(simd_v128_v128_on_stack), i8x16_shuffle, Vector),
+ (Some(simd_v128_on_stack), i8x16_extract_lane_s, Vector),
+ (Some(simd_v128_on_stack), i8x16_extract_lane_u, Vector),
+ (Some(simd_v128_i32_on_stack), i8x16_replace_lane, Vector),
+ (Some(simd_v128_on_stack), i16x8_extract_lane_s, Vector),
+ (Some(simd_v128_on_stack), i16x8_extract_lane_u, Vector),
+ (Some(simd_v128_i32_on_stack), i16x8_replace_lane, Vector),
+ (Some(simd_v128_on_stack), i32x4_extract_lane, Vector),
+ (Some(simd_v128_i32_on_stack), i32x4_replace_lane, Vector),
+ (Some(simd_v128_on_stack), i64x2_extract_lane, Vector),
+ (Some(simd_v128_i64_on_stack), i64x2_replace_lane, Vector),
+ (Some(simd_v128_on_stack), f32x4_extract_lane, Vector),
+ (Some(simd_v128_f32_on_stack), f32x4_replace_lane, Vector),
+ (Some(simd_v128_on_stack), f64x2_extract_lane, Vector),
+ (Some(simd_v128_f64_on_stack), f64x2_replace_lane, Vector),
+ (Some(simd_i32_on_stack), i8x16_splat, Vector),
+ (Some(simd_i32_on_stack), i16x8_splat, Vector),
+ (Some(simd_i32_on_stack), i32x4_splat, Vector),
+ (Some(simd_i64_on_stack), i64x2_splat, Vector),
+ (Some(simd_f32_on_stack), f32x4_splat, Vector),
+ (Some(simd_f64_on_stack), f64x2_splat, Vector),
+ (Some(simd_v128_v128_on_stack), i8x16_swizzle, Vector),
+ (Some(simd_v128_v128_on_stack_relaxed), i8x16_relaxed_swizzle, Vector),
+ (Some(simd_v128_v128_v128_on_stack), v128_bitselect, Vector),
+ (Some(simd_v128_v128_v128_on_stack_relaxed), i8x16_relaxed_laneselect, Vector),
+ (Some(simd_v128_v128_v128_on_stack_relaxed), i16x8_relaxed_laneselect, Vector),
+ (Some(simd_v128_v128_v128_on_stack_relaxed), i32x4_relaxed_laneselect, Vector),
+ (Some(simd_v128_v128_v128_on_stack_relaxed), i64x2_relaxed_laneselect, Vector),
+ (Some(simd_v128_v128_on_stack), i8x16_eq, Vector),
+ (Some(simd_v128_v128_on_stack), i8x16_ne, Vector),
+ (Some(simd_v128_v128_on_stack), i8x16_lt_s, Vector),
+ (Some(simd_v128_v128_on_stack), i8x16_lt_u, Vector),
+ (Some(simd_v128_v128_on_stack), i8x16_gt_s, Vector),
+ (Some(simd_v128_v128_on_stack), i8x16_gt_u, Vector),
+ (Some(simd_v128_v128_on_stack), i8x16_le_s, Vector),
+ (Some(simd_v128_v128_on_stack), i8x16_le_u, Vector),
+ (Some(simd_v128_v128_on_stack), i8x16_ge_s, Vector),
+ (Some(simd_v128_v128_on_stack), i8x16_ge_u, Vector),
+ (Some(simd_v128_v128_on_stack), i16x8_eq, Vector),
+ (Some(simd_v128_v128_on_stack), i16x8_ne, Vector),
+ (Some(simd_v128_v128_on_stack), i16x8_lt_s, Vector),
+ (Some(simd_v128_v128_on_stack), i16x8_lt_u, Vector),
+ (Some(simd_v128_v128_on_stack), i16x8_gt_s, Vector),
+ (Some(simd_v128_v128_on_stack), i16x8_gt_u, Vector),
+ (Some(simd_v128_v128_on_stack), i16x8_le_s, Vector),
+ (Some(simd_v128_v128_on_stack), i16x8_le_u, Vector),
+ (Some(simd_v128_v128_on_stack), i16x8_ge_s, Vector),
+ (Some(simd_v128_v128_on_stack), i16x8_ge_u, Vector),
+ (Some(simd_v128_v128_on_stack), i32x4_eq, Vector),
+ (Some(simd_v128_v128_on_stack), i32x4_ne, Vector),
+ (Some(simd_v128_v128_on_stack), i32x4_lt_s, Vector),
+ (Some(simd_v128_v128_on_stack), i32x4_lt_u, Vector),
+ (Some(simd_v128_v128_on_stack), i32x4_gt_s, Vector),
+ (Some(simd_v128_v128_on_stack), i32x4_gt_u, Vector),
+ (Some(simd_v128_v128_on_stack), i32x4_le_s, Vector),
+ (Some(simd_v128_v128_on_stack), i32x4_le_u, Vector),
+ (Some(simd_v128_v128_on_stack), i32x4_ge_s, Vector),
+ (Some(simd_v128_v128_on_stack), i32x4_ge_u, Vector),
+ (Some(simd_v128_v128_on_stack), i64x2_eq, Vector),
+ (Some(simd_v128_v128_on_stack), i64x2_ne, Vector),
+ (Some(simd_v128_v128_on_stack), i64x2_lt_s, Vector),
+ (Some(simd_v128_v128_on_stack), i64x2_gt_s, Vector),
+ (Some(simd_v128_v128_on_stack), i64x2_le_s, Vector),
+ (Some(simd_v128_v128_on_stack), i64x2_ge_s, Vector),
+ (Some(simd_v128_v128_on_stack), f32x4_eq, Vector),
+ (Some(simd_v128_v128_on_stack), f32x4_ne, Vector),
+ (Some(simd_v128_v128_on_stack), f32x4_lt, Vector),
+ (Some(simd_v128_v128_on_stack), f32x4_gt, Vector),
+ (Some(simd_v128_v128_on_stack), f32x4_le, Vector),
+ (Some(simd_v128_v128_on_stack), f32x4_ge, Vector),
+ (Some(simd_v128_v128_on_stack), f64x2_eq, Vector),
+ (Some(simd_v128_v128_on_stack), f64x2_ne, Vector),
+ (Some(simd_v128_v128_on_stack), f64x2_lt, Vector),
+ (Some(simd_v128_v128_on_stack), f64x2_gt, Vector),
+ (Some(simd_v128_v128_on_stack), f64x2_le, Vector),
+ (Some(simd_v128_v128_on_stack), f64x2_ge, Vector),
+ (Some(simd_v128_on_stack), v128_not, Vector),
+ (Some(simd_v128_v128_on_stack), v128_and, Vector),
+ (Some(simd_v128_v128_on_stack), v128_and_not, Vector),
+ (Some(simd_v128_v128_on_stack), v128_or, Vector),
+ (Some(simd_v128_v128_on_stack), v128_xor, Vector),
+ (Some(simd_v128_v128_on_stack), v128_any_true, Vector),
+ (Some(simd_v128_on_stack), i8x16_abs, Vector),
+ (Some(simd_v128_on_stack), i8x16_neg, Vector),
+ (Some(simd_v128_on_stack), i8x16_popcnt, Vector),
+ (Some(simd_v128_on_stack), i8x16_all_true, Vector),
+ (Some(simd_v128_on_stack), i8x16_bitmask, Vector),
+ (Some(simd_v128_v128_on_stack), i8x16_narrow_i16x8s, Vector),
+ (Some(simd_v128_v128_on_stack), i8x16_narrow_i16x8u, Vector),
+ (Some(simd_v128_i32_on_stack), i8x16_shl, Vector),
+ (Some(simd_v128_i32_on_stack), i8x16_shr_s, Vector),
+ (Some(simd_v128_i32_on_stack), i8x16_shr_u, Vector),
+ (Some(simd_v128_v128_on_stack), i8x16_add, Vector),
+ (Some(simd_v128_v128_on_stack), i8x16_add_sat_s, Vector),
+ (Some(simd_v128_v128_on_stack), i8x16_add_sat_u, Vector),
+ (Some(simd_v128_v128_on_stack), i8x16_sub, Vector),
+ (Some(simd_v128_v128_on_stack), i8x16_sub_sat_s, Vector),
+ (Some(simd_v128_v128_on_stack), i8x16_sub_sat_u, Vector),
+ (Some(simd_v128_v128_on_stack), i8x16_min_s, Vector),
+ (Some(simd_v128_v128_on_stack), i8x16_min_u, Vector),
+ (Some(simd_v128_v128_on_stack), i8x16_max_s, Vector),
+ (Some(simd_v128_v128_on_stack), i8x16_max_u, Vector),
+ (Some(simd_v128_v128_on_stack), i8x16_avgr_u, Vector),
+ (Some(simd_v128_on_stack), i16x8_extadd_pairwise_i8x16s, Vector),
+ (Some(simd_v128_on_stack), i16x8_extadd_pairwise_i8x16u, Vector),
+ (Some(simd_v128_on_stack), i16x8_abs, Vector),
+ (Some(simd_v128_on_stack), i16x8_neg, Vector),
+ (Some(simd_v128_v128_on_stack), i16x8q15_mulr_sat_s, Vector),
+ (Some(simd_v128_on_stack), i16x8_all_true, Vector),
+ (Some(simd_v128_on_stack), i16x8_bitmask, Vector),
+ (Some(simd_v128_v128_on_stack), i16x8_narrow_i32x4s, Vector),
+ (Some(simd_v128_v128_on_stack), i16x8_narrow_i32x4u, Vector),
+ (Some(simd_v128_on_stack), i16x8_extend_low_i8x16s, Vector),
+ (Some(simd_v128_on_stack), i16x8_extend_high_i8x16s, Vector),
+ (Some(simd_v128_on_stack), i16x8_extend_low_i8x16u, Vector),
+ (Some(simd_v128_on_stack), i16x8_extend_high_i8x16u, Vector),
+ (Some(simd_v128_i32_on_stack), i16x8_shl, Vector),
+ (Some(simd_v128_i32_on_stack), i16x8_shr_s, Vector),
+ (Some(simd_v128_i32_on_stack), i16x8_shr_u, Vector),
+ (Some(simd_v128_v128_on_stack), i16x8_add, Vector),
+ (Some(simd_v128_v128_on_stack), i16x8_add_sat_s, Vector),
+ (Some(simd_v128_v128_on_stack), i16x8_add_sat_u, Vector),
+ (Some(simd_v128_v128_on_stack), i16x8_sub, Vector),
+ (Some(simd_v128_v128_on_stack), i16x8_sub_sat_s, Vector),
+ (Some(simd_v128_v128_on_stack), i16x8_sub_sat_u, Vector),
+ (Some(simd_v128_v128_on_stack), i16x8_mul, Vector),
+ (Some(simd_v128_v128_on_stack), i16x8_min_s, Vector),
+ (Some(simd_v128_v128_on_stack), i16x8_min_u, Vector),
+ (Some(simd_v128_v128_on_stack), i16x8_max_s, Vector),
+ (Some(simd_v128_v128_on_stack), i16x8_max_u, Vector),
+ (Some(simd_v128_v128_on_stack), i16x8_avgr_u, Vector),
+ (Some(simd_v128_v128_on_stack), i16x8_extmul_low_i8x16s, Vector),
+ (Some(simd_v128_v128_on_stack), i16x8_extmul_high_i8x16s, Vector),
+ (Some(simd_v128_v128_on_stack), i16x8_extmul_low_i8x16u, Vector),
+ (Some(simd_v128_v128_on_stack), i16x8_extmul_high_i8x16u, Vector),
+ (Some(simd_v128_on_stack), i32x4_extadd_pairwise_i16x8s, Vector),
+ (Some(simd_v128_on_stack), i32x4_extadd_pairwise_i16x8u, Vector),
+ (Some(simd_v128_on_stack), i32x4_abs, Vector),
+ (Some(simd_v128_on_stack), i32x4_neg, Vector),
+ (Some(simd_v128_on_stack), i32x4_all_true, Vector),
+ (Some(simd_v128_on_stack), i32x4_bitmask, Vector),
+ (Some(simd_v128_on_stack), i32x4_extend_low_i16x8s, Vector),
+ (Some(simd_v128_on_stack), i32x4_extend_high_i16x8s, Vector),
+ (Some(simd_v128_on_stack), i32x4_extend_low_i16x8u, Vector),
+ (Some(simd_v128_on_stack), i32x4_extend_high_i16x8u, Vector),
+ (Some(simd_v128_i32_on_stack), i32x4_shl, Vector),
+ (Some(simd_v128_i32_on_stack), i32x4_shr_s, Vector),
+ (Some(simd_v128_i32_on_stack), i32x4_shr_u, Vector),
+ (Some(simd_v128_v128_on_stack), i32x4_add, Vector),
+ (Some(simd_v128_v128_on_stack), i32x4_sub, Vector),
+ (Some(simd_v128_v128_on_stack), i32x4_mul, Vector),
+ (Some(simd_v128_v128_on_stack), i32x4_min_s, Vector),
+ (Some(simd_v128_v128_on_stack), i32x4_min_u, Vector),
+ (Some(simd_v128_v128_on_stack), i32x4_max_s, Vector),
+ (Some(simd_v128_v128_on_stack), i32x4_max_u, Vector),
+ (Some(simd_v128_v128_on_stack), i32x4_dot_i16x8s, Vector),
+ (Some(simd_v128_v128_on_stack), i32x4_extmul_low_i16x8s, Vector),
+ (Some(simd_v128_v128_on_stack), i32x4_extmul_high_i16x8s, Vector),
+ (Some(simd_v128_v128_on_stack), i32x4_extmul_low_i16x8u, Vector),
+ (Some(simd_v128_v128_on_stack), i32x4_extmul_high_i16x8u, Vector),
+ (Some(simd_v128_on_stack), i64x2_abs, Vector),
+ (Some(simd_v128_on_stack), i64x2_neg, Vector),
+ (Some(simd_v128_on_stack), i64x2_all_true, Vector),
+ (Some(simd_v128_on_stack), i64x2_bitmask, Vector),
+ (Some(simd_v128_on_stack), i64x2_extend_low_i32x4s, Vector),
+ (Some(simd_v128_on_stack), i64x2_extend_high_i32x4s, Vector),
+ (Some(simd_v128_on_stack), i64x2_extend_low_i32x4u, Vector),
+ (Some(simd_v128_on_stack), i64x2_extend_high_i32x4u, Vector),
+ (Some(simd_v128_i32_on_stack), i64x2_shl, Vector),
+ (Some(simd_v128_i32_on_stack), i64x2_shr_s, Vector),
+ (Some(simd_v128_i32_on_stack), i64x2_shr_u, Vector),
+ (Some(simd_v128_v128_on_stack), i64x2_add, Vector),
+ (Some(simd_v128_v128_on_stack), i64x2_sub, Vector),
+ (Some(simd_v128_v128_on_stack), i64x2_mul, Vector),
+ (Some(simd_v128_v128_on_stack), i64x2_extmul_low_i32x4s, Vector),
+ (Some(simd_v128_v128_on_stack), i64x2_extmul_high_i32x4s, Vector),
+ (Some(simd_v128_v128_on_stack), i64x2_extmul_low_i32x4u, Vector),
+ (Some(simd_v128_v128_on_stack), i64x2_extmul_high_i32x4u, Vector),
+ (Some(simd_v128_on_stack), f32x4_ceil, Vector),
+ (Some(simd_v128_on_stack), f32x4_floor, Vector),
+ (Some(simd_v128_on_stack), f32x4_trunc, Vector),
+ (Some(simd_v128_on_stack), f32x4_nearest, Vector),
+ (Some(simd_v128_on_stack), f32x4_abs, Vector),
+ (Some(simd_v128_on_stack), f32x4_neg, Vector),
+ (Some(simd_v128_on_stack), f32x4_sqrt, Vector),
+ (Some(simd_v128_v128_on_stack), f32x4_add, Vector),
+ (Some(simd_v128_v128_on_stack), f32x4_sub, Vector),
+ (Some(simd_v128_v128_on_stack), f32x4_mul, Vector),
+ (Some(simd_v128_v128_on_stack), f32x4_div, Vector),
+ (Some(simd_v128_v128_on_stack), f32x4_min, Vector),
+ (Some(simd_v128_v128_on_stack), f32x4_max, Vector),
+ (Some(simd_v128_v128_on_stack), f32x4p_min, Vector),
+ (Some(simd_v128_v128_on_stack), f32x4p_max, Vector),
+ (Some(simd_v128_on_stack), f64x2_ceil, Vector),
+ (Some(simd_v128_on_stack), f64x2_floor, Vector),
+ (Some(simd_v128_on_stack), f64x2_trunc, Vector),
+ (Some(simd_v128_on_stack), f64x2_nearest, Vector),
+ (Some(simd_v128_on_stack), f64x2_abs, Vector),
+ (Some(simd_v128_on_stack), f64x2_neg, Vector),
+ (Some(simd_v128_on_stack), f64x2_sqrt, Vector),
+ (Some(simd_v128_v128_on_stack), f64x2_add, Vector),
+ (Some(simd_v128_v128_on_stack), f64x2_sub, Vector),
+ (Some(simd_v128_v128_on_stack), f64x2_mul, Vector),
+ (Some(simd_v128_v128_on_stack), f64x2_div, Vector),
+ (Some(simd_v128_v128_on_stack), f64x2_min, Vector),
+ (Some(simd_v128_v128_on_stack), f64x2_max, Vector),
+ (Some(simd_v128_v128_on_stack), f64x2p_min, Vector),
+ (Some(simd_v128_v128_on_stack), f64x2p_max, Vector),
+ (Some(simd_v128_on_stack), i32x4_trunc_sat_f32x4s, Vector),
+ (Some(simd_v128_on_stack), i32x4_trunc_sat_f32x4u, Vector),
+ (Some(simd_v128_on_stack), f32x4_convert_i32x4s, Vector),
+ (Some(simd_v128_on_stack), f32x4_convert_i32x4u, Vector),
+ (Some(simd_v128_on_stack), i32x4_trunc_sat_f64x2s_zero, Vector),
+ (Some(simd_v128_on_stack), i32x4_trunc_sat_f64x2u_zero, Vector),
+ (Some(simd_v128_on_stack), f64x2_convert_low_i32x4s, Vector),
+ (Some(simd_v128_on_stack), f64x2_convert_low_i32x4u, Vector),
+ (Some(simd_v128_on_stack), f32x4_demote_f64x2_zero, Vector),
+ (Some(simd_v128_on_stack), f64x2_promote_low_f32x4, Vector),
+ (Some(simd_v128_on_stack_relaxed), i32x4_relaxed_trunc_f32x4s, Vector),
+ (Some(simd_v128_on_stack_relaxed), i32x4_relaxed_trunc_f32x4u, Vector),
+ (Some(simd_v128_on_stack_relaxed), i32x4_relaxed_trunc_f64x2s_zero, Vector),
+ (Some(simd_v128_on_stack_relaxed), i32x4_relaxed_trunc_f64x2u_zero, Vector),
+ (Some(simd_v128_v128_v128_on_stack_relaxed), f32x4_relaxed_madd, Vector),
+ (Some(simd_v128_v128_v128_on_stack_relaxed), f32x4_relaxed_nmadd, Vector),
+ (Some(simd_v128_v128_v128_on_stack_relaxed), f64x2_relaxed_madd, Vector),
+ (Some(simd_v128_v128_v128_on_stack_relaxed), f64x2_relaxed_nmadd, Vector),
+ (Some(simd_v128_v128_on_stack_relaxed), f32x4_relaxed_min, Vector),
+ (Some(simd_v128_v128_on_stack_relaxed), f32x4_relaxed_max, Vector),
+ (Some(simd_v128_v128_on_stack_relaxed), f64x2_relaxed_min, Vector),
+ (Some(simd_v128_v128_on_stack_relaxed), f64x2_relaxed_max, Vector),
+ (Some(simd_v128_v128_on_stack_relaxed), i16x8_relaxed_q15mulr_s, Vector),
+ (Some(simd_v128_v128_on_stack_relaxed), i16x8_relaxed_dot_i8x16_i7x16_s, Vector),
+ (Some(simd_v128_v128_v128_on_stack_relaxed), i32x4_relaxed_dot_i8x16_i7x16_add_s, Vector),
+}
+
+pub(crate) struct CodeBuilderAllocations {
+ // The control labels in scope right now.
+ controls: Vec<Control>,
+
+ // The types on the operand stack right now.
+ operands: Vec<Option<ValType>>,
+
+ // Dynamic set of instruction options that we can generate and that are
+ // known to be valid right now.
+ options: Vec<(
+ fn(&mut Unstructured, &Module, &mut CodeBuilder, &mut Vec<Instruction>) -> Result<()>,
+ u32,
+ )>,
+
+ // Cached information about the module that we're generating functions
+ // for, used to speed up validity checks. The mutable globals map is a map
+ // from global value type to the indices of the globals which have that
+ // type (and they're all mutable).
+ mutable_globals: BTreeMap<ValType, Vec<u32>>,
+
+ // Like the mutable globals map above, this is a map from function types
+ // to the list of functions that have that function type.
+ functions: BTreeMap<Rc<FuncType>, Vec<u32>>,
+
+ // Like the functions map above, this is a map from tag types to the list
+ // of tags that have that tag type.
+ tags: BTreeMap<Vec<ValType>, Vec<u32>>,
+
+ // Tables in this module which have a funcref element type.
+ funcref_tables: Vec<u32>,
+
+ // Functions that are referenced in the module through globals and segments.
+ referenced_functions: Vec<u32>,
+
+ // Flag that indicates whether any element segment has the same type as
+ // any table.
+ table_init_possible: bool,
+
+ // Lists of memory indices which are either 32-bit or 64-bit. This is used
+ // for faster lookup when validating instructions, to know which memories
+ // have which types. For example, if there are no 64-bit memories then we
+ // shouldn't ever look for an i64 address on the stack for `i32.load`.
+ memory32: Vec<u32>,
+ memory64: Vec<u32>,
+
+ // State used when dropping operands so that, instead of dropping them
+ // into the ether, their final values are folded into module state, at
+ // this time chosen to be exported globals.
+ globals_cnt: u32,
+ new_globals: Vec<(ValType, ConstExpr)>,
+ global_dropped_i32: Option<u32>,
+ global_dropped_i64: Option<u32>,
+ global_dropped_f32: Option<u32>,
+ global_dropped_f64: Option<u32>,
+ global_dropped_v128: Option<u32>,
+}
+
+pub(crate) struct CodeBuilder<'a> {
+ func_ty: &'a FuncType,
+ locals: &'a mut Vec<ValType>,
+ allocs: &'a mut CodeBuilderAllocations,
+
+ // Temporary locals injected and used by nan canonicalization. Note that
+ // this list of extra locals is appended to `self.locals` at the end of code
+ // generation, and it's kept separate here to avoid using these locals in
+ // `local.get` and similar instructions.
+ extra_locals: Vec<ValType>,
+ f32_scratch: Option<usize>,
+ f64_scratch: Option<usize>,
+ v128_scratch: Option<usize>,
+}
+
+/// A control frame.
+#[derive(Debug, Clone)]
+struct Control {
+ kind: ControlKind,
+ /// Value types that must be on the stack when entering this control frame.
+ params: Vec<ValType>,
+ /// Value types that are left on the stack when exiting this control frame.
+ results: Vec<ValType>,
+ /// How far down the operand stack instructions inside this control frame
+ /// can reach.
+ height: usize,
+}
+
+impl Control {
+ fn label_types(&self) -> &[ValType] {
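+ // Branches to a `loop` label jump back to the loop header, so they
+ // must provide the loop's parameters; branches to any other kind of
+ // label exit the frame and must provide its results.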
+ if self.kind == ControlKind::Loop {
+ &self.params
+ } else {
+ &self.results
+ }
+ }
+}
+
+#[derive(Clone, Copy, Debug, PartialEq, Eq)]
+enum ControlKind {
+ Block,
+ If,
+ Loop,
+ TryTable,
+}
+
+enum Float {
+ F32,
+ F64,
+ F32x4,
+ F64x2,
+}
+
+impl CodeBuilderAllocations {
+ pub(crate) fn new(module: &Module) -> Self {
+ let mut mutable_globals = BTreeMap::new();
+ for (i, global) in module.globals.iter().enumerate() {
+ if global.mutable {
+ mutable_globals
+ .entry(global.val_type)
+ .or_insert(Vec::new())
+ .push(i as u32);
+ }
+ }
+
+ let mut tags = BTreeMap::new();
+ for (idx, tag_type) in module.tags() {
+ tags.entry(tag_type.func_type.params.to_vec())
+ .or_insert(Vec::new())
+ .push(idx);
+ }
+
+ let mut functions = BTreeMap::new();
+ for (idx, func) in module.funcs() {
+ functions
+ .entry(func.clone())
+ .or_insert(Vec::new())
+ .push(idx);
+ }
+
+ let mut funcref_tables = Vec::new();
+ let mut table_tys = Vec::new();
+ for (i, table) in module.tables.iter().enumerate() {
+ table_tys.push(table.element_type);
+ if table.element_type == RefType::FUNCREF {
+ funcref_tables.push(i as u32);
+ }
+ }
+
+ let mut referenced_functions = BTreeSet::new();
+ for (_, expr) in module.defined_globals.iter() {
+ if let GlobalInitExpr::FuncRef(i) = *expr {
+ referenced_functions.insert(i);
+ }
+ }
+ for g in module.elems.iter() {
+ match &g.items {
+ Elements::Expressions(e) => {
+ let iter = e.iter().filter_map(|i| *i);
+ referenced_functions.extend(iter);
+ }
+ Elements::Functions(e) => {
+ referenced_functions.extend(e.iter().cloned());
+ }
+ }
+ }
+
+ let table_init_possible = module.elems.iter().any(|e| table_tys.contains(&e.ty));
+
+ let mut memory32 = Vec::new();
+ let mut memory64 = Vec::new();
+ for (i, mem) in module.memories.iter().enumerate() {
+ if mem.memory64 {
+ memory64.push(i as u32);
+ } else {
+ memory32.push(i as u32);
+ }
+ }
+
+ CodeBuilderAllocations {
+ controls: Vec::with_capacity(4),
+ operands: Vec::with_capacity(16),
+ options: Vec::with_capacity(NUM_OPTIONS),
+ functions,
+ tags,
+ mutable_globals,
+ funcref_tables,
+ referenced_functions: referenced_functions.into_iter().collect(),
+ table_init_possible,
+ memory32,
+ memory64,
+
+ global_dropped_i32: None,
+ global_dropped_i64: None,
+ global_dropped_f32: None,
+ global_dropped_f64: None,
+ global_dropped_v128: None,
+ globals_cnt: module.globals.len() as u32,
+ new_globals: Vec::new(),
+ }
+ }
+
+ pub(crate) fn builder<'a>(
+ &'a mut self,
+ func_ty: &'a FuncType,
+ locals: &'a mut Vec<ValType>,
+ ) -> CodeBuilder<'a> {
+ self.controls.clear();
+ self.controls.push(Control {
+ kind: ControlKind::Block,
+ params: vec![],
+ results: func_ty.results.to_vec(),
+ height: 0,
+ });
+
+ self.operands.clear();
+ self.options.clear();
+
+ CodeBuilder {
+ func_ty,
+ locals,
+ allocs: self,
+ extra_locals: Vec::new(),
+ f32_scratch: None,
+ f64_scratch: None,
+ v128_scratch: None,
+ }
+ }
+
+ pub fn finish(self, u: &mut Unstructured<'_>, module: &mut Module) -> arbitrary::Result<()> {
+ // Any globals injected as part of dropping operands on the stack get
+ // injected into the module here. Each global is then exported, most of
+ // the time, to ensure it's part of the "image" of this module that is
+ // available, for example, for differential execution.
+ for (ty, init) in self.new_globals {
+ let global_idx = module.globals.len() as u32;
+ module.globals.push(GlobalType {
+ val_type: ty,
+ mutable: true,
+ });
+ let init = GlobalInitExpr::ConstExpr(init);
+ module.defined_globals.push((global_idx, init));
+
+ if u.ratio(1, 100).unwrap_or(false) {
+ continue;
+ }
+
+ let name = unique_string(1_000, &mut module.export_names, u)?;
+ module.add_arbitrary_export(name, ExportKind::Global, global_idx)?;
+ }
+ Ok(())
+ }
+}
+
+impl CodeBuilder<'_> {
+ fn pop_control(&mut self) -> Control {
+ let control = self.allocs.controls.pop().unwrap();
+
+ // Pop the actual types on the stack (which could be subtypes of the
+ // declared types) and then push the declared types. This avoids
+ // accidentally generating code that relies on the erased subtypes.
+ for _ in &control.results {
+ self.pop_operand();
+ }
+ for ty in &control.results {
+ self.push_operand(Some(*ty));
+ }
+
+ control
+ }
+
+ fn push_control(
+ &mut self,
+ kind: ControlKind,
+ params: impl Into<Vec<ValType>>,
+ results: impl Into<Vec<ValType>>,
+ ) {
+ let params = params.into();
+ let results = results.into();
+
+ // Similar to in `pop_control`, we want to pop the actual argument types
+ // off the stack (which could be subtypes of the declared parameter
+ // types) and then push the parameter types. This effectively does type
+ // erasure of any subtyping that exists so that we don't accidentally
+ // generate code that relies on the specific subtypes.
+ for _ in &params {
+ self.pop_operand();
+ }
+ self.push_operands(&params);
+
+ let height = self.allocs.operands.len() - params.len();
+ self.allocs.controls.push(Control {
+ kind,
+ params,
+ results,
+ height,
+ });
+ }
+
+ /// Get the operands that are in-scope within the current control frame.
+ #[inline]
+ fn operands(&self) -> &[Option<ValType>] {
+ let height = self.allocs.controls.last().map_or(0, |c| c.height);
+ &self.allocs.operands[height..]
+ }
+
+ /// Pop a single operand from the stack, regardless of expected type.
+ #[inline]
+ fn pop_operand(&mut self) -> Option<ValType> {
+ self.allocs.operands.pop().unwrap()
+ }
+
+ #[inline]
+ fn pop_operands(&mut self, module: &Module, to_pop: &[ValType]) {
+ debug_assert!(self.types_on_stack(module, to_pop));
+ self.allocs
+ .operands
+ .truncate(self.allocs.operands.len() - to_pop.len());
+ }
+
+ #[inline]
+ fn push_operands(&mut self, to_push: &[ValType]) {
+ self.allocs
+ .operands
+ .extend(to_push.iter().copied().map(Some));
+ }
+
+ #[inline]
+ fn push_operand(&mut self, ty: Option<ValType>) {
+ self.allocs.operands.push(ty);
+ }
+
+ fn label_types_on_stack(&self, module: &Module, to_check: &Control) -> bool {
+ self.types_on_stack(module, to_check.label_types())
+ }
+
+ /// Is the given type on top of the stack?
+ #[inline]
+ fn type_on_stack(&self, module: &Module, ty: ValType) -> bool {
+ self.type_on_stack_at(module, 0, ty)
+ }
+
+ /// Is the given type on the stack at the given index (indexing from the
+ /// top of the stack towards the bottom)?
+ #[inline]
+ fn type_on_stack_at(&self, module: &Module, at: usize, expected: ValType) -> bool {
+ let operands = self.operands();
+ if at >= operands.len() {
+ return false;
+ }
+ match operands[operands.len() - 1 - at] {
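+ // An unknown operand, pushed within unreachable code, matches any
+ // expected type.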
+ None => true,
+ Some(actual) => module.val_type_is_sub_type(actual, expected),
+ }
+ }
+
+ /// Are the given types on top of the stack?
+ #[inline]
+ fn types_on_stack(&self, module: &Module, types: &[ValType]) -> bool {
+ self.operands().len() >= types.len()
+ && types
+ .iter()
+ .rev()
+ .enumerate()
+ .all(|(idx, ty)| self.type_on_stack_at(module, idx, *ty))
+ }
+
+ /// Are the given field types on top of the stack?
+ #[inline]
+ fn field_types_on_stack(&self, module: &Module, types: &[FieldType]) -> bool {
+ self.operands().len() >= types.len()
+ && types
+ .iter()
+ .rev()
+ .enumerate()
+ .all(|(idx, ty)| self.type_on_stack_at(module, idx, ty.element_type.unpack()))
+ }
+
+ /// Is the given field type on top of the stack?
+ #[inline]
+ fn field_type_on_stack(&self, module: &Module, ty: FieldType) -> bool {
+ self.type_on_stack(module, ty.element_type.unpack())
+ }
+
+ /// Is the given field type on the stack at the given position (indexed from
+ /// the top of the stack)?
+ #[inline]
+ fn field_type_on_stack_at(&self, module: &Module, at: usize, ty: FieldType) -> bool {
+ self.type_on_stack_at(module, at, ty.element_type.unpack())
+ }
+
+ /// Get the ref type on the top of the operand stack, if any.
+ ///
+ /// * `None` means no reftype on the stack.
+ /// * `Some(None)` means that the stack is polymorphic.
+ /// * `Some(Some(r))` means that `r` is the ref type on top of the stack.
+ fn ref_type_on_stack(&self) -> Option<Option<RefType>> {
+ match self.operands().last().copied()? {
+ Some(ValType::Ref(r)) => Some(Some(r)),
+ Some(_) => None,
+ None => Some(None),
+ }
+ }
+
+ /// Is there a `(ref null? <index>)` on the stack at the given position? If
+ /// so return its nullability and type index.
+ fn concrete_ref_type_on_stack_at(&self, at: usize) -> Option<(bool, u32)> {
+ match self.operands().iter().copied().rev().nth(at)?? {
+ ValType::Ref(RefType {
+ nullable,
+ heap_type: HeapType::Concrete(ty),
+ }) => Some((nullable, ty)),
+ _ => None,
+ }
+ }
+
+ /// Is there a `(ref null? <index>)` at the given stack position that
+ /// references a concrete array type?
+ fn concrete_array_ref_type_on_stack_at(
+ &self,
+ module: &Module,
+ at: usize,
+ ) -> Option<(bool, u32, ArrayType)> {
+ let (nullable, ty) = self.concrete_ref_type_on_stack_at(at)?;
+ match &module.ty(ty).composite_type {
+ CompositeType::Array(a) => Some((nullable, ty, *a)),
+ _ => None,
+ }
+ }
+
+ /// Is there a `(ref null? <index>)` at the given stack position that
+ /// references a concrete struct type?
+ fn concrete_struct_ref_type_on_stack_at<'a>(
+ &self,
+ module: &'a Module,
+ at: usize,
+ ) -> Option<(bool, u32, &'a StructType)> {
+ let (nullable, ty) = self.concrete_ref_type_on_stack_at(at)?;
+ match &module.ty(ty).composite_type {
+ CompositeType::Struct(s) => Some((nullable, ty, s)),
+ _ => None,
+ }
+ }
+
+ /// Pop a reference type from the stack and return it.
+ ///
+ /// When in unreachable code and the stack is polymorphic, returns `None`.
+ fn pop_ref_type(&mut self) -> Option<RefType> {
+ let ref_ty = self.ref_type_on_stack().unwrap();
+ self.pop_operand();
+ ref_ty
+ }
+
+ /// Pops a `(ref null? <index>)` from the stack and returns its
+ /// nullability and type index.
+ fn pop_concrete_ref_type(&mut self) -> (bool, u32) {
+ let ref_ty = self.pop_ref_type().unwrap();
+ match ref_ty.heap_type {
+ HeapType::Concrete(i) => (ref_ty.nullable, i),
+ _ => panic!("not a concrete ref type"),
+ }
+ }
+
+ /// Get the `(ref null? <index>)` type on the top of the stack that
+ /// references a function type, if any.
+ fn concrete_funcref_on_stack(&self, module: &Module) -> Option<RefType> {
+ match self.operands().last().copied()?? {
+ ValType::Ref(r) => match r.heap_type {
+ HeapType::Concrete(idx) => match &module.ty(idx).composite_type {
+ CompositeType::Func(_) => Some(r),
+ CompositeType::Struct(_) | CompositeType::Array(_) => None,
+ },
+ _ => None,
+ },
+ _ => None,
+ }
+ }
+
+ /// Is there a `(ref null? <index>)` on the top of the stack that references
+ /// a struct type with at least one field?
+ fn non_empty_struct_ref_on_stack(&self, module: &Module, allow_null_refs: bool) -> bool {
+ match self.operands().last() {
+ Some(Some(ValType::Ref(RefType {
+ nullable,
+ heap_type: HeapType::Concrete(idx),
+ }))) => match &module.ty(*idx).composite_type {
+ CompositeType::Struct(s) => !s.fields.is_empty() && (!nullable || allow_null_refs),
+ _ => false,
+ },
+ _ => false,
+ }
+ }
+
+ #[inline(never)]
+ fn arbitrary_block_type(&self, u: &mut Unstructured, module: &Module) -> Result<BlockType> {
+ let mut options: Vec<Box<dyn Fn(&mut Unstructured) -> Result<BlockType>>> = vec![
+ Box::new(|_| Ok(BlockType::Empty)),
+ Box::new(|u| {
+ Ok(BlockType::Result(module.arbitrary_valtype(
+ u,
+ u32::try_from(module.types.len()).unwrap(),
+ )?))
+ }),
+ ];
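+ // With multi-value enabled, any function type whose parameters are
+ // already on the stack can also serve as a block type.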
+ if module.config.multi_value_enabled {
+ for (i, ty) in module.func_types() {
+ if self.types_on_stack(module, &ty.params) {
+ options.push(Box::new(move |_| Ok(BlockType::FunctionType(i as u32))));
+ }
+ }
+ }
+ let f = u.choose(&options)?;
+ f(u)
+ }
+
+ pub(crate) fn arbitrary(
+ mut self,
+ u: &mut Unstructured,
+ module: &Module,
+ ) -> Result<Vec<Instruction>> {
+ let max_instructions = module.config.max_instructions;
+ let allowed_instructions = module.config.allowed_instructions;
+ let mut instructions = vec![];
+
+ while !self.allocs.controls.is_empty() {
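+ // Stop emitting instructions once the configured maximum is
+ // reached, and otherwise stop early with a small (roughly 1-in-256)
+ // chance on each iteration.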
+ let keep_going = instructions.len() < max_instructions && u.arbitrary::<u8>()? != 0;
+ if !keep_going {
+ self.end_active_control_frames(
+ u,
+ module,
+ &mut instructions,
+ module.config.disallow_traps,
+ )?;
+ break;
+ }
+
+ match choose_instruction(u, module, allowed_instructions, &mut self) {
+ Some(f) => {
+ f(u, module, &mut self, &mut instructions)?;
+ }
+ // Choosing an instruction can fail because there is not enough
+ // underlying data, so we really cannot generate any more
+ // instructions. In this case we swallow that error and instead
+ // just terminate our wasm function's frames.
+ None => {
+ self.end_active_control_frames(
+ u,
+ module,
+ &mut instructions,
+ module.config.disallow_traps,
+ )?;
+ break;
+ }
+ }
+
+ // If the configuration for this module requests nan
+ // canonicalization then perform that here, based on whether or not
+ // the previous instruction needs canonicalization. Note that the
+ // set of instructions to canonicalize is based on Cranelift's
+ // nan-canonicalization pass, but the general idea is that it covers
+ // most floating-point operations.
+ if module.config.canonicalize_nans {
+ match instructions.last().unwrap() {
+ Instruction::F32Ceil
+ | Instruction::F32Floor
+ | Instruction::F32Nearest
+ | Instruction::F32Sqrt
+ | Instruction::F32Trunc
+ | Instruction::F32Div
+ | Instruction::F32Max
+ | Instruction::F32Min
+ | Instruction::F32Mul
+ | Instruction::F32Sub
+ | Instruction::F32Add => self.canonicalize_nan(Float::F32, &mut instructions),
+ Instruction::F64Ceil
+ | Instruction::F64Floor
+ | Instruction::F64Nearest
+ | Instruction::F64Sqrt
+ | Instruction::F64Trunc
+ | Instruction::F64Div
+ | Instruction::F64Max
+ | Instruction::F64Min
+ | Instruction::F64Mul
+ | Instruction::F64Sub
+ | Instruction::F64Add => self.canonicalize_nan(Float::F64, &mut instructions),
+ Instruction::F32x4Ceil
+ | Instruction::F32x4Floor
+ | Instruction::F32x4Nearest
+ | Instruction::F32x4Sqrt
+ | Instruction::F32x4Trunc
+ | Instruction::F32x4Div
+ | Instruction::F32x4Max
+ | Instruction::F32x4Min
+ | Instruction::F32x4Mul
+ | Instruction::F32x4Sub
+ | Instruction::F32x4Add => {
+ self.canonicalize_nan(Float::F32x4, &mut instructions)
+ }
+ Instruction::F64x2Ceil
+ | Instruction::F64x2Floor
+ | Instruction::F64x2Nearest
+ | Instruction::F64x2Sqrt
+ | Instruction::F64x2Trunc
+ | Instruction::F64x2Div
+ | Instruction::F64x2Max
+ | Instruction::F64x2Min
+ | Instruction::F64x2Mul
+ | Instruction::F64x2Sub
+ | Instruction::F64x2Add => {
+ self.canonicalize_nan(Float::F64x2, &mut instructions)
+ }
+ _ => {}
+ }
+ }
+ }
+
+ self.locals.extend(self.extra_locals.drain(..));
+
+ Ok(instructions)
+ }
+
+ fn canonicalize_nan(&mut self, ty: Float, ins: &mut Vec<Instruction>) {
+ // We'll need to temporarily save the top of the stack into a local, so
+ // figure out that local here. Note that this tries to use the same
+ // local if canonicalization happens more than once in a function.
+ let (local, val_ty) = match ty {
+ Float::F32 => (&mut self.f32_scratch, ValType::F32),
+ Float::F64 => (&mut self.f64_scratch, ValType::F64),
+ Float::F32x4 | Float::F64x2 => (&mut self.v128_scratch, ValType::V128),
+ };
+ let local = match *local {
+ Some(i) => i as u32,
+ None => self.alloc_local(val_ty),
+ };
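+
+ // For illustration, the sequence emitted below for an `f32` (with
+ // scratch local `$t`; names here are illustrative) is roughly:
+ //
+ //   local.tee $t      ;; keep val1 on the stack, save a copy
+ //   f32.const <NaN>   ;; val2, the canonical NaN pattern
+ //   local.get $t
+ //   local.get $t
+ //   f32.eq            ;; false iff the value is NaN
+ //   select            ;; val1 if equal to itself, else the canonical NaN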
+
+ // Save the previous instruction's result into a local. This also leaves
+ // a value on the stack as `val1` for the `select` instruction.
+ ins.push(Instruction::LocalTee(local));
+
+ // The `val2` value input to the `select` below: our NaN pattern.
+ //
+ // The NaN patterns here are chosen to be a canonical representation
+ // which is still NaN, but one for which the wasm always produces the
+ // same bits, so if the wasm takes a look inside the NaN it'll always
+ // see the same representation.
+ const CANON_32BIT_NAN: u32 = 0b01111111110000000000000000000000;
+ const CANON_64BIT_NAN: u64 =
+ 0b0111111111111000000000000000000000000000000000000000000000000000;
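+ // (Sign bit clear, exponent bits all set, quiet bit set, payload zero.)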
+ ins.push(match ty {
+ Float::F32 => Instruction::F32Const(f32::from_bits(CANON_32BIT_NAN)),
+ Float::F64 => Instruction::F64Const(f64::from_bits(CANON_64BIT_NAN)),
+ Float::F32x4 => {
+ let nan = CANON_32BIT_NAN as i128;
+ let nan = nan | (nan << 32) | (nan << 64) | (nan << 96);
+ Instruction::V128Const(nan)
+ }
+ Float::F64x2 => {
+ let nan = CANON_64BIT_NAN as i128;
+ let nan = nan | (nan << 64);
+ Instruction::V128Const(nan)
+ }
+ });
+
+ // The condition of the `select`, which is the float's equality test
+ // with itself.
+ ins.push(Instruction::LocalGet(local));
+ ins.push(Instruction::LocalGet(local));
+ ins.push(match ty {
+ Float::F32 => Instruction::F32Eq,
+ Float::F64 => Instruction::F64Eq,
+ Float::F32x4 => Instruction::F32x4Eq,
+ Float::F64x2 => Instruction::F64x2Eq,
+ });
+
+ // Select the result. If the condition is nonzero (aka the float is
+ // equal to itself) it picks `val1`; otherwise, if zero (aka the float
+ // is NaN), it picks `val2`.
+ ins.push(match ty {
+ Float::F32 | Float::F64 => Instruction::Select,
+ Float::F32x4 | Float::F64x2 => Instruction::V128Bitselect,
+ });
+ }
+
+ fn alloc_local(&mut self, ty: ValType) -> u32 {
+ let val = self.locals.len() + self.func_ty.params.len() + self.extra_locals.len();
+ self.extra_locals.push(ty);
+ u32::try_from(val).unwrap()
+ }
+
+ fn end_active_control_frames(
+ &mut self,
+ u: &mut Unstructured<'_>,
+ module: &Module,
+ instructions: &mut Vec<Instruction>,
+ disallow_traps: bool,
+ ) -> Result<()> {
+ while !self.allocs.controls.is_empty() {
+ // Ensure that this label is valid by placing the right types onto
+ // the operand stack for the end of the label.
+ self.guarantee_label_results(u, module, instructions, disallow_traps)?;
+
+ // Remove the label and clear the operand stack since the label has
+ // been removed.
+ let label = self.allocs.controls.pop().unwrap();
+ self.allocs.operands.truncate(label.height);
+
+ // If this is an `if` that is not stack neutral, then it
+ // must have an `else`. Generate synthetic results here in the same
+ // manner we did above.
+ if label.kind == ControlKind::If && label.params != label.results {
+ instructions.push(Instruction::Else);
+ self.allocs.controls.push(label.clone());
+ self.allocs
+ .operands
+ .extend(label.params.into_iter().map(Some));
+ self.guarantee_label_results(u, module, instructions, disallow_traps)?;
+ self.allocs.controls.pop();
+ self.allocs.operands.truncate(label.height);
+ }
+
+ // The last control frame for the function return does not
+ // need an `end` instruction.
+ if !self.allocs.controls.is_empty() {
+ instructions.push(Instruction::End);
+ }
+
+ // Place the results of the label onto the operand stack for use
+ // after the label.
+ self.allocs
+ .operands
+ .extend(label.results.into_iter().map(Some));
+ }
+ Ok(())
+ }
+
+ /// Modifies the instruction stream to guarantee that the current control
+ /// label's results are on the stack and ready for the control label to return.
+ fn guarantee_label_results(
+ &mut self,
+ u: &mut Unstructured<'_>,
+ module: &Module,
+ instructions: &mut Vec<Instruction>,
+ disallow_traps: bool,
+ ) -> Result<()> {
+ let operands = self.operands();
+ let label = self.allocs.controls.last().unwrap();
+
+ // Already done, yay!
+ if label.results.len() == operands.len() && self.types_on_stack(module, &label.results) {
+ return Ok(());
+ }
+
+ // Generating an unreachable instruction is always a valid way to
+ // generate any types for a label, but it's not too interesting, so
+ // don't favor it.
+ if !disallow_traps && u.ratio(1, u16::MAX)? {
+ instructions.push(Instruction::Unreachable);
+ return Ok(());
+ }
+
+ // Arbitrarily massage the stack to get the expected results. First we
+ // drop all extraneous results so we're only dealing with those we want
+ // to deal with. Afterwards we start at the bottom of the stack and move
+ // up, figuring out what matches and what doesn't. As soon as something
+ // doesn't match we throw it out, along with everything else remaining,
+ // and fill in the results with dummy values.
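+ //
+ // For example (illustrative): with label results `[i32 f32]` and an
+ // operand stack `[i32 i64 i64]`, we drop the top `i64` to get down to
+ // two operands, keep the matching `i32` at the bottom, drop the
+ // mismatched `i64`, and synthesize an arbitrary `f32` constant.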
+ let operands = operands.to_vec();
+ let mut operands = operands.as_slice();
+ let label_results = label.results.to_vec();
+ while operands.len() > label_results.len() {
+ self.drop_operand(u, *operands.last().unwrap(), instructions)?;
+ operands = &operands[..operands.len() - 1];
+ }
+ for (i, expected) in label_results.iter().enumerate() {
+ if let Some(actual) = operands.get(i) {
+ if Some(*expected) == *actual {
+ continue;
+ }
+ for ty in operands[i..].iter().rev() {
+ self.drop_operand(u, *ty, instructions)?;
+ }
+ operands = &[];
+ }
+ instructions.push(arbitrary_val(*expected, u));
+ }
+ Ok(())
+ }
+
+ fn drop_operand(
+ &mut self,
+ u: &mut Unstructured<'_>,
+ ty: Option<ValType>,
+ instructions: &mut Vec<Instruction>,
+ ) -> Result<()> {
+ if !self.mix_operand_into_global(u, ty, instructions)? {
+ instructions.push(Instruction::Drop);
+ }
+ Ok(())
+ }
+
+ /// Attempts to drop the top operand on the stack by "mixing" it into a
+ /// global.
+ ///
+ /// This is done to avoid dropping values on the floor, ensuring that
+ /// everything is part of some computation somewhere. Otherwise most
+ /// function results would be dropped on the floor, for example, since the
+ /// stack rarely happens to match the function type that we're generating.
+ ///
+ /// This will return `true` if the operand has been dropped, and `false` if
+ /// it didn't for one reason or another.
+ fn mix_operand_into_global(
+ &mut self,
+ u: &mut Unstructured<'_>,
+ ty: Option<ValType>,
+ instructions: &mut Vec<Instruction>,
+ ) -> Result<bool> {
+ // If the type of this operand isn't known, for example if it's relevant
+ // to unreachable code, then it can't be combined, so return `false`.
+ let ty = match ty {
+ Some(ty) => ty,
+ None => return Ok(false),
+ };
+
+ // Use the input stream to allow a small chance of dropping the value
+ // without combining it.
+ if u.ratio(1, 100)? {
+ return Ok(false);
+ }
+
+ // Depending on the type lookup or inject a global to place this value
+ // into.
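+ //
+ // For example (illustrative), dropping an `i32` when the dropped-i32
+ // global `$g` exists ultimately emits:
+ //
+ //   global.get $g
+ //   i32.xor
+ //   global.set $g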
+ let (global, combine) = match ty {
+ ValType::I32 => {
+ let global = *self.allocs.global_dropped_i32.get_or_insert_with(|| {
+ self.allocs.new_globals.push((ty, ConstExpr::i32_const(0)));
+ inc(&mut self.allocs.globals_cnt)
+ });
+ (global, Instruction::I32Xor)
+ }
+ ValType::I64 => {
+ let global = *self.allocs.global_dropped_i64.get_or_insert_with(|| {
+ self.allocs.new_globals.push((ty, ConstExpr::i64_const(0)));
+ inc(&mut self.allocs.globals_cnt)
+ });
+ (global, Instruction::I64Xor)
+ }
+ ValType::F32 => {
+ let global = *self.allocs.global_dropped_f32.get_or_insert_with(|| {
+ self.allocs
+ .new_globals
+ .push((ValType::I32, ConstExpr::i32_const(0)));
+ inc(&mut self.allocs.globals_cnt)
+ });
+ instructions.push(Instruction::I32ReinterpretF32);
+ (global, Instruction::I32Xor)
+ }
+ ValType::F64 => {
+ let global = *self.allocs.global_dropped_f64.get_or_insert_with(|| {
+ self.allocs
+ .new_globals
+ .push((ValType::I64, ConstExpr::i64_const(0)));
+ inc(&mut self.allocs.globals_cnt)
+ });
+ instructions.push(Instruction::I64ReinterpretF64);
+ (global, Instruction::I64Xor)
+ }
+ ValType::V128 => {
+ let global = *self.allocs.global_dropped_v128.get_or_insert_with(|| {
+ self.allocs.new_globals.push((ty, ConstExpr::v128_const(0)));
+ inc(&mut self.allocs.globals_cnt)
+ });
+ (global, Instruction::V128Xor)
+ }
+
+ // Don't know how to combine reference types at this time, so just
+ // let it get dropped.
+ ValType::Ref(_) => return Ok(false),
+ };
+ instructions.push(Instruction::GlobalGet(global));
+ instructions.push(combine);
+ instructions.push(Instruction::GlobalSet(global));
+
+ return Ok(true);
+
+ fn inc(val: &mut u32) -> u32 {
+ let ret = *val;
+ *val += 1;
+ ret
+ }
+ }
+}
+
+fn arbitrary_val(ty: ValType, u: &mut Unstructured<'_>) -> Instruction {
+ match ty {
+ ValType::I32 => Instruction::I32Const(u.arbitrary().unwrap_or(0)),
+ ValType::I64 => Instruction::I64Const(u.arbitrary().unwrap_or(0)),
+ ValType::F32 => Instruction::F32Const(u.arbitrary().unwrap_or(0.0)),
+ ValType::F64 => Instruction::F64Const(u.arbitrary().unwrap_or(0.0)),
+ ValType::V128 => Instruction::V128Const(u.arbitrary().unwrap_or(0)),
+ ValType::Ref(ty) => {
+ assert!(ty.nullable);
+ Instruction::RefNull(ty.heap_type)
+ }
+ }
+}
+
+#[inline]
+fn unreachable_valid(module: &Module, _: &mut CodeBuilder) -> bool {
+ !module.config.disallow_traps
+}
+
+fn unreachable(
+ _: &mut Unstructured,
+ _: &Module,
+ _: &mut CodeBuilder,
+ instructions: &mut Vec<Instruction>,
+) -> Result<()> {
+ instructions.push(Instruction::Unreachable);
+ Ok(())
+}
+
+fn nop(
+ _: &mut Unstructured,
+ _: &Module,
+ _: &mut CodeBuilder,
+ instructions: &mut Vec<Instruction>,
+) -> Result<()> {
+ instructions.push(Instruction::Nop);
+ Ok(())
+}
+
+fn block(
+ u: &mut Unstructured,
+ module: &Module,
+ builder: &mut CodeBuilder,
+ instructions: &mut Vec<Instruction>,
+) -> Result<()> {
+ let block_ty = builder.arbitrary_block_type(u, module)?;
+ let (params, results) = module.params_results(&block_ty);
+ builder.push_control(ControlKind::Block, params, results);
+ instructions.push(Instruction::Block(block_ty));
+ Ok(())
+}
+
+#[inline]
+fn try_table_valid(module: &Module, _: &mut CodeBuilder) -> bool {
+ module.config.exceptions_enabled
+}
+
+fn try_table(
+ u: &mut Unstructured,
+ module: &Module,
+ builder: &mut CodeBuilder,
+ instructions: &mut Vec<Instruction>,
+) -> Result<()> {
+ let block_ty = builder.arbitrary_block_type(u, module)?;
+
+ let mut catch_options: Vec<
+ Box<dyn Fn(&mut Unstructured<'_>, &mut CodeBuilder<'_>) -> Result<Catch>>,
+ > = Vec::new();
+
+ for (i, ctrl) in builder.allocs.controls.iter().rev().enumerate() {
+ let i = i as u32;
+
+ let label_types = ctrl.label_types();
+ if label_types.is_empty() {
+ catch_options.push(Box::new(move |_, _| Ok(Catch::All { label: i })));
+ }
+ if label_types == [ValType::EXNREF] {
+ catch_options.push(Box::new(move |_, _| Ok(Catch::AllRef { label: i })));
+ }
+
+ if builder.allocs.tags.contains_key(label_types) {
+ let label_types = label_types.to_vec();
+ catch_options.push(Box::new(move |u, builder| {
+ Ok(Catch::One {
+ tag: *u.choose(&builder.allocs.tags[&label_types])?,
+ label: i,
+ })
+ }));
+ }
+
+ let mut label_types_with_exnref = label_types.to_vec();
+ label_types_with_exnref.push(ValType::EXNREF);
+ if builder.allocs.tags.contains_key(&label_types_with_exnref) {
+ catch_options.push(Box::new(move |u, builder| {
+ Ok(Catch::OneRef {
+ tag: *u.choose(&builder.allocs.tags[&label_types_with_exnref])?,
+ label: i,
+ })
+ }));
+ }
+ }
+
+ let mut catches = Vec::new();
+ if !catch_options.is_empty() {
+ for _ in 0..u.int_in_range(0..=10)? {
+ catches.push(u.choose(&mut catch_options)?(u, builder)?);
+ }
+ }
+
+ let (params, results) = module.params_results(&block_ty);
+ builder.push_control(ControlKind::TryTable, params, results);
+
+ instructions.push(Instruction::TryTable(block_ty, catches.into()));
+ Ok(())
+}
+
+fn r#loop(
+ u: &mut Unstructured,
+ module: &Module,
+ builder: &mut CodeBuilder,
+ instructions: &mut Vec<Instruction>,
+) -> Result<()> {
+ let block_ty = builder.arbitrary_block_type(u, module)?;
+ let (params, results) = module.params_results(&block_ty);
+ builder.push_control(ControlKind::Loop, params, results);
+ instructions.push(Instruction::Loop(block_ty));
+ Ok(())
+}
+
+#[inline]
+fn if_valid(module: &Module, builder: &mut CodeBuilder) -> bool {
+ builder.type_on_stack(module, ValType::I32)
+}
+
+fn r#if(
+ u: &mut Unstructured,
+ module: &Module,
+ builder: &mut CodeBuilder,
+ instructions: &mut Vec<Instruction>,
+) -> Result<()> {
+ builder.pop_operands(module, &[ValType::I32]);
+ let block_ty = builder.arbitrary_block_type(u, module)?;
+ let (params, results) = module.params_results(&block_ty);
+ builder.push_control(ControlKind::If, params, results);
+ instructions.push(Instruction::If(block_ty));
+ Ok(())
+}
+
+#[inline]
+fn else_valid(module: &Module, builder: &mut CodeBuilder) -> bool {
+ let last_control = builder.allocs.controls.last().unwrap();
+ last_control.kind == ControlKind::If
+ && builder.operands().len() == last_control.results.len()
+ && builder.types_on_stack(module, &last_control.results)
+}
+
+fn r#else(
+ _: &mut Unstructured,
+ module: &Module,
+ builder: &mut CodeBuilder,
+ instructions: &mut Vec<Instruction>,
+) -> Result<()> {
+ let control = builder.pop_control();
+ builder.pop_operands(module, &control.results);
+ builder.push_operands(&control.params);
+ builder.push_control(ControlKind::Block, control.params, control.results);
+ instructions.push(Instruction::Else);
+ Ok(())
+}
+
+#[inline]
+fn end_valid(module: &Module, builder: &mut CodeBuilder) -> bool {
+ // Note: first control frame is the function return's control frame, which
+ // does not have an associated `end`.
+ if builder.allocs.controls.len() <= 1 {
+ return false;
+ }
+ let control = builder.allocs.controls.last().unwrap();
+ builder.operands().len() == control.results.len()
+ && builder.types_on_stack(module, &control.results)
+ // `if`s that don't leave the stack as they found it must have an
+ // `else`.
+ && !(control.kind == ControlKind::If && control.params != control.results)
+}
+
+fn end(
+ _: &mut Unstructured,
+ _: &Module,
+ builder: &mut CodeBuilder,
+ instructions: &mut Vec<Instruction>,
+) -> Result<()> {
+ builder.pop_control();
+ instructions.push(Instruction::End);
+ Ok(())
+}
+
+#[inline]
+fn br_valid(module: &Module, builder: &mut CodeBuilder) -> bool {
+ builder
+ .allocs
+ .controls
+ .iter()
+ .any(|l| builder.label_types_on_stack(module, l))
+}
+
+fn br(
+ u: &mut Unstructured,
+ module: &Module,
+ builder: &mut CodeBuilder,
+ instructions: &mut Vec<Instruction>,
+) -> Result<()> {
+ let n = builder
+ .allocs
+ .controls
+ .iter()
+ .filter(|l| builder.label_types_on_stack(module, l))
+ .count();
+ debug_assert!(n > 0);
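+ // Pick one of the `n` labels whose types are already on the stack;
+ // `target` is its depth relative to the innermost label.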
+ let i = u.int_in_range(0..=n - 1)?;
+ let (target, _) = builder
+ .allocs
+ .controls
+ .iter()
+ .rev()
+ .enumerate()
+ .filter(|(_, l)| builder.label_types_on_stack(module, l))
+ .nth(i)
+ .unwrap();
+ let control = &builder.allocs.controls[builder.allocs.controls.len() - 1 - target];
+ let tys = control.label_types().to_vec();
+ builder.pop_operands(module, &tys);
+ instructions.push(Instruction::Br(target as u32));
+ Ok(())
+}
+
+#[inline]
+fn br_if_valid(module: &Module, builder: &mut CodeBuilder) -> bool {
+ if !builder.type_on_stack(module, ValType::I32) {
+ return false;
+ }
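+ // Temporarily pop the `i32` condition so the label-type check sees
+ // the stack beneath it, then push it back.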
+ let ty = builder.allocs.operands.pop().unwrap();
+ let is_valid = builder
+ .allocs
+ .controls
+ .iter()
+ .any(|l| builder.label_types_on_stack(module, l));
+ builder.allocs.operands.push(ty);
+ is_valid
+}
+
+fn br_if(
+ u: &mut Unstructured,
+ module: &Module,
+ builder: &mut CodeBuilder,
+ instructions: &mut Vec<Instruction>,
+) -> Result<()> {
+ builder.pop_operands(module, &[ValType::I32]);
+
+ let n = builder
+ .allocs
+ .controls
+ .iter()
+ .filter(|l| builder.label_types_on_stack(module, l))
+ .count();
+ debug_assert!(n > 0);
+ let i = u.int_in_range(0..=n - 1)?;
+ let (target, _) = builder
+ .allocs
+ .controls
+ .iter()
+ .rev()
+ .enumerate()
+ .filter(|(_, l)| builder.label_types_on_stack(module, l))
+ .nth(i)
+ .unwrap();
+ instructions.push(Instruction::BrIf(target as u32));
+ Ok(())
+}
+
+#[inline]
+fn br_table_valid(module: &Module, builder: &mut CodeBuilder) -> bool {
+ if !builder.type_on_stack(module, ValType::I32) {
+ return false;
+ }
+ let ty = builder.allocs.operands.pop().unwrap();
+ let is_valid = br_valid(module, builder);
+ builder.allocs.operands.push(ty);
+ is_valid
+}
+
+fn br_table(
+ u: &mut Unstructured,
+ module: &Module,
+ builder: &mut CodeBuilder,
+ instructions: &mut Vec<Instruction>,
+) -> Result<()> {
+ builder.pop_operands(module, &[ValType::I32]);
+
+ let n = builder
+ .allocs
+ .controls
+ .iter()
+ .filter(|l| builder.label_types_on_stack(module, l))
+ .count();
+ debug_assert!(n > 0);
+
+ let i = u.int_in_range(0..=n - 1)?;
+ let (default_target, _) = builder
+ .allocs
+ .controls
+ .iter()
+ .rev()
+ .enumerate()
+ .filter(|(_, l)| builder.label_types_on_stack(module, l))
+ .nth(i)
+ .unwrap();
+ let control = &builder.allocs.controls[builder.allocs.controls.len() - 1 - default_target];
+
+ let targets = builder
+ .allocs
+ .controls
+ .iter()
+ .rev()
+ .enumerate()
+ .filter(|(_, l)| l.label_types() == control.label_types())
+ .map(|(t, _)| t as u32)
+ .collect();
+
+ let tys = control.label_types().to_vec();
+ builder.pop_operands(module, &tys);
+
+ instructions.push(Instruction::BrTable(targets, default_target as u32));
+ Ok(())
+}
+
+#[inline]
+fn return_valid(module: &Module, builder: &mut CodeBuilder) -> bool {
+ builder.label_types_on_stack(module, &builder.allocs.controls[0])
+}
+
+fn r#return(
+ _: &mut Unstructured,
+ module: &Module,
+ builder: &mut CodeBuilder,
+ instructions: &mut Vec<Instruction>,
+) -> Result<()> {
+ let results = builder.allocs.controls[0].results.clone();
+ builder.pop_operands(module, &results);
+ instructions.push(Instruction::Return);
+ Ok(())
+}
+
+#[inline]
+fn call_valid(module: &Module, builder: &mut CodeBuilder) -> bool {
+ builder
+ .allocs
+ .functions
+ .keys()
+ .any(|func_ty| builder.types_on_stack(module, &func_ty.params))
+}
+
+fn call(
+ u: &mut Unstructured,
+ module: &Module,
+ builder: &mut CodeBuilder,
+ instructions: &mut Vec<Instruction>,
+) -> Result<()> {
+ let candidates = builder
+ .allocs
+ .functions
+ .iter()
+ .filter(|(func_ty, _)| builder.types_on_stack(module, &func_ty.params))
+ .flat_map(|(_, v)| v.iter().copied())
+ .collect::<Vec<_>>();
+ assert!(!candidates.is_empty());
+ let i = u.int_in_range(0..=candidates.len() - 1)?;
+ let (func_idx, ty) = module.funcs().nth(candidates[i] as usize).unwrap();
+ builder.pop_operands(module, &ty.params);
+ builder.push_operands(&ty.results);
+ instructions.push(Instruction::Call(func_idx as u32));
+ Ok(())
+}
+
+#[inline]
+fn call_ref_valid(module: &Module, builder: &mut CodeBuilder) -> bool {
+ if !module.config.gc_enabled {
+ return false;
+ }
+ let funcref = match builder.concrete_funcref_on_stack(module) {
+ Some(f) => f,
+ None => return false,
+ };
+ if module.config.disallow_traps && funcref.nullable {
+ return false;
+ }
+ match funcref.heap_type {
+ HeapType::Concrete(idx) => {
+ let ty = builder.allocs.operands.pop().unwrap();
+ let params = &module.ty(idx).unwrap_func().params;
+ let valid = builder.types_on_stack(module, params);
+ builder.allocs.operands.push(ty);
+ valid
+ }
+ _ => unreachable!(),
+ }
+}
+
+fn call_ref(
+ _u: &mut Unstructured,
+ module: &Module,
+ builder: &mut CodeBuilder,
+ instructions: &mut Vec<Instruction>,
+) -> Result<()> {
+ let heap_ty = match builder.pop_operand() {
+ Some(ValType::Ref(r)) => r.heap_type,
+ _ => unreachable!(),
+ };
+ let idx = match heap_ty {
+ HeapType::Concrete(idx) => idx,
+ _ => unreachable!(),
+ };
+ let func_ty = match &module.ty(idx).composite_type {
+ CompositeType::Func(f) => f,
+ _ => unreachable!(),
+ };
+ builder.pop_operands(module, &func_ty.params);
+ builder.push_operands(&func_ty.results);
+ instructions.push(Instruction::CallRef(idx));
+ Ok(())
+}
+
+#[inline]
+fn call_indirect_valid(module: &Module, builder: &mut CodeBuilder) -> bool {
+ if builder.allocs.funcref_tables.is_empty() || !builder.type_on_stack(module, ValType::I32) {
+ return false;
+ }
+ if module.config.disallow_traps {
+ // We have no way to reflect, at run time, on a `funcref` in
+ // the `i`th slot in a table and dynamically avoid trapping
+ // `call_indirect`s. Therefore, we can't emit *any*
+ // `call_indirect` instructions if we want to avoid traps.
+ return false;
+ }
+ let ty = builder.allocs.operands.pop().unwrap();
+ let is_valid = module
+ .func_types()
+ .any(|(_, ty)| builder.types_on_stack(module, &ty.params));
+ builder.allocs.operands.push(ty);
+ is_valid
+}
+
+fn call_indirect(
+ u: &mut Unstructured,
+ module: &Module,
+ builder: &mut CodeBuilder,
+ instructions: &mut Vec<Instruction>,
+) -> Result<()> {
+ builder.pop_operands(module, &[ValType::I32]);
+
+ let choices = module
+ .func_types()
+ .filter(|(_, ty)| builder.types_on_stack(module, &ty.params))
+ .collect::<Vec<_>>();
+ let (type_idx, ty) = u.choose(&choices)?;
+ builder.pop_operands(module, &ty.params);
+ builder.push_operands(&ty.results);
+ let table = *u.choose(&builder.allocs.funcref_tables)?;
+ instructions.push(Instruction::CallIndirect {
+ ty: *type_idx as u32,
+ table,
+ });
+ Ok(())
+}
+
+#[inline]
+fn return_call_valid(module: &Module, builder: &mut CodeBuilder) -> bool {
+ if !module.config.tail_call_enabled {
+ return false;
+ }
+
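+    // A tail call replaces the caller's frame, so the callee's results must
+    // match the caller's results exactly; `controls[0]` is the function-level
+    // frame, whose label types are the function's result types.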
+ builder.allocs.functions.keys().any(|func_ty| {
+ builder.types_on_stack(module, &func_ty.params)
+ && builder.allocs.controls[0].label_types() == &func_ty.results
+ })
+}
+
+fn return_call(
+ u: &mut Unstructured,
+ module: &Module,
+ builder: &mut CodeBuilder,
+ instructions: &mut Vec<Instruction>,
+) -> Result<()> {
+ let candidates = builder
+ .allocs
+ .functions
+ .iter()
+ .filter(|(func_ty, _)| {
+ builder.types_on_stack(module, &func_ty.params)
+ && builder.allocs.controls[0].label_types() == &func_ty.results
+ })
+ .flat_map(|(_, v)| v.iter().copied())
+ .collect::<Vec<_>>();
+    assert!(!candidates.is_empty());
+ let i = u.int_in_range(0..=candidates.len() - 1)?;
+ let (func_idx, ty) = module.funcs().nth(candidates[i] as usize).unwrap();
+ builder.pop_operands(module, &ty.params);
+ builder.push_operands(&ty.results);
+ instructions.push(Instruction::ReturnCall(func_idx as u32));
+ Ok(())
+}
+
+#[inline]
+fn return_call_ref_valid(module: &Module, builder: &mut CodeBuilder) -> bool {
+ if !module.config.gc_enabled {
+ return false;
+ }
+
+ let ref_ty = match builder.concrete_funcref_on_stack(module) {
+ None => return false,
+ Some(r) if r.nullable && module.config.disallow_traps => return false,
+ Some(r) => r,
+ };
+
+ let idx = match ref_ty.heap_type {
+ HeapType::Concrete(idx) => idx,
+ _ => unreachable!(),
+ };
+ let func_ty = match &module.ty(idx).composite_type {
+ CompositeType::Func(f) => f,
+ CompositeType::Array(_) | CompositeType::Struct(_) => return false,
+ };
+
+ let ty = builder.allocs.operands.pop().unwrap();
+ let valid = builder.types_on_stack(module, &func_ty.params)
+ && builder.func_ty.results == func_ty.results;
+ builder.allocs.operands.push(ty);
+ valid
+}
+
+fn return_call_ref(
+ _u: &mut Unstructured,
+ module: &Module,
+ builder: &mut CodeBuilder,
+ instructions: &mut Vec<Instruction>,
+) -> Result<()> {
+ let heap_ty = match builder.pop_operand() {
+ Some(ValType::Ref(r)) => r.heap_type,
+ _ => unreachable!(),
+ };
+ let idx = match heap_ty {
+ HeapType::Concrete(idx) => idx,
+ _ => unreachable!(),
+ };
+ let func_ty = match &module.ty(idx).composite_type {
+ CompositeType::Func(f) => f,
+ _ => unreachable!(),
+ };
+ builder.pop_operands(module, &func_ty.params);
+ builder.push_operands(&func_ty.results);
+ instructions.push(Instruction::ReturnCallRef(idx));
+ Ok(())
+}
+
+#[inline]
+fn return_call_indirect_valid(module: &Module, builder: &mut CodeBuilder) -> bool {
+ if !module.config.tail_call_enabled
+ || builder.allocs.funcref_tables.is_empty()
+ || !builder.type_on_stack(module, ValType::I32)
+ {
+ return false;
+ }
+
+ if module.config.disallow_traps {
+ // See comment in `call_indirect_valid`; same applies here.
+ return false;
+ }
+
+ let ty = builder.allocs.operands.pop().unwrap();
+ let is_valid = module.func_types().any(|(_, ty)| {
+ builder.types_on_stack(module, &ty.params)
+ && builder.allocs.controls[0].label_types() == &ty.results
+ });
+ builder.allocs.operands.push(ty);
+ is_valid
+}
+
+fn return_call_indirect(
+ u: &mut Unstructured,
+ module: &Module,
+ builder: &mut CodeBuilder,
+ instructions: &mut Vec<Instruction>,
+) -> Result<()> {
+ builder.pop_operands(module, &[ValType::I32]);
+
+ let choices = module
+ .func_types()
+ .filter(|(_, ty)| {
+ builder.types_on_stack(module, &ty.params)
+ && builder.allocs.controls[0].label_types() == &ty.results
+ })
+ .collect::<Vec<_>>();
+ let (type_idx, ty) = u.choose(&choices)?;
+ builder.pop_operands(module, &ty.params);
+ builder.push_operands(&ty.results);
+ let table = *u.choose(&builder.allocs.funcref_tables)?;
+ instructions.push(Instruction::ReturnCallIndirect {
+ ty: *type_idx as u32,
+ table,
+ });
+ Ok(())
+}
+
+#[inline]
+fn throw_valid(module: &Module, builder: &mut CodeBuilder) -> bool {
+ module.config.exceptions_enabled
+ && builder
+ .allocs
+ .tags
+ .keys()
+ .any(|k| builder.types_on_stack(module, k))
+}
+
+fn throw(
+ u: &mut Unstructured,
+ module: &Module,
+ builder: &mut CodeBuilder,
+ instructions: &mut Vec<Instruction>,
+) -> Result<()> {
+ let candidates = builder
+ .allocs
+ .tags
+ .iter()
+ .filter(|(k, _)| builder.types_on_stack(module, k))
+ .flat_map(|(_, v)| v.iter().copied())
+ .collect::<Vec<_>>();
+    assert!(!candidates.is_empty());
+ let i = u.int_in_range(0..=candidates.len() - 1)?;
+ let (tag_idx, tag_type) = module.tags().nth(candidates[i] as usize).unwrap();
+    // Tags have no results, so a `throw` cannot return a value.
+    assert!(tag_type.func_type.results.is_empty());
+ builder.pop_operands(module, &tag_type.func_type.params);
+ instructions.push(Instruction::Throw(tag_idx as u32));
+ Ok(())
+}
+
+#[inline]
+fn throw_ref_valid(module: &Module, builder: &mut CodeBuilder) -> bool {
+ module.config.exceptions_enabled && builder.types_on_stack(module, &[ValType::EXNREF])
+}
+
+fn throw_ref(
+ _u: &mut Unstructured,
+ module: &Module,
+ builder: &mut CodeBuilder,
+ instructions: &mut Vec<Instruction>,
+) -> Result<()> {
+ builder.pop_operands(module, &[ValType::EXNREF]);
+ instructions.push(Instruction::ThrowRef);
+ Ok(())
+}
+
+#[inline]
+fn br_on_null_valid(module: &Module, builder: &mut CodeBuilder) -> bool {
+ if !module.config.gc_enabled {
+ return false;
+ }
+ if builder.ref_type_on_stack().is_none() {
+ return false;
+ }
+ let ty = builder.allocs.operands.pop().unwrap();
+ let valid = br_valid(module, builder);
+ builder.allocs.operands.push(ty);
+ valid
+}
+
+fn br_on_null(
+ u: &mut Unstructured,
+ module: &Module,
+ builder: &mut CodeBuilder,
+ instructions: &mut Vec<Instruction>,
+) -> Result<()> {
+ let heap_type = match builder.pop_ref_type() {
+ Some(r) => r.heap_type,
+ None => {
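+            // The operand's type is unknown (we are in unreachable code), so
+            // choose an arbitrary heap type for the instruction's annotation.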
+ if !module.types.is_empty() && u.arbitrary()? {
+                HeapType::Concrete(u.int_in_range(0..=u32::try_from(module.types.len() - 1).unwrap())?)
+ } else {
+ *u.choose(&[
+ HeapType::Func,
+ HeapType::Extern,
+ HeapType::Any,
+ HeapType::None,
+ HeapType::NoExtern,
+ HeapType::NoFunc,
+ HeapType::Eq,
+ HeapType::Struct,
+ HeapType::Array,
+ HeapType::I31,
+ ])?
+ }
+ }
+ };
+
+ let n = builder
+ .allocs
+ .controls
+ .iter()
+ .filter(|l| builder.label_types_on_stack(module, l))
+ .count();
+ debug_assert!(n > 0);
+
+ let i = u.int_in_range(0..=n - 1)?;
+ let (target, _) = builder
+ .allocs
+ .controls
+ .iter()
+ .rev()
+ .enumerate()
+ .filter(|(_, l)| builder.label_types_on_stack(module, l))
+ .nth(i)
+ .unwrap();
+
+ builder.push_operands(&[ValType::Ref(RefType {
+ nullable: false,
+ heap_type,
+ })]);
+
+ instructions.push(Instruction::BrOnNull(u32::try_from(target).unwrap()));
+ Ok(())
+}
+
+fn is_valid_br_on_non_null_control(
+ module: &Module,
+ control: &Control,
+ builder: &CodeBuilder,
+) -> bool {
+ let ref_ty = match control.label_types().last() {
+ Some(ValType::Ref(r)) => *r,
+ Some(_) | None => return false,
+ };
+ let nullable_ref_ty = RefType {
+ nullable: true,
+ ..ref_ty
+ };
+ builder.type_on_stack(module, ValType::Ref(nullable_ref_ty))
+ && control
+ .label_types()
+ .iter()
+ .rev()
+ .enumerate()
+ .skip(1)
+ .all(|(idx, ty)| builder.type_on_stack_at(module, idx, *ty))
+}
+
+#[inline]
+fn br_on_non_null_valid(module: &Module, builder: &mut CodeBuilder) -> bool {
+ module.config.gc_enabled
+ && builder
+ .allocs
+ .controls
+ .iter()
+ .any(|l| is_valid_br_on_non_null_control(module, l, builder))
+}
+
+fn br_on_non_null(
+ u: &mut Unstructured,
+ module: &Module,
+ builder: &mut CodeBuilder,
+ instructions: &mut Vec<Instruction>,
+) -> Result<()> {
+ let n = builder
+ .allocs
+ .controls
+ .iter()
+ .filter(|l| is_valid_br_on_non_null_control(module, l, builder))
+ .count();
+ debug_assert!(n > 0);
+
+ let i = u.int_in_range(0..=n - 1)?;
+ let (target, _) = builder
+ .allocs
+ .controls
+ .iter()
+ .rev()
+ .enumerate()
+ .filter(|(_, l)| is_valid_br_on_non_null_control(module, l, builder))
+ .nth(i)
+ .unwrap();
+
+ builder.pop_ref_type();
+ instructions.push(Instruction::BrOnNonNull(u32::try_from(target).unwrap()));
+ Ok(())
+}
+
+fn is_valid_br_on_cast_control(
+ module: &Module,
+ builder: &CodeBuilder,
+ control: &Control,
+ from_ref_ty: Option<RefType>,
+) -> bool {
+    // The last label type is a subtype of the type we are casting from...
+ let to_ref_ty = match control.label_types().last() {
+ Some(ValType::Ref(r)) => *r,
+ _ => return false,
+ };
+ if let Some(from_ty) = from_ref_ty {
+ if !module.ref_type_is_sub_type(to_ref_ty, from_ty) {
+ return false;
+ }
+ }
+ // ... and the rest of the label types are on the stack.
+ control
+ .label_types()
+ .iter()
+ .rev()
+ .enumerate()
+ .skip(1)
+ .all(|(idx, ty)| builder.type_on_stack_at(module, idx, *ty))
+}
+
+#[inline]
+fn br_on_cast_valid(module: &Module, builder: &mut CodeBuilder) -> bool {
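+    // `ref_type_on_stack` returns `Option<Option<RefType>>`: the outer `None`
+    // means the top of the stack is not a reference at all (or the stack is
+    // empty), while `Some(None)` is an operand of unknown type in unreachable
+    // code.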
+ let from_ref_ty = match builder.ref_type_on_stack() {
+ None => return false,
+ Some(r) => r,
+ };
+ module.config.gc_enabled
+ && builder
+ .allocs
+ .controls
+ .iter()
+ .any(|l| is_valid_br_on_cast_control(module, builder, l, from_ref_ty))
+}
+
+/// Compute the [type difference] between the two given ref types.
+///
+/// [type difference]: https://webassembly.github.io/gc/core/valid/conventions.html#aux-reftypediff
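+///
+/// For example, `diff((ref null $t), (ref null $t))` is `(ref $t)`: when the
+/// cast target is nullable, a null operand would have matched it, so the
+/// value that remains after a failed cast is known to be non-null.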
+fn ref_type_difference(a: RefType, b: RefType) -> RefType {
+ RefType {
+ nullable: if b.nullable { false } else { a.nullable },
+ heap_type: a.heap_type,
+ }
+}
+
+fn br_on_cast(
+ u: &mut Unstructured,
+ module: &Module,
+ builder: &mut CodeBuilder,
+ instructions: &mut Vec<Instruction>,
+) -> Result<()> {
+ let from_ref_type = builder.ref_type_on_stack().unwrap();
+
+ let n = builder
+ .allocs
+ .controls
+ .iter()
+ .filter(|l| is_valid_br_on_cast_control(module, builder, l, from_ref_type))
+ .count();
+ debug_assert!(n > 0);
+
+ let i = u.int_in_range(0..=n - 1)?;
+ let (relative_depth, control) = builder
+ .allocs
+ .controls
+ .iter()
+ .rev()
+ .enumerate()
+ .filter(|(_, l)| is_valid_br_on_cast_control(module, builder, l, from_ref_type))
+ .nth(i)
+ .unwrap();
+ let relative_depth = u32::try_from(relative_depth).unwrap();
+
+ let to_ref_type = match control.label_types().last() {
+ Some(ValType::Ref(r)) => *r,
+ _ => unreachable!(),
+ };
+
+ let to_ref_type = module.arbitrary_matching_ref_type(u, to_ref_type)?;
+ let from_ref_type = from_ref_type.unwrap_or(to_ref_type);
+ let from_ref_type = module.arbitrary_super_type_of_ref_type(u, from_ref_type)?;
+
+ builder.pop_operand();
+ builder.push_operands(&[ValType::Ref(ref_type_difference(
+ from_ref_type,
+ to_ref_type,
+ ))]);
+
+ instructions.push(Instruction::BrOnCast {
+ from_ref_type,
+ to_ref_type,
+ relative_depth,
+ });
+ Ok(())
+}
+
+fn is_valid_br_on_cast_fail_control(
+ module: &Module,
+ builder: &CodeBuilder,
+ control: &Control,
+ from_ref_type: Option<RefType>,
+) -> bool {
+ control
+ .label_types()
+ .last()
+ .map_or(false, |label_ty| match (label_ty, from_ref_type) {
+ (ValType::Ref(label_ty), Some(from_ty)) => {
+ module.ref_type_is_sub_type(from_ty, *label_ty)
+ }
+ (ValType::Ref(_), None) => true,
+ _ => false,
+ })
+ && control
+ .label_types()
+ .iter()
+ .rev()
+ .enumerate()
+ .skip(1)
+ .all(|(idx, ty)| builder.type_on_stack_at(module, idx, *ty))
+}
+
+#[inline]
+fn br_on_cast_fail_valid(module: &Module, builder: &mut CodeBuilder) -> bool {
+ let from_ref_ty = match builder.ref_type_on_stack() {
+ None => return false,
+ Some(r) => r,
+ };
+ module.config.gc_enabled
+ && builder
+ .allocs
+ .controls
+ .iter()
+ .any(|l| is_valid_br_on_cast_fail_control(module, builder, l, from_ref_ty))
+}
+
+fn br_on_cast_fail(
+ u: &mut Unstructured,
+ module: &Module,
+ builder: &mut CodeBuilder,
+ instructions: &mut Vec<Instruction>,
+) -> Result<()> {
+ let from_ref_type = builder.ref_type_on_stack().unwrap();
+
+ let n = builder
+ .allocs
+ .controls
+ .iter()
+ .filter(|l| is_valid_br_on_cast_fail_control(module, builder, l, from_ref_type))
+ .count();
+ debug_assert!(n > 0);
+
+ let i = u.int_in_range(0..=n - 1)?;
+ let (target, control) = builder
+ .allocs
+ .controls
+ .iter()
+ .rev()
+ .enumerate()
+ .filter(|(_, l)| is_valid_br_on_cast_fail_control(module, builder, l, from_ref_type))
+ .nth(i)
+ .unwrap();
+
+ let from_ref_type =
+ from_ref_type.unwrap_or_else(|| match control.label_types().last().unwrap() {
+ ValType::Ref(r) => *r,
+ _ => unreachable!(),
+ });
+ let to_ref_type = module.arbitrary_matching_ref_type(u, from_ref_type)?;
+
+ builder.pop_operand();
+ builder.push_operand(Some(ValType::Ref(to_ref_type)));
+
+ instructions.push(Instruction::BrOnCastFail {
+ from_ref_type,
+ to_ref_type,
+ relative_depth: u32::try_from(target).unwrap(),
+ });
+ Ok(())
+}
+
+#[inline]
+fn drop_valid(_module: &Module, builder: &mut CodeBuilder) -> bool {
+ !builder.operands().is_empty()
+}
+
+fn drop(
+ u: &mut Unstructured,
+ _module: &Module,
+ builder: &mut CodeBuilder,
+ instructions: &mut Vec<Instruction>,
+) -> Result<()> {
+ let ty = builder.pop_operand();
+ builder.drop_operand(u, ty, instructions)?;
+ Ok(())
+}
+
+#[inline]
+fn select_valid(module: &Module, builder: &mut CodeBuilder) -> bool {
+ if !(builder.operands().len() >= 3 && builder.type_on_stack(module, ValType::I32)) {
+ return false;
+ }
+ let t = builder.operands()[builder.operands().len() - 2];
+ let u = builder.operands()[builder.operands().len() - 3];
+ t.is_none() || u.is_none() || t == u
+}
+
+fn select(
+ _: &mut Unstructured,
+ _module: &Module,
+ builder: &mut CodeBuilder,
+ instructions: &mut Vec<Instruction>,
+) -> Result<()> {
+ builder.pop_operand();
+ let t = builder.pop_operand();
+ let u = builder.pop_operand();
+ let ty = t.or(u);
+ builder.allocs.operands.push(ty);
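+    // Untyped `select` is only valid for numeric and vector types; reference
+    // types require the typed encoding.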
+ match ty {
+ Some(ty @ ValType::Ref(_)) => instructions.push(Instruction::TypedSelect(ty)),
+ Some(ValType::I32) | Some(ValType::I64) | Some(ValType::F32) | Some(ValType::F64)
+ | Some(ValType::V128) | None => instructions.push(Instruction::Select),
+ }
+ Ok(())
+}
+
+#[inline]
+fn local_get_valid(_module: &Module, builder: &mut CodeBuilder) -> bool {
+ !builder.func_ty.params.is_empty() || !builder.locals.is_empty()
+}
+
+fn local_get(
+ u: &mut Unstructured,
+ _module: &Module,
+ builder: &mut CodeBuilder,
+ instructions: &mut Vec<Instruction>,
+) -> Result<()> {
+ let num_params = builder.func_ty.params.len();
+ let n = num_params + builder.locals.len();
+ debug_assert!(n > 0);
+ let i = u.int_in_range(0..=n - 1)?;
+ builder.allocs.operands.push(Some(if i < num_params {
+ builder.func_ty.params[i]
+ } else {
+ builder.locals[i - num_params]
+ }));
+ instructions.push(Instruction::LocalGet(i as u32));
+ Ok(())
+}
+
+#[inline]
+fn local_set_valid(module: &Module, builder: &mut CodeBuilder) -> bool {
+ builder
+ .func_ty
+ .params
+ .iter()
+ .chain(builder.locals.iter())
+ .any(|ty| builder.type_on_stack(module, *ty))
+}
+
+fn local_set(
+ u: &mut Unstructured,
+ module: &Module,
+ builder: &mut CodeBuilder,
+ instructions: &mut Vec<Instruction>,
+) -> Result<()> {
+ let n = builder
+ .func_ty
+ .params
+ .iter()
+ .chain(builder.locals.iter())
+ .filter(|ty| builder.type_on_stack(module, **ty))
+ .count();
+ debug_assert!(n > 0);
+ let i = u.int_in_range(0..=n - 1)?;
+ let (j, _) = builder
+ .func_ty
+ .params
+ .iter()
+ .chain(builder.locals.iter())
+ .enumerate()
+ .filter(|(_, ty)| builder.type_on_stack(module, **ty))
+ .nth(i)
+ .unwrap();
+ builder.allocs.operands.pop();
+ instructions.push(Instruction::LocalSet(j as u32));
+ Ok(())
+}
+
+fn local_tee(
+ u: &mut Unstructured,
+ module: &Module,
+ builder: &mut CodeBuilder,
+ instructions: &mut Vec<Instruction>,
+) -> Result<()> {
+ let n = builder
+ .func_ty
+ .params
+ .iter()
+ .chain(builder.locals.iter())
+ .filter(|ty| builder.type_on_stack(module, **ty))
+ .count();
+ debug_assert!(n > 0);
+ let i = u.int_in_range(0..=n - 1)?;
+ let (j, _) = builder
+ .func_ty
+ .params
+ .iter()
+ .chain(builder.locals.iter())
+ .enumerate()
+ .filter(|(_, ty)| builder.type_on_stack(module, **ty))
+ .nth(i)
+ .unwrap();
+ instructions.push(Instruction::LocalTee(j as u32));
+ Ok(())
+}
+
+#[inline]
+fn global_get_valid(module: &Module, _: &mut CodeBuilder) -> bool {
+ module.globals.len() > 0
+}
+
+fn global_get(
+ u: &mut Unstructured,
+ module: &Module,
+ builder: &mut CodeBuilder,
+ instructions: &mut Vec<Instruction>,
+) -> Result<()> {
+ debug_assert!(module.globals.len() > 0);
+ let global_idx = u.int_in_range(0..=module.globals.len() - 1)?;
+ builder
+ .allocs
+ .operands
+ .push(Some(module.globals[global_idx].val_type));
+ instructions.push(Instruction::GlobalGet(global_idx as u32));
+ Ok(())
+}
+
+#[inline]
+fn global_set_valid(module: &Module, builder: &mut CodeBuilder) -> bool {
+ builder
+ .allocs
+ .mutable_globals
+ .iter()
+ .any(|(ty, _)| builder.type_on_stack(module, *ty))
+}
+
+fn global_set(
+ u: &mut Unstructured,
+ module: &Module,
+ builder: &mut CodeBuilder,
+ instructions: &mut Vec<Instruction>,
+) -> Result<()> {
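+    // `mutable_globals` maps a value type to the indices of all mutable
+    // globals of that type, so any entry whose key type is on the stack is a
+    // valid choice.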
+ let candidates = builder
+ .allocs
+ .mutable_globals
+ .iter()
+ .find(|(ty, _)| builder.type_on_stack(module, **ty))
+ .unwrap()
+ .1;
+ let i = u.int_in_range(0..=candidates.len() - 1)?;
+ builder.allocs.operands.pop();
+ instructions.push(Instruction::GlobalSet(candidates[i]));
+ Ok(())
+}
+
+#[inline]
+fn have_memory(module: &Module, _: &mut CodeBuilder) -> bool {
+ module.memories.len() > 0
+}
+
+#[inline]
+fn have_memory_and_offset(module: &Module, builder: &mut CodeBuilder) -> bool {
+ (builder.allocs.memory32.len() > 0 && builder.type_on_stack(module, ValType::I32))
+ || (builder.allocs.memory64.len() > 0 && builder.type_on_stack(module, ValType::I64))
+}
+
+#[inline]
+fn have_data(module: &Module, _: &mut CodeBuilder) -> bool {
+ module.data.len() > 0
+}
+
+fn i32_load(
+ u: &mut Unstructured,
+ module: &Module,
+ builder: &mut CodeBuilder,
+ instructions: &mut Vec<Instruction>,
+) -> Result<()> {
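+    // The slice passed to `mem_arg` is the set of candidate `log2(alignment)`
+    // values: 0, 1, or 2 here, i.e. 1-, 2-, or 4-byte alignment for this
+    // 4-byte load. The wider and narrower accesses below pass correspondingly
+    // wider and narrower ranges.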
+ let memarg = mem_arg(u, module, builder, &[0, 1, 2])?;
+ builder.allocs.operands.push(Some(ValType::I32));
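+    // `no_traps::load` (see no_traps.rs) guards the access with a dynamic
+    // bounds check so the emitted code cannot trap on an out-of-bounds
+    // address.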
+ if module.config.disallow_traps {
+ no_traps::load(Instruction::I32Load(memarg), module, builder, instructions);
+ } else {
+ instructions.push(Instruction::I32Load(memarg));
+ }
+ Ok(())
+}
+
+fn i64_load(
+ u: &mut Unstructured,
+ module: &Module,
+ builder: &mut CodeBuilder,
+ instructions: &mut Vec<Instruction>,
+) -> Result<()> {
+ let memarg = mem_arg(u, module, builder, &[0, 1, 2, 3])?;
+ builder.allocs.operands.push(Some(ValType::I64));
+ if module.config.disallow_traps {
+ no_traps::load(Instruction::I64Load(memarg), module, builder, instructions);
+ } else {
+ instructions.push(Instruction::I64Load(memarg));
+ }
+ Ok(())
+}
+
+fn f32_load(
+ u: &mut Unstructured,
+ module: &Module,
+ builder: &mut CodeBuilder,
+ instructions: &mut Vec<Instruction>,
+) -> Result<()> {
+ let memarg = mem_arg(u, module, builder, &[0, 1, 2])?;
+ builder.allocs.operands.push(Some(ValType::F32));
+ if module.config.disallow_traps {
+ no_traps::load(Instruction::F32Load(memarg), module, builder, instructions);
+ } else {
+ instructions.push(Instruction::F32Load(memarg));
+ }
+ Ok(())
+}
+
+fn f64_load(
+ u: &mut Unstructured,
+ module: &Module,
+ builder: &mut CodeBuilder,
+ instructions: &mut Vec<Instruction>,
+) -> Result<()> {
+ let memarg = mem_arg(u, module, builder, &[0, 1, 2, 3])?;
+ builder.allocs.operands.push(Some(ValType::F64));
+ if module.config.disallow_traps {
+ no_traps::load(Instruction::F64Load(memarg), module, builder, instructions);
+ } else {
+ instructions.push(Instruction::F64Load(memarg));
+ }
+ Ok(())
+}
+
+fn i32_load_8_s(
+ u: &mut Unstructured,
+ module: &Module,
+ builder: &mut CodeBuilder,
+ instructions: &mut Vec<Instruction>,
+) -> Result<()> {
+ let memarg = mem_arg(u, module, builder, &[0])?;
+ builder.allocs.operands.push(Some(ValType::I32));
+ if module.config.disallow_traps {
+ no_traps::load(
+ Instruction::I32Load8S(memarg),
+ module,
+ builder,
+ instructions,
+ );
+ } else {
+ instructions.push(Instruction::I32Load8S(memarg));
+ }
+ Ok(())
+}
+
+fn i32_load_8_u(
+ u: &mut Unstructured,
+ module: &Module,
+ builder: &mut CodeBuilder,
+ instructions: &mut Vec<Instruction>,
+) -> Result<()> {
+ let memarg = mem_arg(u, module, builder, &[0])?;
+ builder.allocs.operands.push(Some(ValType::I32));
+ if module.config.disallow_traps {
+ no_traps::load(
+ Instruction::I32Load8U(memarg),
+ module,
+ builder,
+ instructions,
+ );
+ } else {
+ instructions.push(Instruction::I32Load8U(memarg));
+ }
+ Ok(())
+}
+
+fn i32_load_16_s(
+ u: &mut Unstructured,
+ module: &Module,
+ builder: &mut CodeBuilder,
+ instructions: &mut Vec<Instruction>,
+) -> Result<()> {
+ let memarg = mem_arg(u, module, builder, &[0, 1])?;
+ builder.allocs.operands.push(Some(ValType::I32));
+ if module.config.disallow_traps {
+ no_traps::load(
+ Instruction::I32Load16S(memarg),
+ module,
+ builder,
+ instructions,
+ );
+ } else {
+ instructions.push(Instruction::I32Load16S(memarg));
+ }
+ Ok(())
+}
+
+fn i32_load_16_u(
+ u: &mut Unstructured,
+ module: &Module,
+ builder: &mut CodeBuilder,
+ instructions: &mut Vec<Instruction>,
+) -> Result<()> {
+ let memarg = mem_arg(u, module, builder, &[0, 1])?;
+ builder.allocs.operands.push(Some(ValType::I32));
+ if module.config.disallow_traps {
+ no_traps::load(
+ Instruction::I32Load16U(memarg),
+ module,
+ builder,
+ instructions,
+ );
+ } else {
+ instructions.push(Instruction::I32Load16U(memarg));
+ }
+ Ok(())
+}
+
+fn i64_load_8_s(
+ u: &mut Unstructured,
+ module: &Module,
+ builder: &mut CodeBuilder,
+ instructions: &mut Vec<Instruction>,
+) -> Result<()> {
+ let memarg = mem_arg(u, module, builder, &[0])?;
+ builder.allocs.operands.push(Some(ValType::I64));
+ if module.config.disallow_traps {
+ no_traps::load(
+ Instruction::I64Load8S(memarg),
+ module,
+ builder,
+ instructions,
+ );
+ } else {
+ instructions.push(Instruction::I64Load8S(memarg));
+ }
+ Ok(())
+}
+
+fn i64_load_16_s(
+ u: &mut Unstructured,
+ module: &Module,
+ builder: &mut CodeBuilder,
+ instructions: &mut Vec<Instruction>,
+) -> Result<()> {
+ let memarg = mem_arg(u, module, builder, &[0, 1])?;
+ builder.allocs.operands.push(Some(ValType::I64));
+ if module.config.disallow_traps {
+ no_traps::load(
+ Instruction::I64Load16S(memarg),
+ module,
+ builder,
+ instructions,
+ );
+ } else {
+ instructions.push(Instruction::I64Load16S(memarg));
+ }
+ Ok(())
+}
+
+fn i64_load_32_s(
+ u: &mut Unstructured,
+ module: &Module,
+ builder: &mut CodeBuilder,
+ instructions: &mut Vec<Instruction>,
+) -> Result<()> {
+ let memarg = mem_arg(u, module, builder, &[0, 1, 2])?;
+ builder.allocs.operands.push(Some(ValType::I64));
+ if module.config.disallow_traps {
+ no_traps::load(
+ Instruction::I64Load32S(memarg),
+ module,
+ builder,
+ instructions,
+ );
+ } else {
+ instructions.push(Instruction::I64Load32S(memarg));
+ }
+ Ok(())
+}
+
+fn i64_load_8_u(
+ u: &mut Unstructured,
+ module: &Module,
+ builder: &mut CodeBuilder,
+ instructions: &mut Vec<Instruction>,
+) -> Result<()> {
+ let memarg = mem_arg(u, module, builder, &[0])?;
+ builder.allocs.operands.push(Some(ValType::I64));
+ if module.config.disallow_traps {
+ no_traps::load(
+ Instruction::I64Load8U(memarg),
+ module,
+ builder,
+ instructions,
+ );
+ } else {
+ instructions.push(Instruction::I64Load8U(memarg));
+ }
+ Ok(())
+}
+
+fn i64_load_16_u(
+ u: &mut Unstructured,
+ module: &Module,
+ builder: &mut CodeBuilder,
+ instructions: &mut Vec<Instruction>,
+) -> Result<()> {
+ let memarg = mem_arg(u, module, builder, &[0, 1])?;
+ builder.allocs.operands.push(Some(ValType::I64));
+ if module.config.disallow_traps {
+ no_traps::load(
+ Instruction::I64Load16U(memarg),
+ module,
+ builder,
+ instructions,
+ );
+ } else {
+ instructions.push(Instruction::I64Load16U(memarg));
+ }
+ Ok(())
+}
+
+fn i64_load_32_u(
+ u: &mut Unstructured,
+ module: &Module,
+ builder: &mut CodeBuilder,
+ instructions: &mut Vec<Instruction>,
+) -> Result<()> {
+ let memarg = mem_arg(u, module, builder, &[0, 1, 2])?;
+ builder.allocs.operands.push(Some(ValType::I64));
+ if module.config.disallow_traps {
+ no_traps::load(
+ Instruction::I64Load32U(memarg),
+ module,
+ builder,
+ instructions,
+ );
+ } else {
+ instructions.push(Instruction::I64Load32U(memarg));
+ }
+ Ok(())
+}
+
+#[inline]
+fn store_valid(module: &Module, builder: &mut CodeBuilder, f: impl Fn() -> ValType) -> bool {
+ (builder.allocs.memory32.len() > 0 && builder.types_on_stack(module, &[ValType::I32, f()]))
+ || (builder.allocs.memory64.len() > 0
+ && builder.types_on_stack(module, &[ValType::I64, f()]))
+}
+
+#[inline]
+fn i32_store_valid(module: &Module, builder: &mut CodeBuilder) -> bool {
+ store_valid(module, builder, || ValType::I32)
+}
+
+fn i32_store(
+ u: &mut Unstructured,
+ module: &Module,
+ builder: &mut CodeBuilder,
+ instructions: &mut Vec<Instruction>,
+) -> Result<()> {
+ builder.pop_operands(module, &[ValType::I32]);
+ let memarg = mem_arg(u, module, builder, &[0, 1, 2])?;
+ if module.config.disallow_traps {
+ no_traps::store(Instruction::I32Store(memarg), module, builder, instructions);
+ } else {
+ instructions.push(Instruction::I32Store(memarg));
+ }
+ Ok(())
+}
+
+#[inline]
+fn i64_store_valid(module: &Module, builder: &mut CodeBuilder) -> bool {
+ store_valid(module, builder, || ValType::I64)
+}
+
+fn i64_store(
+ u: &mut Unstructured,
+ module: &Module,
+ builder: &mut CodeBuilder,
+ instructions: &mut Vec<Instruction>,
+) -> Result<()> {
+ builder.pop_operands(module, &[ValType::I64]);
+ let memarg = mem_arg(u, module, builder, &[0, 1, 2, 3])?;
+ if module.config.disallow_traps {
+ no_traps::store(Instruction::I64Store(memarg), module, builder, instructions);
+ } else {
+ instructions.push(Instruction::I64Store(memarg));
+ }
+ Ok(())
+}
+
+#[inline]
+fn f32_store_valid(module: &Module, builder: &mut CodeBuilder) -> bool {
+ store_valid(module, builder, || ValType::F32)
+}
+
+fn f32_store(
+ u: &mut Unstructured,
+ module: &Module,
+ builder: &mut CodeBuilder,
+ instructions: &mut Vec<Instruction>,
+) -> Result<()> {
+ builder.pop_operands(module, &[ValType::F32]);
+ let memarg = mem_arg(u, module, builder, &[0, 1, 2])?;
+ if module.config.disallow_traps {
+ no_traps::store(Instruction::F32Store(memarg), module, builder, instructions);
+ } else {
+ instructions.push(Instruction::F32Store(memarg));
+ }
+ Ok(())
+}
+
+#[inline]
+fn f64_store_valid(module: &Module, builder: &mut CodeBuilder) -> bool {
+ store_valid(module, builder, || ValType::F64)
+}
+
+fn f64_store(
+ u: &mut Unstructured,
+ module: &Module,
+ builder: &mut CodeBuilder,
+ instructions: &mut Vec<Instruction>,
+) -> Result<()> {
+ builder.pop_operands(module, &[ValType::F64]);
+ let memarg = mem_arg(u, module, builder, &[0, 1, 2, 3])?;
+ if module.config.disallow_traps {
+ no_traps::store(Instruction::F64Store(memarg), module, builder, instructions);
+ } else {
+ instructions.push(Instruction::F64Store(memarg));
+ }
+ Ok(())
+}
+
+fn i32_store_8(
+ u: &mut Unstructured,
+ module: &Module,
+ builder: &mut CodeBuilder,
+ instructions: &mut Vec<Instruction>,
+) -> Result<()> {
+ builder.pop_operands(module, &[ValType::I32]);
+ let memarg = mem_arg(u, module, builder, &[0])?;
+ if module.config.disallow_traps {
+ no_traps::store(
+ Instruction::I32Store8(memarg),
+ module,
+ builder,
+ instructions,
+ );
+ } else {
+ instructions.push(Instruction::I32Store8(memarg));
+ }
+ Ok(())
+}
+
+fn i32_store_16(
+ u: &mut Unstructured,
+ module: &Module,
+ builder: &mut CodeBuilder,
+ instructions: &mut Vec<Instruction>,
+) -> Result<()> {
+ builder.pop_operands(module, &[ValType::I32]);
+ let memarg = mem_arg(u, module, builder, &[0, 1])?;
+ if module.config.disallow_traps {
+ no_traps::store(
+ Instruction::I32Store16(memarg),
+ module,
+ builder,
+ instructions,
+ );
+ } else {
+ instructions.push(Instruction::I32Store16(memarg));
+ }
+ Ok(())
+}
+
+fn i64_store_8(
+ u: &mut Unstructured,
+ module: &Module,
+ builder: &mut CodeBuilder,
+ instructions: &mut Vec<Instruction>,
+) -> Result<()> {
+ builder.pop_operands(module, &[ValType::I64]);
+ let memarg = mem_arg(u, module, builder, &[0])?;
+ if module.config.disallow_traps {
+ no_traps::store(
+ Instruction::I64Store8(memarg),
+ module,
+ builder,
+ instructions,
+ );
+ } else {
+ instructions.push(Instruction::I64Store8(memarg));
+ }
+ Ok(())
+}
+
+fn i64_store_16(
+ u: &mut Unstructured,
+ module: &Module,
+ builder: &mut CodeBuilder,
+ instructions: &mut Vec<Instruction>,
+) -> Result<()> {
+ builder.pop_operands(module, &[ValType::I64]);
+ let memarg = mem_arg(u, module, builder, &[0, 1])?;
+ if module.config.disallow_traps {
+ no_traps::store(
+ Instruction::I64Store16(memarg),
+ module,
+ builder,
+ instructions,
+ );
+ } else {
+ instructions.push(Instruction::I64Store16(memarg));
+ }
+ Ok(())
+}
+
+fn i64_store_32(
+ u: &mut Unstructured,
+ module: &Module,
+ builder: &mut CodeBuilder,
+ instructions: &mut Vec<Instruction>,
+) -> Result<()> {
+ builder.pop_operands(module, &[ValType::I64]);
+ let memarg = mem_arg(u, module, builder, &[0, 1, 2])?;
+ if module.config.disallow_traps {
+ no_traps::store(
+ Instruction::I64Store32(memarg),
+ module,
+ builder,
+ instructions,
+ );
+ } else {
+ instructions.push(Instruction::I64Store32(memarg));
+ }
+ Ok(())
+}
+
+fn memory_size(
+ u: &mut Unstructured,
+ module: &Module,
+ builder: &mut CodeBuilder,
+ instructions: &mut Vec<Instruction>,
+) -> Result<()> {
+ let i = u.int_in_range(0..=module.memories.len() - 1)?;
+ let ty = if module.memories[i].memory64 {
+ ValType::I64
+ } else {
+ ValType::I32
+ };
+ builder.push_operands(&[ty]);
+ instructions.push(Instruction::MemorySize(i as u32));
+ Ok(())
+}
+
+#[inline]
+fn memory_grow_valid(module: &Module, builder: &mut CodeBuilder) -> bool {
+ (builder.allocs.memory32.len() > 0 && builder.type_on_stack(module, ValType::I32))
+ || (builder.allocs.memory64.len() > 0 && builder.type_on_stack(module, ValType::I64))
+}
+
+fn memory_grow(
+ u: &mut Unstructured,
+ module: &Module,
+ builder: &mut CodeBuilder,
+ instructions: &mut Vec<Instruction>,
+) -> Result<()> {
+ let ty = if builder.type_on_stack(module, ValType::I32) {
+ ValType::I32
+ } else {
+ ValType::I64
+ };
+ let index = memory_index(u, builder, ty)?;
+ builder.pop_operands(module, &[ty]);
+ builder.push_operands(&[ty]);
+ instructions.push(Instruction::MemoryGrow(index));
+ Ok(())
+}
+
+#[inline]
+fn memory_init_valid(module: &Module, builder: &mut CodeBuilder) -> bool {
+ module.config.bulk_memory_enabled
+ && have_data(module, builder)
+ && !module.config.disallow_traps // Non-trapping memory init not yet implemented
+ && (builder.allocs.memory32.len() > 0
+ && builder.types_on_stack(module, &[ValType::I32, ValType::I32, ValType::I32])
+ || (builder.allocs.memory64.len() > 0
+ && builder.types_on_stack(module, &[ValType::I64, ValType::I32, ValType::I32])))
+}
+
+fn memory_init(
+ u: &mut Unstructured,
+ module: &Module,
+ builder: &mut CodeBuilder,
+ instructions: &mut Vec<Instruction>,
+) -> Result<()> {
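+    // The operands are [dst, src, len]: `src` and `len` index into the
+    // passive data segment and are always i32s; only `dst` uses the memory's
+    // index type.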
+ builder.pop_operands(module, &[ValType::I32, ValType::I32]);
+ let ty = if builder.type_on_stack(module, ValType::I32) {
+ ValType::I32
+ } else {
+ ValType::I64
+ };
+ let mem = memory_index(u, builder, ty)?;
+ let data_index = data_index(u, module)?;
+ builder.pop_operands(module, &[ty]);
+ instructions.push(Instruction::MemoryInit { mem, data_index });
+ Ok(())
+}
+
+#[inline]
+fn memory_fill_valid(module: &Module, builder: &mut CodeBuilder) -> bool {
+ module.config.bulk_memory_enabled
+ && !module.config.disallow_traps // Non-trapping memory fill generation not yet implemented
+ && (builder.allocs.memory32.len() > 0
+ && builder.types_on_stack(module, &[ValType::I32, ValType::I32, ValType::I32])
+ || (builder.allocs.memory64.len() > 0
+ && builder.types_on_stack(module, &[ValType::I64, ValType::I32, ValType::I64])))
+}
+
+fn memory_fill(
+ u: &mut Unstructured,
+ module: &Module,
+ builder: &mut CodeBuilder,
+ instructions: &mut Vec<Instruction>,
+) -> Result<()> {
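+    // The operands are [dst, value, len], where `value` is always an i32 and
+    // `dst`/`len` use the memory's index type (i64 for 64-bit memories).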
+ let ty = if builder.type_on_stack(module, ValType::I32) {
+ ValType::I32
+ } else {
+ ValType::I64
+ };
+ let mem = memory_index(u, builder, ty)?;
+ builder.pop_operands(module, &[ty, ValType::I32, ty]);
+ instructions.push(Instruction::MemoryFill(mem));
+ Ok(())
+}
+
+#[inline]
+fn memory_copy_valid(module: &Module, builder: &mut CodeBuilder) -> bool {
+ if !module.config.bulk_memory_enabled {
+ return false;
+ }
+
+    // The non-trapping case for memory copy has not yet been implemented,
+    // so we exclude it for now.
+ if module.config.disallow_traps {
+ return false;
+ }
+
+ if builder.types_on_stack(module, &[ValType::I64, ValType::I64, ValType::I64])
+ && builder.allocs.memory64.len() > 0
+ {
+ return true;
+ }
+ if builder.types_on_stack(module, &[ValType::I32, ValType::I32, ValType::I32])
+ && builder.allocs.memory32.len() > 0
+ {
+ return true;
+ }
+ if builder.types_on_stack(module, &[ValType::I64, ValType::I32, ValType::I32])
+ && builder.allocs.memory32.len() > 0
+ && builder.allocs.memory64.len() > 0
+ {
+ return true;
+ }
+ if builder.types_on_stack(module, &[ValType::I32, ValType::I64, ValType::I32])
+ && builder.allocs.memory32.len() > 0
+ && builder.allocs.memory64.len() > 0
+ {
+ return true;
+ }
+ false
+}
+
+fn memory_copy(
+ u: &mut Unstructured,
+ module: &Module,
+ builder: &mut CodeBuilder,
+ instructions: &mut Vec<Instruction>,
+) -> Result<()> {
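+    // The operands are [dst, src, len]. The four cases below cover every
+    // combination of 32- and 64-bit destination and source memories; when the
+    // index types differ, `len` is an i32 (the narrower of the two).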
+ let (src_mem, dst_mem) =
+ if builder.types_on_stack(module, &[ValType::I64, ValType::I64, ValType::I64]) {
+ builder.pop_operands(module, &[ValType::I64, ValType::I64, ValType::I64]);
+ (
+ memory_index(u, builder, ValType::I64)?,
+ memory_index(u, builder, ValType::I64)?,
+ )
+ } else if builder.types_on_stack(module, &[ValType::I32, ValType::I32, ValType::I32]) {
+ builder.pop_operands(module, &[ValType::I32, ValType::I32, ValType::I32]);
+ (
+ memory_index(u, builder, ValType::I32)?,
+ memory_index(u, builder, ValType::I32)?,
+ )
+ } else if builder.types_on_stack(module, &[ValType::I64, ValType::I32, ValType::I32]) {
+ builder.pop_operands(module, &[ValType::I64, ValType::I32, ValType::I32]);
+ (
+ memory_index(u, builder, ValType::I32)?,
+ memory_index(u, builder, ValType::I64)?,
+ )
+ } else if builder.types_on_stack(module, &[ValType::I32, ValType::I64, ValType::I32]) {
+ builder.pop_operands(module, &[ValType::I32, ValType::I64, ValType::I32]);
+ (
+ memory_index(u, builder, ValType::I64)?,
+ memory_index(u, builder, ValType::I32)?,
+ )
+ } else {
+ unreachable!()
+ };
+ instructions.push(Instruction::MemoryCopy { dst_mem, src_mem });
+ Ok(())
+}
+
+#[inline]
+fn data_drop_valid(module: &Module, builder: &mut CodeBuilder) -> bool {
+ have_data(module, builder) && module.config.bulk_memory_enabled
+}
+
+fn data_drop(
+ u: &mut Unstructured,
+ module: &Module,
+ _builder: &mut CodeBuilder,
+ instructions: &mut Vec<Instruction>,
+) -> Result<()> {
+ instructions.push(Instruction::DataDrop(data_index(u, module)?));
+ Ok(())
+}
+
+fn i32_const(
+ u: &mut Unstructured,
+ _module: &Module,
+ builder: &mut CodeBuilder,
+ instructions: &mut Vec<Instruction>,
+) -> Result<()> {
+ let x = u.arbitrary()?;
+ builder.push_operands(&[ValType::I32]);
+ instructions.push(Instruction::I32Const(x));
+ Ok(())
+}
+
+fn i64_const(
+ u: &mut Unstructured,
+ _module: &Module,
+ builder: &mut CodeBuilder,
+ instructions: &mut Vec<Instruction>,
+) -> Result<()> {
+ let x = u.arbitrary()?;
+ builder.push_operands(&[ValType::I64]);
+ instructions.push(Instruction::I64Const(x));
+ Ok(())
+}
+
+fn f32_const(
+ u: &mut Unstructured,
+ _module: &Module,
+ builder: &mut CodeBuilder,
+ instructions: &mut Vec<Instruction>,
+) -> Result<()> {
+ let x = u.arbitrary()?;
+ builder.push_operands(&[ValType::F32]);
+ instructions.push(Instruction::F32Const(x));
+ Ok(())
+}
+
+fn f64_const(
+ u: &mut Unstructured,
+ _module: &Module,
+ builder: &mut CodeBuilder,
+ instructions: &mut Vec<Instruction>,
+) -> Result<()> {
+ let x = u.arbitrary()?;
+ builder.push_operands(&[ValType::F64]);
+ instructions.push(Instruction::F64Const(x));
+ Ok(())
+}
+
+#[inline]
+fn i32_on_stack(module: &Module, builder: &mut CodeBuilder) -> bool {
+ builder.type_on_stack(module, ValType::I32)
+}
+
+fn i32_eqz(
+ _: &mut Unstructured,
+ module: &Module,
+ builder: &mut CodeBuilder,
+ instructions: &mut Vec<Instruction>,
+) -> Result<()> {
+ builder.pop_operands(module, &[ValType::I32]);
+ builder.push_operands(&[ValType::I32]);
+ instructions.push(Instruction::I32Eqz);
+ Ok(())
+}
+
+#[inline]
+fn i32_i32_on_stack(module: &Module, builder: &mut CodeBuilder) -> bool {
+ builder.types_on_stack(module, &[ValType::I32, ValType::I32])
+}
+
+fn i32_eq(
+ _: &mut Unstructured,
+ module: &Module,
+ builder: &mut CodeBuilder,
+ instructions: &mut Vec<Instruction>,
+) -> Result<()> {
+ builder.pop_operands(module, &[ValType::I32, ValType::I32]);
+ builder.push_operands(&[ValType::I32]);
+ instructions.push(Instruction::I32Eq);
+ Ok(())
+}
+
+fn i32_ne(
+ _: &mut Unstructured,
+ module: &Module,
+ builder: &mut CodeBuilder,
+ instructions: &mut Vec<Instruction>,
+) -> Result<()> {
+ builder.pop_operands(module, &[ValType::I32, ValType::I32]);
+ builder.push_operands(&[ValType::I32]);
+ instructions.push(Instruction::I32Ne);
+ Ok(())
+}
+
+fn i32_lt_s(
+ _: &mut Unstructured,
+ module: &Module,
+ builder: &mut CodeBuilder,
+ instructions: &mut Vec<Instruction>,
+) -> Result<()> {
+ builder.pop_operands(module, &[ValType::I32, ValType::I32]);
+ builder.push_operands(&[ValType::I32]);
+ instructions.push(Instruction::I32LtS);
+ Ok(())
+}
+
+fn i32_lt_u(
+ _: &mut Unstructured,
+ module: &Module,
+ builder: &mut CodeBuilder,
+ instructions: &mut Vec<Instruction>,
+) -> Result<()> {
+ builder.pop_operands(module, &[ValType::I32, ValType::I32]);
+ builder.push_operands(&[ValType::I32]);
+ instructions.push(Instruction::I32LtU);
+ Ok(())
+}
+
+fn i32_gt_s(
+ _: &mut Unstructured,
+ module: &Module,
+ builder: &mut CodeBuilder,
+ instructions: &mut Vec<Instruction>,
+) -> Result<()> {
+ builder.pop_operands(module, &[ValType::I32, ValType::I32]);
+ builder.push_operands(&[ValType::I32]);
+ instructions.push(Instruction::I32GtS);
+ Ok(())
+}
+
+fn i32_gt_u(
+ _: &mut Unstructured,
+ module: &Module,
+ builder: &mut CodeBuilder,
+ instructions: &mut Vec<Instruction>,
+) -> Result<()> {
+ builder.pop_operands(module, &[ValType::I32, ValType::I32]);
+ builder.push_operands(&[ValType::I32]);
+ instructions.push(Instruction::I32GtU);
+ Ok(())
+}
+
+fn i32_le_s(
+ _: &mut Unstructured,
+ module: &Module,
+ builder: &mut CodeBuilder,
+ instructions: &mut Vec<Instruction>,
+) -> Result<()> {
+ builder.pop_operands(module, &[ValType::I32, ValType::I32]);
+ builder.push_operands(&[ValType::I32]);
+ instructions.push(Instruction::I32LeS);
+ Ok(())
+}
+
+fn i32_le_u(
+ _: &mut Unstructured,
+ module: &Module,
+ builder: &mut CodeBuilder,
+ instructions: &mut Vec<Instruction>,
+) -> Result<()> {
+ builder.pop_operands(module, &[ValType::I32, ValType::I32]);
+ builder.push_operands(&[ValType::I32]);
+ instructions.push(Instruction::I32LeU);
+ Ok(())
+}
+
+fn i32_ge_s(
+ _: &mut Unstructured,
+ module: &Module,
+ builder: &mut CodeBuilder,
+ instructions: &mut Vec<Instruction>,
+) -> Result<()> {
+ builder.pop_operands(module, &[ValType::I32, ValType::I32]);
+ builder.push_operands(&[ValType::I32]);
+ instructions.push(Instruction::I32GeS);
+ Ok(())
+}
+
+fn i32_ge_u(
+ _: &mut Unstructured,
+ module: &Module,
+ builder: &mut CodeBuilder,
+ instructions: &mut Vec<Instruction>,
+) -> Result<()> {
+ builder.pop_operands(module, &[ValType::I32, ValType::I32]);
+ builder.push_operands(&[ValType::I32]);
+ instructions.push(Instruction::I32GeU);
+ Ok(())
+}
+
+#[inline]
+fn i64_on_stack(module: &Module, builder: &mut CodeBuilder) -> bool {
+ builder.types_on_stack(module, &[ValType::I64])
+}
+
+fn i64_eqz(
+ _: &mut Unstructured,
+ module: &Module,
+ builder: &mut CodeBuilder,
+ instructions: &mut Vec<Instruction>,
+) -> Result<()> {
+ builder.pop_operands(module, &[ValType::I64]);
+ builder.push_operands(&[ValType::I32]);
+ instructions.push(Instruction::I64Eqz);
+ Ok(())
+}
+
+#[inline]
+fn i64_i64_on_stack(module: &Module, builder: &mut CodeBuilder) -> bool {
+ builder.types_on_stack(module, &[ValType::I64, ValType::I64])
+}
+
+fn i64_eq(
+ _: &mut Unstructured,
+ module: &Module,
+ builder: &mut CodeBuilder,
+ instructions: &mut Vec<Instruction>,
+) -> Result<()> {
+ builder.pop_operands(module, &[ValType::I64, ValType::I64]);
+ builder.push_operands(&[ValType::I32]);
+ instructions.push(Instruction::I64Eq);
+ Ok(())
+}
+
+fn i64_ne(
+ _: &mut Unstructured,
+ module: &Module,
+ builder: &mut CodeBuilder,
+ instructions: &mut Vec<Instruction>,
+) -> Result<()> {
+ builder.pop_operands(module, &[ValType::I64, ValType::I64]);
+ builder.push_operands(&[ValType::I32]);
+ instructions.push(Instruction::I64Ne);
+ Ok(())
+}
+
+fn i64_lt_s(
+ _: &mut Unstructured,
+ module: &Module,
+ builder: &mut CodeBuilder,
+ instructions: &mut Vec<Instruction>,
+) -> Result<()> {
+ builder.pop_operands(module, &[ValType::I64, ValType::I64]);
+ builder.push_operands(&[ValType::I32]);
+ instructions.push(Instruction::I64LtS);
+ Ok(())
+}
+
+fn i64_lt_u(
+ _: &mut Unstructured,
+ module: &Module,
+ builder: &mut CodeBuilder,
+ instructions: &mut Vec<Instruction>,
+) -> Result<()> {
+ builder.pop_operands(module, &[ValType::I64, ValType::I64]);
+ builder.push_operands(&[ValType::I32]);
+ instructions.push(Instruction::I64LtU);
+ Ok(())
+}
+
+fn i64_gt_s(
+ _: &mut Unstructured,
+ module: &Module,
+ builder: &mut CodeBuilder,
+ instructions: &mut Vec<Instruction>,
+) -> Result<()> {
+ builder.pop_operands(module, &[ValType::I64, ValType::I64]);
+ builder.push_operands(&[ValType::I32]);
+ instructions.push(Instruction::I64GtS);
+ Ok(())
+}
+
+fn i64_gt_u(
+ _: &mut Unstructured,
+ module: &Module,
+ builder: &mut CodeBuilder,
+ instructions: &mut Vec<Instruction>,
+) -> Result<()> {
+ builder.pop_operands(module, &[ValType::I64, ValType::I64]);
+ builder.push_operands(&[ValType::I32]);
+ instructions.push(Instruction::I64GtU);
+ Ok(())
+}
+
+fn i64_le_s(
+ _: &mut Unstructured,
+ module: &Module,
+ builder: &mut CodeBuilder,
+ instructions: &mut Vec<Instruction>,
+) -> Result<()> {
+ builder.pop_operands(module, &[ValType::I64, ValType::I64]);
+ builder.push_operands(&[ValType::I32]);
+ instructions.push(Instruction::I64LeS);
+ Ok(())
+}
+
+fn i64_le_u(
+ _: &mut Unstructured,
+ module: &Module,
+ builder: &mut CodeBuilder,
+ instructions: &mut Vec<Instruction>,
+) -> Result<()> {
+ builder.pop_operands(module, &[ValType::I64, ValType::I64]);
+ builder.push_operands(&[ValType::I32]);
+ instructions.push(Instruction::I64LeU);
+ Ok(())
+}
+
+fn i64_ge_s(
+ _: &mut Unstructured,
+ module: &Module,
+ builder: &mut CodeBuilder,
+ instructions: &mut Vec<Instruction>,
+) -> Result<()> {
+ builder.pop_operands(module, &[ValType::I64, ValType::I64]);
+ builder.push_operands(&[ValType::I32]);
+ instructions.push(Instruction::I64GeS);
+ Ok(())
+}
+
+fn i64_ge_u(
+ _: &mut Unstructured,
+ module: &Module,
+ builder: &mut CodeBuilder,
+ instructions: &mut Vec<Instruction>,
+) -> Result<()> {
+ builder.pop_operands(module, &[ValType::I64, ValType::I64]);
+ builder.push_operands(&[ValType::I32]);
+ instructions.push(Instruction::I64GeU);
+ Ok(())
+}
+
+#[inline]
+fn f32_f32_on_stack(module: &Module, builder: &mut CodeBuilder) -> bool {
+ builder.types_on_stack(module, &[ValType::F32, ValType::F32])
+}
+
+fn f32_eq(
+ _: &mut Unstructured,
+ module: &Module,
+ builder: &mut CodeBuilder,
+ instructions: &mut Vec<Instruction>,
+) -> Result<()> {
+ builder.pop_operands(module, &[ValType::F32, ValType::F32]);
+ builder.push_operands(&[ValType::I32]);
+ instructions.push(Instruction::F32Eq);
+ Ok(())
+}
+
+fn f32_ne(
+ _: &mut Unstructured,
+ module: &Module,
+ builder: &mut CodeBuilder,
+ instructions: &mut Vec<Instruction>,
+) -> Result<()> {
+ builder.pop_operands(module, &[ValType::F32, ValType::F32]);
+ builder.push_operands(&[ValType::I32]);
+ instructions.push(Instruction::F32Ne);
+ Ok(())
+}
+
+fn f32_lt(
+ _: &mut Unstructured,
+ module: &Module,
+ builder: &mut CodeBuilder,
+ instructions: &mut Vec<Instruction>,
+) -> Result<()> {
+ builder.pop_operands(module, &[ValType::F32, ValType::F32]);
+ builder.push_operands(&[ValType::I32]);
+ instructions.push(Instruction::F32Lt);
+ Ok(())
+}
+
+fn f32_gt(
+ _: &mut Unstructured,
+ module: &Module,
+ builder: &mut CodeBuilder,
+ instructions: &mut Vec<Instruction>,
+) -> Result<()> {
+ builder.pop_operands(module, &[ValType::F32, ValType::F32]);
+ builder.push_operands(&[ValType::I32]);
+ instructions.push(Instruction::F32Gt);
+ Ok(())
+}
+
+fn f32_le(
+ _: &mut Unstructured,
+ module: &Module,
+ builder: &mut CodeBuilder,
+ instructions: &mut Vec<Instruction>,
+) -> Result<()> {
+ builder.pop_operands(module, &[ValType::F32, ValType::F32]);
+ builder.push_operands(&[ValType::I32]);
+ instructions.push(Instruction::F32Le);
+ Ok(())
+}
+
+fn f32_ge(
+ _: &mut Unstructured,
+ module: &Module,
+ builder: &mut CodeBuilder,
+ instructions: &mut Vec<Instruction>,
+) -> Result<()> {
+ builder.pop_operands(module, &[ValType::F32, ValType::F32]);
+ builder.push_operands(&[ValType::I32]);
+ instructions.push(Instruction::F32Ge);
+ Ok(())
+}
+
+#[inline]
+fn f64_f64_on_stack(module: &Module, builder: &mut CodeBuilder) -> bool {
+ builder.types_on_stack(module, &[ValType::F64, ValType::F64])
+}
+
+fn f64_eq(
+ _: &mut Unstructured,
+ module: &Module,
+ builder: &mut CodeBuilder,
+ instructions: &mut Vec<Instruction>,
+) -> Result<()> {
+ builder.pop_operands(module, &[ValType::F64, ValType::F64]);
+ builder.push_operands(&[ValType::I32]);
+ instructions.push(Instruction::F64Eq);
+ Ok(())
+}
+
+fn f64_ne(
+ _: &mut Unstructured,
+ module: &Module,
+ builder: &mut CodeBuilder,
+ instructions: &mut Vec<Instruction>,
+) -> Result<()> {
+ builder.pop_operands(module, &[ValType::F64, ValType::F64]);
+ builder.push_operands(&[ValType::I32]);
+ instructions.push(Instruction::F64Ne);
+ Ok(())
+}
+
+fn f64_lt(
+ _: &mut Unstructured,
+ module: &Module,
+ builder: &mut CodeBuilder,
+ instructions: &mut Vec<Instruction>,
+) -> Result<()> {
+ builder.pop_operands(module, &[ValType::F64, ValType::F64]);
+ builder.push_operands(&[ValType::I32]);
+ instructions.push(Instruction::F64Lt);
+ Ok(())
+}
+
+fn f64_gt(
+ _: &mut Unstructured,
+ module: &Module,
+ builder: &mut CodeBuilder,
+ instructions: &mut Vec<Instruction>,
+) -> Result<()> {
+ builder.pop_operands(module, &[ValType::F64, ValType::F64]);
+ builder.push_operands(&[ValType::I32]);
+ instructions.push(Instruction::F64Gt);
+ Ok(())
+}
+
+fn f64_le(
+ _: &mut Unstructured,
+ module: &Module,
+ builder: &mut CodeBuilder,
+ instructions: &mut Vec<Instruction>,
+) -> Result<()> {
+ builder.pop_operands(module, &[ValType::F64, ValType::F64]);
+ builder.push_operands(&[ValType::I32]);
+ instructions.push(Instruction::F64Le);
+ Ok(())
+}
+
+fn f64_ge(
+ _: &mut Unstructured,
+ module: &Module,
+ builder: &mut CodeBuilder,
+ instructions: &mut Vec<Instruction>,
+) -> Result<()> {
+ builder.pop_operands(module, &[ValType::F64, ValType::F64]);
+ builder.push_operands(&[ValType::I32]);
+ instructions.push(Instruction::F64Ge);
+ Ok(())
+}
+
+fn i32_clz(
+ _: &mut Unstructured,
+ module: &Module,
+ builder: &mut CodeBuilder,
+ instructions: &mut Vec<Instruction>,
+) -> Result<()> {
+ builder.pop_operands(module, &[ValType::I32]);
+ builder.push_operands(&[ValType::I32]);
+ instructions.push(Instruction::I32Clz);
+ Ok(())
+}
+
+fn i32_ctz(
+ _: &mut Unstructured,
+ module: &Module,
+ builder: &mut CodeBuilder,
+ instructions: &mut Vec<Instruction>,
+) -> Result<()> {
+ builder.pop_operands(module, &[ValType::I32]);
+ builder.push_operands(&[ValType::I32]);
+ instructions.push(Instruction::I32Ctz);
+ Ok(())
+}
+
+fn i32_popcnt(
+ _: &mut Unstructured,
+ module: &Module,
+ builder: &mut CodeBuilder,
+ instructions: &mut Vec<Instruction>,
+) -> Result<()> {
+ builder.pop_operands(module, &[ValType::I32]);
+ builder.push_operands(&[ValType::I32]);
+ instructions.push(Instruction::I32Popcnt);
+ Ok(())
+}
+
+fn i32_add(
+ _: &mut Unstructured,
+ module: &Module,
+ builder: &mut CodeBuilder,
+ instructions: &mut Vec<Instruction>,
+) -> Result<()> {
+ builder.pop_operands(module, &[ValType::I32, ValType::I32]);
+ builder.push_operands(&[ValType::I32]);
+ instructions.push(Instruction::I32Add);
+ Ok(())
+}
+
+fn i32_sub(
+ _: &mut Unstructured,
+ module: &Module,
+ builder: &mut CodeBuilder,
+ instructions: &mut Vec<Instruction>,
+) -> Result<()> {
+ builder.pop_operands(module, &[ValType::I32, ValType::I32]);
+ builder.push_operands(&[ValType::I32]);
+ instructions.push(Instruction::I32Sub);
+ Ok(())
+}
+
+fn i32_mul(
+ _: &mut Unstructured,
+ module: &Module,
+ builder: &mut CodeBuilder,
+ instructions: &mut Vec<Instruction>,
+) -> Result<()> {
+ builder.pop_operands(module, &[ValType::I32, ValType::I32]);
+ builder.push_operands(&[ValType::I32]);
+ instructions.push(Instruction::I32Mul);
+ Ok(())
+}
+
+fn i32_div_s(
+ _: &mut Unstructured,
+ module: &Module,
+ builder: &mut CodeBuilder,
+ instructions: &mut Vec<Instruction>,
+) -> Result<()> {
+ builder.pop_operands(module, &[ValType::I32, ValType::I32]);
+ builder.push_operands(&[ValType::I32]);
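+    // Signed division traps on a zero divisor and on `MIN / -1` overflow;
+    // `no_traps::signed_div_rem` emits guards for both cases.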
+ if module.config.disallow_traps {
+ no_traps::signed_div_rem(Instruction::I32DivS, builder, instructions);
+ } else {
+ instructions.push(Instruction::I32DivS);
+ }
+ Ok(())
+}
+
+fn i32_div_u(
+ _: &mut Unstructured,
+ module: &Module,
+ builder: &mut CodeBuilder,
+ instructions: &mut Vec<Instruction>,
+) -> Result<()> {
+ builder.pop_operands(module, &[ValType::I32, ValType::I32]);
+ builder.push_operands(&[ValType::I32]);
+ if module.config.disallow_traps {
+ no_traps::unsigned_div_rem(Instruction::I32DivU, builder, instructions);
+ } else {
+ instructions.push(Instruction::I32DivU);
+ }
+ Ok(())
+}
+
+fn i32_rem_s(
+ _: &mut Unstructured,
+ module: &Module,
+ builder: &mut CodeBuilder,
+ instructions: &mut Vec<Instruction>,
+) -> Result<()> {
+ builder.pop_operands(module, &[ValType::I32, ValType::I32]);
+ builder.push_operands(&[ValType::I32]);
+ if module.config.disallow_traps {
+ no_traps::signed_div_rem(Instruction::I32RemS, builder, instructions);
+ } else {
+ instructions.push(Instruction::I32RemS);
+ }
+ Ok(())
+}
+
+fn i32_rem_u(
+ _: &mut Unstructured,
+ module: &Module,
+ builder: &mut CodeBuilder,
+ instructions: &mut Vec<Instruction>,
+) -> Result<()> {
+ builder.pop_operands(module, &[ValType::I32, ValType::I32]);
+ builder.push_operands(&[ValType::I32]);
+ if module.config.disallow_traps {
+ no_traps::unsigned_div_rem(Instruction::I32RemU, builder, instructions);
+ } else {
+ instructions.push(Instruction::I32RemU);
+ }
+ Ok(())
+}
+
+fn i32_and(
+ _: &mut Unstructured,
+ module: &Module,
+ builder: &mut CodeBuilder,
+ instructions: &mut Vec<Instruction>,
+) -> Result<()> {
+ builder.pop_operands(module, &[ValType::I32, ValType::I32]);
+ builder.push_operands(&[ValType::I32]);
+ instructions.push(Instruction::I32And);
+ Ok(())
+}
+
+fn i32_or(
+ _: &mut Unstructured,
+ module: &Module,
+ builder: &mut CodeBuilder,
+ instructions: &mut Vec<Instruction>,
+) -> Result<()> {
+ builder.pop_operands(module, &[ValType::I32, ValType::I32]);
+ builder.push_operands(&[ValType::I32]);
+ instructions.push(Instruction::I32Or);
+ Ok(())
+}
+
+fn i32_xor(
+ _: &mut Unstructured,
+ module: &Module,
+ builder: &mut CodeBuilder,
+ instructions: &mut Vec<Instruction>,
+) -> Result<()> {
+ builder.pop_operands(module, &[ValType::I32, ValType::I32]);
+ builder.push_operands(&[ValType::I32]);
+ instructions.push(Instruction::I32Xor);
+ Ok(())
+}
+
+fn i32_shl(
+ _: &mut Unstructured,
+ module: &Module,
+ builder: &mut CodeBuilder,
+ instructions: &mut Vec<Instruction>,
+) -> Result<()> {
+ builder.pop_operands(module, &[ValType::I32, ValType::I32]);
+ builder.push_operands(&[ValType::I32]);
+ instructions.push(Instruction::I32Shl);
+ Ok(())
+}
+
+fn i32_shr_s(
+ _: &mut Unstructured,
+ module: &Module,
+ builder: &mut CodeBuilder,
+ instructions: &mut Vec<Instruction>,
+) -> Result<()> {
+ builder.pop_operands(module, &[ValType::I32, ValType::I32]);
+ builder.push_operands(&[ValType::I32]);
+ instructions.push(Instruction::I32ShrS);
+ Ok(())
+}
+
+fn i32_shr_u(
+ _: &mut Unstructured,
+ module: &Module,
+ builder: &mut CodeBuilder,
+ instructions: &mut Vec<Instruction>,
+) -> Result<()> {
+ builder.pop_operands(module, &[ValType::I32, ValType::I32]);
+ builder.push_operands(&[ValType::I32]);
+ instructions.push(Instruction::I32ShrU);
+ Ok(())
+}
+
+fn i32_rotl(
+ _: &mut Unstructured,
+ module: &Module,
+ builder: &mut CodeBuilder,
+ instructions: &mut Vec<Instruction>,
+) -> Result<()> {
+ builder.pop_operands(module, &[ValType::I32, ValType::I32]);
+ builder.push_operands(&[ValType::I32]);
+ instructions.push(Instruction::I32Rotl);
+ Ok(())
+}
+
+fn i32_rotr(
+ _: &mut Unstructured,
+ module: &Module,
+ builder: &mut CodeBuilder,
+ instructions: &mut Vec<Instruction>,
+) -> Result<()> {
+ builder.pop_operands(module, &[ValType::I32, ValType::I32]);
+ builder.push_operands(&[ValType::I32]);
+ instructions.push(Instruction::I32Rotr);
+ Ok(())
+}
+
+fn i64_clz(
+ _: &mut Unstructured,
+ module: &Module,
+ builder: &mut CodeBuilder,
+ instructions: &mut Vec<Instruction>,
+) -> Result<()> {
+ builder.pop_operands(module, &[ValType::I64]);
+ builder.push_operands(&[ValType::I64]);
+ instructions.push(Instruction::I64Clz);
+ Ok(())
+}
+
+fn i64_ctz(
+ _: &mut Unstructured,
+ module: &Module,
+ builder: &mut CodeBuilder,
+ instructions: &mut Vec<Instruction>,
+) -> Result<()> {
+ builder.pop_operands(module, &[ValType::I64]);
+ builder.push_operands(&[ValType::I64]);
+ instructions.push(Instruction::I64Ctz);
+ Ok(())
+}
+
+fn i64_popcnt(
+ _: &mut Unstructured,
+ module: &Module,
+ builder: &mut CodeBuilder,
+ instructions: &mut Vec<Instruction>,
+) -> Result<()> {
+ builder.pop_operands(module, &[ValType::I64]);
+ builder.push_operands(&[ValType::I64]);
+ instructions.push(Instruction::I64Popcnt);
+ Ok(())
+}
+
+fn i64_add(
+ _: &mut Unstructured,
+ module: &Module,
+ builder: &mut CodeBuilder,
+ instructions: &mut Vec<Instruction>,
+) -> Result<()> {
+ builder.pop_operands(module, &[ValType::I64, ValType::I64]);
+ builder.push_operands(&[ValType::I64]);
+ instructions.push(Instruction::I64Add);
+ Ok(())
+}
+
+fn i64_sub(
+ _: &mut Unstructured,
+ module: &Module,
+ builder: &mut CodeBuilder,
+ instructions: &mut Vec<Instruction>,
+) -> Result<()> {
+ builder.pop_operands(module, &[ValType::I64, ValType::I64]);
+ builder.push_operands(&[ValType::I64]);
+ instructions.push(Instruction::I64Sub);
+ Ok(())
+}
+
+fn i64_mul(
+ _: &mut Unstructured,
+ module: &Module,
+ builder: &mut CodeBuilder,
+ instructions: &mut Vec<Instruction>,
+) -> Result<()> {
+ builder.pop_operands(module, &[ValType::I64, ValType::I64]);
+ builder.push_operands(&[ValType::I64]);
+ instructions.push(Instruction::I64Mul);
+ Ok(())
+}
+
+fn i64_div_s(
+ _: &mut Unstructured,
+ module: &Module,
+ builder: &mut CodeBuilder,
+ instructions: &mut Vec<Instruction>,
+) -> Result<()> {
+ builder.pop_operands(module, &[ValType::I64, ValType::I64]);
+ builder.push_operands(&[ValType::I64]);
+ if module.config.disallow_traps {
+ no_traps::signed_div_rem(Instruction::I64DivS, builder, instructions);
+ } else {
+ instructions.push(Instruction::I64DivS);
+ }
+ Ok(())
+}
+
+fn i64_div_u(
+ _: &mut Unstructured,
+ module: &Module,
+ builder: &mut CodeBuilder,
+ instructions: &mut Vec<Instruction>,
+) -> Result<()> {
+ builder.pop_operands(module, &[ValType::I64, ValType::I64]);
+ builder.push_operands(&[ValType::I64]);
+ if module.config.disallow_traps {
+ no_traps::unsigned_div_rem(Instruction::I64DivU, builder, instructions);
+ } else {
+ instructions.push(Instruction::I64DivU);
+ }
+ Ok(())
+}
+
+fn i64_rem_s(
+ _: &mut Unstructured,
+ module: &Module,
+ builder: &mut CodeBuilder,
+ instructions: &mut Vec<Instruction>,
+) -> Result<()> {
+ builder.pop_operands(module, &[ValType::I64, ValType::I64]);
+ builder.push_operands(&[ValType::I64]);
+ if module.config.disallow_traps {
+ no_traps::signed_div_rem(Instruction::I64RemS, builder, instructions);
+ } else {
+ instructions.push(Instruction::I64RemS);
+ }
+ Ok(())
+}
+
+fn i64_rem_u(
+ _: &mut Unstructured,
+ module: &Module,
+ builder: &mut CodeBuilder,
+ instructions: &mut Vec<Instruction>,
+) -> Result<()> {
+ builder.pop_operands(module, &[ValType::I64, ValType::I64]);
+ builder.push_operands(&[ValType::I64]);
+ if module.config.disallow_traps {
+ no_traps::unsigned_div_rem(Instruction::I64RemU, builder, instructions);
+ } else {
+ instructions.push(Instruction::I64RemU);
+ }
+ Ok(())
+}
+
+fn i64_and(
+ _: &mut Unstructured,
+ module: &Module,
+ builder: &mut CodeBuilder,
+ instructions: &mut Vec<Instruction>,
+) -> Result<()> {
+ builder.pop_operands(module, &[ValType::I64, ValType::I64]);
+ builder.push_operands(&[ValType::I64]);
+ instructions.push(Instruction::I64And);
+ Ok(())
+}
+
+fn i64_or(
+ _: &mut Unstructured,
+ module: &Module,
+ builder: &mut CodeBuilder,
+ instructions: &mut Vec<Instruction>,
+) -> Result<()> {
+ builder.pop_operands(module, &[ValType::I64, ValType::I64]);
+ builder.push_operands(&[ValType::I64]);
+ instructions.push(Instruction::I64Or);
+ Ok(())
+}
+
+fn i64_xor(
+ _: &mut Unstructured,
+ module: &Module,
+ builder: &mut CodeBuilder,
+ instructions: &mut Vec<Instruction>,
+) -> Result<()> {
+ builder.pop_operands(module, &[ValType::I64, ValType::I64]);
+ builder.push_operands(&[ValType::I64]);
+ instructions.push(Instruction::I64Xor);
+ Ok(())
+}
+
+fn i64_shl(
+ _: &mut Unstructured,
+ module: &Module,
+ builder: &mut CodeBuilder,
+ instructions: &mut Vec<Instruction>,
+) -> Result<()> {
+ builder.pop_operands(module, &[ValType::I64, ValType::I64]);
+ builder.push_operands(&[ValType::I64]);
+ instructions.push(Instruction::I64Shl);
+ Ok(())
+}
+
+fn i64_shr_s(
+ _: &mut Unstructured,
+ module: &Module,
+ builder: &mut CodeBuilder,
+ instructions: &mut Vec<Instruction>,
+) -> Result<()> {
+ builder.pop_operands(module, &[ValType::I64, ValType::I64]);
+ builder.push_operands(&[ValType::I64]);
+ instructions.push(Instruction::I64ShrS);
+ Ok(())
+}
+
+fn i64_shr_u(
+ _: &mut Unstructured,
+ module: &Module,
+ builder: &mut CodeBuilder,
+ instructions: &mut Vec<Instruction>,
+) -> Result<()> {
+ builder.pop_operands(module, &[ValType::I64, ValType::I64]);
+ builder.push_operands(&[ValType::I64]);
+ instructions.push(Instruction::I64ShrU);
+ Ok(())
+}
+
+fn i64_rotl(
+ _: &mut Unstructured,
+ module: &Module,
+ builder: &mut CodeBuilder,
+ instructions: &mut Vec<Instruction>,
+) -> Result<()> {
+ builder.pop_operands(module, &[ValType::I64, ValType::I64]);
+ builder.push_operands(&[ValType::I64]);
+ instructions.push(Instruction::I64Rotl);
+ Ok(())
+}
+
+fn i64_rotr(
+ _: &mut Unstructured,
+ module: &Module,
+ builder: &mut CodeBuilder,
+ instructions: &mut Vec<Instruction>,
+) -> Result<()> {
+ builder.pop_operands(module, &[ValType::I64, ValType::I64]);
+ builder.push_operands(&[ValType::I64]);
+ instructions.push(Instruction::I64Rotr);
+ Ok(())
+}
+
+#[inline]
+fn f32_on_stack(module: &Module, builder: &mut CodeBuilder) -> bool {
+ builder.types_on_stack(module, &[ValType::F32])
+}
+
+fn f32_abs(
+ _: &mut Unstructured,
+ module: &Module,
+ builder: &mut CodeBuilder,
+ instructions: &mut Vec<Instruction>,
+) -> Result<()> {
+ builder.pop_operands(module, &[ValType::F32]);
+ builder.push_operands(&[ValType::F32]);
+ instructions.push(Instruction::F32Abs);
+ Ok(())
+}
+
+fn f32_neg(
+ _: &mut Unstructured,
+ module: &Module,
+ builder: &mut CodeBuilder,
+ instructions: &mut Vec<Instruction>,
+) -> Result<()> {
+ builder.pop_operands(module, &[ValType::F32]);
+ builder.push_operands(&[ValType::F32]);
+ instructions.push(Instruction::F32Neg);
+ Ok(())
+}
+
+fn f32_ceil(
+ _: &mut Unstructured,
+ module: &Module,
+ builder: &mut CodeBuilder,
+ instructions: &mut Vec<Instruction>,
+) -> Result<()> {
+ builder.pop_operands(module, &[ValType::F32]);
+ builder.push_operands(&[ValType::F32]);
+ instructions.push(Instruction::F32Ceil);
+ Ok(())
+}
+
+fn f32_floor(
+ _: &mut Unstructured,
+ module: &Module,
+ builder: &mut CodeBuilder,
+ instructions: &mut Vec<Instruction>,
+) -> Result<()> {
+ builder.pop_operands(module, &[ValType::F32]);
+ builder.push_operands(&[ValType::F32]);
+ instructions.push(Instruction::F32Floor);
+ Ok(())
+}
+
+fn f32_trunc(
+ _: &mut Unstructured,
+ module: &Module,
+ builder: &mut CodeBuilder,
+ instructions: &mut Vec<Instruction>,
+) -> Result<()> {
+ builder.pop_operands(module, &[ValType::F32]);
+ builder.push_operands(&[ValType::F32]);
+ instructions.push(Instruction::F32Trunc);
+ Ok(())
+}
+
+fn f32_nearest(
+ _: &mut Unstructured,
+ module: &Module,
+ builder: &mut CodeBuilder,
+ instructions: &mut Vec<Instruction>,
+) -> Result<()> {
+ builder.pop_operands(module, &[ValType::F32]);
+ builder.push_operands(&[ValType::F32]);
+ instructions.push(Instruction::F32Nearest);
+ Ok(())
+}
+
+fn f32_sqrt(
+ _: &mut Unstructured,
+ module: &Module,
+ builder: &mut CodeBuilder,
+ instructions: &mut Vec<Instruction>,
+) -> Result<()> {
+ builder.pop_operands(module, &[ValType::F32]);
+ builder.push_operands(&[ValType::F32]);
+ instructions.push(Instruction::F32Sqrt);
+ Ok(())
+}
+
+fn f32_add(
+ _: &mut Unstructured,
+ module: &Module,
+ builder: &mut CodeBuilder,
+ instructions: &mut Vec<Instruction>,
+) -> Result<()> {
+ builder.pop_operands(module, &[ValType::F32, ValType::F32]);
+ builder.push_operands(&[ValType::F32]);
+ instructions.push(Instruction::F32Add);
+ Ok(())
+}
+
+fn f32_sub(
+ _: &mut Unstructured,
+ module: &Module,
+ builder: &mut CodeBuilder,
+ instructions: &mut Vec<Instruction>,
+) -> Result<()> {
+ builder.pop_operands(module, &[ValType::F32, ValType::F32]);
+ builder.push_operands(&[ValType::F32]);
+ instructions.push(Instruction::F32Sub);
+ Ok(())
+}
+
+fn f32_mul(
+ _: &mut Unstructured,
+ module: &Module,
+ builder: &mut CodeBuilder,
+ instructions: &mut Vec<Instruction>,
+) -> Result<()> {
+ builder.pop_operands(module, &[ValType::F32, ValType::F32]);
+ builder.push_operands(&[ValType::F32]);
+ instructions.push(Instruction::F32Mul);
+ Ok(())
+}
+
+fn f32_div(
+ _: &mut Unstructured,
+ module: &Module,
+ builder: &mut CodeBuilder,
+ instructions: &mut Vec<Instruction>,
+) -> Result<()> {
+ builder.pop_operands(module, &[ValType::F32, ValType::F32]);
+ builder.push_operands(&[ValType::F32]);
+ instructions.push(Instruction::F32Div);
+ Ok(())
+}
+
+fn f32_min(
+ _: &mut Unstructured,
+ module: &Module,
+ builder: &mut CodeBuilder,
+ instructions: &mut Vec<Instruction>,
+) -> Result<()> {
+ builder.pop_operands(module, &[ValType::F32, ValType::F32]);
+ builder.push_operands(&[ValType::F32]);
+ instructions.push(Instruction::F32Min);
+ Ok(())
+}
+
+fn f32_max(
+ _: &mut Unstructured,
+ module: &Module,
+ builder: &mut CodeBuilder,
+ instructions: &mut Vec<Instruction>,
+) -> Result<()> {
+ builder.pop_operands(module, &[ValType::F32, ValType::F32]);
+ builder.push_operands(&[ValType::F32]);
+ instructions.push(Instruction::F32Max);
+ Ok(())
+}
+
+fn f32_copysign(
+ _: &mut Unstructured,
+ module: &Module,
+ builder: &mut CodeBuilder,
+ instructions: &mut Vec<Instruction>,
+) -> Result<()> {
+ builder.pop_operands(module, &[ValType::F32, ValType::F32]);
+ builder.push_operands(&[ValType::F32]);
+ instructions.push(Instruction::F32Copysign);
+ Ok(())
+}
+
+#[inline]
+fn f64_on_stack(module: &Module, builder: &mut CodeBuilder) -> bool {
+ builder.types_on_stack(module, &[ValType::F64])
+}
+
+fn f64_abs(
+ _: &mut Unstructured,
+ module: &Module,
+ builder: &mut CodeBuilder,
+ instructions: &mut Vec<Instruction>,
+) -> Result<()> {
+ builder.pop_operands(module, &[ValType::F64]);
+ builder.push_operands(&[ValType::F64]);
+ instructions.push(Instruction::F64Abs);
+ Ok(())
+}
+
+fn f64_neg(
+ _: &mut Unstructured,
+ module: &Module,
+ builder: &mut CodeBuilder,
+ instructions: &mut Vec<Instruction>,
+) -> Result<()> {
+ builder.pop_operands(module, &[ValType::F64]);
+ builder.push_operands(&[ValType::F64]);
+ instructions.push(Instruction::F64Neg);
+ Ok(())
+}
+
+fn f64_ceil(
+ _: &mut Unstructured,
+ module: &Module,
+ builder: &mut CodeBuilder,
+ instructions: &mut Vec<Instruction>,
+) -> Result<()> {
+ builder.pop_operands(module, &[ValType::F64]);
+ builder.push_operands(&[ValType::F64]);
+ instructions.push(Instruction::F64Ceil);
+ Ok(())
+}
+
+fn f64_floor(
+ _: &mut Unstructured,
+ module: &Module,
+ builder: &mut CodeBuilder,
+ instructions: &mut Vec<Instruction>,
+) -> Result<()> {
+ builder.pop_operands(module, &[ValType::F64]);
+ builder.push_operands(&[ValType::F64]);
+ instructions.push(Instruction::F64Floor);
+ Ok(())
+}
+
+fn f64_trunc(
+ _: &mut Unstructured,
+ module: &Module,
+ builder: &mut CodeBuilder,
+ instructions: &mut Vec<Instruction>,
+) -> Result<()> {
+ builder.pop_operands(module, &[ValType::F64]);
+ builder.push_operands(&[ValType::F64]);
+ instructions.push(Instruction::F64Trunc);
+ Ok(())
+}
+
+fn f64_nearest(
+ _: &mut Unstructured,
+ module: &Module,
+ builder: &mut CodeBuilder,
+ instructions: &mut Vec<Instruction>,
+) -> Result<()> {
+ builder.pop_operands(module, &[ValType::F64]);
+ builder.push_operands(&[ValType::F64]);
+ instructions.push(Instruction::F64Nearest);
+ Ok(())
+}
+
+fn f64_sqrt(
+ _: &mut Unstructured,
+ module: &Module,
+ builder: &mut CodeBuilder,
+ instructions: &mut Vec<Instruction>,
+) -> Result<()> {
+ builder.pop_operands(module, &[ValType::F64]);
+ builder.push_operands(&[ValType::F64]);
+ instructions.push(Instruction::F64Sqrt);
+ Ok(())
+}
+
+fn f64_add(
+ _: &mut Unstructured,
+ module: &Module,
+ builder: &mut CodeBuilder,
+ instructions: &mut Vec<Instruction>,
+) -> Result<()> {
+ builder.pop_operands(module, &[ValType::F64, ValType::F64]);
+ builder.push_operands(&[ValType::F64]);
+ instructions.push(Instruction::F64Add);
+ Ok(())
+}
+
+fn f64_sub(
+ _: &mut Unstructured,
+ module: &Module,
+ builder: &mut CodeBuilder,
+ instructions: &mut Vec<Instruction>,
+) -> Result<()> {
+ builder.pop_operands(module, &[ValType::F64, ValType::F64]);
+ builder.push_operands(&[ValType::F64]);
+ instructions.push(Instruction::F64Sub);
+ Ok(())
+}
+
+fn f64_mul(
+ _: &mut Unstructured,
+ module: &Module,
+ builder: &mut CodeBuilder,
+ instructions: &mut Vec<Instruction>,
+) -> Result<()> {
+ builder.pop_operands(module, &[ValType::F64, ValType::F64]);
+ builder.push_operands(&[ValType::F64]);
+ instructions.push(Instruction::F64Mul);
+ Ok(())
+}
+
+fn f64_div(
+ _: &mut Unstructured,
+ module: &Module,
+ builder: &mut CodeBuilder,
+ instructions: &mut Vec<Instruction>,
+) -> Result<()> {
+ builder.pop_operands(module, &[ValType::F64, ValType::F64]);
+ builder.push_operands(&[ValType::F64]);
+ instructions.push(Instruction::F64Div);
+ Ok(())
+}
+
+fn f64_min(
+ _: &mut Unstructured,
+ module: &Module,
+ builder: &mut CodeBuilder,
+ instructions: &mut Vec<Instruction>,
+) -> Result<()> {
+ builder.pop_operands(module, &[ValType::F64, ValType::F64]);
+ builder.push_operands(&[ValType::F64]);
+ instructions.push(Instruction::F64Min);
+ Ok(())
+}
+
+fn f64_max(
+ _: &mut Unstructured,
+ module: &Module,
+ builder: &mut CodeBuilder,
+ instructions: &mut Vec<Instruction>,
+) -> Result<()> {
+ builder.pop_operands(module, &[ValType::F64, ValType::F64]);
+ builder.push_operands(&[ValType::F64]);
+ instructions.push(Instruction::F64Max);
+ Ok(())
+}
+
+fn f64_copysign(
+ _: &mut Unstructured,
+ module: &Module,
+ builder: &mut CodeBuilder,
+ instructions: &mut Vec<Instruction>,
+) -> Result<()> {
+ builder.pop_operands(module, &[ValType::F64, ValType::F64]);
+ builder.push_operands(&[ValType::F64]);
+ instructions.push(Instruction::F64Copysign);
+ Ok(())
+}
+
+fn i32_wrap_i64(
+ _: &mut Unstructured,
+ module: &Module,
+ builder: &mut CodeBuilder,
+ instructions: &mut Vec<Instruction>,
+) -> Result<()> {
+ builder.pop_operands(module, &[ValType::I64]);
+ builder.push_operands(&[ValType::I32]);
+ instructions.push(Instruction::I32WrapI64);
+ Ok(())
+}
+
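+// "Non-trapping float-to-int conversions" is the proposal that added the
+// saturating `trunc_sat` instructions; these predicates gate those
+// instructions on the corresponding config flag.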
+fn nontrapping_f32_on_stack(module: &Module, builder: &mut CodeBuilder) -> bool {
+ module.config.saturating_float_to_int_enabled && f32_on_stack(module, builder)
+}
+
+fn i32_trunc_f32_s(
+ _: &mut Unstructured,
+ module: &Module,
+ builder: &mut CodeBuilder,
+ instructions: &mut Vec<Instruction>,
+) -> Result<()> {
+ builder.pop_operands(module, &[ValType::F32]);
+ builder.push_operands(&[ValType::I32]);
+ if module.config.disallow_traps {
+ no_traps::trunc(Instruction::I32TruncF32S, builder, instructions);
+ } else {
+ instructions.push(Instruction::I32TruncF32S);
+ }
+ Ok(())
+}
+
+fn i32_trunc_f32_u(
+ _: &mut Unstructured,
+ module: &Module,
+ builder: &mut CodeBuilder,
+ instructions: &mut Vec<Instruction>,
+) -> Result<()> {
+ builder.pop_operands(module, &[ValType::F32]);
+ builder.push_operands(&[ValType::I32]);
+ if module.config.disallow_traps {
+ no_traps::trunc(Instruction::I32TruncF32U, builder, instructions);
+ } else {
+ instructions.push(Instruction::I32TruncF32U);
+ }
+ Ok(())
+}
+
+fn nontrapping_f64_on_stack(module: &Module, builder: &mut CodeBuilder) -> bool {
+ module.config.saturating_float_to_int_enabled && f64_on_stack(module, builder)
+}
+
+fn i32_trunc_f64_s(
+ _: &mut Unstructured,
+ module: &Module,
+ builder: &mut CodeBuilder,
+ instructions: &mut Vec<Instruction>,
+) -> Result<()> {
+ builder.pop_operands(module, &[ValType::F64]);
+ builder.push_operands(&[ValType::I32]);
+ if module.config.disallow_traps {
+ no_traps::trunc(Instruction::I32TruncF64S, builder, instructions);
+ } else {
+ instructions.push(Instruction::I32TruncF64S);
+ }
+ Ok(())
+}
+
+fn i32_trunc_f64_u(
+ _: &mut Unstructured,
+ module: &Module,
+ builder: &mut CodeBuilder,
+ instructions: &mut Vec<Instruction>,
+) -> Result<()> {
+ builder.pop_operands(module, &[ValType::F64]);
+ builder.push_operands(&[ValType::I32]);
+ if module.config.disallow_traps {
+ no_traps::trunc(Instruction::I32TruncF64U, builder, instructions);
+ } else {
+ instructions.push(Instruction::I32TruncF64U);
+ }
+ Ok(())
+}
+
+fn i64_extend_i32_s(
+ _: &mut Unstructured,
+ module: &Module,
+ builder: &mut CodeBuilder,
+ instructions: &mut Vec<Instruction>,
+) -> Result<()> {
+ builder.pop_operands(module, &[ValType::I32]);
+ builder.push_operands(&[ValType::I64]);
+ instructions.push(Instruction::I64ExtendI32S);
+ Ok(())
+}
+
+fn i64_extend_i32_u(
+ _: &mut Unstructured,
+ module: &Module,
+ builder: &mut CodeBuilder,
+ instructions: &mut Vec<Instruction>,
+) -> Result<()> {
+ builder.pop_operands(module, &[ValType::I32]);
+ builder.push_operands(&[ValType::I64]);
+ instructions.push(Instruction::I64ExtendI32U);
+ Ok(())
+}
+
+fn i64_trunc_f32_s(
+ _: &mut Unstructured,
+ module: &Module,
+ builder: &mut CodeBuilder,
+ instructions: &mut Vec<Instruction>,
+) -> Result<()> {
+ builder.pop_operands(module, &[ValType::F32]);
+ builder.push_operands(&[ValType::I64]);
+ if module.config.disallow_traps {
+ no_traps::trunc(Instruction::I64TruncF32S, builder, instructions);
+ } else {
+ instructions.push(Instruction::I64TruncF32S);
+ }
+ Ok(())
+}
+
+fn i64_trunc_f32_u(
+ _: &mut Unstructured,
+ module: &Module,
+ builder: &mut CodeBuilder,
+ instructions: &mut Vec<Instruction>,
+) -> Result<()> {
+ builder.pop_operands(module, &[ValType::F32]);
+ builder.push_operands(&[ValType::I64]);
+ if module.config.disallow_traps {
+ no_traps::trunc(Instruction::I64TruncF32U, builder, instructions);
+ } else {
+ instructions.push(Instruction::I64TruncF32U);
+ }
+ Ok(())
+}
+
+fn i64_trunc_f64_s(
+ _: &mut Unstructured,
+ module: &Module,
+ builder: &mut CodeBuilder,
+ instructions: &mut Vec<Instruction>,
+) -> Result<()> {
+ builder.pop_operands(module, &[ValType::F64]);
+ builder.push_operands(&[ValType::I64]);
+ if module.config.disallow_traps {
+ no_traps::trunc(Instruction::I64TruncF64S, builder, instructions);
+ } else {
+ instructions.push(Instruction::I64TruncF64S);
+ }
+ Ok(())
+}
+
+fn i64_trunc_f64_u(
+ _: &mut Unstructured,
+ module: &Module,
+ builder: &mut CodeBuilder,
+ instructions: &mut Vec<Instruction>,
+) -> Result<()> {
+ builder.pop_operands(module, &[ValType::F64]);
+ builder.push_operands(&[ValType::I64]);
+ if module.config.disallow_traps {
+ no_traps::trunc(Instruction::I64TruncF64U, builder, instructions);
+ } else {
+ instructions.push(Instruction::I64TruncF64U);
+ }
+ Ok(())
+}
+
+fn f32_convert_i32_s(
+ _: &mut Unstructured,
+ module: &Module,
+ builder: &mut CodeBuilder,
+ instructions: &mut Vec<Instruction>,
+) -> Result<()> {
+ builder.pop_operands(module, &[ValType::I32]);
+ builder.push_operands(&[ValType::F32]);
+ instructions.push(Instruction::F32ConvertI32S);
+ Ok(())
+}
+
+fn f32_convert_i32_u(
+ _: &mut Unstructured,
+ module: &Module,
+ builder: &mut CodeBuilder,
+ instructions: &mut Vec<Instruction>,
+) -> Result<()> {
+ builder.pop_operands(module, &[ValType::I32]);
+ builder.push_operands(&[ValType::F32]);
+ instructions.push(Instruction::F32ConvertI32U);
+ Ok(())
+}
+
+fn f32_convert_i64_s(
+ _: &mut Unstructured,
+ module: &Module,
+ builder: &mut CodeBuilder,
+ instructions: &mut Vec<Instruction>,
+) -> Result<()> {
+ builder.pop_operands(module, &[ValType::I64]);
+ builder.push_operands(&[ValType::F32]);
+ instructions.push(Instruction::F32ConvertI64S);
+ Ok(())
+}
+
+fn f32_convert_i64_u(
+ _: &mut Unstructured,
+ module: &Module,
+ builder: &mut CodeBuilder,
+ instructions: &mut Vec<Instruction>,
+) -> Result<()> {
+ builder.pop_operands(module, &[ValType::I64]);
+ builder.push_operands(&[ValType::F32]);
+ instructions.push(Instruction::F32ConvertI64U);
+ Ok(())
+}
+
+fn f32_demote_f64(
+ _: &mut Unstructured,
+ module: &Module,
+ builder: &mut CodeBuilder,
+ instructions: &mut Vec<Instruction>,
+) -> Result<()> {
+ builder.pop_operands(module, &[ValType::F64]);
+ builder.push_operands(&[ValType::F32]);
+ instructions.push(Instruction::F32DemoteF64);
+ Ok(())
+}
+
+fn f64_convert_i32_s(
+ _: &mut Unstructured,
+ module: &Module,
+ builder: &mut CodeBuilder,
+ instructions: &mut Vec<Instruction>,
+) -> Result<()> {
+ builder.pop_operands(module, &[ValType::I32]);
+ builder.push_operands(&[ValType::F64]);
+ instructions.push(Instruction::F64ConvertI32S);
+ Ok(())
+}
+
+fn f64_convert_i32_u(
+ _: &mut Unstructured,
+ module: &Module,
+ builder: &mut CodeBuilder,
+ instructions: &mut Vec<Instruction>,
+) -> Result<()> {
+ builder.pop_operands(module, &[ValType::I32]);
+ builder.push_operands(&[ValType::F64]);
+ instructions.push(Instruction::F64ConvertI32U);
+ Ok(())
+}
+
+fn f64_convert_i64_s(
+ _: &mut Unstructured,
+ module: &Module,
+ builder: &mut CodeBuilder,
+ instructions: &mut Vec<Instruction>,
+) -> Result<()> {
+ builder.pop_operands(module, &[ValType::I64]);
+ builder.push_operands(&[ValType::F64]);
+ instructions.push(Instruction::F64ConvertI64S);
+ Ok(())
+}
+
+fn f64_convert_i64_u(
+ _: &mut Unstructured,
+ module: &Module,
+ builder: &mut CodeBuilder,
+ instructions: &mut Vec<Instruction>,
+) -> Result<()> {
+ builder.pop_operands(module, &[ValType::I64]);
+ builder.push_operands(&[ValType::F64]);
+ instructions.push(Instruction::F64ConvertI64U);
+ Ok(())
+}
+
+fn f64_promote_f32(
+ _: &mut Unstructured,
+ module: &Module,
+ builder: &mut CodeBuilder,
+ instructions: &mut Vec<Instruction>,
+) -> Result<()> {
+ builder.pop_operands(module, &[ValType::F32]);
+ builder.push_operands(&[ValType::F64]);
+ instructions.push(Instruction::F64PromoteF32);
+ Ok(())
+}
+
+fn i32_reinterpret_f32(
+ _: &mut Unstructured,
+ module: &Module,
+ builder: &mut CodeBuilder,
+ instructions: &mut Vec<Instruction>,
+) -> Result<()> {
+ builder.pop_operands(module, &[ValType::F32]);
+ builder.push_operands(&[ValType::I32]);
+ instructions.push(Instruction::I32ReinterpretF32);
+ Ok(())
+}
+
+fn i64_reinterpret_f64(
+ _: &mut Unstructured,
+ module: &Module,
+ builder: &mut CodeBuilder,
+ instructions: &mut Vec<Instruction>,
+) -> Result<()> {
+ builder.pop_operands(module, &[ValType::F64]);
+ builder.push_operands(&[ValType::I64]);
+ instructions.push(Instruction::I64ReinterpretF64);
+ Ok(())
+}
+
+fn f32_reinterpret_i32(
+ _: &mut Unstructured,
+ module: &Module,
+ builder: &mut CodeBuilder,
+ instructions: &mut Vec<Instruction>,
+) -> Result<()> {
+ builder.pop_operands(module, &[ValType::I32]);
+ builder.push_operands(&[ValType::F32]);
+ instructions.push(Instruction::F32ReinterpretI32);
+ Ok(())
+}
+
+fn f64_reinterpret_i64(
+ _: &mut Unstructured,
+ module: &Module,
+ builder: &mut CodeBuilder,
+ instructions: &mut Vec<Instruction>,
+) -> Result<()> {
+ builder.pop_operands(module, &[ValType::I64]);
+ builder.push_operands(&[ValType::F64]);
+ instructions.push(Instruction::F64ReinterpretI64);
+ Ok(())
+}
+
+fn extendable_i32_on_stack(module: &Module, builder: &mut CodeBuilder) -> bool {
+ module.config.sign_extension_ops_enabled && i32_on_stack(module, builder)
+}
+
+fn i32_extend_8_s(
+ _: &mut Unstructured,
+ module: &Module,
+ builder: &mut CodeBuilder,
+ instructions: &mut Vec<Instruction>,
+) -> Result<()> {
+ builder.pop_operands(module, &[ValType::I32]);
+ builder.push_operands(&[ValType::I32]);
+ instructions.push(Instruction::I32Extend8S);
+ Ok(())
+}
+
+fn i32_extend_16_s(
+ _: &mut Unstructured,
+ module: &Module,
+ builder: &mut CodeBuilder,
+ instructions: &mut Vec<Instruction>,
+) -> Result<()> {
+ builder.pop_operands(module, &[ValType::I32]);
+ builder.push_operands(&[ValType::I32]);
+ instructions.push(Instruction::I32Extend16S);
+ Ok(())
+}
+
+fn extendable_i64_on_stack(module: &Module, builder: &mut CodeBuilder) -> bool {
+ module.config.sign_extension_ops_enabled && i64_on_stack(module, builder)
+}
+
+fn i64_extend_8_s(
+ _: &mut Unstructured,
+ module: &Module,
+ builder: &mut CodeBuilder,
+ instructions: &mut Vec<Instruction>,
+) -> Result<()> {
+ builder.pop_operands(module, &[ValType::I64]);
+ builder.push_operands(&[ValType::I64]);
+ instructions.push(Instruction::I64Extend8S);
+ Ok(())
+}
+
+fn i64_extend_16_s(
+ _: &mut Unstructured,
+ module: &Module,
+ builder: &mut CodeBuilder,
+ instructions: &mut Vec<Instruction>,
+) -> Result<()> {
+ builder.pop_operands(module, &[ValType::I64]);
+ builder.push_operands(&[ValType::I64]);
+ instructions.push(Instruction::I64Extend16S);
+ Ok(())
+}
+
+fn i64_extend_32_s(
+ _: &mut Unstructured,
+ module: &Module,
+ builder: &mut CodeBuilder,
+ instructions: &mut Vec<Instruction>,
+) -> Result<()> {
+ builder.pop_operands(module, &[ValType::I64]);
+ builder.push_operands(&[ValType::I64]);
+ instructions.push(Instruction::I64Extend32S);
+ Ok(())
+}
+
+fn i32_trunc_sat_f32_s(
+ _: &mut Unstructured,
+ module: &Module,
+ builder: &mut CodeBuilder,
+ instructions: &mut Vec<Instruction>,
+) -> Result<()> {
+ builder.pop_operands(module, &[ValType::F32]);
+ builder.push_operands(&[ValType::I32]);
+ instructions.push(Instruction::I32TruncSatF32S);
+ Ok(())
+}
+
+fn i32_trunc_sat_f32_u(
+ _: &mut Unstructured,
+ module: &Module,
+ builder: &mut CodeBuilder,
+ instructions: &mut Vec<Instruction>,
+) -> Result<()> {
+ builder.pop_operands(module, &[ValType::F32]);
+ builder.push_operands(&[ValType::I32]);
+ instructions.push(Instruction::I32TruncSatF32U);
+ Ok(())
+}
+
+fn i32_trunc_sat_f64_s(
+ _: &mut Unstructured,
+ module: &Module,
+ builder: &mut CodeBuilder,
+ instructions: &mut Vec<Instruction>,
+) -> Result<()> {
+ builder.pop_operands(module, &[ValType::F64]);
+ builder.push_operands(&[ValType::I32]);
+ instructions.push(Instruction::I32TruncSatF64S);
+ Ok(())
+}
+
+fn i32_trunc_sat_f64_u(
+ _: &mut Unstructured,
+ module: &Module,
+ builder: &mut CodeBuilder,
+ instructions: &mut Vec<Instruction>,
+) -> Result<()> {
+ builder.pop_operands(module, &[ValType::F64]);
+ builder.push_operands(&[ValType::I32]);
+ instructions.push(Instruction::I32TruncSatF64U);
+ Ok(())
+}
+
+fn i64_trunc_sat_f32_s(
+ _: &mut Unstructured,
+ module: &Module,
+ builder: &mut CodeBuilder,
+ instructions: &mut Vec<Instruction>,
+) -> Result<()> {
+ builder.pop_operands(module, &[ValType::F32]);
+ builder.push_operands(&[ValType::I64]);
+ instructions.push(Instruction::I64TruncSatF32S);
+ Ok(())
+}
+
+fn i64_trunc_sat_f32_u(
+ _: &mut Unstructured,
+ module: &Module,
+ builder: &mut CodeBuilder,
+ instructions: &mut Vec<Instruction>,
+) -> Result<()> {
+ builder.pop_operands(module, &[ValType::F32]);
+ builder.push_operands(&[ValType::I64]);
+ instructions.push(Instruction::I64TruncSatF32U);
+ Ok(())
+}
+
+fn i64_trunc_sat_f64_s(
+ _: &mut Unstructured,
+ module: &Module,
+ builder: &mut CodeBuilder,
+ instructions: &mut Vec<Instruction>,
+) -> Result<()> {
+ builder.pop_operands(module, &[ValType::F64]);
+ builder.push_operands(&[ValType::I64]);
+ instructions.push(Instruction::I64TruncSatF64S);
+ Ok(())
+}
+
+fn i64_trunc_sat_f64_u(
+ _: &mut Unstructured,
+ module: &Module,
+ builder: &mut CodeBuilder,
+ instructions: &mut Vec<Instruction>,
+) -> Result<()> {
+ builder.pop_operands(module, &[ValType::F64]);
+ builder.push_operands(&[ValType::I64]);
+ instructions.push(Instruction::I64TruncSatF64U);
+ Ok(())
+}
+
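+// Pick a constant offset for a load or store to the given memory. The three
+// `MemoryOffsetChoices` weights choose between offsets in `0..=min` (in
+// bounds even at the memory's initial size), `min..=max` (in bounds only
+// once the memory has grown), and `max..=true_max` (at or beyond the end of
+// the largest possible memory), so both in-bounds and out-of-bounds
+// accesses get generated.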
+fn memory_offset(u: &mut Unstructured, module: &Module, memory_index: u32) -> Result<u64> {
+ let MemoryOffsetChoices(a, b, c) = module.config.memory_offset_choices;
+ assert!(a + b + c != 0);
+
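+ // Scale the memory's limits from 64 KiB Wasm pages to byte offsets.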
+ let memory_type = &module.memories[memory_index as usize];
+ let min = memory_type.minimum.saturating_mul(65536);
+ let max = memory_type
+ .maximum
+ .map(|max| max.saturating_mul(65536))
+ .unwrap_or(u64::MAX);
+
+ let (min, max, true_max) = match (memory_type.memory64, module.config.disallow_traps) {
+ (true, false) => {
+ // 64-bit memories can use the limits calculated above as-is
+ (min, max, u64::MAX)
+ }
+ (false, false) => {
+ // 32-bit memories can't represent a full 4 GiB offset, so if that's the
+ // min/max size then we need to clamp them to `u32::MAX`.
+ (
+ u64::from(u32::try_from(min).unwrap_or(u32::MAX)),
+ u64::from(u32::try_from(max).unwrap_or(u32::MAX)),
+ u64::from(u32::MAX),
+ )
+ }
+ // The logic for non-trapping versions of load/store involves pushing
+ // the offset + load/store size onto the stack as either an i32 or i64
+ // value. So even though offsets can normally be as large as `u32::MAX`
+ // or `u64::MAX`, we need to keep them smaller for our non-trapping logic to
+ // work. 16 is the number of bytes of the largest load type (V128).
+ (true, true) => {
+ let no_trap_max = (i64::MAX - 16) as u64;
+ (min.min(no_trap_max), no_trap_max, no_trap_max)
+ }
+ (false, true) => {
+ let no_trap_max = (i32::MAX - 16) as u64;
+ (min.min(no_trap_max), no_trap_max, no_trap_max)
+ }
+ };
+
+ let choice = u.int_in_range(0..=a + b + c - 1)?;
+ if choice < a {
+ u.int_in_range(0..=min)
+ } else if choice < a + b {
+ u.int_in_range(min..=max)
+ } else {
+ u.int_in_range(max..=true_max)
+ }
+}
+
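+// Build the `MemArg` immediate for a load or store: pop the address operand
+// (i32 or i64, depending on which is on the stack), pick a memory with the
+// matching index width, then choose an offset and one of the allowed
+// alignments.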
+fn mem_arg(
+ u: &mut Unstructured,
+ module: &Module,
+ builder: &mut CodeBuilder,
+ alignments: &[u32],
+) -> Result<MemArg> {
+ let memory_index = if builder.type_on_stack(module, ValType::I32) {
+ builder.pop_operands(module, &[ValType::I32]);
+ memory_index(u, builder, ValType::I32)?
+ } else {
+ builder.pop_operands(module, &[ValType::I64]);
+ memory_index(u, builder, ValType::I64)?
+ };
+ let offset = memory_offset(u, module, memory_index)?;
+ let align = *u.choose(alignments)?;
+ Ok(MemArg {
+ memory_index,
+ offset,
+ align,
+ })
+}
+
+fn memory_index(u: &mut Unstructured, builder: &CodeBuilder, ty: ValType) -> Result<u32> {
+ if ty == ValType::I32 {
+ Ok(*u.choose(&builder.allocs.memory32)?)
+ } else {
+ Ok(*u.choose(&builder.allocs.memory64)?)
+ }
+}
+
+fn data_index(u: &mut Unstructured, module: &Module) -> Result<u32> {
+ let data = module.data.len() as u32;
+ assert!(data > 0);
+ if data == 1 {
+ Ok(0)
+ } else {
+ u.int_in_range(0..=data - 1)
+ }
+}
+
+#[inline]
+fn ref_null_valid(module: &Module, _: &mut CodeBuilder) -> bool {
+ module.config.reference_types_enabled
+}
+
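+// `ref.null` pushes a null `externref` or `funcref`; `exnref` is added when
+// exceptions are enabled, and with GC enabled any abstract heap type or
+// concrete type index may be chosen.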
+fn ref_null(
+ u: &mut Unstructured,
+ module: &Module,
+ builder: &mut CodeBuilder,
+ instructions: &mut Vec<Instruction>,
+) -> Result<()> {
+ let mut choices = vec![RefType::EXTERNREF, RefType::FUNCREF];
+ if module.config.exceptions_enabled {
+ choices.push(RefType::EXNREF);
+ }
+ if module.config.gc_enabled {
+ let r = |heap_type| RefType {
+ nullable: true,
+ heap_type,
+ };
+ choices.push(r(HeapType::Any));
+ choices.push(r(HeapType::Eq));
+ choices.push(r(HeapType::Array));
+ choices.push(r(HeapType::Struct));
+ choices.push(r(HeapType::I31));
+ choices.push(r(HeapType::None));
+ choices.push(r(HeapType::NoFunc));
+ choices.push(r(HeapType::NoExtern));
+ for i in 0..module.types.len() {
+ let i = u32::try_from(i).unwrap();
+ choices.push(r(HeapType::Concrete(i)));
+ }
+ }
+ let ty = *u.choose(&choices)?;
+ builder.push_operand(Some(ty.into()));
+ instructions.push(Instruction::RefNull(ty.heap_type));
+ Ok(())
+}
+
+#[inline]
+fn ref_func_valid(module: &Module, builder: &mut CodeBuilder) -> bool {
+ module.config.reference_types_enabled && builder.allocs.referenced_functions.len() > 0
+}
+
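+// `ref.func` pushes a non-null reference to one of the module's referenced
+// functions; with GC enabled it gets the function's precise concrete type
+// rather than plain `funcref`.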
+fn ref_func(
+ u: &mut Unstructured,
+ module: &Module,
+ builder: &mut CodeBuilder,
+ instructions: &mut Vec<Instruction>,
+) -> Result<()> {
+ let i = *u.choose(&builder.allocs.referenced_functions)?;
+ let ty = module.funcs[usize::try_from(i).unwrap()].0;
+ builder.push_operand(Some(ValType::Ref(if module.config.gc_enabled {
+ RefType {
+ nullable: false,
+ heap_type: HeapType::Concrete(ty),
+ }
+ } else {
+ RefType::FUNCREF
+ })));
+ instructions.push(Instruction::RefFunc(i));
+ Ok(())
+}
+
+#[inline]
+fn ref_as_non_null_valid(module: &Module, builder: &mut CodeBuilder) -> bool {
+ module.config.gc_enabled && builder.ref_type_on_stack().is_some()
+}
+
+fn ref_as_non_null(
+ u: &mut Unstructured,
+ module: &Module,
+ builder: &mut CodeBuilder,
+ instructions: &mut Vec<Instruction>,
+) -> Result<()> {
+ let ref_ty = match builder.pop_ref_type() {
+ Some(r) => r,
+ None => module.arbitrary_ref_type(u)?,
+ };
+ builder.push_operand(Some(ValType::Ref(RefType {
+ nullable: false,
+ heap_type: ref_ty.heap_type,
+ })));
+ instructions.push(Instruction::RefAsNonNull);
+ Ok(())
+}
+
+#[inline]
+fn ref_eq_valid(module: &Module, builder: &mut CodeBuilder) -> bool {
+ let eq_ref = ValType::Ref(RefType {
+ nullable: true,
+ heap_type: HeapType::Eq,
+ });
+ module.config.gc_enabled && builder.types_on_stack(module, &[eq_ref, eq_ref])
+}
+
+fn ref_eq(
+ _u: &mut Unstructured,
+ _module: &Module,
+ builder: &mut CodeBuilder,
+ instructions: &mut Vec<Instruction>,
+) -> Result<()> {
+ builder.pop_operand();
+ builder.pop_operand();
+ builder.push_operand(Some(ValType::I32));
+ instructions.push(Instruction::RefEq);
+ Ok(())
+}
+
+#[inline]
+fn ref_test_valid(module: &Module, builder: &mut CodeBuilder) -> bool {
+ module.config.gc_enabled && builder.ref_type_on_stack().is_some()
+}
+
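+// `ref.test` pops a reference and pushes an i32 indicating whether the value
+// is an instance of an arbitrarily chosen subtype of the popped heap type.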
+fn ref_test(
+ u: &mut Unstructured,
+ module: &Module,
+ builder: &mut CodeBuilder,
+ instructions: &mut Vec<Instruction>,
+) -> Result<()> {
+ let ref_ty = match builder.pop_ref_type() {
+ Some(r) => r,
+ None => module.arbitrary_ref_type(u)?,
+ };
+ builder.push_operand(Some(ValType::I32));
+
+ let sub_ty = module.arbitrary_matching_heap_type(u, ref_ty.heap_type)?;
+ instructions.push(if !ref_ty.nullable || u.arbitrary()? {
+ Instruction::RefTestNonNull(sub_ty)
+ } else {
+ Instruction::RefTestNullable(sub_ty)
+ });
+ Ok(())
+}
+
+#[inline]
+fn ref_cast_valid(module: &Module, builder: &mut CodeBuilder) -> bool {
+ !module.config.disallow_traps
+ && module.config.gc_enabled
+ && builder.ref_type_on_stack().is_some()
+}
+
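+// `ref.cast` narrows the reference on the stack to an arbitrary subtype and
+// traps on mismatch, which is why it is skipped when traps are disallowed.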
+fn ref_cast(
+ u: &mut Unstructured,
+ module: &Module,
+ builder: &mut CodeBuilder,
+ instructions: &mut Vec<Instruction>,
+) -> Result<()> {
+ let ref_ty = match builder.pop_ref_type() {
+ Some(r) => r,
+ None => module.arbitrary_ref_type(u)?,
+ };
+ let sub_ty = RefType {
+ nullable: if !ref_ty.nullable {
+ false
+ } else {
+ u.arbitrary()?
+ },
+ heap_type: module.arbitrary_matching_heap_type(u, ref_ty.heap_type)?,
+ };
+ builder.push_operand(Some(ValType::Ref(sub_ty)));
+
+ instructions.push(if !sub_ty.nullable {
+ Instruction::RefCastNonNull(sub_ty.heap_type)
+ } else {
+ Instruction::RefCastNullable(sub_ty.heap_type)
+ });
+ Ok(())
+}
+
+#[inline]
+fn ref_is_null_valid(module: &Module, builder: &mut CodeBuilder) -> bool {
+ module.config.reference_types_enabled
+ && (builder.type_on_stack(module, ValType::EXTERNREF)
+ || builder.type_on_stack(module, ValType::FUNCREF))
+}
+
+fn ref_is_null(
+ _: &mut Unstructured,
+ _module: &Module,
+ builder: &mut CodeBuilder,
+ instructions: &mut Vec<Instruction>,
+) -> Result<()> {
+ builder.pop_ref_type();
+ builder.push_operands(&[ValType::I32]);
+ instructions.push(Instruction::RefIsNull);
+ Ok(())
+}
+
+#[inline]
+fn table_fill_valid(module: &Module, builder: &mut CodeBuilder) -> bool {
+ module.config.reference_types_enabled
+ && module.config.bulk_memory_enabled
+ && !module.config.disallow_traps // Non-trapping table fill generation not yet implemented
+ && [ValType::EXTERNREF, ValType::FUNCREF].iter().any(|ty| {
+ builder.types_on_stack(module, &[ValType::I32, *ty, ValType::I32])
+ && module.tables.iter().any(|t| {
+ module.val_type_is_sub_type(*ty, t.element_type.into())
+ })
+ })
+}
+
+fn table_fill(
+ u: &mut Unstructured,
+ module: &Module,
+ builder: &mut CodeBuilder,
+ instructions: &mut Vec<Instruction>,
+) -> Result<()> {
+ builder.pop_operands(module, &[ValType::I32]);
+ let table = match builder.pop_ref_type() {
+ Some(ty) => table_index(ty, u, module)?,
+ // Stack polymorphic, can choose any reference type we have a table for,
+ // so just choose the table directly.
+ None => u.int_in_range(0..=u32::try_from(module.tables.len() - 1).unwrap())?,
+ };
+ builder.pop_operands(module, &[ValType::I32]);
+ instructions.push(Instruction::TableFill(table));
+ Ok(())
+}
+
+#[inline]
+fn table_set_valid(module: &Module, builder: &mut CodeBuilder) -> bool {
+ module.config.reference_types_enabled
+ && !module.config.disallow_traps // Non-trapping table.set generation not yet implemented
+ && [ValType::EXTERNREF, ValType::FUNCREF].iter().any(|ty| {
+ builder.types_on_stack(module, &[ValType::I32, *ty])
+ && module.tables.iter().any(|t| {
+ module.val_type_is_sub_type(*ty, t.element_type.into())
+ })
+ })
+}
+
+fn table_set(
+ u: &mut Unstructured,
+ module: &Module,
+ builder: &mut CodeBuilder,
+ instructions: &mut Vec<Instruction>,
+) -> Result<()> {
+ let table = match builder.pop_ref_type() {
+ Some(ty) => table_index(ty, u, module)?,
+ // Stack polymorphic, can choose any reference type we have a table for,
+ // so just choose the table directly.
+ None => u.int_in_range(0..=u32::try_from(module.tables.len() - 1).unwrap())?,
+ };
+ builder.pop_operands(module, &[ValType::I32]);
+ instructions.push(Instruction::TableSet(table));
+ Ok(())
+}
+
+#[inline]
+fn table_get_valid(module: &Module, builder: &mut CodeBuilder) -> bool {
+ module.config.reference_types_enabled
+ && !module.config.disallow_traps // Non-trapping table.get generation not yet implemented
+ && builder.type_on_stack(module, ValType::I32)
+ && module.tables.len() > 0
+}
+
+fn table_get(
+ u: &mut Unstructured,
+ module: &Module,
+ builder: &mut CodeBuilder,
+ instructions: &mut Vec<Instruction>,
+) -> Result<()> {
+ builder.pop_operands(module, &[ValType::I32]);
+ let idx = u.int_in_range(0..=module.tables.len() - 1)?;
+ let ty = module.tables[idx].element_type;
+ builder.push_operands(&[ty.into()]);
+ instructions.push(Instruction::TableGet(idx as u32));
+ Ok(())
+}
+
+#[inline]
+fn table_size_valid(module: &Module, _: &mut CodeBuilder) -> bool {
+ module.config.reference_types_enabled && module.tables.len() > 0
+}
+
+fn table_size(
+ u: &mut Unstructured,
+ module: &Module,
+ builder: &mut CodeBuilder,
+ instructions: &mut Vec<Instruction>,
+) -> Result<()> {
+ let table = u.int_in_range(0..=module.tables.len() - 1)? as u32;
+ builder.push_operands(&[ValType::I32]);
+ instructions.push(Instruction::TableSize(table));
+ Ok(())
+}
+
+#[inline]
+fn table_grow_valid(module: &Module, builder: &mut CodeBuilder) -> bool {
+ module.config.reference_types_enabled
+ && [ValType::EXTERNREF, ValType::FUNCREF].iter().any(|ty| {
+ builder.types_on_stack(module, &[*ty, ValType::I32])
+ && module
+ .tables
+ .iter()
+ .any(|t| module.val_type_is_sub_type(*ty, t.element_type.into()))
+ })
+}
+
+fn table_grow(
+ u: &mut Unstructured,
+ module: &Module,
+ builder: &mut CodeBuilder,
+ instructions: &mut Vec<Instruction>,
+) -> Result<()> {
+ builder.pop_operands(module, &[ValType::I32]);
+ let table = match builder.pop_ref_type() {
+ Some(ty) => table_index(ty, u, module)?,
+ // Stack polymorphic, can choose any reference type we have a table for,
+ // so just choose the table directly.
+ None => u.int_in_range(0..=u32::try_from(module.tables.len() - 1).unwrap())?,
+ };
+ builder.push_operands(&[ValType::I32]);
+ instructions.push(Instruction::TableGrow(table));
+ Ok(())
+}
+
+#[inline]
+fn table_copy_valid(module: &Module, builder: &mut CodeBuilder) -> bool {
+ module.config.bulk_memory_enabled
+ && !module.config.disallow_traps // Non-trapping table.copy generation not yet implemented
+ && module.tables.len() > 0
+ && builder.types_on_stack(module, &[ValType::I32, ValType::I32, ValType::I32])
+}
+
+fn table_copy(
+ u: &mut Unstructured,
+ module: &Module,
+ builder: &mut CodeBuilder,
+ instructions: &mut Vec<Instruction>,
+) -> Result<()> {
+ builder.pop_operands(module, &[ValType::I32, ValType::I32, ValType::I32]);
+ let src_table = u.int_in_range(0..=module.tables.len() - 1)? as u32;
+ let dst_table = table_index(module.tables[src_table as usize].element_type, u, module)?;
+ instructions.push(Instruction::TableCopy {
+ src_table,
+ dst_table,
+ });
+ Ok(())
+}
+
+#[inline]
+fn table_init_valid(module: &Module, builder: &mut CodeBuilder) -> bool {
+ module.config.bulk_memory_enabled
+ && !module.config.disallow_traps // Non-trapping table.init generation not yet implemented.
+ && builder.allocs.table_init_possible
+ && builder.types_on_stack(module, &[ValType::I32, ValType::I32, ValType::I32])
+}
+
+fn table_init(
+ u: &mut Unstructured,
+ module: &Module,
+ builder: &mut CodeBuilder,
+ instructions: &mut Vec<Instruction>,
+) -> Result<()> {
+ builder.pop_operands(module, &[ValType::I32, ValType::I32, ValType::I32]);
+ let segments = module
+ .elems
+ .iter()
+ .enumerate()
+ .filter(|(_, e)| module.tables.iter().any(|t| t.element_type == e.ty))
+ .map(|(i, _)| i)
+ .collect::<Vec<_>>();
+ let segment = *u.choose(&segments)?;
+ let table = table_index(module.elems[segment].ty, u, module)?;
+ instructions.push(Instruction::TableInit {
+ elem_index: segment as u32,
+ table,
+ });
+ Ok(())
+}
+
+#[inline]
+fn elem_drop_valid(module: &Module, _builder: &mut CodeBuilder) -> bool {
+ module.config.bulk_memory_enabled && module.elems.len() > 0
+}
+
+fn elem_drop(
+ u: &mut Unstructured,
+ module: &Module,
+ _builder: &mut CodeBuilder,
+ instructions: &mut Vec<Instruction>,
+) -> Result<()> {
+ let segment = u.int_in_range(0..=module.elems.len() - 1)? as u32;
+ instructions.push(Instruction::ElemDrop(segment));
+ Ok(())
+}
+
+#[inline]
+fn struct_new_valid(module: &Module, builder: &mut CodeBuilder) -> bool {
+ module.config.gc_enabled
+ && module
+ .struct_types
+ .iter()
+ .copied()
+ .any(|i| builder.field_types_on_stack(module, &module.ty(i).unwrap_struct().fields))
+}
+
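+// The GC constructors below share a two-pass selection idiom: count the type
+// indices that are valid for the current operand stack, draw `i` in `0..n`,
+// then rerun the same filter and take `.nth(i)` to recover the chosen index.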
+fn struct_new(
+ u: &mut Unstructured,
+ module: &Module,
+ builder: &mut CodeBuilder,
+ instructions: &mut Vec<Instruction>,
+) -> Result<()> {
+ let n = module
+ .struct_types
+ .iter()
+ .filter(|i| builder.field_types_on_stack(module, &module.ty(**i).unwrap_struct().fields))
+ .count();
+ debug_assert!(n > 0);
+ let i = u.int_in_range(0..=n - 1)?;
+ let ty = module
+ .struct_types
+ .iter()
+ .copied()
+ .filter(|i| builder.field_types_on_stack(module, &module.ty(*i).unwrap_struct().fields))
+ .nth(i)
+ .unwrap();
+
+ for _ in module.ty(ty).unwrap_struct().fields.iter() {
+ builder.pop_operand();
+ }
+ builder.push_operand(Some(ValType::Ref(RefType {
+ nullable: false,
+ heap_type: HeapType::Concrete(ty),
+ })));
+
+ instructions.push(Instruction::StructNew(ty));
+ Ok(())
+}
+
+#[inline]
+fn struct_new_default_valid(module: &Module, _builder: &mut CodeBuilder) -> bool {
+ module.config.gc_enabled
+ && module.struct_types.iter().copied().any(|i| {
+ module
+ .ty(i)
+ .unwrap_struct()
+ .fields
+ .iter()
+ .all(|f| f.element_type.is_defaultable())
+ })
+}
+
+fn struct_new_default(
+ u: &mut Unstructured,
+ module: &Module,
+ builder: &mut CodeBuilder,
+ instructions: &mut Vec<Instruction>,
+) -> Result<()> {
+ let n = module
+ .struct_types
+ .iter()
+ .filter(|i| {
+ module
+ .ty(**i)
+ .unwrap_struct()
+ .fields
+ .iter()
+ .all(|f| f.element_type.is_defaultable())
+ })
+ .count();
+ debug_assert!(n > 0);
+ let i = u.int_in_range(0..=n - 1)?;
+ let ty = module
+ .struct_types
+ .iter()
+ .copied()
+ .filter(|i| {
+ module
+ .ty(*i)
+ .unwrap_struct()
+ .fields
+ .iter()
+ .all(|f| f.element_type.is_defaultable())
+ })
+ .nth(i)
+ .unwrap();
+
+ builder.push_operand(Some(ValType::Ref(RefType {
+ nullable: false,
+ heap_type: HeapType::Concrete(ty),
+ })));
+
+ instructions.push(Instruction::StructNewDefault(ty));
+ Ok(())
+}
+
+#[inline]
+fn struct_get_valid(module: &Module, builder: &mut CodeBuilder) -> bool {
+ module.config.gc_enabled
+ && !module.config.disallow_traps
+ && builder.non_empty_struct_ref_on_stack(module, !module.config.disallow_traps)
+}
+
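+// Packed i8/i16 fields must be read with an explicit signedness
+// (`struct.get_s` or `struct.get_u`); unpacked fields use plain
+// `struct.get`.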
+fn struct_get(
+ u: &mut Unstructured,
+ module: &Module,
+ builder: &mut CodeBuilder,
+ instructions: &mut Vec<Instruction>,
+) -> Result<()> {
+ let (_, struct_type_index) = builder.pop_concrete_ref_type();
+ let struct_ty = module.ty(struct_type_index).unwrap_struct();
+ let num_fields = u32::try_from(struct_ty.fields.len()).unwrap();
+ debug_assert!(num_fields > 0);
+ let field_index = u.int_in_range(0..=num_fields - 1)?;
+ let (val_ty, ext) = match struct_ty.fields[usize::try_from(field_index).unwrap()].element_type {
+ StorageType::I8 | StorageType::I16 => (ValType::I32, Some(u.arbitrary()?)),
+ StorageType::Val(v) => (v, None),
+ };
+ builder.push_operand(Some(val_ty));
+ instructions.push(match ext {
+ None => Instruction::StructGet {
+ struct_type_index,
+ field_index,
+ },
+ Some(true) => Instruction::StructGetS {
+ struct_type_index,
+ field_index,
+ },
+ Some(false) => Instruction::StructGetU {
+ struct_type_index,
+ field_index,
+ },
+ });
+ Ok(())
+}
+
+#[inline]
+fn struct_set_valid(module: &Module, builder: &mut CodeBuilder) -> bool {
+ if !module.config.gc_enabled {
+ return false;
+ }
+ match builder.concrete_struct_ref_type_on_stack_at(module, 1) {
+ None => return false,
+ Some((true, _, _)) if module.config.disallow_traps => return false,
+ Some((_, _, ty)) => ty
+ .fields
+ .iter()
+ .any(|f| f.mutable && builder.type_on_stack(module, f.element_type.unpack())),
+ }
+}
+
+fn struct_set(
+ u: &mut Unstructured,
+ module: &Module,
+ builder: &mut CodeBuilder,
+ instructions: &mut Vec<Instruction>,
+) -> Result<()> {
+ let val_ty = builder.pop_operand();
+ let (_, struct_type_index) = builder.pop_concrete_ref_type();
+ let struct_ty = module.ty(struct_type_index).unwrap_struct();
+
+ let valid_field = |f: &FieldType| -> bool {
+ match val_ty {
+ None => f.mutable,
+ Some(val_ty) => {
+ f.mutable && module.val_type_is_sub_type(val_ty, f.element_type.unpack())
+ }
+ }
+ };
+
+ let n = struct_ty.fields.iter().filter(|f| valid_field(f)).count();
+ debug_assert!(n > 0);
+ let i = u.int_in_range(0..=n - 1)?;
+ let (field_index, _) = struct_ty
+ .fields
+ .iter()
+ .enumerate()
+ .filter(|(_, f)| valid_field(f))
+ .nth(i)
+ .unwrap();
+ let field_index = u32::try_from(field_index).unwrap();
+
+ instructions.push(Instruction::StructSet {
+ struct_type_index,
+ field_index,
+ });
+ Ok(())
+}
+
+#[inline]
+fn array_new_valid(module: &Module, builder: &mut CodeBuilder) -> bool {
+ module.config.gc_enabled
+ && builder.type_on_stack(module, ValType::I32)
+ && module
+ .array_types
+ .iter()
+ .any(|i| builder.field_type_on_stack_at(module, 1, module.ty(*i).unwrap_array().0))
+}
+
+fn array_new(
+ u: &mut Unstructured,
+ module: &Module,
+ builder: &mut CodeBuilder,
+ instructions: &mut Vec<Instruction>,
+) -> Result<()> {
+ let n = module
+ .array_types
+ .iter()
+ .filter(|i| builder.field_type_on_stack_at(module, 1, module.ty(**i).unwrap_array().0))
+ .count();
+ debug_assert!(n > 0);
+ let i = u.int_in_range(0..=n - 1)?;
+ let ty = module
+ .array_types
+ .iter()
+ .copied()
+ .filter(|i| builder.field_type_on_stack_at(module, 1, module.ty(*i).unwrap_array().0))
+ .nth(i)
+ .unwrap();
+
+ builder.pop_operand();
+ builder.pop_operand();
+ builder.push_operand(Some(ValType::Ref(RefType {
+ nullable: false,
+ heap_type: HeapType::Concrete(ty),
+ })));
+
+ instructions.push(Instruction::ArrayNew(ty));
+ Ok(())
+}
+
+#[inline]
+fn array_new_fixed_valid(module: &Module, builder: &mut CodeBuilder) -> bool {
+ module.config.gc_enabled
+ && module
+ .array_types
+ .iter()
+ .any(|i| builder.field_type_on_stack(module, module.ty(*i).unwrap_array().0))
+}
+
+fn array_new_fixed(
+ u: &mut Unstructured,
+ module: &Module,
+ builder: &mut CodeBuilder,
+ instructions: &mut Vec<Instruction>,
+) -> Result<()> {
+ let n = module
+ .array_types
+ .iter()
+ .filter(|i| builder.field_type_on_stack(module, module.ty(**i).unwrap_array().0))
+ .count();
+ debug_assert!(n > 0);
+ let i = u.int_in_range(0..=n - 1)?;
+ let array_type_index = module
+ .array_types
+ .iter()
+ .copied()
+ .filter(|i| builder.field_type_on_stack(module, module.ty(*i).unwrap_array().0))
+ .nth(i)
+ .unwrap();
+ let elem_ty = module
+ .ty(array_type_index)
+ .unwrap_array()
+ .0
+ .element_type
+ .unpack();
+
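+ // Count how many operands on top of the stack have the element type; the
+ // fixed array's size is then drawn from within that prefix.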
+ let m = (0..builder.operands().len())
+ .take_while(|i| builder.type_on_stack_at(module, *i, elem_ty))
+ .count();
+ debug_assert!(m > 0);
+ let array_size = u.int_in_range(0..=m - 1)?;
+ let array_size = u32::try_from(array_size).unwrap();
+
+ for _ in 0..array_size {
+ builder.pop_operand();
+ }
+ builder.push_operand(Some(ValType::Ref(RefType {
+ nullable: false,
+ heap_type: HeapType::Concrete(array_type_index),
+ })));
+
+ instructions.push(Instruction::ArrayNewFixed {
+ array_type_index,
+ array_size,
+ });
+ Ok(())
+}
+
+#[inline]
+fn array_new_default_valid(module: &Module, builder: &mut CodeBuilder) -> bool {
+ module.config.gc_enabled
+ && builder.type_on_stack(module, ValType::I32)
+ && module
+ .array_types
+ .iter()
+ .any(|i| module.ty(*i).unwrap_array().0.element_type.is_defaultable())
+}
+
+fn array_new_default(
+ u: &mut Unstructured,
+ module: &Module,
+ builder: &mut CodeBuilder,
+ instructions: &mut Vec<Instruction>,
+) -> Result<()> {
+ let n = module
+ .array_types
+ .iter()
+ .filter(|i| {
+ module
+ .ty(**i)
+ .unwrap_array()
+ .0
+ .element_type
+ .is_defaultable()
+ })
+ .count();
+ debug_assert!(n > 0);
+ let i = u.int_in_range(0..=n - 1)?;
+ let array_type_index = module
+ .array_types
+ .iter()
+ .copied()
+ .filter(|i| module.ty(*i).unwrap_array().0.element_type.is_defaultable())
+ .nth(i)
+ .unwrap();
+
+ builder.pop_operand();
+ builder.push_operand(Some(ValType::Ref(RefType {
+ nullable: false,
+ heap_type: HeapType::Concrete(array_type_index),
+ })));
+ instructions.push(Instruction::ArrayNewDefault(array_type_index));
+ Ok(())
+}
+
+#[inline]
+fn array_new_data_valid(module: &Module, builder: &mut CodeBuilder) -> bool {
+ module.config.gc_enabled
+ && module.config.bulk_memory_enabled // Requires data count section
+ && !module.config.disallow_traps
+ && !module.data.is_empty()
+ && builder.types_on_stack(module, &[ValType::I32, ValType::I32])
+ && module.array_types.iter().any(|i| {
+ let ty = module.ty(*i).unwrap_array().0.element_type.unpack();
+ ty.is_numeric() || ty.is_vector()
+ })
+}
+
+fn array_new_data(
+ u: &mut Unstructured,
+ module: &Module,
+ builder: &mut CodeBuilder,
+ instructions: &mut Vec<Instruction>,
+) -> Result<()> {
+ let n = module
+ .array_types
+ .iter()
+ .filter(|i| {
+ let ty = module.ty(**i).unwrap_array().0.element_type.unpack();
+ ty.is_numeric() || ty.is_vector()
+ })
+ .count();
+ debug_assert!(n > 0);
+ let i = u.int_in_range(0..=n - 1)?;
+ let array_type_index = module
+ .array_types
+ .iter()
+ .copied()
+ .filter(|i| {
+ let ty = module.ty(*i).unwrap_array().0.element_type.unpack();
+ ty.is_numeric() || ty.is_vector()
+ })
+ .nth(i)
+ .unwrap();
+
+ let m = module.data.len();
+ debug_assert!(m > 0);
+ let array_data_index = u.int_in_range(0..=m - 1)?;
+ let array_data_index = u32::try_from(array_data_index).unwrap();
+
+ builder.pop_operand();
+ builder.pop_operand();
+ builder.push_operand(Some(ValType::Ref(RefType {
+ nullable: false,
+ heap_type: HeapType::Concrete(array_type_index),
+ })));
+ instructions.push(Instruction::ArrayNewData {
+ array_type_index,
+ array_data_index,
+ });
+ Ok(())
+}
+
+fn module_has_elem_segment_of_array_type(module: &Module, ty: &ArrayType) -> bool {
+ module
+ .elems
+ .iter()
+ .any(|elem| module.val_type_is_sub_type(ValType::Ref(elem.ty), ty.0.element_type.unpack()))
+}
+
+#[inline]
+fn array_new_elem_valid(module: &Module, builder: &mut CodeBuilder) -> bool {
+ module.config.gc_enabled
+ && !module.config.disallow_traps
+ && builder.types_on_stack(module, &[ValType::I32, ValType::I32])
+ && module
+ .array_types
+ .iter()
+ .any(|i| module_has_elem_segment_of_array_type(module, module.ty(*i).unwrap_array()))
+}
+
+fn array_new_elem(
+ u: &mut Unstructured,
+ module: &Module,
+ builder: &mut CodeBuilder,
+ instructions: &mut Vec<Instruction>,
+) -> Result<()> {
+ let n = module
+ .array_types
+ .iter()
+ .filter(|i| module_has_elem_segment_of_array_type(module, module.ty(**i).unwrap_array()))
+ .count();
+ debug_assert!(n > 0);
+ let i = u.int_in_range(0..=n - 1)?;
+ let array_type_index = module
+ .array_types
+ .iter()
+ .copied()
+ .filter(|i| module_has_elem_segment_of_array_type(module, module.ty(*i).unwrap_array()))
+ .nth(i)
+ .unwrap();
+ let elem_ty = module
+ .ty(array_type_index)
+ .unwrap_array()
+ .0
+ .element_type
+ .unpack();
+
+ let m = module
+ .elems
+ .iter()
+ .filter(|elem| module.val_type_is_sub_type(ValType::Ref(elem.ty), elem_ty))
+ .count();
+ debug_assert!(m > 0);
+ let j = u.int_in_range(0..=m - 1)?;
+ let (array_elem_index, _) = module
+ .elems
+ .iter()
+ .enumerate()
+ .filter(|(_, elem)| module.val_type_is_sub_type(ValType::Ref(elem.ty), elem_ty))
+ .nth(j)
+ .unwrap();
+ let array_elem_index = u32::try_from(array_elem_index).unwrap();
+
+ builder.pop_operand();
+ builder.pop_operand();
+ builder.push_operand(Some(ValType::Ref(RefType {
+ nullable: false,
+ heap_type: HeapType::Concrete(array_type_index),
+ })));
+
+ instructions.push(Instruction::ArrayNewElem {
+ array_type_index,
+ array_elem_index,
+ });
+ Ok(())
+}
+
+#[inline]
+fn array_get_valid(module: &Module, builder: &mut CodeBuilder) -> bool {
+ module.config.gc_enabled
+ && !module.config.disallow_traps // TODO: add support for disallowing traps
+ && builder.type_on_stack(module, ValType::I32)
+ && builder.concrete_array_ref_type_on_stack_at(module, 1).is_some()
+}
+
+fn array_get(
+ u: &mut Unstructured,
+ module: &Module,
+ builder: &mut CodeBuilder,
+ instructions: &mut Vec<Instruction>,
+) -> Result<()> {
+ builder.pop_operand();
+ let (_, array_type_index) = builder.pop_concrete_ref_type();
+ let elem_ty = module.ty(array_type_index).unwrap_array().0.element_type;
+ builder.push_operand(Some(elem_ty.unpack()));
+ instructions.push(match elem_ty {
+ StorageType::I8 | StorageType::I16 => {
+ if u.arbitrary()? {
+ Instruction::ArrayGetS(array_type_index)
+ } else {
+ Instruction::ArrayGetU(array_type_index)
+ }
+ }
+ StorageType::Val(_) => Instruction::ArrayGet(array_type_index),
+ });
+ Ok(())
+}
+
+#[inline]
+fn array_set_valid(module: &Module, builder: &mut CodeBuilder) -> bool {
+ if !module.config.gc_enabled
+ // TODO: implement disallowing traps.
+ || module.config.disallow_traps
+ || !builder.type_on_stack_at(module, 1, ValType::I32)
+ {
+ return false;
+ }
+ match builder.concrete_array_ref_type_on_stack_at(module, 2) {
+ None => false,
+ Some((_nullable, _idx, array_ty)) => {
+ array_ty.0.mutable && builder.field_type_on_stack(module, array_ty.0)
+ }
+ }
+}
+
+fn array_set(
+ _u: &mut Unstructured,
+ _module: &Module,
+ builder: &mut CodeBuilder,
+ instructions: &mut Vec<Instruction>,
+) -> Result<()> {
+ builder.pop_operand();
+ builder.pop_operand();
+ let (_, ty) = builder.pop_concrete_ref_type();
+ instructions.push(Instruction::ArraySet(ty));
+ Ok(())
+}
+
+#[inline]
+fn array_len_valid(module: &Module, builder: &mut CodeBuilder) -> bool {
+ module.config.gc_enabled
+ && builder.type_on_stack(
+ module,
+ ValType::Ref(RefType {
+ nullable: true,
+ heap_type: HeapType::Array,
+ }),
+ )
+}
+
+fn array_len(
+ _u: &mut Unstructured,
+ _module: &Module,
+ builder: &mut CodeBuilder,
+ instructions: &mut Vec<Instruction>,
+) -> Result<()> {
+ builder.pop_operand();
+ builder.push_operand(Some(ValType::I32));
+ instructions.push(Instruction::ArrayLen);
+ Ok(())
+}
+
+#[inline]
+fn array_fill_valid(module: &Module, builder: &mut CodeBuilder) -> bool {
+ if !module.config.gc_enabled
+ // TODO: add support for disallowing traps
+ || module.config.disallow_traps
+ || !builder.type_on_stack_at(module, 0, ValType::I32)
+ || !builder.type_on_stack_at(module, 2, ValType::I32)
+ {
+ return false;
+ }
+ match builder.concrete_array_ref_type_on_stack_at(module, 3) {
+ None => return false,
+ Some((_, _, array_ty)) => {
+ array_ty.0.mutable && builder.field_type_on_stack_at(module, 1, array_ty.0)
+ }
+ }
+}
+
+fn array_fill(
+ _u: &mut Unstructured,
+ _module: &Module,
+ builder: &mut CodeBuilder,
+ instructions: &mut Vec<Instruction>,
+) -> Result<()> {
+ builder.pop_operand();
+ builder.pop_operand();
+ builder.pop_operand();
+ let (_, ty) = builder.pop_concrete_ref_type();
+ instructions.push(Instruction::ArrayFill(ty));
+ Ok(())
+}
+
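+// `array.copy` requires the destination element type (stack slot 4) to be
+// mutable and a supertype of the source element type (stack slot 2); the
+// packed i8/i16 storage types only match themselves.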
+#[inline]
+fn array_copy_valid(module: &Module, builder: &mut CodeBuilder) -> bool {
+ if !module.config.gc_enabled
+ // TODO: add support for disallowing traps
+ || module.config.disallow_traps
+ || !builder.type_on_stack_at(module, 0, ValType::I32)
+ || !builder.type_on_stack_at(module, 1, ValType::I32)
+ || !builder.type_on_stack_at(module, 3, ValType::I32)
+ {
+ return false;
+ }
+ let x = match builder.concrete_array_ref_type_on_stack_at(module, 4) {
+ None => return false,
+ Some((_, _, x)) => x,
+ };
+ if !x.0.mutable {
+ return false;
+ }
+ let y = match builder.concrete_array_ref_type_on_stack_at(module, 2) {
+ None => return false,
+ Some((_, _, y)) => y,
+ };
+ match (x.0.element_type, y.0.element_type) {
+ (StorageType::I8, StorageType::I8) => true,
+ (StorageType::I8, _) => false,
+ (StorageType::I16, StorageType::I16) => true,
+ (StorageType::I16, _) => false,
+ (StorageType::Val(x), StorageType::Val(y)) => module.val_type_is_sub_type(y, x),
+ (StorageType::Val(_), _) => false,
+ }
+}
+
+fn array_copy(
+ _u: &mut Unstructured,
+ _module: &Module,
+ builder: &mut CodeBuilder,
+ instructions: &mut Vec<Instruction>,
+) -> Result<()> {
+ builder.pop_operand();
+ builder.pop_operand();
+ let (_, array_type_index_src) = builder.pop_concrete_ref_type();
+ builder.pop_operand();
+ let (_, array_type_index_dst) = builder.pop_concrete_ref_type();
+ instructions.push(Instruction::ArrayCopy {
+ array_type_index_dst,
+ array_type_index_src,
+ });
+ Ok(())
+}
+
+#[inline]
+fn array_init_data_valid(module: &Module, builder: &mut CodeBuilder) -> bool {
+ if !module.config.gc_enabled
+ || !module.config.bulk_memory_enabled // Requires data count section
+ || module.config.disallow_traps
+ || module.data.is_empty()
+ || !builder.types_on_stack(module, &[ValType::I32, ValType::I32, ValType::I32])
+ {
+ return false;
+ }
+ match builder.concrete_array_ref_type_on_stack_at(module, 3) {
+ None => return false,
+ Some((_, _, ty)) => {
+ let elem_ty = ty.0.element_type.unpack();
+ ty.0.mutable && (elem_ty.is_numeric() || elem_ty.is_vector())
+ }
+ }
+}
+
+fn array_init_data(
+ u: &mut Unstructured,
+ module: &Module,
+ builder: &mut CodeBuilder,
+ instructions: &mut Vec<Instruction>,
+) -> Result<()> {
+ builder.pop_operand();
+ builder.pop_operand();
+ builder.pop_operand();
+ let (_, array_type_index) = builder.pop_concrete_ref_type();
+
+ let n = module.data.len();
+ debug_assert!(n > 0);
+ let array_data_index = u.int_in_range(0..=n - 1)?;
+ let array_data_index = u32::try_from(array_data_index).unwrap();
+
+ instructions.push(Instruction::ArrayInitData {
+ array_type_index,
+ array_data_index,
+ });
+ Ok(())
+}
+
+#[inline]
+fn array_init_elem_valid(module: &Module, builder: &mut CodeBuilder) -> bool {
+ if !module.config.gc_enabled
+ || module.config.disallow_traps
+ || !builder.types_on_stack(module, &[ValType::I32, ValType::I32, ValType::I32])
+ {
+ return false;
+ }
+ match builder.concrete_array_ref_type_on_stack_at(module, 3) {
+ None => return false,
+ Some((_, _, array_ty)) => {
+ array_ty.0.mutable && module_has_elem_segment_of_array_type(module, &array_ty)
+ }
+ }
+}
+
+fn array_init_elem(
+ u: &mut Unstructured,
+ module: &Module,
+ builder: &mut CodeBuilder,
+ instructions: &mut Vec<Instruction>,
+) -> Result<()> {
+ builder.pop_operand();
+ builder.pop_operand();
+ builder.pop_operand();
+ let (_, array_type_index) = builder.pop_concrete_ref_type();
+
+ let elem_ty = module
+ .ty(array_type_index)
+ .unwrap_array()
+ .0
+ .element_type
+ .unpack();
+
+ let n = module
+ .elems
+ .iter()
+ .filter(|elem| module.val_type_is_sub_type(ValType::Ref(elem.ty), elem_ty))
+ .count();
+ debug_assert!(n > 0);
+ let j = u.int_in_range(0..=n - 1)?;
+ let (array_elem_index, _) = module
+ .elems
+ .iter()
+ .enumerate()
+ .filter(|(_, elem)| module.val_type_is_sub_type(ValType::Ref(elem.ty), elem_ty))
+ .nth(j)
+ .unwrap();
+ let array_elem_index = u32::try_from(array_elem_index).unwrap();
+
+ instructions.push(Instruction::ArrayInitElem {
+ array_type_index,
+ array_elem_index,
+ });
+ Ok(())
+}
+
+#[inline]
+fn ref_i31_valid(module: &Module, builder: &mut CodeBuilder) -> bool {
+ module.config.gc_enabled && builder.type_on_stack(module, ValType::I32)
+}
+
+fn ref_i31(
+ _u: &mut Unstructured,
+ _module: &Module,
+ builder: &mut CodeBuilder,
+ instructions: &mut Vec<Instruction>,
+) -> Result<()> {
+ builder.pop_operand();
+ builder.push_operand(Some(ValType::Ref(RefType {
+ nullable: false,
+ heap_type: HeapType::I31,
+ })));
+ instructions.push(Instruction::RefI31);
+ Ok(())
+}
+
+#[inline]
+fn i31_get_valid(module: &Module, builder: &mut CodeBuilder) -> bool {
+ module.config.gc_enabled
+ && builder.type_on_stack(
+ module,
+ ValType::Ref(RefType {
+ nullable: true,
+ heap_type: HeapType::I31,
+ }),
+ )
+}
+
+fn i31_get(
+ u: &mut Unstructured,
+ _module: &Module,
+ builder: &mut CodeBuilder,
+ instructions: &mut Vec<Instruction>,
+) -> Result<()> {
+ builder.pop_operand();
+ builder.push_operand(Some(ValType::I32));
+ instructions.push(if u.arbitrary()? {
+ Instruction::I31GetS
+ } else {
+ Instruction::I31GetU
+ });
+ Ok(())
+}
+
+#[inline]
+fn any_convert_extern_valid(module: &Module, builder: &mut CodeBuilder) -> bool {
+ module.config.gc_enabled
+ && builder.type_on_stack(
+ module,
+ ValType::Ref(RefType {
+ nullable: true,
+ heap_type: HeapType::Extern,
+ }),
+ )
+}
+
+fn any_convert_extern(
+ u: &mut Unstructured,
+ _module: &Module,
+ builder: &mut CodeBuilder,
+ instructions: &mut Vec<Instruction>,
+) -> Result<()> {
+ let nullable = match builder.pop_ref_type() {
+ None => u.arbitrary()?,
+ Some(r) => r.nullable,
+ };
+ builder.push_operand(Some(ValType::Ref(RefType {
+ nullable,
+ heap_type: HeapType::Any,
+ })));
+ instructions.push(Instruction::AnyConvertExtern);
+ Ok(())
+}
+
+#[inline]
+fn extern_convert_any_valid(module: &Module, builder: &mut CodeBuilder) -> bool {
+ module.config.gc_enabled
+ && builder.type_on_stack(
+ module,
+ ValType::Ref(RefType {
+ nullable: true,
+ heap_type: HeapType::Any,
+ }),
+ )
+}
+
+fn extern_convert_any(
+ u: &mut Unstructured,
+ _module: &Module,
+ builder: &mut CodeBuilder,
+ instructions: &mut Vec<Instruction>,
+) -> Result<()> {
+ let nullable = match builder.pop_ref_type() {
+ None => u.arbitrary()?,
+ Some(r) => r.nullable,
+ };
+ builder.push_operand(Some(ValType::Ref(RefType {
+ nullable,
+ heap_type: HeapType::Extern,
+ })));
+ instructions.push(Instruction::ExternConvertAny);
+ Ok(())
+}
+
+fn table_index(ty: RefType, u: &mut Unstructured, module: &Module) -> Result<u32> {
+ let tables = module
+ .tables
+ .iter()
+ .enumerate()
+ .filter(|(_, t)| module.ref_type_is_sub_type(ty, t.element_type))
+ .map(|t| t.0 as u32)
+ .collect::<Vec<_>>();
+ Ok(*u.choose(&tables)?)
+}
+
+fn lane_index(u: &mut Unstructured, number_of_lanes: u8) -> Result<u8> {
+ u.int_in_range(0..=(number_of_lanes - 1))
+}
+
+#[inline]
+fn simd_v128_on_stack(module: &Module, builder: &mut CodeBuilder) -> bool {
+ !module.config.disallow_traps
+ && module.config.simd_enabled
+ && builder.types_on_stack(module, &[ValType::V128])
+}
+
+#[inline]
+fn simd_v128_on_stack_relaxed(module: &Module, builder: &mut CodeBuilder) -> bool {
+ !module.config.disallow_traps
+ && module.config.relaxed_simd_enabled
+ && builder.types_on_stack(module, &[ValType::V128])
+}
+
+#[inline]
+fn simd_v128_v128_on_stack(module: &Module, builder: &mut CodeBuilder) -> bool {
+ !module.config.disallow_traps
+ && module.config.simd_enabled
+ && builder.types_on_stack(module, &[ValType::V128, ValType::V128])
+}
+
+#[inline]
+fn simd_v128_v128_on_stack_relaxed(module: &Module, builder: &mut CodeBuilder) -> bool {
+ !module.config.disallow_traps
+ && module.config.relaxed_simd_enabled
+ && builder.types_on_stack(module, &[ValType::V128, ValType::V128])
+}
+
+#[inline]
+fn simd_v128_v128_v128_on_stack(module: &Module, builder: &mut CodeBuilder) -> bool {
+ !module.config.disallow_traps
+ && module.config.simd_enabled
+ && builder.types_on_stack(module, &[ValType::V128, ValType::V128, ValType::V128])
+}
+
+#[inline]
+fn simd_v128_v128_v128_on_stack_relaxed(module: &Module, builder: &mut CodeBuilder) -> bool {
+ !module.config.disallow_traps
+ && module.config.relaxed_simd_enabled
+ && builder.types_on_stack(module, &[ValType::V128, ValType::V128, ValType::V128])
+}
+
+#[inline]
+fn simd_v128_i32_on_stack(module: &Module, builder: &mut CodeBuilder) -> bool {
+ !module.config.disallow_traps
+ && module.config.simd_enabled
+ && builder.types_on_stack(module, &[ValType::V128, ValType::I32])
+}
+
+#[inline]
+fn simd_v128_i64_on_stack(module: &Module, builder: &mut CodeBuilder) -> bool {
+ !module.config.disallow_traps
+ && module.config.simd_enabled
+ && builder.types_on_stack(module, &[ValType::V128, ValType::I64])
+}
+
+#[inline]
+fn simd_v128_f32_on_stack(module: &Module, builder: &mut CodeBuilder) -> bool {
+ !module.config.disallow_traps
+ && module.config.simd_enabled
+ && builder.types_on_stack(module, &[ValType::V128, ValType::F32])
+}
+
+#[inline]
+fn simd_v128_f64_on_stack(module: &Module, builder: &mut CodeBuilder) -> bool {
+ !module.config.disallow_traps
+ && module.config.simd_enabled
+ && builder.types_on_stack(module, &[ValType::V128, ValType::F64])
+}
+
+#[inline]
+fn simd_i32_on_stack(module: &Module, builder: &mut CodeBuilder) -> bool {
+ !module.config.disallow_traps
+ && module.config.simd_enabled
+ && builder.type_on_stack(module, ValType::I32)
+}
+
+#[inline]
+fn simd_i64_on_stack(module: &Module, builder: &mut CodeBuilder) -> bool {
+ !module.config.disallow_traps
+ && module.config.simd_enabled
+ && builder.type_on_stack(module, ValType::I64)
+}
+
+#[inline]
+fn simd_f32_on_stack(module: &Module, builder: &mut CodeBuilder) -> bool {
+ !module.config.disallow_traps
+ && module.config.simd_enabled
+ && builder.type_on_stack(module, ValType::F32)
+}
+
+#[inline]
+fn simd_f64_on_stack(module: &Module, builder: &mut CodeBuilder) -> bool {
+ !module.config.disallow_traps
+ && module.config.simd_enabled
+ && builder.type_on_stack(module, ValType::F64)
+}
+
+#[inline]
+fn simd_have_memory_and_offset(module: &Module, builder: &mut CodeBuilder) -> bool {
+ !module.config.disallow_traps
+ && module.config.simd_enabled
+ && have_memory_and_offset(module, builder)
+}
+
+#[inline]
+fn simd_have_memory_and_offset_and_v128(module: &Module, builder: &mut CodeBuilder) -> bool {
+ !module.config.disallow_traps
+ && module.config.simd_enabled
+ && store_valid(module, builder, || ValType::V128)
+}
+
+#[inline]
+fn simd_load_lane_valid(module: &Module, builder: &mut CodeBuilder) -> bool {
+ // The SIMD non-trapping case is not yet implemented.
+ !module.config.disallow_traps && simd_have_memory_and_offset_and_v128(module, builder)
+}
+
+#[inline]
+fn simd_v128_store_valid(module: &Module, builder: &mut CodeBuilder) -> bool {
+ !module.config.disallow_traps
+ && module.config.simd_enabled
+ && store_valid(module, builder, || ValType::V128)
+}
+
+#[inline]
+fn simd_store_lane_valid(module: &Module, builder: &mut CodeBuilder) -> bool {
+ // The SIMD non-trapping case is not yet implemented.
+ !module.config.disallow_traps && simd_v128_store_valid(module, builder)
+}
+
+#[inline]
+fn simd_enabled(module: &Module, _: &mut CodeBuilder) -> bool {
+ module.config.simd_enabled
+}
+
+macro_rules! simd_load {
+ ($instruction:ident, $generator_fn_name:ident, $alignments:expr) => {
+ fn $generator_fn_name(
+ u: &mut Unstructured,
+ module: &Module,
+            builder: &mut CodeBuilder,
+            instructions: &mut Vec<Instruction>,
+ ) -> Result<()> {
+ let memarg = mem_arg(u, module, builder, $alignments)?;
+ builder.push_operands(&[ValType::V128]);
+ if module.config.disallow_traps {
+ no_traps::load(
+ Instruction::$instruction(memarg),
+ module,
+ builder,
+ instructions,
+ );
+ } else {
+ instructions.push(Instruction::$instruction(memarg));
+ }
+ Ok(())
+ }
+ };
+}
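+
+// For example, `simd_load!(V128Load, v128_load, &[0, 1, 2, 3, 4])` defines a
+// `v128_load` generator that picks its memarg alignment from the given list,
+// pushes a v128 result, and routes through `no_traps::load` when traps are
+// disallowed.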
+
+simd_load!(V128Load, v128_load, &[0, 1, 2, 3, 4]);
+simd_load!(V128Load8x8S, v128_load8x8s, &[0, 1, 2, 3]);
+simd_load!(V128Load8x8U, v128_load8x8u, &[0, 1, 2, 3]);
+simd_load!(V128Load16x4S, v128_load16x4s, &[0, 1, 2, 3]);
+simd_load!(V128Load16x4U, v128_load16x4u, &[0, 1, 2, 3]);
+simd_load!(V128Load32x2S, v128_load32x2s, &[0, 1, 2, 3]);
+simd_load!(V128Load32x2U, v128_load32x2u, &[0, 1, 2, 3]);
+simd_load!(V128Load8Splat, v128_load8_splat, &[0]);
+simd_load!(V128Load16Splat, v128_load16_splat, &[0, 1]);
+simd_load!(V128Load32Splat, v128_load32_splat, &[0, 1, 2]);
+simd_load!(V128Load64Splat, v128_load64_splat, &[0, 1, 2, 3]);
+simd_load!(V128Load32Zero, v128_load32_zero, &[0, 1, 2]);
+simd_load!(V128Load64Zero, v128_load64_zero, &[0, 1, 2, 3]);
+
+fn v128_store(
+ u: &mut Unstructured,
+ module: &Module,
+ builder: &mut CodeBuilder,
+ instructions: &mut Vec<Instruction>,
+) -> Result<()> {
+ builder.pop_operands(module, &[ValType::V128]);
+ let memarg = mem_arg(u, module, builder, &[0, 1, 2, 3, 4])?;
+ if module.config.disallow_traps {
+ no_traps::store(
+ Instruction::V128Store(memarg),
+ module,
+ builder,
+ instructions,
+ );
+ } else {
+ instructions.push(Instruction::V128Store(memarg));
+ }
+ Ok(())
+}
+
+macro_rules! simd_load_lane {
+ ($instruction:ident, $generator_fn_name:ident, $alignments:expr, $number_of_lanes:expr) => {
+ fn $generator_fn_name(
+ u: &mut Unstructured,
+ module: &Module,
+ builder: &mut CodeBuilder,
+ instructions: &mut Vec<Instruction>,
+ ) -> Result<()> {
+ builder.pop_operands(module, &[ValType::V128]);
+ let memarg = mem_arg(u, module, builder, $alignments)?;
+ builder.push_operands(&[ValType::V128]);
+ instructions.push(Instruction::$instruction {
+ memarg,
+ lane: lane_index(u, $number_of_lanes)?,
+ });
+ Ok(())
+ }
+ };
+}
+
+simd_load_lane!(V128Load8Lane, v128_load8_lane, &[0], 16);
+simd_load_lane!(V128Load16Lane, v128_load16_lane, &[0, 1], 8);
+simd_load_lane!(V128Load32Lane, v128_load32_lane, &[0, 1, 2], 4);
+simd_load_lane!(V128Load64Lane, v128_load64_lane, &[0, 1, 2, 3], 2);
+
+macro_rules! simd_store_lane {
+ ($instruction:ident, $generator_fn_name:ident, $alignments:expr, $number_of_lanes:expr) => {
+ fn $generator_fn_name(
+ u: &mut Unstructured,
+ module: &Module,
+ builder: &mut CodeBuilder,
+ instructions: &mut Vec<Instruction>,
+ ) -> Result<()> {
+ builder.pop_operands(module, &[ValType::V128]);
+ let memarg = mem_arg(u, module, builder, $alignments)?;
+ instructions.push(Instruction::$instruction {
+ memarg,
+ lane: lane_index(u, $number_of_lanes)?,
+ });
+ Ok(())
+ }
+ };
+}
+
+simd_store_lane!(V128Store8Lane, v128_store8_lane, &[0], 16);
+simd_store_lane!(V128Store16Lane, v128_store16_lane, &[0, 1], 8);
+simd_store_lane!(V128Store32Lane, v128_store32_lane, &[0, 1, 2], 4);
+simd_store_lane!(V128Store64Lane, v128_store64_lane, &[0, 1, 2, 3], 2);
+
+fn v128_const(
+ u: &mut Unstructured,
+ _module: &Module,
+ builder: &mut CodeBuilder,
+ instructions: &mut Vec<Instruction>,
+) -> Result<()> {
+ builder.push_operands(&[ValType::V128]);
+ let c = i128::from_le_bytes(u.arbitrary()?);
+ instructions.push(Instruction::V128Const(c));
+ Ok(())
+}
+
+fn i8x16_shuffle(
+ u: &mut Unstructured,
+ module: &Module,
+ builder: &mut CodeBuilder,
+ instructions: &mut Vec<Instruction>,
+) -> Result<()> {
+ builder.pop_operands(module, &[ValType::V128, ValType::V128]);
+ builder.push_operands(&[ValType::V128]);
+ let mut lanes = [0; 16];
+ for i in 0..16 {
+ lanes[i] = u.int_in_range(0..=31)?;
+ }
+ instructions.push(Instruction::I8x16Shuffle(lanes));
+ Ok(())
+}
+
+macro_rules! simd_lane_access {
+ ($instruction:ident, $generator_fn_name:ident, $in_types:expr => $out_types:expr, $number_of_lanes:expr) => {
+ fn $generator_fn_name(
+ u: &mut Unstructured,
+ module: &Module,
+ builder: &mut CodeBuilder,
+ instructions: &mut Vec<Instruction>,
+ ) -> Result<()> {
+ builder.pop_operands(module, $in_types);
+ builder.push_operands($out_types);
+ instructions.push(Instruction::$instruction(lane_index(u, $number_of_lanes)?));
+ Ok(())
+ }
+ };
+}
+
+simd_lane_access!(I8x16ExtractLaneS, i8x16_extract_lane_s, &[ValType::V128] => &[ValType::I32], 16);
+simd_lane_access!(I8x16ExtractLaneU, i8x16_extract_lane_u, &[ValType::V128] => &[ValType::I32], 16);
+simd_lane_access!(I8x16ReplaceLane, i8x16_replace_lane, &[ValType::V128, ValType::I32] => &[ValType::V128], 16);
+simd_lane_access!(I16x8ExtractLaneS, i16x8_extract_lane_s, &[ValType::V128] => &[ValType::I32], 8);
+simd_lane_access!(I16x8ExtractLaneU, i16x8_extract_lane_u, &[ValType::V128] => &[ValType::I32], 8);
+simd_lane_access!(I16x8ReplaceLane, i16x8_replace_lane, &[ValType::V128, ValType::I32] => &[ValType::V128], 8);
+simd_lane_access!(I32x4ExtractLane, i32x4_extract_lane, &[ValType::V128] => &[ValType::I32], 4);
+simd_lane_access!(I32x4ReplaceLane, i32x4_replace_lane, &[ValType::V128, ValType::I32] => &[ValType::V128], 4);
+simd_lane_access!(I64x2ExtractLane, i64x2_extract_lane, &[ValType::V128] => &[ValType::I64], 2);
+simd_lane_access!(I64x2ReplaceLane, i64x2_replace_lane, &[ValType::V128, ValType::I64] => &[ValType::V128], 2);
+simd_lane_access!(F32x4ExtractLane, f32x4_extract_lane, &[ValType::V128] => &[ValType::F32], 4);
+simd_lane_access!(F32x4ReplaceLane, f32x4_replace_lane, &[ValType::V128, ValType::F32] => &[ValType::V128], 4);
+simd_lane_access!(F64x2ExtractLane, f64x2_extract_lane, &[ValType::V128] => &[ValType::F64], 2);
+simd_lane_access!(F64x2ReplaceLane, f64x2_replace_lane, &[ValType::V128, ValType::F64] => &[ValType::V128], 2);
+
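+// The following macros define generator functions for SIMD instructions with
+// fixed stack signatures: `simd_binop!` pops two v128s, `simd_unop!` pops one
+// `$in_type` (v128 by default), `simd_ternop!` pops three v128s, and
+// `simd_shift!` pops a v128 plus an i32 shift amount; each pushes one result.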
+macro_rules! simd_binop {
+ ($instruction:ident, $generator_fn_name:ident) => {
+ fn $generator_fn_name(
+ _: &mut Unstructured,
+ module: &Module,
+            builder: &mut CodeBuilder,
+            instructions: &mut Vec<Instruction>,
+ ) -> Result<()> {
+ builder.pop_operands(module, &[ValType::V128, ValType::V128]);
+ builder.push_operands(&[ValType::V128]);
+ instructions.push(Instruction::$instruction);
+ Ok(())
+ }
+ };
+}
+
+macro_rules! simd_unop {
+ ($instruction:ident, $generator_fn_name:ident) => {
+ simd_unop!($instruction, $generator_fn_name, V128 -> V128);
+ };
+
+ ($instruction:ident, $generator_fn_name:ident, $in_type:ident -> $out_type:ident) => {
+ fn $generator_fn_name(
+ _: &mut Unstructured,
+ module: &Module,
+            builder: &mut CodeBuilder,
+            instructions: &mut Vec<Instruction>,
+        ) -> Result<()> {
+ builder.pop_operands(module, &[ValType::$in_type]);
+ builder.push_operands(&[ValType::$out_type]);
+ instructions.push(Instruction::$instruction);
+ Ok(())
+ }
+ };
+}
+
+macro_rules! simd_ternop {
+ ($instruction:ident, $generator_fn_name:ident) => {
+ fn $generator_fn_name(
+ _: &mut Unstructured,
+ module: &Module,
+            builder: &mut CodeBuilder,
+            instructions: &mut Vec<Instruction>,
+ ) -> Result<()> {
+ builder.pop_operands(module, &[ValType::V128, ValType::V128, ValType::V128]);
+ builder.push_operands(&[ValType::V128]);
+ instructions.push(Instruction::$instruction);
+ Ok(())
+ }
+ };
+}
+
+macro_rules! simd_shift {
+ ($instruction:ident, $generator_fn_name:ident) => {
+ fn $generator_fn_name(
+ _: &mut Unstructured,
+ module: &Module,
+            builder: &mut CodeBuilder,
+            instructions: &mut Vec<Instruction>,
+ ) -> Result<()> {
+ builder.pop_operands(module, &[ValType::V128, ValType::I32]);
+ builder.push_operands(&[ValType::V128]);
+ instructions.push(Instruction::$instruction);
+ Ok(())
+ }
+ };
+}
+
+simd_unop!(I8x16Splat, i8x16_splat, I32 -> V128);
+simd_unop!(I16x8Splat, i16x8_splat, I32 -> V128);
+simd_unop!(I32x4Splat, i32x4_splat, I32 -> V128);
+simd_unop!(I64x2Splat, i64x2_splat, I64 -> V128);
+simd_unop!(F32x4Splat, f32x4_splat, F32 -> V128);
+simd_unop!(F64x2Splat, f64x2_splat, F64 -> V128);
+simd_binop!(I8x16Swizzle, i8x16_swizzle);
+simd_binop!(I8x16Eq, i8x16_eq);
+simd_binop!(I8x16Ne, i8x16_ne);
+simd_binop!(I8x16LtS, i8x16_lt_s);
+simd_binop!(I8x16LtU, i8x16_lt_u);
+simd_binop!(I8x16GtS, i8x16_gt_s);
+simd_binop!(I8x16GtU, i8x16_gt_u);
+simd_binop!(I8x16LeS, i8x16_le_s);
+simd_binop!(I8x16LeU, i8x16_le_u);
+simd_binop!(I8x16GeS, i8x16_ge_s);
+simd_binop!(I8x16GeU, i8x16_ge_u);
+simd_binop!(I16x8Eq, i16x8_eq);
+simd_binop!(I16x8Ne, i16x8_ne);
+simd_binop!(I16x8LtS, i16x8_lt_s);
+simd_binop!(I16x8LtU, i16x8_lt_u);
+simd_binop!(I16x8GtS, i16x8_gt_s);
+simd_binop!(I16x8GtU, i16x8_gt_u);
+simd_binop!(I16x8LeS, i16x8_le_s);
+simd_binop!(I16x8LeU, i16x8_le_u);
+simd_binop!(I16x8GeS, i16x8_ge_s);
+simd_binop!(I16x8GeU, i16x8_ge_u);
+simd_binop!(I32x4Eq, i32x4_eq);
+simd_binop!(I32x4Ne, i32x4_ne);
+simd_binop!(I32x4LtS, i32x4_lt_s);
+simd_binop!(I32x4LtU, i32x4_lt_u);
+simd_binop!(I32x4GtS, i32x4_gt_s);
+simd_binop!(I32x4GtU, i32x4_gt_u);
+simd_binop!(I32x4LeS, i32x4_le_s);
+simd_binop!(I32x4LeU, i32x4_le_u);
+simd_binop!(I32x4GeS, i32x4_ge_s);
+simd_binop!(I32x4GeU, i32x4_ge_u);
+simd_binop!(I64x2Eq, i64x2_eq);
+simd_binop!(I64x2Ne, i64x2_ne);
+simd_binop!(I64x2LtS, i64x2_lt_s);
+simd_binop!(I64x2GtS, i64x2_gt_s);
+simd_binop!(I64x2LeS, i64x2_le_s);
+simd_binop!(I64x2GeS, i64x2_ge_s);
+simd_binop!(F32x4Eq, f32x4_eq);
+simd_binop!(F32x4Ne, f32x4_ne);
+simd_binop!(F32x4Lt, f32x4_lt);
+simd_binop!(F32x4Gt, f32x4_gt);
+simd_binop!(F32x4Le, f32x4_le);
+simd_binop!(F32x4Ge, f32x4_ge);
+simd_binop!(F64x2Eq, f64x2_eq);
+simd_binop!(F64x2Ne, f64x2_ne);
+simd_binop!(F64x2Lt, f64x2_lt);
+simd_binop!(F64x2Gt, f64x2_gt);
+simd_binop!(F64x2Le, f64x2_le);
+simd_binop!(F64x2Ge, f64x2_ge);
+simd_unop!(V128Not, v128_not);
+simd_binop!(V128And, v128_and);
+simd_binop!(V128AndNot, v128_and_not);
+simd_binop!(V128Or, v128_or);
+simd_binop!(V128Xor, v128_xor);
+simd_unop!(V128AnyTrue, v128_any_true, V128 -> I32);
+simd_unop!(I8x16Abs, i8x16_abs);
+simd_unop!(I8x16Neg, i8x16_neg);
+simd_unop!(I8x16Popcnt, i8x16_popcnt);
+simd_unop!(I8x16AllTrue, i8x16_all_true, V128 -> I32);
+simd_unop!(I8x16Bitmask, i8x16_bitmask, V128 -> I32);
+simd_binop!(I8x16NarrowI16x8S, i8x16_narrow_i16x8s);
+simd_binop!(I8x16NarrowI16x8U, i8x16_narrow_i16x8u);
+simd_shift!(I8x16Shl, i8x16_shl);
+simd_shift!(I8x16ShrS, i8x16_shr_s);
+simd_shift!(I8x16ShrU, i8x16_shr_u);
+simd_binop!(I8x16Add, i8x16_add);
+simd_binop!(I8x16AddSatS, i8x16_add_sat_s);
+simd_binop!(I8x16AddSatU, i8x16_add_sat_u);
+simd_binop!(I8x16Sub, i8x16_sub);
+simd_binop!(I8x16SubSatS, i8x16_sub_sat_s);
+simd_binop!(I8x16SubSatU, i8x16_sub_sat_u);
+simd_binop!(I8x16MinS, i8x16_min_s);
+simd_binop!(I8x16MinU, i8x16_min_u);
+simd_binop!(I8x16MaxS, i8x16_max_s);
+simd_binop!(I8x16MaxU, i8x16_max_u);
+simd_binop!(I8x16AvgrU, i8x16_avgr_u);
+simd_unop!(I16x8ExtAddPairwiseI8x16S, i16x8_extadd_pairwise_i8x16s);
+simd_unop!(I16x8ExtAddPairwiseI8x16U, i16x8_extadd_pairwise_i8x16u);
+simd_unop!(I16x8Abs, i16x8_abs);
+simd_unop!(I16x8Neg, i16x8_neg);
+simd_binop!(I16x8Q15MulrSatS, i16x8q15_mulr_sat_s);
+simd_unop!(I16x8AllTrue, i16x8_all_true, V128 -> I32);
+simd_unop!(I16x8Bitmask, i16x8_bitmask, V128 -> I32);
+simd_binop!(I16x8NarrowI32x4S, i16x8_narrow_i32x4s);
+simd_binop!(I16x8NarrowI32x4U, i16x8_narrow_i32x4u);
+simd_unop!(I16x8ExtendLowI8x16S, i16x8_extend_low_i8x16s);
+simd_unop!(I16x8ExtendHighI8x16S, i16x8_extend_high_i8x16s);
+simd_unop!(I16x8ExtendLowI8x16U, i16x8_extend_low_i8x16u);
+simd_unop!(I16x8ExtendHighI8x16U, i16x8_extend_high_i8x16u);
+simd_shift!(I16x8Shl, i16x8_shl);
+simd_shift!(I16x8ShrS, i16x8_shr_s);
+simd_shift!(I16x8ShrU, i16x8_shr_u);
+simd_binop!(I16x8Add, i16x8_add);
+simd_binop!(I16x8AddSatS, i16x8_add_sat_s);
+simd_binop!(I16x8AddSatU, i16x8_add_sat_u);
+simd_binop!(I16x8Sub, i16x8_sub);
+simd_binop!(I16x8SubSatS, i16x8_sub_sat_s);
+simd_binop!(I16x8SubSatU, i16x8_sub_sat_u);
+simd_binop!(I16x8Mul, i16x8_mul);
+simd_binop!(I16x8MinS, i16x8_min_s);
+simd_binop!(I16x8MinU, i16x8_min_u);
+simd_binop!(I16x8MaxS, i16x8_max_s);
+simd_binop!(I16x8MaxU, i16x8_max_u);
+simd_binop!(I16x8AvgrU, i16x8_avgr_u);
+simd_binop!(I16x8ExtMulLowI8x16S, i16x8_extmul_low_i8x16s);
+simd_binop!(I16x8ExtMulHighI8x16S, i16x8_extmul_high_i8x16s);
+simd_binop!(I16x8ExtMulLowI8x16U, i16x8_extmul_low_i8x16u);
+simd_binop!(I16x8ExtMulHighI8x16U, i16x8_extmul_high_i8x16u);
+simd_unop!(I32x4ExtAddPairwiseI16x8S, i32x4_extadd_pairwise_i16x8s);
+simd_unop!(I32x4ExtAddPairwiseI16x8U, i32x4_extadd_pairwise_i16x8u);
+simd_unop!(I32x4Abs, i32x4_abs);
+simd_unop!(I32x4Neg, i32x4_neg);
+simd_unop!(I32x4AllTrue, i32x4_all_true, V128 -> I32);
+simd_unop!(I32x4Bitmask, i32x4_bitmask, V128 -> I32);
+simd_unop!(I32x4ExtendLowI16x8S, i32x4_extend_low_i16x8s);
+simd_unop!(I32x4ExtendHighI16x8S, i32x4_extend_high_i16x8s);
+simd_unop!(I32x4ExtendLowI16x8U, i32x4_extend_low_i16x8u);
+simd_unop!(I32x4ExtendHighI16x8U, i32x4_extend_high_i16x8u);
+simd_shift!(I32x4Shl, i32x4_shl);
+simd_shift!(I32x4ShrS, i32x4_shr_s);
+simd_shift!(I32x4ShrU, i32x4_shr_u);
+simd_binop!(I32x4Add, i32x4_add);
+simd_binop!(I32x4Sub, i32x4_sub);
+simd_binop!(I32x4Mul, i32x4_mul);
+simd_binop!(I32x4MinS, i32x4_min_s);
+simd_binop!(I32x4MinU, i32x4_min_u);
+simd_binop!(I32x4MaxS, i32x4_max_s);
+simd_binop!(I32x4MaxU, i32x4_max_u);
+simd_binop!(I32x4DotI16x8S, i32x4_dot_i16x8s);
+simd_binop!(I32x4ExtMulLowI16x8S, i32x4_extmul_low_i16x8s);
+simd_binop!(I32x4ExtMulHighI16x8S, i32x4_extmul_high_i16x8s);
+simd_binop!(I32x4ExtMulLowI16x8U, i32x4_extmul_low_i16x8u);
+simd_binop!(I32x4ExtMulHighI16x8U, i32x4_extmul_high_i16x8u);
+simd_unop!(I64x2Abs, i64x2_abs);
+simd_unop!(I64x2Neg, i64x2_neg);
+simd_unop!(I64x2AllTrue, i64x2_all_true, V128 -> I32);
+simd_unop!(I64x2Bitmask, i64x2_bitmask, V128 -> I32);
+simd_unop!(I64x2ExtendLowI32x4S, i64x2_extend_low_i32x4s);
+simd_unop!(I64x2ExtendHighI32x4S, i64x2_extend_high_i32x4s);
+simd_unop!(I64x2ExtendLowI32x4U, i64x2_extend_low_i32x4u);
+simd_unop!(I64x2ExtendHighI32x4U, i64x2_extend_high_i32x4u);
+simd_shift!(I64x2Shl, i64x2_shl);
+simd_shift!(I64x2ShrS, i64x2_shr_s);
+simd_shift!(I64x2ShrU, i64x2_shr_u);
+simd_binop!(I64x2Add, i64x2_add);
+simd_binop!(I64x2Sub, i64x2_sub);
+simd_binop!(I64x2Mul, i64x2_mul);
+simd_binop!(I64x2ExtMulLowI32x4S, i64x2_extmul_low_i32x4s);
+simd_binop!(I64x2ExtMulHighI32x4S, i64x2_extmul_high_i32x4s);
+simd_binop!(I64x2ExtMulLowI32x4U, i64x2_extmul_low_i32x4u);
+simd_binop!(I64x2ExtMulHighI32x4U, i64x2_extmul_high_i32x4u);
+simd_unop!(F32x4Ceil, f32x4_ceil);
+simd_unop!(F32x4Floor, f32x4_floor);
+simd_unop!(F32x4Trunc, f32x4_trunc);
+simd_unop!(F32x4Nearest, f32x4_nearest);
+simd_unop!(F32x4Abs, f32x4_abs);
+simd_unop!(F32x4Neg, f32x4_neg);
+simd_unop!(F32x4Sqrt, f32x4_sqrt);
+simd_binop!(F32x4Add, f32x4_add);
+simd_binop!(F32x4Sub, f32x4_sub);
+simd_binop!(F32x4Mul, f32x4_mul);
+simd_binop!(F32x4Div, f32x4_div);
+simd_binop!(F32x4Min, f32x4_min);
+simd_binop!(F32x4Max, f32x4_max);
+simd_binop!(F32x4PMin, f32x4p_min);
+simd_binop!(F32x4PMax, f32x4p_max);
+simd_unop!(F64x2Ceil, f64x2_ceil);
+simd_unop!(F64x2Floor, f64x2_floor);
+simd_unop!(F64x2Trunc, f64x2_trunc);
+simd_unop!(F64x2Nearest, f64x2_nearest);
+simd_unop!(F64x2Abs, f64x2_abs);
+simd_unop!(F64x2Neg, f64x2_neg);
+simd_unop!(F64x2Sqrt, f64x2_sqrt);
+simd_binop!(F64x2Add, f64x2_add);
+simd_binop!(F64x2Sub, f64x2_sub);
+simd_binop!(F64x2Mul, f64x2_mul);
+simd_binop!(F64x2Div, f64x2_div);
+simd_binop!(F64x2Min, f64x2_min);
+simd_binop!(F64x2Max, f64x2_max);
+simd_binop!(F64x2PMin, f64x2p_min);
+simd_binop!(F64x2PMax, f64x2p_max);
+simd_unop!(I32x4TruncSatF32x4S, i32x4_trunc_sat_f32x4s);
+simd_unop!(I32x4TruncSatF32x4U, i32x4_trunc_sat_f32x4u);
+simd_unop!(F32x4ConvertI32x4S, f32x4_convert_i32x4s);
+simd_unop!(F32x4ConvertI32x4U, f32x4_convert_i32x4u);
+simd_unop!(I32x4TruncSatF64x2SZero, i32x4_trunc_sat_f64x2s_zero);
+simd_unop!(I32x4TruncSatF64x2UZero, i32x4_trunc_sat_f64x2u_zero);
+simd_unop!(F64x2ConvertLowI32x4S, f64x2_convert_low_i32x4s);
+simd_unop!(F64x2ConvertLowI32x4U, f64x2_convert_low_i32x4u);
+simd_unop!(F32x4DemoteF64x2Zero, f32x4_demote_f64x2_zero);
+simd_unop!(F64x2PromoteLowF32x4, f64x2_promote_low_f32x4);
+simd_ternop!(V128Bitselect, v128_bitselect);
+simd_binop!(I8x16RelaxedSwizzle, i8x16_relaxed_swizzle);
+simd_unop!(I32x4RelaxedTruncF32x4S, i32x4_relaxed_trunc_f32x4s);
+simd_unop!(I32x4RelaxedTruncF32x4U, i32x4_relaxed_trunc_f32x4u);
+simd_unop!(I32x4RelaxedTruncF64x2SZero, i32x4_relaxed_trunc_f64x2s_zero);
+simd_unop!(I32x4RelaxedTruncF64x2UZero, i32x4_relaxed_trunc_f64x2u_zero);
+simd_ternop!(F32x4RelaxedMadd, f32x4_relaxed_madd);
+simd_ternop!(F32x4RelaxedNmadd, f32x4_relaxed_nmadd);
+simd_ternop!(F64x2RelaxedMadd, f64x2_relaxed_madd);
+simd_ternop!(F64x2RelaxedNmadd, f64x2_relaxed_nmadd);
+simd_ternop!(I8x16RelaxedLaneselect, i8x16_relaxed_laneselect);
+simd_ternop!(I16x8RelaxedLaneselect, i16x8_relaxed_laneselect);
+simd_ternop!(I32x4RelaxedLaneselect, i32x4_relaxed_laneselect);
+simd_ternop!(I64x2RelaxedLaneselect, i64x2_relaxed_laneselect);
+simd_binop!(F32x4RelaxedMin, f32x4_relaxed_min);
+simd_binop!(F32x4RelaxedMax, f32x4_relaxed_max);
+simd_binop!(F64x2RelaxedMin, f64x2_relaxed_min);
+simd_binop!(F64x2RelaxedMax, f64x2_relaxed_max);
+simd_binop!(I16x8RelaxedQ15mulrS, i16x8_relaxed_q15mulr_s);
+simd_binop!(I16x8RelaxedDotI8x16I7x16S, i16x8_relaxed_dot_i8x16_i7x16_s);
+simd_ternop!(
+ I32x4RelaxedDotI8x16I7x16AddS,
+ i32x4_relaxed_dot_i8x16_i7x16_add_s
+);
diff --git a/third_party/rust/wasm-smith/src/core/code_builder/no_traps.rs b/third_party/rust/wasm-smith/src/core/code_builder/no_traps.rs
new file mode 100644
index 0000000000..a1232b6922
--- /dev/null
+++ b/third_party/rust/wasm-smith/src/core/code_builder/no_traps.rs
@@ -0,0 +1,644 @@
+use crate::core::*;
+use wasm_encoder::{BlockType, Instruction, ValType};
+
+use super::CodeBuilder;
+
+// For loads, we dynamically check whether the load will
+// trap, and if it will then we generate a dummy value to
+// use instead.
+pub(crate) fn load<'a>(
+ inst: Instruction<'a>,
+ module: &Module,
+ builder: &mut CodeBuilder,
+ insts: &mut Vec<Instruction<'a>>,
+) {
+ let memarg = get_memarg(&inst);
+ let memory = &module.memories[memarg.memory_index as usize];
+ let address_type = if memory.memory64 {
+ ValType::I64
+ } else {
+ ValType::I32
+ };
+ // Add a temporary local to hold this load's address.
+ let address_local = builder.alloc_local(address_type);
+
+ // Add a temporary local to hold the result of this load.
+ let load_type = type_of_memory_access(&inst);
+ let result_local = builder.alloc_local(load_type);
+
+ // [address:address_type]
+ insts.push(Instruction::LocalSet(address_local));
+ // []
+ insts.push(Instruction::Block(wasm_encoder::BlockType::Empty));
+ {
+ // []
+ insts.push(Instruction::Block(wasm_encoder::BlockType::Empty));
+ {
+ // []
+ insts.push(Instruction::MemorySize(memarg.memory_index));
+ // [mem_size_in_pages:address_type]
+ insts.push(int_const_inst(address_type, 65_536));
+ // [mem_size_in_pages:address_type wasm_page_size:address_type]
+ insts.push(int_mul_inst(address_type));
+ // [mem_size_in_bytes:address_type]
+ insts.push(int_const_inst(
+ address_type,
+ (memarg.offset + size_of_type_in_memory(load_type)) as i64,
+ ));
+ // [mem_size_in_bytes:address_type offset_and_size:address_type]
+ insts.push(Instruction::LocalGet(address_local));
+ // [mem_size_in_bytes:address_type offset_and_size:address_type address:address_type]
+ insts.push(int_add_inst(address_type));
+ // [mem_size_in_bytes:address_type highest_byte_accessed:address_type]
+ insts.push(int_le_u_inst(address_type));
+ // [load_will_trap:i32]
+ insts.push(Instruction::BrIf(0));
+ // []
+ insts.push(Instruction::LocalGet(address_local));
+ // [address:address_type]
+ insts.push(int_const_inst(address_type, 0));
+ // [address:address_type 0:address_type]
+ insts.push(int_le_s_inst(address_type));
+ // [load_will_trap:i32]
+ insts.push(Instruction::BrIf(0));
+ // []
+ insts.push(Instruction::LocalGet(address_local));
+ // [address:address_type]
+ insts.push(inst);
+ // [result:load_type]
+ insts.push(Instruction::LocalSet(result_local));
+ // []
+ insts.push(Instruction::Br(1));
+ // <unreachable>
+ }
+ // []
+ insts.push(Instruction::End);
+ // []
+ insts.push(dummy_value_inst(load_type));
+ // [dummy_value:load_type]
+ insts.push(Instruction::LocalSet(result_local));
+ // []
+ }
+ // []
+ insts.push(Instruction::End);
+ // []
+ insts.push(Instruction::LocalGet(result_local));
+ // [result:load_type]
+}
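+
+// For reference, the code emitted around a load looks roughly like this
+// (WAT-like pseudocode, not the exact encoder output):
+//
+//   local.set $address
+//   block
+//     block
+//       ;; br_if 0 if memory.size * 65536 <= $address + offset + size_of(load)
+//       ;; br_if 0 if $address <= 0 (signed; conservatively treated as a trap)
+//       local.get $address
+//       <the original load>
+//       local.set $result
+//       br 1
+//     end
+//     <dummy value>   ;; trap path: use a dummy result instead
+//     local.set $result
+//   end
+//   local.get $result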
+
+// Stores are similar to loads: we check whether the store
+// will trap, and if it will then we just drop the value.
+pub(crate) fn store<'a>(
+ inst: Instruction<'a>,
+ module: &Module,
+ builder: &mut CodeBuilder,
+ insts: &mut Vec<Instruction<'a>>,
+) {
+ let memarg = get_memarg(&inst);
+ let memory = &module.memories[memarg.memory_index as usize];
+ let address_type = if memory.memory64 {
+ ValType::I64
+ } else {
+ ValType::I32
+ };
+
+ // Add a temporary local to hold this store's address.
+ let address_local = builder.alloc_local(address_type);
+
+ // Add a temporary local to hold the value to store.
+ let store_type = type_of_memory_access(&inst);
+ let value_local = builder.alloc_local(store_type);
+
+ // [address:address_type value:store_type]
+ insts.push(Instruction::LocalSet(value_local));
+ // [address:address_type]
+ insts.push(Instruction::LocalSet(address_local));
+ // []
+ insts.push(Instruction::MemorySize(memarg.memory_index));
+ // [mem_size_in_pages:address_type]
+ insts.push(int_const_inst(address_type, 65_536));
+ // [mem_size_in_pages:address_type wasm_page_size:address_type]
+ insts.push(int_mul_inst(address_type));
+ // [mem_size_in_bytes:address_type]
+ insts.push(int_const_inst(
+ address_type,
+ (memarg.offset + size_of_type_in_memory(store_type)) as i64,
+ ));
+ // [mem_size_in_bytes:address_type offset_and_size:address_type]
+ insts.push(Instruction::LocalGet(address_local));
+ // [mem_size_in_bytes:address_type offset_and_size:address_type address:address_type]
+ insts.push(int_add_inst(address_type));
+ // [mem_size_in_bytes:address_type highest_byte_accessed:address_type]
+ insts.push(int_le_u_inst(address_type));
+ // [store_will_trap:i32]
+ insts.push(Instruction::If(BlockType::Empty));
+ insts.push(Instruction::Else);
+ {
+ // []
+ insts.push(Instruction::LocalGet(address_local));
+ // [address:address_type]
+ insts.push(int_const_inst(address_type, 0));
+ // [address:address_type 0:address_type]
+ insts.push(int_le_s_inst(address_type));
+        // [store_will_trap:i32]
+ insts.push(Instruction::If(BlockType::Empty));
+ insts.push(Instruction::Else);
+ {
+ // []
+ insts.push(Instruction::LocalGet(address_local));
+ // [address:address_type]
+ insts.push(Instruction::LocalGet(value_local));
+ // [address:address_type value:store_type]
+ insts.push(inst);
+ // []
+ }
+ insts.push(Instruction::End);
+ }
+ // []
+ insts.push(Instruction::End);
+}
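+
+// Note that unlike `load` above, `store` nests `if`/`else` blocks: the "then"
+// arms (taken when the access would trap) are empty, so the store is simply
+// skipped and the address and value stay in their locals.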
+
+// Unsigned integer division and remainder will trap when
+// the divisor is 0. To avoid the trap, we will set any 0
+// divisors to 1 prior to the operation.
+//
+// The code below is equivalent to this expression:
+//
+// local.set $temp_divisor
+// (select (i32.const 1) (local.get $temp_divisor) (i32.eqz (local.get $temp_divisor)))
+pub(crate) fn unsigned_div_rem<'a>(
+ inst: Instruction<'a>,
+ builder: &mut CodeBuilder,
+ insts: &mut Vec<Instruction<'a>>,
+) {
+ let op_type = type_of_integer_operation(&inst);
+ let temp_divisor = builder.alloc_local(op_type);
+
+ // [dividend:op_type divisor:op_type]
+ insts.push(Instruction::LocalSet(temp_divisor));
+ // [dividend:op_type]
+ insts.push(int_const_inst(op_type, 1));
+ // [dividend:op_type 1:op_type]
+ insts.push(Instruction::LocalGet(temp_divisor));
+ // [dividend:op_type 1:op_type divisor:op_type]
+ insts.push(Instruction::LocalGet(temp_divisor));
+ // [dividend:op_type 1:op_type divisor:op_type divisor:op_type]
+ insts.push(eqz_inst(op_type));
+ // [dividend:op_type 1:op_type divisor:op_type is_zero:i32]
+ insts.push(Instruction::Select);
+ // [dividend:op_type divisor:op_type]
+ insts.push(inst);
+ // [result:op_type]
+}
+
+pub(crate) fn trunc<'a>(
+ inst: Instruction<'a>,
+ builder: &mut CodeBuilder,
+ insts: &mut Vec<Instruction<'a>>,
+) {
+    // If the input is NaN or ±inf, replace it with a dummy value. We check for
+    // NaN with `ne` because NaN is the only value that is not equal to itself.
+ let conv_type = type_of_float_conversion(&inst);
+ let temp_float = builder.alloc_local(conv_type);
+ // [input:conv_type]
+ insts.push(Instruction::LocalTee(temp_float));
+ // [input:conv_type]
+ insts.push(Instruction::LocalGet(temp_float));
+ // [input:conv_type input:conv_type]
+ insts.push(ne_inst(conv_type));
+ // [is_nan:i32]
+ insts.push(Instruction::LocalGet(temp_float));
+ // [is_nan:i32 input:conv_type]
+ insts.push(flt_inf_const_inst(conv_type));
+ // [is_nan:i32 input:conv_type inf:conv_type]
+ insts.push(eq_inst(conv_type));
+ // [is_nan:i32 is_inf:i32]
+ insts.push(Instruction::LocalGet(temp_float));
+ // [is_nan:i32 is_inf:i32 input:conv_type]
+ insts.push(flt_neg_inf_const_inst(conv_type));
+ // [is_nan:i32 is_inf:i32 input:conv_type neg_inf:conv_type]
+ insts.push(eq_inst(conv_type));
+ // [is_nan:i32 is_inf:i32 is_neg_inf:i32]
+ insts.push(Instruction::I32Or);
+ // [is_nan:i32 is_±inf:i32]
+ insts.push(Instruction::I32Or);
+ // [is_nan_or_inf:i32]
+ insts.push(Instruction::If(BlockType::Empty));
+ {
+ // []
+ insts.push(dummy_value_inst(conv_type));
+ // [0:conv_type]
+ insts.push(Instruction::LocalSet(temp_float));
+ // []
+ }
+ insts.push(Instruction::End);
+ // []
+ insts.push(Instruction::LocalGet(temp_float));
+ // [input_or_0:conv_type]
+
+ // first ensure that it is >= the min value of our target type
+ insts.push(min_input_const_for_trunc(&inst));
+ // [input_or_0:conv_type min_value_of_target_type:conv_type]
+ insts.push(flt_lt_inst(conv_type));
+ // [input_lt_min:i32]
+ insts.push(Instruction::If(BlockType::Empty));
+ {
+ // []
+ insts.push(min_input_const_for_trunc(&inst));
+ // [min_value_of_target_type:conv_type]
+ insts.push(Instruction::LocalSet(temp_float));
+ }
+ insts.push(Instruction::End);
+ // []
+ insts.push(Instruction::LocalGet(temp_float));
+ // [coerced_input:conv_type]
+
+ // next ensure that it is <= the max value of our target type
+ insts.push(max_input_const_for_trunc(&inst));
+    // [coerced_input:conv_type max_value_of_target_type:conv_type]
+    insts.push(flt_gt_inst(conv_type));
+    // [input_gt_max:i32]
+ insts.push(Instruction::If(BlockType::Empty));
+ {
+ // []
+ insts.push(max_input_const_for_trunc(&inst));
+ // [max_value_of_target_type:conv_type]
+ insts.push(Instruction::LocalSet(temp_float));
+ }
+ insts.push(Instruction::End);
+ // []
+ insts.push(Instruction::LocalGet(temp_float));
+ // [coerced_input:conv_type]
+ insts.push(inst);
+}
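+
+// In short, `trunc` above first turns NaN and ±inf into 0, then clamps the
+// input into [min_input_const_for_trunc, max_input_const_for_trunc] before
+// performing the (now non-trapping) conversion.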
+
+// Signed division and remainder will trap when the divisor is 0. Signed
+// division additionally traps when the dividend is -2^(n-1) and the divisor
+// is -1, since the quotient, 2^(n-1), is not representable in an n-bit
+// signed integer.
+pub(crate) fn signed_div_rem<'a>(
+ inst: Instruction<'a>,
+ builder: &mut CodeBuilder,
+ insts: &mut Vec<Instruction<'a>>,
+) {
+ // If divisor is 0, replace with 1
+ let op_type = type_of_integer_operation(&inst);
+ let temp_divisor = builder.alloc_local(op_type);
+ // [dividend:op_type divisor:op_type]
+ insts.push(Instruction::LocalSet(temp_divisor));
+ // [dividend:op_type]
+ insts.push(int_const_inst(op_type, 1));
+ // [dividend:op_type 1:op_type]
+ insts.push(Instruction::LocalGet(temp_divisor));
+ // [dividend:op_type 1:op_type divisor:op_type]
+ insts.push(Instruction::LocalGet(temp_divisor));
+ // [dividend:op_type 1:op_type divisor:op_type divisor:op_type]
+ insts.push(eqz_inst(op_type));
+ // [dividend:op_type 1:op_type divisor:op_type is_zero:i32]
+ insts.push(Instruction::Select);
+ // [dividend:op_type divisor:op_type]
+    // If the dividend and divisor are int.min and -1, replace the
+    // divisor with 1.
+ let temp_dividend = builder.alloc_local(op_type);
+ insts.push(Instruction::LocalSet(temp_divisor));
+ // [dividend:op_type]
+ insts.push(Instruction::LocalSet(temp_dividend));
+ // []
+ insts.push(Instruction::Block(wasm_encoder::BlockType::Empty));
+ {
+ insts.push(Instruction::Block(wasm_encoder::BlockType::Empty));
+ {
+ // []
+ insts.push(Instruction::LocalGet(temp_dividend));
+ // [dividend:op_type]
+ insts.push(Instruction::LocalGet(temp_divisor));
+ // [dividend:op_type divisor:op_type]
+ insts.push(Instruction::LocalSet(temp_divisor));
+ // [dividend:op_type]
+ insts.push(Instruction::LocalTee(temp_dividend));
+ // [dividend:op_type]
+ insts.push(int_min_const_inst(op_type));
+ // [dividend:op_type int_min:op_type]
+ insts.push(ne_inst(op_type));
+ // [not_int_min:i32]
+ insts.push(Instruction::BrIf(0));
+ // []
+ insts.push(Instruction::LocalGet(temp_divisor));
+ // [divisor:op_type]
+ insts.push(int_const_inst(op_type, -1));
+ // [divisor:op_type -1:op_type]
+ insts.push(ne_inst(op_type));
+ // [not_neg_one:i32]
+ insts.push(Instruction::BrIf(0));
+ // []
+ insts.push(int_const_inst(op_type, 1));
+            // [1:op_type]
+ insts.push(Instruction::LocalSet(temp_divisor));
+ // []
+ insts.push(Instruction::Br(1));
+ }
+ // []
+ insts.push(Instruction::End);
+ }
+ // []
+ insts.push(Instruction::End);
+ // []
+ insts.push(Instruction::LocalGet(temp_dividend));
+ // [dividend:op_type]
+ insts.push(Instruction::LocalGet(temp_divisor));
+ // [dividend:op_type divisor:op_type]
+ insts.push(inst);
+}
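+
+// For example, for `i32.div_s` this rewrites a zero divisor to 1 and, when the
+// dividend is i32::MIN and the divisor is -1 (whose quotient would overflow),
+// also rewrites the divisor to 1.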
+
+fn get_memarg(inst: &Instruction) -> wasm_encoder::MemArg {
+ match *inst {
+ Instruction::I32Load(memarg)
+ | Instruction::I64Load(memarg)
+ | Instruction::F32Load(memarg)
+ | Instruction::F64Load(memarg)
+ | Instruction::I32Load8S(memarg)
+ | Instruction::I32Load8U(memarg)
+ | Instruction::I32Load16S(memarg)
+ | Instruction::I32Load16U(memarg)
+ | Instruction::I64Load8S(memarg)
+ | Instruction::I64Load8U(memarg)
+ | Instruction::I64Load16S(memarg)
+ | Instruction::I64Load16U(memarg)
+ | Instruction::I64Load32S(memarg)
+ | Instruction::I64Load32U(memarg)
+ | Instruction::V128Load(memarg)
+ | Instruction::V128Load8x8S(memarg)
+ | Instruction::V128Load8x8U(memarg)
+ | Instruction::V128Load16x4S(memarg)
+ | Instruction::V128Load16x4U(memarg)
+ | Instruction::V128Load32x2S(memarg)
+ | Instruction::V128Load32x2U(memarg)
+ | Instruction::V128Load8Splat(memarg)
+ | Instruction::V128Load16Splat(memarg)
+ | Instruction::V128Load32Splat(memarg)
+ | Instruction::V128Load64Splat(memarg)
+ | Instruction::V128Load32Zero(memarg)
+ | Instruction::V128Load64Zero(memarg)
+ | Instruction::I32Store(memarg)
+ | Instruction::I64Store(memarg)
+ | Instruction::F32Store(memarg)
+ | Instruction::F64Store(memarg)
+ | Instruction::I32Store8(memarg)
+ | Instruction::I32Store16(memarg)
+ | Instruction::I64Store8(memarg)
+ | Instruction::I64Store16(memarg)
+ | Instruction::I64Store32(memarg)
+ | Instruction::V128Store(memarg) => memarg,
+ _ => unreachable!(),
+ }
+}
+
+fn dummy_value_inst<'a>(ty: ValType) -> Instruction<'a> {
+ match ty {
+ ValType::I32 => Instruction::I32Const(0),
+ ValType::I64 => Instruction::I64Const(0),
+ ValType::F32 => Instruction::F32Const(0.0),
+ ValType::F64 => Instruction::F64Const(0.0),
+ ValType::V128 => Instruction::V128Const(0),
+ ValType::Ref(ty) => {
+ assert!(ty.nullable);
+ Instruction::RefNull(ty.heap_type)
+ }
+ }
+}
+
+fn eq_inst<'a>(ty: ValType) -> Instruction<'a> {
+ match ty {
+ ValType::F32 => Instruction::F32Eq,
+ ValType::F64 => Instruction::F64Eq,
+ ValType::I32 => Instruction::I32Eq,
+ ValType::I64 => Instruction::I64Eq,
+ _ => panic!("not a numeric type"),
+ }
+}
+
+fn eqz_inst<'a>(ty: ValType) -> Instruction<'a> {
+ match ty {
+ ValType::I32 => Instruction::I32Eqz,
+ ValType::I64 => Instruction::I64Eqz,
+ _ => panic!("not an integer type"),
+ }
+}
+
+fn type_of_integer_operation(inst: &Instruction) -> ValType {
+ match inst {
+ Instruction::I32DivU
+ | Instruction::I32DivS
+ | Instruction::I32RemU
+ | Instruction::I32RemS => ValType::I32,
+ Instruction::I64RemU
+ | Instruction::I64DivU
+ | Instruction::I64DivS
+ | Instruction::I64RemS => ValType::I64,
+ _ => panic!("not integer division or remainder"),
+ }
+}
+
+fn type_of_float_conversion(inst: &Instruction) -> ValType {
+ match inst {
+ Instruction::I32TruncF32S
+ | Instruction::I32TruncF32U
+ | Instruction::I64TruncF32S
+ | Instruction::I64TruncF32U => ValType::F32,
+ Instruction::I32TruncF64S
+ | Instruction::I32TruncF64U
+ | Instruction::I64TruncF64S
+ | Instruction::I64TruncF64U => ValType::F64,
+ _ => panic!("not a float -> integer conversion"),
+ }
+}
+
+fn min_input_const_for_trunc<'a>(inst: &Instruction) -> Instruction<'a> {
+ // This is the minimum float value that is representable as an i64
+ let min_f64 = -9_223_372_036_854_775_000f64;
+ let min_f32 = -9_223_372_000_000_000_000f32;
+
+    // This is the minimum float value that is representable as an i32
+ let min_f32_as_i32 = -2_147_483_500f32;
+ match inst {
+ Instruction::I32TruncF32S => Instruction::F32Const(min_f32_as_i32),
+ Instruction::I32TruncF32U => Instruction::F32Const(0.0),
+ Instruction::I64TruncF32S => Instruction::F32Const(min_f32),
+ Instruction::I64TruncF32U => Instruction::F32Const(0.0),
+ Instruction::I32TruncF64S => Instruction::F64Const(i32::MIN as f64),
+ Instruction::I32TruncF64U => Instruction::F64Const(0.0),
+ Instruction::I64TruncF64S => Instruction::F64Const(min_f64),
+ Instruction::I64TruncF64U => Instruction::F64Const(0.0),
+ _ => panic!("not a trunc instruction"),
+ }
+}
+
+fn max_input_const_for_trunc<'a>(inst: &Instruction) -> Instruction<'a> {
+    // This is the maximum float value that is representable as an i64
+ let max_f64_as_i64 = 9_223_372_036_854_775_000f64;
+ let max_f32_as_i64 = 9_223_371_500_000_000_000f32;
+
+    // This is the maximum float value that is representable as an i32
+ let max_f32_as_i32 = 2_147_483_500f32;
+ match inst {
+ Instruction::I32TruncF32S | Instruction::I32TruncF32U => {
+ Instruction::F32Const(max_f32_as_i32)
+ }
+ Instruction::I64TruncF32S | Instruction::I64TruncF32U => {
+ Instruction::F32Const(max_f32_as_i64)
+ }
+ Instruction::I32TruncF64S | Instruction::I32TruncF64U => {
+ Instruction::F64Const(i32::MAX as f64)
+ }
+ Instruction::I64TruncF64S | Instruction::I64TruncF64U => {
+ Instruction::F64Const(max_f64_as_i64)
+ }
+ _ => panic!("not a trunc instruction"),
+ }
+}
+
+fn type_of_memory_access(inst: &Instruction) -> ValType {
+ match inst {
+ Instruction::I32Load(_)
+ | Instruction::I32Load8S(_)
+ | Instruction::I32Load8U(_)
+ | Instruction::I32Load16S(_)
+ | Instruction::I32Load16U(_)
+ | Instruction::I32Store(_)
+ | Instruction::I32Store8(_)
+ | Instruction::I32Store16(_) => ValType::I32,
+
+ Instruction::I64Load(_)
+ | Instruction::I64Load8S(_)
+ | Instruction::I64Load8U(_)
+ | Instruction::I64Load16S(_)
+ | Instruction::I64Load16U(_)
+ | Instruction::I64Load32S(_)
+ | Instruction::I64Load32U(_)
+ | Instruction::I64Store(_)
+ | Instruction::I64Store8(_)
+ | Instruction::I64Store16(_)
+ | Instruction::I64Store32(_) => ValType::I64,
+
+ Instruction::F32Load(_) | Instruction::F32Store(_) => ValType::F32,
+
+ Instruction::F64Load(_) | Instruction::F64Store(_) => ValType::F64,
+
+ Instruction::V128Load { .. }
+ | Instruction::V128Load8x8S { .. }
+ | Instruction::V128Load8x8U { .. }
+ | Instruction::V128Load16x4S { .. }
+ | Instruction::V128Load16x4U { .. }
+ | Instruction::V128Load32x2S { .. }
+ | Instruction::V128Load32x2U { .. }
+ | Instruction::V128Load8Splat { .. }
+ | Instruction::V128Load16Splat { .. }
+ | Instruction::V128Load32Splat { .. }
+ | Instruction::V128Load64Splat { .. }
+ | Instruction::V128Load32Zero { .. }
+ | Instruction::V128Load64Zero { .. }
+ | Instruction::V128Store { .. } => ValType::V128,
+
+ _ => panic!("not a memory access instruction"),
+ }
+}
+
+fn int_min_const_inst<'a>(ty: ValType) -> Instruction<'a> {
+ match ty {
+ ValType::I32 => Instruction::I32Const(i32::MIN),
+ ValType::I64 => Instruction::I64Const(i64::MIN),
+ _ => panic!("not an int type"),
+ }
+}
+
+fn int_const_inst<'a>(ty: ValType, x: i64) -> Instruction<'a> {
+ match ty {
+ ValType::I32 => Instruction::I32Const(x as i32),
+ ValType::I64 => Instruction::I64Const(x),
+ _ => panic!("not an int type"),
+ }
+}
+
+fn int_mul_inst<'a>(ty: ValType) -> Instruction<'a> {
+ match ty {
+ ValType::I32 => Instruction::I32Mul,
+ ValType::I64 => Instruction::I64Mul,
+ _ => panic!("not an int type"),
+ }
+}
+
+fn int_add_inst<'a>(ty: ValType) -> Instruction<'a> {
+ match ty {
+ ValType::I32 => Instruction::I32Add,
+ ValType::I64 => Instruction::I64Add,
+ _ => panic!("not an int type"),
+ }
+}
+
+fn int_le_u_inst<'a>(ty: ValType) -> Instruction<'a> {
+ match ty {
+ ValType::I32 => Instruction::I32LeU,
+ ValType::I64 => Instruction::I64LeU,
+ _ => panic!("not an int type"),
+ }
+}
+
+fn int_le_s_inst<'a>(ty: ValType) -> Instruction<'a> {
+ match ty {
+ ValType::I32 => Instruction::I32LeS,
+ ValType::I64 => Instruction::I64LeS,
+ _ => panic!("not an int type"),
+ }
+}
+
+fn ne_inst<'a>(ty: ValType) -> Instruction<'a> {
+ match ty {
+ ValType::I32 => Instruction::I32Ne,
+ ValType::I64 => Instruction::I64Ne,
+ ValType::F32 => Instruction::F32Ne,
+ ValType::F64 => Instruction::F64Ne,
+ _ => panic!("not a numeric type"),
+ }
+}
+
+fn flt_lt_inst<'a>(ty: ValType) -> Instruction<'a> {
+ match ty {
+ ValType::F32 => Instruction::F32Lt,
+ ValType::F64 => Instruction::F64Lt,
+ _ => panic!("not a float type"),
+ }
+}
+
+fn flt_gt_inst<'a>(ty: ValType) -> Instruction<'a> {
+ match ty {
+ ValType::F32 => Instruction::F32Gt,
+ ValType::F64 => Instruction::F64Gt,
+ _ => panic!("not a float type"),
+ }
+}
+
+fn flt_inf_const_inst<'a>(ty: ValType) -> Instruction<'a> {
+ match ty {
+ ValType::F32 => Instruction::F32Const(f32::INFINITY),
+ ValType::F64 => Instruction::F64Const(f64::INFINITY),
+ _ => panic!("not a float type"),
+ }
+}
+
+fn flt_neg_inf_const_inst<'a>(ty: ValType) -> Instruction<'a> {
+ match ty {
+ ValType::F32 => Instruction::F32Const(f32::NEG_INFINITY),
+ ValType::F64 => Instruction::F64Const(f64::NEG_INFINITY),
+ _ => panic!("not a float type"),
+ }
+}
+
+fn size_of_type_in_memory(ty: ValType) -> u64 {
+ match ty {
+ ValType::I32 => 4,
+ ValType::I64 => 8,
+ ValType::F32 => 4,
+ ValType::F64 => 8,
+ ValType::V128 => 16,
+ ValType::Ref(_) => panic!("not a memory type"),
+ }
+}
diff --git a/third_party/rust/wasm-smith/src/core/encode.rs b/third_party/rust/wasm-smith/src/core/encode.rs
new file mode 100644
index 0000000000..7c781dc731
--- /dev/null
+++ b/third_party/rust/wasm-smith/src/core/encode.rs
@@ -0,0 +1,299 @@
+use super::*;
+use std::convert::TryFrom;
+
+impl Module {
+ /// Encode this Wasm module into bytes.
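+    ///
+    /// Sections are emitted in the standard binary section order (types,
+    /// imports, functions, tables, memories, tags, globals, exports, start,
+    /// elements, data count, code, data), and empty sections are omitted.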
+ pub fn to_bytes(&self) -> Vec<u8> {
+ self.encoded().finish()
+ }
+
+ fn encoded(&self) -> wasm_encoder::Module {
+ let mut module = wasm_encoder::Module::new();
+
+ self.encode_types(&mut module);
+ self.encode_imports(&mut module);
+ self.encode_funcs(&mut module);
+ self.encode_tables(&mut module);
+ self.encode_memories(&mut module);
+ self.encode_tags(&mut module);
+ self.encode_globals(&mut module);
+ self.encode_exports(&mut module);
+ self.encode_start(&mut module);
+ self.encode_elems(&mut module);
+ self.encode_data_count(&mut module);
+ self.encode_code(&mut module);
+ self.encode_data(&mut module);
+
+ module
+ }
+
+ fn encode_types(&self, module: &mut wasm_encoder::Module) {
+ if !self.should_encode_types {
+ return;
+ }
+
+ let mut section = wasm_encoder::TypeSection::new();
+
+ for group in &self.rec_groups {
+ if group.end - group.start == 1 {
+ let ty = &self.types[group.start];
+ section.subtype(&wasm_encoder::SubType {
+ is_final: ty.is_final,
+ supertype_idx: ty.supertype,
+ composite_type: match &ty.composite_type {
+ CompositeType::Array(a) => wasm_encoder::CompositeType::Array(a.clone()),
+ CompositeType::Func(f) => {
+ wasm_encoder::CompositeType::Func(wasm_encoder::FuncType::new(
+ f.params.iter().cloned(),
+ f.results.iter().cloned(),
+ ))
+ }
+ CompositeType::Struct(s) => wasm_encoder::CompositeType::Struct(s.clone()),
+ },
+ });
+ } else {
+ section.rec(
+ self.types[group.clone()]
+ .iter()
+ .map(|ty| wasm_encoder::SubType {
+ is_final: ty.is_final,
+ supertype_idx: ty.supertype,
+ composite_type: match &ty.composite_type {
+ CompositeType::Array(a) => {
+ wasm_encoder::CompositeType::Array(a.clone())
+ }
+ CompositeType::Func(f) => {
+ wasm_encoder::CompositeType::Func(wasm_encoder::FuncType::new(
+ f.params.iter().cloned(),
+ f.results.iter().cloned(),
+ ))
+ }
+ CompositeType::Struct(s) => {
+ wasm_encoder::CompositeType::Struct(s.clone())
+ }
+ },
+ }),
+ );
+ }
+ }
+
+ module.section(&section);
+ }
+
+ fn encode_imports(&self, module: &mut wasm_encoder::Module) {
+ if !self.should_encode_imports {
+ return;
+ }
+
+ let mut section = wasm_encoder::ImportSection::new();
+ for im in &self.imports {
+ section.import(
+ &im.module,
+ &im.field,
+ translate_entity_type(&im.entity_type),
+ );
+ }
+ module.section(&section);
+ }
+
+ fn encode_tags(&self, module: &mut wasm_encoder::Module) {
+ if self.num_defined_tags == 0 {
+ return;
+ }
+ let mut tags = wasm_encoder::TagSection::new();
+ for tag in self.tags[self.tags.len() - self.num_defined_tags..].iter() {
+ tags.tag(wasm_encoder::TagType {
+ kind: wasm_encoder::TagKind::Exception,
+ func_type_idx: tag.func_type_idx,
+ });
+ }
+ module.section(&tags);
+ }
+
+ fn encode_funcs(&self, module: &mut wasm_encoder::Module) {
+ if self.num_defined_funcs == 0 {
+ return;
+ }
+ let mut funcs = wasm_encoder::FunctionSection::new();
+ for (ty, _) in self.funcs[self.funcs.len() - self.num_defined_funcs..].iter() {
+ funcs.function(*ty);
+ }
+ module.section(&funcs);
+ }
+
+ fn encode_tables(&self, module: &mut wasm_encoder::Module) {
+ if self.num_defined_tables == 0 {
+ return;
+ }
+ let mut tables = wasm_encoder::TableSection::new();
+ for t in self.tables[self.tables.len() - self.num_defined_tables..].iter() {
+ tables.table(*t);
+ }
+ module.section(&tables);
+ }
+
+ fn encode_memories(&self, module: &mut wasm_encoder::Module) {
+ if self.num_defined_memories == 0 {
+ return;
+ }
+ let mut mems = wasm_encoder::MemorySection::new();
+ for m in self.memories[self.memories.len() - self.num_defined_memories..].iter() {
+ mems.memory(*m);
+ }
+ module.section(&mems);
+ }
+
+ fn encode_globals(&self, module: &mut wasm_encoder::Module) {
+ if self.globals.is_empty() {
+ return;
+ }
+ let mut globals = wasm_encoder::GlobalSection::new();
+ for (idx, expr) in &self.defined_globals {
+ let ty = &self.globals[*idx as usize];
+ match expr {
+ GlobalInitExpr::ConstExpr(expr) => globals.global(*ty, expr),
+ GlobalInitExpr::FuncRef(func) => globals.global(*ty, &ConstExpr::ref_func(*func)),
+ };
+ }
+ module.section(&globals);
+ }
+
+ fn encode_exports(&self, module: &mut wasm_encoder::Module) {
+ if self.exports.is_empty() {
+ return;
+ }
+ let mut exports = wasm_encoder::ExportSection::new();
+ for (name, kind, idx) in &self.exports {
+ exports.export(name, *kind, *idx);
+ }
+ module.section(&exports);
+ }
+
+ fn encode_start(&self, module: &mut wasm_encoder::Module) {
+ if let Some(f) = self.start {
+ module.section(&wasm_encoder::StartSection { function_index: f });
+ }
+ }
+
+ fn encode_elems(&self, module: &mut wasm_encoder::Module) {
+ if self.elems.is_empty() {
+ return;
+ }
+ let mut elems = wasm_encoder::ElementSection::new();
+ let mut exps = vec![];
+ for el in &self.elems {
+ let elements = match &el.items {
+ Elements::Expressions(es) => {
+ exps.clear();
+ exps.extend(es.iter().map(|e| {
+ // TODO(nagisa): generate global.get of imported ref globals too.
+ match e {
+ Some(i) => match el.ty {
+ RefType::FUNCREF => wasm_encoder::ConstExpr::ref_func(*i),
+ _ => unreachable!(),
+ },
+ None => wasm_encoder::ConstExpr::ref_null(el.ty.heap_type),
+ }
+ }));
+ wasm_encoder::Elements::Expressions(el.ty, &exps)
+ }
+ Elements::Functions(fs) => wasm_encoder::Elements::Functions(fs),
+ };
+ match &el.kind {
+ ElementKind::Active { table, offset } => {
+ let offset = match *offset {
+ Offset::Const32(n) => ConstExpr::i32_const(n),
+ Offset::Const64(n) => ConstExpr::i64_const(n),
+ Offset::Global(g) => ConstExpr::global_get(g),
+ };
+ elems.active(*table, &offset, elements);
+ }
+ ElementKind::Passive => {
+ elems.passive(elements);
+ }
+ ElementKind::Declared => {
+ elems.declared(elements);
+ }
+ }
+ }
+ module.section(&elems);
+ }
+
+ fn encode_data_count(&self, module: &mut wasm_encoder::Module) {
+ // Without bulk memory there's no need for a data count section,
+ if !self.config.bulk_memory_enabled {
+ return;
+ }
+        // ... and also if there's no data there's no need for a data count section.
+ if self.data.is_empty() {
+ return;
+ }
+ module.section(&wasm_encoder::DataCountSection {
+ count: u32::try_from(self.data.len()).unwrap(),
+ });
+ }
+
+ fn encode_code(&self, module: &mut wasm_encoder::Module) {
+ if self.code.is_empty() {
+ return;
+ }
+ let mut code = wasm_encoder::CodeSection::new();
+ for c in &self.code {
+ // Skip the run-length encoding because it is a little
+ // annoying to compute; use a length of one for every local.
+ let mut func = wasm_encoder::Function::new(c.locals.iter().map(|l| (1, *l)));
+ match &c.instructions {
+ Instructions::Generated(instrs) => {
+ for instr in instrs {
+ func.instruction(instr);
+ }
+ func.instruction(&wasm_encoder::Instruction::End);
+ }
+ Instructions::Arbitrary(body) => {
+ func.raw(body.iter().copied());
+ }
+ }
+ code.function(&func);
+ }
+ module.section(&code);
+ }
+
+ fn encode_data(&self, module: &mut wasm_encoder::Module) {
+ if self.data.is_empty() {
+ return;
+ }
+ let mut data = wasm_encoder::DataSection::new();
+ for seg in &self.data {
+ match &seg.kind {
+ DataSegmentKind::Active {
+ memory_index,
+ offset,
+ } => {
+ let offset = match *offset {
+ Offset::Const32(n) => ConstExpr::i32_const(n),
+ Offset::Const64(n) => ConstExpr::i64_const(n),
+ Offset::Global(g) => ConstExpr::global_get(g),
+ };
+ data.active(*memory_index, &offset, seg.init.iter().copied());
+ }
+ DataSegmentKind::Passive => {
+ data.passive(seg.init.iter().copied());
+ }
+ }
+ }
+ module.section(&data);
+ }
+}
+
+pub(crate) fn translate_entity_type(ty: &EntityType) -> wasm_encoder::EntityType {
+ match ty {
+ EntityType::Tag(t) => wasm_encoder::EntityType::Tag(wasm_encoder::TagType {
+ kind: wasm_encoder::TagKind::Exception,
+ func_type_idx: t.func_type_idx,
+ }),
+ EntityType::Func(f, _) => wasm_encoder::EntityType::Function(*f),
+ EntityType::Table(ty) => (*ty).into(),
+ EntityType::Memory(m) => (*m).into(),
+ EntityType::Global(g) => (*g).into(),
+ }
+}
diff --git a/third_party/rust/wasm-smith/src/core/terminate.rs b/third_party/rust/wasm-smith/src/core/terminate.rs
new file mode 100644
index 0000000000..adcfeed54f
--- /dev/null
+++ b/third_party/rust/wasm-smith/src/core/terminate.rs
@@ -0,0 +1,70 @@
+use super::*;
+use std::mem;
+use wasm_encoder::BlockType;
+
+impl Module {
+ /// Ensure that all of this Wasm module's functions will terminate when
+ /// executed.
+ ///
+ /// This adds a new mutable, exported global to the module to keep track of
+ /// how much "fuel" is left. Fuel is decremented at the head of each loop
+ /// and function. When fuel reaches zero, a trap is raised.
+ ///
+ /// The index of the fuel global is returned, so that you may control how
+ /// much fuel the module is given.
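+    ///
+    /// # Example
+    ///
+    /// A minimal sketch, assuming `module` was already generated (the
+    /// `generated_module` helper here is hypothetical, standing in for
+    /// whatever produces your `Module`):
+    ///
+    /// ```no_run
+    /// # use wasm_smith::Module;
+    /// # fn generated_module() -> Module { unimplemented!() }
+    /// let mut module = generated_module();
+    /// // Allow roughly 1000 fuel-consuming steps (function entries and loop
+    /// // iterations) before the module traps.
+    /// let _fuel_global = module.ensure_termination(1000);
+    /// let wasm_bytes = module.to_bytes();
+    /// ```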
+ pub fn ensure_termination(&mut self, default_fuel: u32) -> u32 {
+ let fuel_global = self.globals.len() as u32;
+ self.globals.push(GlobalType {
+ val_type: ValType::I32,
+ mutable: true,
+ });
+ self.defined_globals.push((
+ fuel_global,
+ GlobalInitExpr::ConstExpr(ConstExpr::i32_const(default_fuel as i32)),
+ ));
+
+ for code in &mut self.code {
+ let check_fuel = |insts: &mut Vec<Instruction>| {
+ // if fuel == 0 { trap }
+ insts.push(Instruction::GlobalGet(fuel_global));
+ insts.push(Instruction::I32Eqz);
+ insts.push(Instruction::If(BlockType::Empty));
+ insts.push(Instruction::Unreachable);
+ insts.push(Instruction::End);
+
+ // fuel -= 1
+ insts.push(Instruction::GlobalGet(fuel_global));
+ insts.push(Instruction::I32Const(1));
+ insts.push(Instruction::I32Sub);
+ insts.push(Instruction::GlobalSet(fuel_global));
+ };
+
+ let instrs = match &mut code.instructions {
+ Instructions::Generated(list) => list,
+                // `Arbitrary` bodies only appear in modules contained within
+                // `MaybeInvalidModule`, which doesn't expose its internal
+                // `Module`, so this case is unreachable here.
+ Instructions::Arbitrary(_) => unreachable!(),
+ };
+ let mut new_insts = Vec::with_capacity(instrs.len() * 2);
+
+ // Check fuel at the start of functions to deal with infinite
+ // recursion.
+ check_fuel(&mut new_insts);
+
+ for inst in mem::replace(instrs, vec![]) {
+ let is_loop = matches!(&inst, Instruction::Loop(_));
+ new_insts.push(inst);
+
+ // Check fuel at loop heads to deal with infinite loops.
+ if is_loop {
+ check_fuel(&mut new_insts);
+ }
+ }
+
+ *instrs = new_insts;
+ }
+
+ fuel_global
+ }
+}
diff --git a/third_party/rust/wasm-smith/src/lib.rs b/third_party/rust/wasm-smith/src/lib.rs
new file mode 100644
index 0000000000..8d17290473
--- /dev/null
+++ b/third_party/rust/wasm-smith/src/lib.rs
@@ -0,0 +1,192 @@
+//! A WebAssembly test case generator.
+//!
+//! ## Usage
+//!
+//! First, use [`cargo fuzz`](https://github.com/rust-fuzz/cargo-fuzz) to define
+//! a new fuzz target:
+//!
+//! ```shell
+//! $ cargo fuzz add my_wasm_smith_fuzz_target
+//! ```
+//!
+//! Next, add `wasm-smith` to your dependencies:
+//!
+//! ```shell
+//! $ cargo add wasm-smith
+//! ```
+//!
+//! Then, define your fuzz target so that it takes arbitrary
+//! `wasm_smith::Module`s as an argument, converts each module into serialized
+//! Wasm bytes via the `to_bytes` method, and then feeds those bytes into your
+//! system:
+//!
+//! ```no_run
+//! // fuzz/fuzz_targets/my_wasm_smith_fuzz_target.rs
+//!
+//! #![no_main]
+//!
+//! use libfuzzer_sys::fuzz_target;
+//! use wasm_smith::Module;
+//!
+//! fuzz_target!(|module: Module| {
+//! let wasm_bytes = module.to_bytes();
+//!
+//! // Your code here...
+//! });
+//! ```
+//!
+//! Finally, start fuzzing:
+//!
+//! ```shell
+//! $ cargo fuzz run my_wasm_smith_fuzz_target
+//! ```
+//!
+//! > **Note:** For a real-world example, also check out [the `validate` fuzz
+//! > target](https://github.com/fitzgen/wasm-smith/blob/main/fuzz/fuzz_targets/validate.rs)
+//! > defined in this repository. Using the `wasmparser` crate, it checks that
+//! > every module generated by `wasm-smith` validates successfully.
+//!
+//! ## Design
+//!
+//! The design and implementation strategy of wasm-smith is outlined in
+//! [this article](https://fitzgeraldnick.com/2020/08/24/writing-a-test-case-generator.html).
+
+#![deny(missing_docs, missing_debug_implementations)]
+// Needed for the `instructions!` macro in `src/core/code_builder.rs`.
+#![recursion_limit = "512"]
+
+mod component;
+mod config;
+mod core;
+
+pub use crate::core::{InstructionKind, InstructionKinds, MaybeInvalidModule, Module};
+use arbitrary::{Result, Unstructured};
+pub use component::Component;
+pub use config::{Config, MemoryOffsetChoices};
+use std::{collections::HashSet, fmt::Write, str};
+
+#[cfg(feature = "_internal_cli")]
+pub use config::InternalOptionalConfig;
+
+/// Do something an arbitrary number of times, but at least `min` and at most
+/// `max` times.
+///
+/// The first `min` iterations are mandatory; if the callback returns
+/// `Ok(false)` during one of them, the whole input is rejected with
+/// `Error::IncorrectFormat`. After that, the callback can return `Ok(false)`
+/// to exit the loop early.
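+///
+/// A sketch of typical usage, generating between 1 and 10 arbitrary items
+/// (the `items` vector and its element type are illustrative):
+///
+/// ```ignore
+/// let mut items = Vec::new();
+/// arbitrary_loop(u, 1, 10, |u| {
+///     items.push(u.arbitrary::<u32>()?);
+///     // Keep going; returning `Ok(false)` would stop the loop.
+///     Ok(true)
+/// })?;
+/// ```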
+pub(crate) fn arbitrary_loop<'a>(
+ u: &mut Unstructured<'a>,
+ min: usize,
+ max: usize,
+ mut f: impl FnMut(&mut Unstructured<'a>) -> Result<bool>,
+) -> Result<()> {
+ assert!(max >= min);
+ for _ in 0..min {
+ if !f(u)? {
+ return Err(arbitrary::Error::IncorrectFormat);
+ }
+ }
+ for _ in 0..(max - min) {
+ let keep_going = u.arbitrary().unwrap_or(false);
+ if !keep_going {
+ break;
+ }
+
+ if !f(u)? {
+ break;
+ }
+ }
+
+ Ok(())
+}
+
+/// Mirror what happens in `Arbitrary for String`, but with the string's size
+/// clamped to `max_size`.
+pub(crate) fn limited_str<'a>(max_size: usize, u: &mut Unstructured<'a>) -> Result<&'a str> {
+ let size = u.arbitrary_len::<u8>()?;
+ let size = std::cmp::min(size, max_size);
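+ // `arbitrary_len` never claims more bytes than remain in the input, so
+ // the `unwrap`s on the peek and consumption below cannot fail.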
+ match str::from_utf8(u.peek_bytes(size).unwrap()) {
+ Ok(s) => {
+ u.bytes(size).unwrap();
+ Ok(s)
+ }
+ Err(e) => {
+ let i = e.valid_up_to();
+ let valid = u.bytes(i).unwrap();
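+ // SAFETY: `valid_up_to` guarantees that `valid` is the longest
+ // prefix of the input that is valid UTF-8.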
+ let s = unsafe {
+ debug_assert!(str::from_utf8(valid).is_ok());
+ str::from_utf8_unchecked(valid)
+ };
+ Ok(s)
+ }
+ }
+}
+
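+/// Like `limited_str`, but returns an owned `String`.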
+pub(crate) fn limited_string(max_size: usize, u: &mut Unstructured) -> Result<String> {
+ Ok(limited_str(max_size, u)?.into())
+}
+
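+/// Generate an arbitrary string that is unique within `names`, appending
+/// digits until it no longer collides, and record it in `names`.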
+pub(crate) fn unique_string(
+ max_size: usize,
+ names: &mut HashSet<String>,
+ u: &mut Unstructured,
+) -> Result<String> {
+ let mut name = limited_string(max_size, u)?;
+ while names.contains(&name) {
+ write!(&mut name, "{}", names.len()).unwrap();
+ }
+ names.insert(name.clone());
+ Ok(name)
+}
+
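+/// Generate a unique kebab-case name: dash-separated words of lowercase ASCII
+/// letters and digits, where every word starts with a letter.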
+pub(crate) fn unique_kebab_string(
+ max_size: usize,
+ names: &mut HashSet<String>,
+ u: &mut Unstructured,
+) -> Result<String> {
+ let size = std::cmp::min(u.arbitrary_len::<u8>()?, max_size);
+ let mut name = String::with_capacity(size);
+ let mut require_alpha = true;
+ for _ in 0..size {
+ name.push(match u.int_in_range::<u8>(0..=36)? {
+ // 0..=25 maps to a lowercase letter.
+ x @ 0..=25 => {
+ require_alpha = false;
+ (b'a' + x) as char
+ }
+ // 26..=35 maps to a digit, unless this position has to start a
+ // word, in which case a letter is substituted.
+ x @ 26..=35 => {
+ if require_alpha {
+ require_alpha = false;
+ (b'a' + (x - 26)) as char
+ } else {
+ (b'0' + (x - 26)) as char
+ }
+ }
+ // 36 maps to a dash, which may not start a word and which forces
+ // the next character to start a new word.
+ 36 => {
+ if require_alpha {
+ require_alpha = false;
+ 'a'
+ } else {
+ require_alpha = true;
+ '-'
+ }
+ }
+ _ => unreachable!(),
+ });
+ }
+
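+ // A kebab-case name must be non-empty and must not end with a dash.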
+ if name.is_empty() || name.ends_with('-') {
+ name.push('a');
+ }
+
+ while names.contains(&name) {
+ write!(&mut name, "{}", names.len()).unwrap();
+ }
+
+ names.insert(name.clone());
+
+ Ok(name)
+}
+
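+/// Generate a unique URL by pairing a fixed example origin with a unique
+/// kebab-case path.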
+pub(crate) fn unique_url(
+ max_size: usize,
+ names: &mut HashSet<String>,
+ u: &mut Unstructured,
+) -> Result<String> {
+ let path = unique_kebab_string(max_size, names, u)?;
+ Ok(format!("https://example.com/{path}"))
+}