summaryrefslogtreecommitdiffstats
path: root/third_party/rust/cranelift-codegen/src/ir
diff options
context:
space:
mode:
authorDaniel Baumann <daniel.baumann@progress-linux.org>2024-04-28 14:29:10 +0000
committerDaniel Baumann <daniel.baumann@progress-linux.org>2024-04-28 14:29:10 +0000
commit2aa4a82499d4becd2284cdb482213d541b8804dd (patch)
treeb80bf8bf13c3766139fbacc530efd0dd9d54394c /third_party/rust/cranelift-codegen/src/ir
parentInitial commit. (diff)
downloadfirefox-upstream.tar.xz
firefox-upstream.zip
Adding upstream version 86.0.1.upstream/86.0.1upstream
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'third_party/rust/cranelift-codegen/src/ir')
-rw-r--r--third_party/rust/cranelift-codegen/src/ir/atomic_rmw_op.rs52
-rw-r--r--third_party/rust/cranelift-codegen/src/ir/builder.rs266
-rw-r--r--third_party/rust/cranelift-codegen/src/ir/constant.rs532
-rw-r--r--third_party/rust/cranelift-codegen/src/ir/dfg.rs1314
-rw-r--r--third_party/rust/cranelift-codegen/src/ir/entities.rs522
-rw-r--r--third_party/rust/cranelift-codegen/src/ir/extfunc.rs521
-rw-r--r--third_party/rust/cranelift-codegen/src/ir/extname.rs163
-rw-r--r--third_party/rust/cranelift-codegen/src/ir/function.rs441
-rw-r--r--third_party/rust/cranelift-codegen/src/ir/globalvalue.rs155
-rw-r--r--third_party/rust/cranelift-codegen/src/ir/heap.rs62
-rw-r--r--third_party/rust/cranelift-codegen/src/ir/immediates.rs1312
-rw-r--r--third_party/rust/cranelift-codegen/src/ir/instructions.rs898
-rw-r--r--third_party/rust/cranelift-codegen/src/ir/jumptable.rs119
-rw-r--r--third_party/rust/cranelift-codegen/src/ir/layout.rs1220
-rw-r--r--third_party/rust/cranelift-codegen/src/ir/libcall.rs260
-rw-r--r--third_party/rust/cranelift-codegen/src/ir/memflags.rs117
-rw-r--r--third_party/rust/cranelift-codegen/src/ir/mod.rs115
-rw-r--r--third_party/rust/cranelift-codegen/src/ir/progpoint.rs164
-rw-r--r--third_party/rust/cranelift-codegen/src/ir/sourceloc.rs66
-rw-r--r--third_party/rust/cranelift-codegen/src/ir/stackslot.rs443
-rw-r--r--third_party/rust/cranelift-codegen/src/ir/table.rs36
-rw-r--r--third_party/rust/cranelift-codegen/src/ir/trapcode.rs134
-rw-r--r--third_party/rust/cranelift-codegen/src/ir/types.rs533
-rw-r--r--third_party/rust/cranelift-codegen/src/ir/valueloc.rs166
24 files changed, 9611 insertions, 0 deletions
diff --git a/third_party/rust/cranelift-codegen/src/ir/atomic_rmw_op.rs b/third_party/rust/cranelift-codegen/src/ir/atomic_rmw_op.rs
new file mode 100644
index 0000000000..c93756147a
--- /dev/null
+++ b/third_party/rust/cranelift-codegen/src/ir/atomic_rmw_op.rs
@@ -0,0 +1,52 @@
+/// Describes the arithmetic operation in an atomic memory read-modify-write operation.
+use core::fmt::{self, Display, Formatter};
+use core::str::FromStr;
+#[cfg(feature = "enable-serde")]
+use serde::{Deserialize, Serialize};
+
+#[derive(Clone, Copy, PartialEq, Eq, Debug, Hash)]
+#[cfg_attr(feature = "enable-serde", derive(Serialize, Deserialize))]
+/// Describes the arithmetic operation in an atomic memory read-modify-write operation.
+pub enum AtomicRmwOp {
+ /// Add
+ Add,
+ /// Sub
+ Sub,
+ /// And
+ And,
+ /// Or
+ Or,
+ /// Xor
+ Xor,
+ /// Exchange
+ Xchg,
+}
+
+impl Display for AtomicRmwOp {
+ fn fmt(&self, f: &mut Formatter) -> fmt::Result {
+ let s = match self {
+ AtomicRmwOp::Add => "add",
+ AtomicRmwOp::Sub => "sub",
+ AtomicRmwOp::And => "and",
+ AtomicRmwOp::Or => "or",
+ AtomicRmwOp::Xor => "xor",
+ AtomicRmwOp::Xchg => "xchg",
+ };
+ f.write_str(s)
+ }
+}
+
+impl FromStr for AtomicRmwOp {
+ type Err = ();
+ fn from_str(s: &str) -> Result<Self, Self::Err> {
+ match s {
+ "add" => Ok(AtomicRmwOp::Add),
+ "sub" => Ok(AtomicRmwOp::Sub),
+ "and" => Ok(AtomicRmwOp::And),
+ "or" => Ok(AtomicRmwOp::Or),
+ "xor" => Ok(AtomicRmwOp::Xor),
+ "xchg" => Ok(AtomicRmwOp::Xchg),
+ _ => Err(()),
+ }
+ }
+}
diff --git a/third_party/rust/cranelift-codegen/src/ir/builder.rs b/third_party/rust/cranelift-codegen/src/ir/builder.rs
new file mode 100644
index 0000000000..63054928f2
--- /dev/null
+++ b/third_party/rust/cranelift-codegen/src/ir/builder.rs
@@ -0,0 +1,266 @@
+//! Cranelift instruction builder.
+//!
+//! A `Builder` provides a convenient interface for inserting instructions into a Cranelift
+//! function. Many of its methods are generated from the meta language instruction definitions.
+
+use crate::ir;
+use crate::ir::types;
+use crate::ir::{DataFlowGraph, InstructionData};
+use crate::ir::{Inst, Opcode, Type, Value};
+use crate::isa;
+
+/// Base trait for instruction builders.
+///
+/// The `InstBuilderBase` trait provides the basic functionality required by the methods of the
+/// generated `InstBuilder` trait. These methods should not normally be used directly. Use the
+/// methods in the `InstBuilder` trait instead.
+///
+/// Any data type that implements `InstBuilderBase` also gets all the methods of the `InstBuilder`
+/// trait.
+pub trait InstBuilderBase<'f>: Sized {
+ /// Get an immutable reference to the data flow graph that will hold the constructed
+ /// instructions.
+ fn data_flow_graph(&self) -> &DataFlowGraph;
+ /// Get a mutable reference to the data flow graph that will hold the constructed
+ /// instructions.
+ fn data_flow_graph_mut(&mut self) -> &mut DataFlowGraph;
+
+ /// Insert an instruction and return a reference to it, consuming the builder.
+ ///
+ /// The result types may depend on a controlling type variable. For non-polymorphic
+ /// instructions with multiple results, pass `INVALID` for the `ctrl_typevar` argument.
+ fn build(self, data: InstructionData, ctrl_typevar: Type) -> (Inst, &'f mut DataFlowGraph);
+}
+
+// Include trait code generated by `cranelift-codegen/meta/src/gen_inst.rs`.
+//
+// This file defines the `InstBuilder` trait as an extension of `InstBuilderBase` with methods per
+// instruction format and per opcode.
+include!(concat!(env!("OUT_DIR"), "/inst_builder.rs"));
+
+/// Any type implementing `InstBuilderBase` gets all the `InstBuilder` methods for free.
+impl<'f, T: InstBuilderBase<'f>> InstBuilder<'f> for T {}
+
+/// Base trait for instruction inserters.
+///
+/// This is an alternative base trait for an instruction builder to implement.
+///
+/// An instruction inserter can be adapted into an instruction builder by wrapping it in an
+/// `InsertBuilder`. This provides some common functionality for instruction builders that insert
+/// new instructions, as opposed to the `ReplaceBuilder` which overwrites existing instructions.
+pub trait InstInserterBase<'f>: Sized {
+ /// Get an immutable reference to the data flow graph.
+ fn data_flow_graph(&self) -> &DataFlowGraph;
+
+ /// Get a mutable reference to the data flow graph.
+ fn data_flow_graph_mut(&mut self) -> &mut DataFlowGraph;
+
+ /// Insert a new instruction which belongs to the DFG.
+ fn insert_built_inst(self, inst: Inst, ctrl_typevar: Type) -> &'f mut DataFlowGraph;
+}
+
+use core::marker::PhantomData;
+
+/// Builder that inserts an instruction at the current position.
+///
+/// An `InsertBuilder` is a wrapper for an `InstInserterBase` that turns it into an instruction
+/// builder with some additional facilities for creating instructions that reuse existing values as
+/// their results.
+pub struct InsertBuilder<'f, IIB: InstInserterBase<'f>> {
+ inserter: IIB,
+ unused: PhantomData<&'f u32>,
+}
+
+impl<'f, IIB: InstInserterBase<'f>> InsertBuilder<'f, IIB> {
+ /// Create a new builder which inserts instructions at `pos`.
+ /// The `dfg` and `pos.layout` references should be from the same `Function`.
+ pub fn new(inserter: IIB) -> Self {
+ Self {
+ inserter,
+ unused: PhantomData,
+ }
+ }
+
+ /// Reuse result values in `reuse`.
+ ///
+ /// Convert this builder into one that will reuse the provided result values instead of
+ /// allocating new ones. The provided values for reuse must not be attached to anything. Any
+ /// missing result values will be allocated as normal.
+ ///
+ /// The `reuse` argument is expected to be an array of `Option<Value>`.
+ pub fn with_results<Array>(self, reuse: Array) -> InsertReuseBuilder<'f, IIB, Array>
+ where
+ Array: AsRef<[Option<Value>]>,
+ {
+ InsertReuseBuilder {
+ inserter: self.inserter,
+ reuse,
+ unused: PhantomData,
+ }
+ }
+
+ /// Reuse a single result value.
+ ///
+ /// Convert this into a builder that will reuse `v` as the single result value. The reused
+ /// result value `v` must not be attached to anything.
+ ///
+ /// This method should only be used when building an instruction with exactly one result. Use
+ /// `with_results()` for the more general case.
+ pub fn with_result(self, v: Value) -> InsertReuseBuilder<'f, IIB, [Option<Value>; 1]> {
+ // TODO: Specialize this to return a different builder that just attaches `v` instead of
+ // calling `make_inst_results_reusing()`.
+ self.with_results([Some(v)])
+ }
+}
+
+impl<'f, IIB: InstInserterBase<'f>> InstBuilderBase<'f> for InsertBuilder<'f, IIB> {
+ fn data_flow_graph(&self) -> &DataFlowGraph {
+ self.inserter.data_flow_graph()
+ }
+
+ fn data_flow_graph_mut(&mut self) -> &mut DataFlowGraph {
+ self.inserter.data_flow_graph_mut()
+ }
+
+ fn build(mut self, data: InstructionData, ctrl_typevar: Type) -> (Inst, &'f mut DataFlowGraph) {
+ let inst;
+ {
+ let dfg = self.inserter.data_flow_graph_mut();
+ inst = dfg.make_inst(data);
+ dfg.make_inst_results(inst, ctrl_typevar);
+ }
+ (inst, self.inserter.insert_built_inst(inst, ctrl_typevar))
+ }
+}
+
+/// Builder that inserts a new instruction like `InsertBuilder`, but reusing result values.
+pub struct InsertReuseBuilder<'f, IIB, Array>
+where
+ IIB: InstInserterBase<'f>,
+ Array: AsRef<[Option<Value>]>,
+{
+ inserter: IIB,
+ reuse: Array,
+ unused: PhantomData<&'f u32>,
+}
+
+impl<'f, IIB, Array> InstBuilderBase<'f> for InsertReuseBuilder<'f, IIB, Array>
+where
+ IIB: InstInserterBase<'f>,
+ Array: AsRef<[Option<Value>]>,
+{
+ fn data_flow_graph(&self) -> &DataFlowGraph {
+ self.inserter.data_flow_graph()
+ }
+
+ fn data_flow_graph_mut(&mut self) -> &mut DataFlowGraph {
+ self.inserter.data_flow_graph_mut()
+ }
+
+ fn build(mut self, data: InstructionData, ctrl_typevar: Type) -> (Inst, &'f mut DataFlowGraph) {
+ let inst;
+ {
+ let dfg = self.inserter.data_flow_graph_mut();
+ inst = dfg.make_inst(data);
+ // Make an `Iterator<Item = Option<Value>>`.
+ let ru = self.reuse.as_ref().iter().cloned();
+ dfg.make_inst_results_reusing(inst, ctrl_typevar, ru);
+ }
+ (inst, self.inserter.insert_built_inst(inst, ctrl_typevar))
+ }
+}
+
+/// Instruction builder that replaces an existing instruction.
+///
+/// The inserted instruction will have the same `Inst` number as the old one.
+///
+/// If the old instruction still has result values attached, it is assumed that the new instruction
+/// produces the same number and types of results. The old result values are preserved. If the
+/// replacement instruction format does not support multiple results, the builder panics. It is a
+/// bug to leave result values dangling.
+pub struct ReplaceBuilder<'f> {
+ dfg: &'f mut DataFlowGraph,
+ inst: Inst,
+}
+
+impl<'f> ReplaceBuilder<'f> {
+ /// Create a `ReplaceBuilder` that will overwrite `inst`.
+ pub fn new(dfg: &'f mut DataFlowGraph, inst: Inst) -> Self {
+ Self { dfg, inst }
+ }
+}
+
+impl<'f> InstBuilderBase<'f> for ReplaceBuilder<'f> {
+ fn data_flow_graph(&self) -> &DataFlowGraph {
+ self.dfg
+ }
+
+ fn data_flow_graph_mut(&mut self) -> &mut DataFlowGraph {
+ self.dfg
+ }
+
+ fn build(self, data: InstructionData, ctrl_typevar: Type) -> (Inst, &'f mut DataFlowGraph) {
+ // Splat the new instruction on top of the old one.
+ self.dfg[self.inst] = data;
+
+ if !self.dfg.has_results(self.inst) {
+ // The old result values were either detached or non-existent.
+ // Construct new ones.
+ self.dfg.make_inst_results(self.inst, ctrl_typevar);
+ }
+
+ (self.inst, self.dfg)
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ use crate::cursor::{Cursor, FuncCursor};
+ use crate::ir::condcodes::*;
+ use crate::ir::types::*;
+ use crate::ir::{Function, InstBuilder, ValueDef};
+
+ #[test]
+ fn types() {
+ let mut func = Function::new();
+ let block0 = func.dfg.make_block();
+ let arg0 = func.dfg.append_block_param(block0, I32);
+ let mut pos = FuncCursor::new(&mut func);
+ pos.insert_block(block0);
+
+ // Explicit types.
+ let v0 = pos.ins().iconst(I32, 3);
+ assert_eq!(pos.func.dfg.value_type(v0), I32);
+
+ // Inferred from inputs.
+ let v1 = pos.ins().iadd(arg0, v0);
+ assert_eq!(pos.func.dfg.value_type(v1), I32);
+
+ // Formula.
+ let cmp = pos.ins().icmp(IntCC::Equal, arg0, v0);
+ assert_eq!(pos.func.dfg.value_type(cmp), B1);
+ }
+
+ #[test]
+ fn reuse_results() {
+ let mut func = Function::new();
+ let block0 = func.dfg.make_block();
+ let arg0 = func.dfg.append_block_param(block0, I32);
+ let mut pos = FuncCursor::new(&mut func);
+ pos.insert_block(block0);
+
+ let v0 = pos.ins().iadd_imm(arg0, 17);
+ assert_eq!(pos.func.dfg.value_type(v0), I32);
+ let iadd = pos.prev_inst().unwrap();
+ assert_eq!(pos.func.dfg.value_def(v0), ValueDef::Result(iadd, 0));
+
+ // Detach v0 and reuse it for a different instruction.
+ pos.func.dfg.clear_results(iadd);
+ let v0b = pos.ins().with_result(v0).iconst(I32, 3);
+ assert_eq!(v0, v0b);
+ assert_eq!(pos.current_inst(), Some(iadd));
+ let iconst = pos.prev_inst().unwrap();
+ assert!(iadd != iconst);
+ assert_eq!(pos.func.dfg.value_def(v0), ValueDef::Result(iconst, 0));
+ }
+}
diff --git a/third_party/rust/cranelift-codegen/src/ir/constant.rs b/third_party/rust/cranelift-codegen/src/ir/constant.rs
new file mode 100644
index 0000000000..bd84e25ee3
--- /dev/null
+++ b/third_party/rust/cranelift-codegen/src/ir/constant.rs
@@ -0,0 +1,532 @@
+//! Constants
+//!
+//! The constant pool defined here allows Cranelift to avoid emitting the same constant multiple
+//! times. As constants are inserted in the pool, a handle is returned; the handle is a Cranelift
+//! Entity. Inserting the same data multiple times will always return the same handle.
+//!
+//! Future work could include:
+//! - ensuring alignment of constants within the pool,
+//! - bucketing constants by size.
+
+use crate::ir::immediates::{IntoBytes, V128Imm};
+use crate::ir::Constant;
+use crate::HashMap;
+use alloc::collections::BTreeMap;
+use alloc::vec::Vec;
+use core::fmt;
+use core::iter::FromIterator;
+use core::slice::Iter;
+use core::str::{from_utf8, FromStr};
+use cranelift_entity::EntityRef;
+
+/// This type describes the actual constant data. Note that the bytes stored in this structure are
+/// expected to be in little-endian order; this is due to ease-of-use when interacting with
+/// WebAssembly values, which are [little-endian by design].
+///
+/// [little-endian by design]: https://github.com/WebAssembly/design/blob/master/Portability.md
+#[derive(Clone, Hash, Eq, PartialEq, Debug, Default)]
+pub struct ConstantData(Vec<u8>);
+
+impl FromIterator<u8> for ConstantData {
+ fn from_iter<T: IntoIterator<Item = u8>>(iter: T) -> Self {
+ let v = iter.into_iter().collect();
+ Self(v)
+ }
+}
+
+impl From<Vec<u8>> for ConstantData {
+ fn from(v: Vec<u8>) -> Self {
+ Self(v)
+ }
+}
+
+impl From<&[u8]> for ConstantData {
+ fn from(v: &[u8]) -> Self {
+ Self(v.to_vec())
+ }
+}
+
+impl From<V128Imm> for ConstantData {
+ fn from(v: V128Imm) -> Self {
+ Self(v.to_vec())
+ }
+}
+
+impl ConstantData {
+ /// Return the number of bytes in the constant.
+ pub fn len(&self) -> usize {
+ self.0.len()
+ }
+
+ /// Check if the constant contains any bytes.
+ pub fn is_empty(&self) -> bool {
+ self.0.is_empty()
+ }
+
+ /// Return the data as a slice.
+ pub fn as_slice(&self) -> &[u8] {
+ self.0.as_slice()
+ }
+
+ /// Convert the data to a vector.
+ pub fn into_vec(self) -> Vec<u8> {
+ self.0
+ }
+
+ /// Iterate over the constant's bytes.
+ pub fn iter(&self) -> Iter<u8> {
+ self.0.iter()
+ }
+
+ /// Add new bytes to the constant data.
+ pub fn append(mut self, bytes: impl IntoBytes) -> Self {
+ let mut to_add = bytes.into_bytes();
+ self.0.append(&mut to_add);
+ self
+ }
+
+ /// Expand the size of the constant data to `expected_size` number of bytes by adding zeroes
+ /// in the high-order byte slots.
+ pub fn expand_to(mut self, expected_size: usize) -> Self {
+ if self.len() > expected_size {
+ panic!(
+ "The constant data is already expanded beyond {} bytes",
+ expected_size
+ )
+ }
+ self.0.resize(expected_size, 0);
+ self
+ }
+}
+
+impl fmt::Display for ConstantData {
+ /// Print the constant data in hexadecimal format, e.g. 0x000102030405060708090a0b0c0d0e0f.
+ /// This function will flip the stored order of bytes--little-endian--to the more readable
+ /// big-endian ordering.
+ ///
+ /// ```
+ /// use cranelift_codegen::ir::ConstantData;
+ /// let data = ConstantData::from([3, 2, 1, 0, 0].as_ref()); // note the little-endian order
+ /// assert_eq!(data.to_string(), "0x0000010203");
+ /// ```
+ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+ if !self.is_empty() {
+ write!(f, "0x")?;
+ for b in self.0.iter().rev() {
+ write!(f, "{:02x}", b)?;
+ }
+ }
+ Ok(())
+ }
+}
+
+impl FromStr for ConstantData {
+ type Err = &'static str;
+
+ /// Parse a hexadecimal string to `ConstantData`. This is the inverse of `Display::fmt`.
+ ///
+ /// ```
+ /// use cranelift_codegen::ir::ConstantData;
+ /// let c: ConstantData = "0x000102".parse().unwrap();
+ /// assert_eq!(c.into_vec(), [2, 1, 0]);
+ /// ```
+ fn from_str(s: &str) -> Result<Self, &'static str> {
+ if s.len() <= 2 || &s[0..2] != "0x" {
+ return Err("Expected a hexadecimal string, e.g. 0x1234");
+ }
+
+ // clean and check the string
+ let cleaned: Vec<u8> = s[2..]
+ .as_bytes()
+ .iter()
+ .filter(|&&b| b as char != '_')
+ .cloned()
+ .collect(); // remove 0x prefix and any intervening _ characters
+
+ if cleaned.is_empty() {
+ Err("Hexadecimal string must have some digits")
+ } else if cleaned.len() % 2 != 0 {
+ Err("Hexadecimal string must have an even number of digits")
+ } else if cleaned.len() > 32 {
+ Err("Hexadecimal string has too many digits to fit in a 128-bit vector")
+ } else {
+ let mut buffer = Vec::with_capacity((s.len() - 2) / 2);
+ for i in (0..cleaned.len()).step_by(2) {
+ let pair = from_utf8(&cleaned[i..i + 2])
+ .or_else(|_| Err("Unable to parse hexadecimal pair as UTF-8"))?;
+ let byte = u8::from_str_radix(pair, 16)
+ .or_else(|_| Err("Unable to parse as hexadecimal"))?;
+ buffer.insert(0, byte);
+ }
+ Ok(Self(buffer))
+ }
+ }
+}
+
+/// This type describes an offset in bytes within a constant pool.
+pub type ConstantOffset = u32;
+
+/// Inner type for storing data and offset together in the constant pool. The offset is optional
+/// because it must be set relative to the function code size (i.e. constants are emitted after the
+/// function body); because the function is not yet compiled when constants are inserted,
+/// [`set_offset`](crate::ir::ConstantPool::set_offset) must be called once a constant's offset
+/// from the beginning of the function is known (see
+/// `relaxation` in `relaxation.rs`).
+#[derive(Clone)]
+pub struct ConstantPoolEntry {
+ data: ConstantData,
+ offset: Option<ConstantOffset>,
+}
+
+impl ConstantPoolEntry {
+ fn new(data: ConstantData) -> Self {
+ Self { data, offset: None }
+ }
+
+ /// Return the size of the constant at this entry.
+ pub fn len(&self) -> usize {
+ self.data.len()
+ }
+
+ /// Assign a new offset to the constant at this entry.
+ pub fn set_offset(&mut self, offset: ConstantOffset) {
+ self.offset = Some(offset)
+ }
+}
+
+/// Maintains the mapping between a constant handle (i.e. [`Constant`](crate::ir::Constant)) and
+/// its constant data (i.e. [`ConstantData`](crate::ir::ConstantData)).
+#[derive(Clone)]
+pub struct ConstantPool {
+ /// This mapping maintains the insertion order as long as Constants are created with
+ /// sequentially increasing integers.
+ handles_to_values: BTreeMap<Constant, ConstantPoolEntry>,
+
+ /// This mapping is unordered (no need for lexicographic ordering) but allows us to map
+ /// constant data back to handles.
+ values_to_handles: HashMap<ConstantData, Constant>,
+}
+
+impl ConstantPool {
+ /// Create a new constant pool instance.
+ pub fn new() -> Self {
+ Self {
+ handles_to_values: BTreeMap::new(),
+ values_to_handles: HashMap::new(),
+ }
+ }
+
+ /// Empty the constant pool of all data.
+ pub fn clear(&mut self) {
+ self.handles_to_values.clear();
+ self.values_to_handles.clear();
+ }
+
+ /// Insert constant data into the pool, returning a handle for later referencing; when constant
+ /// data is inserted that is a duplicate of previous constant data, the existing handle will be
+ /// returned.
+ pub fn insert(&mut self, constant_value: ConstantData) -> Constant {
+ if self.values_to_handles.contains_key(&constant_value) {
+ *self.values_to_handles.get(&constant_value).unwrap()
+ } else {
+ let constant_handle = Constant::new(self.len());
+ self.set(constant_handle, constant_value);
+ constant_handle
+ }
+ }
+
+ /// Retrieve the constant data given a handle.
+ pub fn get(&self, constant_handle: Constant) -> &ConstantData {
+ assert!(self.handles_to_values.contains_key(&constant_handle));
+ &self.handles_to_values.get(&constant_handle).unwrap().data
+ }
+
+ /// Link a constant handle to its value. This does not de-duplicate data but does avoid
+ /// replacing any existing constant values. use `set` to tie a specific `const42` to its value;
+ /// use `insert` to add a value and return the next available `const` entity.
+ pub fn set(&mut self, constant_handle: Constant, constant_value: ConstantData) {
+ let replaced = self.handles_to_values.insert(
+ constant_handle,
+ ConstantPoolEntry::new(constant_value.clone()),
+ );
+ assert!(
+ replaced.is_none(),
+ "attempted to overwrite an existing constant {:?}: {:?} => {:?}",
+ constant_handle,
+ &constant_value,
+ replaced.unwrap().data
+ );
+ self.values_to_handles
+ .insert(constant_value, constant_handle);
+ }
+
+ /// Assign an offset to a given constant, where the offset is the number of bytes from the
+ /// beginning of the function to the beginning of the constant data inside the pool.
+ pub fn set_offset(&mut self, constant_handle: Constant, constant_offset: ConstantOffset) {
+ assert!(
+ self.handles_to_values.contains_key(&constant_handle),
+ "A constant handle must have already been inserted into the pool; perhaps a \
+ constant pool was created outside of the pool?"
+ );
+ self.handles_to_values
+ .entry(constant_handle)
+ .and_modify(|e| e.offset = Some(constant_offset));
+ }
+
+ /// Retrieve the offset of a given constant, where the offset is the number of bytes from the
+ /// beginning of the function to the beginning of the constant data inside the pool.
+ pub fn get_offset(&self, constant_handle: Constant) -> ConstantOffset {
+ self.handles_to_values
+ .get(&constant_handle)
+ .expect(
+ "A constant handle must have a corresponding constant value; was a constant \
+ handle created outside of the pool?",
+ )
+ .offset
+ .expect(
+ "A constant offset has not yet been set; verify that `set_offset` has been \
+ called before this point",
+ )
+ }
+
+ /// Iterate over the constants in insertion order.
+ pub fn iter(&self) -> impl Iterator<Item = (&Constant, &ConstantData)> {
+ self.handles_to_values.iter().map(|(h, e)| (h, &e.data))
+ }
+
+ /// Iterate over mutable entries in the constant pool in insertion order.
+ pub fn entries_mut(&mut self) -> impl Iterator<Item = &mut ConstantPoolEntry> {
+ self.handles_to_values.values_mut()
+ }
+
+ /// Return the number of constants in the pool.
+ pub fn len(&self) -> usize {
+ self.handles_to_values.len()
+ }
+
+ /// Return the combined size of all of the constant values in the pool.
+ pub fn byte_size(&self) -> usize {
+ self.values_to_handles.keys().map(|c| c.len()).sum()
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ use super::*;
+ use std::string::ToString;
+
+ #[test]
+ fn empty() {
+ let sut = ConstantPool::new();
+ assert_eq!(sut.len(), 0);
+ }
+
+ #[test]
+ fn insert() {
+ let mut sut = ConstantPool::new();
+ sut.insert(vec![1, 2, 3].into());
+ sut.insert(vec![4, 5, 6].into());
+ assert_eq!(sut.len(), 2);
+ }
+
+ #[test]
+ fn insert_duplicate() {
+ let mut sut = ConstantPool::new();
+ let a = sut.insert(vec![1, 2, 3].into());
+ sut.insert(vec![4, 5, 6].into());
+ let b = sut.insert(vec![1, 2, 3].into());
+ assert_eq!(a, b);
+ }
+
+ #[test]
+ fn clear() {
+ let mut sut = ConstantPool::new();
+ sut.insert(vec![1, 2, 3].into());
+ assert_eq!(sut.len(), 1);
+
+ sut.clear();
+ assert_eq!(sut.len(), 0);
+ }
+
+ #[test]
+ fn iteration_order() {
+ let mut sut = ConstantPool::new();
+ sut.insert(vec![1, 2, 3].into());
+ sut.insert(vec![4, 5, 6].into());
+ sut.insert(vec![1, 2, 3].into());
+ let data = sut.iter().map(|(_, v)| v).collect::<Vec<&ConstantData>>();
+ assert_eq!(data, vec![&vec![1, 2, 3].into(), &vec![4, 5, 6].into()]);
+ }
+
+ #[test]
+ fn get() {
+ let mut sut = ConstantPool::new();
+ let data = vec![1, 2, 3];
+ let handle = sut.insert(data.clone().into());
+ assert_eq!(sut.get(handle), &data.into());
+ }
+
+ #[test]
+ fn set() {
+ let mut sut = ConstantPool::new();
+ let handle = Constant::with_number(42).unwrap();
+ let data = vec![1, 2, 3];
+ sut.set(handle, data.clone().into());
+ assert_eq!(sut.get(handle), &data.into());
+ }
+
+ #[test]
+ #[should_panic]
+ fn disallow_overwriting_constant() {
+ let mut sut = ConstantPool::new();
+ let handle = Constant::with_number(42).unwrap();
+ sut.set(handle, vec![].into());
+ sut.set(handle, vec![1].into());
+ }
+
+ #[test]
+ #[should_panic]
+ fn get_nonexistent_constant() {
+ let sut = ConstantPool::new();
+ let a = Constant::with_number(42).unwrap();
+ sut.get(a); // panics, only use constants returned by ConstantPool
+ }
+
+ #[test]
+ fn get_offset() {
+ let mut sut = ConstantPool::new();
+ let a = sut.insert(vec![1].into());
+ sut.set_offset(a, 42);
+ assert_eq!(sut.get_offset(a), 42)
+ }
+
+ #[test]
+ #[should_panic]
+ fn get_nonexistent_offset() {
+ let mut sut = ConstantPool::new();
+ let a = sut.insert(vec![1].into());
+ sut.get_offset(a); // panics, set_offset should have been called
+ }
+
+ #[test]
+ fn display_constant_data() {
+ assert_eq!(ConstantData::from([0].as_ref()).to_string(), "0x00");
+ assert_eq!(ConstantData::from([42].as_ref()).to_string(), "0x2a");
+ assert_eq!(
+ ConstantData::from([3, 2, 1, 0].as_ref()).to_string(),
+ "0x00010203"
+ );
+ assert_eq!(
+ ConstantData::from(3735928559u32.to_le_bytes().as_ref()).to_string(),
+ "0xdeadbeef"
+ );
+ assert_eq!(
+ ConstantData::from(0x0102030405060708u64.to_le_bytes().as_ref()).to_string(),
+ "0x0102030405060708"
+ );
+ }
+
+ #[test]
+ fn iterate_over_constant_data() {
+ let c = ConstantData::from([1, 2, 3].as_ref());
+ let mut iter = c.iter();
+ assert_eq!(iter.next(), Some(&1));
+ assert_eq!(iter.next(), Some(&2));
+ assert_eq!(iter.next(), Some(&3));
+ assert_eq!(iter.next(), None);
+ }
+
+ #[test]
+ fn add_to_constant_data() {
+ let d = ConstantData::from([1, 2].as_ref());
+ let e = d.append(i16::from(3u8));
+ assert_eq!(e.into_vec(), vec![1, 2, 3, 0])
+ }
+
+ #[test]
+ fn extend_constant_data() {
+ let d = ConstantData::from([1, 2].as_ref());
+ assert_eq!(d.expand_to(4).into_vec(), vec![1, 2, 0, 0])
+ }
+
+ #[test]
+ #[should_panic]
+ fn extend_constant_data_to_invalid_length() {
+ ConstantData::from([1, 2].as_ref()).expand_to(1);
+ }
+
+ #[test]
+ fn parse_constant_data_and_restringify() {
+ // Verify that parsing of `from` succeeds and stringifies to `to`.
+ fn parse_ok(from: &str, to: &str) {
+ let parsed = from.parse::<ConstantData>().unwrap();
+ assert_eq!(parsed.to_string(), to);
+ }
+
+ // Verify that parsing of `from` fails with `error_msg`.
+ fn parse_err(from: &str, error_msg: &str) {
+ let parsed = from.parse::<ConstantData>();
+ assert!(
+ parsed.is_err(),
+ "Expected a parse error but parsing succeeded: {}",
+ from
+ );
+ assert_eq!(parsed.err().unwrap(), error_msg);
+ }
+
+ parse_ok("0x00", "0x00");
+ parse_ok("0x00000042", "0x00000042");
+ parse_ok(
+ "0x0102030405060708090a0b0c0d0e0f00",
+ "0x0102030405060708090a0b0c0d0e0f00",
+ );
+ parse_ok("0x_0000_0043_21", "0x0000004321");
+
+ parse_err("", "Expected a hexadecimal string, e.g. 0x1234");
+ parse_err("0x", "Expected a hexadecimal string, e.g. 0x1234");
+ parse_err(
+ "0x042",
+ "Hexadecimal string must have an even number of digits",
+ );
+ parse_err(
+ "0x00000000000000000000000000000000000000000000000000",
+ "Hexadecimal string has too many digits to fit in a 128-bit vector",
+ );
+ parse_err("0xrstu", "Unable to parse as hexadecimal");
+ parse_err("0x__", "Hexadecimal string must have some digits");
+ }
+
+ #[test]
+ fn verify_stored_bytes_in_constant_data() {
+ assert_eq!("0x01".parse::<ConstantData>().unwrap().into_vec(), [1]);
+ assert_eq!(ConstantData::from([1, 0].as_ref()).0, [1, 0]);
+ assert_eq!(ConstantData::from(vec![1, 0, 0, 0]).0, [1, 0, 0, 0]);
+ }
+
+ #[test]
+ fn check_constant_data_endianness_as_uimm128() {
+ fn parse_to_uimm128(from: &str) -> Vec<u8> {
+ from.parse::<ConstantData>()
+ .unwrap()
+ .expand_to(16)
+ .into_vec()
+ }
+
+ assert_eq!(
+ parse_to_uimm128("0x42"),
+ [0x42, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
+ );
+ assert_eq!(
+ parse_to_uimm128("0x00"),
+ [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
+ );
+ assert_eq!(
+ parse_to_uimm128("0x12345678"),
+ [0x78, 0x56, 0x34, 0x12, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
+ );
+ assert_eq!(
+ parse_to_uimm128("0x1234_5678"),
+ [0x78, 0x56, 0x34, 0x12, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
+ );
+ }
+}
diff --git a/third_party/rust/cranelift-codegen/src/ir/dfg.rs b/third_party/rust/cranelift-codegen/src/ir/dfg.rs
new file mode 100644
index 0000000000..58d101aace
--- /dev/null
+++ b/third_party/rust/cranelift-codegen/src/ir/dfg.rs
@@ -0,0 +1,1314 @@
+//! Data flow graph tracking Instructions, Values, and blocks.
+
+use crate::entity::{self, PrimaryMap, SecondaryMap};
+use crate::ir;
+use crate::ir::builder::ReplaceBuilder;
+use crate::ir::extfunc::ExtFuncData;
+use crate::ir::instructions::{BranchInfo, CallInfo, InstructionData};
+use crate::ir::{types, ConstantData, ConstantPool, Immediate};
+use crate::ir::{
+ Block, FuncRef, Inst, SigRef, Signature, Type, Value, ValueLabelAssignments, ValueList,
+ ValueListPool,
+};
+use crate::isa::TargetIsa;
+use crate::packed_option::ReservedValue;
+use crate::write::write_operands;
+use crate::HashMap;
+use alloc::vec::Vec;
+use core::fmt;
+use core::iter;
+use core::mem;
+use core::ops::{Index, IndexMut};
+use core::u16;
+
+/// A data flow graph defines all instructions and basic blocks in a function as well as
+/// the data flow dependencies between them. The DFG also tracks values which can be either
+/// instruction results or block parameters.
+///
+/// The layout of blocks in the function and of instructions in each block is recorded by the
+/// `Layout` data structure which forms the other half of the function representation.
+///
+#[derive(Clone)]
+pub struct DataFlowGraph {
+    /// Data about all of the instructions in the function, including opcodes and operands.
+    /// The instructions in this map are not in program order. That is tracked by `Layout`, along
+    /// with the block containing each instruction.
+    insts: PrimaryMap<Inst, InstructionData>,
+
+    /// List of result values for each instruction.
+    ///
+    /// This map gets resized automatically by `make_inst()` so it is always in sync with the
+    /// primary `insts` map.
+    results: SecondaryMap<Inst, ValueList>,
+
+    /// Basic blocks in the function and their parameters.
+    ///
+    /// This map is not in program order. That is handled by `Layout`, and so is the sequence of
+    /// instructions contained in each block.
+    blocks: PrimaryMap<Block, BlockData>,
+
+    /// Memory pool of value lists.
+    ///
+    /// The `ValueList` references into this pool appear in many places:
+    ///
+    /// - Instructions in `insts` that don't have room for their entire argument list inline.
+    /// - Instruction result values in `results`.
+    /// - Block parameters in `blocks`.
+    pub value_lists: ValueListPool,
+
+    /// Primary value table with entries for all values.
+    values: PrimaryMap<Value, ValueData>,
+
+    /// Function signature table. These signatures are referenced by indirect call instructions as
+    /// well as the external function references.
+    pub signatures: PrimaryMap<SigRef, Signature>,
+
+    /// The pre-legalization signature for each entry in `signatures`, if any.
+    pub old_signatures: SecondaryMap<SigRef, Option<Signature>>,
+
+    /// External function references. These are functions that can be called directly.
+    pub ext_funcs: PrimaryMap<FuncRef, ExtFuncData>,
+
+    /// Saves `Value` labels; `None` until `collect_debug_info` is called.
+    pub values_labels: Option<HashMap<Value, ValueLabelAssignments>>,
+
+    /// Constants used within the function.
+    pub constants: ConstantPool,
+
+    /// Stores large immediates that otherwise would not fit in `InstructionData`.
+    pub immediates: PrimaryMap<Immediate, ConstantData>,
+}
+
+impl DataFlowGraph {
+    /// Create a new empty `DataFlowGraph`.
+    pub fn new() -> Self {
+        Self {
+            insts: PrimaryMap::new(),
+            results: SecondaryMap::new(),
+            blocks: PrimaryMap::new(),
+            value_lists: ValueListPool::new(),
+            values: PrimaryMap::new(),
+            signatures: PrimaryMap::new(),
+            old_signatures: SecondaryMap::new(),
+            ext_funcs: PrimaryMap::new(),
+            values_labels: None,
+            constants: ConstantPool::new(),
+            immediates: PrimaryMap::new(),
+        }
+    }
+
+    /// Clear everything.
+    pub fn clear(&mut self) {
+        self.insts.clear();
+        self.results.clear();
+        self.blocks.clear();
+        self.value_lists.clear();
+        self.values.clear();
+        self.signatures.clear();
+        self.old_signatures.clear();
+        self.ext_funcs.clear();
+        // Debug-info collection is switched off again; `collect_debug_info`
+        // must be called anew to re-enable it.
+        self.values_labels = None;
+        self.constants.clear();
+        self.immediates.clear();
+    }
+
+    /// Get the total number of instructions created in this function, whether they are currently
+    /// inserted in the layout or not.
+    ///
+    /// This is intended for use with `SecondaryMap::with_capacity`.
+    pub fn num_insts(&self) -> usize {
+        self.insts.len()
+    }
+
+    /// Returns `true` if the given instruction reference is valid.
+    pub fn inst_is_valid(&self, inst: Inst) -> bool {
+        self.insts.is_valid(inst)
+    }
+
+    /// Get the total number of basic blocks created in this function, whether they are
+    /// currently inserted in the layout or not.
+    ///
+    /// This is intended for use with `SecondaryMap::with_capacity`.
+    pub fn num_blocks(&self) -> usize {
+        self.blocks.len()
+    }
+
+    /// Returns `true` if the given block reference is valid.
+    pub fn block_is_valid(&self, block: Block) -> bool {
+        self.blocks.is_valid(block)
+    }
+
+    /// Get the total number of values.
+    pub fn num_values(&self) -> usize {
+        self.values.len()
+    }
+
+    /// Starts collection of debug information.
+    pub fn collect_debug_info(&mut self) {
+        // Idempotent: an existing map is kept so labels collected so far survive.
+        if self.values_labels.is_none() {
+            self.values_labels = Some(HashMap::new());
+        }
+    }
+}
+
+/// Resolve value aliases.
+///
+/// Find the original SSA value that `value` aliases, or None if an
+/// alias cycle is detected.
+fn maybe_resolve_aliases(values: &PrimaryMap<Value, ValueData>, value: Value) -> Option<Value> {
+    let mut v = value;
+
+    // Note that values may be empty here.
+    // Follow at most `values.len() + 1` links: any chain longer than the
+    // number of value entries must revisit an entry, i.e. contain a cycle.
+    for _ in 0..=values.len() {
+        if let ValueData::Alias { original, .. } = values[v] {
+            v = original;
+        } else {
+            return Some(v);
+        }
+    }
+
+    None
+}
+
+/// Resolve value aliases.
+///
+/// Find the original SSA value that `value` aliases.
+///
+/// Panics if an alias cycle is detected.
+fn resolve_aliases(values: &PrimaryMap<Value, ValueData>, value: Value) -> Value {
+    if let Some(v) = maybe_resolve_aliases(values, value) {
+        v
+    } else {
+        panic!("Value alias loop detected for {}", value);
+    }
+}
+
+/// Iterator over all Values in a DFG.
+pub struct Values<'a> {
+    inner: entity::Iter<'a, Value, ValueData>,
+}
+
+/// Check for non-values: an `Alias` with an `INVALID` type whose target is the
+/// reserved value is treated as a placeholder slot, not a real value.
+fn valid_valuedata(data: &ValueData) -> bool {
+    if let ValueData::Alias {
+        ty: types::INVALID,
+        original,
+    } = *data
+    {
+        if original == Value::reserved_value() {
+            return false;
+        }
+    }
+    true
+}
+
+impl<'a> Iterator for Values<'a> {
+    type Item = Value;
+
+    fn next(&mut self) -> Option<Self::Item> {
+        // Skip placeholder entries; yield only the keys of real values.
+        self.inner
+            .by_ref()
+            .find(|kv| valid_valuedata(kv.1))
+            .map(|kv| kv.0)
+    }
+}
+
+/// Handling values.
+///
+/// Values are either block parameters or instruction results.
+impl DataFlowGraph {
+    /// Allocate an extended value entry.
+    fn make_value(&mut self, data: ValueData) -> Value {
+        // `push` returns the key of the newly created table entry.
+        self.values.push(data)
+    }
+
+    /// Get an iterator over all values.
+    pub fn values<'a>(&'a self) -> Values {
+        Values {
+            inner: self.values.iter(),
+        }
+    }
+
+    /// Check if a value reference is valid.
+    pub fn value_is_valid(&self, v: Value) -> bool {
+        self.values.is_valid(v)
+    }
+
+    /// Get the type of a value.
+    pub fn value_type(&self, v: Value) -> Type {
+        self.values[v].ty()
+    }
+
+    /// Get the definition of a value.
+    ///
+    /// This is either the instruction that defined it or the Block that has the value as a
+    /// parameter.
+    pub fn value_def(&self, v: Value) -> ValueDef {
+        match self.values[v] {
+            ValueData::Inst { inst, num, .. } => ValueDef::Result(inst, num as usize),
+            ValueData::Param { block, num, .. } => ValueDef::Param(block, num as usize),
+            ValueData::Alias { original, .. } => {
+                // Make sure we only recurse one level. `resolve_aliases` has safeguards to
+                // detect alias loops without overrunning the stack.
+                self.value_def(self.resolve_aliases(original))
+            }
+        }
+    }
+
+    /// Determine if `v` is an attached instruction result / block parameter.
+    ///
+    /// An attached value can't be attached to something else without first being detached.
+    ///
+    /// Value aliases are not considered to be attached to anything. Use `resolve_aliases()` to
+    /// determine if the original aliased value is attached.
+    pub fn value_is_attached(&self, v: Value) -> bool {
+        use self::ValueData::*;
+        // Attached means the defining instruction/block still lists `v` at its
+        // recorded position.
+        match self.values[v] {
+            Inst { inst, num, .. } => Some(&v) == self.inst_results(inst).get(num as usize),
+            Param { block, num, .. } => Some(&v) == self.block_params(block).get(num as usize),
+            Alias { .. } => false,
+        }
+    }
+
+    /// Resolve value aliases.
+    ///
+    /// Find the original SSA value that `value` aliases.
+    pub fn resolve_aliases(&self, value: Value) -> Value {
+        resolve_aliases(&self.values, value)
+    }
+
+    /// Resolve all aliases among inst's arguments.
+    ///
+    /// For each argument of inst which is defined by an alias, replace the
+    /// alias with the aliased value.
+    pub fn resolve_aliases_in_arguments(&mut self, inst: Inst) {
+        for arg in self.insts[inst].arguments_mut(&mut self.value_lists) {
+            let resolved = resolve_aliases(&self.values, *arg);
+            if resolved != *arg {
+                *arg = resolved;
+            }
+        }
+    }
+
+    /// Turn a value into an alias of another.
+    ///
+    /// Change the `dest` value to behave as an alias of `src`. This means that all uses of `dest`
+    /// will behave as if they used that value `src`.
+    ///
+    /// The `dest` value can't be attached to an instruction or block.
+    pub fn change_to_alias(&mut self, dest: Value, src: Value) {
+        debug_assert!(!self.value_is_attached(dest));
+        // Try to create short alias chains by finding the original source value.
+        // This also avoids the creation of loops.
+        let original = self.resolve_aliases(src);
+        debug_assert_ne!(
+            dest, original,
+            "Aliasing {} to {} would create a loop",
+            dest, src
+        );
+        let ty = self.value_type(original);
+        debug_assert_eq!(
+            self.value_type(dest),
+            ty,
+            "Aliasing {} to {} would change its type {} to {}",
+            dest,
+            src,
+            self.value_type(dest),
+            ty
+        );
+        debug_assert_ne!(ty, types::INVALID);
+
+        self.values[dest] = ValueData::Alias { ty, original };
+    }
+
+    /// Replace the results of one instruction with aliases to the results of another.
+    ///
+    /// Change all the results of `dest_inst` to behave as aliases of
+    /// corresponding results of `src_inst`, as if calling change_to_alias for
+    /// each.
+    ///
+    /// After calling this instruction, `dest_inst` will have had its results
+    /// cleared, so it likely needs to be removed from the graph.
+    ///
+    pub fn replace_with_aliases(&mut self, dest_inst: Inst, src_inst: Inst) {
+        debug_assert_ne!(
+            dest_inst, src_inst,
+            "Replacing {} with itself would create a loop",
+            dest_inst
+        );
+        debug_assert_eq!(
+            self.results[dest_inst].len(&self.value_lists),
+            self.results[src_inst].len(&self.value_lists),
+            "Replacing {} with {} would produce a different number of results.",
+            dest_inst,
+            src_inst
+        );
+
+        for (&dest, &src) in self.results[dest_inst]
+            .as_slice(&self.value_lists)
+            .iter()
+            .zip(self.results[src_inst].as_slice(&self.value_lists))
+        {
+            // Mirror the per-value checks performed by `change_to_alias`.
+            let original = src;
+            let ty = self.value_type(original);
+            debug_assert_eq!(
+                self.value_type(dest),
+                ty,
+                "Aliasing {} to {} would change its type {} to {}",
+                dest,
+                src,
+                self.value_type(dest),
+                ty
+            );
+            debug_assert_ne!(ty, types::INVALID);
+
+            self.values[dest] = ValueData::Alias { ty, original };
+        }
+
+        self.clear_results(dest_inst);
+    }
+}
+
+/// Where did a value come from?
+#[derive(Clone, Copy, Debug, PartialEq, Eq)]
+pub enum ValueDef {
+    /// Value is the n'th result of an instruction.
+    Result(Inst, usize),
+    /// Value is the n'th parameter to a block.
+    Param(Block, usize),
+}
+
+impl ValueDef {
+    /// Unwrap the instruction where the value was defined, or panic.
+    pub fn unwrap_inst(&self) -> Inst {
+        self.inst().expect("Value is not an instruction result")
+    }
+
+    /// Get the instruction where the value was defined, if any.
+    pub fn inst(&self) -> Option<Inst> {
+        match *self {
+            Self::Result(inst, _) => Some(inst),
+            _ => None,
+        }
+    }
+
+    /// Unwrap the block where the parameter is defined, or panic.
+    pub fn unwrap_block(&self) -> Block {
+        match *self {
+            Self::Param(block, _) => block,
+            _ => panic!("Value is not a block parameter"),
+        }
+    }
+
+    /// Get the program point where the value was defined.
+    pub fn pp(self) -> ir::ExpandedProgramPoint {
+        // Delegates to the `From<ValueDef>` conversion.
+        self.into()
+    }
+
+    /// Get the number component of this definition.
+    ///
+    /// When multiple values are defined at the same program point, this indicates the index of
+    /// this value.
+    pub fn num(self) -> usize {
+        match self {
+            Self::Result(_, n) | Self::Param(_, n) => n,
+        }
+    }
+}
+
+/// Internal table storage for extended values.
+#[derive(Clone, Debug)]
+enum ValueData {
+    /// Value is defined by an instruction.
+    Inst { ty: Type, num: u16, inst: Inst },
+
+    /// Value is a block parameter.
+    Param { ty: Type, num: u16, block: Block },
+
+    /// Value is an alias of another value.
+    /// An alias value can't be linked as an instruction result or block parameter. It is used as a
+    /// placeholder when the original instruction or block has been rewritten or modified.
+    Alias { ty: Type, original: Value },
+}
+
+impl ValueData {
+    /// Get the type carried by any variant.
+    fn ty(&self) -> Type {
+        match *self {
+            ValueData::Inst { ty, .. }
+            | ValueData::Param { ty, .. }
+            | ValueData::Alias { ty, .. } => ty,
+        }
+    }
+}
+
+/// Instructions.
+///
+impl DataFlowGraph {
+    /// Create a new instruction.
+    ///
+    /// The type of the first result is indicated by `data.ty`. If the instruction produces
+    /// multiple results, also call `make_inst_results` to allocate value table entries.
+    pub fn make_inst(&mut self, data: InstructionData) -> Inst {
+        // Grow `results` first so it always covers the instruction pushed below.
+        let n = self.num_insts() + 1;
+        self.results.resize(n);
+        self.insts.push(data)
+    }
+
+    /// Returns an object that displays `inst`.
+    pub fn display_inst<'a, I: Into<Option<&'a dyn TargetIsa>>>(
+        &'a self,
+        inst: Inst,
+        isa: I,
+    ) -> DisplayInst<'a> {
+        DisplayInst(self, isa.into(), inst)
+    }
+
+    /// Get all value arguments on `inst` as a slice.
+    pub fn inst_args(&self, inst: Inst) -> &[Value] {
+        self.insts[inst].arguments(&self.value_lists)
+    }
+
+    /// Get all value arguments on `inst` as a mutable slice.
+    pub fn inst_args_mut(&mut self, inst: Inst) -> &mut [Value] {
+        self.insts[inst].arguments_mut(&mut self.value_lists)
+    }
+
+    /// Get the fixed value arguments on `inst` as a slice.
+    pub fn inst_fixed_args(&self, inst: Inst) -> &[Value] {
+        let num_fixed_args = self[inst]
+            .opcode()
+            .constraints()
+            .num_fixed_value_arguments();
+        &self.inst_args(inst)[..num_fixed_args]
+    }
+
+    /// Get the fixed value arguments on `inst` as a mutable slice.
+    pub fn inst_fixed_args_mut(&mut self, inst: Inst) -> &mut [Value] {
+        let num_fixed_args = self[inst]
+            .opcode()
+            .constraints()
+            .num_fixed_value_arguments();
+        &mut self.inst_args_mut(inst)[..num_fixed_args]
+    }
+
+    /// Get the variable value arguments on `inst` as a slice.
+    pub fn inst_variable_args(&self, inst: Inst) -> &[Value] {
+        let num_fixed_args = self[inst]
+            .opcode()
+            .constraints()
+            .num_fixed_value_arguments();
+        &self.inst_args(inst)[num_fixed_args..]
+    }
+
+    /// Get the variable value arguments on `inst` as a mutable slice.
+    pub fn inst_variable_args_mut(&mut self, inst: Inst) -> &mut [Value] {
+        let num_fixed_args = self[inst]
+            .opcode()
+            .constraints()
+            .num_fixed_value_arguments();
+        &mut self.inst_args_mut(inst)[num_fixed_args..]
+    }
+
+    /// Create result values for an instruction that produces multiple results.
+    ///
+    /// Instructions that produce no result values only need to be created with `make_inst`,
+    /// otherwise call `make_inst_results` to allocate value table entries for the results.
+    ///
+    /// The result value types are determined from the instruction's value type constraints and the
+    /// provided `ctrl_typevar` type for polymorphic instructions. For non-polymorphic
+    /// instructions, `ctrl_typevar` is ignored, and `INVALID` can be used.
+    ///
+    /// The type of the first result value is also set, even if it was already set in the
+    /// `InstructionData` passed to `make_inst`. If this function is called with a single-result
+    /// instruction, that is the only effect.
+    pub fn make_inst_results(&mut self, inst: Inst, ctrl_typevar: Type) -> usize {
+        self.make_inst_results_reusing(inst, ctrl_typevar, iter::empty())
+    }
+
+    /// Create result values for `inst`, reusing the provided detached values.
+    ///
+    /// Create a new set of result values for `inst` using `ctrl_typevar` to determine the result
+    /// types. Any values provided by `reuse` will be reused. When `reuse` is exhausted or when it
+    /// produces `None`, a new value is created.
+    pub fn make_inst_results_reusing<I>(
+        &mut self,
+        inst: Inst,
+        ctrl_typevar: Type,
+        reuse: I,
+    ) -> usize
+    where
+        I: Iterator<Item = Option<Value>>,
+    {
+        // Fusing guarantees `next()` keeps returning `None` once exhausted.
+        let mut reuse = reuse.fuse();
+
+        self.results[inst].clear(&mut self.value_lists);
+
+        // Get the call signature if this is a function call.
+        if let Some(sig) = self.call_signature(inst) {
+            // Create result values corresponding to the call return types.
+            debug_assert_eq!(
+                self.insts[inst].opcode().constraints().num_fixed_results(),
+                0
+            );
+            let num_results = self.signatures[sig].returns.len();
+            for res_idx in 0..num_results {
+                let ty = self.signatures[sig].returns[res_idx].value_type;
+                if let Some(Some(v)) = reuse.next() {
+                    debug_assert_eq!(self.value_type(v), ty, "Reused {} is wrong type", ty);
+                    self.attach_result(inst, v);
+                } else {
+                    self.append_result(inst, ty);
+                }
+            }
+            num_results
+        } else {
+            // Create result values corresponding to the opcode's constraints.
+            let constraints = self.insts[inst].opcode().constraints();
+            let num_results = constraints.num_fixed_results();
+            for res_idx in 0..num_results {
+                let ty = constraints.result_type(res_idx, ctrl_typevar);
+                if let Some(Some(v)) = reuse.next() {
+                    debug_assert_eq!(self.value_type(v), ty, "Reused {} is wrong type", ty);
+                    self.attach_result(inst, v);
+                } else {
+                    self.append_result(inst, ty);
+                }
+            }
+            num_results
+        }
+    }
+
+    /// Create a `ReplaceBuilder` that will replace `inst` with a new instruction in place.
+    pub fn replace(&mut self, inst: Inst) -> ReplaceBuilder {
+        ReplaceBuilder::new(self, inst)
+    }
+
+    /// Detach the list of result values from `inst` and return it.
+    ///
+    /// This leaves `inst` without any result values. New result values can be created by calling
+    /// `make_inst_results` or by using a `replace(inst)` builder.
+    pub fn detach_results(&mut self, inst: Inst) -> ValueList {
+        self.results[inst].take()
+    }
+
+    /// Clear the list of result values from `inst`.
+    ///
+    /// This leaves `inst` without any result values. New result values can be created by calling
+    /// `make_inst_results` or by using a `replace(inst)` builder.
+    pub fn clear_results(&mut self, inst: Inst) {
+        self.results[inst].clear(&mut self.value_lists)
+    }
+
+    /// Attach an existing value to the result value list for `inst`.
+    ///
+    /// The `res` value is appended to the end of the result list.
+    ///
+    /// This is a very low-level operation. Usually, instruction results with the correct types are
+    /// created automatically. The `res` value must not be attached to anything else.
+    pub fn attach_result(&mut self, inst: Inst, res: Value) {
+        debug_assert!(!self.value_is_attached(res));
+        let num = self.results[inst].push(res, &mut self.value_lists);
+        // `num` is stored in a `u16` field of `ValueData::Inst` below.
+        debug_assert!(num <= u16::MAX as usize, "Too many result values");
+        let ty = self.value_type(res);
+        self.values[res] = ValueData::Inst {
+            ty,
+            num: num as u16,
+            inst,
+        };
+    }
+
+    /// Replace an instruction result with a new value of type `new_type`.
+    ///
+    /// The `old_value` must be an attached instruction result.
+    ///
+    /// The old value is left detached, so it should probably be changed into something else.
+    ///
+    /// Returns the new value.
+    pub fn replace_result(&mut self, old_value: Value, new_type: Type) -> Value {
+        let (num, inst) = match self.values[old_value] {
+            ValueData::Inst { num, inst, .. } => (num, inst),
+            _ => panic!("{} is not an instruction result value", old_value),
+        };
+        let new_value = self.make_value(ValueData::Inst {
+            ty: new_type,
+            num,
+            inst,
+        });
+        let num = num as usize;
+        // Swap the new value into the old value's slot, keeping its position.
+        let attached = mem::replace(
+            self.results[inst]
+                .get_mut(num, &mut self.value_lists)
+                .expect("Replacing detached result"),
+            new_value,
+        );
+        debug_assert_eq!(
+            attached,
+            old_value,
+            "{} wasn't detached from {}",
+            old_value,
+            self.display_inst(inst, None)
+        );
+        new_value
+    }
+
+    /// Append a new instruction result value to `inst`.
+    pub fn append_result(&mut self, inst: Inst, ty: Type) -> Value {
+        // `next_key()` predicts the key that `make_value` below will allocate,
+        // so the value can be pushed onto the result list first.
+        let res = self.values.next_key();
+        let num = self.results[inst].push(res, &mut self.value_lists);
+        debug_assert!(num <= u16::MAX as usize, "Too many result values");
+        self.make_value(ValueData::Inst {
+            ty,
+            inst,
+            num: num as u16,
+        })
+    }
+
+    /// Append a new value argument to an instruction.
+    ///
+    /// Panics if the instruction doesn't support arguments.
+    pub fn append_inst_arg(&mut self, inst: Inst, new_arg: Value) {
+        // Temporarily take the value list out of the instruction, extend it,
+        // and put it back.
+        let mut branch_values = self.insts[inst]
+            .take_value_list()
+            .expect("the instruction doesn't have value arguments");
+        branch_values.push(new_arg, &mut self.value_lists);
+        self.insts[inst].put_value_list(branch_values)
+    }
+
+    /// Get the first result of an instruction.
+    ///
+    /// This function panics if the instruction doesn't have any result.
+    pub fn first_result(&self, inst: Inst) -> Value {
+        self.results[inst]
+            .first(&self.value_lists)
+            .expect("Instruction has no results")
+    }
+
+    /// Test if `inst` has any result values currently.
+    pub fn has_results(&self, inst: Inst) -> bool {
+        !self.results[inst].is_empty()
+    }
+
+    /// Return all the results of an instruction.
+    pub fn inst_results(&self, inst: Inst) -> &[Value] {
+        self.results[inst].as_slice(&self.value_lists)
+    }
+
+    /// Get the call signature of a direct or indirect call instruction.
+    /// Returns `None` if `inst` is not a call instruction.
+    pub fn call_signature(&self, inst: Inst) -> Option<SigRef> {
+        match self.insts[inst].analyze_call(&self.value_lists) {
+            CallInfo::NotACall => None,
+            CallInfo::Direct(f, _) => Some(self.ext_funcs[f].signature),
+            CallInfo::Indirect(s, _) => Some(s),
+        }
+    }
+
+    /// Check if `inst` is a branch.
+    pub fn analyze_branch(&self, inst: Inst) -> BranchInfo {
+        self.insts[inst].analyze_branch(&self.value_lists)
+    }
+
+    /// Compute the type of an instruction result from opcode constraints and call signatures.
+    ///
+    /// This computes the same sequence of result types that `make_inst_results()` above would
+    /// assign to the created result values, but it does not depend on `make_inst_results()` being
+    /// called first.
+    ///
+    /// Returns `None` if asked about a result index that is too large.
+    pub fn compute_result_type(
+        &self,
+        inst: Inst,
+        result_idx: usize,
+        ctrl_typevar: Type,
+    ) -> Option<Type> {
+        let constraints = self.insts[inst].opcode().constraints();
+        let num_fixed_results = constraints.num_fixed_results();
+
+        if result_idx < num_fixed_results {
+            return Some(constraints.result_type(result_idx, ctrl_typevar));
+        }
+
+        // Not a fixed result, try to extract a return type from the call signature.
+        self.call_signature(inst).and_then(|sigref| {
+            self.signatures[sigref]
+                .returns
+                .get(result_idx - num_fixed_results)
+                .map(|&arg| arg.value_type)
+        })
+    }
+
+    /// Get the controlling type variable, or `INVALID` if `inst` isn't polymorphic.
+    pub fn ctrl_typevar(&self, inst: Inst) -> Type {
+        let constraints = self[inst].opcode().constraints();
+
+        if !constraints.is_polymorphic() {
+            types::INVALID
+        } else if constraints.requires_typevar_operand() {
+            // Not all instruction formats have a designated operand, but in that case
+            // `requires_typevar_operand()` should never be true.
+            self.value_type(
+                self[inst]
+                    .typevar_operand(&self.value_lists)
+                    .expect("Instruction format doesn't have a designated operand, bad opcode."),
+            )
+        } else {
+            self.value_type(self.first_result(inst))
+        }
+    }
+}
+
+/// Allow immutable access to instructions via indexing: `dfg[inst]`.
+impl Index<Inst> for DataFlowGraph {
+    type Output = InstructionData;
+
+    fn index(&self, inst: Inst) -> &InstructionData {
+        &self.insts[inst]
+    }
+}
+
+/// Allow mutable access to instructions via indexing: `&mut dfg[inst]`.
+impl IndexMut<Inst> for DataFlowGraph {
+    fn index_mut(&mut self, inst: Inst) -> &mut InstructionData {
+        &mut self.insts[inst]
+    }
+}
+
+/// Basic blocks.
+impl DataFlowGraph {
+    /// Create a new basic block.
+    pub fn make_block(&mut self) -> Block {
+        self.blocks.push(BlockData::new())
+    }
+
+    /// Get the number of parameters on `block`.
+    pub fn num_block_params(&self, block: Block) -> usize {
+        self.blocks[block].params.len(&self.value_lists)
+    }
+
+    /// Get the parameters on `block`.
+    pub fn block_params(&self, block: Block) -> &[Value] {
+        self.blocks[block].params.as_slice(&self.value_lists)
+    }
+
+    /// Get the types of the parameters on `block`.
+    pub fn block_param_types(&self, block: Block) -> Vec<Type> {
+        self.block_params(block)
+            .iter()
+            .map(|&v| self.value_type(v))
+            .collect()
+    }
+
+    /// Append a parameter with type `ty` to `block`.
+    pub fn append_block_param(&mut self, block: Block, ty: Type) -> Value {
+        // `next_key()` predicts the key that `make_value` below will allocate,
+        // so the parameter can be pushed onto the block's list first.
+        let param = self.values.next_key();
+        let num = self.blocks[block].params.push(param, &mut self.value_lists);
+        debug_assert!(num <= u16::MAX as usize, "Too many parameters on block");
+        self.make_value(ValueData::Param {
+            ty,
+            num: num as u16,
+            block,
+        })
+    }
+
+    /// Removes `val` from `block`'s parameters by swapping it with the last parameter on `block`.
+    /// Returns the position of `val` before removal.
+    ///
+    /// *Important*: to ensure O(1) deletion, this method swaps the removed parameter with the
+    /// last `block` parameter. This can disrupt all the branch instructions jumping to this
+    /// `block` for which you have to change the branch argument order if necessary.
+    ///
+    /// Panics if `val` is not a block parameter.
+    pub fn swap_remove_block_param(&mut self, val: Value) -> usize {
+        let (block, num) = if let ValueData::Param { num, block, .. } = self.values[val] {
+            (block, num)
+        } else {
+            panic!("{} must be a block parameter", val);
+        };
+        self.blocks[block]
+            .params
+            .swap_remove(num as usize, &mut self.value_lists);
+        // If a parameter was swapped into the vacated slot, fix up its stored index.
+        if let Some(last_arg_val) = self.blocks[block]
+            .params
+            .get(num as usize, &self.value_lists)
+        {
+            // We update the position of the old last arg.
+            if let ValueData::Param {
+                num: ref mut old_num,
+                ..
+            } = self.values[last_arg_val]
+            {
+                *old_num = num;
+            } else {
+                panic!("{} should be a Block parameter", last_arg_val);
+            }
+        }
+        num as usize
+    }
+
+    /// Removes `val` from `block`'s parameters by a standard linear time list removal which
+    /// preserves ordering. Also updates the values' data.
+    pub fn remove_block_param(&mut self, val: Value) {
+        let (block, num) = if let ValueData::Param { num, block, .. } = self.values[val] {
+            (block, num)
+        } else {
+            panic!("{} must be a block parameter", val);
+        };
+        self.blocks[block]
+            .params
+            .remove(num as usize, &mut self.value_lists);
+        // Renumber every parameter that followed the removed one.
+        for index in num..(self.num_block_params(block) as u16) {
+            match self.values[self.blocks[block]
+                .params
+                .get(index as usize, &self.value_lists)
+                .unwrap()]
+            {
+                ValueData::Param { ref mut num, .. } => {
+                    *num -= 1;
+                }
+                _ => panic!(
+                    "{} must be a block parameter",
+                    self.blocks[block]
+                        .params
+                        .get(index as usize, &self.value_lists)
+                        .unwrap()
+                ),
+            }
+        }
+    }
+
+    /// Append an existing value to `block`'s parameters.
+    ///
+    /// The appended value can't already be attached to something else.
+    ///
+    /// In almost all cases, you should be using `append_block_param()` instead of this method.
+    pub fn attach_block_param(&mut self, block: Block, param: Value) {
+        debug_assert!(!self.value_is_attached(param));
+        let num = self.blocks[block].params.push(param, &mut self.value_lists);
+        debug_assert!(num <= u16::MAX as usize, "Too many parameters on block");
+        let ty = self.value_type(param);
+        self.values[param] = ValueData::Param {
+            ty,
+            num: num as u16,
+            block,
+        };
+    }
+
+    /// Replace a block parameter with a new value of type `ty`.
+    ///
+    /// The `old_value` must be an attached block parameter. It is removed from its place in the list
+    /// of parameters and replaced by a new value of type `new_type`. The new value gets the same
+    /// position in the list, and other parameters are not disturbed.
+    ///
+    /// The old value is left detached, so it should probably be changed into something else.
+    ///
+    /// Returns the new value.
+    pub fn replace_block_param(&mut self, old_value: Value, new_type: Type) -> Value {
+        // Create new value identical to the old one except for the type.
+        let (block, num) = if let ValueData::Param { num, block, .. } = self.values[old_value] {
+            (block, num)
+        } else {
+            panic!("{} must be a block parameter", old_value);
+        };
+        let new_arg = self.make_value(ValueData::Param {
+            ty: new_type,
+            num,
+            block,
+        });
+
+        self.blocks[block]
+            .params
+            .as_mut_slice(&mut self.value_lists)[num as usize] = new_arg;
+        new_arg
+    }
+
+    /// Detach all the parameters from `block` and return them as a `ValueList`.
+    ///
+    /// This is a quite low-level operation. Sensible things to do with the detached block parameters
+    /// is to put them back on the same block with `attach_block_param()` or change them into aliases
+    /// with `change_to_alias()`.
+    pub fn detach_block_params(&mut self, block: Block) -> ValueList {
+        self.blocks[block].params.take()
+    }
+}
+
+/// Contents of a basic block.
+///
+/// Parameters on a basic block are values that dominate everything in the block. All
+/// branches to this block must provide matching arguments, and the arguments to the entry block must
+/// match the function arguments.
+#[derive(Clone)]
+struct BlockData {
+    /// List of parameters to this block.
+    params: ValueList,
+}
+
+impl BlockData {
+    /// Create a block with no parameters.
+    fn new() -> Self {
+        Self {
+            params: ValueList::new(),
+        }
+    }
+}
+
+/// Object that can display an instruction: the DFG it lives in, an optional
+/// ISA for target-specific operand printing, and the instruction itself.
+pub struct DisplayInst<'a>(&'a DataFlowGraph, Option<&'a dyn TargetIsa>, Inst);
+
+impl<'a> fmt::Display for DisplayInst<'a> {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        let dfg = self.0;
+        let isa = self.1;
+        let inst = self.2;
+
+        // Print a `v1, v2 = ` prefix when the instruction defines results.
+        if let Some((first, rest)) = dfg.inst_results(inst).split_first() {
+            write!(f, "{}", first)?;
+            for v in rest {
+                write!(f, ", {}", v)?;
+            }
+            write!(f, " = ")?;
+        }
+
+        // Polymorphic instructions print their controlling type variable as a
+        // suffix, e.g. `opcode.i32`.
+        let typevar = dfg.ctrl_typevar(inst);
+        if typevar.is_invalid() {
+            write!(f, "{}", dfg[inst].opcode())?;
+        } else {
+            write!(f, "{}.{}", dfg[inst].opcode(), typevar)?;
+        }
+        write_operands(f, dfg, isa, inst)
+    }
+}
+
+/// Parser routines. These routines should not be used outside the parser.
+impl DataFlowGraph {
+ /// Set the type of a value. This is only for use in the parser, which needs
+ /// to create invalid values for index padding which may be reassigned later.
+ #[cold]
+ fn set_value_type_for_parser(&mut self, v: Value, t: Type) {
+ assert_eq!(
+ self.value_type(v),
+ types::INVALID,
+ "this function is only for assigning types to previously invalid values"
+ );
+ match self.values[v] {
+ ValueData::Inst { ref mut ty, .. }
+ | ValueData::Param { ref mut ty, .. }
+ | ValueData::Alias { ref mut ty, .. } => *ty = t,
+ }
+ }
+
+ /// Create result values for `inst`, reusing the provided detached values.
+ /// This is similar to `make_inst_results_reusing` except it's only for use
+ /// in the parser, which needs to reuse previously invalid values.
+ #[cold]
+ pub fn make_inst_results_for_parser(
+ &mut self,
+ inst: Inst,
+ ctrl_typevar: Type,
+ reuse: &[Value],
+ ) -> usize {
+ // Get the call signature if this is a function call.
+ if let Some(sig) = self.call_signature(inst) {
+ assert_eq!(
+ self.insts[inst].opcode().constraints().num_fixed_results(),
+ 0
+ );
+ for res_idx in 0..self.signatures[sig].returns.len() {
+ let ty = self.signatures[sig].returns[res_idx].value_type;
+ if let Some(v) = reuse.get(res_idx) {
+ self.set_value_type_for_parser(*v, ty);
+ }
+ }
+ } else {
+ let constraints = self.insts[inst].opcode().constraints();
+ for res_idx in 0..constraints.num_fixed_results() {
+ let ty = constraints.result_type(res_idx, ctrl_typevar);
+ if let Some(v) = reuse.get(res_idx) {
+ self.set_value_type_for_parser(*v, ty);
+ }
+ }
+ }
+
+ self.make_inst_results_reusing(inst, ctrl_typevar, reuse.iter().map(|x| Some(*x)))
+ }
+
+ /// Similar to `append_block_param`, append a parameter with type `ty` to
+ /// `block`, but using value `val`. This is only for use by the parser to
+ /// create parameters with specific values.
+ #[cold]
+ pub fn append_block_param_for_parser(&mut self, block: Block, ty: Type, val: Value) {
+ let num = self.blocks[block].params.push(val, &mut self.value_lists);
+ assert!(num <= u16::MAX as usize, "Too many parameters on block");
+ self.values[val] = ValueData::Param {
+ ty,
+ num: num as u16,
+ block,
+ };
+ }
+
+ /// Create a new value alias. This is only for use by the parser to create
+ /// aliases with specific values, and the printer for testing.
+ #[cold]
+ pub fn make_value_alias_for_serialization(&mut self, src: Value, dest: Value) {
+ assert_ne!(src, Value::reserved_value());
+ assert_ne!(dest, Value::reserved_value());
+
+ let ty = if self.values.is_valid(src) {
+ self.value_type(src)
+ } else {
+ // As a special case, if we can't resolve the aliasee yet, use INVALID
+ // temporarily. It will be resolved later in parsing.
+ types::INVALID
+ };
+ let data = ValueData::Alias { ty, original: src };
+ self.values[dest] = data;
+ }
+
+ /// If `v` is already defined as an alias, return its destination value.
+ /// Otherwise return None. This allows the parser to coalesce identical
+ /// alias definitions, and the printer to identify an alias's immediate target.
+ #[cold]
+ pub fn value_alias_dest_for_serialization(&self, v: Value) -> Option<Value> {
+ if let ValueData::Alias { original, .. } = self.values[v] {
+ Some(original)
+ } else {
+ None
+ }
+ }
+
+ /// Compute the type of an alias. This is only for use in the parser.
+ /// Returns false if an alias cycle was encountered.
+ #[cold]
+ pub fn set_alias_type_for_parser(&mut self, v: Value) -> bool {
+ if let Some(resolved) = maybe_resolve_aliases(&self.values, v) {
+ let old_ty = self.value_type(v);
+ let new_ty = self.value_type(resolved);
+ if old_ty == types::INVALID {
+ self.set_value_type_for_parser(v, new_ty);
+ } else {
+ assert_eq!(old_ty, new_ty);
+ }
+ true
+ } else {
+ false
+ }
+ }
+
+ /// Create an invalid value, to pad the index space. This is only for use by
+ /// the parser to pad out the value index space.
+ #[cold]
+ pub fn make_invalid_value_for_parser(&mut self) {
+ let data = ValueData::Alias {
+ ty: types::INVALID,
+ original: Value::reserved_value(),
+ };
+ self.make_value(data);
+ }
+
+ /// Check if a value reference is valid, while being aware of aliases which
+ /// may be unresolved while parsing.
+ #[cold]
+ pub fn value_is_valid_for_parser(&self, v: Value) -> bool {
+ if !self.value_is_valid(v) {
+ return false;
+ }
+ if let ValueData::Alias { ty, .. } = self.values[v] {
+ ty != types::INVALID
+ } else {
+ true
+ }
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ use super::*;
+ use crate::cursor::{Cursor, FuncCursor};
+ use crate::ir::types;
+ use crate::ir::{Function, InstructionData, Opcode, TrapCode};
+ use alloc::string::ToString;
+
+ #[test]
+ fn make_inst() {
+ let mut dfg = DataFlowGraph::new();
+
+ let idata = InstructionData::UnaryImm {
+ opcode: Opcode::Iconst,
+ imm: 0.into(),
+ };
+ let inst = dfg.make_inst(idata);
+
+ dfg.make_inst_results(inst, types::I32);
+ assert_eq!(inst.to_string(), "inst0");
+ assert_eq!(
+ dfg.display_inst(inst, None).to_string(),
+ "v0 = iconst.i32 0"
+ );
+
+ // Immutable reference resolution.
+ {
+ let immdfg = &dfg;
+ let ins = &immdfg[inst];
+ assert_eq!(ins.opcode(), Opcode::Iconst);
+ }
+
+ // Results.
+ let val = dfg.first_result(inst);
+ assert_eq!(dfg.inst_results(inst), &[val]);
+
+ assert_eq!(dfg.value_def(val), ValueDef::Result(inst, 0));
+ assert_eq!(dfg.value_type(val), types::I32);
+
+ // Replacing results.
+ assert!(dfg.value_is_attached(val));
+ let v2 = dfg.replace_result(val, types::F64);
+ assert!(!dfg.value_is_attached(val));
+ assert!(dfg.value_is_attached(v2));
+ assert_eq!(dfg.inst_results(inst), &[v2]);
+ assert_eq!(dfg.value_def(v2), ValueDef::Result(inst, 0));
+ assert_eq!(dfg.value_type(v2), types::F64);
+ }
+
+ #[test]
+ fn no_results() {
+ let mut dfg = DataFlowGraph::new();
+
+ let idata = InstructionData::Trap {
+ opcode: Opcode::Trap,
+ code: TrapCode::User(0),
+ };
+ let inst = dfg.make_inst(idata);
+ assert_eq!(dfg.display_inst(inst, None).to_string(), "trap user0");
+
+ // Result slice should be empty.
+ assert_eq!(dfg.inst_results(inst), &[]);
+ }
+
+ #[test]
+ fn block() {
+ let mut dfg = DataFlowGraph::new();
+
+ let block = dfg.make_block();
+ assert_eq!(block.to_string(), "block0");
+ assert_eq!(dfg.num_block_params(block), 0);
+ assert_eq!(dfg.block_params(block), &[]);
+ assert!(dfg.detach_block_params(block).is_empty());
+ assert_eq!(dfg.num_block_params(block), 0);
+ assert_eq!(dfg.block_params(block), &[]);
+
+ let arg1 = dfg.append_block_param(block, types::F32);
+ assert_eq!(arg1.to_string(), "v0");
+ assert_eq!(dfg.num_block_params(block), 1);
+ assert_eq!(dfg.block_params(block), &[arg1]);
+
+ let arg2 = dfg.append_block_param(block, types::I16);
+ assert_eq!(arg2.to_string(), "v1");
+ assert_eq!(dfg.num_block_params(block), 2);
+ assert_eq!(dfg.block_params(block), &[arg1, arg2]);
+
+ assert_eq!(dfg.value_def(arg1), ValueDef::Param(block, 0));
+ assert_eq!(dfg.value_def(arg2), ValueDef::Param(block, 1));
+ assert_eq!(dfg.value_type(arg1), types::F32);
+ assert_eq!(dfg.value_type(arg2), types::I16);
+
+ // Swap the two block parameters.
+ let vlist = dfg.detach_block_params(block);
+ assert_eq!(dfg.num_block_params(block), 0);
+ assert_eq!(dfg.block_params(block), &[]);
+ assert_eq!(vlist.as_slice(&dfg.value_lists), &[arg1, arg2]);
+ dfg.attach_block_param(block, arg2);
+ let arg3 = dfg.append_block_param(block, types::I32);
+ dfg.attach_block_param(block, arg1);
+ assert_eq!(dfg.block_params(block), &[arg2, arg3, arg1]);
+ }
+
+ #[test]
+ fn replace_block_params() {
+ let mut dfg = DataFlowGraph::new();
+
+ let block = dfg.make_block();
+ let arg1 = dfg.append_block_param(block, types::F32);
+
+ let new1 = dfg.replace_block_param(arg1, types::I64);
+ assert_eq!(dfg.value_type(arg1), types::F32);
+ assert_eq!(dfg.value_type(new1), types::I64);
+ assert_eq!(dfg.block_params(block), &[new1]);
+
+ dfg.attach_block_param(block, arg1);
+ assert_eq!(dfg.block_params(block), &[new1, arg1]);
+
+ let new2 = dfg.replace_block_param(arg1, types::I8);
+ assert_eq!(dfg.value_type(arg1), types::F32);
+ assert_eq!(dfg.value_type(new2), types::I8);
+ assert_eq!(dfg.block_params(block), &[new1, new2]);
+
+ dfg.attach_block_param(block, arg1);
+ assert_eq!(dfg.block_params(block), &[new1, new2, arg1]);
+
+ let new3 = dfg.replace_block_param(new2, types::I16);
+ assert_eq!(dfg.value_type(new1), types::I64);
+ assert_eq!(dfg.value_type(new2), types::I8);
+ assert_eq!(dfg.value_type(new3), types::I16);
+ assert_eq!(dfg.block_params(block), &[new1, new3, arg1]);
+ }
+
+ #[test]
+ fn swap_remove_block_params() {
+ let mut dfg = DataFlowGraph::new();
+
+ let block = dfg.make_block();
+ let arg1 = dfg.append_block_param(block, types::F32);
+ let arg2 = dfg.append_block_param(block, types::F32);
+ let arg3 = dfg.append_block_param(block, types::F32);
+ assert_eq!(dfg.block_params(block), &[arg1, arg2, arg3]);
+
+ dfg.swap_remove_block_param(arg1);
+ assert_eq!(dfg.value_is_attached(arg1), false);
+ assert_eq!(dfg.value_is_attached(arg2), true);
+ assert_eq!(dfg.value_is_attached(arg3), true);
+ assert_eq!(dfg.block_params(block), &[arg3, arg2]);
+ dfg.swap_remove_block_param(arg2);
+ assert_eq!(dfg.value_is_attached(arg2), false);
+ assert_eq!(dfg.value_is_attached(arg3), true);
+ assert_eq!(dfg.block_params(block), &[arg3]);
+ dfg.swap_remove_block_param(arg3);
+ assert_eq!(dfg.value_is_attached(arg3), false);
+ assert_eq!(dfg.block_params(block), &[]);
+ }
+
+ #[test]
+ fn aliases() {
+ use crate::ir::InstBuilder;
+
+ let mut func = Function::new();
+ let block0 = func.dfg.make_block();
+ let mut pos = FuncCursor::new(&mut func);
+ pos.insert_block(block0);
+
+ // Build a little test program.
+ let v1 = pos.ins().iconst(types::I32, 42);
+
+ // Make sure we can resolve value aliases even when values is empty.
+ assert_eq!(pos.func.dfg.resolve_aliases(v1), v1);
+
+ let arg0 = pos.func.dfg.append_block_param(block0, types::I32);
+ let (s, c) = pos.ins().iadd_ifcout(v1, arg0);
+ let iadd = match pos.func.dfg.value_def(s) {
+ ValueDef::Result(i, 0) => i,
+ _ => panic!(),
+ };
+
+ // Remove `c` from the result list.
+ pos.func.dfg.clear_results(iadd);
+ pos.func.dfg.attach_result(iadd, s);
+
+ // Replace `iadd_ifcout` with a normal `iadd` and an `ifcmp`.
+ pos.func.dfg.replace(iadd).iadd(v1, arg0);
+ let c2 = pos.ins().ifcmp(s, v1);
+ pos.func.dfg.change_to_alias(c, c2);
+
+ assert_eq!(pos.func.dfg.resolve_aliases(c2), c2);
+ assert_eq!(pos.func.dfg.resolve_aliases(c), c2);
+
+ // Make a copy of the alias.
+ let c3 = pos.ins().copy(c);
+ // This does not see through copies.
+ assert_eq!(pos.func.dfg.resolve_aliases(c3), c3);
+ }
+}
diff --git a/third_party/rust/cranelift-codegen/src/ir/entities.rs b/third_party/rust/cranelift-codegen/src/ir/entities.rs
new file mode 100644
index 0000000000..e629475532
--- /dev/null
+++ b/third_party/rust/cranelift-codegen/src/ir/entities.rs
@@ -0,0 +1,522 @@
+//! Cranelift IR entity references.
+//!
+//! Instructions in Cranelift IR need to reference other entities in the function. This can be other
+//! parts of the function like basic blocks or stack slots, or it can be external entities
+//! that are declared in the function preamble in the text format.
+//!
+//! These entity references in instruction operands are not implemented as Rust references both
+//! because Rust's ownership and mutability rules make it difficult, and because 64-bit pointers
+//! take up a lot of space, and we want a compact in-memory representation. Instead, entity
+//! references are structs wrapping a `u32` index into a table in the `Function` main data
+//! structure. There is a separate index type for each entity type, so we don't lose type safety.
+//!
+//! The `entities` module defines public types for the entity references along with constants
+//! representing an invalid reference. We prefer to use `Option<EntityRef>` whenever possible, but
+//! unfortunately that type is twice as large as the 32-bit index type on its own. Thus, compact
+//! data structures use the `PackedOption<EntityRef>` representation, while function arguments and
+//! return values prefer the more Rust-like `Option<EntityRef>` variant.
+//!
+//! The entity references all implement the `Display` trait in a way that matches the textual IR
+//! format.
+
+use crate::entity::entity_impl;
+use core::fmt;
+use core::u32;
+#[cfg(feature = "enable-serde")]
+use serde::{Deserialize, Serialize};
+
+/// An opaque reference to a [basic block](https://en.wikipedia.org/wiki/Basic_block) in a
+/// [`Function`](super::function::Function).
+///
+/// You can get a `Block` using
+/// [`FunctionBuilder::create_block`](https://docs.rs/cranelift-frontend/*/cranelift_frontend/struct.FunctionBuilder.html#method.create_block)
+///
+/// While the order is stable, it is arbitrary and does not necessarily resemble the layout order.
+#[derive(Copy, Clone, PartialEq, Eq, Hash, PartialOrd, Ord)]
+pub struct Block(u32);
+entity_impl!(Block, "block");
+
+impl Block {
+ /// Create a new block reference from its number. This corresponds to the `blockNN` representation.
+ ///
+ /// This method is for use by the parser.
+ pub fn with_number(n: u32) -> Option<Self> {
+ if n < u32::MAX {
+ Some(Self(n))
+ } else {
+ None
+ }
+ }
+}
+
+/// An opaque reference to an SSA value.
+///
+/// You can get a constant `Value` from the following
+/// [`InstBuilder`](super::InstBuilder) instructions:
+///
+/// - [`iconst`](super::InstBuilder::iconst) for integer constants
+/// - [`f32const`](super::InstBuilder::f32const) for 32-bit float constants
+/// - [`f64const`](super::InstBuilder::f64const) for 64-bit float constants
+/// - [`bconst`](super::InstBuilder::bconst) for boolean constants
+/// - [`vconst`](super::InstBuilder::vconst) for vector constants
+/// - [`null`](super::InstBuilder::null) for null reference constants
+///
+/// Any `InstBuilder` instruction that has an output will also return a `Value`.
+///
+/// While the order is stable, it is arbitrary.
+#[derive(Copy, Clone, PartialEq, Eq, Hash, PartialOrd, Ord)]
+pub struct Value(u32);
+entity_impl!(Value, "v");
+
+impl Value {
+ /// Create a value from its number representation.
+ /// This is the number in the `vNN` notation.
+ ///
+ /// This method is for use by the parser.
+ pub fn with_number(n: u32) -> Option<Self> {
+ if n < u32::MAX / 2 {
+ Some(Self(n))
+ } else {
+ None
+ }
+ }
+}
+
+/// An opaque reference to an instruction in a [`Function`](super::Function).
+///
+/// Most usage of `Inst` is internal. `Inst`ructions are returned by
+/// [`InstBuilder`](super::InstBuilder) instructions that do not return a
+/// [`Value`], such as control flow and trap instructions.
+///
+/// If you look around the API, you can find many inventive uses for `Inst`,
+/// such as [annotating specific instructions with a comment][inst_comment]
+/// or [performing reflection at compile time](super::DataFlowGraph::analyze_branch)
+/// on the type of instruction.
+///
+/// [inst_comment]: https://github.com/bjorn3/rustc_codegen_cranelift/blob/0f8814fd6da3d436a90549d4bb19b94034f2b19c/src/pretty_clif.rs
+///
+/// While the order is stable, it is arbitrary and does not necessarily resemble the layout order.
+#[derive(Copy, Clone, PartialEq, Eq, Hash, PartialOrd, Ord)]
+pub struct Inst(u32);
+entity_impl!(Inst, "inst");
+
+/// An opaque reference to a stack slot.
+///
+/// Stack slots represent an address on the
+/// [call stack](https://en.wikipedia.org/wiki/Call_stack).
+///
+/// `StackSlot`s can be created with
+/// [`FunctionBuilder::create_stack_slot`](https://docs.rs/cranelift-frontend/*/cranelift_frontend/struct.FunctionBuilder.html#method.create_stack_slot).
+///
+/// `StackSlot`s are most often used with
+/// [`stack_addr`](super::InstBuilder::stack_addr),
+/// [`stack_load`](super::InstBuilder::stack_load), and
+/// [`stack_store`](super::InstBuilder::stack_store).
+///
+/// While the order is stable, it is arbitrary and does not necessarily resemble the stack order.
+#[derive(Copy, Clone, PartialEq, Eq, Hash, PartialOrd, Ord)]
+#[cfg_attr(feature = "enable-serde", derive(Serialize, Deserialize))]
+pub struct StackSlot(u32);
+entity_impl!(StackSlot, "ss");
+
+impl StackSlot {
+ /// Create a new stack slot reference from its number.
+ ///
+ /// This method is for use by the parser.
+ pub fn with_number(n: u32) -> Option<Self> {
+ if n < u32::MAX {
+ Some(Self(n))
+ } else {
+ None
+ }
+ }
+}
+
+/// An opaque reference to a global value.
+///
+/// A `GlobalValue` is a [`Value`](Value) that will be live across the entire
+/// function lifetime. It can be preloaded from other global values.
+///
+/// You can create a `GlobalValue` in the following ways:
+///
+/// - When compiling to WASM, you can use it to load values from a
+/// [`VmContext`](super::GlobalValueData::VMContext) using
+/// [`FuncEnvironment::make_global`](https://docs.rs/cranelift-wasm/*/cranelift_wasm/trait.FuncEnvironment.html#tymethod.make_global).
+/// - When compiling to native code, you can use it for objects in static memory with
+/// [`Module::declare_data_in_func`](https://docs.rs/cranelift-module/*/cranelift_module/struct.Module.html#method.declare_data_in_func).
+/// - For any compilation target, it can be registered with
+/// [`FunctionBuilder::create_global_value`](https://docs.rs/cranelift-frontend/*/cranelift_frontend/struct.FunctionBuilder.html#method.create_global_value).
+///
+/// `GlobalValue`s can be retrieved with
+/// [`InstBuilder::global_value`](super::InstBuilder::global_value).
+///
+/// While the order is stable, it is arbitrary.
+#[derive(Copy, Clone, PartialEq, Eq, Hash, PartialOrd, Ord)]
+pub struct GlobalValue(u32);
+entity_impl!(GlobalValue, "gv");
+
+impl GlobalValue {
+ /// Create a new global value reference from its number.
+ ///
+ /// This method is for use by the parser.
+ pub fn with_number(n: u32) -> Option<Self> {
+ if n < u32::MAX {
+ Some(Self(n))
+ } else {
+ None
+ }
+ }
+}
+
+/// An opaque reference to a constant.
+///
+/// You can store [`ConstantData`](super::ConstantData) in a
+/// [`ConstantPool`](super::ConstantPool) for efficient storage and retrieval.
+/// See [`ConstantPool::insert`](super::ConstantPool::insert).
+///
+/// While the order is stable, it is arbitrary and does not necessarily resemble the order in which
+/// the constants are written in the constant pool.
+#[derive(Copy, Clone, PartialEq, Eq, Hash, Ord, PartialOrd)]
+pub struct Constant(u32);
+entity_impl!(Constant, "const");
+
+impl Constant {
+ /// Create a const reference from its number.
+ ///
+ /// This method is for use by the parser.
+ pub fn with_number(n: u32) -> Option<Self> {
+ if n < u32::MAX {
+ Some(Self(n))
+ } else {
+ None
+ }
+ }
+}
+
+/// An opaque reference to an immediate.
+///
+/// Some immediates (e.g. SIMD shuffle masks) are too large to store in the
+/// [`InstructionData`](super::instructions::InstructionData) struct and therefore must be
+/// tracked separately in [`DataFlowGraph::immediates`](super::dfg::DataFlowGraph). `Immediate`
+/// provides a way to reference values stored there.
+///
+/// While the order is stable, it is arbitrary.
+#[derive(Copy, Clone, PartialEq, Eq, Hash, PartialOrd, Ord)]
+pub struct Immediate(u32);
+entity_impl!(Immediate, "imm");
+
+impl Immediate {
+ /// Create an immediate reference from its number.
+ ///
+ /// This method is for use by the parser.
+ pub fn with_number(n: u32) -> Option<Self> {
+ if n < u32::MAX {
+ Some(Self(n))
+ } else {
+ None
+ }
+ }
+}
+
+/// An opaque reference to a [jump table](https://en.wikipedia.org/wiki/Branch_table).
+///
+/// `JumpTable`s are used for indirect branching and are specialized for dense,
+/// 0-based jump offsets. If you want a jump table which doesn't start at 0,
+/// or is not contiguous, consider using a [`Switch`](https://docs.rs/cranelift-frontend/*/cranelift_frontend/struct.Switch.html) instead.
+///
+/// `JumpTable` are used with [`br_table`](super::InstBuilder::br_table).
+///
+/// `JumpTable`s can be created with
+/// [`create_jump_table`](https://docs.rs/cranelift-frontend/*/cranelift_frontend/struct.FunctionBuilder.html#method.create_jump_table).
+///
+/// While the order is stable, it is arbitrary.
+#[derive(Copy, Clone, PartialEq, Eq, Hash, PartialOrd, Ord)]
+#[cfg_attr(feature = "enable-serde", derive(Serialize, Deserialize))]
+pub struct JumpTable(u32);
+entity_impl!(JumpTable, "jt");
+
+impl JumpTable {
+ /// Create a new jump table reference from its number.
+ ///
+ /// This method is for use by the parser.
+ pub fn with_number(n: u32) -> Option<Self> {
+ if n < u32::MAX {
+ Some(Self(n))
+ } else {
+ None
+ }
+ }
+}
+
+/// An opaque reference to another [`Function`](super::Function).
+///
+/// `FuncRef`s are used for [direct](super::InstBuilder::call) function calls
+/// and by [`func_addr`](super::InstBuilder::func_addr) for use in
+/// [indirect](super::InstBuilder::call_indirect) function calls.
+///
+/// `FuncRef`s can be created with
+///
+/// - [`FunctionBuilder::import_function`](https://docs.rs/cranelift-frontend/*/cranelift_frontend/struct.FunctionBuilder.html#method.import_function)
+/// for external functions
+/// - [`Module::declare_func_in_func`](https://docs.rs/cranelift-module/*/cranelift_module/struct.Module.html#method.declare_func_in_func)
+/// for functions declared elsewhere in the same native
+/// [`Module`](https://docs.rs/cranelift-module/*/cranelift_module/struct.Module.html)
+/// - [`FuncEnvironment::make_direct_func`](https://docs.rs/cranelift-wasm/*/cranelift_wasm/trait.FuncEnvironment.html#tymethod.make_direct_func)
+/// for functions declared in the same WebAssembly
+/// [`FuncEnvironment`](https://docs.rs/cranelift-wasm/*/cranelift_wasm/trait.FuncEnvironment.html#tymethod.make_direct_func)
+///
+/// While the order is stable, it is arbitrary.
+#[derive(Copy, Clone, PartialEq, Eq, Hash, PartialOrd, Ord)]
+pub struct FuncRef(u32);
+entity_impl!(FuncRef, "fn");
+
+impl FuncRef {
+ /// Create a new external function reference from its number.
+ ///
+ /// This method is for use by the parser.
+ pub fn with_number(n: u32) -> Option<Self> {
+ if n < u32::MAX {
+ Some(Self(n))
+ } else {
+ None
+ }
+ }
+}
+
+/// An opaque reference to a function [`Signature`](super::Signature).
+///
+/// `SigRef`s are used to declare a function with
+/// [`FunctionBuilder::import_function`](https://docs.rs/cranelift-frontend/*/cranelift_frontend/struct.FunctionBuilder.html#method.import_function)
+/// as well as to make an [indirect function call](super::InstBuilder::call_indirect).
+///
+/// `SigRef`s can be created with
+/// [`FunctionBuilder::import_signature`](https://docs.rs/cranelift-frontend/*/cranelift_frontend/struct.FunctionBuilder.html#method.import_signature).
+///
+/// You can retrieve the [`Signature`](super::Signature) that was used to create a `SigRef` with
+/// [`FunctionBuilder::signature`](https://docs.rs/cranelift-frontend/*/cranelift_frontend/struct.FunctionBuilder.html#method.signature) or
+/// [`func.dfg.signatures`](super::dfg::DataFlowGraph::signatures).
+///
+/// While the order is stable, it is arbitrary.
+#[derive(Copy, Clone, PartialEq, Eq, Hash, PartialOrd, Ord)]
+pub struct SigRef(u32);
+entity_impl!(SigRef, "sig");
+
+impl SigRef {
+ /// Create a new function signature reference from its number.
+ ///
+ /// This method is for use by the parser.
+ pub fn with_number(n: u32) -> Option<Self> {
+ if n < u32::MAX {
+ Some(Self(n))
+ } else {
+ None
+ }
+ }
+}
+
+/// An opaque reference to a [heap](https://en.wikipedia.org/wiki/Memory_management#DYNAMIC).
+///
+/// Heaps are used to access dynamically allocated memory through
+/// [`heap_addr`](super::InstBuilder::heap_addr).
+///
+/// To create a heap, use [`FunctionBuilder::create_heap`](https://docs.rs/cranelift-frontend/*/cranelift_frontend/struct.FunctionBuilder.html#method.create_heap).
+///
+/// While the order is stable, it is arbitrary.
+#[derive(Copy, Clone, PartialEq, Eq, Hash, PartialOrd, Ord)]
+pub struct Heap(u32);
+entity_impl!(Heap, "heap");
+
+impl Heap {
+ /// Create a new heap reference from its number.
+ ///
+ /// This method is for use by the parser.
+ pub fn with_number(n: u32) -> Option<Self> {
+ if n < u32::MAX {
+ Some(Self(n))
+ } else {
+ None
+ }
+ }
+}
+
+/// An opaque reference to a [WebAssembly
+/// table](https://developer.mozilla.org/en-US/docs/WebAssembly/Understanding_the_text_format#WebAssembly_tables).
+///
+/// `Table`s are used to store a list of function references.
+/// They can be created with [`FuncEnvironment::make_table`](https://docs.rs/cranelift-wasm/*/cranelift_wasm/trait.FuncEnvironment.html#tymethod.make_table).
+/// They can be used with
+/// [`FuncEnvironment::translate_call_indirect`](https://docs.rs/cranelift-wasm/*/cranelift_wasm/trait.FuncEnvironment.html#tymethod.translate_call_indirect).
+///
+/// While the order is stable, it is arbitrary.
+#[derive(Copy, Clone, PartialEq, Eq, Hash, PartialOrd, Ord)]
+pub struct Table(u32);
+entity_impl!(Table, "table");
+
+impl Table {
+ /// Create a new table reference from its number.
+ ///
+ /// This method is for use by the parser.
+ pub fn with_number(n: u32) -> Option<Self> {
+ if n < u32::MAX {
+ Some(Self(n))
+ } else {
+ None
+ }
+ }
+}
+
+/// An opaque reference to any of the entities defined in this module that can appear in CLIF IR.
+#[derive(Copy, Clone, PartialEq, Eq, Hash, PartialOrd, Ord)]
+pub enum AnyEntity {
+ /// The whole function.
+ Function,
+    /// A basic block.
+ Block(Block),
+ /// An instruction.
+ Inst(Inst),
+ /// An SSA value.
+ Value(Value),
+ /// A stack slot.
+ StackSlot(StackSlot),
+    /// A global value.
+ GlobalValue(GlobalValue),
+ /// A jump table.
+ JumpTable(JumpTable),
+ /// A constant.
+ Constant(Constant),
+ /// An external function.
+ FuncRef(FuncRef),
+ /// A function call signature.
+ SigRef(SigRef),
+ /// A heap.
+ Heap(Heap),
+ /// A table.
+ Table(Table),
+ /// A function's stack limit
+ StackLimit,
+}
+
+impl fmt::Display for AnyEntity {
+ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+ match *self {
+ Self::Function => write!(f, "function"),
+ Self::Block(r) => r.fmt(f),
+ Self::Inst(r) => r.fmt(f),
+ Self::Value(r) => r.fmt(f),
+ Self::StackSlot(r) => r.fmt(f),
+ Self::GlobalValue(r) => r.fmt(f),
+ Self::JumpTable(r) => r.fmt(f),
+ Self::Constant(r) => r.fmt(f),
+ Self::FuncRef(r) => r.fmt(f),
+ Self::SigRef(r) => r.fmt(f),
+ Self::Heap(r) => r.fmt(f),
+ Self::Table(r) => r.fmt(f),
+ Self::StackLimit => write!(f, "stack_limit"),
+ }
+ }
+}
+
+impl fmt::Debug for AnyEntity {
+ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+ (self as &dyn fmt::Display).fmt(f)
+ }
+}
+
+impl From<Block> for AnyEntity {
+ fn from(r: Block) -> Self {
+ Self::Block(r)
+ }
+}
+
+impl From<Inst> for AnyEntity {
+ fn from(r: Inst) -> Self {
+ Self::Inst(r)
+ }
+}
+
+impl From<Value> for AnyEntity {
+ fn from(r: Value) -> Self {
+ Self::Value(r)
+ }
+}
+
+impl From<StackSlot> for AnyEntity {
+ fn from(r: StackSlot) -> Self {
+ Self::StackSlot(r)
+ }
+}
+
+impl From<GlobalValue> for AnyEntity {
+ fn from(r: GlobalValue) -> Self {
+ Self::GlobalValue(r)
+ }
+}
+
+impl From<JumpTable> for AnyEntity {
+ fn from(r: JumpTable) -> Self {
+ Self::JumpTable(r)
+ }
+}
+
+impl From<Constant> for AnyEntity {
+ fn from(r: Constant) -> Self {
+ Self::Constant(r)
+ }
+}
+
+impl From<FuncRef> for AnyEntity {
+ fn from(r: FuncRef) -> Self {
+ Self::FuncRef(r)
+ }
+}
+
+impl From<SigRef> for AnyEntity {
+ fn from(r: SigRef) -> Self {
+ Self::SigRef(r)
+ }
+}
+
+impl From<Heap> for AnyEntity {
+ fn from(r: Heap) -> Self {
+ Self::Heap(r)
+ }
+}
+
+impl From<Table> for AnyEntity {
+ fn from(r: Table) -> Self {
+ Self::Table(r)
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ use super::*;
+ use alloc::string::ToString;
+ use core::u32;
+
+ #[test]
+ fn value_with_number() {
+ assert_eq!(Value::with_number(0).unwrap().to_string(), "v0");
+ assert_eq!(Value::with_number(1).unwrap().to_string(), "v1");
+
+ assert_eq!(Value::with_number(u32::MAX / 2), None);
+ assert!(Value::with_number(u32::MAX / 2 - 1).is_some());
+ }
+
+ #[test]
+ fn memory() {
+ use crate::packed_option::PackedOption;
+ use core::mem;
+ // This is the whole point of `PackedOption`.
+ assert_eq!(
+ mem::size_of::<Value>(),
+ mem::size_of::<PackedOption<Value>>()
+ );
+ }
+
+ #[test]
+ fn constant_with_number() {
+ assert_eq!(Constant::with_number(0).unwrap().to_string(), "const0");
+ assert_eq!(Constant::with_number(1).unwrap().to_string(), "const1");
+ }
+}
diff --git a/third_party/rust/cranelift-codegen/src/ir/extfunc.rs b/third_party/rust/cranelift-codegen/src/ir/extfunc.rs
new file mode 100644
index 0000000000..d8cccf8653
--- /dev/null
+++ b/third_party/rust/cranelift-codegen/src/ir/extfunc.rs
@@ -0,0 +1,521 @@
+//! External function calls.
+//!
+//! To a Cranelift function, all functions are "external". Directly called functions must be
+//! declared in the preamble, and all function calls must have a signature.
+//!
+//! This module declares the data types used to represent external functions and call signatures.
+
+use crate::ir::{ArgumentLoc, ExternalName, SigRef, Type};
+use crate::isa::{CallConv, RegInfo, RegUnit};
+use crate::machinst::RelocDistance;
+use alloc::vec::Vec;
+use core::fmt;
+use core::str::FromStr;
+#[cfg(feature = "enable-serde")]
+use serde::{Deserialize, Serialize};
+
/// Function signature.
///
/// The function signature describes the types of formal parameters and return values along with
/// other details that are needed to call a function correctly.
///
/// A signature can optionally include ISA-specific ABI information which specifies exactly how
/// arguments and return values are passed.
#[derive(Clone, Debug, PartialEq, Eq, Hash)]
#[cfg_attr(feature = "enable-serde", derive(Serialize, Deserialize))]
pub struct Signature {
    /// The arguments passed to the function, in declaration order.
    pub params: Vec<AbiParam>,
    /// Values returned from the function, in declaration order.
    pub returns: Vec<AbiParam>,

    /// Calling convention.
    pub call_conv: CallConv,
}
+
+impl Signature {
+ /// Create a new blank signature.
+ pub fn new(call_conv: CallConv) -> Self {
+ Self {
+ params: Vec::new(),
+ returns: Vec::new(),
+ call_conv,
+ }
+ }
+
+ /// Clear the signature so it is identical to a fresh one returned by `new()`.
+ pub fn clear(&mut self, call_conv: CallConv) {
+ self.params.clear();
+ self.returns.clear();
+ self.call_conv = call_conv;
+ }
+
+ /// Return an object that can display `self` with correct register names.
+ pub fn display<'a, R: Into<Option<&'a RegInfo>>>(&'a self, regs: R) -> DisplaySignature<'a> {
+ DisplaySignature(self, regs.into())
+ }
+
+ /// Find the index of a presumed unique special-purpose parameter.
+ pub fn special_param_index(&self, purpose: ArgumentPurpose) -> Option<usize> {
+ self.params.iter().rposition(|arg| arg.purpose == purpose)
+ }
+
+ /// Find the index of a presumed unique special-purpose parameter.
+ pub fn special_return_index(&self, purpose: ArgumentPurpose) -> Option<usize> {
+ self.returns.iter().rposition(|arg| arg.purpose == purpose)
+ }
+
+ /// Does this signature have a parameter whose `ArgumentPurpose` is
+ /// `purpose`?
+ pub fn uses_special_param(&self, purpose: ArgumentPurpose) -> bool {
+ self.special_param_index(purpose).is_some()
+ }
+
+ /// Does this signature have a return whose `ArgumentPurpose` is `purpose`?
+ pub fn uses_special_return(&self, purpose: ArgumentPurpose) -> bool {
+ self.special_return_index(purpose).is_some()
+ }
+
+ /// How many special parameters does this function have?
+ pub fn num_special_params(&self) -> usize {
+ self.params
+ .iter()
+ .filter(|p| p.purpose != ArgumentPurpose::Normal)
+ .count()
+ }
+
+ /// How many special returns does this function have?
+ pub fn num_special_returns(&self) -> usize {
+ self.returns
+ .iter()
+ .filter(|r| r.purpose != ArgumentPurpose::Normal)
+ .count()
+ }
+
+ /// Does this signature take an struct return pointer parameter?
+ pub fn uses_struct_return_param(&self) -> bool {
+ self.uses_special_param(ArgumentPurpose::StructReturn)
+ }
+
+ /// Does this return more than one normal value? (Pre-struct return
+ /// legalization)
+ pub fn is_multi_return(&self) -> bool {
+ self.returns
+ .iter()
+ .filter(|r| r.purpose == ArgumentPurpose::Normal)
+ .count()
+ > 1
+ }
+}
+
+/// Wrapper type capable of displaying a `Signature` with correct register names.
+pub struct DisplaySignature<'a>(&'a Signature, Option<&'a RegInfo>);
+
+fn write_list(f: &mut fmt::Formatter, args: &[AbiParam], regs: Option<&RegInfo>) -> fmt::Result {
+ match args.split_first() {
+ None => {}
+ Some((first, rest)) => {
+ write!(f, "{}", first.display(regs))?;
+ for arg in rest {
+ write!(f, ", {}", arg.display(regs))?;
+ }
+ }
+ }
+ Ok(())
+}
+
+impl<'a> fmt::Display for DisplaySignature<'a> {
+ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+ write!(f, "(")?;
+ write_list(f, &self.0.params, self.1)?;
+ write!(f, ")")?;
+ if !self.0.returns.is_empty() {
+ write!(f, " -> ")?;
+ write_list(f, &self.0.returns, self.1)?;
+ }
+ write!(f, " {}", self.0.call_conv)
+ }
+}
+
+impl fmt::Display for Signature {
+ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+ self.display(None).fmt(f)
+ }
+}
+
/// Function parameter or return value descriptor.
///
/// This describes the value type being passed to or from a function along with flags that affect
/// how the argument is passed.
#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)]
#[cfg_attr(feature = "enable-serde", derive(Serialize, Deserialize))]
pub struct AbiParam {
    /// Type of the argument value.
    pub value_type: Type,
    /// Special purpose of argument, or `Normal`.
    pub purpose: ArgumentPurpose,
    /// Method for extending argument to a full register.
    pub extension: ArgumentExtension,

    /// ABI-specific location of this argument, or `Unassigned` for arguments that have not yet
    /// been legalized.
    pub location: ArgumentLoc,
    /// Was the argument converted to pointer during legalization?
    /// Rendered as ` ptr` in the textual form.
    pub legalized_to_pointer: bool,
}
+
+impl AbiParam {
+ /// Create a parameter with default flags.
+ pub fn new(vt: Type) -> Self {
+ Self {
+ value_type: vt,
+ extension: ArgumentExtension::None,
+ purpose: ArgumentPurpose::Normal,
+ location: Default::default(),
+ legalized_to_pointer: false,
+ }
+ }
+
+ /// Create a special-purpose parameter that is not (yet) bound to a specific register.
+ pub fn special(vt: Type, purpose: ArgumentPurpose) -> Self {
+ Self {
+ value_type: vt,
+ extension: ArgumentExtension::None,
+ purpose,
+ location: Default::default(),
+ legalized_to_pointer: false,
+ }
+ }
+
+ /// Create a parameter for a special-purpose register.
+ pub fn special_reg(vt: Type, purpose: ArgumentPurpose, regunit: RegUnit) -> Self {
+ Self {
+ value_type: vt,
+ extension: ArgumentExtension::None,
+ purpose,
+ location: ArgumentLoc::Reg(regunit),
+ legalized_to_pointer: false,
+ }
+ }
+
+ /// Convert `self` to a parameter with the `uext` flag set.
+ pub fn uext(self) -> Self {
+ debug_assert!(self.value_type.is_int(), "uext on {} arg", self.value_type);
+ Self {
+ extension: ArgumentExtension::Uext,
+ ..self
+ }
+ }
+
+ /// Convert `self` to a parameter type with the `sext` flag set.
+ pub fn sext(self) -> Self {
+ debug_assert!(self.value_type.is_int(), "sext on {} arg", self.value_type);
+ Self {
+ extension: ArgumentExtension::Sext,
+ ..self
+ }
+ }
+
+ /// Return an object that can display `self` with correct register names.
+ pub fn display<'a, R: Into<Option<&'a RegInfo>>>(&'a self, regs: R) -> DisplayAbiParam<'a> {
+ DisplayAbiParam(self, regs.into())
+ }
+}
+
+/// Wrapper type capable of displaying a `AbiParam` with correct register names.
+pub struct DisplayAbiParam<'a>(&'a AbiParam, Option<&'a RegInfo>);
+
+impl<'a> fmt::Display for DisplayAbiParam<'a> {
+ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+ write!(f, "{}", self.0.value_type)?;
+ if self.0.legalized_to_pointer {
+ write!(f, " ptr")?;
+ }
+ match self.0.extension {
+ ArgumentExtension::None => {}
+ ArgumentExtension::Uext => write!(f, " uext")?,
+ ArgumentExtension::Sext => write!(f, " sext")?,
+ }
+ if self.0.purpose != ArgumentPurpose::Normal {
+ write!(f, " {}", self.0.purpose)?;
+ }
+
+ if self.0.location.is_assigned() {
+ write!(f, " [{}]", self.0.location.display(self.1))?;
+ }
+
+ Ok(())
+ }
+}
+
+impl fmt::Display for AbiParam {
+ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+ self.display(None).fmt(f)
+ }
+}
+
/// Function argument extension options.
///
/// On some architectures, small integer function arguments are extended to the width of a
/// general-purpose register.
///
/// Parameters with these flags set are constructed via `AbiParam::uext` and `AbiParam::sext`.
#[derive(Copy, Clone, PartialEq, Eq, Debug, Hash)]
#[cfg_attr(feature = "enable-serde", derive(Serialize, Deserialize))]
pub enum ArgumentExtension {
    /// No extension, high bits are indeterminate.
    None,
    /// Unsigned extension: high bits in register are 0.
    Uext,
    /// Signed extension: high bits in register replicate sign bit.
    Sext,
}
+
/// The special purpose of a function argument.
///
/// Function arguments and return values are used to pass user program values between functions,
/// but they are also used to represent special registers with significance to the ABI such as
/// frame pointers and callee-saved registers.
///
/// The argument purpose is used to indicate any special meaning of an argument or return value.
///
/// The textual form of each purpose is defined by the `Display` implementation below, and parsed
/// back by the `FromStr` implementation.
#[derive(Copy, Clone, PartialEq, Eq, Debug, Hash)]
#[cfg_attr(feature = "enable-serde", derive(Serialize, Deserialize))]
pub enum ArgumentPurpose {
    /// A normal user program value passed to or from a function.
    Normal,

    /// A C struct passed as argument. The payload is the size of the struct in bytes
    /// (rendered as `sarg(<size>)`).
    StructArgument(u32),

    /// Struct return pointer.
    ///
    /// When a function needs to return more data than will fit in registers, the caller passes a
    /// pointer to a memory location where the return value can be written. In some ABIs, this
    /// struct return pointer is passed in a specific register.
    ///
    /// This argument kind can also appear as a return value for ABIs that require a function with
    /// a `StructReturn` pointer argument to also return that pointer in a register.
    StructReturn,

    /// The link register.
    ///
    /// Most RISC architectures implement calls by saving the return address in a designated
    /// register rather than pushing it on the stack. This is represented with a `Link` argument.
    ///
    /// Similarly, some return instructions expect the return address in a register represented as
    /// a `Link` return value.
    Link,

    /// The frame pointer.
    ///
    /// This indicates the frame pointer register which has a special meaning in some ABIs.
    ///
    /// The frame pointer appears as an argument and as a return value since it is a callee-saved
    /// register.
    FramePointer,

    /// A callee-saved register.
    ///
    /// Some calling conventions have registers that must be saved by the callee. These registers
    /// are represented as `CalleeSaved` arguments and return values.
    CalleeSaved,

    /// A VM context pointer.
    ///
    /// This is a pointer to a context struct containing details about the current sandbox. It is
    /// used as a base pointer for `vmctx` global values.
    VMContext,

    /// A signature identifier.
    ///
    /// This is a special-purpose argument used to identify the calling convention expected by the
    /// caller in an indirect call. The callee can verify that the expected signature ID matches.
    SignatureId,

    /// A stack limit pointer.
    ///
    /// This is a pointer to a stack limit. It is used to check the current stack pointer
    /// against. Can only appear once in a signature.
    StackLimit,

    /// A callee TLS value.
    ///
    /// In the Baldrdash-2020 calling convention, the stack upon entry to the callee contains the
    /// TLS-register values for the caller and the callee. This argument is used to provide the
    /// value for the callee.
    CalleeTLS,

    /// A caller TLS value.
    ///
    /// In the Baldrdash-2020 calling convention, the stack upon entry to the callee contains the
    /// TLS-register values for the caller and the callee. This argument is used to provide the
    /// value for the caller.
    CallerTLS,
}
+
+impl fmt::Display for ArgumentPurpose {
+ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+ f.write_str(match self {
+ Self::Normal => "normal",
+ Self::StructArgument(size) => return write!(f, "sarg({})", size),
+ Self::StructReturn => "sret",
+ Self::Link => "link",
+ Self::FramePointer => "fp",
+ Self::CalleeSaved => "csr",
+ Self::VMContext => "vmctx",
+ Self::SignatureId => "sigid",
+ Self::StackLimit => "stack_limit",
+ Self::CalleeTLS => "callee_tls",
+ Self::CallerTLS => "caller_tls",
+ })
+ }
+}
+
+impl FromStr for ArgumentPurpose {
+ type Err = ();
+ fn from_str(s: &str) -> Result<Self, ()> {
+ match s {
+ "normal" => Ok(Self::Normal),
+ "sret" => Ok(Self::StructReturn),
+ "link" => Ok(Self::Link),
+ "fp" => Ok(Self::FramePointer),
+ "csr" => Ok(Self::CalleeSaved),
+ "vmctx" => Ok(Self::VMContext),
+ "sigid" => Ok(Self::SignatureId),
+ "stack_limit" => Ok(Self::StackLimit),
+ _ if s.starts_with("sarg(") => {
+ if !s.ends_with(")") {
+ return Err(());
+ }
+ // Parse 'sarg(size)'
+ let size: u32 = s["sarg(".len()..s.len() - 1].parse().map_err(|_| ())?;
+ Ok(Self::StructArgument(size))
+ }
+ _ => Err(()),
+ }
+ }
+}
+
/// An external function.
///
/// Information about a function that can be called directly with a direct `call` instruction.
#[derive(Clone, Debug)]
pub struct ExtFuncData {
    /// Name of the external function.
    pub name: ExternalName,
    /// Call signature of function, referring to an entry in the function's signature table.
    pub signature: SigRef,
    /// Will this function be defined nearby, such that it will always be a certain distance away,
    /// after linking? If so, references to it can avoid going through a GOT or PLT. Note that
    /// symbols meant to be preemptible cannot be considered colocated.
    ///
    /// If `true`, some backends may use relocation forms that have limited range. The exact
    /// distance depends on the code model in use. Currently on AArch64, for example, Cranelift
    /// uses a custom code model supporting up to +/- 128MB displacements. If it is unknown how
    /// far away the target will be, it is best not to set the `colocated` flag; in general, this
    /// flag is best used when the target is known to be in the same unit of code generation, such
    /// as a Wasm module.
    ///
    /// See the documentation for [`RelocDistance`](crate::machinst::RelocDistance) for more details. A
    /// `colocated` flag value of `true` implies `RelocDistance::Near`.
    pub colocated: bool,
}
+
+impl fmt::Display for ExtFuncData {
+ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+ if self.colocated {
+ write!(f, "colocated ")?;
+ }
+ write!(f, "{} {}", self.name, self.signature)
+ }
+}
+
+impl ExtFuncData {
+ /// Return an estimate of the distance to the referred-to function symbol.
+ pub fn reloc_distance(&self) -> RelocDistance {
+ if self.colocated {
+ RelocDistance::Near
+ } else {
+ RelocDistance::Far
+ }
+ }
+}
+
#[cfg(test)]
mod tests {
    use super::*;
    use crate::ir::types::{B8, F32, I32};
    use alloc::string::ToString;

    #[test]
    fn argument_type() {
        let t = AbiParam::new(I32);
        assert_eq!(t.to_string(), "i32");
        // Annotations appear in order: type, `ptr`, extension, purpose.
        let mut t = t.uext();
        assert_eq!(t.to_string(), "i32 uext");
        assert_eq!(t.sext().to_string(), "i32 sext");
        t.purpose = ArgumentPurpose::StructReturn;
        assert_eq!(t.to_string(), "i32 uext sret");
        t.legalized_to_pointer = true;
        assert_eq!(t.to_string(), "i32 ptr uext sret");
    }

    #[test]
    fn argument_purpose() {
        // Display and FromStr must round-trip for each purpose.
        let all_purpose = [
            (ArgumentPurpose::Normal, "normal"),
            (ArgumentPurpose::StructReturn, "sret"),
            (ArgumentPurpose::Link, "link"),
            (ArgumentPurpose::FramePointer, "fp"),
            (ArgumentPurpose::CalleeSaved, "csr"),
            (ArgumentPurpose::VMContext, "vmctx"),
            (ArgumentPurpose::SignatureId, "sigid"),
            (ArgumentPurpose::StackLimit, "stack_limit"),
            (ArgumentPurpose::StructArgument(42), "sarg(42)"),
        ];
        for &(e, n) in &all_purpose {
            assert_eq!(e.to_string(), n);
            assert_eq!(Ok(e), n.parse());
        }
    }

    #[test]
    fn call_conv() {
        // Calling conventions round-trip through their textual form.
        for &cc in &[
            CallConv::Fast,
            CallConv::Cold,
            CallConv::SystemV,
            CallConv::WindowsFastcall,
            CallConv::BaldrdashSystemV,
            CallConv::BaldrdashWindows,
            CallConv::Baldrdash2020,
        ] {
            assert_eq!(Ok(cc), cc.to_string().parse())
        }
    }

    #[test]
    fn signatures() {
        let mut sig = Signature::new(CallConv::BaldrdashSystemV);
        assert_eq!(sig.to_string(), "() baldrdash_system_v");
        sig.params.push(AbiParam::new(I32));
        assert_eq!(sig.to_string(), "(i32) baldrdash_system_v");
        sig.returns.push(AbiParam::new(F32));
        assert_eq!(sig.to_string(), "(i32) -> f32 baldrdash_system_v");
        sig.params.push(AbiParam::new(I32.by(4).unwrap()));
        assert_eq!(sig.to_string(), "(i32, i32x4) -> f32 baldrdash_system_v");
        sig.returns.push(AbiParam::new(B8));
        assert_eq!(
            sig.to_string(),
            "(i32, i32x4) -> f32, b8 baldrdash_system_v"
        );

        // Order does not matter.
        sig.params[0].location = ArgumentLoc::Stack(24);
        sig.params[1].location = ArgumentLoc::Stack(8);

        // Writing ABI-annotated signatures.
        assert_eq!(
            sig.to_string(),
            "(i32 [24], i32x4 [8]) -> f32, b8 baldrdash_system_v"
        );
    }
}
diff --git a/third_party/rust/cranelift-codegen/src/ir/extname.rs b/third_party/rust/cranelift-codegen/src/ir/extname.rs
new file mode 100644
index 0000000000..c12a873d26
--- /dev/null
+++ b/third_party/rust/cranelift-codegen/src/ir/extname.rs
@@ -0,0 +1,163 @@
+//! External names.
+//!
+//! These are identifiers for declaring entities defined outside the current
+//! function. The name of an external declaration doesn't have any meaning to
+//! Cranelift, which compiles functions independently.
+
+use crate::ir::LibCall;
+use core::cmp;
+use core::fmt::{self, Write};
+use core::str::FromStr;
+
+const TESTCASE_NAME_LENGTH: usize = 16;
+
/// The name of an external is either a reference to a user-defined symbol
/// table, or a short sequence of ascii bytes so that test cases do not have
/// to keep track of a symbol table.
///
/// External names are primarily used as keys by code using Cranelift to map
/// from a `cranelift_codegen::ir::FuncRef` or similar to additional associated
/// data.
///
/// External names can also serve as a primitive testing and debugging tool.
/// In particular, many `.clif` test files use function names to identify
/// functions.
#[derive(Debug, Clone, PartialEq, Eq)]
pub enum ExternalName {
    /// A name in a user-defined symbol table. Cranelift does not interpret
    /// these numbers in any way.
    User {
        /// Arbitrary.
        namespace: u32,
        /// Arbitrary.
        index: u32,
    },
    /// A test case function name of up to a hardcoded amount of ascii
    /// characters (`TESTCASE_NAME_LENGTH`, currently 16). This is not
    /// intended to be used outside test cases.
    TestCase {
        /// How many of the bytes in `ascii` are valid?
        length: u8,
        /// Ascii bytes of the name; only the first `length` bytes are meaningful.
        ascii: [u8; TESTCASE_NAME_LENGTH],
    },
    /// A well-known runtime library function.
    LibCall(LibCall),
}
+
+impl ExternalName {
+ /// Creates a new external name from a sequence of bytes. Caller is expected
+ /// to guarantee bytes are only ascii alphanumeric or `_`.
+ ///
+ /// # Examples
+ ///
+ /// ```rust
+ /// # use cranelift_codegen::ir::ExternalName;
+ /// // Create `ExternalName` from a string.
+ /// let name = ExternalName::testcase("hello");
+ /// assert_eq!(name.to_string(), "%hello");
+ /// ```
+ pub fn testcase<T: AsRef<[u8]>>(v: T) -> Self {
+ let vec = v.as_ref();
+ let len = cmp::min(vec.len(), TESTCASE_NAME_LENGTH);
+ let mut bytes = [0u8; TESTCASE_NAME_LENGTH];
+ bytes[0..len].copy_from_slice(&vec[0..len]);
+
+ Self::TestCase {
+ length: len as u8,
+ ascii: bytes,
+ }
+ }
+
+ /// Create a new external name from user-provided integer indices.
+ ///
+ /// # Examples
+ /// ```rust
+ /// # use cranelift_codegen::ir::ExternalName;
+ /// // Create `ExternalName` from integer indices
+ /// let name = ExternalName::user(123, 456);
+ /// assert_eq!(name.to_string(), "u123:456");
+ /// ```
+ pub fn user(namespace: u32, index: u32) -> Self {
+ Self::User { namespace, index }
+ }
+}
+
+impl Default for ExternalName {
+ fn default() -> Self {
+ Self::user(0, 0)
+ }
+}
+
+impl fmt::Display for ExternalName {
+ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+ match *self {
+ Self::User { namespace, index } => write!(f, "u{}:{}", namespace, index),
+ Self::TestCase { length, ascii } => {
+ f.write_char('%')?;
+ for byte in ascii.iter().take(length as usize) {
+ f.write_char(*byte as char)?;
+ }
+ Ok(())
+ }
+ Self::LibCall(lc) => write!(f, "%{}", lc),
+ }
+ }
+}
+
+impl FromStr for ExternalName {
+ type Err = ();
+
+ fn from_str(s: &str) -> Result<Self, Self::Err> {
+ // Try to parse as a libcall name, otherwise it's a test case.
+ match s.parse() {
+ Ok(lc) => Ok(Self::LibCall(lc)),
+ Err(_) => Ok(Self::testcase(s.as_bytes())),
+ }
+ }
+}
+
#[cfg(test)]
mod tests {
    use super::ExternalName;
    use crate::ir::LibCall;
    use alloc::string::ToString;
    use core::u32;

    #[test]
    fn display_testcase() {
        // Test case names display as `%<name>`.
        assert_eq!(ExternalName::testcase("").to_string(), "%");
        assert_eq!(ExternalName::testcase("x").to_string(), "%x");
        assert_eq!(ExternalName::testcase("x_1").to_string(), "%x_1");
        assert_eq!(
            ExternalName::testcase("longname12345678").to_string(),
            "%longname12345678"
        );
        // Constructor will silently drop bytes beyond the 16th
        assert_eq!(
            ExternalName::testcase("longname123456789").to_string(),
            "%longname12345678"
        );
    }

    #[test]
    fn display_user() {
        // User names display as `u<namespace>:<index>`.
        assert_eq!(ExternalName::user(0, 0).to_string(), "u0:0");
        assert_eq!(ExternalName::user(1, 1).to_string(), "u1:1");
        assert_eq!(
            ExternalName::user(u32::MAX, u32::MAX).to_string(),
            "u4294967295:4294967295"
        );
    }

    #[test]
    fn parsing() {
        // Known libcall names parse to `LibCall` and display with a `%` prefix.
        assert_eq!(
            "FloorF32".parse(),
            Ok(ExternalName::LibCall(LibCall::FloorF32))
        );
        assert_eq!(
            ExternalName::LibCall(LibCall::FloorF32).to_string(),
            "%FloorF32"
        );
    }
}
diff --git a/third_party/rust/cranelift-codegen/src/ir/function.rs b/third_party/rust/cranelift-codegen/src/ir/function.rs
new file mode 100644
index 0000000000..1833af27f5
--- /dev/null
+++ b/third_party/rust/cranelift-codegen/src/ir/function.rs
@@ -0,0 +1,441 @@
+//! Intermediate representation of a function.
+//!
+//! The `Function` struct defined in this module owns all of its basic blocks and
+//! instructions.
+
+use crate::binemit::CodeOffset;
+use crate::entity::{PrimaryMap, SecondaryMap};
+use crate::ir;
+use crate::ir::{
+ instructions::BranchInfo, Block, ExtFuncData, FuncRef, GlobalValue, GlobalValueData, Heap,
+ HeapData, Inst, InstructionData, JumpTable, JumpTableData, Opcode, SigRef, StackSlot,
+ StackSlotData, Table, TableData,
+};
+use crate::ir::{BlockOffsets, InstEncodings, SourceLocs, StackSlots, ValueLocations};
+use crate::ir::{DataFlowGraph, ExternalName, Layout, Signature};
+use crate::ir::{JumpTableOffsets, JumpTables};
+use crate::isa::{CallConv, EncInfo, Encoding, Legalize, TargetIsa};
+use crate::regalloc::{EntryRegDiversions, RegDiversions};
+use crate::value_label::ValueLabelsRanges;
+use crate::write::write_function;
+use alloc::vec::Vec;
+use core::fmt;
+
/// A function.
///
/// Functions can be cloned, but it is not a very fast operation.
/// The clone will have all the same entity numbers as the original.
#[derive(Clone)]
pub struct Function {
    /// Name of this function. Mostly used by `.clif` files.
    pub name: ExternalName,

    /// Signature of this function.
    pub signature: Signature,

    /// The old signature of this function, before the most recent legalization,
    /// if any.
    pub old_signature: Option<Signature>,

    /// Stack slots allocated in this function.
    pub stack_slots: StackSlots,

    /// Global values referenced.
    pub global_values: PrimaryMap<ir::GlobalValue, ir::GlobalValueData>,

    /// Heaps referenced.
    pub heaps: PrimaryMap<ir::Heap, ir::HeapData>,

    /// Tables referenced.
    pub tables: PrimaryMap<ir::Table, ir::TableData>,

    /// Jump tables used in this function.
    pub jump_tables: JumpTables,

    /// Data flow graph containing the primary definition of all instructions, blocks and values.
    pub dfg: DataFlowGraph,

    /// Layout of blocks and instructions in the function body.
    pub layout: Layout,

    /// Encoding recipe and bits for the legal instructions.
    /// Illegal instructions have the `Encoding::default()` value.
    pub encodings: InstEncodings,

    /// Location assigned to every value.
    pub locations: ValueLocations,

    /// Non-default locations assigned to value at the entry of basic blocks.
    ///
    /// At the entry of each basic block, we might have values which are not in their default
    /// ValueLocation. This field records these register-to-register moves as Diversions.
    pub entry_diversions: EntryRegDiversions,

    /// Code offsets of the block headers.
    ///
    /// This information is only transiently available after the `binemit::relax_branches` function
    /// computes it, and it can easily be recomputed by calling that function. It is not included
    /// in the textual IR format.
    pub offsets: BlockOffsets,

    /// Code offsets of Jump Table headers.
    /// NOTE(review): presumably only valid after binary emission, like `offsets` — confirm.
    pub jt_offsets: JumpTableOffsets,

    /// Source locations.
    ///
    /// Track the original source location for each instruction. The source locations are not
    /// interpreted by Cranelift, only preserved.
    pub srclocs: SourceLocs,

    /// Instruction that marks the end (inclusive) of the function's prologue.
    ///
    /// This is used for some ABIs to generate unwind information.
    pub prologue_end: Option<Inst>,

    /// The instructions that mark the start (inclusive) of an epilogue in the function.
    ///
    /// This is used for some ABIs to generate unwind information.
    pub epilogues_start: Vec<(Inst, Block)>,

    /// An optional global value which represents an expression evaluating to
    /// the stack limit for this function. This `GlobalValue` will be
    /// interpreted in the prologue, if necessary, to insert a stack check to
    /// ensure that a trap happens if the stack pointer goes below the
    /// threshold specified here.
    pub stack_limit: Option<ir::GlobalValue>,
}
+
+impl Function {
+ /// Create a function with the given name and signature.
+ pub fn with_name_signature(name: ExternalName, sig: Signature) -> Self {
+ Self {
+ name,
+ signature: sig,
+ old_signature: None,
+ stack_slots: StackSlots::new(),
+ global_values: PrimaryMap::new(),
+ heaps: PrimaryMap::new(),
+ tables: PrimaryMap::new(),
+ jump_tables: PrimaryMap::new(),
+ dfg: DataFlowGraph::new(),
+ layout: Layout::new(),
+ encodings: SecondaryMap::new(),
+ locations: SecondaryMap::new(),
+ entry_diversions: EntryRegDiversions::new(),
+ offsets: SecondaryMap::new(),
+ jt_offsets: SecondaryMap::new(),
+ srclocs: SecondaryMap::new(),
+ prologue_end: None,
+ epilogues_start: Vec::new(),
+ stack_limit: None,
+ }
+ }
+
+ /// Clear all data structures in this function.
+ pub fn clear(&mut self) {
+ self.signature.clear(CallConv::Fast);
+ self.stack_slots.clear();
+ self.global_values.clear();
+ self.heaps.clear();
+ self.tables.clear();
+ self.jump_tables.clear();
+ self.dfg.clear();
+ self.layout.clear();
+ self.encodings.clear();
+ self.locations.clear();
+ self.entry_diversions.clear();
+ self.offsets.clear();
+ self.jt_offsets.clear();
+ self.srclocs.clear();
+ self.prologue_end = None;
+ self.epilogues_start.clear();
+ self.stack_limit = None;
+ }
+
+ /// Create a new empty, anonymous function with a Fast calling convention.
+ pub fn new() -> Self {
+ Self::with_name_signature(ExternalName::default(), Signature::new(CallConv::Fast))
+ }
+
+ /// Creates a jump table in the function, to be used by `br_table` instructions.
+ pub fn create_jump_table(&mut self, data: JumpTableData) -> JumpTable {
+ self.jump_tables.push(data)
+ }
+
+ /// Creates a stack slot in the function, to be used by `stack_load`, `stack_store` and
+ /// `stack_addr` instructions.
+ pub fn create_stack_slot(&mut self, data: StackSlotData) -> StackSlot {
+ self.stack_slots.push(data)
+ }
+
+ /// Adds a signature which can later be used to declare an external function import.
+ pub fn import_signature(&mut self, signature: Signature) -> SigRef {
+ self.dfg.signatures.push(signature)
+ }
+
+ /// Declare an external function import.
+ pub fn import_function(&mut self, data: ExtFuncData) -> FuncRef {
+ self.dfg.ext_funcs.push(data)
+ }
+
+ /// Declares a global value accessible to the function.
+ pub fn create_global_value(&mut self, data: GlobalValueData) -> GlobalValue {
+ self.global_values.push(data)
+ }
+
+ /// Declares a heap accessible to the function.
+ pub fn create_heap(&mut self, data: HeapData) -> Heap {
+ self.heaps.push(data)
+ }
+
+ /// Declares a table accessible to the function.
+ pub fn create_table(&mut self, data: TableData) -> Table {
+ self.tables.push(data)
+ }
+
+ /// Return an object that can display this function with correct ISA-specific annotations.
+ pub fn display<'a, I: Into<Option<&'a dyn TargetIsa>>>(
+ &'a self,
+ isa: I,
+ ) -> DisplayFunction<'a> {
+ DisplayFunction(self, isa.into().into())
+ }
+
+ /// Return an object that can display this function with correct ISA-specific annotations.
+ pub fn display_with<'a>(
+ &'a self,
+ annotations: DisplayFunctionAnnotations<'a>,
+ ) -> DisplayFunction<'a> {
+ DisplayFunction(self, annotations)
+ }
+
+ /// Find a presumed unique special-purpose function parameter value.
+ ///
+ /// Returns the value of the last `purpose` parameter, or `None` if no such parameter exists.
+ pub fn special_param(&self, purpose: ir::ArgumentPurpose) -> Option<ir::Value> {
+ let entry = self.layout.entry_block().expect("Function is empty");
+ self.signature
+ .special_param_index(purpose)
+ .map(|i| self.dfg.block_params(entry)[i])
+ }
+
+ /// Get an iterator over the instructions in `block`, including offsets and encoded instruction
+ /// sizes.
+ ///
+ /// The iterator returns `(offset, inst, size)` tuples, where `offset` if the offset in bytes
+ /// from the beginning of the function to the instruction, and `size` is the size of the
+ /// instruction in bytes, or 0 for unencoded instructions.
+ ///
+ /// This function can only be used after the code layout has been computed by the
+ /// `binemit::relax_branches()` function.
+ pub fn inst_offsets<'a>(&'a self, block: Block, encinfo: &EncInfo) -> InstOffsetIter<'a> {
+ assert!(
+ !self.offsets.is_empty(),
+ "Code layout must be computed first"
+ );
+ let mut divert = RegDiversions::new();
+ divert.at_block(&self.entry_diversions, block);
+ InstOffsetIter {
+ encinfo: encinfo.clone(),
+ func: self,
+ divert,
+ encodings: &self.encodings,
+ offset: self.offsets[block],
+ iter: self.layout.block_insts(block),
+ }
+ }
+
+ /// Wrapper around `encode` which assigns `inst` the resulting encoding.
+ pub fn update_encoding(&mut self, inst: ir::Inst, isa: &dyn TargetIsa) -> Result<(), Legalize> {
+ if isa.get_mach_backend().is_some() {
+ Ok(())
+ } else {
+ self.encode(inst, isa).map(|e| self.encodings[inst] = e)
+ }
+ }
+
+ /// Wrapper around `TargetIsa::encode` for encoding an existing instruction
+ /// in the `Function`.
+ pub fn encode(&self, inst: ir::Inst, isa: &dyn TargetIsa) -> Result<Encoding, Legalize> {
+ if isa.get_mach_backend().is_some() {
+ Ok(Encoding::new(0, 0))
+ } else {
+ isa.encode(&self, &self.dfg[inst], self.dfg.ctrl_typevar(inst))
+ }
+ }
+
+ /// Starts collection of debug information.
+ pub fn collect_debug_info(&mut self) {
+ self.dfg.collect_debug_info();
+ }
+
+ /// Changes the destination of a jump or branch instruction.
+ /// Does nothing if called with a non-jump or non-branch instruction.
+ ///
+ /// Note that this method ignores multi-destination branches like `br_table`.
+ pub fn change_branch_destination(&mut self, inst: Inst, new_dest: Block) {
+ match self.dfg[inst].branch_destination_mut() {
+ None => (),
+ Some(inst_dest) => *inst_dest = new_dest,
+ }
+ }
+
+ /// Rewrite the branch destination to `new_dest` if the destination matches `old_dest`.
+ /// Does nothing if called with a non-jump or non-branch instruction.
+ ///
+ /// Unlike [change_branch_destination](Function::change_branch_destination), this method rewrite the destinations of
+ /// multi-destination branches like `br_table`.
+ pub fn rewrite_branch_destination(&mut self, inst: Inst, old_dest: Block, new_dest: Block) {
+ match self.dfg.analyze_branch(inst) {
+ BranchInfo::SingleDest(dest, ..) => {
+ if dest == old_dest {
+ self.change_branch_destination(inst, new_dest);
+ }
+ }
+
+ BranchInfo::Table(table, default_dest) => {
+ self.jump_tables[table].iter_mut().for_each(|entry| {
+ if *entry == old_dest {
+ *entry = new_dest;
+ }
+ });
+
+ if default_dest == Some(old_dest) {
+ match &mut self.dfg[inst] {
+ InstructionData::BranchTable { destination, .. } => {
+ *destination = new_dest;
+ }
+ _ => panic!(
+ "Unexpected instruction {} having default destination",
+ self.dfg.display_inst(inst, None)
+ ),
+ }
+ }
+ }
+
+ BranchInfo::NotABranch => {}
+ }
+ }
+
+ /// Checks that the specified block can be encoded as a basic block.
+ ///
+ /// On error, returns the first invalid instruction and an error message.
+ pub fn is_block_basic(&self, block: Block) -> Result<(), (Inst, &'static str)> {
+ let dfg = &self.dfg;
+ let inst_iter = self.layout.block_insts(block);
+
+ // Ignore all instructions prior to the first branch.
+ let mut inst_iter = inst_iter.skip_while(|&inst| !dfg[inst].opcode().is_branch());
+
+ // A conditional branch is permitted in a basic block only when followed
+ // by a terminal jump or fallthrough instruction.
+ if let Some(_branch) = inst_iter.next() {
+ if let Some(next) = inst_iter.next() {
+ match dfg[next].opcode() {
+ Opcode::Fallthrough | Opcode::Jump => (),
+ _ => return Err((next, "post-branch instruction not fallthrough or jump")),
+ }
+ }
+ }
+
+ Ok(())
+ }
+
    /// Returns true if the function doesn't call any other functions. This is not
    /// to be confused with a "leaf function" in Windows terminology.
    pub fn is_leaf(&self) -> bool {
        // Conservative result: if there's at least one function signature referenced in this
        // function, assume it is not a leaf.
        // NOTE(review): a signature may exist without a corresponding call
        // instruction, so this can report `false` for a function that never
        // actually calls out — conservative, never wrong in the unsafe direction.
        self.dfg.signatures.is_empty()
    }
+
+ /// Replace the `dst` instruction's data with the `src` instruction's data
+ /// and then remove `src`.
+ ///
+ /// `src` and its result values should not be used at all, as any uses would
+ /// be left dangling after calling this method.
+ ///
+ /// `src` and `dst` must have the same number of resulting values, and
+ /// `src`'s i^th value must have the same type as `dst`'s i^th value.
+ pub fn transplant_inst(&mut self, dst: Inst, src: Inst) {
+ debug_assert_eq!(
+ self.dfg.inst_results(dst).len(),
+ self.dfg.inst_results(src).len()
+ );
+ debug_assert!(self
+ .dfg
+ .inst_results(dst)
+ .iter()
+ .zip(self.dfg.inst_results(src))
+ .all(|(a, b)| self.dfg.value_type(*a) == self.dfg.value_type(*b)));
+
+ self.dfg[dst] = self.dfg[src].clone();
+ self.layout.remove_inst(src);
+ }
+}
+
+/// Additional annotations for function display.
+#[derive(Default)]
+pub struct DisplayFunctionAnnotations<'a> {
+ /// Enable ISA annotations.
+ pub isa: Option<&'a dyn TargetIsa>,
+
+ /// Enable value labels annotations.
+ pub value_ranges: Option<&'a ValueLabelsRanges>,
+}
+
+impl<'a> From<Option<&'a dyn TargetIsa>> for DisplayFunctionAnnotations<'a> {
+ fn from(isa: Option<&'a dyn TargetIsa>) -> DisplayFunctionAnnotations {
+ DisplayFunctionAnnotations {
+ isa,
+ value_ranges: None,
+ }
+ }
+}
+
/// Wrapper type capable of displaying a `Function` with correct ISA annotations.
pub struct DisplayFunction<'a>(&'a Function, DisplayFunctionAnnotations<'a>);

impl<'a> fmt::Display for DisplayFunction<'a> {
    fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
        // Delegate to the shared writer, passing the stored annotations.
        write_function(fmt, self.0, &self.1)
    }
}

// Plain `Display` renders the function without any annotations; use
// `DisplayFunction` for annotated output.
impl fmt::Display for Function {
    fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
        write_function(fmt, self, &DisplayFunctionAnnotations::default())
    }
}

// `Debug` output is identical to the un-annotated `Display` output.
impl fmt::Debug for Function {
    fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
        write_function(fmt, self, &DisplayFunctionAnnotations::default())
    }
}
+
/// Iterator returning instruction offsets and sizes: `(offset, inst, size)`.
pub struct InstOffsetIter<'a> {
    // Encoding info used to compute the byte size of each instruction.
    encinfo: EncInfo,
    // Register diversions, updated as instructions are visited in layout order.
    divert: RegDiversions,
    func: &'a Function,
    encodings: &'a InstEncodings,
    // Code offset of the next instruction to be yielded.
    offset: CodeOffset,
    iter: ir::layout::Insts<'a>,
}
+
+impl<'a> Iterator for InstOffsetIter<'a> {
+ type Item = (CodeOffset, ir::Inst, CodeOffset);
+
+ fn next(&mut self) -> Option<Self::Item> {
+ self.iter.next().map(|inst| {
+ self.divert.apply(&self.func.dfg[inst]);
+ let byte_size =
+ self.encinfo
+ .byte_size(self.encodings[inst], inst, &self.divert, self.func);
+ let offset = self.offset;
+ self.offset += byte_size;
+ (offset, inst, byte_size)
+ })
+ }
+}
diff --git a/third_party/rust/cranelift-codegen/src/ir/globalvalue.rs b/third_party/rust/cranelift-codegen/src/ir/globalvalue.rs
new file mode 100644
index 0000000000..6c9b2d7bcf
--- /dev/null
+++ b/third_party/rust/cranelift-codegen/src/ir/globalvalue.rs
@@ -0,0 +1,155 @@
+//! Global values.
+
+use crate::ir::immediates::{Imm64, Offset32};
+use crate::ir::{ExternalName, GlobalValue, Type};
+use crate::isa::TargetIsa;
+use crate::machinst::RelocDistance;
+use core::fmt;
+
/// Information about a global value declaration.
///
/// Each variant describes how the value is obtained: the VM context address,
/// a load through another global value, an immediate offset from one, or a
/// symbolic name resolved at link time. See the `Display` impl below for the
/// textual form of each variant.
#[derive(Clone)]
pub enum GlobalValueData {
    /// Value is the address of the VM context struct.
    VMContext,

    /// Value is pointed to by another global value.
    ///
    /// The `base` global value is assumed to contain a pointer. This global value is computed
    /// by loading from memory at that pointer value. The memory must be accessible, and
    /// naturally aligned to hold a value of the type. The data at this address is assumed
    /// to never change while the current function is executing.
    Load {
        /// The base pointer global value.
        base: GlobalValue,

        /// Offset added to the base pointer before doing the load.
        offset: Offset32,

        /// Type of the loaded value.
        global_type: Type,

        /// Specifies whether the memory that this refers to is readonly, allowing for the
        /// elimination of redundant loads.
        readonly: bool,
    },

    /// Value is an offset from another global value.
    IAddImm {
        /// The base pointer global value.
        base: GlobalValue,

        /// Byte offset to be added to the value.
        offset: Imm64,

        /// Type of the iadd (and thus of this global value).
        global_type: Type,
    },

    /// Value is symbolic, meaning it's a name which will be resolved to an
    /// actual value later (eg. by linking). Cranelift itself does not interpret
    /// this name; it's used by embedders to link with other data structures.
    ///
    /// For now, symbolic values always have pointer type, and represent
    /// addresses, however in the future they could be used to represent other
    /// things as well.
    Symbol {
        /// The symbolic name.
        name: ExternalName,

        /// Offset from the symbol. This can be used instead of IAddImm to represent folding an
        /// offset into a symbol.
        offset: Imm64,

        /// Will this symbol be defined nearby, such that it will always be a certain distance
        /// away, after linking? If so, references to it can avoid going through a GOT. Note that
        /// symbols meant to be preemptible cannot be colocated.
        ///
        /// If `true`, some backends may use relocation forms that have limited range: for example,
        /// a +/- 2^27-byte range on AArch64. See the documentation for
        /// [`RelocDistance`](crate::machinst::RelocDistance) for more details.
        colocated: bool,

        /// Does this symbol refer to a thread local storage value?
        tls: bool,
    },
}
+
+impl GlobalValueData {
+ /// Assume that `self` is an `GlobalValueData::Symbol` and return its name.
+ pub fn symbol_name(&self) -> &ExternalName {
+ match *self {
+ Self::Symbol { ref name, .. } => name,
+ _ => panic!("only symbols have names"),
+ }
+ }
+
+ /// Return the type of this global.
+ pub fn global_type(&self, isa: &dyn TargetIsa) -> Type {
+ match *self {
+ Self::VMContext { .. } | Self::Symbol { .. } => isa.pointer_type(),
+ Self::IAddImm { global_type, .. } | Self::Load { global_type, .. } => global_type,
+ }
+ }
+
+ /// If this global references a symbol, return an estimate of the relocation distance,
+ /// based on the `colocated` flag.
+ pub fn maybe_reloc_distance(&self) -> Option<RelocDistance> {
+ match self {
+ &GlobalValueData::Symbol {
+ colocated: true, ..
+ } => Some(RelocDistance::Near),
+ &GlobalValueData::Symbol {
+ colocated: false, ..
+ } => Some(RelocDistance::Far),
+ _ => None,
+ }
+ }
+}
+
impl fmt::Display for GlobalValueData {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        match *self {
            Self::VMContext => write!(f, "vmctx"),
            // `Offset32` displays as the empty string for 0, or with an
            // explicit sign otherwise, so no separator is needed after `base`.
            Self::Load {
                base,
                offset,
                global_type,
                readonly,
            } => write!(
                f,
                "load.{} notrap aligned {}{}{}",
                global_type,
                if readonly { "readonly " } else { "" },
                base,
                offset
            ),
            Self::IAddImm {
                global_type,
                base,
                offset,
            } => write!(f, "iadd_imm.{} {}, {}", global_type, base, offset),
            Self::Symbol {
                ref name,
                offset,
                colocated,
                tls,
            } => {
                write!(
                    f,
                    "symbol {}{}{}",
                    if colocated { "colocated " } else { "" },
                    if tls { "tls " } else { "" },
                    name
                )?;
                let offset_val: i64 = offset.into();
                // Positive offsets need an explicit '+'; negative values carry
                // their own '-' from `Imm64`'s `Display` (which switches to
                // two's-complement hex for magnitudes >= 10000). Zero offsets
                // print nothing at all.
                if offset_val > 0 {
                    write!(f, "+")?;
                }
                if offset_val != 0 {
                    write!(f, "{}", offset)?;
                }
                Ok(())
            }
        }
    }
}
diff --git a/third_party/rust/cranelift-codegen/src/ir/heap.rs b/third_party/rust/cranelift-codegen/src/ir/heap.rs
new file mode 100644
index 0000000000..8a4b4e84b9
--- /dev/null
+++ b/third_party/rust/cranelift-codegen/src/ir/heap.rs
@@ -0,0 +1,62 @@
+//! Heaps.
+
+use crate::ir::immediates::Uimm64;
+use crate::ir::{GlobalValue, Type};
+use core::fmt;
+
/// Information about a heap declaration.
///
/// Declares the heap's base, guaranteed minimum size, guard-page size, and
/// bounds-checking style; rendered by the `Display` impl below as e.g.
/// `static <base>, min <n>, bound <n>, offset_guard <n>, index_type <ty>`.
#[derive(Clone)]
pub struct HeapData {
    /// The address of the start of the heap's storage.
    pub base: GlobalValue,

    /// Guaranteed minimum heap size in bytes. Heap accesses before `min_size` don't need bounds
    /// checking.
    pub min_size: Uimm64,

    /// Size in bytes of the offset-guard pages following the heap.
    pub offset_guard_size: Uimm64,

    /// Heap style, with additional style-specific info.
    pub style: HeapStyle,

    /// The index type for the heap.
    pub index_type: Type,
}

/// Style of heap including style-specific information.
#[derive(Clone)]
pub enum HeapStyle {
    /// A dynamic heap can be relocated to a different base address when it is grown.
    Dynamic {
        /// Global value providing the current bound of the heap in bytes.
        bound_gv: GlobalValue,
    },

    /// A static heap has a fixed base address and a number of not-yet-allocated pages before the
    /// offset-guard pages.
    Static {
        /// Heap bound in bytes. The offset-guard pages are allocated after the bound.
        bound: Uimm64,
    },
}
+
+impl fmt::Display for HeapData {
+ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+ f.write_str(match self.style {
+ HeapStyle::Dynamic { .. } => "dynamic",
+ HeapStyle::Static { .. } => "static",
+ })?;
+
+ write!(f, " {}, min {}", self.base, self.min_size)?;
+ match self.style {
+ HeapStyle::Dynamic { bound_gv } => write!(f, ", bound {}", bound_gv)?,
+ HeapStyle::Static { bound } => write!(f, ", bound {}", bound)?,
+ }
+ write!(
+ f,
+ ", offset_guard {}, index_type {}",
+ self.offset_guard_size, self.index_type
+ )
+ }
+}
diff --git a/third_party/rust/cranelift-codegen/src/ir/immediates.rs b/third_party/rust/cranelift-codegen/src/ir/immediates.rs
new file mode 100644
index 0000000000..f575ea361f
--- /dev/null
+++ b/third_party/rust/cranelift-codegen/src/ir/immediates.rs
@@ -0,0 +1,1312 @@
+//! Immediate operands for Cranelift instructions
+//!
+//! This module defines the types of immediate operands that can appear on Cranelift instructions.
+//! Each type here should have a corresponding definition in the
+//! `cranelift-codegen/meta/src/shared/immediates` crate in the meta language.
+
+use alloc::vec::Vec;
+use core::cmp::Ordering;
+use core::fmt::{self, Display, Formatter};
+use core::str::FromStr;
+use core::{i32, u32};
+#[cfg(feature = "enable-serde")]
+use serde::{Deserialize, Serialize};
+
/// Convert a type into a vector of bytes; all implementors in this file must use little-endian
/// byte order to match WebAssembly's little-endianness.
pub trait IntoBytes {
    /// Return the little-endian byte representation of the implementing type.
    fn into_bytes(self) -> Vec<u8>;
}

impl IntoBytes for u8 {
    fn into_bytes(self) -> Vec<u8> {
        // A single byte is its own little-endian representation.
        self.to_le_bytes().to_vec()
    }
}

impl IntoBytes for i16 {
    fn into_bytes(self) -> Vec<u8> {
        self.to_le_bytes().to_vec()
    }
}

impl IntoBytes for i32 {
    fn into_bytes(self) -> Vec<u8> {
        self.to_le_bytes().to_vec()
    }
}

impl IntoBytes for Vec<u8> {
    fn into_bytes(self) -> Vec<u8> {
        // Already a byte vector; identity conversion.
        self
    }
}
+
/// 64-bit immediate signed integer operand.
///
/// An `Imm64` operand can also be used to represent immediate values of smaller integer types by
/// sign-extending to `i64`.
#[derive(Copy, Clone, PartialEq, Eq, Debug, Hash)]
pub struct Imm64(i64);

impl Imm64 {
    /// Create a new `Imm64` representing the signed number `x`.
    pub fn new(x: i64) -> Self {
        Self(x)
    }

    /// Return self negated (two's complement wrapping negation).
    pub fn wrapping_neg(self) -> Self {
        Self(self.0.wrapping_neg())
    }

    /// Return the raw bits of this immediate as an `i64`.
    pub fn bits(&self) -> i64 {
        self.0
    }

    /// Sign extend this immediate as if it were a signed integer of the given
    /// power-of-two width. Widths of 64 or more are a no-op.
    pub fn sign_extend_from_width(&mut self, bit_width: u16) {
        debug_assert!(bit_width.is_power_of_two());

        if bit_width >= 64 {
            return;
        }

        let bit_width = bit_width as i64;
        let delta = 64 - bit_width;
        // Shifting left then arithmetic-right replicates the narrow value's
        // sign bit across the upper bits.
        let sign_extended = (self.0 << delta) >> delta;
        *self = Self(sign_extended);
    }
}

// Prefer `From` over a hand-written `Into` impl: the standard blanket impl
// derives `Into<i64> for Imm64` from this, so existing `.into()` call sites
// keep working unchanged (Rust API guidelines C-CONV-TRAITS).
impl From<Imm64> for i64 {
    fn from(val: Imm64) -> i64 {
        val.0
    }
}
+
+impl IntoBytes for Imm64 {
+ fn into_bytes(self) -> Vec<u8> {
+ self.0.to_le_bytes().to_vec()
+ }
+}
+
+impl From<i64> for Imm64 {
+ fn from(x: i64) -> Self {
+ Self(x)
+ }
+}
+
+impl Display for Imm64 {
+ fn fmt(&self, f: &mut Formatter) -> fmt::Result {
+ let x = self.0;
+ if -10_000 < x && x < 10_000 {
+ // Use decimal for small numbers.
+ write!(f, "{}", x)
+ } else {
+ write_hex(x as u64, f)
+ }
+ }
+}
+
+/// Parse a 64-bit signed number.
+fn parse_i64(s: &str) -> Result<i64, &'static str> {
+ let negative = s.starts_with('-');
+ let s2 = if negative || s.starts_with('+') {
+ &s[1..]
+ } else {
+ s
+ };
+
+ let mut value = parse_u64(s2)?;
+
+ // We support the range-and-a-half from -2^63 .. 2^64-1.
+ if negative {
+ value = value.wrapping_neg();
+ // Don't allow large negative values to wrap around and become positive.
+ if value as i64 > 0 {
+ return Err("Negative number too small");
+ }
+ }
+ Ok(value as i64)
+}
+
+impl FromStr for Imm64 {
+ type Err = &'static str;
+
+ // Parse a decimal or hexadecimal `Imm64`, formatted as above.
+ fn from_str(s: &str) -> Result<Self, &'static str> {
+ parse_i64(s).map(Self::new)
+ }
+}
+
/// 64-bit immediate unsigned integer operand.
///
/// A `Uimm64` operand can also be used to represent immediate values of smaller integer types by
/// zero-extending to `i64`.
#[derive(Copy, Clone, PartialEq, Eq, Debug, Hash)]
pub struct Uimm64(u64);

impl Uimm64 {
    /// Create a new `Uimm64` representing the unsigned number `x`.
    pub fn new(x: u64) -> Self {
        Self(x)
    }

    /// Return self negated (two's complement wrapping negation).
    pub fn wrapping_neg(self) -> Self {
        Self(self.0.wrapping_neg())
    }
}

// Prefer `From` over a hand-written `Into` impl: the standard blanket impl
// derives `Into<u64> for Uimm64` from this, so existing `.into()` call sites
// keep working unchanged (Rust API guidelines C-CONV-TRAITS).
impl From<Uimm64> for u64 {
    fn from(val: Uimm64) -> u64 {
        val.0
    }
}

impl From<u64> for Uimm64 {
    fn from(x: u64) -> Self {
        Self(x)
    }
}
+
/// Hexadecimal with a multiple of 4 digits and group separators:
///
///     0xfff0
///     0x0001_ffff
///     0xffff_ffff_fff8_4400
///
/// Zero is rendered as `0x0000`.
fn write_hex(x: u64, f: &mut Formatter) -> fmt::Result {
    // Bit position of the most significant 16-bit group containing a set bit.
    // `saturating_sub` avoids the debug-mode underflow panic the original
    // `64 - leading_zeros - 1` expression hit when `x == 0` (leading_zeros
    // is 64 there); for zero we simply emit the single group "0x0000".
    let mut pos = 63u32.saturating_sub(x.leading_zeros()) & 0xf0;
    write!(f, "0x{:04x}", (x >> pos) & 0xffff)?;
    while pos > 0 {
        pos -= 16;
        write!(f, "_{:04x}", (x >> pos) & 0xffff)?;
    }
    Ok(())
}
+
+impl Display for Uimm64 {
+ fn fmt(&self, f: &mut Formatter) -> fmt::Result {
+ let x = self.0;
+ if x < 10_000 {
+ // Use decimal for small numbers.
+ write!(f, "{}", x)
+ } else {
+ write_hex(x, f)
+ }
+ }
+}
+
/// Parse a 64-bit unsigned number, in decimal or `0x`-prefixed hexadecimal.
///
/// Embedded underscores are accepted as group separators in either form.
fn parse_u64(s: &str) -> Result<u64, &'static str> {
    let mut value: u64 = 0;
    let mut digits = 0;

    if s.starts_with("-0x") {
        return Err("Invalid character in hexadecimal number");
    } else if s.starts_with("0x") {
        // Hexadecimal.
        for ch in s[2..].chars() {
            match ch.to_digit(16) {
                Some(digit) => {
                    digits += 1;
                    if digits > 16 {
                        return Err("Too many hexadecimal digits");
                    }
                    // This can't overflow given the 16-digit limit.
                    value = (value << 4) | u64::from(digit);
                }
                None => {
                    // Allow embedded underscores, but fail on anything else.
                    if ch != '_' {
                        return Err("Invalid character in hexadecimal number");
                    }
                }
            }
        }
    } else {
        // Decimal number.
        for ch in s.chars() {
            // Radix 10 here: the original used `to_digit(16)`, which silently
            // accepted hex letters as decimal "digits" (e.g. "1a" parsed as
            // 1*10 + 10 = 20) instead of rejecting them.
            match ch.to_digit(10) {
                Some(digit) => {
                    digits += 1;
                    value = value
                        .checked_mul(10)
                        .and_then(|v| v.checked_add(u64::from(digit)))
                        .ok_or("Too large decimal number")?;
                }
                None => {
                    // Allow embedded underscores, but fail on anything else.
                    if ch != '_' {
                        return Err("Invalid character in decimal number");
                    }
                }
            }
        }
    }

    if digits == 0 {
        return Err("No digits in number");
    }

    Ok(value)
}
+
impl FromStr for Uimm64 {
    type Err = &'static str;

    // Parse a decimal or hexadecimal `Uimm64`, formatted as above.
    // Delegates to `parse_u64`: hexadecimal requires a `0x` prefix and
    // embedded underscores are accepted as group separators.
    fn from_str(s: &str) -> Result<Self, &'static str> {
        parse_u64(s).map(Self::new)
    }
}
+
/// 8-bit unsigned integer immediate operand.
///
/// This is used to indicate lane indexes typically.
///
/// Note that this is a plain `u8` alias, not a newtype wrapper like the other
/// immediates in this module.
pub type Uimm8 = u8;
+
/// A 32-bit unsigned integer immediate operand.
///
/// This is used to represent sizes of memory objects.
#[derive(Copy, Clone, PartialEq, Eq, Debug, Hash)]
pub struct Uimm32(u32);

// Prefer `From` over hand-written `Into` impls: the standard blanket impls
// derive the corresponding `Into` conversions from these, so existing
// `.into()` call sites keep working unchanged (Rust API guidelines
// C-CONV-TRAITS).
impl From<Uimm32> for u32 {
    fn from(val: Uimm32) -> u32 {
        val.0
    }
}

impl From<Uimm32> for i64 {
    fn from(val: Uimm32) -> i64 {
        // Zero-extends; every u32 fits in an i64.
        i64::from(val.0)
    }
}

impl From<u32> for Uimm32 {
    fn from(x: u32) -> Self {
        Self(x)
    }
}
+
+impl Display for Uimm32 {
+ fn fmt(&self, f: &mut Formatter) -> fmt::Result {
+ if self.0 < 10_000 {
+ write!(f, "{}", self.0)
+ } else {
+ write_hex(u64::from(self.0), f)
+ }
+ }
+}
+
+impl FromStr for Uimm32 {
+ type Err = &'static str;
+
+ // Parse a decimal or hexadecimal `Uimm32`, formatted as above.
+ fn from_str(s: &str) -> Result<Self, &'static str> {
+ parse_i64(s).and_then(|x| {
+ if 0 <= x && x <= i64::from(u32::MAX) {
+ Ok(Self(x as u32))
+ } else {
+ Err("Uimm32 out of range")
+ }
+ })
+ }
+}
+
/// A 128-bit immediate operand.
///
/// This is used as an immediate value in SIMD instructions.
#[derive(Copy, Clone, PartialEq, Eq, Debug, Hash)]
#[cfg_attr(feature = "enable-serde", derive(Serialize, Deserialize))]
pub struct V128Imm(pub [u8; 16]);

impl V128Imm {
    /// Iterate over the bytes in the constant.
    pub fn bytes(&self) -> impl Iterator<Item = &u8> {
        self.0.iter()
    }

    /// Convert the immediate into a vector.
    pub fn to_vec(self) -> Vec<u8> {
        Vec::from(&self.0[..])
    }

    /// Convert the immediate into a slice.
    pub fn as_slice(&self) -> &[u8] {
        &self.0
    }
}

impl From<&[u8]> for V128Imm {
    /// Build from a byte slice; panics unless the slice is exactly 16 bytes.
    fn from(slice: &[u8]) -> Self {
        assert_eq!(slice.len(), 16);
        let mut bytes = [0; 16];
        bytes.copy_from_slice(slice);
        Self(bytes)
    }
}
+
/// 32-bit signed immediate offset.
///
/// This is used to encode an immediate offset for load/store instructions. All supported ISAs have
/// a maximum load/store offset that fits in an `i32`.
#[derive(Copy, Clone, PartialEq, Eq, Debug, Hash)]
pub struct Offset32(i32);

impl Offset32 {
    /// Create a new `Offset32` representing the signed number `x`.
    pub fn new(x: i32) -> Self {
        Self(x)
    }

    /// Create a new `Offset32` representing the signed number `x`, or `None`
    /// if `x` does not fit in an `i32`.
    pub fn try_from_i64(x: i64) -> Option<Self> {
        // Round-trip cast checks that no truncation occurred.
        let truncated = x as i32;
        if i64::from(truncated) == x {
            Some(Self::new(truncated))
        } else {
            None
        }
    }

    /// Add in the signed number `x` if possible. Returns `None` if `x` does
    /// not fit in an `i32`, or if the addition would overflow.
    pub fn try_add_i64(self, x: i64) -> Option<Self> {
        let truncated = x as i32;
        if i64::from(truncated) == x {
            self.0.checked_add(truncated).map(Self::new)
        } else {
            None
        }
    }
}

// Prefer `From` over hand-written `Into` impls: the standard blanket impls
// derive the corresponding `Into` conversions from these, so existing
// `.into()` call sites keep working unchanged (Rust API guidelines
// C-CONV-TRAITS).
impl From<Offset32> for i32 {
    fn from(val: Offset32) -> i32 {
        val.0
    }
}

impl From<Offset32> for i64 {
    fn from(val: Offset32) -> i64 {
        // Sign-extends; every i32 fits in an i64.
        i64::from(val.0)
    }
}

impl From<i32> for Offset32 {
    fn from(x: i32) -> Self {
        Self(x)
    }
}
+
+impl Display for Offset32 {
+ fn fmt(&self, f: &mut Formatter) -> fmt::Result {
+ // 0 displays as an empty offset.
+ if self.0 == 0 {
+ return Ok(());
+ }
+
+ // Always include a sign.
+ write!(f, "{}", if self.0 < 0 { '-' } else { '+' })?;
+
+ let val = i64::from(self.0).abs();
+ if val < 10_000 {
+ write!(f, "{}", val)
+ } else {
+ write_hex(val as u64, f)
+ }
+ }
+}
+
+impl FromStr for Offset32 {
+ type Err = &'static str;
+
+ // Parse a decimal or hexadecimal `Offset32`, formatted as above.
+ fn from_str(s: &str) -> Result<Self, &'static str> {
+ if !(s.starts_with('-') || s.starts_with('+')) {
+ return Err("Offset must begin with sign");
+ }
+ parse_i64(s).and_then(|x| {
+ if i64::from(i32::MIN) <= x && x <= i64::from(i32::MAX) {
+ Ok(Self::new(x as i32))
+ } else {
+ Err("Offset out of range")
+ }
+ })
+ }
+}
+
/// An IEEE binary32 immediate floating point value, represented as a u32
/// containing the bit pattern.
///
/// All bit patterns are allowed.
///
/// Note: the derived `PartialEq`/`Eq`/`Hash` compare the raw bits, so unlike
/// `f32`, two identical NaN patterns compare equal here and `+0.0 != -0.0`.
#[derive(Copy, Clone, Debug, Eq, PartialEq, Hash)]
#[repr(C)]
pub struct Ieee32(u32);

/// An IEEE binary64 immediate floating point value, represented as a u64
/// containing the bit pattern.
///
/// All bit patterns are allowed.
///
/// Note: equality and hashing are bitwise, as for `Ieee32`.
#[derive(Copy, Clone, Debug, Eq, PartialEq, Hash)]
#[repr(C)]
pub struct Ieee64(u64);
+
/// Format a floating point number in a way that is reasonably human-readable, and that can be
/// converted back to binary without any rounding issues. The hexadecimal formatting of normal and
/// subnormal numbers is compatible with C99 and the `printf "%a"` format specifier. The NaN and Inf
/// formats are not supported by C99.
///
/// The encoding parameters are:
///
/// w - exponent field width in bits
/// t - trailing significand field width in bits
///
fn format_float(bits: u64, w: u8, t: u8, f: &mut Formatter) -> fmt::Result {
    debug_assert!(w > 0 && w <= 16, "Invalid exponent range");
    debug_assert!(1 + w + t <= 64, "Too large IEEE format for u64");
    debug_assert!((t + w + 1).is_power_of_two(), "Unexpected IEEE format size");

    let max_e_bits = (1u64 << w) - 1;
    let t_bits = bits & ((1u64 << t) - 1); // Trailing significand.
    let e_bits = (bits >> t) & max_e_bits; // Biased exponent.
    let sign_bit = (bits >> (w + t)) & 1;

    let bias: i32 = (1 << (w - 1)) - 1;
    let e = e_bits as i32 - bias; // Unbiased exponent.
    let emin = 1 - bias; // Minimum exponent.

    // How many hexadecimal digits are needed for the trailing significand?
    let digits = (t + 3) / 4;
    // Trailing significand left-aligned in `digits` hexadecimal digits.
    let left_t_bits = t_bits << (4 * digits - t);

    // All formats share the leading sign.
    if sign_bit != 0 {
        write!(f, "-")?;
    }

    if e_bits == 0 {
        if t_bits == 0 {
            // Zero.
            write!(f, "0.0")
        } else {
            // Subnormal.
            write!(
                f,
                "0x0.{0:01$x}p{2}",
                left_t_bits,
                usize::from(digits),
                emin
            )
        }
    } else if e_bits == max_e_bits {
        // Always print a `+` or `-` sign for these special values.
        // This makes them easier to parse as they can't be confused as identifiers.
        if sign_bit == 0 {
            write!(f, "+")?;
        }
        if t_bits == 0 {
            // Infinity.
            write!(f, "Inf")
        } else {
            // NaN. The MSB of the trailing significand distinguishes quiet
            // (set) from signaling (clear) NaNs; the rest is the payload.
            let payload = t_bits & ((1 << (t - 1)) - 1);
            if t_bits & (1 << (t - 1)) != 0 {
                // Quiet NaN.
                if payload != 0 {
                    write!(f, "NaN:0x{:x}", payload)
                } else {
                    write!(f, "NaN")
                }
            } else {
                // Signaling NaN.
                write!(f, "sNaN:0x{:x}", payload)
            }
        }
    } else {
        // Normal number.
        write!(f, "0x1.{0:01$x}p{2}", left_t_bits, usize::from(digits), e)
    }
}

/// Parse a float using the same format as `format_float` above.
///
/// The encoding parameters are:
///
/// w - exponent field width in bits
/// t - trailing significand field width in bits
///
fn parse_float(s: &str, w: u8, t: u8) -> Result<u64, &'static str> {
    debug_assert!(w > 0 && w <= 16, "Invalid exponent range");
    debug_assert!(1 + w + t <= 64, "Too large IEEE format for u64");
    debug_assert!((t + w + 1).is_power_of_two(), "Unexpected IEEE format size");

    let (sign_bit, s2) = if s.starts_with('-') {
        (1u64 << (t + w), &s[1..])
    } else if s.starts_with('+') {
        (0, &s[1..])
    } else {
        (0, s)
    };

    if !s2.starts_with("0x") {
        let max_e_bits = ((1u64 << w) - 1) << t;
        let quiet_bit = 1u64 << (t - 1);

        // The only decimal encoding allowed is 0.
        if s2 == "0.0" {
            return Ok(sign_bit);
        }

        if s2 == "Inf" {
            // +/- infinity: e = max, t = 0.
            return Ok(sign_bit | max_e_bits);
        }
        if s2 == "NaN" {
            // Canonical quiet NaN: e = max, t = quiet.
            return Ok(sign_bit | max_e_bits | quiet_bit);
        }
        if s2.starts_with("NaN:0x") {
            // Quiet NaN with payload.
            return match u64::from_str_radix(&s2[6..], 16) {
                Ok(payload) if payload < quiet_bit => {
                    Ok(sign_bit | max_e_bits | quiet_bit | payload)
                }
                _ => Err("Invalid NaN payload"),
            };
        }
        if s2.starts_with("sNaN:0x") {
            // Signaling NaN with payload.
            return match u64::from_str_radix(&s2[7..], 16) {
                Ok(payload) if 0 < payload && payload < quiet_bit => {
                    Ok(sign_bit | max_e_bits | payload)
                }
                _ => Err("Invalid sNaN payload"),
            };
        }

        return Err("Float must be hexadecimal");
    }
    let s3 = &s2[2..];

    let mut digits = 0u8;
    let mut digits_before_period: Option<u8> = None;
    let mut significand = 0u64;
    let mut exponent = 0i32;

    for (idx, ch) in s3.char_indices() {
        match ch {
            '.' => {
                // This is the radix point. There can only be one.
                if digits_before_period != None {
                    return Err("Multiple radix points");
                } else {
                    digits_before_period = Some(digits);
                }
            }
            'p' => {
                // The following exponent is a decimal number.
                // `idx` is the byte index of 'p'; the remainder of the string
                // is the exponent.
                let exp_str = &s3[1 + idx..];
                match exp_str.parse::<i16>() {
                    Ok(e) => {
                        exponent = i32::from(e);
                        break;
                    }
                    Err(_) => return Err("Bad exponent"),
                }
            }
            _ => match ch.to_digit(16) {
                Some(digit) => {
                    digits += 1;
                    if digits > 16 {
                        return Err("Too many digits");
                    }
                    significand = (significand << 4) | u64::from(digit);
                }
                None => return Err("Invalid character"),
            },
        }
    }

    if digits == 0 {
        return Err("No digits");
    }

    if significand == 0 {
        // This is +/- 0.0.
        return Ok(sign_bit);
    }

    // Number of bits appearing after the radix point.
    match digits_before_period {
        None => {} // No radix point present.
        Some(d) => exponent -= 4 * i32::from(digits - d),
    };

    // Normalize the significand and exponent.
    let significant_bits = (64 - significand.leading_zeros()) as u8;
    if significant_bits > t + 1 {
        let adjust = significant_bits - (t + 1);
        if significand & ((1u64 << adjust) - 1) != 0 {
            return Err("Too many significant bits");
        }
        // Adjust significand down.
        significand >>= adjust;
        exponent += i32::from(adjust);
    } else {
        let adjust = t + 1 - significant_bits;
        significand <<= adjust;
        exponent -= i32::from(adjust);
    }
    debug_assert_eq!(significand >> t, 1);

    // Trailing significand excludes the high bit.
    let t_bits = significand & ((1 << t) - 1);

    let max_exp = (1i32 << w) - 2;
    let bias: i32 = (1 << (w - 1)) - 1;
    // Rebase: so far we have "significand * 2^exponent" with an integer
    // significand; shift to the 1.t fractional form and apply the IEEE bias.
    exponent += bias + i32::from(t);

    if exponent > max_exp {
        Err("Magnitude too large")
    } else if exponent > 0 {
        // This is a normal number.
        let e_bits = (exponent as u64) << t;
        Ok(sign_bit | e_bits | t_bits)
    } else if 1 - exponent <= i32::from(t) {
        // This is a subnormal number: e = 0, t = significand bits.
        // Renormalize significand for exponent = 1.
        let adjust = 1 - exponent;
        if significand & ((1u64 << adjust) - 1) != 0 {
            Err("Subnormal underflow")
        } else {
            significand >>= adjust;
            Ok(sign_bit | significand)
        }
    } else {
        Err("Magnitude too small")
    }
}
+
+impl Ieee32 {
+ /// Create a new `Ieee32` containing the bits of `x`.
+ pub fn with_bits(x: u32) -> Self {
+ Self(x)
+ }
+
+ /// Create an `Ieee32` number representing `2.0^n`.
+ pub fn pow2<I: Into<i32>>(n: I) -> Self {
+ let n = n.into();
+ let w = 8;
+ let t = 23;
+ let bias = (1 << (w - 1)) - 1;
+ let exponent = (n + bias) as u32;
+ assert!(exponent > 0, "Underflow n={}", n);
+ assert!(exponent < (1 << w) + 1, "Overflow n={}", n);
+ Self(exponent << t)
+ }
+
+ /// Create an `Ieee32` number representing the greatest negative value
+ /// not convertable from f32 to a signed integer with width n.
+ pub fn fcvt_to_sint_negative_overflow<I: Into<i32>>(n: I) -> Self {
+ let n = n.into();
+ debug_assert!(n < 32);
+ debug_assert!(23 + 1 - n < 32);
+ Self::with_bits((1u32 << (32 - 1)) | Self::pow2(n - 1).0 | (1u32 << (23 + 1 - n)))
+ }
+
+ /// Return self negated.
+ pub fn neg(self) -> Self {
+ Self(self.0 ^ (1 << 31))
+ }
+
+ /// Create a new `Ieee32` representing the number `x`.
+ pub fn with_float(x: f32) -> Self {
+ Self(x.to_bits())
+ }
+
+ /// Get the bitwise representation.
+ pub fn bits(self) -> u32 {
+ self.0
+ }
+
+ /// Check if the value is a NaN.
+ pub fn is_nan(&self) -> bool {
+ f32::from_bits(self.0).is_nan()
+ }
+}
+
+impl PartialOrd for Ieee32 {
+ fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
+ f32::from_bits(self.0).partial_cmp(&f32::from_bits(other.0))
+ }
+}
+
+impl Display for Ieee32 {
+ fn fmt(&self, f: &mut Formatter) -> fmt::Result {
+ let bits: u32 = self.0;
+ format_float(u64::from(bits), 8, 23, f)
+ }
+}
+
+impl FromStr for Ieee32 {
+ type Err = &'static str;
+
+ fn from_str(s: &str) -> Result<Self, &'static str> {
+ match parse_float(s, 8, 23) {
+ Ok(b) => Ok(Self(b as u32)),
+ Err(s) => Err(s),
+ }
+ }
+}
+
+impl From<f32> for Ieee32 {
+ fn from(x: f32) -> Self {
+ Self::with_float(x)
+ }
+}
+
+impl IntoBytes for Ieee32 {
+ fn into_bytes(self) -> Vec<u8> {
+ self.0.to_le_bytes().to_vec()
+ }
+}
+
+impl Ieee64 {
+ /// Create a new `Ieee64` containing the bits of `x`.
+ pub fn with_bits(x: u64) -> Self {
+ Self(x)
+ }
+
+ /// Create an `Ieee64` number representing `2.0^n`.
+ pub fn pow2<I: Into<i64>>(n: I) -> Self {
+ let n = n.into();
+ let w = 11;
+ let t = 52;
+ let bias = (1 << (w - 1)) - 1;
+ let exponent = (n + bias) as u64;
+ assert!(exponent > 0, "Underflow n={}", n);
+ assert!(exponent < (1 << w) + 1, "Overflow n={}", n);
+ Self(exponent << t)
+ }
+
+ /// Create an `Ieee64` number representing the greatest negative value
+ /// not convertable from f64 to a signed integer with width n.
+ pub fn fcvt_to_sint_negative_overflow<I: Into<i64>>(n: I) -> Self {
+ let n = n.into();
+ debug_assert!(n < 64);
+ debug_assert!(52 + 1 - n < 64);
+ Self::with_bits((1u64 << (64 - 1)) | Self::pow2(n - 1).0 | (1u64 << (52 + 1 - n)))
+ }
+
+ /// Return self negated.
+ pub fn neg(self) -> Self {
+ Self(self.0 ^ (1 << 63))
+ }
+
+ /// Create a new `Ieee64` representing the number `x`.
+ pub fn with_float(x: f64) -> Self {
+ Self(x.to_bits())
+ }
+
+ /// Get the bitwise representation.
+ pub fn bits(self) -> u64 {
+ self.0
+ }
+
+ /// Check if the value is a NaN. For [Ieee64], this means checking that the 11 exponent bits are
+ /// all set.
+ pub fn is_nan(&self) -> bool {
+ f64::from_bits(self.0).is_nan()
+ }
+}
+
+impl PartialOrd for Ieee64 {
+ fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
+ f64::from_bits(self.0).partial_cmp(&f64::from_bits(other.0))
+ }
+}
+
+impl Display for Ieee64 {
+ fn fmt(&self, f: &mut Formatter) -> fmt::Result {
+ let bits: u64 = self.0;
+ format_float(bits, 11, 52, f)
+ }
+}
+
+impl FromStr for Ieee64 {
+ type Err = &'static str;
+
+ fn from_str(s: &str) -> Result<Self, &'static str> {
+ match parse_float(s, 11, 52) {
+ Ok(b) => Ok(Self(b)),
+ Err(s) => Err(s),
+ }
+ }
+}
+
+impl From<f64> for Ieee64 {
+ fn from(x: f64) -> Self {
+ Self::with_float(x)
+ }
+}
+
+impl From<u64> for Ieee64 {
+ fn from(x: u64) -> Self {
+ Self::with_float(f64::from_bits(x))
+ }
+}
+
+impl IntoBytes for Ieee64 {
+ fn into_bytes(self) -> Vec<u8> {
+ self.0.to_le_bytes().to_vec()
+ }
+}
+
+// Unit tests for immediate formatting/parsing and the Ieee32/Ieee64 helpers.
+#[cfg(test)]
+mod tests {
+ use super::*;
+ use alloc::string::ToString;
+ use core::fmt::Display;
+ use core::mem;
+ use core::str::FromStr;
+ use core::{f32, f64};
+
+ // Small decimal values print in decimal; anything >= 10000 in magnitude
+ // switches to grouped hexadecimal.
+ #[test]
+ fn format_imm64() {
+ assert_eq!(Imm64(0).to_string(), "0");
+ assert_eq!(Imm64(9999).to_string(), "9999");
+ assert_eq!(Imm64(10000).to_string(), "0x2710");
+ assert_eq!(Imm64(-9999).to_string(), "-9999");
+ assert_eq!(Imm64(-10000).to_string(), "0xffff_ffff_ffff_d8f0");
+ assert_eq!(Imm64(0xffff).to_string(), "0xffff");
+ assert_eq!(Imm64(0x10000).to_string(), "0x0001_0000");
+ }
+
+ #[test]
+ fn format_uimm64() {
+ assert_eq!(Uimm64(0).to_string(), "0");
+ assert_eq!(Uimm64(9999).to_string(), "9999");
+ assert_eq!(Uimm64(10000).to_string(), "0x2710");
+ assert_eq!(Uimm64(-9999i64 as u64).to_string(), "0xffff_ffff_ffff_d8f1");
+ assert_eq!(
+ Uimm64(-10000i64 as u64).to_string(),
+ "0xffff_ffff_ffff_d8f0"
+ );
+ assert_eq!(Uimm64(0xffff).to_string(), "0xffff");
+ assert_eq!(Uimm64(0x10000).to_string(), "0x0001_0000");
+ }
+
+ // Verify that `text` can be parsed as a `T` into a value that displays as `want`.
+ fn parse_ok<T: FromStr + Display>(text: &str, want: &str)
+ where
+ <T as FromStr>::Err: Display,
+ {
+ match text.parse::<T>() {
+ Err(s) => panic!("\"{}\".parse() error: {}", text, s),
+ Ok(x) => assert_eq!(x.to_string(), want),
+ }
+ }
+
+ // Verify that `text` fails to parse as `T` with the error `msg`.
+ fn parse_err<T: FromStr + Display>(text: &str, msg: &str)
+ where
+ <T as FromStr>::Err: Display,
+ {
+ match text.parse::<T>() {
+ Err(s) => assert_eq!(s.to_string(), msg),
+ Ok(x) => panic!("Wanted Err({}), but got {}", msg, x),
+ }
+ }
+
+ #[test]
+ fn parse_imm64() {
+ parse_ok::<Imm64>("0", "0");
+ parse_ok::<Imm64>("1", "1");
+ parse_ok::<Imm64>("-0", "0");
+ parse_ok::<Imm64>("-1", "-1");
+ parse_ok::<Imm64>("0x0", "0");
+ parse_ok::<Imm64>("0xf", "15");
+ parse_ok::<Imm64>("-0x9", "-9");
+
+ // Probe limits.
+ parse_ok::<Imm64>("0xffffffff_ffffffff", "-1");
+ parse_ok::<Imm64>("0x80000000_00000000", "0x8000_0000_0000_0000");
+ parse_ok::<Imm64>("-0x80000000_00000000", "0x8000_0000_0000_0000");
+ parse_err::<Imm64>("-0x80000000_00000001", "Negative number too small");
+ parse_ok::<Imm64>("18446744073709551615", "-1");
+ parse_ok::<Imm64>("-9223372036854775808", "0x8000_0000_0000_0000");
+ // Overflow both the `checked_add` and `checked_mul`.
+ parse_err::<Imm64>("18446744073709551616", "Too large decimal number");
+ parse_err::<Imm64>("184467440737095516100", "Too large decimal number");
+ parse_err::<Imm64>("-9223372036854775809", "Negative number too small");
+
+ // Underscores are allowed where digits go.
+ parse_ok::<Imm64>("0_0", "0");
+ parse_ok::<Imm64>("-_10_0", "-100");
+ parse_ok::<Imm64>("_10_", "10");
+ parse_ok::<Imm64>("0x97_88_bb", "0x0097_88bb");
+ parse_ok::<Imm64>("0x_97_", "151");
+
+ parse_err::<Imm64>("", "No digits in number");
+ parse_err::<Imm64>("-", "No digits in number");
+ parse_err::<Imm64>("_", "No digits in number");
+ parse_err::<Imm64>("0x", "No digits in number");
+ parse_err::<Imm64>("0x_", "No digits in number");
+ parse_err::<Imm64>("-0x", "No digits in number");
+ parse_err::<Imm64>(" ", "Invalid character in decimal number");
+ parse_err::<Imm64>("0 ", "Invalid character in decimal number");
+ parse_err::<Imm64>(" 0", "Invalid character in decimal number");
+ parse_err::<Imm64>("--", "Invalid character in decimal number");
+ parse_err::<Imm64>("-0x-", "Invalid character in hexadecimal number");
+
+ // Hex count overflow.
+ parse_err::<Imm64>("0x0_0000_0000_0000_0000", "Too many hexadecimal digits");
+ }
+
+ #[test]
+ fn parse_uimm64() {
+ parse_ok::<Uimm64>("0", "0");
+ parse_ok::<Uimm64>("1", "1");
+ parse_ok::<Uimm64>("0x0", "0");
+ parse_ok::<Uimm64>("0xf", "15");
+ parse_ok::<Uimm64>("0xffffffff_fffffff7", "0xffff_ffff_ffff_fff7");
+
+ // Probe limits.
+ parse_ok::<Uimm64>("0xffffffff_ffffffff", "0xffff_ffff_ffff_ffff");
+ parse_ok::<Uimm64>("0x80000000_00000000", "0x8000_0000_0000_0000");
+ parse_ok::<Uimm64>("18446744073709551615", "0xffff_ffff_ffff_ffff");
+ // Overflow both the `checked_add` and `checked_mul`.
+ parse_err::<Uimm64>("18446744073709551616", "Too large decimal number");
+ parse_err::<Uimm64>("184467440737095516100", "Too large decimal number");
+
+ // Underscores are allowed where digits go.
+ parse_ok::<Uimm64>("0_0", "0");
+ parse_ok::<Uimm64>("_10_", "10");
+ parse_ok::<Uimm64>("0x97_88_bb", "0x0097_88bb");
+ parse_ok::<Uimm64>("0x_97_", "151");
+
+ // Unlike Imm64, Uimm64 rejects any minus sign.
+ parse_err::<Uimm64>("", "No digits in number");
+ parse_err::<Uimm64>("_", "No digits in number");
+ parse_err::<Uimm64>("0x", "No digits in number");
+ parse_err::<Uimm64>("0x_", "No digits in number");
+ parse_err::<Uimm64>("-", "Invalid character in decimal number");
+ parse_err::<Uimm64>("-0x", "Invalid character in hexadecimal number");
+ parse_err::<Uimm64>(" ", "Invalid character in decimal number");
+ parse_err::<Uimm64>("0 ", "Invalid character in decimal number");
+ parse_err::<Uimm64>(" 0", "Invalid character in decimal number");
+ parse_err::<Uimm64>("--", "Invalid character in decimal number");
+ parse_err::<Uimm64>("-0x-", "Invalid character in hexadecimal number");
+ parse_err::<Uimm64>("-0", "Invalid character in decimal number");
+ parse_err::<Uimm64>("-1", "Invalid character in decimal number");
+
+ // Hex count overflow.
+ parse_err::<Uimm64>("0x0_0000_0000_0000_0000", "Too many hexadecimal digits");
+ }
+
+ // Offset32 always prints a sign (except for zero, which prints nothing).
+ #[test]
+ fn format_offset32() {
+ assert_eq!(Offset32(0).to_string(), "");
+ assert_eq!(Offset32(1).to_string(), "+1");
+ assert_eq!(Offset32(-1).to_string(), "-1");
+ assert_eq!(Offset32(9999).to_string(), "+9999");
+ assert_eq!(Offset32(10000).to_string(), "+0x2710");
+ assert_eq!(Offset32(-9999).to_string(), "-9999");
+ assert_eq!(Offset32(-10000).to_string(), "-0x2710");
+ assert_eq!(Offset32(0xffff).to_string(), "+0xffff");
+ assert_eq!(Offset32(0x10000).to_string(), "+0x0001_0000");
+ }
+
+ #[test]
+ fn parse_offset32() {
+ parse_ok::<Offset32>("+0", "");
+ parse_ok::<Offset32>("+1", "+1");
+ parse_ok::<Offset32>("-0", "");
+ parse_ok::<Offset32>("-1", "-1");
+ parse_ok::<Offset32>("+0x0", "");
+ parse_ok::<Offset32>("+0xf", "+15");
+ parse_ok::<Offset32>("-0x9", "-9");
+ parse_ok::<Offset32>("-0x8000_0000", "-0x8000_0000");
+
+ parse_err::<Offset32>("+0x8000_0000", "Offset out of range");
+ }
+
+ #[test]
+ fn format_ieee32() {
+ assert_eq!(Ieee32::with_float(0.0).to_string(), "0.0");
+ assert_eq!(Ieee32::with_float(-0.0).to_string(), "-0.0");
+ assert_eq!(Ieee32::with_float(1.0).to_string(), "0x1.000000p0");
+ assert_eq!(Ieee32::with_float(1.5).to_string(), "0x1.800000p0");
+ assert_eq!(Ieee32::with_float(0.5).to_string(), "0x1.000000p-1");
+ assert_eq!(
+ Ieee32::with_float(f32::EPSILON).to_string(),
+ "0x1.000000p-23"
+ );
+ assert_eq!(Ieee32::with_float(f32::MIN).to_string(), "-0x1.fffffep127");
+ assert_eq!(Ieee32::with_float(f32::MAX).to_string(), "0x1.fffffep127");
+ // Smallest positive normal number.
+ assert_eq!(
+ Ieee32::with_float(f32::MIN_POSITIVE).to_string(),
+ "0x1.000000p-126"
+ );
+ // Subnormals.
+ assert_eq!(
+ Ieee32::with_float(f32::MIN_POSITIVE / 2.0).to_string(),
+ "0x0.800000p-126"
+ );
+ assert_eq!(
+ Ieee32::with_float(f32::MIN_POSITIVE * f32::EPSILON).to_string(),
+ "0x0.000002p-126"
+ );
+ assert_eq!(Ieee32::with_float(f32::INFINITY).to_string(), "+Inf");
+ assert_eq!(Ieee32::with_float(f32::NEG_INFINITY).to_string(), "-Inf");
+ assert_eq!(Ieee32::with_float(f32::NAN).to_string(), "+NaN");
+ assert_eq!(Ieee32::with_float(-f32::NAN).to_string(), "-NaN");
+ // Construct some qNaNs with payloads.
+ assert_eq!(Ieee32(0x7fc00001).to_string(), "+NaN:0x1");
+ assert_eq!(Ieee32(0x7ff00001).to_string(), "+NaN:0x300001");
+ // Signaling NaNs.
+ assert_eq!(Ieee32(0x7f800001).to_string(), "+sNaN:0x1");
+ assert_eq!(Ieee32(0x7fa00001).to_string(), "+sNaN:0x200001");
+ }
+
+ #[test]
+ fn parse_ieee32() {
+ parse_ok::<Ieee32>("0.0", "0.0");
+ parse_ok::<Ieee32>("+0.0", "0.0");
+ parse_ok::<Ieee32>("-0.0", "-0.0");
+ parse_ok::<Ieee32>("0x0", "0.0");
+ parse_ok::<Ieee32>("0x0.0", "0.0");
+ parse_ok::<Ieee32>("0x.0", "0.0");
+ parse_ok::<Ieee32>("0x0.", "0.0");
+ parse_ok::<Ieee32>("0x1", "0x1.000000p0");
+ parse_ok::<Ieee32>("+0x1", "0x1.000000p0");
+ parse_ok::<Ieee32>("-0x1", "-0x1.000000p0");
+ parse_ok::<Ieee32>("0x10", "0x1.000000p4");
+ parse_ok::<Ieee32>("0x10.0", "0x1.000000p4");
+ parse_err::<Ieee32>("0.", "Float must be hexadecimal");
+ parse_err::<Ieee32>(".0", "Float must be hexadecimal");
+ parse_err::<Ieee32>("0", "Float must be hexadecimal");
+ parse_err::<Ieee32>("-0", "Float must be hexadecimal");
+ parse_err::<Ieee32>(".", "Float must be hexadecimal");
+ parse_err::<Ieee32>("", "Float must be hexadecimal");
+ parse_err::<Ieee32>("-", "Float must be hexadecimal");
+ parse_err::<Ieee32>("0x", "No digits");
+ parse_err::<Ieee32>("0x..", "Multiple radix points");
+
+ // Check significant bits.
+ parse_ok::<Ieee32>("0x0.ffffff", "0x1.fffffep-1");
+ parse_ok::<Ieee32>("0x1.fffffe", "0x1.fffffep0");
+ parse_ok::<Ieee32>("0x3.fffffc", "0x1.fffffep1");
+ parse_ok::<Ieee32>("0x7.fffff8", "0x1.fffffep2");
+ parse_ok::<Ieee32>("0xf.fffff0", "0x1.fffffep3");
+ parse_err::<Ieee32>("0x1.ffffff", "Too many significant bits");
+ parse_err::<Ieee32>("0x1.fffffe0000000000", "Too many digits");
+
+ // Exponents.
+ parse_ok::<Ieee32>("0x1p3", "0x1.000000p3");
+ parse_ok::<Ieee32>("0x1p-3", "0x1.000000p-3");
+ parse_ok::<Ieee32>("0x1.0p3", "0x1.000000p3");
+ parse_ok::<Ieee32>("0x2.0p3", "0x1.000000p4");
+ parse_ok::<Ieee32>("0x1.0p127", "0x1.000000p127");
+ parse_ok::<Ieee32>("0x1.0p-126", "0x1.000000p-126");
+ parse_ok::<Ieee32>("0x0.1p-122", "0x1.000000p-126");
+ parse_err::<Ieee32>("0x2.0p127", "Magnitude too large");
+
+ // Subnormals.
+ parse_ok::<Ieee32>("0x1.0p-127", "0x0.800000p-126");
+ parse_ok::<Ieee32>("0x1.0p-149", "0x0.000002p-126");
+ parse_ok::<Ieee32>("0x0.000002p-126", "0x0.000002p-126");
+ parse_err::<Ieee32>("0x0.100001p-126", "Subnormal underflow");
+ parse_err::<Ieee32>("0x1.8p-149", "Subnormal underflow");
+ parse_err::<Ieee32>("0x1.0p-150", "Magnitude too small");
+
+ // NaNs and Infs.
+ parse_ok::<Ieee32>("Inf", "+Inf");
+ parse_ok::<Ieee32>("+Inf", "+Inf");
+ parse_ok::<Ieee32>("-Inf", "-Inf");
+ parse_ok::<Ieee32>("NaN", "+NaN");
+ parse_ok::<Ieee32>("+NaN", "+NaN");
+ parse_ok::<Ieee32>("-NaN", "-NaN");
+ parse_ok::<Ieee32>("NaN:0x0", "+NaN");
+ parse_err::<Ieee32>("NaN:", "Float must be hexadecimal");
+ parse_err::<Ieee32>("NaN:0", "Float must be hexadecimal");
+ parse_err::<Ieee32>("NaN:0x", "Invalid NaN payload");
+ parse_ok::<Ieee32>("NaN:0x000001", "+NaN:0x1");
+ parse_ok::<Ieee32>("NaN:0x300001", "+NaN:0x300001");
+ parse_err::<Ieee32>("NaN:0x400001", "Invalid NaN payload");
+ parse_ok::<Ieee32>("sNaN:0x1", "+sNaN:0x1");
+ parse_err::<Ieee32>("sNaN:0x0", "Invalid sNaN payload");
+ parse_ok::<Ieee32>("sNaN:0x200001", "+sNaN:0x200001");
+ parse_err::<Ieee32>("sNaN:0x400001", "Invalid sNaN payload");
+ }
+
+ #[test]
+ fn pow2_ieee32() {
+ assert_eq!(Ieee32::pow2(0).to_string(), "0x1.000000p0");
+ assert_eq!(Ieee32::pow2(1).to_string(), "0x1.000000p1");
+ assert_eq!(Ieee32::pow2(-1).to_string(), "0x1.000000p-1");
+ assert_eq!(Ieee32::pow2(127).to_string(), "0x1.000000p127");
+ assert_eq!(Ieee32::pow2(-126).to_string(), "0x1.000000p-126");
+
+ assert_eq!(Ieee32::pow2(1).neg().to_string(), "-0x1.000000p1");
+ }
+
+ #[test]
+ fn fcvt_to_sint_negative_overflow_ieee32() {
+ for n in &[8, 16] {
+ // The transmute reinterprets the Ieee32 bit pattern as f32
+ // (equivalent to f32::from_bits on the stored bits).
+ assert_eq!(-((1u32 << (n - 1)) as f32) - 1.0, unsafe {
+ mem::transmute(Ieee32::fcvt_to_sint_negative_overflow(*n))
+ });
+ }
+ }
+
+ #[test]
+ fn format_ieee64() {
+ assert_eq!(Ieee64::with_float(0.0).to_string(), "0.0");
+ assert_eq!(Ieee64::with_float(-0.0).to_string(), "-0.0");
+ assert_eq!(Ieee64::with_float(1.0).to_string(), "0x1.0000000000000p0");
+ assert_eq!(Ieee64::with_float(1.5).to_string(), "0x1.8000000000000p0");
+ assert_eq!(Ieee64::with_float(0.5).to_string(), "0x1.0000000000000p-1");
+ assert_eq!(
+ Ieee64::with_float(f64::EPSILON).to_string(),
+ "0x1.0000000000000p-52"
+ );
+ assert_eq!(
+ Ieee64::with_float(f64::MIN).to_string(),
+ "-0x1.fffffffffffffp1023"
+ );
+ assert_eq!(
+ Ieee64::with_float(f64::MAX).to_string(),
+ "0x1.fffffffffffffp1023"
+ );
+ // Smallest positive normal number.
+ assert_eq!(
+ Ieee64::with_float(f64::MIN_POSITIVE).to_string(),
+ "0x1.0000000000000p-1022"
+ );
+ // Subnormals.
+ assert_eq!(
+ Ieee64::with_float(f64::MIN_POSITIVE / 2.0).to_string(),
+ "0x0.8000000000000p-1022"
+ );
+ assert_eq!(
+ Ieee64::with_float(f64::MIN_POSITIVE * f64::EPSILON).to_string(),
+ "0x0.0000000000001p-1022"
+ );
+ assert_eq!(Ieee64::with_float(f64::INFINITY).to_string(), "+Inf");
+ assert_eq!(Ieee64::with_float(f64::NEG_INFINITY).to_string(), "-Inf");
+ assert_eq!(Ieee64::with_float(f64::NAN).to_string(), "+NaN");
+ assert_eq!(Ieee64::with_float(-f64::NAN).to_string(), "-NaN");
+ // Construct some qNaNs with payloads.
+ assert_eq!(Ieee64(0x7ff8000000000001).to_string(), "+NaN:0x1");
+ assert_eq!(
+ Ieee64(0x7ffc000000000001).to_string(),
+ "+NaN:0x4000000000001"
+ );
+ // Signaling NaNs.
+ assert_eq!(Ieee64(0x7ff0000000000001).to_string(), "+sNaN:0x1");
+ assert_eq!(
+ Ieee64(0x7ff4000000000001).to_string(),
+ "+sNaN:0x4000000000001"
+ );
+ }
+
+ #[test]
+ fn parse_ieee64() {
+ parse_ok::<Ieee64>("0.0", "0.0");
+ parse_ok::<Ieee64>("-0.0", "-0.0");
+ parse_ok::<Ieee64>("0x0", "0.0");
+ parse_ok::<Ieee64>("0x0.0", "0.0");
+ parse_ok::<Ieee64>("0x.0", "0.0");
+ parse_ok::<Ieee64>("0x0.", "0.0");
+ parse_ok::<Ieee64>("0x1", "0x1.0000000000000p0");
+ parse_ok::<Ieee64>("-0x1", "-0x1.0000000000000p0");
+ parse_ok::<Ieee64>("0x10", "0x1.0000000000000p4");
+ parse_ok::<Ieee64>("0x10.0", "0x1.0000000000000p4");
+ parse_err::<Ieee64>("0.", "Float must be hexadecimal");
+ parse_err::<Ieee64>(".0", "Float must be hexadecimal");
+ parse_err::<Ieee64>("0", "Float must be hexadecimal");
+ parse_err::<Ieee64>("-0", "Float must be hexadecimal");
+ parse_err::<Ieee64>(".", "Float must be hexadecimal");
+ parse_err::<Ieee64>("", "Float must be hexadecimal");
+ parse_err::<Ieee64>("-", "Float must be hexadecimal");
+ parse_err::<Ieee64>("0x", "No digits");
+ parse_err::<Ieee64>("0x..", "Multiple radix points");
+
+ // Check significant bits.
+ parse_ok::<Ieee64>("0x0.fffffffffffff8", "0x1.fffffffffffffp-1");
+ parse_ok::<Ieee64>("0x1.fffffffffffff", "0x1.fffffffffffffp0");
+ parse_ok::<Ieee64>("0x3.ffffffffffffe", "0x1.fffffffffffffp1");
+ parse_ok::<Ieee64>("0x7.ffffffffffffc", "0x1.fffffffffffffp2");
+ parse_ok::<Ieee64>("0xf.ffffffffffff8", "0x1.fffffffffffffp3");
+ parse_err::<Ieee64>("0x3.fffffffffffff", "Too many significant bits");
+ parse_err::<Ieee64>("0x001.fffffe00000000", "Too many digits");
+
+ // Exponents.
+ parse_ok::<Ieee64>("0x1p3", "0x1.0000000000000p3");
+ parse_ok::<Ieee64>("0x1p-3", "0x1.0000000000000p-3");
+ parse_ok::<Ieee64>("0x1.0p3", "0x1.0000000000000p3");
+ parse_ok::<Ieee64>("0x2.0p3", "0x1.0000000000000p4");
+ parse_ok::<Ieee64>("0x1.0p1023", "0x1.0000000000000p1023");
+ parse_ok::<Ieee64>("0x1.0p-1022", "0x1.0000000000000p-1022");
+ parse_ok::<Ieee64>("0x0.1p-1018", "0x1.0000000000000p-1022");
+ parse_err::<Ieee64>("0x2.0p1023", "Magnitude too large");
+
+ // Subnormals.
+ parse_ok::<Ieee64>("0x1.0p-1023", "0x0.8000000000000p-1022");
+ parse_ok::<Ieee64>("0x1.0p-1074", "0x0.0000000000001p-1022");
+ parse_ok::<Ieee64>("0x0.0000000000001p-1022", "0x0.0000000000001p-1022");
+ parse_err::<Ieee64>("0x0.10000000000008p-1022", "Subnormal underflow");
+ parse_err::<Ieee64>("0x1.8p-1074", "Subnormal underflow");
+ parse_err::<Ieee64>("0x1.0p-1075", "Magnitude too small");
+
+ // NaNs and Infs.
+ parse_ok::<Ieee64>("Inf", "+Inf");
+ parse_ok::<Ieee64>("-Inf", "-Inf");
+ parse_ok::<Ieee64>("NaN", "+NaN");
+ parse_ok::<Ieee64>("-NaN", "-NaN");
+ parse_ok::<Ieee64>("NaN:0x0", "+NaN");
+ parse_err::<Ieee64>("NaN:", "Float must be hexadecimal");
+ parse_err::<Ieee64>("NaN:0", "Float must be hexadecimal");
+ parse_err::<Ieee64>("NaN:0x", "Invalid NaN payload");
+ parse_ok::<Ieee64>("NaN:0x000001", "+NaN:0x1");
+ parse_ok::<Ieee64>("NaN:0x4000000000001", "+NaN:0x4000000000001");
+ parse_err::<Ieee64>("NaN:0x8000000000001", "Invalid NaN payload");
+ parse_ok::<Ieee64>("sNaN:0x1", "+sNaN:0x1");
+ parse_err::<Ieee64>("sNaN:0x0", "Invalid sNaN payload");
+ parse_ok::<Ieee64>("sNaN:0x4000000000001", "+sNaN:0x4000000000001");
+ parse_err::<Ieee64>("sNaN:0x8000000000001", "Invalid sNaN payload");
+ }
+
+ #[test]
+ fn pow2_ieee64() {
+ assert_eq!(Ieee64::pow2(0).to_string(), "0x1.0000000000000p0");
+ assert_eq!(Ieee64::pow2(1).to_string(), "0x1.0000000000000p1");
+ assert_eq!(Ieee64::pow2(-1).to_string(), "0x1.0000000000000p-1");
+ assert_eq!(Ieee64::pow2(1023).to_string(), "0x1.0000000000000p1023");
+ assert_eq!(Ieee64::pow2(-1022).to_string(), "0x1.0000000000000p-1022");
+
+ assert_eq!(Ieee64::pow2(1).neg().to_string(), "-0x1.0000000000000p1");
+ }
+
+ #[test]
+ fn fcvt_to_sint_negative_overflow_ieee64() {
+ for n in &[8, 16, 32] {
+ // The transmute reinterprets the Ieee64 bit pattern as f64
+ // (equivalent to f64::from_bits on the stored bits).
+ assert_eq!(-((1u64 << (n - 1)) as f64) - 1.0, unsafe {
+ mem::transmute(Ieee64::fcvt_to_sint_negative_overflow(*n))
+ });
+ }
+ }
+}
diff --git a/third_party/rust/cranelift-codegen/src/ir/instructions.rs b/third_party/rust/cranelift-codegen/src/ir/instructions.rs
new file mode 100644
index 0000000000..13310bc01c
--- /dev/null
+++ b/third_party/rust/cranelift-codegen/src/ir/instructions.rs
@@ -0,0 +1,898 @@
+//! Instruction formats and opcodes.
+//!
+//! The `instructions` module contains definitions for instruction formats, opcodes, and the
+//! in-memory representation of IR instructions.
+//!
+//! A large part of this module is auto-generated from the instruction descriptions in the meta
+//! directory.
+
+use alloc::vec::Vec;
+use core::convert::{TryFrom, TryInto};
+use core::fmt::{self, Display, Formatter};
+use core::num::NonZeroU32;
+use core::ops::{Deref, DerefMut};
+use core::str::FromStr;
+
+use crate::ir::{self, trapcode::TrapCode, types, Block, FuncRef, JumpTable, SigRef, Type, Value};
+use crate::isa;
+
+use crate::bitset::BitSet;
+use crate::data_value::DataValue;
+use crate::entity;
+use ir::condcodes::{FloatCC, IntCC};
+
+/// Some instructions use an external list of argument values because there is not enough space in
+/// the 16-byte `InstructionData` struct. These value lists are stored in a memory pool in
+/// `dfg.value_lists`.
+pub type ValueList = entity::EntityList<Value>;
+
+/// Memory pool for holding value lists. See `ValueList`.
+pub type ValueListPool = entity::ListPool<Value>;
+
+// Include code generated by `cranelift-codegen/meta/src/gen_inst.rs` at build time (the file is
+// emitted into `OUT_DIR`). This file contains:
+//
+// - The `pub enum InstructionFormat` enum with all the instruction formats.
+// - The `pub enum InstructionData` enum with all the instruction data fields.
+// - The `pub enum Opcode` definition with all known opcodes,
+// - The `const OPCODE_FORMAT: [InstructionFormat; N]` table.
+// - The private `fn opcode_name(Opcode) -> &'static str` function, and
+// - The hash table `const OPCODE_HASH_TABLE: [Opcode; N]`.
+//
+// For value type constraints:
+//
+// - The `const OPCODE_CONSTRAINTS : [OpcodeConstraints; N]` table.
+// - The `const TYPE_SETS : [ValueTypeSet; N]` table.
+// - The `const OPERAND_CONSTRAINTS : [OperandConstraint; N]` table.
+//
+include!(concat!(env!("OUT_DIR"), "/opcodes.rs"));
+
+// Prints the opcode's textual name via the generated `opcode_name` table.
+impl Display for Opcode {
+ fn fmt(&self, f: &mut Formatter) -> fmt::Result {
+ write!(f, "{}", opcode_name(*self))
+ }
+}
+
+impl Opcode {
+ /// Get the instruction format for this opcode.
+ pub fn format(self) -> InstructionFormat {
+ // Opcode discriminants start at 1 (see the NonZeroU32 conversions
+ // below), so the generated tables are indexed with `self - 1`.
+ OPCODE_FORMAT[self as usize - 1]
+ }
+
+ /// Get the constraint descriptor for this opcode.
+ /// Panic if this is called on `NotAnOpcode`.
+ pub fn constraints(self) -> OpcodeConstraints {
+ OPCODE_CONSTRAINTS[self as usize - 1]
+ }
+
+ /// Returns true if the instruction is a resumable trap.
+ ///
+ /// Only `resumable_trap` and `resumable_trapnz` qualify; plain traps do not.
+ pub fn is_resumable_trap(&self) -> bool {
+ match self {
+ Opcode::ResumableTrap | Opcode::ResumableTrapnz => true,
+ _ => false,
+ }
+ }
+}
+
+// Fallible decoding of an opcode from its packed non-zero representation.
+impl TryFrom<NonZeroU32> for Opcode {
+ type Error = ();
+
+ #[inline]
+ fn try_from(x: NonZeroU32) -> Result<Self, ()> {
+ // Narrow to u16 first; the generated `TryFrom<u16>` impl then rejects
+ // values that are not valid opcode discriminants.
+ let x: u16 = x.get().try_into().map_err(|_| ())?;
+ Self::try_from(x)
+ }
+}
+
+// Packs an opcode into a NonZeroU32 (discriminants start at 1, so the
+// result is never zero for valid discriminants).
+impl From<Opcode> for NonZeroU32 {
+ #[inline]
+ fn from(op: Opcode) -> NonZeroU32 {
+ // NOTE(review): this casts through u8 while the inverse conversion
+ // above goes through u16 — if opcode discriminants ever exceed 255
+ // the cast truncates silently and the unwrap below can panic (when
+ // the truncated value is 0). Worth confirming the discriminant range
+ // in the generated opcodes.rs.
+ let x = op as u8;
+ NonZeroU32::new(x as u32).unwrap()
+ }
+}
+
+// This trait really belongs in cranelift-reader where it is used by the `.clif` file parser, but since
+// it critically depends on the `opcode_name()` function which is needed here anyway, it lives in
+// this module. This also saves us from running the build script twice to generate code for the two
+// separate crates.
+impl FromStr for Opcode {
+ type Err = &'static str;
+
+ /// Parse an Opcode name from a string.
+ fn from_str(s: &str) -> Result<Self, &'static str> {
+ use crate::constant_hash::{probe, simple_hash, Table};
+
+ // Adapt the generated hash-table slice to the generic `Table` probe
+ // interface, keyed by opcode name.
+ impl<'a> Table<&'a str> for [Option<Opcode>] {
+ fn len(&self) -> usize {
+ self.len()
+ }
+
+ fn key(&self, idx: usize) -> Option<&'a str> {
+ self[idx].map(opcode_name)
+ }
+ }
+
+ match probe::<&str, [Option<Self>]>(&OPCODE_HASH_TABLE, s, simple_hash(s)) {
+ Err(_) => Err("Unknown opcode"),
+ // We unwrap here because probe() should have ensured that the entry
+ // at this index is not None.
+ Ok(i) => Ok(OPCODE_HASH_TABLE[i].unwrap()),
+ }
+ }
+}
+
+/// A variable list of `Value` operands used for function call arguments and passing arguments to
+/// basic blocks.
+///
+/// A newtype over `Vec<Value>`; `Deref`/`DerefMut` below expose it as a slice.
+#[derive(Clone, Debug)]
+pub struct VariableArgs(Vec<Value>);
+
+impl VariableArgs {
+ /// Create an empty argument list.
+ pub fn new() -> Self {
+ Self(Vec::new())
+ }
+
+ /// Add an argument to the end.
+ pub fn push(&mut self, v: Value) {
+ self.0.push(v)
+ }
+
+ /// Check if the list is empty.
+ pub fn is_empty(&self) -> bool {
+ self.0.is_empty()
+ }
+
+ /// Convert this to a value list in `pool` with `fixed` prepended.
+ ///
+ /// Consumes `self`; the resulting pooled list is `fixed` followed by the
+ /// variable arguments, matching the layout instruction formats expect.
+ pub fn into_value_list(self, fixed: &[Value], pool: &mut ValueListPool) -> ValueList {
+ let mut vlist = ValueList::default();
+ vlist.extend(fixed.iter().cloned(), pool);
+ vlist.extend(self.0, pool);
+ vlist
+ }
+}
+
+// Coerce `VariableArgs` into a `&[Value]` slice.
+impl Deref for VariableArgs {
+ type Target = [Value];
+
+ fn deref(&self) -> &[Value] {
+ &self.0
+ }
+}
+
+// Mutable counterpart: coerce into `&mut [Value]`.
+impl DerefMut for VariableArgs {
+ fn deref_mut(&mut self) -> &mut [Value] {
+ &mut self.0
+ }
+}
+
+// Prints the arguments comma-separated, e.g. `v1, v2, v3` (empty for no args).
+impl Display for VariableArgs {
+ fn fmt(&self, fmt: &mut Formatter) -> fmt::Result {
+ for (i, val) in self.0.iter().enumerate() {
+ if i == 0 {
+ write!(fmt, "{}", val)?;
+ } else {
+ write!(fmt, ", {}", val)?;
+ }
+ }
+ Ok(())
+ }
+}
+
+// Default is the empty argument list.
+impl Default for VariableArgs {
+ fn default() -> Self {
+ Self::new()
+ }
+}
+
+/// Analyzing an instruction.
+///
+/// Avoid large matches on instruction formats by using the methods defined here to examine
+/// instructions.
+impl InstructionData {
+ /// Return information about the destination of a branch or jump instruction.
+ ///
+ /// Any instruction that can transfer control to another block reveals its possible destinations
+ /// here.
+ pub fn analyze_branch<'a>(&'a self, pool: &'a ValueListPool) -> BranchInfo<'a> {
+ match *self {
+ Self::Jump {
+ destination,
+ ref args,
+ ..
+ } => BranchInfo::SingleDest(destination, args.as_slice(pool)),
+ Self::BranchInt {
+ destination,
+ ref args,
+ ..
+ }
+ | Self::BranchFloat {
+ destination,
+ ref args,
+ ..
+ }
+ | Self::Branch {
+ destination,
+ ref args,
+ ..
+ // `[1..]` skips the first list entry — presumably the tested value,
+ // leaving only the block arguments; verify against the format defs.
+ } => BranchInfo::SingleDest(destination, &args.as_slice(pool)[1..]),
+ Self::BranchIcmp {
+ destination,
+ ref args,
+ ..
+ // `[2..]` skips both comparison operands for the same reason.
+ } => BranchInfo::SingleDest(destination, &args.as_slice(pool)[2..]),
+ Self::BranchTable {
+ table, destination, ..
+ } => BranchInfo::Table(table, Some(destination)),
+ Self::IndirectJump { table, .. } => BranchInfo::Table(table, None),
+ _ => {
+ debug_assert!(!self.opcode().is_branch());
+ BranchInfo::NotABranch
+ }
+ }
+ }
+
+ /// Get the single destination of this branch instruction, if it is a single destination
+ /// branch or jump.
+ ///
+ /// Multi-destination branches like `br_table` return `None`.
+ pub fn branch_destination(&self) -> Option<Block> {
+ match *self {
+ Self::Jump { destination, .. }
+ | Self::Branch { destination, .. }
+ | Self::BranchInt { destination, .. }
+ | Self::BranchFloat { destination, .. }
+ | Self::BranchIcmp { destination, .. } => Some(destination),
+ Self::BranchTable { .. } | Self::IndirectJump { .. } => None,
+ _ => {
+ debug_assert!(!self.opcode().is_branch());
+ None
+ }
+ }
+ }
+
+ /// Get a mutable reference to the single destination of this branch instruction, if it is a
+ /// single destination branch or jump.
+ ///
+ /// Multi-destination branches like `br_table` return `None`.
+ pub fn branch_destination_mut(&mut self) -> Option<&mut Block> {
+ match *self {
+ Self::Jump {
+ ref mut destination,
+ ..
+ }
+ | Self::Branch {
+ ref mut destination,
+ ..
+ }
+ | Self::BranchInt {
+ ref mut destination,
+ ..
+ }
+ | Self::BranchFloat {
+ ref mut destination,
+ ..
+ }
+ | Self::BranchIcmp {
+ ref mut destination,
+ ..
+ } => Some(destination),
+ Self::BranchTable { .. } | Self::IndirectJump { .. } => None,
+ _ => {
+ debug_assert!(!self.opcode().is_branch());
+ None
+ }
+ }
+ }
+
+ /// Return the value of an immediate if the instruction has one or `None` otherwise. Only
+ /// immediate values are considered, not global values, constant handles, condition codes, etc.
+ pub fn imm_value(&self) -> Option<DataValue> {
+ match self {
+ &InstructionData::UnaryBool { imm, .. } => Some(DataValue::from(imm)),
+ // 8-bit.
+ &InstructionData::BinaryImm8 { imm, .. }
+ | &InstructionData::BranchTableEntry { imm, .. } => Some(DataValue::from(imm as i8)), // Note the switch from unsigned to signed.
+ // 32-bit
+ &InstructionData::UnaryIeee32 { imm, .. } => Some(DataValue::from(imm)),
+ &InstructionData::HeapAddr { imm, .. } => {
+ let imm: u32 = imm.into();
+ Some(DataValue::from(imm as i32)) // Note the switch from unsigned to signed.
+ }
+ &InstructionData::Load { offset, .. }
+ | &InstructionData::LoadComplex { offset, .. }
+ | &InstructionData::Store { offset, .. }
+ | &InstructionData::StoreComplex { offset, .. }
+ | &InstructionData::StackLoad { offset, .. }
+ | &InstructionData::StackStore { offset, .. }
+ | &InstructionData::TableAddr { offset, .. } => Some(DataValue::from(offset)),
+ // 64-bit.
+ &InstructionData::UnaryImm { imm, .. }
+ | &InstructionData::BinaryImm64 { imm, .. }
+ | &InstructionData::IntCompareImm { imm, .. } => Some(DataValue::from(imm.bits())),
+ &InstructionData::UnaryIeee64 { imm, .. } => Some(DataValue::from(imm)),
+ // 128-bit; though these immediates are present logically in the IR they are not
+ // included in the `InstructionData` for memory-size reasons. This case, returning
+ // `None`, is left here to alert users of this method that they should retrieve the
+ // value using the `DataFlowGraph`.
+ &InstructionData::Shuffle { mask: _, .. } => None,
+ _ => None,
+ }
+ }
+
+ /// If this is a trapping instruction, get its trap code. Otherwise, return
+ /// `None`.
+ pub fn trap_code(&self) -> Option<TrapCode> {
+ match *self {
+ Self::CondTrap { code, .. }
+ | Self::FloatCondTrap { code, .. }
+ | Self::IntCondTrap { code, .. }
+ | Self::Trap { code, .. } => Some(code),
+ _ => None,
+ }
+ }
+
+ /// If this is a control-flow instruction depending on an integer condition, gets its
+ /// condition. Otherwise, return `None`.
+ pub fn cond_code(&self) -> Option<IntCC> {
+ match self {
+ &InstructionData::IntCond { cond, .. }
+ | &InstructionData::BranchIcmp { cond, .. }
+ | &InstructionData::IntCompare { cond, .. }
+ | &InstructionData::IntCondTrap { cond, .. }
+ | &InstructionData::BranchInt { cond, .. }
+ | &InstructionData::IntSelect { cond, .. }
+ | &InstructionData::IntCompareImm { cond, .. } => Some(cond),
+ _ => None,
+ }
+ }
+
+ /// If this is a control-flow instruction depending on a floating-point condition, gets its
+ /// condition. Otherwise, return `None`.
+ pub fn fp_cond_code(&self) -> Option<FloatCC> {
+ match self {
+ &InstructionData::BranchFloat { cond, .. }
+ | &InstructionData::FloatCompare { cond, .. }
+ | &InstructionData::FloatCond { cond, .. }
+ | &InstructionData::FloatCondTrap { cond, .. } => Some(cond),
+ _ => None,
+ }
+ }
+
+ /// If this is a trapping instruction, get an exclusive reference to its
+ /// trap code. Otherwise, return `None`.
+ pub fn trap_code_mut(&mut self) -> Option<&mut TrapCode> {
+ match self {
+ Self::CondTrap { code, .. }
+ | Self::FloatCondTrap { code, .. }
+ | Self::IntCondTrap { code, .. }
+ | Self::Trap { code, .. } => Some(code),
+ _ => None,
+ }
+ }
+
+ /// If this is an atomic read/modify/write instruction, return its subopcode.
+ pub fn atomic_rmw_op(&self) -> Option<ir::AtomicRmwOp> {
+ match self {
+ &InstructionData::AtomicRmw { op, .. } => Some(op),
+ _ => None,
+ }
+ }
+
+ /// If this is a load/store instruction, returns its immediate offset.
+ pub fn load_store_offset(&self) -> Option<i32> {
+ match self {
+ &InstructionData::Load { offset, .. }
+ | &InstructionData::StackLoad { offset, .. }
+ | &InstructionData::LoadComplex { offset, .. }
+ | &InstructionData::Store { offset, .. }
+ | &InstructionData::StackStore { offset, .. }
+ | &InstructionData::StoreComplex { offset, .. } => Some(offset.into()),
+ _ => None,
+ }
+ }
+
+ /// Return information about a call instruction.
+ ///
+ /// Any instruction that can call another function reveals its call signature here.
+ pub fn analyze_call<'a>(&'a self, pool: &'a ValueListPool) -> CallInfo<'a> {
+ match *self {
+ Self::Call {
+ func_ref, ref args, ..
+ } => CallInfo::Direct(func_ref, args.as_slice(pool)),
+ Self::CallIndirect {
+ sig_ref, ref args, ..
+ // `[1..]` skips the first list entry — presumably the callee
+ // address — leaving only the call arguments.
+ } => CallInfo::Indirect(sig_ref, &args.as_slice(pool)[1..]),
+ _ => {
+ debug_assert!(!self.opcode().is_call());
+ CallInfo::NotACall
+ }
+ }
+ }
+
+ /// Sign-extend stored immediates to the width of the controlling type
+ /// variable for the opcodes whose semantics are signed (signed div/rem
+ /// immediates, and compare immediates with a signed condition).
+ #[inline]
+ pub(crate) fn sign_extend_immediates(&mut self, ctrl_typevar: Type) {
+ if ctrl_typevar.is_invalid() {
+ return;
+ }
+
+ let bit_width = ctrl_typevar.bits();
+
+ match self {
+ Self::BinaryImm64 {
+ opcode,
+ arg: _,
+ imm,
+ } => {
+ // Only the signed arithmetic immediates need extension.
+ if *opcode == Opcode::SdivImm || *opcode == Opcode::SremImm {
+ imm.sign_extend_from_width(bit_width);
+ }
+ }
+ Self::IntCompareImm {
+ opcode,
+ arg: _,
+ cond,
+ imm,
+ } => {
+ debug_assert_eq!(*opcode, Opcode::IcmpImm);
+ // A condition that differs from its unsigned counterpart is a
+ // signed comparison, so the immediate must be sign-extended.
+ if cond.unsigned() != *cond {
+ imm.sign_extend_from_width(bit_width);
+ }
+ }
+ _ => {}
+ }
+ }
+}
+
+/// Information about branch and jump instructions.
+///
+/// Produced by `InstructionData::analyze_branch`; the borrowed value slices
+/// point into the `ValueListPool` passed to that method.
+pub enum BranchInfo<'a> {
+ /// This is not a branch or jump instruction.
+ /// This instruction will not transfer control to another block in the function, but it may still
+ /// affect control flow by returning or trapping.
+ NotABranch,
+
+ /// This is a branch or jump to a single destination block, possibly taking value arguments.
+ SingleDest(Block, &'a [Value]),
+
+ /// This is a jump table branch which can have many destination blocks and maybe one default block.
+ Table(JumpTable, Option<Block>),
+}
+
+/// Information about call instructions.
+///
+/// Produced by `InstructionData::analyze_call`; the borrowed argument slices
+/// point into the `ValueListPool` passed to that method.
+pub enum CallInfo<'a> {
+ /// This is not a call instruction.
+ NotACall,
+
+ /// This is a direct call to an external function declared in the preamble. See
+ /// `DataFlowGraph.ext_funcs`.
+ Direct(FuncRef, &'a [Value]),
+
+ /// This is an indirect call with the specified signature. See `DataFlowGraph.signatures`.
+ Indirect(SigRef, &'a [Value]),
+}
+
+/// Value type constraints for a given opcode.
+///
+/// The `InstructionFormat` determines the constraints on most operands, but `Value` operands and
+/// results are not determined by the format. Every `Opcode` has an associated
+/// `OpcodeConstraints` object that provides the missing details.
+///
+/// Instances live in the generated `OPCODE_CONSTRAINTS` table and are looked
+/// up via `Opcode::constraints()`; the accessors below decode the packed fields.
+#[derive(Clone, Copy)]
+pub struct OpcodeConstraints {
+ /// Flags for this opcode encoded as a bit field:
+ ///
+ /// Bits 0-2:
+ /// Number of fixed result values. This does not include `variable_args` results as are
+ /// produced by call instructions.
+ ///
+ /// Bit 3:
+ /// This opcode is polymorphic and the controlling type variable can be inferred from the
+ /// designated input operand. This is the `typevar_operand` index given to the
+ /// `InstructionFormat` meta language object. When this bit is not set, the controlling
+ /// type variable must be the first output value instead.
+ ///
+ /// Bit 4:
+ /// This opcode is polymorphic and the controlling type variable does *not* appear as the
+ /// first result type.
+ ///
+ /// Bits 5-7:
+ /// Number of fixed value arguments. The minimum required number of value operands.
+ flags: u8,
+
+ /// Permitted set of types for the controlling type variable as an index into `TYPE_SETS`.
+ typeset_offset: u8,
+
+ /// Offset into `OPERAND_CONSTRAINT` table of the descriptors for this opcode. The first
+ /// `num_fixed_results()` entries describe the result constraints, then follows constraints for
+ /// the fixed `Value` input operands. (`num_fixed_value_arguments()` of them).
+ constraint_offset: u16,
+}
+
+impl OpcodeConstraints {
+ /// Can the controlling type variable for this opcode be inferred from the designated value
+ /// input operand?
+ /// This also implies that this opcode is polymorphic.
+ pub fn use_typevar_operand(self) -> bool {
+ (self.flags & 0x8) != 0
+ }
+
+ /// Is it necessary to look at the designated value input operand in order to determine the
+ /// controlling type variable, or is it good enough to use the first return type?
+ ///
+ /// Most polymorphic instructions produce a single result with the type of the controlling type
+ /// variable. A few polymorphic instructions either don't produce any results, or produce
+ /// results with a fixed type. These instructions return `true`.
+ pub fn requires_typevar_operand(self) -> bool {
+ (self.flags & 0x10) != 0
+ }
+
+ /// Get the number of *fixed* result values produced by this opcode.
+ /// This does not include `variable_args` produced by calls.
+ pub fn num_fixed_results(self) -> usize {
+ (self.flags & 0x7) as usize
+ }
+
+ /// Get the number of *fixed* input values required by this opcode.
+ ///
+ /// This does not include `variable_args` arguments on call and branch instructions.
+ ///
+ /// The number of fixed input values is usually implied by the instruction format, but
+ /// instruction formats that use a `ValueList` put both fixed and variable arguments in the
+ /// list. This method returns the *minimum* number of values required in the value list.
+ pub fn num_fixed_value_arguments(self) -> usize {
+ ((self.flags >> 5) & 0x7) as usize
+ }
+
+ /// Get the offset into `TYPE_SETS` for the controlling type variable.
+ /// Returns `None` if the instruction is not polymorphic.
+ fn typeset_offset(self) -> Option<usize> {
+ let offset = usize::from(self.typeset_offset);
+ if offset < TYPE_SETS.len() {
+ Some(offset)
+ } else {
+ None
+ }
+ }
+
+ /// Get the offset into OPERAND_CONSTRAINTS where the descriptors for this opcode begin.
+ fn constraint_offset(self) -> usize {
+ self.constraint_offset as usize
+ }
+
+ /// Get the value type of result number `n`, having resolved the controlling type variable to
+ /// `ctrl_type`.
+ pub fn result_type(self, n: usize, ctrl_type: Type) -> Type {
+ debug_assert!(n < self.num_fixed_results(), "Invalid result index");
+ if let ResolvedConstraint::Bound(t) =
+ OPERAND_CONSTRAINTS[self.constraint_offset() + n].resolve(ctrl_type)
+ {
+ t
+ } else {
+ panic!("Result constraints can't be free");
+ }
+ }
+
+ /// Get the value type of input value number `n`, having resolved the controlling type variable
+ /// to `ctrl_type`.
+ ///
+ /// Unlike results, it is possible for some input values to vary freely within a specific
+ /// `ValueTypeSet`. This is represented with the `ArgumentConstraint::Free` variant.
+ pub fn value_argument_constraint(self, n: usize, ctrl_type: Type) -> ResolvedConstraint {
+ debug_assert!(
+ n < self.num_fixed_value_arguments(),
+ "Invalid value argument index"
+ );
+ let offset = self.constraint_offset() + self.num_fixed_results();
+ OPERAND_CONSTRAINTS[offset + n].resolve(ctrl_type)
+ }
+
+ /// Get the typeset of allowed types for the controlling type variable in a polymorphic
+ /// instruction.
+ pub fn ctrl_typeset(self) -> Option<ValueTypeSet> {
+ self.typeset_offset().map(|offset| TYPE_SETS[offset])
+ }
+
+ /// Is this instruction polymorphic?
+ pub fn is_polymorphic(self) -> bool {
+ self.ctrl_typeset().is_some()
+ }
+}
+
// Convenience aliases for the bit-set widths used by `ValueTypeSet` fields.
type BitSet8 = BitSet<u8>;
type BitSet16 = BitSet<u16>;

/// A value type set describes the permitted set of types for a type variable.
///
/// Each field is a bit set indexed by a log2-encoded size (see `is_base_type`/`contains` below,
/// which index with `log2_lane_bits()` / `log2_lane_count()`).
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub struct ValueTypeSet {
    /// Allowed lane sizes
    pub lanes: BitSet16,
    /// Allowed int widths
    pub ints: BitSet8,
    /// Allowed float widths
    pub floats: BitSet8,
    /// Allowed bool widths
    pub bools: BitSet8,
    /// Allowed ref widths
    pub refs: BitSet8,
}
+
impl ValueTypeSet {
    /// Is `scalar` part of the base type set?
    ///
    /// Note that the base type set does not have to be included in the type set proper.
    fn is_base_type(self, scalar: Type) -> bool {
        // The per-class bit sets are indexed by log2 of the lane width in bits.
        let l2b = scalar.log2_lane_bits();
        if scalar.is_int() {
            self.ints.contains(l2b)
        } else if scalar.is_float() {
            self.floats.contains(l2b)
        } else if scalar.is_bool() {
            self.bools.contains(l2b)
        } else if scalar.is_ref() {
            self.refs.contains(l2b)
        } else {
            // Not a known scalar class; never a member.
            false
        }
    }

    /// Does `typ` belong to this set?
    pub fn contains(self, typ: Type) -> bool {
        // Both the lane count and the scalar lane type must be permitted.
        let l2l = typ.log2_lane_count();
        self.lanes.contains(l2l) && self.is_base_type(typ.lane_type())
    }

    /// Get an example member of this type set.
    ///
    /// This is used for error messages to avoid suggesting invalid types.
    pub fn example(self) -> Type {
        // Heuristic: prefer a 32-bit int, then float, then bool example when the set
        // allows widths beyond 2^5 = 32 bits; fall back to b1. Diagnostics only, so
        // the choice just needs to be a plausible member, not canonical.
        let t = if self.ints.max().unwrap_or(0) > 5 {
            types::I32
        } else if self.floats.max().unwrap_or(0) > 5 {
            types::F32
        } else if self.bools.max().unwrap_or(0) > 5 {
            types::B32
        } else {
            types::B1
        };
        // Scale to the smallest permitted lane count. Panics if `lanes` is empty,
        // which generated type sets presumably never are — TODO confirm.
        t.by(1 << self.lanes.min().unwrap()).unwrap()
    }
}
+
/// Operand constraints. This describes the value type constraints on a single `Value` operand.
///
/// Most variants derive the operand's type from the controlling type variable (`ctrlType`);
/// see `resolve()` for how each is computed.
enum OperandConstraint {
    /// This operand has a concrete value type.
    Concrete(Type),

    /// This operand can vary freely within the given type set.
    /// The type set is identified by its index into the TYPE_SETS constant table.
    Free(u8),

    /// This operand is the same type as the controlling type variable.
    Same,

    /// This operand is `ctrlType.lane_of()`.
    LaneOf,

    /// This operand is `ctrlType.as_bool()`.
    AsBool,

    /// This operand is `ctrlType.half_width()`.
    HalfWidth,

    /// This operand is `ctrlType.double_width()`.
    DoubleWidth,

    /// This operand is `ctrlType.half_vector()`.
    HalfVector,

    /// This operand is `ctrlType.double_vector()`.
    DoubleVector,

    /// This operand is `ctrlType.split_lanes()`.
    SplitLanes,

    /// This operand is `ctrlType.merge_lanes()`.
    MergeLanes,
}
+
+impl OperandConstraint {
+ /// Resolve this operand constraint into a concrete value type, given the value of the
+ /// controlling type variable.
+ pub fn resolve(&self, ctrl_type: Type) -> ResolvedConstraint {
+ use self::OperandConstraint::*;
+ use self::ResolvedConstraint::Bound;
+ match *self {
+ Concrete(t) => Bound(t),
+ Free(vts) => ResolvedConstraint::Free(TYPE_SETS[vts as usize]),
+ Same => Bound(ctrl_type),
+ LaneOf => Bound(ctrl_type.lane_of()),
+ AsBool => Bound(ctrl_type.as_bool()),
+ HalfWidth => Bound(ctrl_type.half_width().expect("invalid type for half_width")),
+ DoubleWidth => Bound(
+ ctrl_type
+ .double_width()
+ .expect("invalid type for double_width"),
+ ),
+ HalfVector => Bound(
+ ctrl_type
+ .half_vector()
+ .expect("invalid type for half_vector"),
+ ),
+ DoubleVector => Bound(ctrl_type.by(2).expect("invalid type for double_vector")),
+ SplitLanes => Bound(
+ ctrl_type
+ .split_lanes()
+ .expect("invalid type for split_lanes"),
+ ),
+ MergeLanes => Bound(
+ ctrl_type
+ .merge_lanes()
+ .expect("invalid type for merge_lanes"),
+ ),
+ }
+ }
+}
+
/// The type constraint on a value argument once the controlling type variable is known.
///
/// Produced by `OperandConstraint::resolve()`.
#[derive(Copy, Clone, Debug, PartialEq, Eq)]
pub enum ResolvedConstraint {
    /// The operand is bound to a known type.
    Bound(Type),
    /// The operand type can vary freely within the given set.
    Free(ValueTypeSet),
}
+
#[cfg(test)]
mod tests {
    use super::*;
    use alloc::string::ToString;

    #[test]
    fn opcodes() {
        use core::mem;

        let x = Opcode::Iadd;
        let mut y = Opcode::Isub;

        assert!(x != y);
        y = Opcode::Iadd;
        assert_eq!(x, y);
        assert_eq!(x.format(), InstructionFormat::Binary);

        // `Debug` prints the variant name; `Display` prints the textual IR name.
        assert_eq!(format!("{:?}", Opcode::IaddImm), "IaddImm");
        assert_eq!(Opcode::IaddImm.to_string(), "iadd_imm");

        // Check the matcher.
        assert_eq!("iadd".parse::<Opcode>(), Ok(Opcode::Iadd));
        assert_eq!("iadd_imm".parse::<Opcode>(), Ok(Opcode::IaddImm));
        // Embedded NULs and empty strings must be rejected, not matched by prefix.
        assert_eq!("iadd\0".parse::<Opcode>(), Err("Unknown opcode"));
        assert_eq!("".parse::<Opcode>(), Err("Unknown opcode"));
        assert_eq!("\0".parse::<Opcode>(), Err("Unknown opcode"));

        // Opcode is a single byte, and because Option<Opcode> originally came to 2 bytes, early on
        // Opcode included a variant NotAnOpcode to avoid the unnecessary bloat. Since then the Rust
        // compiler has brought in NonZero optimization, meaning that an enum not using the 0 value
        // can be optional for no size cost. We want to ensure Option<Opcode> remains small.
        assert_eq!(mem::size_of::<Opcode>(), mem::size_of::<Option<Opcode>>());
    }

    #[test]
    fn instruction_data() {
        use core::mem;
        // The size of the `InstructionData` enum is important for performance. It should not
        // exceed 16 bytes. Use `Box<FooData>` out-of-line payloads for instruction formats that
        // require more space than that. It would be fine with a data structure smaller than 16
        // bytes, but what are the odds of that?
        assert_eq!(mem::size_of::<InstructionData>(), 16);
    }

    #[test]
    fn constraints() {
        // `iadd` is polymorphic: both inputs and the result share the controlling type.
        let a = Opcode::Iadd.constraints();
        assert!(a.use_typevar_operand());
        assert!(!a.requires_typevar_operand());
        assert_eq!(a.num_fixed_results(), 1);
        assert_eq!(a.num_fixed_value_arguments(), 2);
        assert_eq!(a.result_type(0, types::I32), types::I32);
        assert_eq!(a.result_type(0, types::I8), types::I8);
        assert_eq!(
            a.value_argument_constraint(0, types::I32),
            ResolvedConstraint::Bound(types::I32)
        );
        assert_eq!(
            a.value_argument_constraint(1, types::I32),
            ResolvedConstraint::Bound(types::I32)
        );

        // `bitcast` leaves its input free: any type of the right size can be cast.
        let b = Opcode::Bitcast.constraints();
        assert!(!b.use_typevar_operand());
        assert!(!b.requires_typevar_operand());
        assert_eq!(b.num_fixed_results(), 1);
        assert_eq!(b.num_fixed_value_arguments(), 1);
        assert_eq!(b.result_type(0, types::I32), types::I32);
        assert_eq!(b.result_type(0, types::I8), types::I8);
        match b.value_argument_constraint(0, types::I32) {
            ResolvedConstraint::Free(vts) => assert!(vts.contains(types::F32)),
            _ => panic!("Unexpected constraint from value_argument_constraint"),
        }

        // Calls have no fixed results or arguments; everything is in the value list.
        let c = Opcode::Call.constraints();
        assert_eq!(c.num_fixed_results(), 0);
        assert_eq!(c.num_fixed_value_arguments(), 0);

        // Indirect calls have one fixed argument: the callee pointer.
        let i = Opcode::CallIndirect.constraints();
        assert_eq!(i.num_fixed_results(), 0);
        assert_eq!(i.num_fixed_value_arguments(), 1);

        // `icmp` produces a bool, so the controlling type must come from its operands.
        let cmp = Opcode::Icmp.constraints();
        assert!(cmp.use_typevar_operand());
        assert!(cmp.requires_typevar_operand());
        assert_eq!(cmp.num_fixed_results(), 1);
        assert_eq!(cmp.num_fixed_value_arguments(), 2);
    }

    #[test]
    fn value_set() {
        use crate::ir::types::*;

        // Bit-set ranges are half-open over log2-encoded sizes, e.g. ints=(4, 7)
        // permits widths 2^4..2^6 bits = i16..i64.
        let vts = ValueTypeSet {
            lanes: BitSet16::from_range(0, 8),
            ints: BitSet8::from_range(4, 7),
            floats: BitSet8::from_range(0, 0),
            bools: BitSet8::from_range(3, 7),
            refs: BitSet8::from_range(5, 7),
        };
        assert!(!vts.contains(I8));
        assert!(vts.contains(I32));
        assert!(vts.contains(I64));
        assert!(vts.contains(I32X4));
        assert!(!vts.contains(F32));
        assert!(!vts.contains(B1));
        assert!(vts.contains(B8));
        assert!(vts.contains(B64));
        assert!(vts.contains(R32));
        assert!(vts.contains(R64));
        assert_eq!(vts.example().to_string(), "i32");

        let vts = ValueTypeSet {
            lanes: BitSet16::from_range(0, 8),
            ints: BitSet8::from_range(0, 0),
            floats: BitSet8::from_range(5, 7),
            bools: BitSet8::from_range(3, 7),
            refs: BitSet8::from_range(0, 0),
        };
        assert_eq!(vts.example().to_string(), "f32");

        // Minimum lane count of 2^1 forces a vector example.
        let vts = ValueTypeSet {
            lanes: BitSet16::from_range(1, 8),
            ints: BitSet8::from_range(0, 0),
            floats: BitSet8::from_range(5, 7),
            bools: BitSet8::from_range(3, 7),
            refs: BitSet8::from_range(0, 0),
        };
        assert_eq!(vts.example().to_string(), "f32x2");

        let vts = ValueTypeSet {
            lanes: BitSet16::from_range(2, 8),
            ints: BitSet8::from_range(0, 0),
            floats: BitSet8::from_range(0, 0),
            bools: BitSet8::from_range(3, 7),
            refs: BitSet8::from_range(0, 0),
        };
        assert!(!vts.contains(B32X2));
        assert!(vts.contains(B32X4));
        assert_eq!(vts.example().to_string(), "b32x4");

        let vts = ValueTypeSet {
            // TypeSet(lanes=(1, 256), ints=(8, 64))
            lanes: BitSet16::from_range(0, 9),
            ints: BitSet8::from_range(3, 7),
            floats: BitSet8::from_range(0, 0),
            bools: BitSet8::from_range(0, 0),
            refs: BitSet8::from_range(0, 0),
        };
        assert!(vts.contains(I32));
        assert!(vts.contains(I32X4));
        assert!(!vts.contains(R32));
        assert!(!vts.contains(R64));
    }
}
diff --git a/third_party/rust/cranelift-codegen/src/ir/jumptable.rs b/third_party/rust/cranelift-codegen/src/ir/jumptable.rs
new file mode 100644
index 0000000000..a0596728a3
--- /dev/null
+++ b/third_party/rust/cranelift-codegen/src/ir/jumptable.rs
@@ -0,0 +1,119 @@
+//! Jump table representation.
+//!
+//! Jump tables are declared in the preamble and assigned an `ir::entities::JumpTable` reference.
+//! The actual table of destinations is stored in a `JumpTableData` struct defined in this module.
+
+use crate::ir::entities::Block;
+use alloc::vec::Vec;
+use core::fmt::{self, Display, Formatter};
+use core::slice::{Iter, IterMut};
+
/// Contents of a jump table.
///
/// All jump tables use 0-based indexing and are densely populated.
#[derive(Clone)]
pub struct JumpTableData {
    // Table entries: the destination block for each index, in index order.
    table: Vec<Block>,
}
+
+impl JumpTableData {
+ /// Create a new empty jump table.
+ pub fn new() -> Self {
+ Self { table: Vec::new() }
+ }
+
+ /// Create a new empty jump table with the specified capacity.
+ pub fn with_capacity(capacity: usize) -> Self {
+ Self {
+ table: Vec::with_capacity(capacity),
+ }
+ }
+
+ /// Get the number of table entries.
+ pub fn len(&self) -> usize {
+ self.table.len()
+ }
+
+ /// Append a table entry.
+ pub fn push_entry(&mut self, dest: Block) {
+ self.table.push(dest)
+ }
+
+ /// Checks if any of the entries branch to `block`.
+ pub fn branches_to(&self, block: Block) -> bool {
+ self.table.iter().any(|target_block| *target_block == block)
+ }
+
+ /// Access the whole table as a slice.
+ pub fn as_slice(&self) -> &[Block] {
+ self.table.as_slice()
+ }
+
+ /// Access the whole table as a mutable slice.
+ pub fn as_mut_slice(&mut self) -> &mut [Block] {
+ self.table.as_mut_slice()
+ }
+
+ /// Returns an iterator over the table.
+ pub fn iter(&self) -> Iter<Block> {
+ self.table.iter()
+ }
+
+ /// Returns an iterator that allows modifying each value.
+ pub fn iter_mut(&mut self) -> IterMut<Block> {
+ self.table.iter_mut()
+ }
+}
+
+impl Display for JumpTableData {
+ fn fmt(&self, fmt: &mut Formatter) -> fmt::Result {
+ write!(fmt, "jump_table [")?;
+ match self.table.first() {
+ None => (),
+ Some(first) => write!(fmt, "{}", first)?,
+ }
+ for block in self.table.iter().skip(1) {
+ write!(fmt, ", {}", block)?;
+ }
+ write!(fmt, "]")
+ }
+}
+
#[cfg(test)]
mod tests {
    use super::JumpTableData;
    use crate::entity::EntityRef;
    use crate::ir::Block;
    use alloc::string::ToString;

    #[test]
    fn empty() {
        let jt = JumpTableData::new();

        // Out-of-range lookups on an empty table return None rather than panic.
        assert_eq!(jt.as_slice().get(0), None);
        assert_eq!(jt.as_slice().get(10), None);

        assert_eq!(jt.to_string(), "jump_table []");

        let v = jt.as_slice();
        assert_eq!(v, []);
    }

    #[test]
    fn insert() {
        let e1 = Block::new(1);
        let e2 = Block::new(2);

        let mut jt = JumpTableData::new();

        // Duplicate destinations are allowed; entries keep insertion order.
        jt.push_entry(e1);
        jt.push_entry(e2);
        jt.push_entry(e1);

        assert_eq!(jt.to_string(), "jump_table [block1, block2, block1]");

        let v = jt.as_slice();
        assert_eq!(v, [e1, e2, e1]);
    }
}
diff --git a/third_party/rust/cranelift-codegen/src/ir/layout.rs b/third_party/rust/cranelift-codegen/src/ir/layout.rs
new file mode 100644
index 0000000000..62516325dd
--- /dev/null
+++ b/third_party/rust/cranelift-codegen/src/ir/layout.rs
@@ -0,0 +1,1220 @@
+//! Function layout.
+//!
+//! The order of basic blocks in a function and the order of instructions in a block is
+//! determined by the `Layout` data structure defined in this module.
+
+use crate::entity::SecondaryMap;
+use crate::ir::dfg::DataFlowGraph;
+use crate::ir::progpoint::{ExpandedProgramPoint, ProgramOrder};
+use crate::ir::{Block, Inst};
+use crate::packed_option::PackedOption;
+use crate::timing;
+use core::cmp;
+use core::iter::{IntoIterator, Iterator};
+use log::debug;
+
/// The `Layout` struct determines the layout of blocks and instructions in a function. It does not
/// contain definitions of instructions or blocks, but depends on `Inst` and `Block` entity references
/// being defined elsewhere.
///
/// This data structure determines:
///
/// - The order of blocks in the function.
/// - Which block contains a given instruction.
/// - The order of instructions within a block.
///
/// While data dependencies are not recorded, instruction ordering does affect control
/// dependencies, so part of the semantics of the program are determined by the layout.
///
#[derive(Clone)]
pub struct Layout {
    /// Linked list nodes for the layout order of blocks. Forms a doubly linked list, terminated in
    /// both ends by `None`.
    blocks: SecondaryMap<Block, BlockNode>,

    /// Linked list nodes for the layout order of instructions. Forms a doubly linked list per block,
    /// terminated in both ends by `None`.
    insts: SecondaryMap<Inst, InstNode>,

    /// First block in the layout order, or `None` when no blocks have been laid out.
    first_block: Option<Block>,

    /// Last block in the layout order, or `None` when no blocks have been laid out.
    last_block: Option<Block>,
}
+
impl Layout {
    /// Create a new empty `Layout`.
    pub fn new() -> Self {
        Self {
            blocks: SecondaryMap::new(),
            insts: SecondaryMap::new(),
            first_block: None,
            last_block: None,
        }
    }

    /// Clear the layout.
    ///
    /// Removes all blocks and instructions from the layout order, returning it to the
    /// same state as `new()`.
    pub fn clear(&mut self) {
        self.blocks.clear();
        self.insts.clear();
        self.first_block = None;
        self.last_block = None;
    }

    /// Returns the capacity of the `BlockData` map.
    pub fn block_capacity(&self) -> usize {
        self.blocks.capacity()
    }
}
+
/// Sequence numbers.
///
/// All instructions and blocks are given a sequence number that can be used to quickly determine
/// their relative position in the layout. The sequence numbers are not contiguous, but are assigned
/// like line numbers in BASIC: 10, 20, 30, ...
///
/// The block sequence numbers are strictly increasing, and so are the instruction sequence numbers
/// within a block. The instruction sequence numbers are all between the sequence number of their
/// containing block and the following block.
///
/// The result is that sequence numbers work like BASIC line numbers for the textual form of the IR.
type SequenceNumber = u32;

/// Initial stride assigned to new sequence numbers.
const MAJOR_STRIDE: SequenceNumber = 10;

/// Secondary stride used when renumbering locally.
const MINOR_STRIDE: SequenceNumber = 2;

/// Limit on the sequence number range we'll renumber locally. If this limit is exceeded, we'll
/// switch to a full function renumbering.
const LOCAL_LIMIT: SequenceNumber = 100 * MINOR_STRIDE;

/// Compute the midpoint between `a` and `b`.
/// Return `None` if the midpoint would be equal to either.
fn midpoint(a: SequenceNumber, b: SequenceNumber) -> Option<SequenceNumber> {
    debug_assert!(a < b);
    // Written as `a + (b - a) / 2` rather than `(a + b) / 2` so the sum can't overflow.
    let mid = a + (b - a) / 2;
    // Since `a < b`, `mid` never reaches `b`; it only degenerates to `a` when the
    // two numbers are adjacent, in which case there is no room in between.
    if mid == a {
        None
    } else {
        Some(mid)
    }
}
+
#[test]
fn test_midpoint() {
    // Adjacent numbers have no room for a midpoint.
    assert_eq!(midpoint(0, 1), None);
    assert_eq!(midpoint(0, 2), Some(1));
    assert_eq!(midpoint(0, 3), Some(1));
    assert_eq!(midpoint(0, 4), Some(2));
    assert_eq!(midpoint(1, 4), Some(2));
    assert_eq!(midpoint(2, 4), Some(3));
    assert_eq!(midpoint(3, 4), None);
    // The original had `midpoint(3, 4)` asserted twice (copy-paste duplicate);
    // check the overflow-avoidance of `a + (b - a) / 2` near `u32::MAX` instead.
    assert_eq!(
        midpoint(SequenceNumber::MAX - 2, SequenceNumber::MAX),
        Some(SequenceNumber::MAX - 1)
    );
}
+
impl ProgramOrder for Layout {
    /// Compare two program points by their position in the layout.
    ///
    /// Implemented by comparing sequence numbers, which are kept monotonic over the
    /// whole layout order.
    fn cmp<A, B>(&self, a: A, b: B) -> cmp::Ordering
    where
        A: Into<ExpandedProgramPoint>,
        B: Into<ExpandedProgramPoint>,
    {
        let a_seq = self.seq(a);
        let b_seq = self.seq(b);
        a_seq.cmp(&b_seq)
    }

    /// Is `inst` the last instruction of the block that immediately precedes `block`
    /// in the layout (i.e. does it sit in the "gap" just before `block`)?
    fn is_block_gap(&self, inst: Inst, block: Block) -> bool {
        let i = &self.insts[inst];
        let e = &self.blocks[block];

        // `inst` has no successor in its block, and its block is `block`'s predecessor.
        i.next.is_none() && i.block == e.prev
    }
}
+
// Private methods for dealing with sequence numbers.
//
// Invariant maintained throughout: sequence numbers are strictly increasing in layout
// order — a block's number precedes its instructions', which precede the next block's.
impl Layout {
    /// Get the sequence number of a program point that must correspond to an entity in the layout.
    fn seq<PP: Into<ExpandedProgramPoint>>(&self, pp: PP) -> SequenceNumber {
        // When `PP = Inst` or `PP = Block`, we expect this dynamic type check to be optimized out.
        match pp.into() {
            ExpandedProgramPoint::Block(block) => self.blocks[block].seq,
            ExpandedProgramPoint::Inst(inst) => self.insts[inst].seq,
        }
    }

    /// Get the last sequence number in `block`.
    fn last_block_seq(&self, block: Block) -> SequenceNumber {
        // Get the seq of the last instruction if it exists, otherwise use the block header seq.
        self.blocks[block]
            .last_inst
            .map(|inst| self.insts[inst].seq)
            .unwrap_or(self.blocks[block].seq)
    }

    /// Assign a valid sequence number to `block` such that the numbers are still monotonic. This may
    /// require renumbering.
    fn assign_block_seq(&mut self, block: Block) {
        debug_assert!(self.is_block_inserted(block));

        // Get the sequence number immediately before `block`, or 0.
        let prev_seq = self.blocks[block]
            .prev
            .map(|prev_block| self.last_block_seq(prev_block))
            .unwrap_or(0);

        // Get the sequence number immediately following `block`.
        let next_seq = if let Some(inst) = self.blocks[block].first_inst.expand() {
            self.insts[inst].seq
        } else if let Some(next_block) = self.blocks[block].next.expand() {
            self.blocks[next_block].seq
        } else {
            // There is nothing after `block`. We can just use a major stride.
            self.blocks[block].seq = prev_seq + MAJOR_STRIDE;
            return;
        };

        // Check if there is room between these sequence numbers.
        if let Some(seq) = midpoint(prev_seq, next_seq) {
            self.blocks[block].seq = seq;
        } else {
            // No available integers between `prev_seq` and `next_seq`. We have to renumber.
            self.renumber_from_block(block, prev_seq + MINOR_STRIDE, prev_seq + LOCAL_LIMIT);
        }
    }

    /// Assign a valid sequence number to `inst` such that the numbers are still monotonic. This may
    /// require renumbering.
    fn assign_inst_seq(&mut self, inst: Inst) {
        let block = self
            .inst_block(inst)
            .expect("inst must be inserted before assigning an seq");

        // Get the sequence number immediately before `inst`.
        // An instruction with no predecessor uses its block header's number.
        let prev_seq = match self.insts[inst].prev.expand() {
            Some(prev_inst) => self.insts[prev_inst].seq,
            None => self.blocks[block].seq,
        };

        // Get the sequence number immediately following `inst`.
        let next_seq = if let Some(next_inst) = self.insts[inst].next.expand() {
            self.insts[next_inst].seq
        } else if let Some(next_block) = self.blocks[block].next.expand() {
            self.blocks[next_block].seq
        } else {
            // There is nothing after `inst`. We can just use a major stride.
            self.insts[inst].seq = prev_seq + MAJOR_STRIDE;
            return;
        };

        // Check if there is room between these sequence numbers.
        if let Some(seq) = midpoint(prev_seq, next_seq) {
            self.insts[inst].seq = seq;
        } else {
            // No available integers between `prev_seq` and `next_seq`. We have to renumber.
            self.renumber_from_inst(inst, prev_seq + MINOR_STRIDE, prev_seq + LOCAL_LIMIT);
        }
    }

    /// Renumber instructions starting from `inst` until the end of the block or until numbers catch
    /// up.
    ///
    /// Return `None` if renumbering has caught up and the sequence is monotonic again. Otherwise
    /// return the last used sequence number.
    ///
    /// If sequence numbers exceed `limit`, switch to a full function renumbering and return `None`.
    fn renumber_insts(
        &mut self,
        inst: Inst,
        seq: SequenceNumber,
        limit: SequenceNumber,
    ) -> Option<SequenceNumber> {
        let mut inst = inst;
        let mut seq = seq;

        loop {
            self.insts[inst].seq = seq;

            // Next instruction.
            inst = match self.insts[inst].next.expand() {
                None => return Some(seq),
                Some(next) => next,
            };

            if seq < self.insts[inst].seq {
                // Sequence caught up.
                return None;
            }

            if seq > limit {
                // We're pushing too many instructions in front of us.
                // Switch to a full function renumbering to make some space.
                self.full_renumber();
                return None;
            }

            seq += MINOR_STRIDE;
        }
    }

    /// Renumber starting from `block` to `seq` and continuing until the sequence numbers are
    /// monotonic again.
    fn renumber_from_block(
        &mut self,
        block: Block,
        first_seq: SequenceNumber,
        limit: SequenceNumber,
    ) {
        let mut block = block;
        let mut seq = first_seq;

        loop {
            self.blocks[block].seq = seq;

            // Renumber instructions in `block`. Stop when the numbers catch up.
            if let Some(inst) = self.blocks[block].first_inst.expand() {
                seq = match self.renumber_insts(inst, seq + MINOR_STRIDE, limit) {
                    Some(s) => s,
                    // `None` means either caught up or a full renumber happened; either way done.
                    None => return,
                }
            }

            // Advance to the next block.
            block = match self.blocks[block].next.expand() {
                Some(next) => next,
                None => return,
            };

            // Stop renumbering once the numbers catch up.
            if seq < self.blocks[block].seq {
                return;
            }

            seq += MINOR_STRIDE;
        }
    }

    /// Renumber starting from `inst` to `seq` and continuing until the sequence numbers are
    /// monotonic again.
    fn renumber_from_inst(&mut self, inst: Inst, first_seq: SequenceNumber, limit: SequenceNumber) {
        if let Some(seq) = self.renumber_insts(inst, first_seq, limit) {
            // Renumbering spills over into next block.
            if let Some(next_block) = self.blocks[self.inst_block(inst).unwrap()].next.expand() {
                self.renumber_from_block(next_block, seq + MINOR_STRIDE, limit);
            }
        }
    }

    /// Renumber all blocks and instructions in the layout.
    ///
    /// This doesn't affect the position of anything, but it gives more room in the internal
    /// sequence numbers for inserting instructions later.
    fn full_renumber(&mut self) {
        let _tt = timing::layout_renumber();
        // Walk the whole layout in order, assigning fresh numbers with MAJOR_STRIDE gaps
        // so future insertions rarely need local renumbering.
        let mut seq = 0;
        let mut next_block = self.first_block;
        while let Some(block) = next_block {
            self.blocks[block].seq = seq;
            seq += MAJOR_STRIDE;
            next_block = self.blocks[block].next.expand();

            let mut next_inst = self.blocks[block].first_inst.expand();
            while let Some(inst) = next_inst {
                self.insts[inst].seq = seq;
                seq += MAJOR_STRIDE;
                next_inst = self.insts[inst].next.expand();
            }
        }
        debug!("Renumbered {} program points", seq / MAJOR_STRIDE);
    }
}
+
/// Methods for laying out blocks.
///
/// An unknown block starts out as *not inserted* in the block layout. The layout is a linear order of
/// inserted blocks. Once a block has been inserted in the layout, instructions can be added. A block
/// can only be removed from the layout when it is empty.
///
/// Since every block must end with a terminator instruction which cannot fall through, the layout of
/// blocks do not affect the semantics of the program.
///
impl Layout {
    /// Is `block` currently part of the layout?
    pub fn is_block_inserted(&self, block: Block) -> bool {
        // A block with a `prev` link is inserted; the first block has no `prev`,
        // so it is checked separately.
        Some(block) == self.first_block || self.blocks[block].prev.is_some()
    }

    /// Insert `block` as the last block in the layout.
    pub fn append_block(&mut self, block: Block) {
        debug_assert!(
            !self.is_block_inserted(block),
            "Cannot append block that is already in the layout"
        );
        {
            let node = &mut self.blocks[block];
            debug_assert!(node.first_inst.is_none() && node.last_inst.is_none());
            node.prev = self.last_block.into();
            node.next = None.into();
        }
        // Link the old tail forward to `block`, or make `block` the first block.
        if let Some(last) = self.last_block {
            self.blocks[last].next = block.into();
        } else {
            self.first_block = Some(block);
        }
        self.last_block = Some(block);
        self.assign_block_seq(block);
    }

    /// Insert `block` in the layout before the existing block `before`.
    pub fn insert_block(&mut self, block: Block, before: Block) {
        debug_assert!(
            !self.is_block_inserted(block),
            "Cannot insert block that is already in the layout"
        );
        debug_assert!(
            self.is_block_inserted(before),
            "block Insertion point not in the layout"
        );
        let after = self.blocks[before].prev;
        {
            let node = &mut self.blocks[block];
            node.next = before.into();
            node.prev = after;
        }
        self.blocks[before].prev = block.into();
        // Fix the forward link of `before`'s old predecessor (or the list head).
        match after.expand() {
            None => self.first_block = Some(block),
            Some(a) => self.blocks[a].next = block.into(),
        }
        self.assign_block_seq(block);
    }

    /// Insert `block` in the layout *after* the existing block `after`.
    pub fn insert_block_after(&mut self, block: Block, after: Block) {
        debug_assert!(
            !self.is_block_inserted(block),
            "Cannot insert block that is already in the layout"
        );
        debug_assert!(
            self.is_block_inserted(after),
            "block Insertion point not in the layout"
        );
        let before = self.blocks[after].next;
        {
            let node = &mut self.blocks[block];
            node.next = before;
            node.prev = after.into();
        }
        self.blocks[after].next = block.into();
        // Fix the backward link of `after`'s old successor (or the list tail).
        match before.expand() {
            None => self.last_block = Some(block),
            Some(b) => self.blocks[b].prev = block.into(),
        }
        self.assign_block_seq(block);
    }

    /// Remove `block` from the layout.
    ///
    /// The block must be empty: all of its instructions must have been removed first.
    pub fn remove_block(&mut self, block: Block) {
        debug_assert!(self.is_block_inserted(block), "block not in the layout");
        debug_assert!(self.first_inst(block).is_none(), "block must be empty.");

        // Clear the `block` node and extract links.
        let prev;
        let next;
        {
            let n = &mut self.blocks[block];
            prev = n.prev;
            next = n.next;
            n.prev = None.into();
            n.next = None.into();
        }
        // Fix up links to `block`.
        match prev.expand() {
            None => self.first_block = next.expand(),
            Some(p) => self.blocks[p].next = next,
        }
        match next.expand() {
            None => self.last_block = prev.expand(),
            Some(n) => self.blocks[n].prev = prev,
        }
    }

    /// Return an iterator over all blocks in layout order.
    pub fn blocks(&self) -> Blocks {
        Blocks {
            layout: self,
            next: self.first_block,
        }
    }

    /// Get the function's entry block.
    /// This is simply the first block in the layout order.
    pub fn entry_block(&self) -> Option<Block> {
        self.first_block
    }

    /// Get the last block in the layout.
    pub fn last_block(&self) -> Option<Block> {
        self.last_block
    }

    /// Get the block preceding `block` in the layout order.
    pub fn prev_block(&self, block: Block) -> Option<Block> {
        self.blocks[block].prev.expand()
    }

    /// Get the block following `block` in the layout order.
    pub fn next_block(&self, block: Block) -> Option<Block> {
        self.blocks[block].next.expand()
    }
}
+
/// Linked-list node tracking one block's position in the layout.
#[derive(Clone, Debug, Default)]
struct BlockNode {
    /// Previous block in layout order, or `None` for the first block.
    prev: PackedOption<Block>,
    /// Next block in layout order, or `None` for the last block.
    next: PackedOption<Block>,
    /// First instruction in this block, or `None` when the block is empty.
    first_inst: PackedOption<Inst>,
    /// Last instruction in this block, or `None` when the block is empty.
    last_inst: PackedOption<Inst>,
    /// Sequence number used for fast program-point ordering.
    seq: SequenceNumber,
}
+
/// Iterate over blocks in layout order. See `Layout::blocks()`.
pub struct Blocks<'f> {
    /// The layout being traversed.
    layout: &'f Layout,
    /// The next block to yield, or `None` when the iterator is exhausted.
    next: Option<Block>,
}
+
+impl<'f> Iterator for Blocks<'f> {
+ type Item = Block;
+
+ fn next(&mut self) -> Option<Block> {
+ match self.next {
+ Some(block) => {
+ self.next = self.layout.next_block(block);
+ Some(block)
+ }
+ None => None,
+ }
+ }
+}
+
/// Use a layout reference in a for loop.
///
/// Iterates over blocks in layout order, same as `Layout::blocks()`.
impl<'f> IntoIterator for &'f Layout {
    type Item = Block;
    type IntoIter = Blocks<'f>;

    fn into_iter(self) -> Blocks<'f> {
        self.blocks()
    }
}
+
+/// Methods for arranging instructions.
+///
+/// An instruction starts out as *not inserted* in the layout. An instruction can be inserted into
+/// a block at a given position.
+impl Layout {
+    /// Get the block containing `inst`, or `None` if `inst` is not inserted in the layout.
+    pub fn inst_block(&self, inst: Inst) -> Option<Block> {
+        // The node's `block` field is `None` until the instruction is inserted.
+        self.insts[inst].block.into()
+    }
+
+    /// Get the block containing the program point `pp`. Panic if `pp` is not in the layout.
+    pub fn pp_block<PP>(&self, pp: PP) -> Block
+    where
+        PP: Into<ExpandedProgramPoint>,
+    {
+        match pp.into() {
+            // A block is its own containing block.
+            ExpandedProgramPoint::Block(block) => block,
+            ExpandedProgramPoint::Inst(inst) => {
+                self.inst_block(inst).expect("Program point not in layout")
+            }
+        }
+    }
+
+    /// Append `inst` to the end of `block`.
+    pub fn append_inst(&mut self, inst: Inst, block: Block) {
+        debug_assert_eq!(self.inst_block(inst), None);
+        debug_assert!(
+            self.is_block_inserted(block),
+            "Cannot append instructions to block not in layout"
+        );
+        {
+            let block_node = &mut self.blocks[block];
+            {
+                // Link the new node back to the current tail of the block.
+                let inst_node = &mut self.insts[inst];
+                inst_node.block = block.into();
+                inst_node.prev = block_node.last_inst;
+                debug_assert!(inst_node.next.is_none());
+            }
+            // Splice in: either the block was empty, or the old tail gains a
+            // forward link to `inst`.
+            if block_node.first_inst.is_none() {
+                block_node.first_inst = inst.into();
+            } else {
+                self.insts[block_node.last_inst.unwrap()].next = inst.into();
+            }
+            block_node.last_inst = inst.into();
+        }
+        self.assign_inst_seq(inst);
+    }
+
+    /// Fetch a block's first instruction.
+    /// Returns `None` when the block is empty.
+    pub fn first_inst(&self, block: Block) -> Option<Inst> {
+        self.blocks[block].first_inst.into()
+    }
+
+    /// Fetch a block's last instruction.
+    /// Returns `None` when the block is empty.
+    pub fn last_inst(&self, block: Block) -> Option<Inst> {
+        self.blocks[block].last_inst.into()
+    }
+
+    /// Fetch the instruction following `inst`.
+    /// Returns `None` at the end of the block.
+    pub fn next_inst(&self, inst: Inst) -> Option<Inst> {
+        self.insts[inst].next.expand()
+    }
+
+    /// Fetch the instruction preceding `inst`.
+    /// Returns `None` at the start of the block.
+    pub fn prev_inst(&self, inst: Inst) -> Option<Inst> {
+        self.insts[inst].prev.expand()
+    }
+
+    /// Fetch the first instruction in a block's terminal branch group.
+    pub fn canonical_branch_inst(&self, dfg: &DataFlowGraph, block: Block) -> Option<Inst> {
+        // Basic blocks permit at most two terminal branch instructions.
+        // If two, the former is conditional and the latter is unconditional.
+        let last = self.last_inst(block)?;
+        if let Some(prev) = self.prev_inst(last) {
+            if dfg[prev].opcode().is_branch() {
+                return Some(prev);
+            }
+        }
+        // A non-branch `prev` (or no `prev`) means `last` starts the group.
+        Some(last)
+    }
+
+    /// Insert `inst` before the instruction `before` in the same block.
+    pub fn insert_inst(&mut self, inst: Inst, before: Inst) {
+        debug_assert_eq!(self.inst_block(inst), None);
+        let block = self
+            .inst_block(before)
+            .expect("Instruction before insertion point not in the layout");
+        // `after` is the node that will end up preceding `inst`, if any.
+        let after = self.insts[before].prev;
+        {
+            let inst_node = &mut self.insts[inst];
+            inst_node.block = block.into();
+            inst_node.next = before.into();
+            inst_node.prev = after;
+        }
+        self.insts[before].prev = inst.into();
+        // Patch the forward link: either the predecessor's `next`, or the
+        // block head if `before` was the first instruction.
+        match after.expand() {
+            None => self.blocks[block].first_inst = inst.into(),
+            Some(a) => self.insts[a].next = inst.into(),
+        }
+        self.assign_inst_seq(inst);
+    }
+
+    /// Remove `inst` from the layout.
+    pub fn remove_inst(&mut self, inst: Inst) {
+        let block = self.inst_block(inst).expect("Instruction already removed.");
+        // Clear the `inst` node and extract links.
+        let prev;
+        let next;
+        {
+            let n = &mut self.insts[inst];
+            prev = n.prev;
+            next = n.next;
+            n.block = None.into();
+            n.prev = None.into();
+            n.next = None.into();
+        }
+        // Fix up links to `inst`: its neighbors (or the block's head/tail
+        // pointers) now point past it.
+        match prev.expand() {
+            None => self.blocks[block].first_inst = next,
+            Some(p) => self.insts[p].next = next,
+        }
+        match next.expand() {
+            None => self.blocks[block].last_inst = prev,
+            Some(n) => self.insts[n].prev = prev,
+        }
+    }
+
+    /// Iterate over the instructions in `block` in layout order.
+    pub fn block_insts(&self, block: Block) -> Insts {
+        let node = &self.blocks[block];
+        Insts {
+            layout: self,
+            head: node.first_inst.into(),
+            tail: node.last_inst.into(),
+        }
+    }
+
+    /// Iterate over a limited set of instruction which are likely the branches of `block` in layout
+    /// order. Any instruction not visited by this iterator is not a branch, but an instruction visited by this may not be a branch.
+    pub fn block_likely_branches(&self, block: Block) -> Insts {
+        // Note: Checking whether an instruction is a branch or not while walking backward might add
+        // extra overhead. However, we know that the number of branches is limited to 2 at the end of
+        // each block, and therefore we can just iterate over the last 2 instructions.
+        let mut iter = self.block_insts(block);
+        let head = iter.head;
+        let tail = iter.tail;
+        // Step back twice from the tail; fall back to the original head when
+        // the block has fewer than two instructions.
+        iter.next_back();
+        let head = iter.next_back().or(head);
+        Insts {
+            layout: self,
+            head,
+            tail,
+        }
+    }
+
+    /// Split the block containing `before` in two.
+    ///
+    /// Insert `new_block` after the old block and move `before` and the following instructions to
+    /// `new_block`:
+    ///
+    /// ```text
+    /// old_block:
+    ///     i1
+    ///     i2
+    ///     i3 << before
+    ///     i4
+    /// ```
+    /// becomes:
+    ///
+    /// ```text
+    /// old_block:
+    ///     i1
+    ///     i2
+    /// new_block:
+    ///     i3 << before
+    ///     i4
+    /// ```
+    pub fn split_block(&mut self, new_block: Block, before: Inst) {
+        let old_block = self
+            .inst_block(before)
+            .expect("The `before` instruction must be in the layout");
+        debug_assert!(!self.is_block_inserted(new_block));
+
+        // Insert new_block after old_block.
+        // `new_block` inherits everything from `before` through the old tail.
+        let next_block = self.blocks[old_block].next;
+        let last_inst = self.blocks[old_block].last_inst;
+        {
+            let node = &mut self.blocks[new_block];
+            node.prev = old_block.into();
+            node.next = next_block;
+            node.first_inst = before.into();
+            node.last_inst = last_inst;
+        }
+        self.blocks[old_block].next = new_block.into();
+
+        // Fix backwards link.
+        if Some(old_block) == self.last_block {
+            self.last_block = Some(new_block);
+        } else {
+            self.blocks[next_block.unwrap()].prev = new_block.into();
+        }
+
+        // Disconnect the instruction links.
+        let prev_inst = self.insts[before].prev;
+        self.insts[before].prev = None.into();
+        self.blocks[old_block].last_inst = prev_inst;
+        match prev_inst.expand() {
+            // `before` was the first instruction, so `old_block` is now empty.
+            None => self.blocks[old_block].first_inst = None.into(),
+            Some(pi) => self.insts[pi].next = None.into(),
+        }
+
+        // Fix the instruction -> block pointers.
+        // Walk the moved suffix and re-home every instruction.
+        let mut opt_i = Some(before);
+        while let Some(i) = opt_i {
+            debug_assert_eq!(self.insts[i].block.expand(), Some(old_block));
+            self.insts[i].block = new_block.into();
+            opt_i = self.insts[i].next.into();
+        }
+
+        self.assign_block_seq(new_block);
+    }
+}
+
+#[derive(Clone, Debug, Default)]
+struct InstNode {
+    /// The Block containing this instruction, or `None` if the instruction is not yet inserted.
+    block: PackedOption<Block>,
+    /// Previous instruction in the same block, if any.
+    prev: PackedOption<Inst>,
+    /// Next instruction in the same block, if any.
+    next: PackedOption<Inst>,
+    /// Sequence number used to order program points within the layout.
+    seq: SequenceNumber,
+}
+
+/// Iterate over instructions in a block in layout order. See `Layout::block_insts()`.
+pub struct Insts<'f> {
+    layout: &'f Layout,
+    /// Next instruction yielded from the front; `None` once exhausted.
+    head: Option<Inst>,
+    /// Next instruction yielded from the back; `head == tail` (both `Some`)
+    /// means exactly one item remains.
+    tail: Option<Inst>,
+}
+
+impl<'f> Iterator for Insts<'f> {
+    type Item = Inst;
+
+    fn next(&mut self) -> Option<Inst> {
+        let inst = self.head?;
+        if self.head == self.tail {
+            // The front and back cursors met: the range is now exhausted.
+            self.head = None;
+            self.tail = None;
+        } else {
+            self.head = self.layout.insts[inst].next.into();
+        }
+        Some(inst)
+    }
+}
+
+impl<'f> DoubleEndedIterator for Insts<'f> {
+    fn next_back(&mut self) -> Option<Inst> {
+        let inst = self.tail?;
+        if self.head == self.tail {
+            // The front and back cursors met: the range is now exhausted.
+            self.head = None;
+            self.tail = None;
+        } else {
+            self.tail = self.layout.insts[inst].prev.into();
+        }
+        Some(inst)
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use super::Layout;
+    use crate::cursor::{Cursor, CursorPosition};
+    use crate::entity::EntityRef;
+    use crate::ir::{Block, Inst, ProgramOrder, SourceLoc};
+    use alloc::vec::Vec;
+    use core::cmp::Ordering;
+
+    /// A test-only cursor over a bare `Layout` with no source-location
+    /// tracking (`srcloc`/`set_srcloc` are unimplemented).
+    struct LayoutCursor<'f> {
+        /// Borrowed function layout. Public so it can be re-borrowed from this cursor.
+        pub layout: &'f mut Layout,
+        pos: CursorPosition,
+    }
+
+    impl<'f> Cursor for LayoutCursor<'f> {
+        fn position(&self) -> CursorPosition {
+            self.pos
+        }
+
+        fn set_position(&mut self, pos: CursorPosition) {
+            self.pos = pos;
+        }
+
+        fn srcloc(&self) -> SourceLoc {
+            unimplemented!()
+        }
+
+        fn set_srcloc(&mut self, _srcloc: SourceLoc) {
+            unimplemented!()
+        }
+
+        fn layout(&self) -> &Layout {
+            self.layout
+        }
+
+        fn layout_mut(&mut self) -> &mut Layout {
+            self.layout
+        }
+    }
+
+    impl<'f> LayoutCursor<'f> {
+        /// Create a new `LayoutCursor` for `layout`.
+        /// The cursor holds a mutable reference to `layout` for its entire lifetime.
+        pub fn new(layout: &'f mut Layout) -> Self {
+            Self {
+                layout,
+                pos: CursorPosition::Nowhere,
+            }
+        }
+    }
+
+    // Assert that `layout` contains exactly the given blocks, each with the
+    // given instructions, in order — checking both link directions.
+    fn verify(layout: &mut Layout, blocks: &[(Block, &[Inst])]) {
+        // Check that blocks are inserted and instructions belong the right places.
+        // Check forward linkage with iterators.
+        // Check that layout sequence numbers are strictly monotonic.
+        {
+            let mut seq = 0;
+            let mut block_iter = layout.blocks();
+            for &(block, insts) in blocks {
+                assert!(layout.is_block_inserted(block));
+                assert_eq!(block_iter.next(), Some(block));
+                assert!(layout.blocks[block].seq > seq);
+                seq = layout.blocks[block].seq;
+
+                let mut inst_iter = layout.block_insts(block);
+                for &inst in insts {
+                    assert_eq!(layout.inst_block(inst), Some(block));
+                    assert_eq!(inst_iter.next(), Some(inst));
+                    assert!(layout.insts[inst].seq > seq);
+                    seq = layout.insts[inst].seq;
+                }
+                assert_eq!(inst_iter.next(), None);
+            }
+            assert_eq!(block_iter.next(), None);
+        }
+
+        // Check backwards linkage with a cursor.
+        let mut cur = LayoutCursor::new(layout);
+        for &(block, insts) in blocks.into_iter().rev() {
+            assert_eq!(cur.prev_block(), Some(block));
+            for &inst in insts.into_iter().rev() {
+                assert_eq!(cur.prev_inst(), Some(inst));
+            }
+            assert_eq!(cur.prev_inst(), None);
+        }
+        assert_eq!(cur.prev_block(), None);
+    }
+
+    #[test]
+    fn append_block() {
+        let mut layout = Layout::new();
+        let e0 = Block::new(0);
+        let e1 = Block::new(1);
+        let e2 = Block::new(2);
+
+        {
+            let imm = &layout;
+            assert!(!imm.is_block_inserted(e0));
+            assert!(!imm.is_block_inserted(e1));
+        }
+        verify(&mut layout, &[]);
+
+        layout.append_block(e1);
+        assert!(!layout.is_block_inserted(e0));
+        assert!(layout.is_block_inserted(e1));
+        assert!(!layout.is_block_inserted(e2));
+        let v: Vec<Block> = layout.blocks().collect();
+        assert_eq!(v, [e1]);
+
+        layout.append_block(e2);
+        assert!(!layout.is_block_inserted(e0));
+        assert!(layout.is_block_inserted(e1));
+        assert!(layout.is_block_inserted(e2));
+        let v: Vec<Block> = layout.blocks().collect();
+        assert_eq!(v, [e1, e2]);
+
+        layout.append_block(e0);
+        assert!(layout.is_block_inserted(e0));
+        assert!(layout.is_block_inserted(e1));
+        assert!(layout.is_block_inserted(e2));
+        let v: Vec<Block> = layout.blocks().collect();
+        assert_eq!(v, [e1, e2, e0]);
+
+        {
+            let imm = &layout;
+            let mut v = Vec::new();
+            for e in imm {
+                v.push(e);
+            }
+            assert_eq!(v, [e1, e2, e0]);
+        }
+
+        // Test cursor positioning.
+        let mut cur = LayoutCursor::new(&mut layout);
+        assert_eq!(cur.position(), CursorPosition::Nowhere);
+        assert_eq!(cur.next_inst(), None);
+        assert_eq!(cur.position(), CursorPosition::Nowhere);
+        assert_eq!(cur.prev_inst(), None);
+        assert_eq!(cur.position(), CursorPosition::Nowhere);
+
+        assert_eq!(cur.next_block(), Some(e1));
+        assert_eq!(cur.position(), CursorPosition::Before(e1));
+        assert_eq!(cur.next_inst(), None);
+        assert_eq!(cur.position(), CursorPosition::After(e1));
+        assert_eq!(cur.next_inst(), None);
+        assert_eq!(cur.position(), CursorPosition::After(e1));
+        assert_eq!(cur.next_block(), Some(e2));
+        assert_eq!(cur.prev_inst(), None);
+        assert_eq!(cur.position(), CursorPosition::Before(e2));
+        assert_eq!(cur.next_block(), Some(e0));
+        assert_eq!(cur.next_block(), None);
+        assert_eq!(cur.position(), CursorPosition::Nowhere);
+
+        // Backwards through the blocks.
+        assert_eq!(cur.prev_block(), Some(e0));
+        assert_eq!(cur.position(), CursorPosition::After(e0));
+        assert_eq!(cur.prev_block(), Some(e2));
+        assert_eq!(cur.prev_block(), Some(e1));
+        assert_eq!(cur.prev_block(), None);
+        assert_eq!(cur.position(), CursorPosition::Nowhere);
+    }
+
+    #[test]
+    fn insert_block() {
+        let mut layout = Layout::new();
+        let e0 = Block::new(0);
+        let e1 = Block::new(1);
+        let e2 = Block::new(2);
+
+        {
+            let imm = &layout;
+            assert!(!imm.is_block_inserted(e0));
+            assert!(!imm.is_block_inserted(e1));
+
+            let v: Vec<Block> = layout.blocks().collect();
+            assert_eq!(v, []);
+        }
+
+        layout.append_block(e1);
+        assert!(!layout.is_block_inserted(e0));
+        assert!(layout.is_block_inserted(e1));
+        assert!(!layout.is_block_inserted(e2));
+        verify(&mut layout, &[(e1, &[])]);
+
+        layout.insert_block(e2, e1);
+        assert!(!layout.is_block_inserted(e0));
+        assert!(layout.is_block_inserted(e1));
+        assert!(layout.is_block_inserted(e2));
+        verify(&mut layout, &[(e2, &[]), (e1, &[])]);
+
+        layout.insert_block(e0, e1);
+        assert!(layout.is_block_inserted(e0));
+        assert!(layout.is_block_inserted(e1));
+        assert!(layout.is_block_inserted(e2));
+        verify(&mut layout, &[(e2, &[]), (e0, &[]), (e1, &[])]);
+    }
+
+    #[test]
+    fn insert_block_after() {
+        let mut layout = Layout::new();
+        let e0 = Block::new(0);
+        let e1 = Block::new(1);
+        let e2 = Block::new(2);
+
+        layout.append_block(e1);
+        layout.insert_block_after(e2, e1);
+        verify(&mut layout, &[(e1, &[]), (e2, &[])]);
+
+        layout.insert_block_after(e0, e1);
+        verify(&mut layout, &[(e1, &[]), (e0, &[]), (e2, &[])]);
+    }
+
+    #[test]
+    fn append_inst() {
+        let mut layout = Layout::new();
+        let e1 = Block::new(1);
+
+        layout.append_block(e1);
+        let v: Vec<Inst> = layout.block_insts(e1).collect();
+        assert_eq!(v, []);
+
+        let i0 = Inst::new(0);
+        let i1 = Inst::new(1);
+        let i2 = Inst::new(2);
+
+        assert_eq!(layout.inst_block(i0), None);
+        assert_eq!(layout.inst_block(i1), None);
+        assert_eq!(layout.inst_block(i2), None);
+
+        layout.append_inst(i1, e1);
+        assert_eq!(layout.inst_block(i0), None);
+        assert_eq!(layout.inst_block(i1), Some(e1));
+        assert_eq!(layout.inst_block(i2), None);
+        let v: Vec<Inst> = layout.block_insts(e1).collect();
+        assert_eq!(v, [i1]);
+
+        layout.append_inst(i2, e1);
+        assert_eq!(layout.inst_block(i0), None);
+        assert_eq!(layout.inst_block(i1), Some(e1));
+        assert_eq!(layout.inst_block(i2), Some(e1));
+        let v: Vec<Inst> = layout.block_insts(e1).collect();
+        assert_eq!(v, [i1, i2]);
+
+        // Test double-ended instruction iterator.
+        let v: Vec<Inst> = layout.block_insts(e1).rev().collect();
+        assert_eq!(v, [i2, i1]);
+
+        layout.append_inst(i0, e1);
+        verify(&mut layout, &[(e1, &[i1, i2, i0])]);
+
+        // Test cursor positioning.
+        let mut cur = LayoutCursor::new(&mut layout).at_top(e1);
+        assert_eq!(cur.position(), CursorPosition::Before(e1));
+        assert_eq!(cur.prev_inst(), None);
+        assert_eq!(cur.position(), CursorPosition::Before(e1));
+        assert_eq!(cur.next_inst(), Some(i1));
+        assert_eq!(cur.position(), CursorPosition::At(i1));
+        assert_eq!(cur.next_inst(), Some(i2));
+        assert_eq!(cur.next_inst(), Some(i0));
+        assert_eq!(cur.prev_inst(), Some(i2));
+        assert_eq!(cur.position(), CursorPosition::At(i2));
+        assert_eq!(cur.next_inst(), Some(i0));
+        assert_eq!(cur.position(), CursorPosition::At(i0));
+        assert_eq!(cur.next_inst(), None);
+        assert_eq!(cur.position(), CursorPosition::After(e1));
+        assert_eq!(cur.next_inst(), None);
+        assert_eq!(cur.position(), CursorPosition::After(e1));
+        assert_eq!(cur.prev_inst(), Some(i0));
+        assert_eq!(cur.prev_inst(), Some(i2));
+        assert_eq!(cur.prev_inst(), Some(i1));
+        assert_eq!(cur.prev_inst(), None);
+        assert_eq!(cur.position(), CursorPosition::Before(e1));
+
+        // Test remove_inst.
+        cur.goto_inst(i2);
+        assert_eq!(cur.remove_inst(), i2);
+        verify(cur.layout, &[(e1, &[i1, i0])]);
+        assert_eq!(cur.layout.inst_block(i2), None);
+        assert_eq!(cur.remove_inst(), i0);
+        verify(cur.layout, &[(e1, &[i1])]);
+        assert_eq!(cur.layout.inst_block(i0), None);
+        assert_eq!(cur.position(), CursorPosition::After(e1));
+        cur.layout.remove_inst(i1);
+        verify(cur.layout, &[(e1, &[])]);
+        assert_eq!(cur.layout.inst_block(i1), None);
+    }
+
+    #[test]
+    fn insert_inst() {
+        let mut layout = Layout::new();
+        let e1 = Block::new(1);
+
+        layout.append_block(e1);
+        let v: Vec<Inst> = layout.block_insts(e1).collect();
+        assert_eq!(v, []);
+
+        let i0 = Inst::new(0);
+        let i1 = Inst::new(1);
+        let i2 = Inst::new(2);
+
+        assert_eq!(layout.inst_block(i0), None);
+        assert_eq!(layout.inst_block(i1), None);
+        assert_eq!(layout.inst_block(i2), None);
+
+        layout.append_inst(i1, e1);
+        assert_eq!(layout.inst_block(i0), None);
+        assert_eq!(layout.inst_block(i1), Some(e1));
+        assert_eq!(layout.inst_block(i2), None);
+        let v: Vec<Inst> = layout.block_insts(e1).collect();
+        assert_eq!(v, [i1]);
+
+        layout.insert_inst(i2, i1);
+        assert_eq!(layout.inst_block(i0), None);
+        assert_eq!(layout.inst_block(i1), Some(e1));
+        assert_eq!(layout.inst_block(i2), Some(e1));
+        let v: Vec<Inst> = layout.block_insts(e1).collect();
+        assert_eq!(v, [i2, i1]);
+
+        layout.insert_inst(i0, i1);
+        verify(&mut layout, &[(e1, &[i2, i0, i1])]);
+    }
+
+    #[test]
+    fn multiple_blocks() {
+        let mut layout = Layout::new();
+
+        let e0 = Block::new(0);
+        let e1 = Block::new(1);
+
+        assert_eq!(layout.entry_block(), None);
+        layout.append_block(e0);
+        assert_eq!(layout.entry_block(), Some(e0));
+        layout.append_block(e1);
+        assert_eq!(layout.entry_block(), Some(e0));
+
+        let i0 = Inst::new(0);
+        let i1 = Inst::new(1);
+        let i2 = Inst::new(2);
+        let i3 = Inst::new(3);
+
+        layout.append_inst(i0, e0);
+        layout.append_inst(i1, e0);
+        layout.append_inst(i2, e1);
+        layout.append_inst(i3, e1);
+
+        let v0: Vec<Inst> = layout.block_insts(e0).collect();
+        let v1: Vec<Inst> = layout.block_insts(e1).collect();
+        assert_eq!(v0, [i0, i1]);
+        assert_eq!(v1, [i2, i3]);
+    }
+
+    #[test]
+    fn split_block() {
+        let mut layout = Layout::new();
+
+        let e0 = Block::new(0);
+        let e1 = Block::new(1);
+        let e2 = Block::new(2);
+
+        let i0 = Inst::new(0);
+        let i1 = Inst::new(1);
+        let i2 = Inst::new(2);
+        let i3 = Inst::new(3);
+
+        layout.append_block(e0);
+        layout.append_inst(i0, e0);
+        assert_eq!(layout.inst_block(i0), Some(e0));
+        layout.split_block(e1, i0);
+        assert_eq!(layout.inst_block(i0), Some(e1));
+
+        {
+            let mut cur = LayoutCursor::new(&mut layout);
+            assert_eq!(cur.next_block(), Some(e0));
+            assert_eq!(cur.next_inst(), None);
+            assert_eq!(cur.next_block(), Some(e1));
+            assert_eq!(cur.next_inst(), Some(i0));
+            assert_eq!(cur.next_inst(), None);
+            assert_eq!(cur.next_block(), None);
+
+            // Check backwards links.
+            assert_eq!(cur.prev_block(), Some(e1));
+            assert_eq!(cur.prev_inst(), Some(i0));
+            assert_eq!(cur.prev_inst(), None);
+            assert_eq!(cur.prev_block(), Some(e0));
+            assert_eq!(cur.prev_inst(), None);
+            assert_eq!(cur.prev_block(), None);
+        }
+
+        layout.append_inst(i1, e0);
+        layout.append_inst(i2, e0);
+        layout.append_inst(i3, e0);
+        layout.split_block(e2, i2);
+
+        assert_eq!(layout.inst_block(i0), Some(e1));
+        assert_eq!(layout.inst_block(i1), Some(e0));
+        assert_eq!(layout.inst_block(i2), Some(e2));
+        assert_eq!(layout.inst_block(i3), Some(e2));
+
+        {
+            let mut cur = LayoutCursor::new(&mut layout);
+            assert_eq!(cur.next_block(), Some(e0));
+            assert_eq!(cur.next_inst(), Some(i1));
+            assert_eq!(cur.next_inst(), None);
+            assert_eq!(cur.next_block(), Some(e2));
+            assert_eq!(cur.next_inst(), Some(i2));
+            assert_eq!(cur.next_inst(), Some(i3));
+            assert_eq!(cur.next_inst(), None);
+            assert_eq!(cur.next_block(), Some(e1));
+            assert_eq!(cur.next_inst(), Some(i0));
+            assert_eq!(cur.next_inst(), None);
+            assert_eq!(cur.next_block(), None);
+
+            assert_eq!(cur.prev_block(), Some(e1));
+            assert_eq!(cur.prev_inst(), Some(i0));
+            assert_eq!(cur.prev_inst(), None);
+            assert_eq!(cur.prev_block(), Some(e2));
+            assert_eq!(cur.prev_inst(), Some(i3));
+            assert_eq!(cur.prev_inst(), Some(i2));
+            assert_eq!(cur.prev_inst(), None);
+            assert_eq!(cur.prev_block(), Some(e0));
+            assert_eq!(cur.prev_inst(), Some(i1));
+            assert_eq!(cur.prev_inst(), None);
+            assert_eq!(cur.prev_block(), None);
+        }
+
+        // Check `ProgramOrder`.
+        assert_eq!(layout.cmp(e2, e2), Ordering::Equal);
+        assert_eq!(layout.cmp(e2, i2), Ordering::Less);
+        assert_eq!(layout.cmp(i3, i2), Ordering::Greater);
+
+        assert_eq!(layout.is_block_gap(i1, e2), true);
+        assert_eq!(layout.is_block_gap(i3, e1), true);
+        assert_eq!(layout.is_block_gap(i1, e1), false);
+        assert_eq!(layout.is_block_gap(i2, e1), false);
+    }
+}
diff --git a/third_party/rust/cranelift-codegen/src/ir/libcall.rs b/third_party/rust/cranelift-codegen/src/ir/libcall.rs
new file mode 100644
index 0000000000..9dc134e480
--- /dev/null
+++ b/third_party/rust/cranelift-codegen/src/ir/libcall.rs
@@ -0,0 +1,260 @@
+//! Naming well-known routines in the runtime library.
+
+use crate::ir::{
+ types, AbiParam, ArgumentPurpose, ExtFuncData, ExternalName, FuncRef, Function, Inst, Opcode,
+ Signature, Type,
+};
+use crate::isa::{CallConv, RegUnit, TargetIsa};
+use core::fmt;
+use core::str::FromStr;
+#[cfg(feature = "enable-serde")]
+use serde::{Deserialize, Serialize};
+
+/// The name of a runtime library routine.
+///
+/// Runtime library calls are generated for Cranelift IR instructions that don't have an equivalent
+/// ISA instruction or an easy macro expansion. A `LibCall` is used as a well-known name to refer to
+/// the runtime library routine. This way, Cranelift doesn't have to know about the naming
+/// convention in the embedding VM's runtime library.
+///
+/// This list is likely to grow over time.
+#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)]
+#[cfg_attr(feature = "enable-serde", derive(Serialize, Deserialize))]
+pub enum LibCall {
+    /// Probe for stack overflow. These calls are emitted for functions which
+    /// need a stack probe, when the `enable_probestack` setting is true.
+    Probestack,
+    /// udiv.i64
+    UdivI64,
+    /// sdiv.i64
+    SdivI64,
+    /// urem.i64
+    UremI64,
+    /// srem.i64
+    SremI64,
+    /// ishl.i64
+    IshlI64,
+    /// ushr.i64
+    UshrI64,
+    /// sshr.i64
+    SshrI64,
+    /// ceil.f32
+    CeilF32,
+    /// ceil.f64
+    CeilF64,
+    /// floor.f32
+    FloorF32,
+    /// floor.f64
+    FloorF64,
+    /// trunc.f32
+    TruncF32,
+    /// trunc.f64
+    TruncF64,
+    /// nearest.f32
+    NearestF32,
+    /// nearest.f64
+    NearestF64,
+    /// libc.memcpy
+    Memcpy,
+    /// libc.memset
+    Memset,
+    /// libc.memmove
+    Memmove,
+
+    /// Elf __tls_get_addr
+    ElfTlsGetAddr,
+}
+
+impl fmt::Display for LibCall {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        // The `Debug` representation is exactly the variant name, which is
+        // also the spelling accepted by `FromStr`.
+        write!(f, "{:?}", self)
+    }
+}
+
+impl FromStr for LibCall {
+    type Err = ();
+
+    // The accepted spellings are exactly the variant names, i.e. the
+    // `Display`/`Debug` output, so parsing round-trips with formatting.
+    fn from_str(s: &str) -> Result<Self, Self::Err> {
+        match s {
+            "Probestack" => Ok(Self::Probestack),
+            "UdivI64" => Ok(Self::UdivI64),
+            "SdivI64" => Ok(Self::SdivI64),
+            "UremI64" => Ok(Self::UremI64),
+            "SremI64" => Ok(Self::SremI64),
+            "IshlI64" => Ok(Self::IshlI64),
+            "UshrI64" => Ok(Self::UshrI64),
+            "SshrI64" => Ok(Self::SshrI64),
+            "CeilF32" => Ok(Self::CeilF32),
+            "CeilF64" => Ok(Self::CeilF64),
+            "FloorF32" => Ok(Self::FloorF32),
+            "FloorF64" => Ok(Self::FloorF64),
+            "TruncF32" => Ok(Self::TruncF32),
+            "TruncF64" => Ok(Self::TruncF64),
+            "NearestF32" => Ok(Self::NearestF32),
+            "NearestF64" => Ok(Self::NearestF64),
+            "Memcpy" => Ok(Self::Memcpy),
+            "Memset" => Ok(Self::Memset),
+            "Memmove" => Ok(Self::Memmove),
+
+            "ElfTlsGetAddr" => Ok(Self::ElfTlsGetAddr),
+            _ => Err(()),
+        }
+    }
+}
+
+impl LibCall {
+    /// Get the well-known library call name to use as a replacement for an instruction with the
+    /// given opcode and controlling type variable.
+    ///
+    /// Returns `None` if no well-known library routine name exists for that instruction.
+    pub fn for_inst(opcode: Opcode, ctrl_type: Type) -> Option<Self> {
+        // Dispatch on the controlling type first, then on the opcode.
+        Some(match ctrl_type {
+            types::I64 => match opcode {
+                Opcode::Udiv => Self::UdivI64,
+                Opcode::Sdiv => Self::SdivI64,
+                Opcode::Urem => Self::UremI64,
+                Opcode::Srem => Self::SremI64,
+                Opcode::Ishl => Self::IshlI64,
+                Opcode::Ushr => Self::UshrI64,
+                Opcode::Sshr => Self::SshrI64,
+                _ => return None,
+            },
+            types::F32 => match opcode {
+                Opcode::Ceil => Self::CeilF32,
+                Opcode::Floor => Self::FloorF32,
+                Opcode::Trunc => Self::TruncF32,
+                Opcode::Nearest => Self::NearestF32,
+                _ => return None,
+            },
+            types::F64 => match opcode {
+                Opcode::Ceil => Self::CeilF64,
+                Opcode::Floor => Self::FloorF64,
+                Opcode::Trunc => Self::TruncF64,
+                Opcode::Nearest => Self::NearestF64,
+                _ => return None,
+            },
+            _ => return None,
+        })
+    }
+}
+
+/// Get a function reference for `libcall` in `func`, following the signature
+/// for `inst`.
+///
+/// If there is an existing reference, use it, otherwise make a new one.
+pub(crate) fn get_libcall_funcref(
+    libcall: LibCall,
+    call_conv: CallConv,
+    func: &mut Function,
+    inst: Inst,
+    isa: &dyn TargetIsa,
+) -> FuncRef {
+    // Reuse an existing declaration when possible to avoid duplicates.
+    find_funcref(libcall, func)
+        .unwrap_or_else(|| make_funcref_for_inst(libcall, call_conv, func, inst, isa))
+}
+
+/// Get a function reference for the probestack function in `func`.
+///
+/// If there is an existing reference, use it, otherwise make a new one.
+pub fn get_probestack_funcref(
+    func: &mut Function,
+    reg_type: Type,
+    arg_reg: RegUnit,
+    isa: &dyn TargetIsa,
+) -> FuncRef {
+    // Reuse an existing declaration when possible to avoid duplicates.
+    find_funcref(LibCall::Probestack, func)
+        .unwrap_or_else(|| make_funcref_for_probestack(func, reg_type, arg_reg, isa))
+}
+
+/// Get the existing function reference for `libcall` in `func` if it exists.
+fn find_funcref(libcall: LibCall, func: &Function) -> Option<FuncRef> {
+    // We're assuming that all libcall function decls are at the end.
+    // If we get this wrong, worst case we'll have duplicate libcall decls which is harmless.
+    for (fref, func_data) in func.dfg.ext_funcs.iter().rev() {
+        if let ExternalName::LibCall(existing) = func_data.name {
+            if existing == libcall {
+                return Some(fref);
+            }
+        } else {
+            // Walked past the trailing run of libcall declarations; give up.
+            return None;
+        }
+    }
+    None
+}
+
+/// Create a funcref for `LibCall::Probestack`.
+fn make_funcref_for_probestack(
+    func: &mut Function,
+    reg_type: Type,
+    arg_reg: RegUnit,
+    isa: &dyn TargetIsa,
+) -> FuncRef {
+    let mut sig = Signature::new(CallConv::Probestack);
+    // The argument is pinned to the fixed register `arg_reg` (hence
+    // `special_reg` rather than a plain `AbiParam`).
+    let rax = AbiParam::special_reg(reg_type, ArgumentPurpose::Normal, arg_reg);
+    sig.params.push(rax);
+    // NOTE(review): when the probestack routine adjusts SP itself there is no
+    // return value; otherwise the same register is also returned — confirm
+    // against the target's probestack ABI.
+    if !isa.flags().probestack_func_adjusts_sp() {
+        sig.returns.push(rax);
+    }
+    make_funcref(LibCall::Probestack, func, sig, isa)
+}
+
+/// Create a funcref for `libcall` with a signature matching `inst`.
+///
+/// The parameter and return types are copied from the arguments and results
+/// of `inst`.
+fn make_funcref_for_inst(
+    libcall: LibCall,
+    call_conv: CallConv,
+    func: &mut Function,
+    inst: Inst,
+    isa: &dyn TargetIsa,
+) -> FuncRef {
+    let mut sig = Signature::new(call_conv);
+    // Mirror the instruction's argument and result types in the signature.
+    sig.params.extend(
+        func.dfg
+            .inst_args(inst)
+            .iter()
+            .map(|&arg| AbiParam::new(func.dfg.value_type(arg))),
+    );
+    sig.returns.extend(
+        func.dfg
+            .inst_results(inst)
+            .iter()
+            .map(|&result| AbiParam::new(func.dfg.value_type(result))),
+    );
+
+    if call_conv.extends_baldrdash() {
+        // Adds the special VMContext parameter to the signature.
+        sig.params.push(AbiParam::special(
+            isa.pointer_type(),
+            ArgumentPurpose::VMContext,
+        ));
+    }
+
+    make_funcref(libcall, func, sig, isa)
+}
+
+/// Create a funcref for `libcall`.
+fn make_funcref(
+    libcall: LibCall,
+    func: &mut Function,
+    sig: Signature,
+    isa: &dyn TargetIsa,
+) -> FuncRef {
+    let sigref = func.import_signature(sig);
+
+    func.import_function(ExtFuncData {
+        name: ExternalName::LibCall(libcall),
+        signature: sigref,
+        // Whether the routine can be assumed to live near the calling code is
+        // an ISA-level setting.
+        colocated: isa.flags().use_colocated_libcalls(),
+    })
+}
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+    use alloc::string::ToString;
+
+    // `Display` prints the variant name and `FromStr` parses it back, so the
+    // two representations round-trip.
+    #[test]
+    fn display() {
+        assert_eq!(LibCall::CeilF32.to_string(), "CeilF32");
+        assert_eq!(LibCall::NearestF64.to_string(), "NearestF64");
+    }
+
+    #[test]
+    fn parsing() {
+        assert_eq!("FloorF32".parse(), Ok(LibCall::FloorF32));
+    }
+}
diff --git a/third_party/rust/cranelift-codegen/src/ir/memflags.rs b/third_party/rust/cranelift-codegen/src/ir/memflags.rs
new file mode 100644
index 0000000000..87fd6bf3ab
--- /dev/null
+++ b/third_party/rust/cranelift-codegen/src/ir/memflags.rs
@@ -0,0 +1,117 @@
+//! Memory operation flags.
+
+use core::fmt;
+
+/// Individual flag bits in a `MemFlags` mask.
+///
+/// The discriminant of each variant is its bit position; the order must stay
+/// in sync with `NAMES` below.
+enum FlagBit {
+    Notrap,
+    Aligned,
+    Readonly,
+}
+
+/// Textual flag names, indexed by `FlagBit` discriminant.
+const NAMES: [&str; 3] = ["notrap", "aligned", "readonly"];
+
+/// Flags for memory operations like load/store.
+///
+/// Each of these flags introduce a limited form of undefined behavior. The flags each enable
+/// certain optimizations that need to make additional assumptions. Generally, the semantics of a
+/// program does not change when a flag is removed, but adding a flag will.
+#[derive(Clone, Copy, Debug, Hash, PartialEq, Eq)]
+pub struct MemFlags {
+    /// One bit per `FlagBit` variant.
+    bits: u8,
+}
+
+impl MemFlags {
+    /// Create a new empty set of flags.
+    pub fn new() -> Self {
+        Self { bits: 0 }
+    }
+
+    /// Create a set of flags representing an access from a "trusted" address, meaning it's
+    /// known to be aligned and non-trapping.
+    pub fn trusted() -> Self {
+        let mut result = Self::new();
+        result.set_notrap();
+        result.set_aligned();
+        result
+    }
+
+    /// Read a flag bit.
+    // The bit position is the `FlagBit` discriminant.
+    fn read(self, bit: FlagBit) -> bool {
+        self.bits & (1 << bit as usize) != 0
+    }
+
+    /// Set a flag bit.
+    fn set(&mut self, bit: FlagBit) {
+        self.bits |= 1 << bit as usize
+    }
+
+    /// Set a flag bit by name.
+    ///
+    /// Returns true if the flag was found and set, false for an unknown flag name.
+    pub fn set_by_name(&mut self, name: &str) -> bool {
+        // Positions in `NAMES` mirror the `FlagBit` discriminants.
+        match NAMES.iter().position(|&s| s == name) {
+            Some(bit) => {
+                self.bits |= 1 << bit;
+                true
+            }
+            None => false,
+        }
+    }
+
+    /// Test if the `notrap` flag is set.
+    ///
+    /// Normally, trapping is part of the semantics of a load/store operation. If the platform
+    /// would cause a trap when accessing the effective address, the Cranelift memory operation is
+    /// also required to trap.
+    ///
+    /// The `notrap` flag tells Cranelift that the memory is *accessible*, which means that
+    /// accesses will not trap. This makes it possible to delete an unused load or a dead store
+    /// instruction.
+    pub fn notrap(self) -> bool {
+        self.read(FlagBit::Notrap)
+    }
+
+    /// Set the `notrap` flag.
+    pub fn set_notrap(&mut self) {
+        self.set(FlagBit::Notrap)
+    }
+
+    /// Test if the `aligned` flag is set.
+    ///
+    /// By default, Cranelift memory instructions work with any unaligned effective address. If the
+    /// `aligned` flag is set, the instruction is permitted to trap or return a wrong result if the
+    /// effective address is misaligned.
+    pub fn aligned(self) -> bool {
+        self.read(FlagBit::Aligned)
+    }
+
+    /// Set the `aligned` flag.
+    pub fn set_aligned(&mut self) {
+        self.set(FlagBit::Aligned)
+    }
+
+    /// Test if the `readonly` flag is set.
+    ///
+    /// Loads with this flag have no memory dependencies.
+    /// This results in undefined behavior if the dereferenced memory is mutated at any time
+    /// between when the function is called and when it is exited.
+    pub fn readonly(self) -> bool {
+        self.read(FlagBit::Readonly)
+    }
+
+    /// Set the `readonly` flag.
+    pub fn set_readonly(&mut self) {
+        self.set(FlagBit::Readonly)
+    }
+}
+
+impl fmt::Display for MemFlags {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        // Print each set flag as " name", in `NAMES` (bit-position) order.
+        NAMES
+            .iter()
+            .enumerate()
+            .filter(|&(bit, _)| self.bits & (1 << bit) != 0)
+            .try_for_each(|(_, name)| write!(f, " {}", name))
+    }
+}
diff --git a/third_party/rust/cranelift-codegen/src/ir/mod.rs b/third_party/rust/cranelift-codegen/src/ir/mod.rs
new file mode 100644
index 0000000000..4dbe90df34
--- /dev/null
+++ b/third_party/rust/cranelift-codegen/src/ir/mod.rs
@@ -0,0 +1,115 @@
+//! Representation of Cranelift IR functions.
+
+mod atomic_rmw_op;
+mod builder;
+pub mod constant;
+pub mod dfg;
+pub mod entities;
+mod extfunc;
+mod extname;
+pub mod function;
+mod globalvalue;
+mod heap;
+pub mod immediates;
+pub mod instructions;
+pub mod jumptable;
+pub mod layout;
+pub(crate) mod libcall;
+mod memflags;
+mod progpoint;
+mod sourceloc;
+pub mod stackslot;
+mod table;
+mod trapcode;
+pub mod types;
+mod valueloc;
+
+#[cfg(feature = "enable-serde")]
+use serde::{Deserialize, Serialize};
+
+pub use crate::ir::atomic_rmw_op::AtomicRmwOp;
+pub use crate::ir::builder::{
+ InsertBuilder, InstBuilder, InstBuilderBase, InstInserterBase, ReplaceBuilder,
+};
+pub use crate::ir::constant::{ConstantData, ConstantOffset, ConstantPool};
+pub use crate::ir::dfg::{DataFlowGraph, ValueDef};
+pub use crate::ir::entities::{
+ Block, Constant, FuncRef, GlobalValue, Heap, Immediate, Inst, JumpTable, SigRef, StackSlot,
+ Table, Value,
+};
+pub use crate::ir::extfunc::{
+ AbiParam, ArgumentExtension, ArgumentPurpose, ExtFuncData, Signature,
+};
+pub use crate::ir::extname::ExternalName;
+pub use crate::ir::function::{DisplayFunctionAnnotations, Function};
+pub use crate::ir::globalvalue::GlobalValueData;
+pub use crate::ir::heap::{HeapData, HeapStyle};
+pub use crate::ir::instructions::{
+ InstructionData, Opcode, ValueList, ValueListPool, VariableArgs,
+};
+pub use crate::ir::jumptable::JumpTableData;
+pub use crate::ir::layout::Layout;
+pub use crate::ir::libcall::{get_probestack_funcref, LibCall};
+pub use crate::ir::memflags::MemFlags;
+pub use crate::ir::progpoint::{ExpandedProgramPoint, ProgramOrder, ProgramPoint};
+pub use crate::ir::sourceloc::SourceLoc;
+pub use crate::ir::stackslot::{StackLayoutInfo, StackSlotData, StackSlotKind, StackSlots};
+pub use crate::ir::table::TableData;
+pub use crate::ir::trapcode::TrapCode;
+pub use crate::ir::types::Type;
+pub use crate::ir::valueloc::{ArgumentLoc, ValueLoc};
+pub use cranelift_codegen_shared::condcodes;
+
+use crate::binemit;
+use crate::entity::{entity_impl, PrimaryMap, SecondaryMap};
+use crate::isa;
+
/// Map of value locations, keyed by SSA value.
pub type ValueLocations = SecondaryMap<Value, ValueLoc>;

/// Map of jump tables, keyed by their `JumpTable` reference.
pub type JumpTables = PrimaryMap<JumpTable, JumpTableData>;

/// Map of instruction encodings, keyed by instruction.
pub type InstEncodings = SecondaryMap<Inst, isa::Encoding>;

/// Code offsets for blocks, keyed by block.
pub type BlockOffsets = SecondaryMap<Block, binemit::CodeOffset>;

/// Code offsets for Jump Tables, keyed by jump table.
pub type JumpTableOffsets = SecondaryMap<JumpTable, binemit::CodeOffset>;

/// Source locations for instructions, keyed by instruction.
pub type SourceLocs = SecondaryMap<Inst, SourceLoc>;
+
/// Marked with a label value.
#[derive(Copy, Clone, PartialEq, Eq, Hash)]
#[cfg_attr(feature = "enable-serde", derive(Serialize, Deserialize))]
pub struct ValueLabel(u32);
// `entity_impl!` generates the entity-reference boilerplate for this index type,
// using "val" as its textual prefix.
entity_impl!(ValueLabel, "val");
+
/// A label of a Value.
#[derive(Debug, Clone)]
pub struct ValueLabelStart {
    /// Source location from which this label takes effect.
    pub from: SourceLoc,

    /// The label index.
    pub label: ValueLabel,
}
+
/// Value label assignments: label starts or value aliases.
#[derive(Debug, Clone)]
pub enum ValueLabelAssignments {
    /// Original value labels assigned at transform.
    Starts(alloc::vec::Vec<ValueLabelStart>),

    /// A value alias to original value.
    Alias {
        /// Source location from which this alias takes effect.
        from: SourceLoc,

        /// The original value this alias refers to.
        value: Value,
    },
}
diff --git a/third_party/rust/cranelift-codegen/src/ir/progpoint.rs b/third_party/rust/cranelift-codegen/src/ir/progpoint.rs
new file mode 100644
index 0000000000..0152949e7a
--- /dev/null
+++ b/third_party/rust/cranelift-codegen/src/ir/progpoint.rs
@@ -0,0 +1,164 @@
+//! Program points.
+
+use crate::entity::EntityRef;
+use crate::ir::{Block, Inst, ValueDef};
+use core::cmp;
+use core::fmt;
+use core::u32;
+
/// A `ProgramPoint` represents a position in a function where the live range of an SSA value can
/// begin or end. It can be either:
///
/// 1. An instruction or
/// 2. A block header.
///
/// This corresponds more or less to the lines in the textual form of Cranelift IR.
///
/// Both cases are packed into a single `u32`: instructions are stored as `index * 2` and block
/// headers as `index * 2 + 1`, so the low bit acts as a tag (see the `From` impls below).
#[derive(PartialEq, Eq, Clone, Copy)]
pub struct ProgramPoint(u32);
+
+impl From<Inst> for ProgramPoint {
+ fn from(inst: Inst) -> Self {
+ let idx = inst.index();
+ debug_assert!(idx < (u32::MAX / 2) as usize);
+ Self((idx * 2) as u32)
+ }
+}
+
+impl From<Block> for ProgramPoint {
+ fn from(block: Block) -> Self {
+ let idx = block.index();
+ debug_assert!(idx < (u32::MAX / 2) as usize);
+ Self((idx * 2 + 1) as u32)
+ }
+}
+
+impl From<ValueDef> for ProgramPoint {
+ fn from(def: ValueDef) -> Self {
+ match def {
+ ValueDef::Result(inst, _) => inst.into(),
+ ValueDef::Param(block, _) => block.into(),
+ }
+ }
+}
+
/// An expanded program point directly exposes the variants, but takes twice the space to
/// represent.
///
/// `ProgramPoint` packs the same information into a single `u32`; expand into this type when
/// the variant needs to be matched on.
#[derive(PartialEq, Eq, Clone, Copy)]
pub enum ExpandedProgramPoint {
    /// An instruction in the function.
    Inst(Inst),
    /// A block header.
    Block(Block),
}
+
+impl ExpandedProgramPoint {
+ /// Get the instruction we know is inside.
+ pub fn unwrap_inst(self) -> Inst {
+ match self {
+ Self::Inst(x) => x,
+ Self::Block(x) => panic!("expected inst: {}", x),
+ }
+ }
+}
+
impl From<Inst> for ExpandedProgramPoint {
    /// Wrap an instruction reference as a program point.
    fn from(inst: Inst) -> Self {
        Self::Inst(inst)
    }
}
+
impl From<Block> for ExpandedProgramPoint {
    /// Wrap a block header reference as a program point.
    fn from(block: Block) -> Self {
        Self::Block(block)
    }
}
+
+impl From<ValueDef> for ExpandedProgramPoint {
+ fn from(def: ValueDef) -> Self {
+ match def {
+ ValueDef::Result(inst, _) => inst.into(),
+ ValueDef::Param(block, _) => block.into(),
+ }
+ }
+}
+
+impl From<ProgramPoint> for ExpandedProgramPoint {
+ fn from(pp: ProgramPoint) -> Self {
+ if pp.0 & 1 == 0 {
+ Self::Inst(Inst::from_u32(pp.0 / 2))
+ } else {
+ Self::Block(Block::from_u32(pp.0 / 2))
+ }
+ }
+}
+
+impl fmt::Display for ExpandedProgramPoint {
+ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+ match *self {
+ Self::Inst(x) => write!(f, "{}", x),
+ Self::Block(x) => write!(f, "{}", x),
+ }
+ }
+}
+
+impl fmt::Display for ProgramPoint {
+ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+ let epp: ExpandedProgramPoint = (*self).into();
+ epp.fmt(f)
+ }
+}
+
impl fmt::Debug for ExpandedProgramPoint {
    // Debug output wraps the Display form, e.g. "ExpandedProgramPoint(inst5)".
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        write!(f, "ExpandedProgramPoint({})", self)
    }
}
+
impl fmt::Debug for ProgramPoint {
    // Debug output wraps the Display form, e.g. "ProgramPoint(inst5)".
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        write!(f, "ProgramPoint({})", self)
    }
}
+
/// Context for ordering program points.
///
/// `ProgramPoint` objects don't carry enough information to be ordered independently, they need a
/// context providing the program order.
pub trait ProgramOrder {
    /// Compare the program points `a` and `b` relative to this program order.
    ///
    /// Return `Less` if `a` appears in the program before `b`, and `Greater` if it appears
    /// after.
    ///
    /// This is declared as a generic such that it can be called with `Inst` and `Block` arguments
    /// directly. Depending on the implementation, there is a good chance performance will be
    /// improved for those cases where the type of either argument is known statically.
    fn cmp<A, B>(&self, a: A, b: B) -> cmp::Ordering
    where
        A: Into<ExpandedProgramPoint>,
        B: Into<ExpandedProgramPoint>;

    /// Is the range from `inst` to `block` just the gap between consecutive blocks?
    ///
    /// This returns true if `inst` is the terminator in the block immediately before `block`.
    fn is_block_gap(&self, inst: Inst, block: Block) -> bool;
}
+
#[cfg(test)]
mod tests {
    use super::*;
    use crate::entity::EntityRef;
    use crate::ir::{Block, Inst};
    use alloc::string::ToString;

    #[test]
    fn convert() {
        // Round-trip entity references through the packed ProgramPoint encoding and
        // check that the textual form survives.
        let inst = Inst::new(5);
        let block = Block::new(3);

        let pp_inst: ProgramPoint = inst.into();
        let pp_block: ProgramPoint = block.into();

        assert_eq!(pp_inst.to_string(), "inst5");
        assert_eq!(pp_block.to_string(), "block3");
    }
}
diff --git a/third_party/rust/cranelift-codegen/src/ir/sourceloc.rs b/third_party/rust/cranelift-codegen/src/ir/sourceloc.rs
new file mode 100644
index 0000000000..ccab62f89b
--- /dev/null
+++ b/third_party/rust/cranelift-codegen/src/ir/sourceloc.rs
@@ -0,0 +1,66 @@
+//! Source locations.
+//!
+//! Cranelift tracks the original source location of each instruction, and preserves the source
+//! location when instructions are transformed.
+
+use core::fmt;
+#[cfg(feature = "enable-serde")]
+use serde::{Deserialize, Serialize};
+
/// A source location.
///
/// This is an opaque 32-bit number attached to each Cranelift IR instruction. Cranelift does not
/// interpret source locations in any way, they are simply preserved from the input to the output.
///
/// The default source location uses the all-ones bit pattern `!0`. It is used for instructions
/// that can't be given a real source location.
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
#[cfg_attr(feature = "enable-serde", derive(Serialize, Deserialize))]
pub struct SourceLoc(u32);

impl SourceLoc {
    /// Create a new source location with the given bits.
    pub fn new(bits: u32) -> Self {
        Self(bits)
    }

    /// Is this the default (all-ones) source location?
    pub fn is_default(self) -> bool {
        self == Self::default()
    }

    /// Read the bits of this source location.
    pub fn bits(self) -> u32 {
        self.0
    }
}

impl Default for SourceLoc {
    /// The default source location is the reserved all-ones pattern, meaning "no location".
    fn default() -> Self {
        Self(!0)
    }
}

impl fmt::Display for SourceLoc {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        // The default location prints as "@-"; real locations print as "@" followed by
        // at least four lowercase hex digits.
        if self.is_default() {
            f.write_str("@-")
        } else {
            write!(f, "@{:04x}", self.0)
        }
    }
}
+
#[cfg(test)]
mod tests {
    use crate::ir::SourceLoc;
    use alloc::string::ToString;

    #[test]
    fn display() {
        // The default prints as "@-"; real locations as at least four hex digits.
        let fmt = |bits: u32| SourceLoc::new(bits).to_string();
        assert_eq!(SourceLoc::default().to_string(), "@-");
        assert_eq!(fmt(0), "@0000");
        assert_eq!(fmt(16), "@0010");
        assert_eq!(fmt(0xabcdef), "@abcdef");
    }
}
diff --git a/third_party/rust/cranelift-codegen/src/ir/stackslot.rs b/third_party/rust/cranelift-codegen/src/ir/stackslot.rs
new file mode 100644
index 0000000000..13d35d37b9
--- /dev/null
+++ b/third_party/rust/cranelift-codegen/src/ir/stackslot.rs
@@ -0,0 +1,443 @@
+//! Stack slots.
+//!
+//! The `StackSlotData` struct keeps track of a single stack slot in a function.
+//!
+
+use crate::entity::{Iter, IterMut, Keys, PrimaryMap};
+use crate::ir::{StackSlot, Type};
+use crate::packed_option::PackedOption;
+use alloc::vec::Vec;
+use core::cmp;
+use core::fmt;
+use core::ops::{Index, IndexMut};
+use core::slice;
+use core::str::FromStr;
+
+#[cfg(feature = "enable-serde")]
+use serde::{Deserialize, Serialize};
+
/// The size of an object on the stack, or the size of a stack frame.
///
/// We don't use `usize` to represent object sizes on the target platform because Cranelift supports
/// cross-compilation, and `usize` is a type that depends on the host platform, not the target
/// platform.
pub type StackSize = u32;

/// A stack offset.
///
/// The location of a stack offset relative to a stack pointer or frame pointer.
/// Signed, so offsets on either side of the base register can be expressed.
pub type StackOffset = i32;

/// The minimum size of a spill slot in bytes.
///
/// ISA implementations are allowed to assume that small types like `b1` and `i8` get a full 4-byte
/// spill slot.
const MIN_SPILL_SLOT_SIZE: StackSize = 4;
+
+/// Get the spill slot size to use for `ty`.
+fn spill_size(ty: Type) -> StackSize {
+ cmp::max(MIN_SPILL_SLOT_SIZE, ty.bytes())
+}
+
/// The kind of a stack slot.
///
/// The textual names used by `FromStr` and `Display` round-trip exactly.
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
#[cfg_attr(feature = "enable-serde", derive(Serialize, Deserialize))]
pub enum StackSlotKind {
    /// A spill slot. This is a stack slot created by the register allocator.
    SpillSlot,

    /// An explicit stack slot. This is a chunk of stack memory for use by the `stack_load`
    /// and `stack_store` instructions.
    ExplicitSlot,

    /// An incoming function argument.
    ///
    /// If the current function has more arguments than fits in registers, the remaining arguments
    /// are passed on the stack by the caller. These incoming arguments are represented as SSA
    /// values assigned to incoming stack slots.
    IncomingArg,

    /// An outgoing function argument.
    ///
    /// When preparing to call a function whose arguments don't fit in registers, outgoing argument
    /// stack slots are used to represent individual arguments in the outgoing call frame. These
    /// stack slots are only valid while setting up a call.
    OutgoingArg,

    /// Space allocated in the caller's frame for the callee's return values
    /// that are passed out via return pointer.
    ///
    /// If there are more return values than registers available for the callee's calling
    /// convention, or the return value is larger than the available registers' space, then we
    /// allocate stack space in this frame and pass a pointer to the callee, which then writes its
    /// return values into this space.
    StructReturnSlot,

    /// An emergency spill slot.
    ///
    /// Emergency slots are allocated late when the register's constraint solver needs extra space
    /// to shuffle registers around. They are only used briefly, and can be reused.
    EmergencySlot,
}

impl FromStr for StackSlotKind {
    type Err = ();

    /// Parse a stack slot kind from its snake_case textual name.
    fn from_str(s: &str) -> Result<Self, ()> {
        Ok(match s {
            "explicit_slot" => Self::ExplicitSlot,
            "spill_slot" => Self::SpillSlot,
            "incoming_arg" => Self::IncomingArg,
            "outgoing_arg" => Self::OutgoingArg,
            "sret_slot" => Self::StructReturnSlot,
            "emergency_slot" => Self::EmergencySlot,
            _ => return Err(()),
        })
    }
}

impl fmt::Display for StackSlotKind {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        // These names must stay in sync with the FromStr impl above.
        let name = match *self {
            Self::ExplicitSlot => "explicit_slot",
            Self::SpillSlot => "spill_slot",
            Self::IncomingArg => "incoming_arg",
            Self::OutgoingArg => "outgoing_arg",
            Self::StructReturnSlot => "sret_slot",
            Self::EmergencySlot => "emergency_slot",
        };
        f.write_str(name)
    }
}
+
/// Contents of a stack slot.
#[derive(Clone, Debug, PartialEq, Eq)]
#[cfg_attr(feature = "enable-serde", derive(Serialize, Deserialize))]
pub struct StackSlotData {
    /// The kind of stack slot.
    pub kind: StackSlotKind,

    /// Size of stack slot in bytes.
    pub size: StackSize,

    /// Offset of stack slot relative to the stack pointer in the caller.
    ///
    /// On x86, the base address is the stack pointer *before* the return address was pushed. On
    /// RISC ISAs, the base address is the value of the stack pointer on entry to the function.
    ///
    /// For `OutgoingArg` stack slots, the offset is relative to the current function's stack
    /// pointer immediately before the call.
    ///
    /// `None` until an offset has been assigned (see `StackSlotData::new`).
    pub offset: Option<StackOffset>,
}
+
+impl StackSlotData {
+ /// Create a stack slot with the specified byte size.
+ pub fn new(kind: StackSlotKind, size: StackSize) -> Self {
+ Self {
+ kind,
+ size,
+ offset: None,
+ }
+ }
+
+ /// Get the alignment in bytes of this stack slot given the stack pointer alignment.
+ pub fn alignment(&self, max_align: StackSize) -> StackSize {
+ debug_assert!(max_align.is_power_of_two());
+ // We want to find the largest power of two that divides both `self.size` and `max_align`.
+ // That is the same as isolating the rightmost bit in `x`.
+ let x = self.size | max_align;
+ // C.f. Hacker's delight.
+ x & x.wrapping_neg()
+ }
+}
+
+impl fmt::Display for StackSlotData {
+ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+ write!(f, "{} {}", self.kind, self.size)?;
+ if let Some(offset) = self.offset {
+ write!(f, ", offset {}", offset)?;
+ }
+ Ok(())
+ }
+}
+
/// Stack frame layout information.
///
/// This is computed by the `layout_stack()` method and stored in `StackSlots::layout_info`.
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
#[cfg_attr(feature = "enable-serde", derive(Serialize, Deserialize))]
pub struct StackLayoutInfo {
    /// The total size of the stack frame.
    ///
    /// This is the distance from the stack pointer in the current function to the stack pointer in
    /// the calling function, so it includes a pushed return address as well as space for outgoing
    /// call arguments.
    pub frame_size: StackSize,

    /// The total size of the stack frame for inbound arguments pushed by the caller.
    pub inbound_args_size: StackSize,
}
+
/// Stack frame manager.
///
/// Keep track of all the stack slots used by a function.
#[derive(Clone, Debug, PartialEq, Eq, Default)]
#[cfg_attr(feature = "enable-serde", derive(Serialize, Deserialize))]
pub struct StackSlots {
    /// All allocated stack slots.
    slots: PrimaryMap<StackSlot, StackSlotData>,

    /// All the outgoing stack slots, ordered by offset.
    // Kept sorted by (offset, size) so `get_outgoing_arg` can binary-search for reuse.
    outgoing: Vec<StackSlot>,

    /// All the emergency slots.
    // Unordered; `get_emergency_slot` scans for the best available fit.
    emergency: Vec<StackSlot>,

    /// Layout information computed from `layout_stack`.
    pub layout_info: Option<StackLayoutInfo>,
}
+
+/// Stack slot manager functions that behave mostly like an entity map.
+impl StackSlots {
+ /// Create an empty stack slot manager.
+ pub fn new() -> Self {
+ StackSlots::default()
+ }
+
+ /// Clear out everything.
+ pub fn clear(&mut self) {
+ self.slots.clear();
+ self.outgoing.clear();
+ self.emergency.clear();
+ self.layout_info = None;
+ }
+
+ /// Allocate a new stack slot.
+ ///
+ /// This function should be primarily used by the text format parser. There are more convenient
+ /// functions for creating specific kinds of stack slots below.
+ pub fn push(&mut self, data: StackSlotData) -> StackSlot {
+ self.slots.push(data)
+ }
+
+ /// Check if `ss` is a valid stack slot reference.
+ pub fn is_valid(&self, ss: StackSlot) -> bool {
+ self.slots.is_valid(ss)
+ }
+
+ /// Get an iterator over all the stack slot keys.
+ pub fn iter(&self) -> Iter<StackSlot, StackSlotData> {
+ self.slots.iter()
+ }
+
+ /// Get an iterator over all the stack slot keys, mutable edition.
+ pub fn iter_mut(&mut self) -> IterMut<StackSlot, StackSlotData> {
+ self.slots.iter_mut()
+ }
+
+ /// Get an iterator over all the stack slot records.
+ pub fn values(&self) -> slice::Iter<StackSlotData> {
+ self.slots.values()
+ }
+
+ /// Get an iterator over all the stack slot records, mutable edition.
+ pub fn values_mut(&mut self) -> slice::IterMut<StackSlotData> {
+ self.slots.values_mut()
+ }
+
+ /// Get an iterator over all the stack slot keys.
+ pub fn keys(&self) -> Keys<StackSlot> {
+ self.slots.keys()
+ }
+
+ /// Get a reference to the next stack slot that would be created by `push()`.
+ ///
+ /// This should just be used by the parser.
+ pub fn next_key(&self) -> StackSlot {
+ self.slots.next_key()
+ }
+}
+
impl Index<StackSlot> for StackSlots {
    type Output = StackSlotData;

    // Allow `slots[ss]` indexing straight through to the underlying PrimaryMap.
    fn index(&self, ss: StackSlot) -> &StackSlotData {
        &self.slots[ss]
    }
}
+
impl IndexMut<StackSlot> for StackSlots {
    // Mutable counterpart of the Index impl above.
    fn index_mut(&mut self, ss: StackSlot) -> &mut StackSlotData {
        &mut self.slots[ss]
    }
}
+
/// Higher-level stack frame manipulation functions.
impl StackSlots {
    /// Create a new spill slot for spilling values of type `ty`.
    pub fn make_spill_slot(&mut self, ty: Type) -> StackSlot {
        self.push(StackSlotData::new(StackSlotKind::SpillSlot, spill_size(ty)))
    }

    /// Create a stack slot representing an incoming function argument.
    pub fn make_incoming_arg(&mut self, size: u32, offset: StackOffset) -> StackSlot {
        let mut data = StackSlotData::new(StackSlotKind::IncomingArg, size);
        // Guard against `offset + size` overflowing the signed 32-bit offset range.
        debug_assert!(offset <= StackOffset::max_value() - data.size as StackOffset);
        data.offset = Some(offset);
        self.push(data)
    }

    /// Get a stack slot representing an outgoing argument.
    ///
    /// This may create a new stack slot, or reuse an existing outgoing stack slot with the
    /// requested offset and size.
    ///
    /// The requested offset is relative to this function's stack pointer immediately before making
    /// the call.
    pub fn get_outgoing_arg(&mut self, size: u32, offset: StackOffset) -> StackSlot {
        // Look for an existing outgoing stack slot with the same offset and size.
        // `outgoing` is kept sorted by (offset, size), so the binary search either finds an
        // exact match or yields the insertion point that preserves that ordering.
        let inspos = match self.outgoing.binary_search_by_key(&(offset, size), |&ss| {
            (self[ss].offset.unwrap(), self[ss].size)
        }) {
            Ok(idx) => return self.outgoing[idx],
            Err(idx) => idx,
        };

        // No existing slot found. Make one and insert it into `outgoing`.
        let mut data = StackSlotData::new(StackSlotKind::OutgoingArg, size);
        debug_assert!(offset <= StackOffset::max_value() - size as StackOffset);
        data.offset = Some(offset);
        let ss = self.slots.push(data);
        self.outgoing.insert(inspos, ss);
        ss
    }

    /// Get an emergency spill slot that can be used to store a `ty` value.
    ///
    /// This may allocate a new slot, or it may reuse an existing emergency spill slot, excluding
    /// any slots in the `in_use` list.
    pub fn get_emergency_slot(
        &mut self,
        ty: Type,
        in_use: &[PackedOption<StackSlot>],
    ) -> StackSlot {
        let size = spill_size(ty);

        // Find the smallest existing slot that can fit the type.
        if let Some(&ss) = self
            .emergency
            .iter()
            .filter(|&&ss| self[ss].size >= size && !in_use.contains(&ss.into()))
            .min_by_key(|&&ss| self[ss].size)
        {
            return ss;
        }

        // Alternatively, use the largest available slot and make it larger.
        // (Any candidate here is smaller than `size`, or the search above would have found
        // it, so this assignment only ever grows the slot.)
        if let Some(&ss) = self
            .emergency
            .iter()
            .filter(|&&ss| !in_use.contains(&ss.into()))
            .max_by_key(|&&ss| self[ss].size)
        {
            self.slots[ss].size = size;
            return ss;
        }

        // No existing slot found. Make one and insert it into `emergency`.
        let data = StackSlotData::new(StackSlotKind::EmergencySlot, size);
        let ss = self.slots.push(data);
        self.emergency.push(ss);
        ss
    }
}
+
#[cfg(test)]
mod tests {
    use super::*;
    use crate::ir::types;
    use crate::ir::Function;
    use alloc::string::ToString;

    #[test]
    fn stack_slot() {
        let mut func = Function::new();

        // Slots are numbered in creation order and display their kind and size.
        let ss0 = func.create_stack_slot(StackSlotData::new(StackSlotKind::IncomingArg, 4));
        let ss1 = func.create_stack_slot(StackSlotData::new(StackSlotKind::SpillSlot, 8));
        assert_eq!(ss0.to_string(), "ss0");
        assert_eq!(ss1.to_string(), "ss1");

        assert_eq!(func.stack_slots[ss0].size, 4);
        assert_eq!(func.stack_slots[ss1].size, 8);

        assert_eq!(func.stack_slots[ss0].to_string(), "incoming_arg 4");
        assert_eq!(func.stack_slots[ss1].to_string(), "spill_slot 8");
    }

    #[test]
    fn outgoing() {
        let mut sss = StackSlots::new();

        let ss0 = sss.get_outgoing_arg(4, 8);
        let ss1 = sss.get_outgoing_arg(4, 4);
        let ss2 = sss.get_outgoing_arg(8, 8);

        assert_eq!(sss[ss0].offset, Some(8));
        assert_eq!(sss[ss0].size, 4);

        assert_eq!(sss[ss1].offset, Some(4));
        assert_eq!(sss[ss1].size, 4);

        assert_eq!(sss[ss2].offset, Some(8));
        assert_eq!(sss[ss2].size, 8);

        // Requesting the same (size, offset) again must reuse the existing slots.
        assert_eq!(sss.get_outgoing_arg(4, 8), ss0);
        assert_eq!(sss.get_outgoing_arg(4, 4), ss1);
        assert_eq!(sss.get_outgoing_arg(8, 8), ss2);
    }

    #[test]
    fn alignment() {
        // Alignment is the largest power of two dividing both size and max_align.
        let slot = StackSlotData::new(StackSlotKind::SpillSlot, 8);

        assert_eq!(slot.alignment(4), 4);
        assert_eq!(slot.alignment(8), 8);
        assert_eq!(slot.alignment(16), 8);

        let slot2 = StackSlotData::new(StackSlotKind::ExplicitSlot, 24);

        assert_eq!(slot2.alignment(4), 4);
        assert_eq!(slot2.alignment(8), 8);
        assert_eq!(slot2.alignment(16), 8);
        assert_eq!(slot2.alignment(32), 8);
    }

    #[test]
    fn emergency() {
        let mut sss = StackSlots::new();

        let ss0 = sss.get_emergency_slot(types::I32, &[]);
        assert_eq!(sss[ss0].size, 4);

        // When a smaller size is requested, we should simply get the same slot back.
        assert_eq!(sss.get_emergency_slot(types::I8, &[]), ss0);
        assert_eq!(sss[ss0].size, 4);
        assert_eq!(sss.get_emergency_slot(types::F32, &[]), ss0);
        assert_eq!(sss[ss0].size, 4);

        // Ask for a larger size and the slot should grow.
        assert_eq!(sss.get_emergency_slot(types::F64, &[]), ss0);
        assert_eq!(sss[ss0].size, 8);

        // When one slot is in use, we should get a new one.
        let ss1 = sss.get_emergency_slot(types::I32, &[None.into(), ss0.into()]);
        assert_eq!(sss[ss0].size, 8);
        assert_eq!(sss[ss1].size, 4);

        // Now we should get the smallest fit of the two available slots.
        assert_eq!(sss.get_emergency_slot(types::F32, &[]), ss1);
        assert_eq!(sss.get_emergency_slot(types::F64, &[]), ss0);
    }
}
diff --git a/third_party/rust/cranelift-codegen/src/ir/table.rs b/third_party/rust/cranelift-codegen/src/ir/table.rs
new file mode 100644
index 0000000000..9e436cca64
--- /dev/null
+++ b/third_party/rust/cranelift-codegen/src/ir/table.rs
@@ -0,0 +1,36 @@
+//! Tables.
+
+use crate::ir::immediates::Uimm64;
+use crate::ir::{GlobalValue, Type};
+use core::fmt;
+
/// Information about a table declaration.
///
/// Displayed as `dynamic <base>, min <n>, bound <gv>, element_size <n>, index_type <ty>`
/// (see the `Display` impl below).
#[derive(Clone)]
pub struct TableData {
    /// Global value giving the address of the start of the table.
    pub base_gv: GlobalValue,

    /// Guaranteed minimum table size in elements. Table accesses before `min_size` don't need
    /// bounds checking.
    pub min_size: Uimm64,

    /// Global value giving the current bound of the table, in elements.
    pub bound_gv: GlobalValue,

    /// The size of a table element, in bytes.
    pub element_size: Uimm64,

    /// The index type for the table.
    pub index_type: Type,
}
+
+impl fmt::Display for TableData {
+ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+ f.write_str("dynamic")?;
+ write!(
+ f,
+ " {}, min {}, bound {}, element_size {}, index_type {}",
+ self.base_gv, self.min_size, self.bound_gv, self.element_size, self.index_type
+ )
+ }
+}
diff --git a/third_party/rust/cranelift-codegen/src/ir/trapcode.rs b/third_party/rust/cranelift-codegen/src/ir/trapcode.rs
new file mode 100644
index 0000000000..3114114f6d
--- /dev/null
+++ b/third_party/rust/cranelift-codegen/src/ir/trapcode.rs
@@ -0,0 +1,134 @@
+//! Trap codes describing the reason for a trap.
+
+use core::fmt::{self, Display, Formatter};
+use core::str::FromStr;
+#[cfg(feature = "enable-serde")]
+use serde::{Deserialize, Serialize};
+
/// A trap code describing the reason for a trap.
///
/// All trap instructions have an explicit trap code. The textual names used by the
/// `Display` and `FromStr` impls below round-trip exactly.
#[derive(Clone, Copy, PartialEq, Eq, Debug, Hash)]
#[cfg_attr(feature = "enable-serde", derive(Serialize, Deserialize))]
pub enum TrapCode {
    /// The current stack space was exhausted.
    StackOverflow,

    /// A `heap_addr` instruction detected an out-of-bounds error.
    ///
    /// Note that not all out-of-bounds heap accesses are reported this way;
    /// some are detected by a segmentation fault on the heap unmapped or
    /// offset-guard pages.
    HeapOutOfBounds,

    /// A wasm atomic operation was presented with a not-naturally-aligned linear-memory address.
    HeapMisaligned,

    /// A `table_addr` instruction detected an out-of-bounds error.
    TableOutOfBounds,

    /// Indirect call to a null table entry.
    IndirectCallToNull,

    /// Signature mismatch on indirect call.
    BadSignature,

    /// An integer arithmetic operation caused an overflow.
    IntegerOverflow,

    /// An integer division by zero.
    IntegerDivisionByZero,

    /// Failed float-to-int conversion.
    BadConversionToInteger,

    /// Code that was supposed to have been unreachable was reached.
    UnreachableCodeReached,

    /// Execution has potentially run too long and may be interrupted.
    /// This trap is resumable.
    Interrupt,

    /// A user-defined trap code.
    User(u16),
}

impl Display for TrapCode {
    fn fmt(&self, f: &mut Formatter) -> fmt::Result {
        // These identifiers must round-trip through the FromStr impl below.
        match *self {
            Self::StackOverflow => f.write_str("stk_ovf"),
            Self::HeapOutOfBounds => f.write_str("heap_oob"),
            Self::HeapMisaligned => f.write_str("heap_misaligned"),
            Self::TableOutOfBounds => f.write_str("table_oob"),
            Self::IndirectCallToNull => f.write_str("icall_null"),
            Self::BadSignature => f.write_str("bad_sig"),
            Self::IntegerOverflow => f.write_str("int_ovf"),
            Self::IntegerDivisionByZero => f.write_str("int_divz"),
            Self::BadConversionToInteger => f.write_str("bad_toint"),
            Self::UnreachableCodeReached => f.write_str("unreachable"),
            Self::Interrupt => f.write_str("interrupt"),
            Self::User(x) => write!(f, "user{}", x),
        }
    }
}

impl FromStr for TrapCode {
    type Err = ();

    fn from_str(s: &str) -> Result<Self, Self::Err> {
        let code = match s {
            "stk_ovf" => Self::StackOverflow,
            "heap_oob" => Self::HeapOutOfBounds,
            "heap_misaligned" => Self::HeapMisaligned,
            "table_oob" => Self::TableOutOfBounds,
            "icall_null" => Self::IndirectCallToNull,
            "bad_sig" => Self::BadSignature,
            "int_ovf" => Self::IntegerOverflow,
            "int_divz" => Self::IntegerDivisionByZero,
            "bad_toint" => Self::BadConversionToInteger,
            "unreachable" => Self::UnreachableCodeReached,
            "interrupt" => Self::Interrupt,
            // User codes are spelled `user<N>` where `<N>` is a decimal `u16`.
            _ if s.starts_with("user") => return s[4..].parse().map(Self::User).map_err(|_| ()),
            _ => return Err(()),
        };
        Ok(code)
    }
}
+
#[cfg(test)]
mod tests {
    use super::*;
    use alloc::string::ToString;

    // Everything but user-defined codes.
    const CODES: [TrapCode; 11] = [
        TrapCode::StackOverflow,
        TrapCode::HeapOutOfBounds,
        TrapCode::HeapMisaligned,
        TrapCode::TableOutOfBounds,
        TrapCode::IndirectCallToNull,
        TrapCode::BadSignature,
        TrapCode::IntegerOverflow,
        TrapCode::IntegerDivisionByZero,
        TrapCode::BadConversionToInteger,
        TrapCode::UnreachableCodeReached,
        TrapCode::Interrupt,
    ];

    #[test]
    fn display() {
        // Every named code must round-trip through Display and FromStr.
        for r in &CODES {
            let tc = *r;
            assert_eq!(tc.to_string().parse(), Ok(tc));
        }
        assert_eq!("bogus".parse::<TrapCode>(), Err(()));

        // User codes carry a decimal u16 payload; malformed payloads are rejected.
        assert_eq!(TrapCode::User(17).to_string(), "user17");
        assert_eq!("user22".parse(), Ok(TrapCode::User(22)));
        assert_eq!("user".parse::<TrapCode>(), Err(()));
        assert_eq!("user-1".parse::<TrapCode>(), Err(()));
        assert_eq!("users".parse::<TrapCode>(), Err(()));
    }
}
diff --git a/third_party/rust/cranelift-codegen/src/ir/types.rs b/third_party/rust/cranelift-codegen/src/ir/types.rs
new file mode 100644
index 0000000000..b9499435b8
--- /dev/null
+++ b/third_party/rust/cranelift-codegen/src/ir/types.rs
@@ -0,0 +1,533 @@
+//! Common types for the Cranelift code generator.
+
+use core::default::Default;
+use core::fmt::{self, Debug, Display, Formatter};
+use cranelift_codegen_shared::constants;
+#[cfg(feature = "enable-serde")]
+use serde::{Deserialize, Serialize};
+use target_lexicon::{PointerWidth, Triple};
+
+/// The type of an SSA value.
+///
+/// The `INVALID` type isn't a real type, and is used as a placeholder in the IR where a type
+/// field is present but no type is needed, such as the controlling type variable for a
+/// non-polymorphic instruction.
+///
+/// Basic integer types: `I8`, `I16`, `I32`, `I64`, and `I128`. These types are sign-agnostic.
+///
+/// Basic floating point types: `F32` and `F64`. IEEE single and double precision.
+///
+/// Boolean types: `B1`, `B8`, `B16`, `B32`, `B64`, and `B128`. These all encode 'true' or 'false'. The
+/// larger types use redundant bits.
+///
+/// SIMD vector types have power-of-two lanes, up to 256. Lanes can be any int/float/bool type.
+///
+/// The wrapped `u8` is an encoded type number; the accessors on `Type` decode it (the low
+/// nibble selects the lane kind, the high nibble carries lane-count information — see
+/// `lane_type()` and `log2_lane_count()`).
+#[derive(Copy, Clone, PartialEq, Eq, Hash)]
+#[cfg_attr(feature = "enable-serde", derive(Serialize, Deserialize))]
+pub struct Type(u8);
+
+/// Not a valid type. Can't be loaded or stored. Can't be part of a SIMD vector.
+pub const INVALID: Type = Type(0);
+
+// Include code generated by `cranelift-codegen/meta/gen_types.rs`. This file contains constant
+// definitions for all the scalar types as well as common vector types for 64, 128, 256, and
+// 512-bit SIMD vectors.
+include!(concat!(env!("OUT_DIR"), "/types.rs"));
+
+impl Type {
+    /// Get the lane type of this SIMD vector type.
+    ///
+    /// A lane type is the same as a SIMD vector type with one lane, so it returns itself.
+    pub fn lane_type(self) -> Self {
+        if self.0 < constants::VECTOR_BASE {
+            // Special and scalar types are their own lane type.
+            self
+        } else {
+            // Vector types keep the lane kind in the low nibble; rebase it into the
+            // scalar (lane) range.
+            Self(constants::LANE_BASE | (self.0 & 0x0f))
+        }
+    }
+
+    /// The type transformation that returns the lane type of a type variable; it is just a
+    /// renaming of lane_type() to be used in context where we think in terms of type variable
+    /// transformations.
+    pub fn lane_of(self) -> Self {
+        self.lane_type()
+    }
+
+    /// Get log_2 of the number of bits in a lane.
+    ///
+    /// Special types (`INVALID`, the flags types) report 0.
+    pub fn log2_lane_bits(self) -> u8 {
+        match self.lane_type() {
+            B1 => 0,
+            B8 | I8 => 3,
+            B16 | I16 => 4,
+            B32 | I32 | F32 | R32 => 5,
+            B64 | I64 | F64 | R64 => 6,
+            B128 | I128 => 7,
+            _ => 0,
+        }
+    }
+
+    /// Get the number of bits in a lane.
+    ///
+    /// Special types (`INVALID`, the flags types) report 0.
+    pub fn lane_bits(self) -> u8 {
+        match self.lane_type() {
+            B1 => 1,
+            B8 | I8 => 8,
+            B16 | I16 => 16,
+            B32 | I32 | F32 | R32 => 32,
+            B64 | I64 | F64 | R64 => 64,
+            B128 | I128 => 128,
+            _ => 0,
+        }
+    }
+
+    /// Get an integer type with the requested number of bits.
+    ///
+    /// Returns `None` unless `bits` is one of 8, 16, 32, 64, or 128.
+    pub fn int(bits: u16) -> Option<Self> {
+        match bits {
+            8 => Some(I8),
+            16 => Some(I16),
+            32 => Some(I32),
+            64 => Some(I64),
+            128 => Some(I128),
+            _ => None,
+        }
+    }
+
+    /// Get a type with the same number of lanes as `self`, but using `lane` as the lane type.
+    fn replace_lanes(self, lane: Self) -> Self {
+        debug_assert!(lane.is_lane() && !self.is_special());
+        // Low nibble: lane kind (taken from `lane`); high nibble: lane count (kept from `self`).
+        Self((lane.0 & 0x0f) | (self.0 & 0xf0))
+    }
+
+    /// Get a type with the same number of lanes as this type, but with the lanes replaced by
+    /// booleans of the same size.
+    ///
+    /// Lane types are treated as vectors with one lane, so they are converted to the multi-bit
+    /// boolean types.
+    pub fn as_bool_pedantic(self) -> Self {
+        // Replace the low 4 bits with the boolean version, preserve the high 4 bits.
+        self.replace_lanes(match self.lane_type() {
+            B8 | I8 => B8,
+            B16 | I16 => B16,
+            B32 | I32 | F32 => B32,
+            B64 | I64 | F64 => B64,
+            R32 | R64 => panic!("Reference types should not convert to bool"),
+            B128 | I128 => B128,
+            _ => B1,
+        })
+    }
+
+    /// Get a type with the same number of lanes as this type, but with the lanes replaced by
+    /// booleans of the same size.
+    ///
+    /// Scalar types are all converted to `b1` which is usually what you want.
+    pub fn as_bool(self) -> Self {
+        if !self.is_vector() {
+            B1
+        } else {
+            self.as_bool_pedantic()
+        }
+    }
+
+    /// Get a type with the same number of lanes as this type, but with lanes that are half the
+    /// number of bits.
+    ///
+    /// Returns `None` when the lane type cannot be halved (e.g. `I8`, `B1`, `F32`).
+    pub fn half_width(self) -> Option<Self> {
+        Some(self.replace_lanes(match self.lane_type() {
+            I16 => I8,
+            I32 => I16,
+            I64 => I32,
+            I128 => I64,
+            F64 => F32,
+            B16 => B8,
+            B32 => B16,
+            B64 => B32,
+            B128 => B64,
+            _ => return None,
+        }))
+    }
+
+    /// Get a type with the same number of lanes as this type, but with lanes that are twice the
+    /// number of bits.
+    ///
+    /// Returns `None` when the lane type cannot be doubled (e.g. `I128`, `B128`, `F64`).
+    pub fn double_width(self) -> Option<Self> {
+        Some(self.replace_lanes(match self.lane_type() {
+            I8 => I16,
+            I16 => I32,
+            I32 => I64,
+            I64 => I128,
+            F32 => F64,
+            B8 => B16,
+            B16 => B32,
+            B32 => B64,
+            B64 => B128,
+            _ => return None,
+        }))
+    }
+
+    /// Is this the INVALID type?
+    pub fn is_invalid(self) -> bool {
+        self == INVALID
+    }
+
+    /// Is this a special type?
+    pub fn is_special(self) -> bool {
+        self.0 < constants::LANE_BASE
+    }
+
+    /// Is this a lane type?
+    ///
+    /// This is a scalar type that can also appear as the lane type of a SIMD vector.
+    pub fn is_lane(self) -> bool {
+        constants::LANE_BASE <= self.0 && self.0 < constants::VECTOR_BASE
+    }
+
+    /// Is this a SIMD vector type?
+    ///
+    /// A vector type has 2 or more lanes.
+    pub fn is_vector(self) -> bool {
+        self.0 >= constants::VECTOR_BASE
+    }
+
+    /// Is this a scalar boolean type?
+    ///
+    /// Vectors of booleans are not included; test `lane_type().is_bool()` for those.
+    pub fn is_bool(self) -> bool {
+        match self {
+            B1 | B8 | B16 | B32 | B64 | B128 => true,
+            _ => false,
+        }
+    }
+
+    /// Is this a scalar integer type?
+    pub fn is_int(self) -> bool {
+        match self {
+            I8 | I16 | I32 | I64 | I128 => true,
+            _ => false,
+        }
+    }
+
+    /// Is this a scalar floating point type?
+    pub fn is_float(self) -> bool {
+        match self {
+            F32 | F64 => true,
+            _ => false,
+        }
+    }
+
+    /// Is this a CPU flags type?
+    pub fn is_flags(self) -> bool {
+        match self {
+            IFLAGS | FFLAGS => true,
+            _ => false,
+        }
+    }
+
+    /// Is this a ref type?
+    pub fn is_ref(self) -> bool {
+        match self {
+            R32 | R64 => true,
+            _ => false,
+        }
+    }
+
+    /// Get log_2 of the number of lanes in this SIMD vector type.
+    ///
+    /// All SIMD types have a lane count that is a power of two and no larger than 256, so this
+    /// will be a number in the range 0-8.
+    ///
+    /// A scalar type is the same as a SIMD vector type with one lane, so it returns 0.
+    pub fn log2_lane_count(self) -> u8 {
+        // The high nibble of the encoding holds the log2 lane count, biased by LANE_BASE;
+        // `saturating_sub` makes special types (numbered below LANE_BASE) report 0 too.
+        self.0.saturating_sub(constants::LANE_BASE) >> 4
+    }
+
+    /// Get the number of lanes in this SIMD vector type.
+    ///
+    /// A scalar type is the same as a SIMD vector type with one lane, so it returns 1.
+    pub fn lane_count(self) -> u16 {
+        1 << self.log2_lane_count()
+    }
+
+    /// Get the total number of bits used to represent this type.
+    pub fn bits(self) -> u16 {
+        u16::from(self.lane_bits()) * self.lane_count()
+    }
+
+    /// Get the number of bytes used to store this type in memory.
+    pub fn bytes(self) -> u32 {
+        // Round up to a whole number of bytes (relevant for `b1`).
+        (u32::from(self.bits()) + 7) / 8
+    }
+
+    /// Get a SIMD vector type with `n` times more lanes than this one.
+    ///
+    /// If this is a scalar type, this produces a SIMD type with this as a lane type and `n` lanes.
+    ///
+    /// If this is already a SIMD vector type, this produces a SIMD vector type with `n *
+    /// self.lane_count()` lanes.
+    pub fn by(self, n: u16) -> Option<Self> {
+        // Types without a valid lane width (lane_bits() == 0) and non-power-of-two lane
+        // counts have no vector encoding.
+        if self.lane_bits() == 0 || !n.is_power_of_two() {
+            return None;
+        }
+        let log2_lanes: u32 = n.trailing_zeros();
+        let new_type = u32::from(self.0) + (log2_lanes << 4);
+        // The encoding is a u8; overflowing it means the requested vector is too large.
+        if new_type < 0x100 {
+            Some(Self(new_type as u8))
+        } else {
+            None
+        }
+    }
+
+    /// Get a SIMD vector with half the number of lanes.
+    ///
+    /// There is no `double_vector()` method. Use `t.by(2)` instead.
+    pub fn half_vector(self) -> Option<Self> {
+        if self.is_vector() {
+            // Decrement the log2 lane count stored in the high nibble.
+            Some(Self(self.0 - 0x10))
+        } else {
+            None
+        }
+    }
+
+    /// Split the lane width in half and double the number of lanes to maintain the same bit-width.
+    ///
+    /// If this is a scalar type of `n` bits, it produces a SIMD vector type of `(n/2)x2`.
+    pub fn split_lanes(self) -> Option<Self> {
+        match self.half_width() {
+            Some(half_width) => half_width.by(2),
+            None => None,
+        }
+    }
+
+    /// Merge lanes to half the number of lanes and double the lane width to maintain the same
+    /// bit-width.
+    ///
+    /// If this is a scalar type, it will return `None`.
+    pub fn merge_lanes(self) -> Option<Self> {
+        match self.double_width() {
+            Some(double_width) => double_width.half_vector(),
+            None => None,
+        }
+    }
+
+    /// Index of this type, for use with hash tables etc.
+    pub fn index(self) -> usize {
+        usize::from(self.0)
+    }
+
+    /// True iff:
+    ///
+    /// 1. `self.lane_count() == other.lane_count()` and
+    /// 2. `self.lane_bits() >= other.lane_bits()`
+    pub fn wider_or_equal(self, other: Self) -> bool {
+        self.lane_count() == other.lane_count() && self.lane_bits() >= other.lane_bits()
+    }
+
+    /// Return the pointer type for the given target triple.
+    ///
+    /// # Panics
+    ///
+    /// Panics when the triple's pointer width cannot be determined.
+    pub fn triple_pointer_type(triple: &Triple) -> Self {
+        match triple.pointer_width() {
+            Ok(PointerWidth::U16) => I16,
+            Ok(PointerWidth::U32) => I32,
+            Ok(PointerWidth::U64) => I64,
+            Err(()) => panic!("unable to determine architecture pointer width"),
+        }
+    }
+}
+
+impl Display for Type {
+    /// Print the textual IR name: a category letter plus the lane bit width for
+    /// scalars, `<lane>x<count>` for vectors, and fixed names for the special types.
+    fn fmt(&self, f: &mut Formatter) -> fmt::Result {
+        if self.is_bool() {
+            return write!(f, "b{}", self.lane_bits());
+        }
+        if self.is_int() {
+            return write!(f, "i{}", self.lane_bits());
+        }
+        if self.is_float() {
+            return write!(f, "f{}", self.lane_bits());
+        }
+        if self.is_vector() {
+            return write!(f, "{}x{}", self.lane_type(), self.lane_count());
+        }
+        if self.is_ref() {
+            return write!(f, "r{}", self.lane_bits());
+        }
+        match *self {
+            IFLAGS => f.write_str("iflags"),
+            FFLAGS => f.write_str("fflags"),
+            SARG_T => f.write_str("sarg_t"),
+            INVALID => panic!("INVALID encountered"),
+            _ => panic!("Unknown Type(0x{:x})", self.0),
+        }
+    }
+}
+
+impl Debug for Type {
+    /// Print the Rust-source spelling of the type constant (`types::I32`, `I32X4`, ...).
+    fn fmt(&self, f: &mut Formatter) -> fmt::Result {
+        if self.is_bool() {
+            return write!(f, "types::B{}", self.lane_bits());
+        }
+        if self.is_int() {
+            return write!(f, "types::I{}", self.lane_bits());
+        }
+        if self.is_float() {
+            return write!(f, "types::F{}", self.lane_bits());
+        }
+        if self.is_vector() {
+            return write!(f, "{:?}X{}", self.lane_type(), self.lane_count());
+        }
+        if self.is_ref() {
+            return write!(f, "types::R{}", self.lane_bits());
+        }
+        match *self {
+            INVALID => write!(f, "types::INVALID"),
+            IFLAGS => write!(f, "types::IFLAGS"),
+            FFLAGS => write!(f, "types::FFLAGS"),
+            _ => write!(f, "Type(0x{:x})", self.0),
+        }
+    }
+}
+
+impl Default for Type {
+    /// The default type is `INVALID`, the "no type" placeholder.
+    fn default() -> Self {
+        INVALID
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+    use alloc::string::ToString;
+
+    #[test]
+    fn basic_scalars() {
+        // A scalar or special type is its own lane type.
+        assert_eq!(INVALID, INVALID.lane_type());
+        assert_eq!(0, INVALID.bits());
+        assert_eq!(IFLAGS, IFLAGS.lane_type());
+        assert_eq!(0, IFLAGS.bits());
+        assert_eq!(FFLAGS, FFLAGS.lane_type());
+        assert_eq!(0, FFLAGS.bits());
+        assert_eq!(B1, B1.lane_type());
+        assert_eq!(B8, B8.lane_type());
+        assert_eq!(B16, B16.lane_type());
+        assert_eq!(B32, B32.lane_type());
+        assert_eq!(B64, B64.lane_type());
+        assert_eq!(B128, B128.lane_type());
+        assert_eq!(I8, I8.lane_type());
+        assert_eq!(I16, I16.lane_type());
+        assert_eq!(I32, I32.lane_type());
+        assert_eq!(I64, I64.lane_type());
+        assert_eq!(I128, I128.lane_type());
+        assert_eq!(F32, F32.lane_type());
+        assert_eq!(F64, F64.lane_type());
+        assert_eq!(B1, B1.by(8).unwrap().lane_type());
+        assert_eq!(I32, I32X4.lane_type());
+        assert_eq!(F64, F64X2.lane_type());
+        assert_eq!(R32, R32.lane_type());
+        assert_eq!(R64, R64.lane_type());
+
+        assert_eq!(INVALID.lane_bits(), 0);
+        assert_eq!(IFLAGS.lane_bits(), 0);
+        assert_eq!(FFLAGS.lane_bits(), 0);
+        assert_eq!(B1.lane_bits(), 1);
+        assert_eq!(B8.lane_bits(), 8);
+        assert_eq!(B16.lane_bits(), 16);
+        assert_eq!(B32.lane_bits(), 32);
+        assert_eq!(B64.lane_bits(), 64);
+        assert_eq!(B128.lane_bits(), 128);
+        assert_eq!(I8.lane_bits(), 8);
+        assert_eq!(I16.lane_bits(), 16);
+        assert_eq!(I32.lane_bits(), 32);
+        assert_eq!(I64.lane_bits(), 64);
+        assert_eq!(I128.lane_bits(), 128);
+        assert_eq!(F32.lane_bits(), 32);
+        assert_eq!(F64.lane_bits(), 64);
+        assert_eq!(R32.lane_bits(), 32);
+        assert_eq!(R64.lane_bits(), 64);
+    }
+
+    #[test]
+    fn typevar_functions() {
+        assert_eq!(INVALID.half_width(), None);
+        // Fixed: this assertion previously re-tested INVALID, leaving IFLAGS untested;
+        // it should mirror the double_width() checks below (INVALID, IFLAGS, FFLAGS).
+        assert_eq!(IFLAGS.half_width(), None);
+        assert_eq!(FFLAGS.half_width(), None);
+        assert_eq!(B1.half_width(), None);
+        assert_eq!(B8.half_width(), None);
+        assert_eq!(B16.half_width(), Some(B8));
+        assert_eq!(B32.half_width(), Some(B16));
+        assert_eq!(B64.half_width(), Some(B32));
+        assert_eq!(B128.half_width(), Some(B64));
+        assert_eq!(I8.half_width(), None);
+        assert_eq!(I16.half_width(), Some(I8));
+        assert_eq!(I32.half_width(), Some(I16));
+        assert_eq!(I32X4.half_width(), Some(I16X4));
+        assert_eq!(I64.half_width(), Some(I32));
+        assert_eq!(I128.half_width(), Some(I64));
+        assert_eq!(F32.half_width(), None);
+        assert_eq!(F64.half_width(), Some(F32));
+
+        assert_eq!(INVALID.double_width(), None);
+        assert_eq!(IFLAGS.double_width(), None);
+        assert_eq!(FFLAGS.double_width(), None);
+        assert_eq!(B1.double_width(), None);
+        assert_eq!(B8.double_width(), Some(B16));
+        assert_eq!(B16.double_width(), Some(B32));
+        assert_eq!(B32.double_width(), Some(B64));
+        assert_eq!(B64.double_width(), Some(B128));
+        assert_eq!(B128.double_width(), None);
+        assert_eq!(I8.double_width(), Some(I16));
+        assert_eq!(I16.double_width(), Some(I32));
+        assert_eq!(I32.double_width(), Some(I64));
+        assert_eq!(I32X4.double_width(), Some(I64X4));
+        assert_eq!(I64.double_width(), Some(I128));
+        assert_eq!(I128.double_width(), None);
+        assert_eq!(F32.double_width(), Some(F64));
+        assert_eq!(F64.double_width(), None);
+    }
+
+    #[test]
+    fn vectors() {
+        // 256 lanes is the maximum encodable vector size.
+        let big = F64.by(256).unwrap();
+        assert_eq!(big.lane_bits(), 64);
+        assert_eq!(big.lane_count(), 256);
+        assert_eq!(big.bits(), 64 * 256);
+
+        assert_eq!(big.half_vector().unwrap().to_string(), "f64x128");
+        assert_eq!(B1.by(2).unwrap().half_vector().unwrap().to_string(), "b1");
+        assert_eq!(I32.half_vector(), None);
+        assert_eq!(INVALID.half_vector(), None);
+
+        // Check that the generated constants match the computed vector types.
+        assert_eq!(I32.by(4), Some(I32X4));
+        assert_eq!(F64.by(8), Some(F64X8));
+    }
+
+    #[test]
+    fn format_scalars() {
+        assert_eq!(IFLAGS.to_string(), "iflags");
+        assert_eq!(FFLAGS.to_string(), "fflags");
+        assert_eq!(B1.to_string(), "b1");
+        assert_eq!(B8.to_string(), "b8");
+        assert_eq!(B16.to_string(), "b16");
+        assert_eq!(B32.to_string(), "b32");
+        assert_eq!(B64.to_string(), "b64");
+        assert_eq!(B128.to_string(), "b128");
+        assert_eq!(I8.to_string(), "i8");
+        assert_eq!(I16.to_string(), "i16");
+        assert_eq!(I32.to_string(), "i32");
+        assert_eq!(I64.to_string(), "i64");
+        assert_eq!(I128.to_string(), "i128");
+        assert_eq!(F32.to_string(), "f32");
+        assert_eq!(F64.to_string(), "f64");
+        assert_eq!(R32.to_string(), "r32");
+        assert_eq!(R64.to_string(), "r64");
+    }
+
+    #[test]
+    fn format_vectors() {
+        assert_eq!(B1.by(8).unwrap().to_string(), "b1x8");
+        assert_eq!(B8.by(1).unwrap().to_string(), "b8");
+        assert_eq!(B16.by(256).unwrap().to_string(), "b16x256");
+        assert_eq!(B32.by(4).unwrap().by(2).unwrap().to_string(), "b32x8");
+        assert_eq!(B64.by(8).unwrap().to_string(), "b64x8");
+        assert_eq!(I8.by(64).unwrap().to_string(), "i8x64");
+        assert_eq!(F64.by(2).unwrap().to_string(), "f64x2");
+        assert_eq!(I8.by(3), None);
+        assert_eq!(I8.by(512), None);
+        assert_eq!(INVALID.by(4), None);
+    }
+
+    #[test]
+    fn as_bool() {
+        assert_eq!(I32X4.as_bool(), B32X4);
+        assert_eq!(I32.as_bool(), B1);
+        assert_eq!(I32X4.as_bool_pedantic(), B32X4);
+        assert_eq!(I32.as_bool_pedantic(), B32);
+    }
+}
diff --git a/third_party/rust/cranelift-codegen/src/ir/valueloc.rs b/third_party/rust/cranelift-codegen/src/ir/valueloc.rs
new file mode 100644
index 0000000000..d0b924886a
--- /dev/null
+++ b/third_party/rust/cranelift-codegen/src/ir/valueloc.rs
@@ -0,0 +1,166 @@
+//! Value locations.
+//!
+//! The register allocator assigns every SSA value to either a register or a stack slot. This
+//! assignment is represented by a `ValueLoc` object.
+
+use crate::ir::StackSlot;
+use crate::isa::{RegInfo, RegUnit};
+use core::fmt;
+
+#[cfg(feature = "enable-serde")]
+use serde::{Deserialize, Serialize};
+
+/// Value location.
+///
+/// Use [`ValueLoc::display`] with the target ISA's `RegInfo` to print register
+/// assignments with their proper names.
+#[derive(Copy, Clone, Debug, PartialEq, Eq)]
+#[cfg_attr(feature = "enable-serde", derive(Serialize, Deserialize))]
+pub enum ValueLoc {
+    /// This value has not been assigned to a location yet.
+    Unassigned,
+    /// Value is assigned to a register.
+    Reg(RegUnit),
+    /// Value is assigned to a stack slot.
+    Stack(StackSlot),
+}
+
+impl Default for ValueLoc {
+    /// Values start out without an assigned location.
+    fn default() -> Self {
+        Self::Unassigned
+    }
+}
+
+impl ValueLoc {
+    /// Is this an assigned location? (That is, not `Unassigned`).
+    pub fn is_assigned(self) -> bool {
+        !matches!(self, Self::Unassigned)
+    }
+
+    /// Get the register unit of this location, or panic.
+    pub fn unwrap_reg(self) -> RegUnit {
+        if let Self::Reg(ru) = self {
+            ru
+        } else {
+            panic!("unwrap_reg expected register, found {:?}", self)
+        }
+    }
+
+    /// Get the stack slot of this location, or panic.
+    pub fn unwrap_stack(self) -> StackSlot {
+        if let Self::Stack(ss) = self {
+            ss
+        } else {
+            panic!("unwrap_stack expected stack slot, found {:?}", self)
+        }
+    }
+
+    /// Return an object that can display this value location, using the register info from the
+    /// target ISA.
+    pub fn display<'a, R: Into<Option<&'a RegInfo>>>(self, regs: R) -> DisplayValueLoc<'a> {
+        DisplayValueLoc(self, regs.into())
+    }
+}
+
+/// Displaying a `ValueLoc` correctly requires the associated `RegInfo` from the target ISA.
+/// Without the register info, register units are simply shown as numbers.
+///
+/// The `DisplayValueLoc` type can display the contained `ValueLoc`.
+pub struct DisplayValueLoc<'a>(ValueLoc, Option<&'a RegInfo>);
+
+impl<'a> fmt::Display for DisplayValueLoc<'a> {
+    /// Print `-` for unassigned, the register name (or `%<unit>` without `RegInfo`)
+    /// for registers, and the stack slot for stack locations.
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        match self.0 {
+            ValueLoc::Unassigned => f.write_str("-"),
+            ValueLoc::Reg(ru) => {
+                if let Some(regs) = self.1 {
+                    write!(f, "{}", regs.display_regunit(ru))
+                } else {
+                    write!(f, "%{}", ru)
+                }
+            }
+            ValueLoc::Stack(ss) => write!(f, "{}", ss),
+        }
+    }
+}
+
+/// Function argument location.
+///
+/// The ABI specifies how arguments are passed to a function, and where return values appear after
+/// the call. Just like a `ValueLoc`, function arguments can be passed in registers or on the
+/// stack.
+///
+/// Function arguments on the stack are accessed differently for the incoming arguments to the
+/// current function and the outgoing arguments to a called external function. For this reason,
+/// the location of stack arguments is described as an offset into the array of function arguments
+/// on the stack.
+///
+/// An `ArgumentLoc` can be translated to a `ValueLoc` only when we know if we're talking about an
+/// incoming argument or an outgoing argument.
+///
+/// - For stack arguments, different `StackSlot` entities are used to represent incoming and
+/// outgoing arguments.
+/// - For register arguments, there is usually no difference, but if we ever add support for a
+/// register-window ISA like SPARC, register arguments would also need to be translated.
+///
+/// Use [`ArgumentLoc::display`] to print register assignments with their proper names.
+#[derive(Copy, Clone, Debug, Eq, PartialEq, Hash)]
+#[cfg_attr(feature = "enable-serde", derive(Serialize, Deserialize))]
+pub enum ArgumentLoc {
+    /// This argument has not been assigned to a location yet.
+    Unassigned,
+    /// Argument is passed in a register.
+    Reg(RegUnit),
+    /// Argument is passed on the stack, at the given byte offset into the argument array.
+    Stack(i32),
+}
+
+impl Default for ArgumentLoc {
+    /// Arguments start out without an assigned location.
+    fn default() -> Self {
+        Self::Unassigned
+    }
+}
+
+impl ArgumentLoc {
+    /// Is this an assigned location? (That is, not `Unassigned`).
+    pub fn is_assigned(self) -> bool {
+        !matches!(self, Self::Unassigned)
+    }
+
+    /// Is this a register location?
+    pub fn is_reg(self) -> bool {
+        matches!(self, Self::Reg(_))
+    }
+
+    /// Is this a stack location?
+    pub fn is_stack(self) -> bool {
+        matches!(self, Self::Stack(_))
+    }
+
+    /// Return an object that can display this argument location, using the register info from the
+    /// target ISA.
+    pub fn display<'a, R: Into<Option<&'a RegInfo>>>(self, regs: R) -> DisplayArgumentLoc<'a> {
+        DisplayArgumentLoc(self, regs.into())
+    }
+}
+
+/// Displaying a `ArgumentLoc` correctly requires the associated `RegInfo` from the target ISA.
+/// Without the register info, register units are simply shown as numbers.
+///
+/// The `DisplayArgumentLoc` type can display the contained `ArgumentLoc`.
+pub struct DisplayArgumentLoc<'a>(ArgumentLoc, Option<&'a RegInfo>);
+
+impl<'a> fmt::Display for DisplayArgumentLoc<'a> {
+    /// Print `-` for unassigned, the register name (or `%<unit>` without `RegInfo`)
+    /// for registers, and the byte offset for stack locations.
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        match self.0 {
+            ArgumentLoc::Unassigned => f.write_str("-"),
+            ArgumentLoc::Reg(ru) => {
+                if let Some(regs) = self.1 {
+                    write!(f, "{}", regs.display_regunit(ru))
+                } else {
+                    write!(f, "%{}", ru)
+                }
+            }
+            ArgumentLoc::Stack(offset) => write!(f, "{}", offset),
+        }
+    }
+}