author    Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-28 14:29:10 +0000
committer Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-28 14:29:10 +0000
commit    2aa4a82499d4becd2284cdb482213d541b8804dd (patch)
tree      b80bf8bf13c3766139fbacc530efd0dd9d54394c /third_party/rust/cranelift-frontend
parent    Initial commit. (diff)
Adding upstream version 86.0.1.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'third_party/rust/cranelift-frontend')
-rw-r--r--  third_party/rust/cranelift-frontend/.cargo-checksum.json |    1
-rw-r--r--  third_party/rust/cranelift-frontend/Cargo.toml           |   26
-rw-r--r--  third_party/rust/cranelift-frontend/LICENSE              |  220
-rw-r--r--  third_party/rust/cranelift-frontend/README.md            |    5
-rw-r--r--  third_party/rust/cranelift-frontend/src/frontend.rs      | 1315
-rw-r--r--  third_party/rust/cranelift-frontend/src/lib.rs           |  206
-rw-r--r--  third_party/rust/cranelift-frontend/src/ssa.rs           | 1366
-rw-r--r--  third_party/rust/cranelift-frontend/src/switch.rs        |  661
-rw-r--r--  third_party/rust/cranelift-frontend/src/variable.rs      |   35
9 files changed, 3835 insertions, 0 deletions
diff --git a/third_party/rust/cranelift-frontend/.cargo-checksum.json b/third_party/rust/cranelift-frontend/.cargo-checksum.json
new file mode 100644
index 0000000000..ec23b113df
--- /dev/null
+++ b/third_party/rust/cranelift-frontend/.cargo-checksum.json
@@ -0,0 +1 @@
+{"files":{"Cargo.toml":"efedd48411232c30fe72899fca348ca08a8cc83b373163376ee1780994608d8f","LICENSE":"268872b9816f90fd8e85db5a28d33f8150ebb8dd016653fb39ef1f94f2686bc5","README.md":"dea43e8044284df50f8b8772e9b48ba8b109b45c74111ff73619775d57ad8d67","src/frontend.rs":"53e108a04ead6890ee78f674b9ed6862443c66dd85ec214e4c27e0da3f67228e","src/lib.rs":"e757197479cc26732f5f872ffe40f60fdc376c748e1c16e9b42976fef51a2161","src/ssa.rs":"650d26025706cfb63935f956bca6f166b0edfa32260cd2a8c27f9b49fcc743c3","src/switch.rs":"73f9058a899d2b19ed7135e028cc6005c6e3016f8530619519eac9627cb1383e","src/variable.rs":"399437bd7d2ac11a7a748bad7dd1f6dac58824d374ec318f36367a9d077cc225"},"package":null} \ No newline at end of file
diff --git a/third_party/rust/cranelift-frontend/Cargo.toml b/third_party/rust/cranelift-frontend/Cargo.toml
new file mode 100644
index 0000000000..d92665fc89
--- /dev/null
+++ b/third_party/rust/cranelift-frontend/Cargo.toml
@@ -0,0 +1,26 @@
+[package]
+authors = ["The Cranelift Project Developers"]
+name = "cranelift-frontend"
+version = "0.68.0"
+description = "Cranelift IR builder helper"
+license = "Apache-2.0 WITH LLVM-exception"
+documentation = "https://docs.rs/cranelift-frontend"
+categories = ["no-std"]
+repository = "https://github.com/bytecodealliance/wasmtime"
+readme = "README.md"
+edition = "2018"
+
+[dependencies]
+cranelift-codegen = { path = "../codegen", version = "0.68.0", default-features = false }
+target-lexicon = "0.11"
+log = { version = "0.4.6", default-features = false }
+hashbrown = { version = "0.9.1", optional = true }
+smallvec = { version = "1.0.0" }
+
+[features]
+default = ["std"]
+std = ["cranelift-codegen/std"]
+core = ["hashbrown", "cranelift-codegen/core"]
+
+[badges]
+maintenance = { status = "experimental" }
diff --git a/third_party/rust/cranelift-frontend/LICENSE b/third_party/rust/cranelift-frontend/LICENSE
new file mode 100644
index 0000000000..f9d81955f4
--- /dev/null
+++ b/third_party/rust/cranelift-frontend/LICENSE
@@ -0,0 +1,220 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+
+
+--- LLVM Exceptions to the Apache 2.0 License ----
+
+As an exception, if, as a result of your compiling your source code, portions
+of this Software are embedded into an Object form of such source code, you
+may redistribute such embedded portions in such Object form without complying
+with the conditions of Sections 4(a), 4(b) and 4(d) of the License.
+
+In addition, if you combine or link compiled forms of this Software with
+software that is licensed under the GPLv2 ("Combined Software") and if a
+court of competent jurisdiction determines that the patent provision (Section
+3), the indemnity provision (Section 9) or other Section of the License
+conflicts with the conditions of the GPLv2, you may retroactively and
+prospectively choose to deem waived or otherwise exclude such Section(s) of
+the License, but only in their entirety and only with respect to the Combined
+Software.
+
diff --git a/third_party/rust/cranelift-frontend/README.md b/third_party/rust/cranelift-frontend/README.md
new file mode 100644
index 0000000000..e43ad48f45
--- /dev/null
+++ b/third_party/rust/cranelift-frontend/README.md
@@ -0,0 +1,5 @@
+This crate provides a straightforward way to create a
+[Cranelift](https://crates.io/crates/cranelift) IR function and fill it with
+instructions translated from another language. It contains an SSA construction
+module that provides convenient methods for translating non-SSA variables into
+SSA Cranelift IR values via `use_var` and `def_var` calls.
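To make the README's description concrete, here is a minimal hedged sketch of that workflow, based on the 0.68 API added by this commit (the function name `demo` and the variable `x` are illustrative; see the full worked example in `src/lib.rs` below):

```rust
use cranelift_codegen::entity::EntityRef;
use cranelift_codegen::ir::types::I32;
use cranelift_codegen::ir::{AbiParam, ExternalName, Function, InstBuilder, Signature};
use cranelift_codegen::isa::CallConv;
use cranelift_frontend::{FunctionBuilder, FunctionBuilderContext, Variable};

fn main() {
    let mut sig = Signature::new(CallConv::SystemV);
    sig.returns.push(AbiParam::new(I32));
    let mut func = Function::with_name_signature(ExternalName::testcase("demo"), sig);
    let mut ctx = FunctionBuilderContext::new();
    let mut builder = FunctionBuilder::new(&mut func, &mut ctx);

    let block = builder.create_block();
    builder.switch_to_block(block);
    builder.seal_block(block); // no predecessors, so it can be sealed immediately

    // A mutable source-language variable is declared once, then freely
    // redefined; SSA construction happens behind the scenes.
    let x = Variable::new(0);
    builder.declare_var(x, I32);
    let two = builder.ins().iconst(I32, 2);
    builder.def_var(x, two); // x = 2
    let cur = builder.use_var(x); // reads the current SSA value of x
    let doubled = builder.ins().iadd(cur, cur);
    builder.def_var(x, doubled); // x = x + x
    let ret = builder.use_var(x);
    builder.ins().return_(&[ret]);
    builder.finalize();
}
```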
diff --git a/third_party/rust/cranelift-frontend/src/frontend.rs b/third_party/rust/cranelift-frontend/src/frontend.rs
new file mode 100644
index 0000000000..3b9263301a
--- /dev/null
+++ b/third_party/rust/cranelift-frontend/src/frontend.rs
@@ -0,0 +1,1315 @@
+//! A frontend for building Cranelift IR from other languages.
+use crate::ssa::{SSABuilder, SideEffects};
+use crate::variable::Variable;
+use cranelift_codegen::cursor::{Cursor, FuncCursor};
+use cranelift_codegen::entity::{EntitySet, SecondaryMap};
+use cranelift_codegen::ir;
+use cranelift_codegen::ir::function::DisplayFunction;
+use cranelift_codegen::ir::{
+ types, AbiParam, Block, DataFlowGraph, ExtFuncData, ExternalName, FuncRef, Function,
+ GlobalValue, GlobalValueData, Heap, HeapData, Inst, InstBuilder, InstBuilderBase,
+ InstructionData, JumpTable, JumpTableData, LibCall, MemFlags, SigRef, Signature, StackSlot,
+ StackSlotData, Type, Value, ValueLabel, ValueLabelAssignments, ValueLabelStart,
+};
+use cranelift_codegen::isa::{TargetFrontendConfig, TargetIsa};
+use cranelift_codegen::packed_option::PackedOption;
+
+/// Structure used for translating a series of functions into Cranelift IR.
+///
+/// In order to reduce memory reallocations when compiling multiple functions,
+/// `FunctionBuilderContext` holds various data structures which are cleared between
+/// functions, rather than dropped, preserving the underlying allocations.
+pub struct FunctionBuilderContext {
+ ssa: SSABuilder,
+ blocks: SecondaryMap<Block, BlockData>,
+ types: SecondaryMap<Variable, Type>,
+}
+
+/// Temporary object used to build a single Cranelift IR `Function`.
+pub struct FunctionBuilder<'a> {
+ /// The function currently being built.
+ /// This field is public so the function can be re-borrowed.
+ pub func: &'a mut Function,
+
+ /// Source location to assign to all new instructions.
+ srcloc: ir::SourceLoc,
+
+ func_ctx: &'a mut FunctionBuilderContext,
+ position: PackedOption<Block>,
+}
+
+#[derive(Clone, Default)]
+struct BlockData {
+ /// A Block is "pristine" iff no instructions have been added since the last
+ /// call to `switch_to_block()`.
+ pristine: bool,
+
+ /// A Block is "filled" iff a terminator instruction has been inserted since
+ /// the last call to `switch_to_block()`.
+ ///
+ /// A filled block cannot be pristine.
+ filled: bool,
+
+ /// Count of parameters not supplied implicitly by the SSABuilder.
+ user_param_count: usize,
+}
+
+impl FunctionBuilderContext {
+ /// Creates a FunctionBuilderContext structure. The structure is automatically cleared after
+ /// each [`FunctionBuilder`](struct.FunctionBuilder.html) completes translating a function.
+ pub fn new() -> Self {
+ Self {
+ ssa: SSABuilder::new(),
+ blocks: SecondaryMap::new(),
+ types: SecondaryMap::new(),
+ }
+ }
+
+ fn clear(&mut self) {
+ self.ssa.clear();
+ self.blocks.clear();
+ self.types.clear();
+ }
+
+ fn is_empty(&self) -> bool {
+ self.ssa.is_empty() && self.blocks.is_empty() && self.types.is_empty()
+ }
+}
+
+/// Implementation of the [`InstBuilder`](cranelift_codegen::ir::InstBuilder) that has
+/// one convenience method per Cranelift IR instruction.
+pub struct FuncInstBuilder<'short, 'long: 'short> {
+ builder: &'short mut FunctionBuilder<'long>,
+ block: Block,
+}
+
+impl<'short, 'long> FuncInstBuilder<'short, 'long> {
+ fn new(builder: &'short mut FunctionBuilder<'long>, block: Block) -> Self {
+ Self { builder, block }
+ }
+}
+
+impl<'short, 'long> InstBuilderBase<'short> for FuncInstBuilder<'short, 'long> {
+ fn data_flow_graph(&self) -> &DataFlowGraph {
+ &self.builder.func.dfg
+ }
+
+ fn data_flow_graph_mut(&mut self) -> &mut DataFlowGraph {
+ &mut self.builder.func.dfg
+ }
+
+ // This implementation is richer than `InsertBuilder` because we use the data of the
+ // instruction being inserted to add related info to the DFG and the SSA building system,
+ // and perform debug sanity checks.
+ fn build(self, data: InstructionData, ctrl_typevar: Type) -> (Inst, &'short mut DataFlowGraph) {
+ // We only insert the Block in the layout when an instruction is added to it
+ self.builder.ensure_inserted_block();
+
+ let inst = self.builder.func.dfg.make_inst(data.clone());
+ self.builder.func.dfg.make_inst_results(inst, ctrl_typevar);
+ self.builder.func.layout.append_inst(inst, self.block);
+ if !self.builder.srcloc.is_default() {
+ self.builder.func.srclocs[inst] = self.builder.srcloc;
+ }
+
+ if data.opcode().is_branch() {
+ match data.branch_destination() {
+ Some(dest_block) => {
+ // If the user has supplied jump arguments we must adapt the arguments of
+ // the destination block
+ self.builder.declare_successor(dest_block, inst);
+ }
+ None => {
+                    // branch_destination() doesn't detect jump tables.
+                    // For a jump table, declare every entry as a successor.
+ if let InstructionData::BranchTable {
+ table, destination, ..
+ } = data
+ {
+ // Unlike all other jumps/branches, jump tables are
+ // capable of having the same successor appear
+ // multiple times, so we must deduplicate.
+ let mut unique = EntitySet::<Block>::new();
+ for dest_block in self
+ .builder
+ .func
+ .jump_tables
+ .get(table)
+ .expect("you are referencing an undeclared jump table")
+ .iter()
+ .filter(|&dest_block| unique.insert(*dest_block))
+ {
+                            // Call `declare_block_predecessor` instead of `declare_successor` to
+                            // avoid fighting the borrow checker.
+ self.builder.func_ctx.ssa.declare_block_predecessor(
+ *dest_block,
+ self.builder.position.unwrap(),
+ inst,
+ );
+ }
+ self.builder.declare_successor(destination, inst);
+ }
+ }
+ }
+ }
+
+ if data.opcode().is_terminator() {
+ self.builder.fill_current_block()
+ }
+ (inst, &mut self.builder.func.dfg)
+ }
+}
+
+/// This module allows you to create a function in Cranelift IR in a straightforward way, hiding
+/// all the complexity of its internal representation.
+///
+/// The module is parametrized by one type, which represents variables in your source
+/// language. It offers a convenient way to append instructions to your program flow.
+/// You are responsible for splitting your instruction flow into extended blocks (declared with
+/// `create_block`) whose properties are:
+///
+/// - branch and jump instructions can only point at the top of extended blocks;
+/// - the last instruction of each block is a terminator instruction which has no natural successor,
+/// and those instructions can only appear at the end of extended blocks.
+///
+/// The parameters of Cranelift IR instructions are Cranelift IR values, which can only be created
+/// as results of other Cranelift IR instructions. To be able to create variables redefined multiple
+/// times in your program, use the `def_var` and `use_var` commands, which maintain the
+/// correspondence between your variables and Cranelift IR SSA values.
+///
+/// The first block for which you call `switch_to_block` will be assumed to be the beginning of
+/// the function.
+///
+/// At creation, a `FunctionBuilder` instance borrows an already allocated `Function` which it
+/// modifies with the information stored in the mutable borrowed
+/// [`FunctionBuilderContext`](struct.FunctionBuilderContext.html). The function passed in
+/// argument should be newly created with
+/// [`Function::with_name_signature()`](Function::with_name_signature), whereas the
+/// `FunctionBuilderContext` can be kept as is between two function translations.
+///
+/// # Errors
+///
+/// The functions below will panic in debug mode whenever you try to modify the Cranelift IR
+/// function in a way that violates the coherence of the code: for instance, switching to a new
+/// `Block` before you have filled the current one with a terminator instruction, or inserting a
+/// return instruction whose arguments don't match the function's signature.
+impl<'a> FunctionBuilder<'a> {
+ /// Creates a new FunctionBuilder structure that will operate on a `Function` using a
+ /// `FunctionBuilderContext`.
+ pub fn new(func: &'a mut Function, func_ctx: &'a mut FunctionBuilderContext) -> Self {
+ debug_assert!(func_ctx.is_empty());
+ Self {
+ func,
+ srcloc: Default::default(),
+ func_ctx,
+ position: Default::default(),
+ }
+ }
+
+ /// Get the block that this builder is currently at.
+ pub fn current_block(&self) -> Option<Block> {
+ self.position.expand()
+ }
+
+ /// Set the source location that should be assigned to all new instructions.
+ pub fn set_srcloc(&mut self, srcloc: ir::SourceLoc) {
+ self.srcloc = srcloc;
+ }
+
+ /// Creates a new `Block` and returns its reference.
+ pub fn create_block(&mut self) -> Block {
+ let block = self.func.dfg.make_block();
+ self.func_ctx.ssa.declare_block(block);
+ self.func_ctx.blocks[block] = BlockData {
+ filled: false,
+ pristine: true,
+ user_param_count: 0,
+ };
+ block
+ }
+
+ /// Insert `block` in the layout *after* the existing block `after`.
+ pub fn insert_block_after(&mut self, block: Block, after: Block) {
+ self.func.layout.insert_block_after(block, after);
+ }
+
+    /// After the call to this function, new instructions will be inserted into the designated
+    /// block, in the order they are created. You must have declared the types of any `Block`
+    /// arguments you intend to use here.
+    ///
+    /// Once a terminator instruction (one with no fallthrough to an immediate successor) is
+    /// inserted, the block is declared filled and no further instructions can be appended to it.
+ pub fn switch_to_block(&mut self, block: Block) {
+ // First we check that the previous block has been filled.
+ debug_assert!(
+ self.position.is_none()
+ || self.is_unreachable()
+ || self.is_pristine()
+ || self.is_filled(),
+ "you have to fill your block before switching"
+ );
+ // We cannot switch to a filled block
+ debug_assert!(
+ !self.func_ctx.blocks[block].filled,
+ "you cannot switch to a block which is already filled"
+ );
+
+ // Then we change the cursor position.
+ self.position = PackedOption::from(block);
+ }
+
+ /// Declares that all the predecessors of this block are known.
+ ///
+    /// Call this method on `block` as soon as the last branch instruction targeting it has been
+    /// created. Forgetting to call this method on every block will cause inconsistencies in the
+    /// produced functions.
+ pub fn seal_block(&mut self, block: Block) {
+ let side_effects = self.func_ctx.ssa.seal_block(block, self.func);
+ self.handle_ssa_side_effects(side_effects);
+ }
+
+ /// Effectively calls seal_block on all unsealed blocks in the function.
+ ///
+ /// It's more efficient to seal `Block`s as soon as possible, during
+ /// translation, but for frontends where this is impractical to do, this
+ /// function can be used at the end of translating all blocks to ensure
+ /// that everything is sealed.
+ pub fn seal_all_blocks(&mut self) {
+ let side_effects = self.func_ctx.ssa.seal_all_blocks(self.func);
+ self.handle_ssa_side_effects(side_effects);
+ }
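To illustrate why the sealing order matters, here is a hedged sketch of translating a `while` loop: the loop header can only be sealed once its back edge exists. (`translate_while_loop`, `header`, `body`, and `exit` are hypothetical names; the pattern mirrors `sample_function` in the tests below, which seals `block1` only after the jump from `block3`.)

```rust
use cranelift_codegen::ir::InstBuilder;
use cranelift_frontend::{FunctionBuilder, Variable};

// Hypothetical helper translating `while (x != 0) { ... }`. Assumes the builder
// is currently positioned in an unfilled block and `x` is a declared I32 variable.
fn translate_while_loop(builder: &mut FunctionBuilder, x: Variable) {
    let header = builder.create_block();
    let body = builder.create_block();
    let exit = builder.create_block();

    builder.ins().jump(header, &[]);

    builder.switch_to_block(header);
    // Don't seal `header` yet: the back edge from `body` hasn't been created.
    let cond = builder.use_var(x);
    builder.ins().brnz(cond, body, &[]);
    builder.ins().jump(exit, &[]);

    builder.switch_to_block(body);
    builder.seal_block(body); // its only predecessor (`header`) already branches here
    // ...translate the loop body here...
    builder.ins().jump(header, &[]); // the back edge
    builder.seal_block(header); // now every predecessor of `header` is known

    builder.switch_to_block(exit);
    builder.seal_block(exit);
    // The caller continues inserting instructions into `exit`.
}
```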
+
+    /// Before using a variable in `use_var`, you must declare its type with this method.
+ pub fn declare_var(&mut self, var: Variable, ty: Type) {
+ debug_assert_eq!(
+ self.func_ctx.types[var],
+ types::INVALID,
+ "variable {:?} is declared twice",
+ var
+ );
+ self.func_ctx.types[var] = ty;
+ }
+
+    /// Returns the Cranelift IR value corresponding to the use, at the current program
+    /// position, of a previously defined user variable.
+ pub fn use_var(&mut self, var: Variable) -> Value {
+ let (val, side_effects) = {
+ let ty = *self.func_ctx.types.get(var).unwrap_or_else(|| {
+ panic!(
+ "variable {:?} is used but its type has not been declared",
+ var
+ )
+ });
+ debug_assert_ne!(
+ ty,
+ types::INVALID,
+ "variable {:?} is used but its type has not been declared",
+ var
+ );
+ self.func_ctx
+ .ssa
+ .use_var(self.func, var, ty, self.position.unwrap())
+ };
+ self.handle_ssa_side_effects(side_effects);
+ val
+ }
+
+ /// Register a new definition of a user variable. The type of the value must be
+ /// the same as the type registered for the variable.
+ pub fn def_var(&mut self, var: Variable, val: Value) {
+ debug_assert_eq!(
+ *self.func_ctx.types.get(var).unwrap_or_else(|| panic!(
+ "variable {:?} is used but its type has not been declared",
+ var
+ )),
+ self.func.dfg.value_type(val),
+ "declared type of variable {:?} doesn't match type of value {}",
+ var,
+ val
+ );
+
+ self.func_ctx.ssa.def_var(var, val, self.position.unwrap());
+ }
+
+    /// Sets the label for the given `Value`.
+ ///
+ /// This will not do anything unless `func.dfg.collect_debug_info` is called first.
+ pub fn set_val_label(&mut self, val: Value, label: ValueLabel) {
+ if let Some(values_labels) = self.func.dfg.values_labels.as_mut() {
+ use crate::hash_map::Entry;
+
+ let start = ValueLabelStart {
+ from: self.srcloc,
+ label,
+ };
+
+ match values_labels.entry(val) {
+ Entry::Occupied(mut e) => match e.get_mut() {
+ ValueLabelAssignments::Starts(starts) => starts.push(start),
+ _ => panic!("Unexpected ValueLabelAssignments at this stage"),
+ },
+ Entry::Vacant(e) => {
+ e.insert(ValueLabelAssignments::Starts(vec![start]));
+ }
+ }
+ }
+ }
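A hedged sketch of labeling a value for debug info (assuming a `builder` from context; the label index 0 is an arbitrary example):

```rust
use cranelift_codegen::entity::EntityRef; // for ValueLabel::new
use cranelift_codegen::ir::types::I32;
use cranelift_codegen::ir::{InstBuilder, ValueLabel};

// Enable debug-info collection first, or set_val_label is a no-op.
builder.func.dfg.collect_debug_info();
let v = builder.ins().iconst(I32, 7);
builder.set_val_label(v, ValueLabel::new(0));
```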
+
+ /// Creates a jump table in the function, to be used by `br_table` instructions.
+ pub fn create_jump_table(&mut self, data: JumpTableData) -> JumpTable {
+ self.func.create_jump_table(data)
+ }
+
+ /// Creates a stack slot in the function, to be used by `stack_load`, `stack_store` and
+ /// `stack_addr` instructions.
+ pub fn create_stack_slot(&mut self, data: StackSlotData) -> StackSlot {
+ self.func.create_stack_slot(data)
+ }
+
+ /// Adds a signature which can later be used to declare an external function import.
+ pub fn import_signature(&mut self, signature: Signature) -> SigRef {
+ self.func.import_signature(signature)
+ }
+
+ /// Declare an external function import.
+ pub fn import_function(&mut self, data: ExtFuncData) -> FuncRef {
+ self.func.import_function(data)
+ }
+
+ /// Declares a global value accessible to the function.
+ pub fn create_global_value(&mut self, data: GlobalValueData) -> GlobalValue {
+ self.func.create_global_value(data)
+ }
+
+ /// Declares a heap accessible to the function.
+ pub fn create_heap(&mut self, data: HeapData) -> Heap {
+ self.func.create_heap(data)
+ }
+
+ /// Returns an object with the [`InstBuilder`](cranelift_codegen::ir::InstBuilder)
+    /// trait that allows you to conveniently append instructions to the current `Block` being built.
+ pub fn ins<'short>(&'short mut self) -> FuncInstBuilder<'short, 'a> {
+ let block = self
+ .position
+ .expect("Please call switch_to_block before inserting instructions");
+ FuncInstBuilder::new(self, block)
+ }
+
+ /// Make sure that the current block is inserted in the layout.
+ pub fn ensure_inserted_block(&mut self) {
+ let block = self.position.unwrap();
+ if self.func_ctx.blocks[block].pristine {
+ if !self.func.layout.is_block_inserted(block) {
+ self.func.layout.append_block(block);
+ }
+ self.func_ctx.blocks[block].pristine = false;
+ } else {
+ debug_assert!(
+ !self.func_ctx.blocks[block].filled,
+ "you cannot add an instruction to a block already filled"
+ );
+ }
+ }
+
+ /// Returns a `FuncCursor` pointed at the current position ready for inserting instructions.
+ ///
+ /// This can be used to insert SSA code that doesn't need to access locals and that doesn't
+ /// need to know about `FunctionBuilder` at all.
+ pub fn cursor(&mut self) -> FuncCursor {
+ self.ensure_inserted_block();
+ FuncCursor::new(self.func)
+ .with_srcloc(self.srcloc)
+ .at_bottom(self.position.unwrap())
+ }
+
+ /// Append parameters to the given `Block` corresponding to the function
+ /// parameters. This can be used to set up the block parameters for the
+ /// entry block.
+ pub fn append_block_params_for_function_params(&mut self, block: Block) {
+ debug_assert!(
+ !self.func_ctx.ssa.has_any_predecessors(block),
+ "block parameters for function parameters should only be added to the entry block"
+ );
+
+ // These parameters count as "user" parameters here because they aren't
+ // inserted by the SSABuilder.
+ let user_param_count = &mut self.func_ctx.blocks[block].user_param_count;
+ for argtyp in &self.func.signature.params {
+ *user_param_count += 1;
+ self.func.dfg.append_block_param(block, argtyp.value_type);
+ }
+ }
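A typical entry-block setup then looks like this hedged sketch (assuming a `builder` and a declared variable `x`, as in the examples elsewhere in this commit):

```rust
let entry = builder.create_block();
builder.append_block_params_for_function_params(entry);
builder.switch_to_block(entry);
builder.seal_block(entry); // the entry block has no predecessors

// Bind the first incoming function parameter to the source-level variable `x`.
let param0 = builder.block_params(entry)[0];
builder.def_var(x, param0);
```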
+
+ /// Append parameters to the given `Block` corresponding to the function
+ /// return values. This can be used to set up the block parameters for a
+ /// function exit block.
+ pub fn append_block_params_for_function_returns(&mut self, block: Block) {
+ // These parameters count as "user" parameters here because they aren't
+ // inserted by the SSABuilder.
+ let user_param_count = &mut self.func_ctx.blocks[block].user_param_count;
+ for argtyp in &self.func.signature.returns {
+ *user_param_count += 1;
+ self.func.dfg.append_block_param(block, argtyp.value_type);
+ }
+ }
+
+ /// Declare that translation of the current function is complete. This
+ /// resets the state of the `FunctionBuilder` in preparation to be used
+ /// for another function.
+ pub fn finalize(&mut self) {
+ // Check that all the `Block`s are filled and sealed.
+ debug_assert!(
+ self.func_ctx.blocks.iter().all(
+ |(block, block_data)| block_data.pristine || self.func_ctx.ssa.is_sealed(block)
+ ),
+ "all blocks should be sealed before dropping a FunctionBuilder"
+ );
+ debug_assert!(
+ self.func_ctx
+ .blocks
+ .values()
+ .all(|block_data| block_data.pristine || block_data.filled),
+ "all blocks should be filled before dropping a FunctionBuilder"
+ );
+
+ // In debug mode, check that all blocks are valid basic blocks.
+ #[cfg(debug_assertions)]
+ {
+ // Iterate manually to provide more helpful error messages.
+ for block in self.func_ctx.blocks.keys() {
+ if let Err((inst, _msg)) = self.func.is_block_basic(block) {
+ let inst_str = self.func.dfg.display_inst(inst, None);
+ panic!("{} failed basic block invariants on {}", block, inst_str);
+ }
+ }
+ }
+
+ // Clear the state (but preserve the allocated buffers) in preparation
+        // for translating another function.
+ self.func_ctx.clear();
+
+ // Reset srcloc and position to initial states.
+ self.srcloc = Default::default();
+ self.position = Default::default();
+ }
+}
+
+/// All the functions documented in the previous block are write-only and help you build valid
+/// Cranelift IR functions via multiple debug asserts. However, you might need to improve the
+/// performance of your translation, or perform more complex transformations on your Cranelift IR
+/// function. The functions below help you inspect the function you're creating and modify it
+/// in ways that can be unsafe if used incorrectly.
+impl<'a> FunctionBuilder<'a> {
+    /// Retrieves all the parameters of a `Block`, as currently inferred from the jump
+    /// instructions that target it and from SSA construction.
+ pub fn block_params(&self, block: Block) -> &[Value] {
+ self.func.dfg.block_params(block)
+ }
+
+ /// Retrieves the signature with reference `sigref` previously added with `import_signature`.
+ pub fn signature(&self, sigref: SigRef) -> Option<&Signature> {
+ self.func.dfg.signatures.get(sigref)
+ }
+
+ /// Creates a parameter for a specific `Block` by appending it to the list of already existing
+ /// parameters.
+ ///
+ /// **Note:** this function has to be called at the creation of the `Block` before adding
+ /// instructions to it, otherwise this could interfere with SSA construction.
+ pub fn append_block_param(&mut self, block: Block, ty: Type) -> Value {
+ debug_assert!(
+ self.func_ctx.blocks[block].pristine,
+ "You can't add block parameters after adding any instruction"
+ );
+ debug_assert_eq!(
+ self.func_ctx.blocks[block].user_param_count,
+ self.func.dfg.num_block_params(block)
+ );
+ self.func_ctx.blocks[block].user_param_count += 1;
+ self.func.dfg.append_block_param(block, ty)
+ }
+
+ /// Returns the result values of an instruction.
+ pub fn inst_results(&self, inst: Inst) -> &[Value] {
+ self.func.dfg.inst_results(inst)
+ }
+
+ /// Changes the destination of a jump instruction after creation.
+ ///
+    /// **Note:** You are responsible for maintaining coherence with the arguments of
+ /// other jump instructions.
+ pub fn change_jump_destination(&mut self, inst: Inst, new_dest: Block) {
+ let old_dest = self.func.dfg[inst]
+ .branch_destination_mut()
+ .expect("you want to change the jump destination of a non-jump instruction");
+ let pred = self.func_ctx.ssa.remove_block_predecessor(*old_dest, inst);
+ *old_dest = new_dest;
+ self.func_ctx
+ .ssa
+ .declare_block_predecessor(new_dest, pred, inst);
+ }
+
+ /// Returns `true` if and only if the current `Block` is sealed and has no predecessors declared.
+ ///
+ /// The entry block of a function is never unreachable.
+ pub fn is_unreachable(&self) -> bool {
+ let is_entry = match self.func.layout.entry_block() {
+ None => false,
+ Some(entry) => self.position.unwrap() == entry,
+ };
+ !is_entry
+ && self.func_ctx.ssa.is_sealed(self.position.unwrap())
+ && !self
+ .func_ctx
+ .ssa
+ .has_any_predecessors(self.position.unwrap())
+ }
+
+ /// Returns `true` if and only if no instructions have been added since the last call to
+ /// `switch_to_block`.
+ pub fn is_pristine(&self) -> bool {
+ self.func_ctx.blocks[self.position.unwrap()].pristine
+ }
+
+ /// Returns `true` if and only if a terminator instruction has been inserted since the
+ /// last call to `switch_to_block`.
+ pub fn is_filled(&self) -> bool {
+ self.func_ctx.blocks[self.position.unwrap()].filled
+ }
+
+ /// Returns a displayable object for the function as it is.
+ ///
+ /// Useful for debug purposes. Use it with `None` for standard printing.
+ // Clippy thinks the lifetime that follows is needless, but rustc needs it
+ #[cfg_attr(feature = "cargo-clippy", allow(clippy::needless_lifetimes))]
+ pub fn display<'b, I: Into<Option<&'b dyn TargetIsa>>>(&'b self, isa: I) -> DisplayFunction {
+ self.func.display(isa)
+ }
+}
+
+/// Helper functions
+impl<'a> FunctionBuilder<'a> {
+ /// Calls libc.memcpy
+ ///
+    /// Copies `size` bytes from `src` to `dest`, assuming that the `size` bytes
+    /// starting at `src` don't overlap the destination. If `dest` and `src` do
+    /// overlap, the behavior is undefined. Applications in which `dest` and `src`
+    /// might overlap should use `call_memmove` instead.
+ pub fn call_memcpy(
+ &mut self,
+ config: TargetFrontendConfig,
+ dest: Value,
+ src: Value,
+ size: Value,
+ ) {
+ let pointer_type = config.pointer_type();
+ let signature = {
+ let mut s = Signature::new(config.default_call_conv);
+ s.params.push(AbiParam::new(pointer_type));
+ s.params.push(AbiParam::new(pointer_type));
+ s.params.push(AbiParam::new(pointer_type));
+ self.import_signature(s)
+ };
+
+ let libc_memcpy = self.import_function(ExtFuncData {
+ name: ExternalName::LibCall(LibCall::Memcpy),
+ signature,
+ colocated: false,
+ });
+
+ self.ins().call(libc_memcpy, &[dest, src, size]);
+ }
+
+ /// Optimised memcpy or memmove for small copies.
+ ///
+ /// # Codegen safety
+ ///
+ /// The following properties must hold to prevent UB:
+ ///
+    /// * `src_align` and `dest_align` are upper bounds on the alignment of `src` and `dest`, respectively.
+    /// * If `non_overlapping` is true, then the source and destination regions must indeed not overlap.
+ pub fn emit_small_memory_copy(
+ &mut self,
+ config: TargetFrontendConfig,
+ dest: Value,
+ src: Value,
+ size: u64,
+ dest_align: u8,
+ src_align: u8,
+ non_overlapping: bool,
+ ) {
+        // Currently the result of guesswork, not actual profiling.
+ const THRESHOLD: u64 = 4;
+
+ if size == 0 {
+ return;
+ }
+
+ let access_size = greatest_divisible_power_of_two(size);
+ assert!(
+ access_size.is_power_of_two(),
+ "`size` is not a power of two"
+ );
+ assert!(
+ access_size >= u64::from(::core::cmp::min(src_align, dest_align)),
+ "`size` is smaller than `dest` and `src`'s alignment value."
+ );
+
+ let (access_size, int_type) = if access_size <= 8 {
+ (access_size, Type::int((access_size * 8) as u16).unwrap())
+ } else {
+ (8, types::I64)
+ };
+
+ let load_and_store_amount = size / access_size;
+
+ if load_and_store_amount > THRESHOLD {
+ let size_value = self.ins().iconst(config.pointer_type(), size as i64);
+ if non_overlapping {
+ self.call_memcpy(config, dest, src, size_value);
+ } else {
+ self.call_memmove(config, dest, src, size_value);
+ }
+ return;
+ }
+
+ let mut flags = MemFlags::new();
+ flags.set_aligned();
+
+        // Load all of the memory first. This is necessary in case `dest` overlaps `src`.
+ // It can also improve performance a bit.
+ let registers: smallvec::SmallVec<[_; THRESHOLD as usize]> = (0..load_and_store_amount)
+ .map(|i| {
+ let offset = (access_size * i) as i32;
+ (self.ins().load(int_type, flags, src, offset), offset)
+ })
+ .collect();
+
+ for (value, offset) in registers {
+ self.ins().store(flags, value, dest, offset);
+ }
+ }
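As a hedged usage sketch (with `builder`, `target`, `dest`, and `src` assumed from context, as in the tests below), the threshold logic works out as follows:

```rust
// 24 bytes, both pointers 8-byte aligned, known non-overlapping:
// greatest_divisible_power_of_two(24) == 8, so 24 / 8 == 3 <= THRESHOLD and the
// copy expands inline into three load.i64/store pairs.
builder.emit_small_memory_copy(target.frontend_config(), dest, src, 24, 8, 8, true);

// 8192 bytes: access_size caps at 8, so 8192 / 8 == 1024 > THRESHOLD and a call
// to the Memcpy libcall is emitted instead.
builder.emit_small_memory_copy(target.frontend_config(), dest, src, 8192, 8, 8, true);
```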
+
+ /// Calls libc.memset
+ ///
+ /// Writes `size` bytes of i8 value `ch` to memory starting at `buffer`.
+ pub fn call_memset(
+ &mut self,
+ config: TargetFrontendConfig,
+ buffer: Value,
+ ch: Value,
+ size: Value,
+ ) {
+ let pointer_type = config.pointer_type();
+ let signature = {
+ let mut s = Signature::new(config.default_call_conv);
+ s.params.push(AbiParam::new(pointer_type));
+ s.params.push(AbiParam::new(types::I32));
+ s.params.push(AbiParam::new(pointer_type));
+ self.import_signature(s)
+ };
+
+ let libc_memset = self.import_function(ExtFuncData {
+ name: ExternalName::LibCall(LibCall::Memset),
+ signature,
+ colocated: false,
+ });
+
+ let ch = self.ins().uextend(types::I32, ch);
+ self.ins().call(libc_memset, &[buffer, ch, size]);
+ }
+
+    /// Optimised memset for small sizes.
+ ///
+ /// Writes `size` bytes of value `ch` to memory starting at `buffer`.
+ pub fn emit_small_memset(
+ &mut self,
+ config: TargetFrontendConfig,
+ buffer: Value,
+ ch: u8,
+ size: u64,
+ buffer_align: u8,
+ ) {
+        // Currently the result of guesswork, not actual profiling.
+ const THRESHOLD: u64 = 4;
+
+ if size == 0 {
+ return;
+ }
+
+ let access_size = greatest_divisible_power_of_two(size);
+ assert!(
+ access_size.is_power_of_two(),
+ "`size` is not a power of two"
+ );
+ assert!(
+ access_size >= u64::from(buffer_align),
+ "`size` is smaller than `dest` and `src`'s alignment value."
+ );
+
+ let (access_size, int_type) = if access_size <= 8 {
+ (access_size, Type::int((access_size * 8) as u16).unwrap())
+ } else {
+ (8, types::I64)
+ };
+
+ let load_and_store_amount = size / access_size;
+
+ if load_and_store_amount > THRESHOLD {
+ let ch = self.ins().iconst(types::I8, i64::from(ch));
+ let size = self.ins().iconst(config.pointer_type(), size as i64);
+ self.call_memset(config, buffer, ch, size);
+ } else {
+ let mut flags = MemFlags::new();
+ flags.set_aligned();
+
+ let ch = u64::from(ch);
+ let raw_value = if int_type == types::I64 {
+ (ch << 32) | (ch << 16) | (ch << 8) | ch
+ } else if int_type == types::I32 {
+ (ch << 16) | (ch << 8) | ch
+ } else if int_type == types::I16 {
+ (ch << 8) | ch
+ } else {
+ assert_eq!(int_type, types::I8);
+ ch
+ };
+
+ let value = self.ins().iconst(int_type, raw_value as i64);
+ for i in 0..load_and_store_amount {
+ let offset = (access_size * i) as i32;
+ self.ins().store(flags, value, buffer, offset);
+ }
+ }
+ }
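A hedged usage sketch (again with `builder`, `target`, and an 8-byte-aligned pointer value `dest` assumed from context):

```rust
// greatest_divisible_power_of_two(8) == 8, so a single aligned I64 store is
// emitted inline rather than a call to the Memset libcall.
builder.emit_small_memset(target.frontend_config(), dest, 1, 8, 8);
```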
+
+ /// Calls libc.memmove
+ ///
+ /// Copies `size` bytes from memory starting at `source` to memory starting
+ /// at `dest`. `source` is always read before writing to `dest`.
+ pub fn call_memmove(
+ &mut self,
+ config: TargetFrontendConfig,
+ dest: Value,
+ source: Value,
+ size: Value,
+ ) {
+ let pointer_type = config.pointer_type();
+ let signature = {
+ let mut s = Signature::new(config.default_call_conv);
+ s.params.push(AbiParam::new(pointer_type));
+ s.params.push(AbiParam::new(pointer_type));
+ s.params.push(AbiParam::new(pointer_type));
+ self.import_signature(s)
+ };
+
+ let libc_memmove = self.import_function(ExtFuncData {
+ name: ExternalName::LibCall(LibCall::Memmove),
+ signature,
+ colocated: false,
+ });
+
+ self.ins().call(libc_memmove, &[dest, source, size]);
+ }
+}
+
+fn greatest_divisible_power_of_two(size: u64) -> u64 {
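+    // Two's-complement trick: `size & -size` isolates the lowest set bit of `size`,
+    // which is the largest power of two that evenly divides it.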
+ (size as i64 & -(size as i64)) as u64
+}
+
+// Helper functions
+impl<'a> FunctionBuilder<'a> {
+ /// A Block is 'filled' when a terminator instruction is present.
+ fn fill_current_block(&mut self) {
+ self.func_ctx.blocks[self.position.unwrap()].filled = true;
+ }
+
+ fn declare_successor(&mut self, dest_block: Block, jump_inst: Inst) {
+ self.func_ctx
+ .ssa
+ .declare_block_predecessor(dest_block, self.position.unwrap(), jump_inst);
+ }
+
+ fn handle_ssa_side_effects(&mut self, side_effects: SideEffects) {
+ for split_block in side_effects.split_blocks_created {
+ self.func_ctx.blocks[split_block].filled = true
+ }
+ for modified_block in side_effects.instructions_added_to_blocks {
+ self.func_ctx.blocks[modified_block].pristine = false
+ }
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ use super::greatest_divisible_power_of_two;
+ use crate::frontend::{FunctionBuilder, FunctionBuilderContext};
+ use crate::Variable;
+ use alloc::string::ToString;
+ use cranelift_codegen::entity::EntityRef;
+ use cranelift_codegen::ir::types::*;
+ use cranelift_codegen::ir::{AbiParam, ExternalName, Function, InstBuilder, Signature};
+ use cranelift_codegen::isa::CallConv;
+ use cranelift_codegen::settings;
+ use cranelift_codegen::verifier::verify_function;
+
+ fn sample_function(lazy_seal: bool) {
+ let mut sig = Signature::new(CallConv::SystemV);
+ sig.returns.push(AbiParam::new(I32));
+ sig.params.push(AbiParam::new(I32));
+
+ let mut fn_ctx = FunctionBuilderContext::new();
+ let mut func = Function::with_name_signature(ExternalName::testcase("sample"), sig);
+ {
+ let mut builder = FunctionBuilder::new(&mut func, &mut fn_ctx);
+
+ let block0 = builder.create_block();
+ let block1 = builder.create_block();
+ let block2 = builder.create_block();
+ let block3 = builder.create_block();
+ let x = Variable::new(0);
+ let y = Variable::new(1);
+ let z = Variable::new(2);
+ builder.declare_var(x, I32);
+ builder.declare_var(y, I32);
+ builder.declare_var(z, I32);
+ builder.append_block_params_for_function_params(block0);
+
+ builder.switch_to_block(block0);
+ if !lazy_seal {
+ builder.seal_block(block0);
+ }
+ {
+ let tmp = builder.block_params(block0)[0]; // the first function parameter
+ builder.def_var(x, tmp);
+ }
+ {
+ let tmp = builder.ins().iconst(I32, 2);
+ builder.def_var(y, tmp);
+ }
+ {
+ let arg1 = builder.use_var(x);
+ let arg2 = builder.use_var(y);
+ let tmp = builder.ins().iadd(arg1, arg2);
+ builder.def_var(z, tmp);
+ }
+ builder.ins().jump(block1, &[]);
+
+ builder.switch_to_block(block1);
+ {
+ let arg1 = builder.use_var(y);
+ let arg2 = builder.use_var(z);
+ let tmp = builder.ins().iadd(arg1, arg2);
+ builder.def_var(z, tmp);
+ }
+ {
+ let arg = builder.use_var(y);
+ builder.ins().brnz(arg, block3, &[]);
+ }
+ builder.ins().jump(block2, &[]);
+
+ builder.switch_to_block(block2);
+ if !lazy_seal {
+ builder.seal_block(block2);
+ }
+ {
+ let arg1 = builder.use_var(z);
+ let arg2 = builder.use_var(x);
+ let tmp = builder.ins().isub(arg1, arg2);
+ builder.def_var(z, tmp);
+ }
+ {
+ let arg = builder.use_var(y);
+ builder.ins().return_(&[arg]);
+ }
+
+ builder.switch_to_block(block3);
+ if !lazy_seal {
+ builder.seal_block(block3);
+ }
+
+ {
+ let arg1 = builder.use_var(y);
+ let arg2 = builder.use_var(x);
+ let tmp = builder.ins().isub(arg1, arg2);
+ builder.def_var(y, tmp);
+ }
+ builder.ins().jump(block1, &[]);
+ if !lazy_seal {
+ builder.seal_block(block1);
+ }
+
+ if lazy_seal {
+ builder.seal_all_blocks();
+ }
+
+ builder.finalize();
+ }
+
+ let flags = settings::Flags::new(settings::builder());
+ // println!("{}", func.display(None));
+ if let Err(errors) = verify_function(&func, &flags) {
+ panic!("{}\n{}", func.display(None), errors)
+ }
+ }
+
+ #[test]
+ fn sample() {
+ sample_function(false)
+ }
+
+ #[test]
+ fn sample_with_lazy_seal() {
+ sample_function(true)
+ }
+
+ #[test]
+ fn memcpy() {
+ use core::str::FromStr;
+ use cranelift_codegen::{isa, settings};
+
+ let shared_builder = settings::builder();
+ let shared_flags = settings::Flags::new(shared_builder);
+
+ let triple =
+ ::target_lexicon::Triple::from_str("riscv32").expect("Couldn't create riscv32 triple");
+
+ let target = isa::lookup(triple)
+ .ok()
+ .map(|b| b.finish(shared_flags))
+ .expect("This test requires riscv32 support.");
+
+ let mut sig = Signature::new(target.default_call_conv());
+ sig.returns.push(AbiParam::new(I32));
+
+ let mut fn_ctx = FunctionBuilderContext::new();
+ let mut func = Function::with_name_signature(ExternalName::testcase("sample"), sig);
+ {
+ let mut builder = FunctionBuilder::new(&mut func, &mut fn_ctx);
+
+ let block0 = builder.create_block();
+ let x = Variable::new(0);
+ let y = Variable::new(1);
+ let z = Variable::new(2);
+ builder.declare_var(x, target.pointer_type());
+ builder.declare_var(y, target.pointer_type());
+ builder.declare_var(z, I32);
+ builder.append_block_params_for_function_params(block0);
+ builder.switch_to_block(block0);
+
+ let src = builder.use_var(x);
+ let dest = builder.use_var(y);
+ let size = builder.use_var(y);
+ builder.call_memcpy(target.frontend_config(), dest, src, size);
+ builder.ins().return_(&[size]);
+
+ builder.seal_all_blocks();
+ builder.finalize();
+ }
+
+ assert_eq!(
+ func.display(None).to_string(),
+ "function %sample() -> i32 system_v {
+ sig0 = (i32, i32, i32) system_v
+ fn0 = %Memcpy sig0
+
+block0:
+ v3 = iconst.i32 0
+ v1 -> v3
+ v2 = iconst.i32 0
+ v0 -> v2
+ call fn0(v1, v0, v1)
+ return v1
+}
+"
+ );
+ }
+
+ #[test]
+ fn small_memcpy() {
+ use core::str::FromStr;
+ use cranelift_codegen::{isa, settings};
+
+ let shared_builder = settings::builder();
+ let shared_flags = settings::Flags::new(shared_builder);
+
+ let triple =
+ ::target_lexicon::Triple::from_str("riscv32").expect("Couldn't create riscv32 triple");
+
+ let target = isa::lookup(triple)
+ .ok()
+ .map(|b| b.finish(shared_flags))
+ .expect("This test requires riscv32 support.");
+
+ let mut sig = Signature::new(target.default_call_conv());
+ sig.returns.push(AbiParam::new(I32));
+
+ let mut fn_ctx = FunctionBuilderContext::new();
+ let mut func = Function::with_name_signature(ExternalName::testcase("sample"), sig);
+ {
+ let mut builder = FunctionBuilder::new(&mut func, &mut fn_ctx);
+
+ let block0 = builder.create_block();
+ let x = Variable::new(0);
+ let y = Variable::new(16);
+ builder.declare_var(x, target.pointer_type());
+ builder.declare_var(y, target.pointer_type());
+ builder.append_block_params_for_function_params(block0);
+ builder.switch_to_block(block0);
+
+ let src = builder.use_var(x);
+ let dest = builder.use_var(y);
+ let size = 8;
+ builder.emit_small_memory_copy(target.frontend_config(), dest, src, size, 8, 8, true);
+ builder.ins().return_(&[dest]);
+
+ builder.seal_all_blocks();
+ builder.finalize();
+ }
+
+ assert_eq!(
+ func.display(None).to_string(),
+ "function %sample() -> i32 system_v {
+block0:
+ v4 = iconst.i32 0
+ v1 -> v4
+ v3 = iconst.i32 0
+ v0 -> v3
+ v2 = load.i64 aligned v0
+ store aligned v2, v1
+ return v1
+}
+"
+ );
+ }
+
+ #[test]
+ fn not_so_small_memcpy() {
+ use core::str::FromStr;
+ use cranelift_codegen::{isa, settings};
+
+ let shared_builder = settings::builder();
+ let shared_flags = settings::Flags::new(shared_builder);
+
+ let triple =
+ ::target_lexicon::Triple::from_str("riscv32").expect("Couldn't create riscv32 triple");
+
+ let target = isa::lookup(triple)
+ .ok()
+ .map(|b| b.finish(shared_flags))
+ .expect("This test requires riscv32 support.");
+
+ let mut sig = Signature::new(target.default_call_conv());
+ sig.returns.push(AbiParam::new(I32));
+
+ let mut fn_ctx = FunctionBuilderContext::new();
+ let mut func = Function::with_name_signature(ExternalName::testcase("sample"), sig);
+ {
+ let mut builder = FunctionBuilder::new(&mut func, &mut fn_ctx);
+
+ let block0 = builder.create_block();
+ let x = Variable::new(0);
+ let y = Variable::new(16);
+ builder.declare_var(x, target.pointer_type());
+ builder.declare_var(y, target.pointer_type());
+ builder.append_block_params_for_function_params(block0);
+ builder.switch_to_block(block0);
+
+ let src = builder.use_var(x);
+ let dest = builder.use_var(y);
+ let size = 8192;
+ builder.emit_small_memory_copy(target.frontend_config(), dest, src, size, 8, 8, true);
+ builder.ins().return_(&[dest]);
+
+ builder.seal_all_blocks();
+ builder.finalize();
+ }
+
+ assert_eq!(
+ func.display(None).to_string(),
+ "function %sample() -> i32 system_v {
+ sig0 = (i32, i32, i32) system_v
+ fn0 = %Memcpy sig0
+
+block0:
+ v4 = iconst.i32 0
+ v1 -> v4
+ v3 = iconst.i32 0
+ v0 -> v3
+ v2 = iconst.i32 8192
+ call fn0(v1, v0, v2)
+ return v1
+}
+"
+ );
+ }
+
+ #[test]
+ fn small_memset() {
+ use core::str::FromStr;
+ use cranelift_codegen::{isa, settings};
+
+ let shared_builder = settings::builder();
+ let shared_flags = settings::Flags::new(shared_builder);
+
+ let triple =
+ ::target_lexicon::Triple::from_str("riscv32").expect("Couldn't create riscv32 triple");
+
+ let target = isa::lookup(triple)
+ .ok()
+ .map(|b| b.finish(shared_flags))
+ .expect("This test requires riscv32 support.");
+
+ let mut sig = Signature::new(target.default_call_conv());
+ sig.returns.push(AbiParam::new(I32));
+
+ let mut fn_ctx = FunctionBuilderContext::new();
+ let mut func = Function::with_name_signature(ExternalName::testcase("sample"), sig);
+ {
+ let mut builder = FunctionBuilder::new(&mut func, &mut fn_ctx);
+
+ let block0 = builder.create_block();
+ let y = Variable::new(16);
+ builder.declare_var(y, target.pointer_type());
+ builder.append_block_params_for_function_params(block0);
+ builder.switch_to_block(block0);
+
+ let dest = builder.use_var(y);
+ let size = 8;
+ builder.emit_small_memset(target.frontend_config(), dest, 1, size, 8);
+ builder.ins().return_(&[dest]);
+
+ builder.seal_all_blocks();
+ builder.finalize();
+ }
+
+ assert_eq!(
+ func.display(None).to_string(),
+ "function %sample() -> i32 system_v {
+block0:
+ v2 = iconst.i32 0
+ v0 -> v2
+ v1 = iconst.i64 0x0001_0001_0101
+ store aligned v1, v0
+ return v0
+}
+"
+ );
+ }
+
+ #[test]
+ fn not_so_small_memset() {
+ use core::str::FromStr;
+ use cranelift_codegen::{isa, settings};
+
+ let shared_builder = settings::builder();
+ let shared_flags = settings::Flags::new(shared_builder);
+
+ let triple =
+ ::target_lexicon::Triple::from_str("riscv32").expect("Couldn't create riscv32 triple");
+
+ let target = isa::lookup(triple)
+ .ok()
+ .map(|b| b.finish(shared_flags))
+ .expect("This test requires riscv32 support.");
+
+ let mut sig = Signature::new(target.default_call_conv());
+ sig.returns.push(AbiParam::new(I32));
+
+ let mut fn_ctx = FunctionBuilderContext::new();
+ let mut func = Function::with_name_signature(ExternalName::testcase("sample"), sig);
+ {
+ let mut builder = FunctionBuilder::new(&mut func, &mut fn_ctx);
+
+ let block0 = builder.create_block();
+ let y = Variable::new(16);
+ builder.declare_var(y, target.pointer_type());
+ builder.append_block_params_for_function_params(block0);
+ builder.switch_to_block(block0);
+
+ let dest = builder.use_var(y);
+ let size = 8192;
+ builder.emit_small_memset(target.frontend_config(), dest, 1, size, 8);
+ builder.ins().return_(&[dest]);
+
+ builder.seal_all_blocks();
+ builder.finalize();
+ }
+
+ assert_eq!(
+ func.display(None).to_string(),
+ "function %sample() -> i32 system_v {
+ sig0 = (i32, i32, i32) system_v
+ fn0 = %Memset sig0
+
+block0:
+ v4 = iconst.i32 0
+ v0 -> v4
+ v1 = iconst.i8 1
+ v2 = iconst.i32 8192
+ v3 = uextend.i32 v1
+ call fn0(v0, v3, v2)
+ return v0
+}
+"
+ );
+ }
+
+ #[test]
+ fn undef_vector_vars() {
+ let mut sig = Signature::new(CallConv::SystemV);
+ sig.returns.push(AbiParam::new(I8X16));
+ sig.returns.push(AbiParam::new(B8X16));
+ sig.returns.push(AbiParam::new(F32X4));
+
+ let mut fn_ctx = FunctionBuilderContext::new();
+ let mut func = Function::with_name_signature(ExternalName::testcase("sample"), sig);
+ {
+ let mut builder = FunctionBuilder::new(&mut func, &mut fn_ctx);
+
+ let block0 = builder.create_block();
+ let a = Variable::new(0);
+ let b = Variable::new(1);
+ let c = Variable::new(2);
+ builder.declare_var(a, I8X16);
+ builder.declare_var(b, B8X16);
+ builder.declare_var(c, F32X4);
+ builder.switch_to_block(block0);
+
+ let a = builder.use_var(a);
+ let b = builder.use_var(b);
+ let c = builder.use_var(c);
+ builder.ins().return_(&[a, b, c]);
+
+ builder.seal_all_blocks();
+ builder.finalize();
+ }
+
+ assert_eq!(
+ func.display(None).to_string(),
+ "function %sample() -> i8x16, b8x16, f32x4 system_v {
+ const0 = 0x00000000000000000000000000000000
+
+block0:
+ v5 = f32const 0.0
+ v6 = splat.f32x4 v5
+ v2 -> v6
+ v4 = vconst.b8x16 const0
+ v1 -> v4
+ v3 = vconst.i8x16 const0
+ v0 -> v3
+ return v0, v1, v2
+}
+"
+ );
+ }
+
+ #[test]
+ fn test_greatest_divisible_power_of_two() {
+ assert_eq!(64, greatest_divisible_power_of_two(64));
+ assert_eq!(16, greatest_divisible_power_of_two(48));
+ assert_eq!(8, greatest_divisible_power_of_two(24));
+ assert_eq!(1, greatest_divisible_power_of_two(25));
+ }
+}
diff --git a/third_party/rust/cranelift-frontend/src/lib.rs b/third_party/rust/cranelift-frontend/src/lib.rs
new file mode 100644
index 0000000000..40bd27bfbc
--- /dev/null
+++ b/third_party/rust/cranelift-frontend/src/lib.rs
@@ -0,0 +1,206 @@
+//! Cranelift IR builder library.
+//!
+//! Provides a straightforward way to create a Cranelift IR function and fill it with instructions
+//! corresponding to your source program written in another language.
+//!
+//! To get started, create a [`FunctionBuilderContext`](struct.FunctionBuilderContext.html) and
+//! pass it as an argument to a [`FunctionBuilder`](struct.FunctionBuilder.html).
+//!
+//! # Mutable variables and Cranelift IR values
+//!
+//! The most interesting feature of this API is that it provides a single, uniform way to handle
+//! all your mutable variables. The [`FunctionBuilder`](struct.FunctionBuilder.html) struct works
+//! with a `Variable` type that serves as an index for your source language's variables. Then, by
+//! calling
+//! [`declare_var`](struct.FunctionBuilder.html#method.declare_var),
+//! [`def_var`](struct.FunctionBuilder.html#method.def_var) and
+//! [`use_var`](struct.FunctionBuilder.html#method.use_var), the
+//! [`FunctionBuilder`](struct.FunctionBuilder.html) will create for you all the Cranelift IR
+//! values corresponding to your variables.
+//!
+//! This API has been designed to help you translate your mutable variables into
+//! [`SSA`](https://en.wikipedia.org/wiki/Static_single_assignment_form) form.
+//! [`use_var`](struct.FunctionBuilder.html#method.use_var) will return the Cranelift IR value
+//! that corresponds to your mutable variable at a precise point in the program. However, if you know
+//! beforehand that one of your variables is defined only once, for instance if it is the result
+//! of an intermediate expression in an expression-based language, then you can represent it
+//! directly by the Cranelift IR value returned by the instruction builder. Using the
+//! [`use_var`](struct.FunctionBuilder.html#method.use_var) API for such an immutable variable
+//! would also work, but with a slight additional overhead (the SSA algorithm does not know
+//! beforehand whether a variable is immutable or not).
+//!
+//! The moral is that you should use these three functions to handle all your mutable variables,
+//! even those that are not present in the source code but artifacts of the translation. It is up
+//! to you to keep a mapping between the mutable variables of your language and their `Variable`
+//! index that is used by Cranelift. Caution: since `Variable` is used by Cranelift to index an
+//! array containing information about your mutable variables, when you create a new `Variable`
+//! with [`Variable::new(var_index)`] you should make sure that `var_index` comes from a counter
+//! incremented by 1 each time you encounter a new mutable variable, as shown below.
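+//!
+//! For instance, a minimal sketch of such a counter (the `fresh_var` helper is illustrative and
+//! not part of this crate's API):
+//!
+//! ```rust
+//! use cranelift_codegen::entity::EntityRef;
+//! use cranelift_frontend::Variable;
+//!
+//! // Hand out a fresh, dense index for each newly encountered source variable.
+//! let mut next_index = 0;
+//! let mut fresh_var = || {
+//!     let var = Variable::new(next_index);
+//!     next_index += 1;
+//!     var
+//! };
+//! let _x = fresh_var(); // Variable 0
+//! let _y = fresh_var(); // Variable 1
+//! ```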
+//!
+//! # Example
+//!
+//! Here is a pseudo-program we want to transform into Cranelift IR:
+//!
+//! ```clif
+//! function(x) {
+//! x, y, z : i32
+//! block0:
+//! y = 2;
+//! z = x + y;
+//! jump block1
+//! block1:
+//! z = z + y;
+//! brnz y, block3;
+//! jump block2
+//! block2:
+//! z = z - x;
+//! return y
+//! block3:
+//! y = y - x
+//! jump block1
+//! }
+//! ```
+//!
+//! Here is how you build the corresponding Cranelift IR function using `FunctionBuilderContext`:
+//!
+//! ```rust
+//! extern crate cranelift_codegen;
+//! extern crate cranelift_frontend;
+//!
+//! use cranelift_codegen::entity::EntityRef;
+//! use cranelift_codegen::ir::types::*;
+//! use cranelift_codegen::ir::{AbiParam, ExternalName, Function, InstBuilder, Signature};
+//! use cranelift_codegen::isa::CallConv;
+//! use cranelift_codegen::settings;
+//! use cranelift_codegen::verifier::verify_function;
+//! use cranelift_frontend::{FunctionBuilder, FunctionBuilderContext, Variable};
+//!
+//! let mut sig = Signature::new(CallConv::SystemV);
+//! sig.returns.push(AbiParam::new(I32));
+//! sig.params.push(AbiParam::new(I32));
+//! let mut fn_builder_ctx = FunctionBuilderContext::new();
+//! let mut func = Function::with_name_signature(ExternalName::user(0, 0), sig);
+//! {
+//! let mut builder = FunctionBuilder::new(&mut func, &mut fn_builder_ctx);
+//!
+//! let block0 = builder.create_block();
+//! let block1 = builder.create_block();
+//! let block2 = builder.create_block();
+//! let block3 = builder.create_block();
+//! let x = Variable::new(0);
+//! let y = Variable::new(1);
+//! let z = Variable::new(2);
+//! builder.declare_var(x, I32);
+//! builder.declare_var(y, I32);
+//! builder.declare_var(z, I32);
+//! builder.append_block_params_for_function_params(block0);
+//!
+//! builder.switch_to_block(block0);
+//! builder.seal_block(block0);
+//! {
+//! let tmp = builder.block_params(block0)[0]; // the first function parameter
+//! builder.def_var(x, tmp);
+//! }
+//! {
+//! let tmp = builder.ins().iconst(I32, 2);
+//! builder.def_var(y, tmp);
+//! }
+//! {
+//! let arg1 = builder.use_var(x);
+//! let arg2 = builder.use_var(y);
+//! let tmp = builder.ins().iadd(arg1, arg2);
+//! builder.def_var(z, tmp);
+//! }
+//! builder.ins().jump(block1, &[]);
+//!
+//! builder.switch_to_block(block1);
+//! {
+//! let arg1 = builder.use_var(y);
+//! let arg2 = builder.use_var(z);
+//! let tmp = builder.ins().iadd(arg1, arg2);
+//! builder.def_var(z, tmp);
+//! }
+//! {
+//! let arg = builder.use_var(y);
+//! builder.ins().brnz(arg, block3, &[]);
+//! }
+//! builder.ins().jump(block2, &[]);
+//!
+//! builder.switch_to_block(block2);
+//! builder.seal_block(block2);
+//! {
+//! let arg1 = builder.use_var(z);
+//! let arg2 = builder.use_var(x);
+//! let tmp = builder.ins().isub(arg1, arg2);
+//! builder.def_var(z, tmp);
+//! }
+//! {
+//! let arg = builder.use_var(y);
+//! builder.ins().return_(&[arg]);
+//! }
+//!
+//! builder.switch_to_block(block3);
+//! builder.seal_block(block3);
+//!
+//! {
+//! let arg1 = builder.use_var(y);
+//! let arg2 = builder.use_var(x);
+//! let tmp = builder.ins().isub(arg1, arg2);
+//! builder.def_var(y, tmp);
+//! }
+//! builder.ins().jump(block1, &[]);
+//! builder.seal_block(block1);
+//!
+//! builder.finalize();
+//! }
+//!
+//! let flags = settings::Flags::new(settings::builder());
+//! let res = verify_function(&func, &flags);
+//! println!("{}", func.display(None));
+//! if let Err(errors) = res {
+//! panic!("{}", errors);
+//! }
+//! ```
+
+#![deny(missing_docs, trivial_numeric_casts, unused_extern_crates)]
+#![warn(unused_import_braces)]
+#![cfg_attr(feature = "std", deny(unstable_features))]
+#![cfg_attr(feature = "cargo-clippy", allow(clippy::new_without_default))]
+#![cfg_attr(
+ feature = "cargo-clippy",
+ warn(
+ clippy::float_arithmetic,
+ clippy::mut_mut,
+ clippy::nonminimal_bool,
+ clippy::map_unwrap_or,
+        clippy::print_stdout,
+ clippy::unicode_not_nfc,
+ clippy::use_self
+ )
+)]
+#![no_std]
+
+#[allow(unused_imports)] // #[macro_use] is required for no_std
+#[macro_use]
+extern crate alloc;
+
+#[cfg(feature = "std")]
+#[macro_use]
+extern crate std;
+
+#[cfg(not(feature = "std"))]
+use hashbrown::{hash_map, HashMap};
+#[cfg(feature = "std")]
+use std::collections::{hash_map, HashMap};
+
+pub use crate::frontend::{FunctionBuilder, FunctionBuilderContext};
+pub use crate::switch::Switch;
+pub use crate::variable::Variable;
+
+mod frontend;
+mod ssa;
+mod switch;
+mod variable;
+
+/// Version number of this crate.
+pub const VERSION: &str = env!("CARGO_PKG_VERSION");
diff --git a/third_party/rust/cranelift-frontend/src/ssa.rs b/third_party/rust/cranelift-frontend/src/ssa.rs
new file mode 100644
index 0000000000..4eef234290
--- /dev/null
+++ b/third_party/rust/cranelift-frontend/src/ssa.rs
@@ -0,0 +1,1366 @@
+//! An SSA-building API that handles incomplete CFGs.
+//!
+//! The algorithm is based upon Braun M., Buchwald S., Hack S., Leißa R., Mallon C.,
+//! Zwinkau A. (2013) Simple and Efficient Construction of Static Single Assignment Form.
+//! In: Jhala R., De Bosschere K. (eds) Compiler Construction. CC 2013.
+//! Lecture Notes in Computer Science, vol 7791. Springer, Berlin, Heidelberg
+//!
+//! https://link.springer.com/content/pdf/10.1007/978-3-642-37051-9_6.pdf
+
+use crate::Variable;
+use alloc::vec::Vec;
+use core::convert::TryInto;
+use core::mem;
+use cranelift_codegen::cursor::{Cursor, FuncCursor};
+use cranelift_codegen::entity::SecondaryMap;
+use cranelift_codegen::ir::immediates::{Ieee32, Ieee64};
+use cranelift_codegen::ir::instructions::BranchInfo;
+use cranelift_codegen::ir::types::{F32, F64};
+use cranelift_codegen::ir::{Block, Function, Inst, InstBuilder, InstructionData, Type, Value};
+use cranelift_codegen::packed_option::PackedOption;
+use smallvec::SmallVec;
+
+/// Structure containing the data relevant to the construction of SSA for a given function.
+///
+/// The `Variable` struct corresponds to the way variables are represented in the
+/// non-SSA language you're translating from.
+///
+/// The SSA building relies on information about the variables used and defined.
+///
+/// This SSA building module allows you to define and use variables on the fly while you are
+/// constructing the CFG; there is no need for a separate SSA pass after the CFG is completed.
+///
+/// A basic block is said to be _filled_ if all the instructions that it contains have been
+/// translated, and it is said to be _sealed_ if all of its predecessors have been declared.
+/// Only filled predecessors can be declared.
+pub struct SSABuilder {
+ // TODO: Consider a sparse representation rather than SecondaryMap-of-SecondaryMap.
+    /// Records, for every variable and for every relevant block, the last definition of
+    /// the variable in that block.
+ variables: SecondaryMap<Variable, SecondaryMap<Block, PackedOption<Value>>>,
+
+    /// Records, for each basic block, its predecessors, whether it is sealed, and the list of
+    /// variables used but not yet defined in the block.
+ ssa_blocks: SecondaryMap<Block, SSABlockData>,
+
+ /// Call stack for use in the `use_var`/`predecessors_lookup` state machine.
+ calls: Vec<Call>,
+ /// Result stack for use in the `use_var`/`predecessors_lookup` state machine.
+ results: Vec<Value>,
+
+ /// Side effects accumulated in the `use_var`/`predecessors_lookup` state machine.
+ side_effects: SideEffects,
+}
+
+/// Side effects of a `use_var` or a `seal_block` method call.
+pub struct SideEffects {
+    /// When we want to append jump arguments to a `br_table` instruction, the critical edge is
+    /// split and the newly created `Block`s are signaled here.
+    pub split_blocks_created: Vec<Block>,
+    /// When a variable is used but has never been defined before (this happens in the case of
+    /// unreachable code), a placeholder `iconst` or `fconst` value is added to the right `Block`.
+    /// This field signals when that is the case and records the `Block`s to which the
+    /// initializations have been added.
+ pub instructions_added_to_blocks: Vec<Block>,
+}
+
+impl SideEffects {
+ fn new() -> Self {
+ Self {
+ split_blocks_created: Vec::new(),
+ instructions_added_to_blocks: Vec::new(),
+ }
+ }
+
+ fn is_empty(&self) -> bool {
+ self.split_blocks_created.is_empty() && self.instructions_added_to_blocks.is_empty()
+ }
+}
+
+#[derive(Clone)]
+struct PredBlock {
+ block: Block,
+ branch: Inst,
+}
+
+impl PredBlock {
+ fn new(block: Block, branch: Inst) -> Self {
+ Self { block, branch }
+ }
+}
+
+type PredBlockSmallVec = SmallVec<[PredBlock; 4]>;
+
+#[derive(Clone, Default)]
+struct SSABlockData {
+ // The predecessors of the Block with the block and branch instruction.
+ predecessors: PredBlockSmallVec,
+ // A block is sealed if all of its predecessors have been declared.
+ sealed: bool,
+ // List of current Block arguments for which an earlier def has not been found yet.
+ undef_variables: Vec<(Variable, Value)>,
+}
+
+impl SSABlockData {
+ fn add_predecessor(&mut self, pred: Block, inst: Inst) {
+ debug_assert!(!self.sealed, "sealed blocks cannot accept new predecessors");
+ self.predecessors.push(PredBlock::new(pred, inst));
+ }
+
+ fn remove_predecessor(&mut self, inst: Inst) -> Block {
+ let pred = self
+ .predecessors
+ .iter()
+ .position(|&PredBlock { branch, .. }| branch == inst)
+ .expect("the predecessor you are trying to remove is not declared");
+ self.predecessors.swap_remove(pred).block
+ }
+}
+
+impl SSABuilder {
+ /// Allocate a new blank SSA builder struct. Use the API function to interact with the struct.
+ pub fn new() -> Self {
+ Self {
+ variables: SecondaryMap::with_default(SecondaryMap::new()),
+ ssa_blocks: SecondaryMap::new(),
+ calls: Vec::new(),
+ results: Vec::new(),
+ side_effects: SideEffects::new(),
+ }
+ }
+
+    /// Clears an `SSABuilder` of all its data, leaving it in a pristine state, without
+    /// deallocating memory.
+ pub fn clear(&mut self) {
+ self.variables.clear();
+ self.ssa_blocks.clear();
+ debug_assert!(self.calls.is_empty());
+ debug_assert!(self.results.is_empty());
+ debug_assert!(self.side_effects.is_empty());
+ }
+
+ /// Tests whether an `SSABuilder` is in a cleared state.
+ pub fn is_empty(&self) -> bool {
+ self.variables.is_empty()
+ && self.ssa_blocks.is_empty()
+ && self.calls.is_empty()
+ && self.results.is_empty()
+ && self.side_effects.is_empty()
+ }
+}
+
+/// Small enum used for clarity in some functions.
+#[derive(Debug)]
+enum ZeroOneOrMore<T> {
+ Zero,
+ One(T),
+ More,
+}
+
+/// Cases used internally by `use_var_nonlocal()` to appease the borrow checker.
+#[derive(Debug)]
+enum UseVarCases {
+ Unsealed(Value),
+ SealedOnePredecessor(Block),
+ SealedMultiplePredecessors(Value, Block),
+}
+
+/// States for the `use_var`/`predecessors_lookup` state machine.
+enum Call {
+ UseVar(Block),
+ FinishSealedOnePredecessor(Block),
+ FinishPredecessorsLookup(Value, Block),
+}
+
+/// Emit instructions to produce a zero value in the given type.
+fn emit_zero(ty: Type, mut cur: FuncCursor) -> Value {
+ if ty.is_int() {
+ cur.ins().iconst(ty, 0)
+ } else if ty.is_bool() {
+ cur.ins().bconst(ty, false)
+ } else if ty == F32 {
+ cur.ins().f32const(Ieee32::with_bits(0))
+ } else if ty == F64 {
+ cur.ins().f64const(Ieee64::with_bits(0))
+ } else if ty.is_ref() {
+ cur.ins().null(ty)
+ } else if ty.is_vector() {
+ let scalar_ty = ty.lane_type();
+ if scalar_ty.is_int() || scalar_ty.is_bool() {
+ let zero = cur.func.dfg.constants.insert(
+ core::iter::repeat(0)
+ .take(ty.bytes().try_into().unwrap())
+ .collect(),
+ );
+ cur.ins().vconst(ty, zero)
+ } else if scalar_ty == F32 {
+ let scalar = cur.ins().f32const(Ieee32::with_bits(0));
+ cur.ins().splat(ty, scalar)
+ } else if scalar_ty == F64 {
+ let scalar = cur.ins().f64const(Ieee64::with_bits(0));
+ cur.ins().splat(ty, scalar)
+ } else {
+ panic!("unimplemented scalar type: {:?}", ty)
+ }
+ } else {
+ panic!("unimplemented type: {:?}", ty)
+ }
+}
+
+/// The following methods are the API of the SSA builder. Here is how it should be used when
+/// translating to Cranelift IR (a sketch follows the list):
+///
+/// - for each basic block, create the corresponding data for SSA construction with
+///   `declare_block`;
+///
+/// - while traversing a basic block and translating instructions, use `def_var` and `use_var`
+///   to record definitions and uses of variables; these methods will give you the corresponding
+///   SSA values;
+///
+/// - when all the instructions in a basic block have been translated, the block is said to be
+///   _filled_ and only then can you add it as a predecessor to other blocks with
+///   `declare_block_predecessor`;
+///
+/// - when you have declared all the predecessors of a basic block,
+///   call `seal_block` on it with the `Function` that you are building.
+///
+/// This API will give you the correct SSA values to use as arguments of your instructions,
+/// as well as modify the jump instructions and `Block` parameters to account for the SSA
+/// Phi functions.
+///
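+/// For example, a minimal single-block sketch mirroring the `simple_block` test at the bottom
+/// of this file:
+///
+/// ```rust,ignore
+/// // (Sketch only; this module is internal to the crate.)
+/// let mut func = Function::new();
+/// let mut ssa = SSABuilder::new();
+/// let block0 = func.dfg.make_block();
+/// ssa.declare_block(block0);
+/// ssa.seal_block(block0, &mut func); // the entry block has no predecessors
+/// let x_var = Variable::new(0);
+/// let x_val = {
+///     let mut cur = FuncCursor::new(&mut func);
+///     cur.insert_block(block0);
+///     cur.ins().iconst(I32, 1)
+/// };
+/// ssa.def_var(x_var, x_val, block0);
+/// assert_eq!(ssa.use_var(&mut func, x_var, I32, block0).0, x_val);
+/// ```
+///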
+impl SSABuilder {
+ /// Declares a new definition of a variable in a given basic block.
+ /// The SSA value is passed as an argument because it should be created with
+ /// `ir::DataFlowGraph::append_result`.
+ pub fn def_var(&mut self, var: Variable, val: Value, block: Block) {
+ self.variables[var][block] = PackedOption::from(val);
+ }
+
+ /// Declares a use of a variable in a given basic block. Returns the SSA value corresponding
+ /// to the current SSA definition of this variable and a list of newly created Blocks that
+ /// are the results of critical edge splitting for `br_table` with arguments.
+ ///
+    /// If the variable has never been defined in this block or, recursively, in its predecessors,
+ /// this method will silently create an initializer with `iconst` or `fconst`. You are
+ /// responsible for making sure that you initialize your variables.
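+    ///
+    /// For instance (sketch; `record_new_block` stands in for whatever bookkeeping the caller
+    /// maintains and is not part of this API):
+    ///
+    /// ```rust,ignore
+    /// let (val, effects) = ssa.use_var(&mut func, var, I32, block);
+    /// // Blocks created by critical-edge splitting arrive already filled and sealed;
+    /// // the caller only needs to fold them into its own records.
+    /// for split_block in effects.split_blocks_created {
+    ///     record_new_block(split_block);
+    /// }
+    /// ```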
+ pub fn use_var(
+ &mut self,
+ func: &mut Function,
+ var: Variable,
+ ty: Type,
+ block: Block,
+ ) -> (Value, SideEffects) {
+ // First, try Local Value Numbering (Algorithm 1 in the paper).
+ // If the variable already has a known Value in this block, use that.
+ if let Some(var_defs) = self.variables.get(var) {
+ if let Some(val) = var_defs[block].expand() {
+ return (val, SideEffects::new());
+ }
+ }
+
+ // Otherwise, use Global Value Numbering (Algorithm 2 in the paper).
+ // This resolves the Value with respect to its predecessors.
+ debug_assert!(self.calls.is_empty());
+ debug_assert!(self.results.is_empty());
+ debug_assert!(self.side_effects.is_empty());
+
+ // Prepare the 'calls' and 'results' stacks for the state machine.
+ self.use_var_nonlocal(func, var, ty, block);
+
+ let value = self.run_state_machine(func, var, ty);
+ let side_effects = mem::replace(&mut self.side_effects, SideEffects::new());
+
+ (value, side_effects)
+ }
+
+ /// Resolve the minimal SSA Value of `var` in `block` by traversing predecessors.
+ ///
+ /// This function sets up state for `run_state_machine()` but does not execute it.
+ fn use_var_nonlocal(&mut self, func: &mut Function, var: Variable, ty: Type, block: Block) {
+ // This function is split into two parts to appease the borrow checker.
+ // Part 1: With a mutable borrow of self, update the DataFlowGraph if necessary.
+ let data = &mut self.ssa_blocks[block];
+        let case = if data.sealed {
+            if data.predecessors.len() == 1 {
+                // Optimize the common case of one predecessor: no param needed.
+                UseVarCases::SealedOnePredecessor(data.predecessors[0].block)
+            } else {
+                // The block has multiple predecessors, so we append a Block parameter
+                // that will serve as a value. Adding the operandless param eagerly
+                // also breaks potential lookup cycles.
+                let val = func.dfg.append_block_param(block, ty);
+                UseVarCases::SealedMultiplePredecessors(val, block)
+ }
+ } else {
+ let val = func.dfg.append_block_param(block, ty);
+ data.undef_variables.push((var, val));
+ UseVarCases::Unsealed(val)
+ };
+
+ // Part 2: Prepare SSABuilder state for run_state_machine().
+ match case {
+ UseVarCases::SealedOnePredecessor(pred) => {
+ // Get the Value directly from the single predecessor.
+ self.calls.push(Call::FinishSealedOnePredecessor(block));
+ self.calls.push(Call::UseVar(pred));
+ }
+ UseVarCases::Unsealed(val) => {
+ // Define the operandless param added above to prevent lookup cycles.
+ self.def_var(var, val, block);
+
+ // Nothing more can be known at this point.
+ self.results.push(val);
+ }
+ UseVarCases::SealedMultiplePredecessors(val, block) => {
+ // Define the operandless param added above to prevent lookup cycles.
+ self.def_var(var, val, block);
+
+                // Look up a use_var for each predecessor.
+ self.begin_predecessors_lookup(val, block);
+ }
+ }
+ }
+
+ /// For blocks with a single predecessor, once we've determined the value,
+ /// record a local def for it for future queries to find.
+ fn finish_sealed_one_predecessor(&mut self, var: Variable, block: Block) {
+ let val = *self.results.last().unwrap();
+ self.def_var(var, val, block);
+ }
+
+ /// Declares a new basic block to construct corresponding data for SSA construction.
+ /// No predecessors are declared here and the block is not sealed.
+ /// Predecessors have to be added with `declare_block_predecessor`.
+ pub fn declare_block(&mut self, block: Block) {
+ self.ssa_blocks[block] = SSABlockData {
+ predecessors: PredBlockSmallVec::new(),
+ sealed: false,
+ undef_variables: Vec::new(),
+ };
+ }
+
+    /// Declares a new predecessor for a `Block` and records the branch instruction
+    /// of the predecessor that leads to it.
+    ///
+    /// The predecessor `Block` must be filled before being added as a predecessor.
+    /// Note that you must provide no jump arguments to the branch
+    /// instruction when you create it, since `SSABuilder` will fill them in for you.
+ ///
+ /// Callers are expected to avoid adding the same predecessor more than once in the case
+ /// of a jump table.
+ pub fn declare_block_predecessor(&mut self, block: Block, pred: Block, inst: Inst) {
+ debug_assert!(!self.is_sealed(block));
+ self.ssa_blocks[block].add_predecessor(pred, inst)
+ }
+
+ /// Remove a previously declared Block predecessor by giving a reference to the jump
+ /// instruction. Returns the basic block containing the instruction.
+ ///
+    /// Note: use only when you know what you are doing; this might break SSA construction.
+ pub fn remove_block_predecessor(&mut self, block: Block, inst: Inst) -> Block {
+ debug_assert!(!self.is_sealed(block));
+ self.ssa_blocks[block].remove_predecessor(inst)
+ }
+
+    /// Completes the global value numbering for a `Block`, all of its predecessors having
+    /// already been declared.
+    ///
+    /// This method modifies the function's `Layout` by adding arguments to the `Block`s to
+    /// take into account the Phi functions placed by the SSA algorithm.
+ ///
+ /// Returns the list of newly created blocks for critical edge splitting.
+ pub fn seal_block(&mut self, block: Block, func: &mut Function) -> SideEffects {
+ self.seal_one_block(block, func);
+ mem::replace(&mut self.side_effects, SideEffects::new())
+ }
+
+ /// Completes the global value numbering for all unsealed `Block`s in `func`.
+ ///
+ /// It's more efficient to seal `Block`s as soon as possible, during
+ /// translation, but for frontends where this is impractical to do, this
+ /// function can be used at the end of translating all blocks to ensure
+ /// that everything is sealed.
+ pub fn seal_all_blocks(&mut self, func: &mut Function) -> SideEffects {
+ // Seal all `Block`s currently in the function. This can entail splitting
+ // and creation of new blocks, however such new blocks are sealed on
+ // the fly, so we don't need to account for them here.
+ for block in self.ssa_blocks.keys() {
+ if !self.is_sealed(block) {
+ self.seal_one_block(block, func);
+ }
+ }
+ mem::replace(&mut self.side_effects, SideEffects::new())
+ }
+
+ /// Helper function for `seal_block` and
+ /// `seal_all_blocks`.
+ fn seal_one_block(&mut self, block: Block, func: &mut Function) {
+ let block_data = &mut self.ssa_blocks[block];
+ debug_assert!(
+ !block_data.sealed,
+ "Attempting to seal {} which is already sealed.",
+ block
+ );
+
+ // Extract the undef_variables data from the block so that we
+ // can iterate over it without borrowing the whole builder.
+ let undef_vars = mem::replace(&mut block_data.undef_variables, Vec::new());
+
+ // For each undef var we look up values in the predecessors and create a block parameter
+ // only if necessary.
+ for (var, val) in undef_vars {
+ let ty = func.dfg.value_type(val);
+ self.predecessors_lookup(func, val, var, ty, block);
+ }
+ self.mark_block_sealed(block);
+ }
+
+ /// Set the `sealed` flag for `block`.
+ fn mark_block_sealed(&mut self, block: Block) {
+ // Then we mark the block as sealed.
+ let block_data = &mut self.ssa_blocks[block];
+ debug_assert!(!block_data.sealed);
+ debug_assert!(block_data.undef_variables.is_empty());
+ block_data.sealed = true;
+
+ // We could call data.predecessors.shrink_to_fit() here, if
+ // important, because no further predecessors will be added
+ // to this block.
+ }
+
+ /// Given the local SSA Value of a Variable in a Block, perform a recursive lookup on
+ /// predecessors to determine if it is redundant with another Value earlier in the CFG.
+ ///
+ /// If such a Value exists and is redundant, the local Value is replaced by the
+ /// corresponding non-local Value. If the original Value was a Block parameter,
+ /// the parameter may be removed if redundant. Parameters are placed eagerly by callers
+ /// to avoid infinite loops when looking up a Value for a Block that is in a CFG loop.
+ ///
+ /// Doing this lookup for each Value in each Block preserves SSA form during construction.
+ ///
+ /// Returns the chosen Value.
+ ///
+ /// ## Arguments
+ ///
+ /// `sentinel` is a dummy Block parameter inserted by `use_var_nonlocal()`.
+ /// Its purpose is to allow detection of CFG cycles while traversing predecessors.
+ ///
+    /// The `sentinel: Value` and the `ty: Type` describe the `var: Variable`
+    /// that is being looked up.
+ fn predecessors_lookup(
+ &mut self,
+ func: &mut Function,
+ sentinel: Value,
+ var: Variable,
+ ty: Type,
+ block: Block,
+ ) -> Value {
+ debug_assert!(self.calls.is_empty());
+ debug_assert!(self.results.is_empty());
+ // self.side_effects may be non-empty here so that callers can
+ // accumulate side effects over multiple calls.
+ self.begin_predecessors_lookup(sentinel, block);
+ self.run_state_machine(func, var, ty)
+ }
+
+ /// Set up state for `run_state_machine()` to initiate non-local use lookups
+ /// in all predecessors of `dest_block`, and arrange for a call to
+ /// `finish_predecessors_lookup` once they complete.
+ fn begin_predecessors_lookup(&mut self, sentinel: Value, dest_block: Block) {
+ self.calls
+ .push(Call::FinishPredecessorsLookup(sentinel, dest_block));
+ // Iterate over the predecessors.
+ let mut calls = mem::replace(&mut self.calls, Vec::new());
+ calls.extend(
+ self.predecessors(dest_block)
+ .iter()
+ .rev()
+ .map(|&PredBlock { block: pred, .. }| Call::UseVar(pred)),
+ );
+ self.calls = calls;
+ }
+
+ /// Examine the values from the predecessors and compute a result value, creating
+ /// block parameters as needed.
+ fn finish_predecessors_lookup(
+ &mut self,
+ func: &mut Function,
+ sentinel: Value,
+ var: Variable,
+ dest_block: Block,
+ ) {
+ let mut pred_values: ZeroOneOrMore<Value> = ZeroOneOrMore::Zero;
+
+ // Determine how many predecessors are yielding unique, non-temporary Values.
+ let num_predecessors = self.predecessors(dest_block).len();
+ for &pred_val in self.results.iter().rev().take(num_predecessors) {
+ match pred_values {
+ ZeroOneOrMore::Zero => {
+ if pred_val != sentinel {
+ pred_values = ZeroOneOrMore::One(pred_val);
+ }
+ }
+ ZeroOneOrMore::One(old_val) => {
+ if pred_val != sentinel && pred_val != old_val {
+ pred_values = ZeroOneOrMore::More;
+ break;
+ }
+ }
+ ZeroOneOrMore::More => {
+ break;
+ }
+ }
+ }
+
+ // Those predecessors' Values have been examined: pop all their results.
+ self.results.truncate(self.results.len() - num_predecessors);
+
+ let result_val = match pred_values {
+ ZeroOneOrMore::Zero => {
+ // The variable is used but never defined before. This is an irregularity in the
+ // code, but rather than throwing an error we silently initialize the variable to
+ // 0. This will have no effect since this situation happens in unreachable code.
+ if !func.layout.is_block_inserted(dest_block) {
+ func.layout.append_block(dest_block);
+ }
+ self.side_effects
+ .instructions_added_to_blocks
+ .push(dest_block);
+ let zero = emit_zero(
+ func.dfg.value_type(sentinel),
+ FuncCursor::new(func).at_first_insertion_point(dest_block),
+ );
+ func.dfg.remove_block_param(sentinel);
+ func.dfg.change_to_alias(sentinel, zero);
+ zero
+ }
+ ZeroOneOrMore::One(pred_val) => {
+ // Here all the predecessors use a single value to represent our variable
+ // so we don't need to have it as a block argument.
+ // We need to replace all the occurrences of val with pred_val but since
+ // we can't afford a re-writing pass right now we just declare an alias.
+ // Resolve aliases eagerly so that we can check for cyclic aliasing,
+ // which can occur in unreachable code.
+ let mut resolved = func.dfg.resolve_aliases(pred_val);
+ if sentinel == resolved {
+ // Cycle detected. Break it by creating a zero value.
+ resolved = emit_zero(
+ func.dfg.value_type(sentinel),
+ FuncCursor::new(func).at_first_insertion_point(dest_block),
+ );
+ }
+ func.dfg.remove_block_param(sentinel);
+ func.dfg.change_to_alias(sentinel, resolved);
+ resolved
+ }
+ ZeroOneOrMore::More => {
+ // There is disagreement in the predecessors on which value to use so we have
+ // to keep the block argument. To avoid borrowing `self` for the whole loop,
+ // temporarily detach the predecessors list and replace it with an empty list.
+ let mut preds =
+ mem::replace(self.predecessors_mut(dest_block), PredBlockSmallVec::new());
+ for &mut PredBlock {
+ block: ref mut pred_block,
+ branch: ref mut last_inst,
+ } in &mut preds
+ {
+ // We already did a full `use_var` above, so we can do just the fast path.
+ let ssa_block_map = self.variables.get(var).unwrap();
+ let pred_val = ssa_block_map.get(*pred_block).unwrap().unwrap();
+ let jump_arg = self.append_jump_argument(
+ func,
+ *last_inst,
+ *pred_block,
+ dest_block,
+ pred_val,
+ var,
+ );
+ if let Some((middle_block, middle_jump_inst)) = jump_arg {
+ *pred_block = middle_block;
+ *last_inst = middle_jump_inst;
+ self.side_effects.split_blocks_created.push(middle_block);
+ }
+ }
+ // Now that we're done, move the predecessors list back.
+ debug_assert!(self.predecessors(dest_block).is_empty());
+ *self.predecessors_mut(dest_block) = preds;
+
+ sentinel
+ }
+ };
+
+ self.results.push(result_val);
+ }
+
+ /// Appends a jump argument to a jump instruction, returns block created in case of
+ /// critical edge splitting.
+ fn append_jump_argument(
+ &mut self,
+ func: &mut Function,
+ jump_inst: Inst,
+ jump_inst_block: Block,
+ dest_block: Block,
+ val: Value,
+ var: Variable,
+ ) -> Option<(Block, Inst)> {
+ match func.dfg.analyze_branch(jump_inst) {
+ BranchInfo::NotABranch => {
+ panic!("you have declared a non-branch instruction as a predecessor to a block");
+ }
+ // For a single destination appending a jump argument to the instruction
+ // is sufficient.
+ BranchInfo::SingleDest(_, _) => {
+ func.dfg.append_inst_arg(jump_inst, val);
+ None
+ }
+ BranchInfo::Table(jt, default_block) => {
+                // In the case of a jump table, the situation is tricky because `br_table`
+                // doesn't support arguments, so we have to split the critical edge.
+ let middle_block = func.dfg.make_block();
+ func.layout.append_block(middle_block);
+ self.declare_block(middle_block);
+ self.ssa_blocks[middle_block].add_predecessor(jump_inst_block, jump_inst);
+ self.mark_block_sealed(middle_block);
+
+ if let Some(default_block) = default_block {
+ if dest_block == default_block {
+ match func.dfg[jump_inst] {
+ InstructionData::BranchTable {
+ destination: ref mut dest,
+ ..
+ } => {
+ *dest = middle_block;
+ }
+ _ => panic!("should not happen"),
+ }
+ }
+ }
+
+ for old_dest in func.jump_tables[jt].as_mut_slice() {
+ if *old_dest == dest_block {
+ *old_dest = middle_block;
+ }
+ }
+ let mut cur = FuncCursor::new(func).at_bottom(middle_block);
+ let middle_jump_inst = cur.ins().jump(dest_block, &[val]);
+ self.def_var(var, val, middle_block);
+ Some((middle_block, middle_jump_inst))
+ }
+ }
+ }
+
+ /// Returns the list of `Block`s that have been declared as predecessors of the argument.
+ fn predecessors(&self, block: Block) -> &[PredBlock] {
+ &self.ssa_blocks[block].predecessors
+ }
+
+    /// Returns whether the given Block has any predecessors.
+ pub fn has_any_predecessors(&self, block: Block) -> bool {
+ !self.predecessors(block).is_empty()
+ }
+
+ /// Same as predecessors, but for &mut.
+ fn predecessors_mut(&mut self, block: Block) -> &mut PredBlockSmallVec {
+ &mut self.ssa_blocks[block].predecessors
+ }
+
+ /// Returns `true` if and only if `seal_block` has been called on the argument.
+ pub fn is_sealed(&self, block: Block) -> bool {
+ self.ssa_blocks[block].sealed
+ }
+
+ /// The main algorithm is naturally recursive: when there's a `use_var` in a
+ /// block with no corresponding local defs, it recurses and performs a
+ /// `use_var` in each predecessor. To avoid risking running out of callstack
+ /// space, we keep an explicit stack and use a small state machine rather
+ /// than literal recursion.
+ fn run_state_machine(&mut self, func: &mut Function, var: Variable, ty: Type) -> Value {
+ // Process the calls scheduled in `self.calls` until it is empty.
+ while let Some(call) = self.calls.pop() {
+ match call {
+ Call::UseVar(ssa_block) => {
+                    // First we look up the current definition of the variable in this block.
+ if let Some(var_defs) = self.variables.get(var) {
+ if let Some(val) = var_defs[ssa_block].expand() {
+ self.results.push(val);
+ continue;
+ }
+ }
+ self.use_var_nonlocal(func, var, ty, ssa_block);
+ }
+ Call::FinishSealedOnePredecessor(ssa_block) => {
+ self.finish_sealed_one_predecessor(var, ssa_block);
+ }
+ Call::FinishPredecessorsLookup(sentinel, dest_block) => {
+ self.finish_predecessors_lookup(func, sentinel, var, dest_block);
+ }
+ }
+ }
+ debug_assert_eq!(self.results.len(), 1);
+ self.results.pop().unwrap()
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ use crate::ssa::SSABuilder;
+ use crate::Variable;
+ use cranelift_codegen::cursor::{Cursor, FuncCursor};
+ use cranelift_codegen::entity::EntityRef;
+ use cranelift_codegen::ir::instructions::BranchInfo;
+ use cranelift_codegen::ir::types::*;
+ use cranelift_codegen::ir::{Function, Inst, InstBuilder, JumpTableData, Opcode};
+ use cranelift_codegen::settings;
+ use cranelift_codegen::verify_function;
+
+ #[test]
+ fn simple_block() {
+ let mut func = Function::new();
+ let mut ssa = SSABuilder::new();
+ let block0 = func.dfg.make_block();
+ // Here is the pseudo-program we want to translate:
+ // block0:
+ // x = 1;
+ // y = 2;
+ // z = x + y;
+ // z = x + z;
+
+ ssa.declare_block(block0);
+ let x_var = Variable::new(0);
+ let x_ssa = {
+ let mut cur = FuncCursor::new(&mut func);
+ cur.insert_block(block0);
+ cur.ins().iconst(I32, 1)
+ };
+ ssa.def_var(x_var, x_ssa, block0);
+ let y_var = Variable::new(1);
+ let y_ssa = {
+ let mut cur = FuncCursor::new(&mut func).at_bottom(block0);
+ cur.ins().iconst(I32, 2)
+ };
+ ssa.def_var(y_var, y_ssa, block0);
+ assert_eq!(ssa.use_var(&mut func, x_var, I32, block0).0, x_ssa);
+ assert_eq!(ssa.use_var(&mut func, y_var, I32, block0).0, y_ssa);
+
+ let z_var = Variable::new(2);
+ let x_use1 = ssa.use_var(&mut func, x_var, I32, block0).0;
+ let y_use1 = ssa.use_var(&mut func, y_var, I32, block0).0;
+ let z1_ssa = {
+ let mut cur = FuncCursor::new(&mut func).at_bottom(block0);
+ cur.ins().iadd(x_use1, y_use1)
+ };
+ ssa.def_var(z_var, z1_ssa, block0);
+ assert_eq!(ssa.use_var(&mut func, z_var, I32, block0).0, z1_ssa);
+
+ let x_use2 = ssa.use_var(&mut func, x_var, I32, block0).0;
+ let z_use1 = ssa.use_var(&mut func, z_var, I32, block0).0;
+ let z2_ssa = {
+ let mut cur = FuncCursor::new(&mut func).at_bottom(block0);
+ cur.ins().iadd(x_use2, z_use1)
+ };
+ ssa.def_var(z_var, z2_ssa, block0);
+ assert_eq!(ssa.use_var(&mut func, z_var, I32, block0).0, z2_ssa);
+ }
+
+ #[test]
+ fn sequence_of_blocks() {
+ let mut func = Function::new();
+ let mut ssa = SSABuilder::new();
+ let block0 = func.dfg.make_block();
+ let block1 = func.dfg.make_block();
+ let block2 = func.dfg.make_block();
+ // Here is the pseudo-program we want to translate:
+ // block0:
+ // x = 1;
+ // y = 2;
+ // z = x + y;
+ // brnz y, block1;
+ // jump block1;
+ // block1:
+ // z = x + z;
+ // jump block2;
+ // block2:
+ // y = x + y;
+ {
+ let mut cur = FuncCursor::new(&mut func);
+ cur.insert_block(block0);
+ cur.insert_block(block1);
+ cur.insert_block(block2);
+ }
+
+ // block0
+ ssa.declare_block(block0);
+ ssa.seal_block(block0, &mut func);
+ let x_var = Variable::new(0);
+ let x_ssa = {
+ let mut cur = FuncCursor::new(&mut func).at_bottom(block0);
+ cur.ins().iconst(I32, 1)
+ };
+ ssa.def_var(x_var, x_ssa, block0);
+ let y_var = Variable::new(1);
+ let y_ssa = {
+ let mut cur = FuncCursor::new(&mut func).at_bottom(block0);
+ cur.ins().iconst(I32, 2)
+ };
+ ssa.def_var(y_var, y_ssa, block0);
+ let z_var = Variable::new(2);
+ let x_use1 = ssa.use_var(&mut func, x_var, I32, block0).0;
+ let y_use1 = ssa.use_var(&mut func, y_var, I32, block0).0;
+ let z1_ssa = {
+ let mut cur = FuncCursor::new(&mut func).at_bottom(block0);
+ cur.ins().iadd(x_use1, y_use1)
+ };
+ ssa.def_var(z_var, z1_ssa, block0);
+ let y_use2 = ssa.use_var(&mut func, y_var, I32, block0).0;
+ let brnz_block0_block2: Inst = {
+ let mut cur = FuncCursor::new(&mut func).at_bottom(block0);
+ cur.ins().brnz(y_use2, block2, &[])
+ };
+ let jump_block0_block1: Inst = {
+ let mut cur = FuncCursor::new(&mut func).at_bottom(block0);
+ cur.ins().jump(block1, &[])
+ };
+
+ assert_eq!(ssa.use_var(&mut func, x_var, I32, block0).0, x_ssa);
+ assert_eq!(ssa.use_var(&mut func, y_var, I32, block0).0, y_ssa);
+ assert_eq!(ssa.use_var(&mut func, z_var, I32, block0).0, z1_ssa);
+
+ // block1
+ ssa.declare_block(block1);
+ ssa.declare_block_predecessor(block1, block0, jump_block0_block1);
+ ssa.seal_block(block1, &mut func);
+
+ let x_use2 = ssa.use_var(&mut func, x_var, I32, block1).0;
+ let z_use1 = ssa.use_var(&mut func, z_var, I32, block1).0;
+ let z2_ssa = {
+ let mut cur = FuncCursor::new(&mut func).at_bottom(block1);
+ cur.ins().iadd(x_use2, z_use1)
+ };
+ ssa.def_var(z_var, z2_ssa, block1);
+ let jump_block1_block2: Inst = {
+ let mut cur = FuncCursor::new(&mut func).at_bottom(block1);
+ cur.ins().jump(block2, &[])
+ };
+
+ assert_eq!(x_use2, x_ssa);
+ assert_eq!(z_use1, z1_ssa);
+ assert_eq!(ssa.use_var(&mut func, z_var, I32, block1).0, z2_ssa);
+
+ // block2
+ ssa.declare_block(block2);
+ ssa.declare_block_predecessor(block2, block0, brnz_block0_block2);
+ ssa.declare_block_predecessor(block2, block1, jump_block1_block2);
+ ssa.seal_block(block2, &mut func);
+ let x_use3 = ssa.use_var(&mut func, x_var, I32, block2).0;
+ let y_use3 = ssa.use_var(&mut func, y_var, I32, block2).0;
+ let y2_ssa = {
+ let mut cur = FuncCursor::new(&mut func).at_bottom(block2);
+ cur.ins().iadd(x_use3, y_use3)
+ };
+ ssa.def_var(y_var, y2_ssa, block2);
+
+ assert_eq!(x_ssa, x_use3);
+ assert_eq!(y_ssa, y_use3);
+ match func.dfg.analyze_branch(brnz_block0_block2) {
+ BranchInfo::SingleDest(dest, jump_args) => {
+ assert_eq!(dest, block2);
+ assert_eq!(jump_args.len(), 0);
+ }
+ _ => assert!(false),
+ };
+ match func.dfg.analyze_branch(jump_block0_block1) {
+ BranchInfo::SingleDest(dest, jump_args) => {
+ assert_eq!(dest, block1);
+ assert_eq!(jump_args.len(), 0);
+ }
+ _ => assert!(false),
+ };
+ match func.dfg.analyze_branch(jump_block1_block2) {
+ BranchInfo::SingleDest(dest, jump_args) => {
+ assert_eq!(dest, block2);
+ assert_eq!(jump_args.len(), 0);
+ }
+ _ => assert!(false),
+ };
+ }
+
+ #[test]
+ fn program_with_loop() {
+ let mut func = Function::new();
+ let mut ssa = SSABuilder::new();
+ let block0 = func.dfg.make_block();
+ let block1 = func.dfg.make_block();
+ let block2 = func.dfg.make_block();
+ let block3 = func.dfg.make_block();
+ {
+ let mut cur = FuncCursor::new(&mut func);
+ cur.insert_block(block0);
+ cur.insert_block(block1);
+ cur.insert_block(block2);
+ cur.insert_block(block3);
+ }
+ // Here is the pseudo-program we want to translate:
+ // block0:
+ // x = 1;
+ // y = 2;
+ // z = x + y;
+ // jump block1
+ // block1:
+ // z = z + y;
+ // brnz y, block3;
+ // jump block2;
+ // block2:
+ // z = z - x;
+ // return y
+ // block3:
+ // y = y - x
+ // jump block1
+
+ // block0
+ ssa.declare_block(block0);
+ ssa.seal_block(block0, &mut func);
+ let x_var = Variable::new(0);
+ let x1 = {
+ let mut cur = FuncCursor::new(&mut func).at_bottom(block0);
+ cur.ins().iconst(I32, 1)
+ };
+ ssa.def_var(x_var, x1, block0);
+ let y_var = Variable::new(1);
+ let y1 = {
+ let mut cur = FuncCursor::new(&mut func).at_bottom(block0);
+ cur.ins().iconst(I32, 2)
+ };
+ ssa.def_var(y_var, y1, block0);
+ let z_var = Variable::new(2);
+ let x2 = ssa.use_var(&mut func, x_var, I32, block0).0;
+ let y2 = ssa.use_var(&mut func, y_var, I32, block0).0;
+ let z1 = {
+ let mut cur = FuncCursor::new(&mut func).at_bottom(block0);
+ cur.ins().iadd(x2, y2)
+ };
+ ssa.def_var(z_var, z1, block0);
+ let jump_block0_block1 = {
+ let mut cur = FuncCursor::new(&mut func).at_bottom(block0);
+ cur.ins().jump(block1, &[])
+ };
+ assert_eq!(ssa.use_var(&mut func, x_var, I32, block0).0, x1);
+ assert_eq!(ssa.use_var(&mut func, y_var, I32, block0).0, y1);
+ assert_eq!(x2, x1);
+ assert_eq!(y2, y1);
+
+ // block1
+ ssa.declare_block(block1);
+ ssa.declare_block_predecessor(block1, block0, jump_block0_block1);
+ let z2 = ssa.use_var(&mut func, z_var, I32, block1).0;
+ let y3 = ssa.use_var(&mut func, y_var, I32, block1).0;
+ let z3 = {
+ let mut cur = FuncCursor::new(&mut func).at_bottom(block1);
+ cur.ins().iadd(z2, y3)
+ };
+ ssa.def_var(z_var, z3, block1);
+ let y4 = ssa.use_var(&mut func, y_var, I32, block1).0;
+ assert_eq!(y4, y3);
+ let brnz_block1_block3 = {
+ let mut cur = FuncCursor::new(&mut func).at_bottom(block1);
+ cur.ins().brnz(y4, block3, &[])
+ };
+ let jump_block1_block2 = {
+ let mut cur = FuncCursor::new(&mut func).at_bottom(block1);
+ cur.ins().jump(block2, &[])
+ };
+
+ // block2
+ ssa.declare_block(block2);
+ ssa.declare_block_predecessor(block2, block1, jump_block1_block2);
+ ssa.seal_block(block2, &mut func);
+ let z4 = ssa.use_var(&mut func, z_var, I32, block2).0;
+ assert_eq!(z4, z3);
+ let x3 = ssa.use_var(&mut func, x_var, I32, block2).0;
+ let z5 = {
+ let mut cur = FuncCursor::new(&mut func).at_bottom(block2);
+ cur.ins().isub(z4, x3)
+ };
+ ssa.def_var(z_var, z5, block2);
+ let y5 = ssa.use_var(&mut func, y_var, I32, block2).0;
+ assert_eq!(y5, y3);
+ {
+ let mut cur = FuncCursor::new(&mut func).at_bottom(block2);
+ cur.ins().return_(&[y5])
+ };
+
+ // block3
+ ssa.declare_block(block3);
+ ssa.declare_block_predecessor(block3, block1, brnz_block1_block3);
+ ssa.seal_block(block3, &mut func);
+ let y6 = ssa.use_var(&mut func, y_var, I32, block3).0;
+ assert_eq!(y6, y3);
+ let x4 = ssa.use_var(&mut func, x_var, I32, block3).0;
+ assert_eq!(x4, x3);
+ let y7 = {
+ let mut cur = FuncCursor::new(&mut func).at_bottom(block3);
+ cur.ins().isub(y6, x4)
+ };
+ ssa.def_var(y_var, y7, block3);
+ let jump_block3_block1 = {
+ let mut cur = FuncCursor::new(&mut func).at_bottom(block3);
+ cur.ins().jump(block1, &[])
+ };
+
+ // block1 after all predecessors have been visited.
+ ssa.declare_block_predecessor(block1, block3, jump_block3_block1);
+ ssa.seal_block(block1, &mut func);
+ assert_eq!(func.dfg.block_params(block1)[0], z2);
+ assert_eq!(func.dfg.block_params(block1)[1], y3);
+ assert_eq!(func.dfg.resolve_aliases(x3), x1);
+ }
+
+ #[test]
+ fn br_table_with_args() {
+ // This tests the on-demand splitting of critical edges for br_table with jump arguments
+ //
+ // Here is the pseudo-program we want to translate:
+ //
+ // function %f {
+ // jt = jump_table [block2, block1]
+ // block0:
+ // x = 1;
+ // br_table x, block2, jt
+ // block1:
+ // x = 2
+ // jump block2
+ // block2:
+ // x = x + 1
+ // return
+ // }
+
+ let mut func = Function::new();
+ let mut ssa = SSABuilder::new();
+ let block0 = func.dfg.make_block();
+ let block1 = func.dfg.make_block();
+ let block2 = func.dfg.make_block();
+ let mut jump_table = JumpTableData::new();
+ jump_table.push_entry(block2);
+ jump_table.push_entry(block1);
+ {
+ let mut cur = FuncCursor::new(&mut func);
+ cur.insert_block(block0);
+ cur.insert_block(block1);
+ cur.insert_block(block2);
+ }
+
+ // block0
+ let x1 = {
+ let mut cur = FuncCursor::new(&mut func).at_bottom(block0);
+ cur.ins().iconst(I32, 1)
+ };
+ ssa.declare_block(block0);
+ ssa.seal_block(block0, &mut func);
+ let x_var = Variable::new(0);
+ ssa.def_var(x_var, x1, block0);
+ ssa.use_var(&mut func, x_var, I32, block0).0;
+ let br_table = {
+ let jt = func.create_jump_table(jump_table);
+ let mut cur = FuncCursor::new(&mut func).at_bottom(block0);
+ cur.ins().br_table(x1, block2, jt)
+ };
+
+ // block1
+ ssa.declare_block(block1);
+ ssa.declare_block_predecessor(block1, block0, br_table);
+ ssa.seal_block(block1, &mut func);
+ let x2 = {
+ let mut cur = FuncCursor::new(&mut func).at_bottom(block1);
+ cur.ins().iconst(I32, 2)
+ };
+ ssa.def_var(x_var, x2, block1);
+ let jump_block1_block2 = {
+ let mut cur = FuncCursor::new(&mut func).at_bottom(block1);
+ cur.ins().jump(block2, &[])
+ };
+
+ // block2
+ ssa.declare_block(block2);
+ ssa.declare_block_predecessor(block2, block1, jump_block1_block2);
+ ssa.declare_block_predecessor(block2, block0, br_table);
+ ssa.seal_block(block2, &mut func);
+ let x3 = ssa.use_var(&mut func, x_var, I32, block2).0;
+ let x4 = {
+ let mut cur = FuncCursor::new(&mut func).at_bottom(block2);
+ cur.ins().iadd_imm(x3, 1)
+ };
+ ssa.def_var(x_var, x4, block2);
+ {
+ let mut cur = FuncCursor::new(&mut func).at_bottom(block2);
+ cur.ins().return_(&[])
+ };
+
+ let flags = settings::Flags::new(settings::builder());
+ match verify_function(&func, &flags) {
+ Ok(()) => {}
+ Err(_errors) => {
+ #[cfg(feature = "std")]
+ panic!(_errors);
+ #[cfg(not(feature = "std"))]
+ panic!("function failed to verify");
+ }
+ }
+ }
+
+ #[test]
+ fn undef_values_reordering() {
+ // Here is the pseudo-program we want to translate:
+ // block0:
+ // x = 0;
+ // y = 1;
+ // z = 2;
+ // jump block1;
+ // block1:
+ // x = z + x;
+ // y = y - x;
+ // jump block1;
+ //
+ let mut func = Function::new();
+ let mut ssa = SSABuilder::new();
+ let block0 = func.dfg.make_block();
+ let block1 = func.dfg.make_block();
+ {
+ let mut cur = FuncCursor::new(&mut func);
+ cur.insert_block(block0);
+ cur.insert_block(block1);
+ }
+
+ // block0
+ ssa.declare_block(block0);
+ let x_var = Variable::new(0);
+ ssa.seal_block(block0, &mut func);
+ let x1 = {
+ let mut cur = FuncCursor::new(&mut func).at_bottom(block0);
+ cur.ins().iconst(I32, 0)
+ };
+ ssa.def_var(x_var, x1, block0);
+ let y_var = Variable::new(1);
+ let y1 = {
+ let mut cur = FuncCursor::new(&mut func).at_bottom(block0);
+ cur.ins().iconst(I32, 1)
+ };
+ ssa.def_var(y_var, y1, block0);
+ let z_var = Variable::new(2);
+ let z1 = {
+ let mut cur = FuncCursor::new(&mut func).at_bottom(block0);
+ cur.ins().iconst(I32, 2)
+ };
+ ssa.def_var(z_var, z1, block0);
+ let jump_block0_block1 = {
+ let mut cur = FuncCursor::new(&mut func).at_bottom(block0);
+ cur.ins().jump(block1, &[])
+ };
+
+ // block1
+ ssa.declare_block(block1);
+ ssa.declare_block_predecessor(block1, block0, jump_block0_block1);
+ let z2 = ssa.use_var(&mut func, z_var, I32, block1).0;
+ assert_eq!(func.dfg.block_params(block1)[0], z2);
+ let x2 = ssa.use_var(&mut func, x_var, I32, block1).0;
+ assert_eq!(func.dfg.block_params(block1)[1], x2);
+ let x3 = {
+ let mut cur = FuncCursor::new(&mut func).at_bottom(block1);
+ cur.ins().iadd(x2, z2)
+ };
+ ssa.def_var(x_var, x3, block1);
+ let x4 = ssa.use_var(&mut func, x_var, I32, block1).0;
+ let y3 = ssa.use_var(&mut func, y_var, I32, block1).0;
+ assert_eq!(func.dfg.block_params(block1)[2], y3);
+ let y4 = {
+ let mut cur = FuncCursor::new(&mut func).at_bottom(block1);
+ cur.ins().isub(y3, x4)
+ };
+ ssa.def_var(y_var, y4, block1);
+ let jump_block1_block1 = {
+ let mut cur = FuncCursor::new(&mut func).at_bottom(block1);
+ cur.ins().jump(block1, &[])
+ };
+ ssa.declare_block_predecessor(block1, block1, jump_block1_block1);
+ ssa.seal_block(block1, &mut func);
+        // At sealing, the "z" argument disappears, but the remaining "x" and "y" args have to
+        // be in the right order.
+ assert_eq!(func.dfg.block_params(block1)[1], y3);
+ assert_eq!(func.dfg.block_params(block1)[0], x2);
+ }
+
+ #[test]
+ fn undef() {
+ // Use vars of various types which have not been defined.
+ let mut func = Function::new();
+ let mut ssa = SSABuilder::new();
+ let block0 = func.dfg.make_block();
+ ssa.declare_block(block0);
+ ssa.seal_block(block0, &mut func);
+ let i32_var = Variable::new(0);
+ let f32_var = Variable::new(1);
+ let f64_var = Variable::new(2);
+ let b1_var = Variable::new(3);
+ let f32x4_var = Variable::new(4);
+ ssa.use_var(&mut func, i32_var, I32, block0);
+ ssa.use_var(&mut func, f32_var, F32, block0);
+ ssa.use_var(&mut func, f64_var, F64, block0);
+ ssa.use_var(&mut func, b1_var, B1, block0);
+ ssa.use_var(&mut func, f32x4_var, F32X4, block0);
+ assert_eq!(func.dfg.num_block_params(block0), 0);
+ }
+
+ #[test]
+ fn undef_in_entry() {
+ // Use a var which has not been defined. The search should hit the
+ // top of the entry block, and then fall back to inserting an iconst.
+ let mut func = Function::new();
+ let mut ssa = SSABuilder::new();
+ let block0 = func.dfg.make_block();
+ ssa.declare_block(block0);
+ ssa.seal_block(block0, &mut func);
+ let x_var = Variable::new(0);
+ assert_eq!(func.dfg.num_block_params(block0), 0);
+ ssa.use_var(&mut func, x_var, I32, block0);
+ assert_eq!(func.dfg.num_block_params(block0), 0);
+ assert_eq!(
+ func.dfg[func.layout.first_inst(block0).unwrap()].opcode(),
+ Opcode::Iconst
+ );
+ }
+
+ #[test]
+ fn undef_in_entry_sealed_after() {
+ // Use a var which has not been defined, but the block is not sealed
+ // until afterward. Before sealing, the SSA builder should insert an
+ // block param; after sealing, it should be removed.
+ let mut func = Function::new();
+ let mut ssa = SSABuilder::new();
+ let block0 = func.dfg.make_block();
+ ssa.declare_block(block0);
+ let x_var = Variable::new(0);
+ assert_eq!(func.dfg.num_block_params(block0), 0);
+ ssa.use_var(&mut func, x_var, I32, block0);
+ assert_eq!(func.dfg.num_block_params(block0), 1);
+ ssa.seal_block(block0, &mut func);
+ assert_eq!(func.dfg.num_block_params(block0), 0);
+ assert_eq!(
+ func.dfg[func.layout.first_inst(block0).unwrap()].opcode(),
+ Opcode::Iconst
+ );
+ }
+
+ #[test]
+ fn unreachable_use() {
+ // Here is the pseudo-program we want to translate:
+ // block0:
+ // return;
+ // block1:
+ // brz x, block1;
+ // jump block1;
+ let mut func = Function::new();
+ let mut ssa = SSABuilder::new();
+ let block0 = func.dfg.make_block();
+ let block1 = func.dfg.make_block();
+ {
+ let mut cur = FuncCursor::new(&mut func);
+ cur.insert_block(block0);
+ cur.insert_block(block1);
+ }
+
+ // block0
+ ssa.declare_block(block0);
+ ssa.seal_block(block0, &mut func);
+ {
+ let mut cur = FuncCursor::new(&mut func).at_bottom(block0);
+ cur.ins().return_(&[]);
+ }
+
+ // block1
+ ssa.declare_block(block1);
+ {
+ let mut cur = FuncCursor::new(&mut func).at_bottom(block1);
+ let x_var = Variable::new(0);
+ let x_val = ssa.use_var(&mut cur.func, x_var, I32, block1).0;
+ let brz = cur.ins().brz(x_val, block1, &[]);
+ let jump_block1_block1 = cur.ins().jump(block1, &[]);
+ ssa.declare_block_predecessor(block1, block1, brz);
+ ssa.declare_block_predecessor(block1, block1, jump_block1_block1);
+ }
+ ssa.seal_block(block1, &mut func);
+
+ let flags = settings::Flags::new(settings::builder());
+ match verify_function(&func, &flags) {
+ Ok(()) => {}
+ Err(_errors) => {
+ #[cfg(feature = "std")]
+ panic!(_errors);
+ #[cfg(not(feature = "std"))]
+ panic!("function failed to verify");
+ }
+ }
+ }
+
+ #[test]
+ fn unreachable_use_with_multiple_preds() {
+ // Here is the pseudo-program we want to translate:
+ // block0:
+ // return;
+ // block1:
+ // brz x, block2;
+ // jump block1;
+ // block2:
+ // jump block1;
+ let mut func = Function::new();
+ let mut ssa = SSABuilder::new();
+ let block0 = func.dfg.make_block();
+ let block1 = func.dfg.make_block();
+ let block2 = func.dfg.make_block();
+ {
+ let mut cur = FuncCursor::new(&mut func);
+ cur.insert_block(block0);
+ cur.insert_block(block1);
+ cur.insert_block(block2);
+ }
+
+ // block0
+ ssa.declare_block(block0);
+ ssa.seal_block(block0, &mut func);
+ {
+ let mut cur = FuncCursor::new(&mut func).at_bottom(block0);
+ cur.ins().return_(&[]);
+ }
+
+ // block1
+ ssa.declare_block(block1);
+ let brz = {
+ let mut cur = FuncCursor::new(&mut func).at_bottom(block1);
+ let x_var = Variable::new(0);
+ let x_val = ssa.use_var(&mut cur.func, x_var, I32, block1).0;
+ let brz = cur.ins().brz(x_val, block2, &[]);
+ let jump_block1_block1 = cur.ins().jump(block1, &[]);
+ ssa.declare_block_predecessor(block1, block1, jump_block1_block1);
+ brz
+ };
+
+ // block2
+ ssa.declare_block(block2);
+ ssa.declare_block_predecessor(block2, block1, brz);
+ ssa.seal_block(block2, &mut func);
+ let jump_block2_block1 = {
+ let mut cur = FuncCursor::new(&mut func).at_bottom(block2);
+ cur.ins().jump(block1, &[])
+ };
+
+ // seal block1
+ ssa.declare_block_predecessor(block1, block2, jump_block2_block1);
+ ssa.seal_block(block1, &mut func);
+ let flags = settings::Flags::new(settings::builder());
+ match verify_function(&func, &flags) {
+ Ok(()) => {}
+ Err(_errors) => {
+ #[cfg(feature = "std")]
+ panic!(_errors);
+ #[cfg(not(feature = "std"))]
+ panic!("function failed to verify");
+ }
+ }
+ }
+}
diff --git a/third_party/rust/cranelift-frontend/src/switch.rs b/third_party/rust/cranelift-frontend/src/switch.rs
new file mode 100644
index 0000000000..f4711e4591
--- /dev/null
+++ b/third_party/rust/cranelift-frontend/src/switch.rs
@@ -0,0 +1,661 @@
+use super::HashMap;
+use crate::frontend::FunctionBuilder;
+use alloc::vec::Vec;
+use core::convert::TryFrom;
+use cranelift_codegen::ir::condcodes::IntCC;
+use cranelift_codegen::ir::*;
+use log::debug;
+
+type EntryIndex = u128;
+
+/// Unlike with `br_table`, `Switch` cases may be sparse or non-0-based.
+/// `Switch` emits efficient code using branches, jump tables, or a combination of both.
+///
+/// # Example
+///
+/// ```rust
+/// # use cranelift_codegen::ir::types::*;
+/// # use cranelift_codegen::ir::{ExternalName, Function, Signature, InstBuilder};
+/// # use cranelift_codegen::isa::CallConv;
+/// # use cranelift_frontend::{FunctionBuilder, FunctionBuilderContext, Switch};
+/// #
+/// # let mut sig = Signature::new(CallConv::SystemV);
+/// # let mut fn_builder_ctx = FunctionBuilderContext::new();
+/// # let mut func = Function::with_name_signature(ExternalName::user(0, 0), sig);
+/// # let mut builder = FunctionBuilder::new(&mut func, &mut fn_builder_ctx);
+/// #
+/// # let entry = builder.create_block();
+/// # builder.switch_to_block(entry);
+/// #
+/// let block0 = builder.create_block();
+/// let block1 = builder.create_block();
+/// let block2 = builder.create_block();
+/// let fallback = builder.create_block();
+///
+/// let val = builder.ins().iconst(I32, 1);
+///
+/// let mut switch = Switch::new();
+/// switch.set_entry(0, block0);
+/// switch.set_entry(1, block1);
+/// switch.set_entry(7, block2);
+/// switch.emit(&mut builder, val, fallback);
+/// ```
+#[derive(Debug, Default)]
+pub struct Switch {
+ cases: HashMap<EntryIndex, Block>,
+}
+
+impl Switch {
+ /// Create a new empty switch
+ pub fn new() -> Self {
+ Self {
+ cases: HashMap::new(),
+ }
+ }
+
+ /// Set a switch entry
+ pub fn set_entry(&mut self, index: EntryIndex, block: Block) {
+ let prev = self.cases.insert(index, block);
+ assert!(
+ prev.is_none(),
+ "Tried to set the same entry {} twice",
+ index
+ );
+ }
+
+ /// Get a reference to all existing entries
+ pub fn entries(&self) -> &HashMap<EntryIndex, Block> {
+ &self.cases
+ }
+
+ /// Turn the `cases` `HashMap` into a list of `ContiguousCaseRange`s.
+ ///
+ /// # Postconditions
+ ///
+ /// * Every entry will be represented.
+ /// * The `ContiguousCaseRange`s will not overlap.
+    /// * Between two `ContiguousCaseRange`s there will be a gap of at least one index with no entry.
+ /// * No `ContiguousCaseRange`s will be empty.
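+    ///
+    /// For example (illustrative): entries `{0 => b0, 1 => b1, 7 => b2}` collapse into two
+    /// ranges, `ContiguousCaseRange { first_index: 0, blocks: vec![b0, b1] }` and
+    /// `ContiguousCaseRange { first_index: 7, blocks: vec![b2] }`; indices `2..=6` fall
+    /// through to the `otherwise` block.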
+ fn collect_contiguous_case_ranges(self) -> Vec<ContiguousCaseRange> {
+ debug!("build_contiguous_case_ranges before: {:#?}", self.cases);
+ let mut cases = self.cases.into_iter().collect::<Vec<(_, _)>>();
+ cases.sort_by_key(|&(index, _)| index);
+
+ let mut contiguous_case_ranges: Vec<ContiguousCaseRange> = vec![];
+ let mut last_index = None;
+ for (index, block) in cases {
+ match last_index {
+ None => contiguous_case_ranges.push(ContiguousCaseRange::new(index)),
+ Some(last_index) => {
+ if index > last_index + 1 {
+ contiguous_case_ranges.push(ContiguousCaseRange::new(index));
+ }
+ }
+ }
+ contiguous_case_ranges
+ .last_mut()
+ .unwrap()
+ .blocks
+ .push(block);
+ last_index = Some(index);
+ }
+
+ debug!(
+ "build_contiguous_case_ranges after: {:#?}",
+ contiguous_case_ranges
+ );
+
+ contiguous_case_ranges
+ }
+
+ /// Binary search for the right `ContiguousCaseRange`.
+ fn build_search_tree(
+ bx: &mut FunctionBuilder,
+ val: Value,
+ otherwise: Block,
+ contiguous_case_ranges: Vec<ContiguousCaseRange>,
+ ) -> Vec<(EntryIndex, Block, Vec<Block>)> {
+ let mut cases_and_jt_blocks = Vec::new();
+
+ // Avoid allocation in the common case
+ if contiguous_case_ranges.len() <= 3 {
+ Self::build_search_branches(
+ bx,
+ val,
+ otherwise,
+ contiguous_case_ranges,
+ &mut cases_and_jt_blocks,
+ );
+ return cases_and_jt_blocks;
+ }
+
+ let mut stack: Vec<(Option<Block>, Vec<ContiguousCaseRange>)> = Vec::new();
+ stack.push((None, contiguous_case_ranges));
+
+ while let Some((block, contiguous_case_ranges)) = stack.pop() {
+ if let Some(block) = block {
+ bx.switch_to_block(block);
+ }
+
+ if contiguous_case_ranges.len() <= 3 {
+ Self::build_search_branches(
+ bx,
+ val,
+ otherwise,
+ contiguous_case_ranges,
+ &mut cases_and_jt_blocks,
+ );
+ } else {
+ let split_point = contiguous_case_ranges.len() / 2;
+ let mut left = contiguous_case_ranges;
+ let right = left.split_off(split_point);
+
+ let left_block = bx.create_block();
+ let right_block = bx.create_block();
+
+ let first_index = right[0].first_index;
+ let should_take_right_side =
+ icmp_imm_u128(bx, IntCC::UnsignedGreaterThanOrEqual, val, first_index);
+ bx.ins().brnz(should_take_right_side, right_block, &[]);
+ bx.ins().jump(left_block, &[]);
+
+ bx.seal_block(left_block);
+ bx.seal_block(right_block);
+
+ stack.push((Some(left_block), left));
+ stack.push((Some(right_block), right));
+ }
+ }
+
+ cases_and_jt_blocks
+ }
+
+ /// Linear search for the right `ContiguousCaseRange`.
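+    ///
+    /// A sketch of the emitted chain for two hypothetical single-block ranges
+    /// at indices 2 and 0 (ranges are visited highest-first; cf. the
+    /// `switch_two_gap` test below):
+    ///
+    /// ```plain
+    /// v2 = icmp_imm eq val, 2
+    /// brnz v2, block_for_2
+    /// jump fallthrough
+    ///
+    /// fallthrough:
+    ///     brz val, block_for_0  ; index 0 uses a cheaper brz
+    ///     jump otherwise
+    /// ```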
+ fn build_search_branches(
+ bx: &mut FunctionBuilder,
+ val: Value,
+ otherwise: Block,
+ contiguous_case_ranges: Vec<ContiguousCaseRange>,
+ cases_and_jt_blocks: &mut Vec<(EntryIndex, Block, Vec<Block>)>,
+ ) {
+ let mut was_branch = false;
+ let ins_fallthrough_jump = |was_branch: bool, bx: &mut FunctionBuilder| {
+ if was_branch {
+ let block = bx.create_block();
+ bx.ins().jump(block, &[]);
+ bx.seal_block(block);
+ bx.switch_to_block(block);
+ }
+ };
+ for ContiguousCaseRange {
+ first_index,
+ blocks,
+ } in contiguous_case_ranges.into_iter().rev()
+ {
+ match (blocks.len(), first_index) {
+ (1, 0) => {
+ ins_fallthrough_jump(was_branch, bx);
+ bx.ins().brz(val, blocks[0], &[]);
+ }
+ (1, _) => {
+ ins_fallthrough_jump(was_branch, bx);
+ let is_good_val = icmp_imm_u128(bx, IntCC::Equal, val, first_index);
+ bx.ins().brnz(is_good_val, blocks[0], &[]);
+ }
+ (_, 0) => {
+                    // If `first_index` is 0, then `icmp_imm uge val, first_index` is trivially true.
+ let jt_block = bx.create_block();
+ bx.ins().jump(jt_block, &[]);
+ bx.seal_block(jt_block);
+ cases_and_jt_blocks.push((first_index, jt_block, blocks));
+                    // The `jump otherwise` below must not be reached, because the current
+                    // block has already been terminated by the jump above. This is the last
+                    // iteration anyway, as 0 is the smallest unsigned int, so just return here.
+ return;
+ }
+ (_, _) => {
+ ins_fallthrough_jump(was_branch, bx);
+ let jt_block = bx.create_block();
+ let is_good_val =
+ icmp_imm_u128(bx, IntCC::UnsignedGreaterThanOrEqual, val, first_index);
+ bx.ins().brnz(is_good_val, jt_block, &[]);
+ bx.seal_block(jt_block);
+ cases_and_jt_blocks.push((first_index, jt_block, blocks));
+ }
+ }
+ was_branch = true;
+ }
+
+ bx.ins().jump(otherwise, &[]);
+ }
+
+ /// For every item in `cases_and_jt_blocks` this will create a jump table in the specified block.
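+    ///
+    /// For each table, the discriminant is normalized to `val - first_index` so the
+    /// table can be indexed from zero. For values wider than 32 bits it is also
+    /// range-checked against `u32::MAX` (branching to `otherwise` on overflow) and
+    /// reduced to `i32`, since `br_table` only takes 32-bit indices.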
+ fn build_jump_tables(
+ bx: &mut FunctionBuilder,
+ val: Value,
+ otherwise: Block,
+ cases_and_jt_blocks: Vec<(EntryIndex, Block, Vec<Block>)>,
+ ) {
+ for (first_index, jt_block, blocks) in cases_and_jt_blocks.into_iter().rev() {
+            // There are currently no 128-bit targets supported by rustc, but if that ever
+            // changes, make sure we don't silently ignore part of the jump table for
+            // 128-bit integers on such targets.
+ assert!(
+ u32::try_from(blocks.len()).is_ok(),
+ "Jump tables bigger than 2^32-1 are not yet supported"
+ );
+
+ let mut jt_data = JumpTableData::new();
+ for block in blocks {
+ jt_data.push_entry(block);
+ }
+ let jump_table = bx.create_jump_table(jt_data);
+
+ bx.switch_to_block(jt_block);
+            let discr = if first_index == 0 {
+                val
+            } else if let Ok(first_index) = u64::try_from(first_index) {
+                bx.ins().iadd_imm(val, (first_index as i64).wrapping_neg())
+            } else {
+                let (lsb, msb) = (first_index as u64, (first_index >> 64) as u64);
+                let lsb = bx.ins().iconst(types::I64, lsb as i64);
+                let msb = bx.ins().iconst(types::I64, msb as i64);
+                let index = bx.ins().iconcat(lsb, msb);
+                bx.ins().isub(val, index)
+            };
+
+ let discr = if bx.func.dfg.value_type(discr).bits() > 32 {
+ // Check for overflow of cast to u32.
+ let new_block = bx.create_block();
+ let bigger_than_u32 =
+ bx.ins()
+ .icmp_imm(IntCC::UnsignedGreaterThan, discr, u32::max_value() as i64);
+ bx.ins().brnz(bigger_than_u32, otherwise, &[]);
+ bx.ins().jump(new_block, &[]);
+ bx.switch_to_block(new_block);
+
+            // Cast to u32, as br_table is not implemented for integers wider than 32 bits.
+ let discr = if bx.func.dfg.value_type(discr) == types::I128 {
+ bx.ins().isplit(discr).0
+ } else {
+ discr
+ };
+ bx.ins().ireduce(types::I32, discr)
+ } else {
+ discr
+ };
+
+ bx.ins().br_table(discr, otherwise, jump_table);
+ }
+ }
+
+ /// Build the switch
+ ///
+ /// # Arguments
+ ///
+ /// * The function builder to emit to
+ /// * The value to switch on
+ /// * The default block
+ pub fn emit(self, bx: &mut FunctionBuilder, val: Value, otherwise: Block) {
+ // FIXME icmp(_imm) doesn't have encodings for i8 and i16 on x86(_64) yet
+ let val = match bx.func.dfg.value_type(val) {
+ types::I8 | types::I16 => bx.ins().uextend(types::I32, val),
+ _ => val,
+ };
+
+ let contiguous_case_ranges = self.collect_contiguous_case_ranges();
+ let cases_and_jt_blocks =
+ Self::build_search_tree(bx, val, otherwise, contiguous_case_ranges);
+ Self::build_jump_tables(bx, val, otherwise, cases_and_jt_blocks);
+ }
+}
+
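+/// Compare `x` against the 128-bit constant `y`.
+///
+/// A small helper: if `y` fits in an `i64` immediate, this emits a single
+/// `icmp_imm`; otherwise `y` is materialized from two `iconst.i64` halves
+/// joined by `iconcat`, followed by a regular `icmp`.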
+fn icmp_imm_u128(bx: &mut FunctionBuilder, cond: IntCC, x: Value, y: u128) -> Value {
+ if let Ok(index) = u64::try_from(y) {
+ bx.ins().icmp_imm(cond, x, index as i64)
+ } else {
+ let (lsb, msb) = (y as u64, (y >> 64) as u64);
+ let lsb = bx.ins().iconst(types::I64, lsb as i64);
+ let msb = bx.ins().iconst(types::I64, msb as i64);
+ let index = bx.ins().iconcat(lsb, msb);
+ bx.ins().icmp(cond, x, index)
+ }
+}
+
+/// This represents a contiguous range of cases to switch on.
+///
+/// For example 10 => block1, 11 => block2, 12 => block7 will be represented as:
+///
+/// ```plain
+/// ContiguousCaseRange {
+/// first_index: 10,
+/// blocks: vec![Block::from_u32(1), Block::from_u32(2), Block::from_u32(7)]
+/// }
+/// ```
+#[derive(Debug)]
+struct ContiguousCaseRange {
+    /// The entry index of the first case, e.g. 10 when the entry indexes are 10, 11, 12, and 13.
+ first_index: EntryIndex,
+
+    /// The blocks to jump to, sorted in ascending order of entry index.
+ blocks: Vec<Block>,
+}
+
+impl ContiguousCaseRange {
+ fn new(first_index: EntryIndex) -> Self {
+ Self {
+ first_index,
+ blocks: Vec::new(),
+ }
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ use super::*;
+ use crate::frontend::FunctionBuilderContext;
+ use alloc::string::ToString;
+ use cranelift_codegen::ir::Function;
+
+ macro_rules! setup {
+ ($default:expr, [$($index:expr,)*]) => {{
+ let mut func = Function::new();
+ let mut func_ctx = FunctionBuilderContext::new();
+ {
+ let mut bx = FunctionBuilder::new(&mut func, &mut func_ctx);
+ let block = bx.create_block();
+ bx.switch_to_block(block);
+ let val = bx.ins().iconst(types::I8, 0);
+ let mut switch = Switch::new();
+ $(
+ let block = bx.create_block();
+ switch.set_entry($index, block);
+ )*
+ switch.emit(&mut bx, val, Block::with_number($default).unwrap());
+ }
+ func
+ .to_string()
+ .trim_start_matches("function u0:0() fast {\n")
+ .trim_end_matches("\n}\n")
+ .to_string()
+ }};
+ }
+
+ #[test]
+ fn switch_zero() {
+ let func = setup!(0, [0,]);
+ assert_eq!(
+ func,
+ "block0:
+ v0 = iconst.i8 0
+ v1 = uextend.i32 v0
+ brz v1, block1
+ jump block0"
+ );
+ }
+
+ #[test]
+ fn switch_single() {
+ let func = setup!(0, [1,]);
+ assert_eq!(
+ func,
+ "block0:
+ v0 = iconst.i8 0
+ v1 = uextend.i32 v0
+ v2 = icmp_imm eq v1, 1
+ brnz v2, block1
+ jump block0"
+ );
+ }
+
+ #[test]
+ fn switch_bool() {
+ let func = setup!(0, [0, 1,]);
+ assert_eq!(
+ func,
+ " jt0 = jump_table [block1, block2]
+
+block0:
+ v0 = iconst.i8 0
+ v1 = uextend.i32 v0
+ jump block3
+
+block3:
+ br_table.i32 v1, block0, jt0"
+ );
+ }
+
+ #[test]
+ fn switch_two_gap() {
+ let func = setup!(0, [0, 2,]);
+ assert_eq!(
+ func,
+ "block0:
+ v0 = iconst.i8 0
+ v1 = uextend.i32 v0
+ v2 = icmp_imm eq v1, 2
+ brnz v2, block2
+ jump block3
+
+block3:
+ brz.i32 v1, block1
+ jump block0"
+ );
+ }
+
+ #[test]
+ fn switch_many() {
+ let func = setup!(0, [0, 1, 5, 7, 10, 11, 12,]);
+ assert_eq!(
+ func,
+ " jt0 = jump_table [block1, block2]
+ jt1 = jump_table [block5, block6, block7]
+
+block0:
+ v0 = iconst.i8 0
+ v1 = uextend.i32 v0
+ v2 = icmp_imm uge v1, 7
+ brnz v2, block9
+ jump block8
+
+block9:
+ v3 = icmp_imm.i32 uge v1, 10
+ brnz v3, block10
+ jump block11
+
+block11:
+ v4 = icmp_imm.i32 eq v1, 7
+ brnz v4, block4
+ jump block0
+
+block8:
+ v5 = icmp_imm.i32 eq v1, 5
+ brnz v5, block3
+ jump block12
+
+block12:
+ br_table.i32 v1, block0, jt0
+
+block10:
+ v6 = iadd_imm.i32 v1, -10
+ br_table v6, block0, jt1"
+ );
+ }
+
+ #[test]
+ fn switch_min_index_value() {
+ let func = setup!(0, [::core::i64::MIN as u64 as u128, 1,]);
+ assert_eq!(
+ func,
+ "block0:
+ v0 = iconst.i8 0
+ v1 = uextend.i32 v0
+ v2 = icmp_imm eq v1, 0x8000_0000_0000_0000
+ brnz v2, block1
+ jump block3
+
+block3:
+ v3 = icmp_imm.i32 eq v1, 1
+ brnz v3, block2
+ jump block0"
+ );
+ }
+
+ #[test]
+ fn switch_max_index_value() {
+ let func = setup!(0, [::core::i64::MAX as u64 as u128, 1,]);
+ assert_eq!(
+ func,
+ "block0:
+ v0 = iconst.i8 0
+ v1 = uextend.i32 v0
+ v2 = icmp_imm eq v1, 0x7fff_ffff_ffff_ffff
+ brnz v2, block1
+ jump block3
+
+block3:
+ v3 = icmp_imm.i32 eq v1, 1
+ brnz v3, block2
+ jump block0"
+ )
+ }
+
+ #[test]
+ fn switch_optimal_codegen() {
+ let func = setup!(0, [-1i64 as u64 as u128, 0, 1,]);
+ assert_eq!(
+ func,
+ " jt0 = jump_table [block2, block3]
+
+block0:
+ v0 = iconst.i8 0
+ v1 = uextend.i32 v0
+ v2 = icmp_imm eq v1, -1
+ brnz v2, block1
+ jump block4
+
+block4:
+ br_table.i32 v1, block0, jt0"
+ );
+ }
+
+ #[test]
+ fn switch_seal_generated_blocks() {
+ let keys = [0, 1, 2, 10, 11, 12, 20, 30, 40, 50];
+
+ let mut func = Function::new();
+ let mut builder_ctx = FunctionBuilderContext::new();
+ let mut builder = FunctionBuilder::new(&mut func, &mut builder_ctx);
+
+ let root_block = builder.create_block();
+ let default_block = builder.create_block();
+ let mut switch = Switch::new();
+
+ let case_blocks = keys
+ .iter()
+ .map(|key| {
+ let block = builder.create_block();
+ switch.set_entry(*key, block);
+ block
+ })
+ .collect::<Vec<_>>();
+
+ builder.seal_block(root_block);
+ builder.switch_to_block(root_block);
+
+ let val = builder.ins().iconst(types::I32, 1);
+ switch.emit(&mut builder, val, default_block);
+
+ for &block in case_blocks.iter().chain(std::iter::once(&default_block)) {
+ builder.seal_block(block);
+ builder.switch_to_block(block);
+ builder.ins().return_(&[]);
+ }
+
+ builder.finalize(); // Will panic if some blocks are not sealed
+ }
+
+ #[test]
+ fn switch_64bit() {
+ let mut func = Function::new();
+ let mut func_ctx = FunctionBuilderContext::new();
+ {
+ let mut bx = FunctionBuilder::new(&mut func, &mut func_ctx);
+ let block0 = bx.create_block();
+ bx.switch_to_block(block0);
+ let val = bx.ins().iconst(types::I64, 0);
+ let mut switch = Switch::new();
+ let block1 = bx.create_block();
+ switch.set_entry(1, block1);
+ let block2 = bx.create_block();
+ switch.set_entry(0, block2);
+ let block3 = bx.create_block();
+ switch.emit(&mut bx, val, block3);
+ }
+ let func = func
+ .to_string()
+ .trim_start_matches("function u0:0() fast {\n")
+ .trim_end_matches("\n}\n")
+ .to_string();
+ assert_eq!(
+ func,
+ " jt0 = jump_table [block2, block1]
+
+block0:
+ v0 = iconst.i64 0
+ jump block4
+
+block4:
+ v1 = icmp_imm.i64 ugt v0, 0xffff_ffff
+ brnz v1, block3
+ jump block5
+
+block5:
+ v2 = ireduce.i32 v0
+ br_table v2, block3, jt0"
+ );
+ }
+
+ #[test]
+ fn switch_128bit() {
+ let mut func = Function::new();
+ let mut func_ctx = FunctionBuilderContext::new();
+ {
+ let mut bx = FunctionBuilder::new(&mut func, &mut func_ctx);
+ let block0 = bx.create_block();
+ bx.switch_to_block(block0);
+ let val = bx.ins().iconst(types::I128, 0);
+ let mut switch = Switch::new();
+ let block1 = bx.create_block();
+ switch.set_entry(1, block1);
+ let block2 = bx.create_block();
+ switch.set_entry(0, block2);
+ let block3 = bx.create_block();
+ switch.emit(&mut bx, val, block3);
+ }
+ let func = func
+ .to_string()
+ .trim_start_matches("function u0:0() fast {\n")
+ .trim_end_matches("\n}\n")
+ .to_string();
+ assert_eq!(
+ func,
+ " jt0 = jump_table [block2, block1]
+
+block0:
+ v0 = iconst.i128 0
+ jump block4
+
+block4:
+ v1 = icmp_imm.i128 ugt v0, 0xffff_ffff
+ brnz v1, block3
+ jump block5
+
+block5:
+ v2, v3 = isplit.i128 v0
+ v4 = ireduce.i32 v2
+ br_table v4, block3, jt0"
+ );
+ }
+}
diff --git a/third_party/rust/cranelift-frontend/src/variable.rs b/third_party/rust/cranelift-frontend/src/variable.rs
new file mode 100644
index 0000000000..9a40b9dfe9
--- /dev/null
+++ b/third_party/rust/cranelift-frontend/src/variable.rs
@@ -0,0 +1,35 @@
+//! A basic `Variable` implementation.
+//!
+//! Frontends can use any indexing scheme they see fit and
+//! generate the appropriate `Variable` instances.
+//!
+//! Note: `Variable` is used by Cranelift to index into densely allocated
+//! arrays containing information about your mutable variables.
+//! Thus, make sure that `Variable` indexes are allocated contiguously,
+//! starting at `0`.
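+//!
+//! A minimal sketch of the intended allocation pattern, using only the
+//! `Variable` and `EntityRef` APIs defined here:
+//!
+//! ```
+//! use cranelift_codegen::entity::EntityRef;
+//! use cranelift_frontend::Variable;
+//!
+//! // Allocate variable indexes densely, starting at 0.
+//! let vars: Vec<Variable> = (0usize..4).map(Variable::new).collect();
+//! assert_eq!(vars[3].index(), 3);
+//! ```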
+
+use core::u32;
+use cranelift_codegen::entity::EntityRef;
+
+/// An opaque reference to a variable.
+#[derive(Copy, Clone, PartialEq, Eq, Debug)]
+pub struct Variable(u32);
+
+impl Variable {
+ /// Create a new Variable with the given index.
+ pub fn with_u32(index: u32) -> Self {
+ debug_assert!(index < u32::MAX);
+ Self(index)
+ }
+}
+
+impl EntityRef for Variable {
+ fn new(index: usize) -> Self {
+ debug_assert!(index < (u32::MAX as usize));
+ Self(index as u32)
+ }
+
+ fn index(self) -> usize {
+ self.0 as usize
+ }
+}