Diffstat (limited to 'vendor/psm')
-rw-r--r--  vendor/psm/.cargo-checksum.json | 1
-rw-r--r--  vendor/psm/Cargo.lock | 16
-rw-r--r--  vendor/psm/Cargo.toml | 26
-rw-r--r--  vendor/psm/LICENSE-APACHE | 201
-rw-r--r--  vendor/psm/LICENSE-MIT | 16
-rw-r--r--  vendor/psm/README.mkd | 495
-rw-r--r--  vendor/psm/build.rs | 107
-rw-r--r--  vendor/psm/examples/info.rs | 20
-rw-r--r--  vendor/psm/examples/on_stack_fibo.rs | 77
-rw-r--r--  vendor/psm/examples/on_stack_fibo_alloc_each_frame.rs | 53
-rw-r--r--  vendor/psm/examples/panics.rs | 52
-rw-r--r--  vendor/psm/examples/replace_stack_1.rs | 33
-rw-r--r--  vendor/psm/examples/thread.rs | 60
-rw-r--r--  vendor/psm/src/arch/aarch64_armasm.asm | 38
-rw-r--r--  vendor/psm/src/arch/aarch_aapcs64.s | 85
-rw-r--r--  vendor/psm/src/arch/arm_aapcs.s | 106
-rw-r--r--  vendor/psm/src/arch/arm_armasm.asm | 39
-rw-r--r--  vendor/psm/src/arch/mips64_eabi.s | 87
-rw-r--r--  vendor/psm/src/arch/mips_eabi.s | 88
-rw-r--r--  vendor/psm/src/arch/powerpc32.s | 76
-rw-r--r--  vendor/psm/src/arch/powerpc64.s | 90
-rw-r--r--  vendor/psm/src/arch/powerpc64_openpower.s | 86
-rw-r--r--  vendor/psm/src/arch/psm.h | 10
-rw-r--r--  vendor/psm/src/arch/riscv.s | 64
-rw-r--r--  vendor/psm/src/arch/riscv64.s | 64
-rw-r--r--  vendor/psm/src/arch/sparc64.s | 67
-rw-r--r--  vendor/psm/src/arch/sparc_sysv.s | 65
-rw-r--r--  vendor/psm/src/arch/wasm32.s | 60
-rw-r--r--  vendor/psm/src/arch/x86.s | 91
-rw-r--r--  vendor/psm/src/arch/x86_64.s | 87
-rw-r--r--  vendor/psm/src/arch/x86_64_msvc.asm | 61
-rw-r--r--  vendor/psm/src/arch/x86_64_windows_gnu.s | 95
-rw-r--r--  vendor/psm/src/arch/x86_msvc.asm | 70
-rw-r--r--  vendor/psm/src/arch/x86_windows_gnu.s | 94
-rw-r--r--  vendor/psm/src/arch/zseries_linux.s | 75
-rw-r--r--  vendor/psm/src/lib.rs | 406
-rw-r--r--  vendor/psm/tests/stack_direction.rs | 6
-rw-r--r--  vendor/psm/tests/stack_direction_2.rs | 29
38 files changed, 3196 insertions, 0 deletions
diff --git a/vendor/psm/.cargo-checksum.json b/vendor/psm/.cargo-checksum.json
new file mode 100644
index 000000000..a69778b20
--- /dev/null
+++ b/vendor/psm/.cargo-checksum.json
@@ -0,0 +1 @@
+{"files":{"Cargo.lock":"2928b712f89aee2b62581df1e552b7cb8288d999ba180291b4900b86a05c6d8d","Cargo.toml":"c2c5a0154a80cffc82349cd98f819ea1259c92f195c5878ceefb66e06b14d28c","LICENSE-APACHE":"965a63a81d9a2fbeb5f9096954dabb49690f9dffcdac9825f675b25c807252a2","LICENSE-MIT":"3e3714aa69bd874601741fd7d7ad5298740cece37778e279fc1ab4451c5a11af","README.mkd":"6385ecaced99b0a29a5b40166d34ef9312f322c1b8ad002bef3b08cd6c3e29b2","build.rs":"1d7872546e6924bbb2947edc055ddd01f48683cc80d9d75a846adb65540345f0","examples/info.rs":"8ffb89912304ecbf3d714dcc094f42e86fdd0738625b2e76be2e7d59ab0736cf","examples/on_stack_fibo.rs":"287f0a08b177a97366a5da39e24e33e1f4bbe30a1f2473956721c8a9d93926a4","examples/on_stack_fibo_alloc_each_frame.rs":"e084041bbb81d51b195a4db539a765409272916df29c83a62213a93de4b6fca3","examples/panics.rs":"6791fe0dda9456b3becf989cbc89bc45ae27302e633572a57bbf10a57b830076","examples/replace_stack_1.rs":"374a28881f5e5dbf9db9b9e34929fb7a7e6f3910d782a6718f53ac269807b990","examples/thread.rs":"3cf92882aff96151608584d63535701cc8e5ae953d7ecf706d77371180bff025","src/arch/aarch64_armasm.asm":"1c737338287f3de981fbae97f104ac5e49425ba7fbcb4f7d80120afae47a86d5","src/arch/aarch_aapcs64.s":"459b8cd5a96104893e8f849ac83369101d7204c933841672df162104bebd2375","src/arch/arm_aapcs.s":"4ada635e8528279bd0326654f5203b6bdc94dd68c94fdef5de551384ba1b1747","src/arch/arm_armasm.asm":"e3b514169f19368b0b49374412de38bd9f50576e7b93b64d685a0f84fa8f4c91","src/arch/mips64_eabi.s":"4e6f95f89ba72fc4dd1a9a547920764f66d98251d236941cee4d227010484520","src/arch/mips_eabi.s":"8b7927fd63660eb276e2951f28df6b11920f04be4dc17a16b66ad386da12c4c3","src/arch/powerpc32.s":"0b508a65dec7254ba2e0dc65a2c9e86c21069fe62f5d7c41f5190415a4885912","src/arch/powerpc64.s":"c1602d09d12ba1df48fc96af0f827f8679fc93cee728813527fb1b817a788911","src/arch/powerpc64_openpower.s":"421b11cc7381427d1e2acb4e681c9836ccfea0b79930492f0a99ec4d27495e58","src/arch/psm.h":"2cebda3740aa73b167b8ec18e3d2202ca46e400a081a46329b86051abd1a872a","src/arch/riscv.s":"a81d2af4bcc9c29db304730697e52a89a7376b51d2735185c67be8910d0cdf39","src/arch/riscv64.s":"a51da67ce569e2442ff487b062bb8fdfe7c769f3f05a88de480bd5ab214d9a4f","src/arch/sparc64.s":"6250acbd938aea2e440061663a79fbb2dac0592b3a193f027b6b910e2a8e3af1","src/arch/sparc_sysv.s":"c2da7576e1fbe2234cc8a5cf937f7676e125435295f8c32089bfa0b0f27fde5e","src/arch/wasm32.o":"d7279f419cb7e169cae2af2463507652e0393b801c2f4580244de70d3def58b6","src/arch/wasm32.s":"1ebdc90de48f13e6474ee17c406578fc090ff61e57c1f560ecf6e6b75c7ef10a","src/arch/x86.s":"1919a4af1474895f904ed4281a4a8fcdd0428dab257bff4ea262db83ed63b445","src/arch/x86_64.s":"c80f1a3e22db61fd62b5ef2e1b6663185403bdcbcfbfe7ff0f8e0831ff0cafcf","src/arch/x86_64_msvc.asm":"85683bc65a03371ea7d8d79dcbe487f690cc2460c359817fc63c30d575ad8957","src/arch/x86_64_windows_gnu.s":"44637034e094ec0ad76dbe1232e97271c8155eb93bcb1dd86fe825acd05978a0","src/arch/x86_msvc.asm":"1735d4b19f8e46d0699fc9538baa7ab0885d27531ef7d9960e2027ad8137769b","src/arch/x86_windows_gnu.s":"b94d907a86f230c5c8ca1c708ede173f73c5269496f3959e08e4a92155e160d7","src/arch/zseries_linux.s":"5c3379a76e31bf13abf240efda12596fabce108cf63f60f9d0495e82ab8f1717","src/lib.rs":"18774ee37630bc6c7a36273014f80f6afa3f73bf34f4c49f5795d2eb5df1c195","tests/stack_direction.rs":"77d8f9dee196e99e70d569f59fef82bc2f88a8ec17bfe07ebe2f005fcb815c8b","tests/stack_direction_2.rs":"f9191394de5b6381af6ba8223e7717230059dc335f639238c0ddbc7eb87bfc0e"},"package":"cd136ff4382c4753fc061cb9e4712ab2af263376b95bbd5bd8cd50c020b78e69"} \ No newline at end of file
diff --git a/vendor/psm/Cargo.lock b/vendor/psm/Cargo.lock
new file mode 100644
index 000000000..8473e9f72
--- /dev/null
+++ b/vendor/psm/Cargo.lock
@@ -0,0 +1,16 @@
+# This file is automatically @generated by Cargo.
+# It is not intended for manual editing.
+version = 3
+
+[[package]]
+name = "cc"
+version = "1.0.69"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "e70cc2f62c6ce1868963827bd677764c62d07c3d9a3e1fb1177ee1a9ab199eb2"
+
+[[package]]
+name = "psm"
+version = "0.1.16"
+dependencies = [
+ "cc",
+]
diff --git a/vendor/psm/Cargo.toml b/vendor/psm/Cargo.toml
new file mode 100644
index 000000000..cf2c97b3f
--- /dev/null
+++ b/vendor/psm/Cargo.toml
@@ -0,0 +1,26 @@
+# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO
+#
+# When uploading crates to the registry Cargo will automatically
+# "normalize" Cargo.toml files for maximal compatibility
+# with all versions of Cargo and also rewrite `path` dependencies
+# to registry (e.g., crates.io) dependencies.
+#
+# If you are reading this file be aware that the original Cargo.toml
+# will likely look very different (and much more reasonable).
+# See Cargo.toml.orig for the original contents.
+
+[package]
+name = "psm"
+version = "0.1.16"
+authors = ["Simonas Kazlauskas <psm@kazlauskas.me>"]
+build = "build.rs"
+description = "Portable Stack Manipulation: stack manipulation and introspection routines"
+documentation = "https://docs.rs/psm/0.1.14"
+readme = "README.mkd"
+keywords = ["stack", "no_std"]
+license = "MIT/Apache-2.0"
+repository = "https://github.com/rust-lang/stacker/"
+
+[dependencies]
+[build-dependencies.cc]
+version = "1.0.2"
diff --git a/vendor/psm/LICENSE-APACHE b/vendor/psm/LICENSE-APACHE
new file mode 100644
index 000000000..a3711ba60
--- /dev/null
+++ b/vendor/psm/LICENSE-APACHE
@@ -0,0 +1,201 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+END OF TERMS AND CONDITIONS
+
+APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+Copyright [yyyy] [name of copyright owner]
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
diff --git a/vendor/psm/LICENSE-MIT b/vendor/psm/LICENSE-MIT
new file mode 100644
index 000000000..5f991a00b
--- /dev/null
+++ b/vendor/psm/LICENSE-MIT
@@ -0,0 +1,16 @@
+Copyright © 2018, Simonas Kazlauskas
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of this software and
+associated documentation files (the "Software"), to deal in the Software without restriction,
+including without limitation the rights to use, copy, modify, merge, publish, distribute,
+sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all copies or
+substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT
+NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
+DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT
+OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/vendor/psm/README.mkd b/vendor/psm/README.mkd
new file mode 100644
index 000000000..441763873
--- /dev/null
+++ b/vendor/psm/README.mkd
@@ -0,0 +1,495 @@
+# Portable Stack Manipulation
+
+This crate provides very portable functions to control the stack pointer and inspect the properties
+of the stack. This crate does not attempt to provide safe abstractions for any operations; the
+only goals are correctness, portability and efficiency (in that exact order). As a consequence, most
+functions you’ll see in this crate are unsafe.
+
+Unless you’re writing a safe abstraction over stack manipulation, this is not the crate you
+want. Instead, consider one of the safe abstractions built on top of this crate. A good place to
+look is crates.io’s reverse dependency list.
+
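+As a minimal sketch of the kind of inspection and switching this crate exposes (mirroring the calls
+made in `examples/info.rs` and `examples/on_stack_fibo.rs`; the portable gating macros used by those
+examples are omitted here, so this assumes a target where the assembly support is available):
+
+```rust
+extern crate psm;
+
+use std::alloc;
+
+fn main() {
+    // Inspect the current stack.
+    println!("stack grows {:?}, sp = {:p}",
+             psm::StackDirection::new(), psm::stack_pointer());
+
+    // Run a closure on a freshly allocated stack. `on_stack` is unsafe: the caller must
+    // provide a sufficiently large and suitably aligned allocation.
+    const SIZE: usize = 64 * 4096;
+    const ALIGN: usize = 4096;
+    unsafe {
+        let layout = alloc::Layout::from_size_align(SIZE, ALIGN).unwrap();
+        let stack = alloc::alloc(layout);
+        assert!(!stack.is_null(), "allocations must succeed!");
+        let answer = psm::on_stack(stack, SIZE, || 6 * 7);
+        alloc::dealloc(stack, layout);
+        assert_eq!(answer, 42);
+    }
+}
+```
+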
+# Platform support
+
+The following table lists supported targets and architectures with notes on the level of current
+support and knowledge about the target. The three columns “Available”, “Tested” and “Callstack”
+imply an increasingly high level of support.
+
+* “Available” basically means that the code builds and the assembly files have been written for the
+ target;
+* “Tested” means that the assembly code has been tested or otherwise verified to be correct. For
+ most targets it also means that continuous integration is set up;
+* “Callstack” means that the assembly code has been written with due care to support unwinding the
+ stack and displaying the call frames (i.e. `gdb backtrace` works as expected).
+
+<table>
+<tr>
+<th rowspan="1" colspan="2">Target</th>
+<th colspan="3">Support</th>
+</tr>
+<tr>
+<th rowspan="2">Architecture</th>
+<th rowspan="2">OS</th>
+<th>Available</th>
+<th>Tested</th>
+<th>Callstack</th>
+</tr>
+<tr>
+<th colspan="3">Notes</th>
+</tr>
+
+<tr>
+<td rowspan="6">x86_64</td>
+<td rowspan="2">apple-ios</td>
+<td>Yes</td>
+<td>Yes</td>
+<td>Yes</td>
+</tr>
+<tr>
+<td colspan="3">
+
+Target has been tested locally.
+
+</td>
+</tr>
+<tr>
+<td rowspan="2">windows</td>
+<td>Yes</td>
+<td>Yes</td>
+<td>Yes, but disabled</td>
+</tr>
+<tr>
+<td colspan="3">
+
+Stacks allocated the usual way are not valid for use on Windows, and the functions to allocate a
+stack properly are a Windows implementation detail. As an (unnecessarily slow and inflexible)
+alternative, use [Fibers][fibers].
+
+</td>
+</tr>
+<tr>
+<td rowspan="2">&#42;</td>
+<td>Yes</td>
+<td>Yes</td>
+<td>Yes</td>
+</tr>
+<tr>
+<td></td>
+</tr>
+
+<tr>
+<td rowspan="8">i686<br>i586<br>i386</td>
+<td rowspan="2">apple-ios</td>
+<td>Yes</td>
+<td>Unknown</td>
+<td>Unknown</td>
+</tr>
+<tr>
+<td colspan="3">
+</td>
+</tr>
+<tr>
+<td rowspan="2">linux-android</td>
+<td>Unknown</td>
+<td>Unknown</td>
+<td>Unknown</td>
+</tr>
+<tr>
+<td colspan="3">
+
+The assembly files are available, but the target hasn’t been verified to build
+
+</td>
+</tr>
+<tr>
+<td rowspan="2">windows</td>
+<td>No</td>
+<td>No</td>
+<td>No</td>
+</tr>
+<tr>
+<td colspan="3">
+
+The code technically works on my local machine, but exception handling does not work correctly on
+AppVeyor, which makes me unwilling to mark this as working yet.
+
+Stacks allocated the usual way are not valid for use on Windows, and the functions to allocate a
+stack properly are a Windows implementation detail. As an (unnecessarily slow and inflexible)
+alternative, use [Fibers][fibers].
+
+</td>
+</tr>
+<tr>
+<td rowspan="2">&#42;</td>
+<td>Yes</td>
+<td>Yes</td>
+<td>Yes</td>
+</tr>
+<tr>
+<td colspan="3">
+</td>
+</tr>
+
+<tr>
+<td rowspan="8">aarch64</td>
+<td rowspan="2">apple-ios</td>
+<td>Yes</td>
+<td>Unknown</td>
+<td>Yes</td>
+</tr>
+<tr>
+<td colspan="3">
+
+aarch64-apple-ios has not been tested. iOS hardware is necessary to run these tests.
+
+</td>
+</tr>
+<tr>
+<td rowspan="2">fuchsia<br>unknown-cloudabi</td>
+<td>Unknown</td>
+<td>Unknown</td>
+<td>Yes</td>
+</tr>
+<tr>
+<td colspan="3">
+</td>
+</tr>
+<tr>
+<td rowspan="2">windows</td>
+<td>No</td>
+<td>No</td>
+<td>No</td>
+</tr>
+<tr>
+<td colspan="3">
+
+Stacks allocated the usual way are not valid for use on Windows, and the functions to allocate a
+stack properly are a Windows implementation detail. As an (unnecessarily slow and inflexible)
+alternative, use [Fibers][fibers].
+
+</td>
+</tr>
+<tr>
+<td rowspan="2">&#42;</td>
+<td>Yes</td>
+<td>Yes</td>
+<td>Yes</td>
+</tr>
+<tr>
+<td colspan="3">
+</td>
+</tr>
+
+
+<tr>
+<td rowspan="6">arm<br>armv7</td>
+<td rowspan="2">apple-ios</td>
+<td>Yes</td>
+<td>Unknown</td>
+<td>Unknown</td>
+</tr>
+<tr>
+<td colspan="3">
+
+armv7-apple-ios has not been tested. iOS hardware is necessary to run these tests.
+
+</td>
+</tr>
+<tr>
+<td rowspan="2">windows</td>
+<td>No</td>
+<td>No</td>
+<td>No</td>
+</tr>
+<tr>
+<td colspan="3">
+
+Stacks allocated the usual way are not valid for use on Windows, and the functions to allocate a
+stack properly are a Windows implementation detail. As an (unnecessarily slow and inflexible)
+alternative, use [Fibers][fibers].
+
+</td>
+</tr>
+<tr>
+<td rowspan="2">&#42;</td>
+<td>Yes</td>
+<td>Yes</td>
+<td>Yes</td>
+</tr>
+<tr>
+<td colspan="3">
+</td>
+</tr>
+
+<tr>
+<td rowspan="2">armv5te</td>
+<td rowspan="2">&#42;</td>
+<td>Unknown</td>
+<td>Unknown</td>
+<td>Unknown</td>
+</tr>
+<tr>
+<td colspan="3">
+</td>
+</tr>
+
+<tr>
+<td rowspan="2">thumbv6<br>thumbv7</td>
+<td rowspan="2">&#42;</td>
+<td>Unknown</td>
+<td>Unknown</td>
+<td>Unknown</td>
+</tr>
+<tr>
+<td colspan="3">
+</td>
+</tr>
+
+<tr>
+<td rowspan="2">mips<br>mipsel</td>
+<td rowspan="2">linux</td>
+<td>Yes</td>
+<td>Yes</td>
+<td>Yes</td>
+</tr>
+<tr>
+<td colspan="3">
+
+Only the o32 ABI is supported and will be used for all 32-bit MIPS targets.
+
+</td>
+</tr>
+
+<tr>
+<td rowspan="2">mips64<br>mips64el</td>
+<td rowspan="2">linux</td>
+<td>Yes</td>
+<td>Yes</td>
+<td>Yes</td>
+</tr>
+<tr>
+<td colspan="3">
+</td>
+</tr>
+
+<tr>
+<td rowspan="2">powerpc</td>
+<td rowspan="2">linux</td>
+<td>Yes</td>
+<td>Yes</td>
+<td>Yes</td>
+</tr>
+<tr>
+<td colspan="3">
+
+Callstack generation may fail at certain well defined ranges of the program, although the usual
+compiler-generated code fails at similar points itself.
+
+</td>
+</tr>
+
+<tr>
+<td rowspan="2">powerpc64</td>
+<td rowspan="2">linux</td>
+<td>Yes</td>
+<td>Yes</td>
+<td>Yes</td>
+</tr>
+<tr>
+<td colspan="3">
+
+Callstack generation may fail at certain well defined ranges of the program, although the usual
+compiler-generated code fails at similar points itself.
+
+</td>
+</tr>
+
+<tr>
+<td rowspan="2">powerpc64le</td>
+<td rowspan="2">linux</td>
+<td>Yes</td>
+<td>Yes</td>
+<td>Yes</td>
+</tr>
+<tr>
+<td colspan="3">
+
+Callstack generation may fail at certain well defined ranges of the program, although the usual
+compiler-generated code fails at similar points itself.
+
+</td>
+</tr>
+
+<tr>
+<td rowspan="2">s390x</td>
+<td rowspan="2">linux</td>
+<td>Yes</td>
+<td>Locally</td>
+<td>Yes</td>
+</tr>
+<tr>
+<td colspan="3">
+
+The test runner on CI hangs; local verification has been done on a qemu-system-s390x VM. It may be
+possible to add CI testing in the future via qemu’s full-system emulation.
+
+</td>
+</tr>
+
+<tr>
+<td rowspan="2">sparc</td>
+<td rowspan="2">linux</td>
+<td>Unknown</td>
+<td>Unknown</td>
+<td>Unknown</td>
+</tr>
+<tr>
+<td colspan="3">
+
+A Rust target for 32-bit SPARC exists, but no Linux distribution actually ships a 32-bit SPARC
+port, so verification is infeasible.
+
+The assembly code has been written conservatively, modelled after the 64-bit SPARC code, and so
+has a non-zero chance of working.
+
+</td>
+</tr>
+
+<tr>
+<td rowspan="2">sparc64</td>
+<td rowspan="2">linux</td>
+<td>Yes</td>
+<td>Locally</td>
+<td>Yes</td>
+</tr>
+<tr>
+<td colspan="3">
+
+Has been manually verified to work on the [GCC Farm Project] machines. It may be possible to
+add CI testing in the future via qemu’s full-system emulation.
+
+</td>
+</tr>
+
+<tr>
<td rowspan="2">sparcv9</td>
+<td rowspan="2">solaris</td>
+<td>Yes</td>
+<td>Unknown</td>
+<td>Unknown</td>
+</tr>
+<tr>
+<td colspan="3">
+
+Uses the same assembly as the sparc64-linux-gnu target. This target has no rustc builds, and
+therefore its correct operation could not be verified at the moment.
+
+</td>
+</tr>
+
+<tr>
+<td rowspan="2">wasm</td>
+<td rowspan="2">&#42;</td>
+<td>No</td>
+<td>No</td>
+<td>No</td>
+</tr>
+<tr>
+<td colspan="3">
+
+This library is not applicable to the target. WASM does not have a specified C ABI; the call stack
+is not even in an address space and does not appear to be manipulable.
+
+</td>
+</tr>
+
+<tr>
+<td rowspan="2">asmjs</td>
+<td rowspan="2">&#42;</td>
+<td>No</td>
+<td>No</td>
+<td>No</td>
+</tr>
+<tr>
+<td colspan="3">
+
+Feasibility/necessity hasn’t been ascertained.
+
+</td>
+</tr>
+
+<tr>
+<td rowspan="2">nvptx</td>
+<td rowspan="2">&#42;</td>
+<td>No</td>
+<td>No</td>
+<td>No</td>
+</tr>
+<tr>
+<td colspan="3">
+
+Feasibility/necessity hasn’t been ascertained.
+
+</td>
+</tr>
+
+<tr>
+<td rowspan="2">msp430</td>
+<td rowspan="2">&#42;</td>
+<td>No</td>
+<td>No</td>
+<td>No</td>
+</tr>
+<tr>
+<td colspan="3">
+
+Haven’t gotten to it yet...
+
+</td>
+</tr>
+
+<tr>
+<td rowspan="2">riscv32</td>
+<td rowspan="2">&#42;</td>
+<td>Yes</td>
+<td>No</td>
+<td>Unknown</td>
+</tr>
+<tr>
+<td colspan="3">
+
+Although the assembly code has not been tested, it is a straightforward copy of the 64-bit version.
+Unless there is a non-obvious mistake, this should work fine.
+
+</td>
+</tr>
+
+<tr>
+<td rowspan="2">riscv64</td>
+<td rowspan="2">&#42;</td>
+<td>Yes</td>
+<td>Locally</td>
+<td>Unknown</td>
+</tr>
+<tr>
+<td colspan="3">
+
+The assembly code for riscv64 has been tested locally with a C caller.
+
+</td>
+</tr>
+</table>
+
+[GCC Farm Project]: https://cfarm.tetaneutral.net/
+[fibers]: https://docs.microsoft.com/en-gb/windows/desktop/ProcThread/fibers
+
+# License
+
+PSM is licensed under either of
+
+ * Apache License, Version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
+ http://www.apache.org/licenses/LICENSE-2.0)
+ * MIT license ([LICENSE-MIT](LICENSE-MIT) or
+ http://opensource.org/licenses/MIT)
+
+at your option.
diff --git a/vendor/psm/build.rs b/vendor/psm/build.rs
new file mode 100644
index 000000000..01a13bf71
--- /dev/null
+++ b/vendor/psm/build.rs
@@ -0,0 +1,107 @@
+extern crate cc;
+
+fn find_assembly(
+ arch: &str,
+ endian: &str,
+ os: &str,
+ env: &str,
+ masm: bool,
+) -> Option<(&'static str, bool)> {
+ match (arch, endian, os, env) {
+        // The implementations for stack switching exist, but, officially, doing so without Fibers
+        // is not supported on Windows. For x86_64 the implementation actually works locally,
+        // but fails tests in CI (???). Might want to have a feature for experimental support
+        // here.
+ ("x86", _, "windows", "msvc") => {
+ if masm {
+ Some(("src/arch/x86_msvc.asm", false))
+ } else {
+ Some(("src/arch/x86_windows_gnu.s", false))
+ }
+ }
+ ("x86_64", _, "windows", "msvc") => {
+ if masm {
+ Some(("src/arch/x86_64_msvc.asm", false))
+ } else {
+ Some(("src/arch/x86_64_windows_gnu.s", false))
+ }
+ }
+ ("arm", _, "windows", "msvc") => Some(("src/arch/arm_armasm.asm", false)),
+ ("aarch64", _, "windows", "msvc") => {
+ if masm {
+ Some(("src/arch/aarch64_armasm.asm", false))
+ } else {
+ Some(("src/arch/aarch_aapcs64.s", false))
+ }
+ }
+ ("x86", _, "windows", _) => Some(("src/arch/x86_windows_gnu.s", false)),
+ ("x86_64", _, "windows", _) => Some(("src/arch/x86_64_windows_gnu.s", false)),
+ ("x86", _, _, _) => Some(("src/arch/x86.s", true)),
+ ("x86_64", _, _, _) => Some(("src/arch/x86_64.s", true)),
+ ("arm", _, _, _) => Some(("src/arch/arm_aapcs.s", true)),
+ ("aarch64", _, _, _) => Some(("src/arch/aarch_aapcs64.s", true)),
+ ("powerpc", _, _, _) => Some(("src/arch/powerpc32.s", true)),
+ ("powerpc64", "little", _, _) => Some(("src/arch/powerpc64_openpower.s", true)),
+ ("powerpc64", _, _, _) => Some(("src/arch/powerpc64.s", true)),
+ ("s390x", _, _, _) => Some(("src/arch/zseries_linux.s", true)),
+ ("mips", _, _, _) => Some(("src/arch/mips_eabi.s", true)),
+ ("mips64", _, _, _) => Some(("src/arch/mips64_eabi.s", true)),
+ ("sparc64", _, _, _) => Some(("src/arch/sparc64.s", true)),
+ ("sparc", _, _, _) => Some(("src/arch/sparc_sysv.s", true)),
+ ("riscv32", _, _, _) => Some(("src/arch/riscv.s", true)),
+ ("riscv64", _, _, _) => Some(("src/arch/riscv64.s", true)),
+ ("wasm32", _, _, _) => Some(("src/arch/wasm32.o", true)),
+ _ => None,
+ }
+}
+
+fn main() {
+ let arch = ::std::env::var("CARGO_CFG_TARGET_ARCH").unwrap();
+ let env = ::std::env::var("CARGO_CFG_TARGET_ENV").unwrap();
+ let os = ::std::env::var("CARGO_CFG_TARGET_OS").unwrap();
+ let endian = ::std::env::var("CARGO_CFG_TARGET_ENDIAN").unwrap();
+
+ // We are only assembling a single file and any flags in the environment probably
+ // don't apply in this case, so we don't want to use them. Unfortunately, cc
+ // doesn't provide a way to clear/ignore flags set from the environment, so
+ // we manually remove them instead
+ for key in
+ std::env::vars().filter_map(|(k, _)| if k.contains("CFLAGS") { Some(k) } else { None })
+ {
+ std::env::remove_var(key);
+ }
+
+ let mut cfg = cc::Build::new();
+ let msvc = cfg.get_compiler().is_like_msvc();
+ let asm = if let Some((asm, canswitch)) = find_assembly(&arch, &endian, &os, &env, msvc) {
+ println!("cargo:rustc-cfg=asm");
+ if canswitch {
+ println!("cargo:rustc-cfg=switchable_stack")
+ }
+ asm
+ } else {
+ println!(
+ "cargo:warning=Target {}-{}-{} has no assembly files!",
+ arch, os, env
+ );
+ return;
+ };
+
+ if !msvc {
+ cfg.flag("-xassembler-with-cpp");
+ cfg.define(&*format!("CFG_TARGET_OS_{}", os), None);
+ cfg.define(&*format!("CFG_TARGET_ARCH_{}", arch), None);
+ cfg.define(&*format!("CFG_TARGET_ENV_{}", env), None);
+ }
+
+ // For wasm targets we ship a precompiled `*.o` file so we just pass that
+ // directly to `ar` to assemble an archive. Otherwise we're actually
+ // compiling the source assembly file.
+ if asm.ends_with(".o") {
+ cfg.object(asm);
+ } else {
+ cfg.file(asm);
+ }
+
+ cfg.compile("libpsm_s.a");
+}
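The `cargo:rustc-cfg=asm` and `cargo:rustc-cfg=switchable_stack` lines emitted above become ordinary
`cfg` predicates in the crate being built. A hedged sketch of how such build-script cfgs are
typically consumed (an illustration only, not the actual contents of `src/lib.rs`;
`rust_psm_stack_pointer` is one of the symbols defined by the assembly files in `src/arch/`):

```rust
// Declared only when build.rs found an assembly file for the target.
#[cfg(asm)]
extern "C" {
    fn rust_psm_stack_pointer() -> *mut u8;
}

// Thin wrapper over the assembly routine; only compiled when the cfg is set.
#[cfg(asm)]
pub fn stack_pointer() -> *mut u8 {
    unsafe { rust_psm_stack_pointer() }
}

// Stack-switching entry points would additionally be gated on `switchable_stack`.
```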
diff --git a/vendor/psm/examples/info.rs b/vendor/psm/examples/info.rs
new file mode 100644
index 000000000..46a619b68
--- /dev/null
+++ b/vendor/psm/examples/info.rs
@@ -0,0 +1,20 @@
+extern crate psm;
+
+psm::psm_stack_information! {
+ yes {
+ fn main() {
+ println!("Stack is {:?} and is at {:p} currently",
+ psm::StackDirection::new(), psm::stack_pointer());
+ }
+ }
+ no {
+ fn main() {
+ eprintln!("Stack information not supported by this target");
+ }
+ }
+}
+
+#[test]
+fn run_example() {
+ main();
+}
diff --git a/vendor/psm/examples/on_stack_fibo.rs b/vendor/psm/examples/on_stack_fibo.rs
new file mode 100644
index 000000000..07e0338da
--- /dev/null
+++ b/vendor/psm/examples/on_stack_fibo.rs
@@ -0,0 +1,77 @@
+extern crate psm;
+
+psm::psm_stack_manipulation! {
+ yes {
+ use std::alloc;
+
+ #[inline(never)]
+ fn fib(n: usize, stack_limit: *mut u8) -> Option<u64> {
+ // match psm::StackDirection::new() {
+ // psm::StackDirection::Ascending => if psm::stack_pointer() > stack_limit {
+ // return None;
+ // }
+ // psm::StackDirection::Descending => if psm::stack_pointer() < stack_limit {
+ // return None;
+ // }
+ // }
+
+ match n {
+ 0 => Some(0),
+ 1 => Some(1),
+ _ => fib(n - 1, stack_limit).and_then(|x| fib(n - 2, stack_limit).map(|y| x + y)),
+ }
+ }
+
+ const STACK_ALIGN: usize = 4096;
+ const STACK_REDLINE: usize = 512;
+ const FIB_COUNTS: [(usize, u64); 3] = [
+ (8, 21),
+ (16, 987),
+ (32, 2178309),
+ ];
+
+
+ fn main() {
+ let mut stack_size = 1024 * 128;
+ unsafe {
+ for &(n, expected) in FIB_COUNTS.iter() {
+ loop {
+ println!("fib({}) with {} bytes of stack", n, stack_size - STACK_REDLINE);
+ let layout = alloc::Layout::from_size_align(stack_size, STACK_ALIGN).unwrap();
+ let new_stack = alloc::alloc(layout);
+ assert!(!new_stack.is_null(), "allocations must succeed!");
+ let max_stack = match psm::StackDirection::new() {
+ psm::StackDirection::Ascending =>
+ new_stack.offset((stack_size - STACK_REDLINE) as isize),
+ psm::StackDirection::Descending =>
+ new_stack.offset(STACK_REDLINE as isize),
+ };
+ let result = psm::on_stack(new_stack, stack_size, || {
+ fib(n, max_stack)
+ });
+ alloc::dealloc(new_stack, layout);
+ if let Some(res) = result {
+ assert_eq!(res, expected);
+ println!("fib({}) = {}", n, res);
+ break;
+ } else {
+ println!("Stack not large enough!");
+ stack_size *= 2;
+ }
+ }
+ }
+ }
+ }
+
+ }
+ no {
+ fn main() {
+ eprintln!("Stack manipulation not supported by this target");
+ }
+ }
+}
+
+#[test]
+fn run_example() {
+ main()
+}
diff --git a/vendor/psm/examples/on_stack_fibo_alloc_each_frame.rs b/vendor/psm/examples/on_stack_fibo_alloc_each_frame.rs
new file mode 100644
index 000000000..9b9f834b3
--- /dev/null
+++ b/vendor/psm/examples/on_stack_fibo_alloc_each_frame.rs
@@ -0,0 +1,53 @@
+extern crate psm;
+
+psm::psm_stack_manipulation! {
+ yes {
+ use std::alloc;
+
+ const STACK_ALIGN: usize = 4096;
+ const FRAME_SIZE: usize = 4096;
+ const FIB_COUNTS: [(usize, u64); 3] = [
+ (8, 21),
+ (16, 987),
+ (24, 46368),
+ ];
+
+ #[inline(never)]
+ fn fib(n: usize) -> u64 {
+ unsafe {
+ let layout = alloc::Layout::from_size_align(FRAME_SIZE, STACK_ALIGN).unwrap();
+ let new_stack = alloc::alloc(layout);
+ assert!(!new_stack.is_null(), "allocations must succeed!");
+ let r = match n {
+ 0 => 0,
+ 1 => 1,
+ _ => {
+ psm::on_stack(new_stack, FRAME_SIZE, || {
+ fib(n - 1) + fib(n - 2)
+ })
+ }
+ };
+ alloc::dealloc(new_stack, layout);
+ r
+ }
+ }
+
+ fn main() {
+ for &(n, expected) in FIB_COUNTS.iter() {
+ let res = fib(n);
+ assert_eq!(res, expected);
+ println!("fib({}) = {}", n, res);
+ }
+ }
+ }
+ no {
+ fn main() {
+ eprintln!("Stack manipulation not supported by this target");
+ }
+ }
+}
+
+#[test]
+fn run_example() {
+ main()
+}
diff --git a/vendor/psm/examples/panics.rs b/vendor/psm/examples/panics.rs
new file mode 100644
index 000000000..ada658d80
--- /dev/null
+++ b/vendor/psm/examples/panics.rs
@@ -0,0 +1,52 @@
+extern crate psm;
+
+use std::panic;
+
+const CHAIN_DEPTH: usize = 16;
+
+psm::psm_stack_manipulation! {
+ yes {
+ use std::alloc;
+ const STACK_ALIGN: usize = 4096;
+        // Generating backtraces (because of RUST_BACKTRACE) creates a few quite large frames, so it
+        // is important that all frames have a sufficient amount of memory available to not run over
+        // the stack...
+ const FRAME_SIZE: usize = 4096 * 10;
+
+ fn panic_chain(depth: usize) {
+ if depth == 0 {
+ panic!("full chain!");
+ } else {
+ unsafe {
+ let layout = alloc::Layout::from_size_align(FRAME_SIZE, STACK_ALIGN).unwrap();
+ let new_stack = alloc::alloc(layout);
+ assert!(!new_stack.is_null(), "allocations must succeed!");
+ let p = psm::on_stack(new_stack, FRAME_SIZE, || {
+ panic::catch_unwind(|| {
+ panic_chain(depth - 1);
+ })
+ });
+ alloc::dealloc(new_stack, layout);
+ p.map_err(panic::resume_unwind).unwrap()
+ }
+ }
+ }
+
+ fn main() {
+ panic_chain(CHAIN_DEPTH);
+ }
+
+ #[test]
+ fn run_example() {
+ assert!(panic::catch_unwind(|| {
+ panic_chain(CHAIN_DEPTH);
+ }).is_err(), "Panic did not propagate!");
+ }
+ }
+
+ no {
+ fn main() {
+ eprintln!("Stack manipulation not supported by this target");
+ }
+ }
+}
diff --git a/vendor/psm/examples/replace_stack_1.rs b/vendor/psm/examples/replace_stack_1.rs
new file mode 100644
index 000000000..af9e4b92c
--- /dev/null
+++ b/vendor/psm/examples/replace_stack_1.rs
@@ -0,0 +1,33 @@
+extern crate psm;
+
+psm::psm_stack_manipulation! {
+ yes {
+ use std::alloc;
+
+ const STACK_SIZE: usize = 4096 * 64;
+ const STACK_ALIGN: usize = 4096;
+
+ fn main() {
+ println!("current stack pointer is {:p}", psm::stack_pointer());
+ unsafe {
+ let new_stack = alloc::alloc(alloc::Layout::from_size_align(STACK_SIZE, STACK_ALIGN).unwrap());
+ println!("new stack is between {:p} and {:p}", new_stack, new_stack.offset(STACK_SIZE as isize));
+ psm::replace_stack(new_stack, STACK_SIZE, || {
+ println!("after replacement stack pointer is {:p}", psm::stack_pointer());
+ ::std::process::exit(0);
+ });
+ }
+ }
+ }
+ no {
+ fn main() {
+ eprintln!("Stack manipulation not supported by this target");
+ }
+ }
+}
+
+#[test]
+fn run_example() {
+    // NOTE: intentionally out-of-process, as the example exits with `process::exit(0)`.
+ main()
+}
diff --git a/vendor/psm/examples/thread.rs b/vendor/psm/examples/thread.rs
new file mode 100644
index 000000000..eb335a5a4
--- /dev/null
+++ b/vendor/psm/examples/thread.rs
@@ -0,0 +1,60 @@
+extern crate psm;
+
+psm::psm_stack_manipulation! {
+ yes {
+ use std::alloc;
+
+ const STACK_ALIGN: usize = 4096;
+ const FRAME_SIZE: usize = 4096;
+ const FIB_COUNTS: [(usize, u64); 3] = [
+ (8, 21),
+ (16, 987),
+ (24, 46368),
+ ];
+
+ #[inline(never)]
+ fn fib(n: usize) -> u64 {
+ unsafe {
+ let layout = alloc::Layout::from_size_align(FRAME_SIZE, STACK_ALIGN).unwrap();
+ let new_stack = alloc::alloc(layout);
+ assert!(!new_stack.is_null(), "allocations must succeed!");
+ let r = match n {
+ 0 => 0,
+ 1 => 1,
+ _ => {
+ psm::on_stack(new_stack, FRAME_SIZE, || {
+ fib(n - 1) + fib(n - 2)
+ })
+ }
+ };
+ alloc::dealloc(new_stack, layout);
+ r
+ }
+ }
+
+ fn main() {
+ for (n, expected, handle) in FIB_COUNTS.iter().map(|&(n, expected)|
+ (n, expected, std::thread::spawn(move || {
+ fib(n)
+ }))
+ ) {
+ if let Ok(res) = handle.join() {
+ assert_eq!(res, expected);
+ println!("fib({}) = {}", n, res);
+ } else {
+ panic!("joining a thread returned an Err");
+ }
+ }
+ }
+ }
+ no {
+ fn main() {
+ eprintln!("Stack manipulation not supported by this target");
+ }
+ }
+}
+
+#[test]
+fn run_example() {
+ main()
+}
diff --git a/vendor/psm/src/arch/aarch64_armasm.asm b/vendor/psm/src/arch/aarch64_armasm.asm
new file mode 100644
index 000000000..95349f9cc
--- /dev/null
+++ b/vendor/psm/src/arch/aarch64_armasm.asm
@@ -0,0 +1,38 @@
+ AREA |.text|, CODE, READONLY
+
+ GLOBAL |rust_psm_stack_direction|
+ ALIGN 4
+|rust_psm_stack_direction| PROC
+ orr w0, wzr, #2
+ ret
+ ENDP
+
+
+ GLOBAL |rust_psm_stack_pointer|
+ ALIGN 4
+|rust_psm_stack_pointer| PROC
+ mov x0, sp
+ ret
+ ENDP
+
+
+ GLOBAL |rust_psm_replace_stack|
+ ALIGN 4
+|rust_psm_replace_stack| PROC
+ mov sp, x2
+ br x1
+ ENDP
+
+ GLOBAL |rust_psm_on_stack|
+ ALIGN 4
+|rust_psm_on_stack| PROC
+ stp x29, x30, [sp, #-16]!
+ mov x29, sp
+ mov sp, x3
+ blr x2
+ mov sp, x29
+ ldp x29, x30, [sp], #16
+ ret
+ ENDP
+
+ END
diff --git a/vendor/psm/src/arch/aarch_aapcs64.s b/vendor/psm/src/arch/aarch_aapcs64.s
new file mode 100644
index 000000000..55917f329
--- /dev/null
+++ b/vendor/psm/src/arch/aarch_aapcs64.s
@@ -0,0 +1,85 @@
+#include "psm.h"
+
+.text
+
+#if CFG_TARGET_OS_darwin || CFG_TARGET_OS_macos || CFG_TARGET_OS_ios
+
+#define GLOBL(fnname) .globl _##fnname
+#define TYPE(fnname)
+#define FUNCTION(fnname) _##fnname
+#define SIZE(fnname,endlabel)
+
+#else
+
+#define GLOBL(fnname) .globl fnname
+#define TYPE(fnname) .type fnname,@function
+#define FUNCTION(fnname) fnname
+#define SIZE(fnname,endlabel) .size fnname,endlabel-fnname
+
+#endif
+
+
+GLOBL(rust_psm_stack_direction)
+.p2align 2
+TYPE(rust_psm_stack_direction)
+FUNCTION(rust_psm_stack_direction):
+/* extern "C" fn() -> u8 */
+.cfi_startproc
+ orr w0, wzr, #STACK_DIRECTION_DESCENDING
+ ret
+.rust_psm_stack_direction_end:
+SIZE(rust_psm_stack_direction,.rust_psm_stack_direction_end)
+.cfi_endproc
+
+
+GLOBL(rust_psm_stack_pointer)
+.p2align 2
+TYPE(rust_psm_stack_pointer)
+FUNCTION(rust_psm_stack_pointer):
+/* extern "C" fn() -> *mut u8 */
+.cfi_startproc
+ mov x0, sp
+ ret
+.rust_psm_stack_pointer_end:
+SIZE(rust_psm_stack_pointer,.rust_psm_stack_pointer_end)
+.cfi_endproc
+
+
+GLOBL(rust_psm_replace_stack)
+.p2align 2
+TYPE(rust_psm_replace_stack)
+FUNCTION(rust_psm_replace_stack):
+/* extern "C" fn(r0: usize, r1: extern "C" fn(usize), r2: *mut u8) */
+.cfi_startproc
+/* All we gotta do is set the stack pointer to x2 & tail-call the callback in x1 */
+ mov sp, x2
+ br x1
+.rust_psm_replace_stack_end:
+SIZE(rust_psm_replace_stack,.rust_psm_replace_stack_end)
+.cfi_endproc
+
+
+GLOBL(rust_psm_on_stack)
+.p2align 2
+TYPE(rust_psm_on_stack)
+FUNCTION(rust_psm_on_stack):
+/* extern "C" fn(r0: usize, r1: usize, r2: extern "C" fn(usize, usize), r3: *mut u8) */
+.cfi_startproc
+ stp x29, x30, [sp, #-16]!
+ .cfi_def_cfa sp, 16
+ mov x29, sp
+ .cfi_def_cfa x29, 16
+ .cfi_offset x29, -16
+ .cfi_offset x30, -8
+ mov sp, x3
+ blr x2
+ mov sp, x29
+ .cfi_def_cfa sp, 16
+ ldp x29, x30, [sp], #16
+ .cfi_def_cfa sp, 0
+ .cfi_restore x29
+ .cfi_restore x30
+ ret
+.rust_psm_on_stack_end:
+SIZE(rust_psm_on_stack,.rust_psm_on_stack_end)
+.cfi_endproc
diff --git a/vendor/psm/src/arch/arm_aapcs.s b/vendor/psm/src/arch/arm_aapcs.s
new file mode 100644
index 000000000..c2fa9d81d
--- /dev/null
+++ b/vendor/psm/src/arch/arm_aapcs.s
@@ -0,0 +1,106 @@
+#include "psm.h"
+
+.text
+.syntax unified
+
+#if CFG_TARGET_OS_darwin || CFG_TARGET_OS_macos || CFG_TARGET_OS_ios
+
+#define GLOBL(fnname) .globl _##fnname
+#define THUMBTYPE(fnname) .thumb_func _##fnname
+#define FUNCTION(fnname) _##fnname
+#define THUMBFN .code 16
+#define SIZE(fnname,endlabel)
+#define FNSTART
+#define CANTUNWIND
+#define FNEND
+
+#else
+
+#define GLOBL(fnname) .globl fnname
+#define THUMBTYPE(fnname) .type fnname,%function
+#define FUNCTION(fnname) fnname
+#define THUMBFN .code 16
+#define SIZE(fnname,endlabel) .size fnname,endlabel-fnname
+#define FNSTART .fnstart
+#define CANTUNWIND .cantunwind
+#define FNEND .fnend
+
+#endif
+
+
+GLOBL(rust_psm_stack_direction)
+.p2align 2
+THUMBTYPE(rust_psm_stack_direction)
+THUMBFN
+FUNCTION(rust_psm_stack_direction):
+/* extern "C" fn() -> u8 */
+FNSTART
+.cfi_startproc
+ /* movs to support Thumb-1 */
+ movs r0, #STACK_DIRECTION_DESCENDING
+ bx lr
+.rust_psm_stack_direction_end:
+SIZE(rust_psm_stack_direction,.rust_psm_stack_direction_end)
+.cfi_endproc
+CANTUNWIND
+FNEND
+
+GLOBL(rust_psm_stack_pointer)
+.p2align 2
+THUMBTYPE(rust_psm_stack_pointer)
+THUMBFN
+FUNCTION(rust_psm_stack_pointer):
+/* extern "C" fn() -> *mut u8 */
+FNSTART
+.cfi_startproc
+ mov r0, sp
+ bx lr
+.rust_psm_stack_pointer_end:
+SIZE(rust_psm_stack_pointer,.rust_psm_stack_pointer_end)
+.cfi_endproc
+CANTUNWIND
+FNEND
+
+
+GLOBL(rust_psm_replace_stack)
+.p2align 2
+THUMBTYPE(rust_psm_replace_stack)
+THUMBFN
+FUNCTION(rust_psm_replace_stack):
+/* extern "C" fn(r0: usize, r1: extern "C" fn(usize), r2: *mut u8) */
+FNSTART
+.cfi_startproc
+/* All we gotta do is set the stack pointer to r2 & tail-call the callback in r1 */
+ mov sp, r2
+ bx r1
+.rust_psm_replace_stack_end:
+SIZE(rust_psm_replace_stack,.rust_psm_replace_stack_end)
+.cfi_endproc
+CANTUNWIND
+FNEND
+
+
+GLOBL(rust_psm_on_stack)
+.p2align 2
+THUMBTYPE(rust_psm_on_stack)
+THUMBFN
+FUNCTION(rust_psm_on_stack):
+/* extern "C" fn(r0: usize, r1: usize, r2: extern "C" fn(usize, usize), r3: *mut u8) */
+FNSTART
+.cfi_startproc
+ push {r4, lr}
+ .cfi_def_cfa_offset 8
+ mov r4, sp
+ .cfi_def_cfa_register r4
+ .cfi_offset lr, -4
+ .cfi_offset r4, -8
+ mov sp, r3
+ blx r2
+ mov sp, r4
+ .cfi_restore sp
+ pop {r4, pc}
+.rust_psm_on_stack_end:
+SIZE(rust_psm_on_stack,.rust_psm_on_stack_end)
+.cfi_endproc
+CANTUNWIND
+FNEND
diff --git a/vendor/psm/src/arch/arm_armasm.asm b/vendor/psm/src/arch/arm_armasm.asm
new file mode 100644
index 000000000..ab8a5bcf0
--- /dev/null
+++ b/vendor/psm/src/arch/arm_armasm.asm
@@ -0,0 +1,39 @@
+ THUMB
+ AREA |.text|, CODE, READONLY
+
+
+ GLOBAL |rust_psm_stack_direction|
+ ALIGN 4
+|rust_psm_stack_direction| PROC
+ movs r0, #2
+ bx lr
+ ENDP
+
+ GLOBAL |rust_psm_stack_pointer|
+ ALIGN 4
+|rust_psm_stack_pointer| PROC
+ mov r0, sp
+ bx lr
+ ENDP
+
+
+ GLOBAL |rust_psm_replace_stack|
+ ALIGN 4
+|rust_psm_replace_stack| PROC
+ mov sp, r2
+ bx r1
+ ENDP
+
+
+ GLOBAL |rust_psm_on_stack|
+ ALIGN 4
+|rust_psm_on_stack| PROC
+ push {r4, lr}
+ mov r4, sp
+ mov sp, r3
+ blx r2
+ mov sp, r4
+ pop {r4, pc}
+ ENDP
+
+ END
diff --git a/vendor/psm/src/arch/mips64_eabi.s b/vendor/psm/src/arch/mips64_eabi.s
new file mode 100644
index 000000000..72bc01eb7
--- /dev/null
+++ b/vendor/psm/src/arch/mips64_eabi.s
@@ -0,0 +1,87 @@
+/*
+Not only does MIPS have 20 different ABIs... nobody tells anybody which specific variant of which
+ABI is used where.
+
+This is an "EABI" implementation based on the following page:
+
+http://www.cygwin.com/ml/binutils/2003-06/msg00436.html
+*/
+
+#include "psm.h"
+
+.set noreorder /* we’ll manage the delay slots on our own, thanks! */
+
+.text
+.globl rust_psm_stack_direction
+.p2align 3
+.type rust_psm_stack_direction,@function
+.ent rust_psm_stack_direction
+/* extern "C" fn() -> u8 */
+rust_psm_stack_direction:
+.cfi_startproc
+ jr $31
+ addiu $2, $zero, STACK_DIRECTION_DESCENDING
+.end rust_psm_stack_direction
+.rust_psm_stack_direction_end:
+.size rust_psm_stack_direction,.rust_psm_stack_direction_end-rust_psm_stack_direction
+.cfi_endproc
+
+
+.globl rust_psm_stack_pointer
+.p2align 3
+.type rust_psm_stack_pointer,@function
+.ent rust_psm_stack_pointer
+/* extern "C" fn() -> *mut u8 */
+rust_psm_stack_pointer:
+.cfi_startproc
+ jr $31
+ move $2, $29
+.end rust_psm_stack_pointer
+.rust_psm_stack_pointer_end:
+.size rust_psm_stack_pointer,.rust_psm_stack_pointer_end-rust_psm_stack_pointer
+.cfi_endproc
+
+
+
+.globl rust_psm_replace_stack
+.p2align 3
+.type rust_psm_replace_stack,@function
+.ent rust_psm_replace_stack
+/* extern "C" fn(r4: usize, r5: extern "C" fn(usize), r6: *mut u8) */
+rust_psm_replace_stack:
+.cfi_startproc
+ move $25, $5
+ jr $5
+ move $29, $6
+.end rust_psm_replace_stack
+.rust_psm_replace_stack_end:
+.size rust_psm_replace_stack,.rust_psm_replace_stack_end-rust_psm_replace_stack
+.cfi_endproc
+
+
+.globl rust_psm_on_stack
+.p2align 3
+.type rust_psm_on_stack,@function
+.ent rust_psm_on_stack
+/* extern "C" fn(r4: usize, r5: usize, r6: extern "C" fn(usize), r7: *mut u8) */
+rust_psm_on_stack:
+.cfi_startproc
+ sd $29, -8($7)
+ sd $31, -16($7)
+ .cfi_def_cfa 7, 0
+ .cfi_offset 31, -16
+ .cfi_offset 29, -8
+ move $25, $6
+ jalr $31, $6
+ daddiu $29, $7, -16
+ .cfi_def_cfa 29, 16
+ ld $31, 0($29)
+ .cfi_restore 31
+ ld $29, 8($29)
+ .cfi_restore 29
+ jr $31
+ nop
+.end rust_psm_on_stack
+.rust_psm_on_stack_end:
+.size rust_psm_on_stack,.rust_psm_on_stack_end-rust_psm_on_stack
+.cfi_endproc
diff --git a/vendor/psm/src/arch/mips_eabi.s b/vendor/psm/src/arch/mips_eabi.s
new file mode 100644
index 000000000..e08ed278a
--- /dev/null
+++ b/vendor/psm/src/arch/mips_eabi.s
@@ -0,0 +1,88 @@
+/*
+Not only does MIPS have 20 different ABIs... nobody tells anybody which specific variant of which
+ABI is used where.
+
+This is an "EABI" implementation based on the following page:
+
+http://www.cygwin.com/ml/binutils/2003-06/msg00436.html
+*/
+
+#include "psm.h"
+
+.set noreorder /* we’ll manage the delay slots on our own, thanks! */
+
+.text
+.abicalls
+.globl rust_psm_stack_direction
+.p2align 2
+.type rust_psm_stack_direction,@function
+.ent rust_psm_stack_direction
+/* extern "C" fn() -> u8 */
+rust_psm_stack_direction:
+.cfi_startproc
+ jr $31
+ addiu $2, $zero, STACK_DIRECTION_DESCENDING
+.end rust_psm_stack_direction
+.rust_psm_stack_direction_end:
+.size rust_psm_stack_direction,.rust_psm_stack_direction_end-rust_psm_stack_direction
+.cfi_endproc
+
+
+.globl rust_psm_stack_pointer
+.p2align 2
+.type rust_psm_stack_pointer,@function
+.ent rust_psm_stack_pointer
+/* extern "C" fn() -> *mut u8 */
+rust_psm_stack_pointer:
+.cfi_startproc
+ jr $31
+ move $2, $29
+.end rust_psm_stack_pointer
+.rust_psm_stack_pointer_end:
+.size rust_psm_stack_pointer,.rust_psm_stack_pointer_end-rust_psm_stack_pointer
+.cfi_endproc
+
+
+
+.globl rust_psm_replace_stack
+.p2align 2
+.type rust_psm_replace_stack,@function
+.ent rust_psm_replace_stack
+/* extern "C" fn(r4: usize, r5: extern "C" fn(usize), r6: *mut u8) */
+rust_psm_replace_stack:
+.cfi_startproc
+ move $25, $5
+ jr $5
+ move $29, $6
+.end rust_psm_replace_stack
+.rust_psm_replace_stack_end:
+.size rust_psm_replace_stack,.rust_psm_replace_stack_end-rust_psm_replace_stack
+.cfi_endproc
+
+
+.globl rust_psm_on_stack
+.p2align 2
+.type rust_psm_on_stack,@function
+.ent rust_psm_on_stack
+/* extern "C" fn(r4: usize, r5: usize, r6: extern "C" fn(usize), r7: *mut u8) */
+rust_psm_on_stack:
+.cfi_startproc
+ sw $29, -4($7)
+ sw $31, -8($7)
+ .cfi_def_cfa 7, 0
+ .cfi_offset 31, -8
+ .cfi_offset 29, -4
+ move $25, $6
+ jalr $31, $6
+ addiu $29, $7, -8
+ .cfi_def_cfa 29, 8
+ lw $31, 0($29)
+ .cfi_restore 31
+ lw $29, 4($29)
+ .cfi_restore 29
+ jr $31
+ nop
+.end rust_psm_on_stack
+.rust_psm_on_stack_end:
+.size rust_psm_on_stack,.rust_psm_on_stack_end-rust_psm_on_stack
+.cfi_endproc
diff --git a/vendor/psm/src/arch/powerpc32.s b/vendor/psm/src/arch/powerpc32.s
new file mode 100644
index 000000000..1f7a08619
--- /dev/null
+++ b/vendor/psm/src/arch/powerpc32.s
@@ -0,0 +1,76 @@
+#include "psm.h"
+/* FIXME: this probably does not cover all ABIs? Tested with sysv only, possibly works for AIX as
+ well?
+*/
+
+.text
+.globl rust_psm_stack_direction
+.p2align 2
+.type rust_psm_stack_direction,@function
+rust_psm_stack_direction:
+/* extern "C" fn() -> u8 */
+.cfi_startproc
+ li 3, STACK_DIRECTION_DESCENDING
+ blr
+.rust_psm_stack_direction_end:
+.size rust_psm_stack_direction,.rust_psm_stack_direction_end-rust_psm_stack_direction
+.cfi_endproc
+
+
+.globl rust_psm_stack_pointer
+.p2align 2
+.type rust_psm_stack_pointer,@function
+rust_psm_stack_pointer:
+/* extern "C" fn() -> *mut u8 */
+.cfi_startproc
+ mr 3, 1
+ blr
+.rust_psm_stack_pointer_end:
+.size rust_psm_stack_pointer,.rust_psm_stack_pointer_end-rust_psm_stack_pointer
+.cfi_endproc
+
+
+.globl rust_psm_replace_stack
+.p2align 2
+.type rust_psm_replace_stack,@function
+rust_psm_replace_stack:
+/* extern "C" fn(3: usize, 4: extern "C" fn(usize), 5: *mut u8) */
+.cfi_startproc
+/* NOTE: perhaps add a debug-assertion for stack alignment? */
+ addi 5, 5, -16
+ mr 1, 5
+ mtctr 4
+ bctr
+.rust_psm_replace_stack_end:
+.size rust_psm_replace_stack,.rust_psm_replace_stack_end-rust_psm_replace_stack
+.cfi_endproc
+
+
+.globl rust_psm_on_stack
+.p2align 2
+.type rust_psm_on_stack,@function
+rust_psm_on_stack:
+/* extern "C" fn(3: usize, 4: usize, 5: extern "C" fn(usize, usize), 6: *mut u8) */
+.cfi_startproc
+ mflr 0
+ stw 0, -24(6)
+ sub 6, 6, 1
+ addi 6, 6, -32
+ stwux 1, 1, 6
+ .cfi_def_cfa r1, 32
+ .cfi_offset r1, -32
+ .cfi_offset lr, -24
+ mtctr 5
+ bctrl
+ lwz 0, 8(1)
+ mtlr 0
+ .cfi_restore lr
+ /* FIXME: after this instruction backtrace breaks until control returns to the caller.
+ That being said, compiler-generated code has the same issue, so I guess that is fine for now?
+ */
+ lwz 1, 0(1)
+ .cfi_restore r1
+ blr
+.rust_psm_on_stack_end:
+.size rust_psm_on_stack,.rust_psm_on_stack_end-rust_psm_on_stack
+.cfi_endproc
diff --git a/vendor/psm/src/arch/powerpc64.s b/vendor/psm/src/arch/powerpc64.s
new file mode 100644
index 000000000..1504a8c01
--- /dev/null
+++ b/vendor/psm/src/arch/powerpc64.s
@@ -0,0 +1,90 @@
+/* Implementation of the AIX-like PowerPC ABI. Seems to be used by the big-endian PowerPC targets.
+ The following references were used during the implementation of this code:
+
+ https://www.ibm.com/support/knowledgecenter/en/ssw_aix_72/com.ibm.aix.alangref/idalangref_rntime_stack.htm
+ https://www.ibm.com/support/knowledgecenter/en/ssw_aix_72/com.ibm.aix.alangref/idalangref_reg_use_conv.htm
+ https://www.ibm.com/developerworks/library/l-powasm4/index.html
+*/
+
+#include "psm.h"
+
+.text
+.globl rust_psm_stack_direction
+.p2align 2
+.type rust_psm_stack_direction,@function
+rust_psm_stack_direction:
+/* extern "C" fn() -> u8 */
+.cfi_startproc
+ li 3, STACK_DIRECTION_DESCENDING
+ blr
+.rust_psm_stack_direction_end:
+.size rust_psm_stack_direction,.rust_psm_stack_direction_end-rust_psm_stack_direction
+.cfi_endproc
+
+
+.globl rust_psm_stack_pointer
+.p2align 2
+.type rust_psm_stack_pointer,@function
+rust_psm_stack_pointer:
+/* extern "C" fn() -> *mut u8 */
+.cfi_startproc
+ mr 3, 1
+ blr
+.rust_psm_stack_pointer_end:
+.size rust_psm_stack_pointer,.rust_psm_stack_pointer_end-rust_psm_stack_pointer
+.cfi_endproc
+
+
+.globl rust_psm_replace_stack
+.p2align 2
+.type rust_psm_replace_stack,@function
+rust_psm_replace_stack:
+/* extern "C" fn(3: usize, 4: extern "C" fn(usize), 5: *mut u8) */
+.cfi_startproc
+ ld 2, 8(4)
+ ld 4, 0(4)
+ /* do not allocate the whole 112-byte frame; we know it won't be used */
+ addi 5, 5, -48
+ mr 1, 5
+ mtctr 4
+ bctr
+.rust_psm_replace_stack_end:
+.size rust_psm_replace_stack,.rust_psm_replace_stack_end-rust_psm_replace_stack
+.cfi_endproc
+
+
+.globl rust_psm_on_stack
+.p2align 2
+.type rust_psm_on_stack,@function
+rust_psm_on_stack:
+/* extern "C" fn(3: usize, 4: usize, 5: extern "C" fn(usize, usize), 6: *mut u8) */
+.cfi_startproc
+ mflr 0
+ std 2, -72(6)
+ std 0, -8(6)
+ sub 6, 6, 1
+ addi 6, 6, -112
+ stdux 1, 1, 6
+ .cfi_def_cfa r1, 112
+ .cfi_offset r1, -112
+ .cfi_offset r2, -72
+ .cfi_offset lr, -8
+ /* load the function pointer from TOC and make the call */
+ ld 2, 8(5)
+ ld 5, 0(5)
+ mtctr 5
+ bctrl
+ ld 2, 40(1)
+ .cfi_restore r2
+ ld 0, 104(1)
+ mtlr 0
+ .cfi_restore lr
+ /* FIXME: after this instruction backtrace breaks until control returns to the caller.
+ That being said, compiler-generated code has the same issue, so I guess that is fine for now?
+ */
+ ld 1, 0(1)
+ .cfi_restore r1
+ blr
+.rust_psm_on_stack_end:
+.size rust_psm_on_stack,.rust_psm_on_stack_end-rust_psm_on_stack
+.cfi_endproc
diff --git a/vendor/psm/src/arch/powerpc64_openpower.s b/vendor/psm/src/arch/powerpc64_openpower.s
new file mode 100644
index 000000000..eb3c1c174
--- /dev/null
+++ b/vendor/psm/src/arch/powerpc64_openpower.s
@@ -0,0 +1,86 @@
+/* Implementation of stack switching routines for the OpenPOWER 64-bit ELF ABI.
+ The specification can be found at
+ http://openpowerfoundation.org/wp-content/uploads/resources/leabi/content/ch_preface.html
+
+ This ABI is usually used by the ppc64le targets.
+*/
+
+#include "psm.h"
+
+.text
+.abiversion 2
+
+
+.globl rust_psm_stack_direction
+.p2align 4
+.type rust_psm_stack_direction,@function
+rust_psm_stack_direction:
+/* extern "C" fn() -> u8 */
+.cfi_startproc
+ li 3, STACK_DIRECTION_DESCENDING
+ blr
+.rust_psm_stack_direction_end:
+.size rust_psm_stack_direction,.rust_psm_stack_direction_end-rust_psm_stack_direction
+.cfi_endproc
+
+
+.globl rust_psm_stack_pointer
+.p2align 4
+.type rust_psm_stack_pointer,@function
+rust_psm_stack_pointer:
+/* extern "C" fn() -> *mut u8 */
+.cfi_startproc
+ mr 3, 1
+ blr
+.rust_psm_stack_pointer_end:
+.size rust_psm_stack_pointer,.rust_psm_stack_pointer_end-rust_psm_stack_pointer
+.cfi_endproc
+
+
+.globl rust_psm_replace_stack
+.p2align 4
+.type rust_psm_replace_stack,@function
+rust_psm_replace_stack:
+/* extern "C" fn(3: usize, 4: extern "C" fn(usize), 5: *mut u8) */
+.cfi_startproc
+ addi 5, 5, -32
+ mtctr 4
+ mr 12, 4
+ mr 1, 5
+ bctr
+.rust_psm_replace_stack_end:
+.size rust_psm_replace_stack,.rust_psm_replace_stack_end-rust_psm_replace_stack
+.cfi_endproc
+
+
+
+.globl rust_psm_on_stack
+.p2align 4
+.type rust_psm_on_stack,@function
+rust_psm_on_stack:
+/* extern "C" fn(3: usize, 4: usize, 5: extern "C" fn(usize, usize), 6: *mut u8) */
+.cfi_startproc
+ mflr 0
+ std 0, -8(6)
+ std 2, -24(6)
+ sub 6, 6, 1
+ addi 6, 6, -48
+ stdux 1, 1, 6
+ .cfi_def_cfa r1, 48
+ .cfi_offset r1, -48
+ .cfi_offset r2, -24
+ .cfi_offset lr, -8
+ mr 12, 5
+ mtctr 5
+ bctrl
+ ld 2, 24(1)
+ .cfi_restore r2
+ ld 0, 40(1)
+ mtlr 0
+ .cfi_restore lr
+ /* FIXME: after this instruction the backtrace breaks until control returns to the caller */
+ ld 1, 0(1)
+ blr
+.rust_psm_on_stack_end:
+.size rust_psm_on_stack,.rust_psm_on_stack_end-rust_psm_on_stack
+.cfi_endproc
diff --git a/vendor/psm/src/arch/psm.h b/vendor/psm/src/arch/psm.h
new file mode 100644
index 000000000..c1137f984
--- /dev/null
+++ b/vendor/psm/src/arch/psm.h
@@ -0,0 +1,10 @@
+#define STACK_DIRECTION_ASCENDING 1
+#define STACK_DIRECTION_DESCENDING 2
+
+
+/*
+Various defines for values produced by `-DCFG_TARGET_*` flags. These only need to be mutually unique.
+*/
+#define darwin 1
+#define macos 2
+#define ios 3
diff --git a/vendor/psm/src/arch/riscv.s b/vendor/psm/src/arch/riscv.s
new file mode 100644
index 000000000..4972993c9
--- /dev/null
+++ b/vendor/psm/src/arch/riscv.s
@@ -0,0 +1,64 @@
+#include "psm.h"
+
+.text
+.globl rust_psm_stack_direction
+.p2align 2
+.type rust_psm_stack_direction,@function
+rust_psm_stack_direction:
+/* extern "C" fn() -> u8 */
+.cfi_startproc
+ li x10, STACK_DIRECTION_DESCENDING
+ jr x1
+.rust_psm_stack_direction_end:
+.size rust_psm_stack_direction,.rust_psm_stack_direction_end-rust_psm_stack_direction
+.cfi_endproc
+
+
+.globl rust_psm_stack_pointer
+.p2align 2
+.type rust_psm_stack_pointer,@function
+rust_psm_stack_pointer:
+/* extern "C" fn() -> *mut u8 */
+.cfi_startproc
+ add x10, x2, x0
+ jr x1
+.rust_psm_stack_pointer_end:
+.size rust_psm_stack_pointer,.rust_psm_stack_pointer_end-rust_psm_stack_pointer
+.cfi_endproc
+
+
+.globl rust_psm_replace_stack
+.p2align 2
+.type rust_psm_replace_stack,@function
+rust_psm_replace_stack:
+/* extern "C" fn(x10: usize, x11: extern "C" fn(usize), x12: *mut u8) */
+.cfi_startproc
+ add x2, x12, x0
+ jr x11
+.rust_psm_replace_stack_end:
+.size rust_psm_replace_stack,.rust_psm_replace_stack_end-rust_psm_replace_stack
+.cfi_endproc
+
+
+.globl rust_psm_on_stack
+.p2align 2
+.type rust_psm_on_stack,@function
+rust_psm_on_stack:
+/* extern "C" fn(x10: usize, x11: usize, x12: extern "C" fn(usize, usize), x13: *mut u8) */
+.cfi_startproc
+ sw x1, -12(x13)
+ sw x2, -16(x13)
+ .cfi_def_cfa x13, 0
+ .cfi_offset x1, -12
+ .cfi_offset x2, -16
+ addi x2, x13, -16
+ .cfi_def_cfa x2, -16
+ jalr x1, x12, 0
+ lw x1, 4(x2)
+ .cfi_restore x1
+ lw x2, 0(x2)
+ .cfi_restore x2
+ jr x1
+.rust_psm_on_stack_end:
+.size rust_psm_on_stack,.rust_psm_on_stack_end-rust_psm_on_stack
+.cfi_endproc
diff --git a/vendor/psm/src/arch/riscv64.s b/vendor/psm/src/arch/riscv64.s
new file mode 100644
index 000000000..1b275ec27
--- /dev/null
+++ b/vendor/psm/src/arch/riscv64.s
@@ -0,0 +1,64 @@
+#include "psm.h"
+
+.text
+.globl rust_psm_stack_direction
+.p2align 2
+.type rust_psm_stack_direction,@function
+rust_psm_stack_direction:
+/* extern "C" fn() -> u8 */
+.cfi_startproc
+ li x10, STACK_DIRECTION_DESCENDING
+ jr x1
+.rust_psm_stack_direction_end:
+.size rust_psm_stack_direction,.rust_psm_stack_direction_end-rust_psm_stack_direction
+.cfi_endproc
+
+
+.globl rust_psm_stack_pointer
+.p2align 2
+.type rust_psm_stack_pointer,@function
+rust_psm_stack_pointer:
+/* extern "C" fn() -> *mut u8 */
+.cfi_startproc
+ add x10, x2, x0
+ jr x1
+.rust_psm_stack_pointer_end:
+.size rust_psm_stack_pointer,.rust_psm_stack_pointer_end-rust_psm_stack_pointer
+.cfi_endproc
+
+
+.globl rust_psm_replace_stack
+.p2align 2
+.type rust_psm_replace_stack,@function
+rust_psm_replace_stack:
+/* extern "C" fn(x10: usize, x11: extern "C" fn(usize), x12: *mut u8) */
+.cfi_startproc
+ add x2, x12, x0
+ jr x11
+.rust_psm_replace_stack_end:
+.size rust_psm_replace_stack,.rust_psm_replace_stack_end-rust_psm_replace_stack
+.cfi_endproc
+
+
+.globl rust_psm_on_stack
+.p2align 2
+.type rust_psm_on_stack,@function
+rust_psm_on_stack:
+/* extern "C" fn(x10: usize, x11: usize, x12: extern "C" fn(usize, usize), x13: *mut u8) */
+.cfi_startproc
+ sd x1, -8(x13)
+ sd x2, -16(x13)
+ .cfi_def_cfa x13, 0
+ .cfi_offset x1, -8
+ .cfi_offset x2, -16
+ addi x2, x13, -16
+ .cfi_def_cfa x2, -16
+ jalr x1, x12, 0
+ ld x1, 8(x2)
+ .cfi_restore x1
+ ld x2, 0(x2)
+ .cfi_restore x2
+ jr x1
+.rust_psm_on_stack_end:
+.size rust_psm_on_stack,.rust_psm_on_stack_end-rust_psm_on_stack
+.cfi_endproc
diff --git a/vendor/psm/src/arch/sparc64.s b/vendor/psm/src/arch/sparc64.s
new file mode 100644
index 000000000..a0db27dde
--- /dev/null
+++ b/vendor/psm/src/arch/sparc64.s
@@ -0,0 +1,67 @@
+#include "psm.h"
+
+.text
+.globl rust_psm_stack_direction
+.p2align 2
+.type rust_psm_stack_direction,@function
+rust_psm_stack_direction:
+/* extern "C" fn() -> u8 */
+.cfi_startproc
+ jmpl %o7 + 8, %g0
+ mov STACK_DIRECTION_DESCENDING, %o0
+.rust_psm_stack_direction_end:
+.size rust_psm_stack_direction,.rust_psm_stack_direction_end-rust_psm_stack_direction
+.cfi_endproc
+
+
+.globl rust_psm_stack_pointer
+.p2align 2
+.type rust_psm_stack_pointer,@function
+rust_psm_stack_pointer:
+/* extern "C" fn() -> *mut u8 */
+.cfi_startproc
+ jmpl %o7 + 8, %g0
+ mov %o6, %o0
+.rust_psm_stack_pointer_end:
+.size rust_psm_stack_pointer,.rust_psm_stack_pointer_end-rust_psm_stack_pointer
+.cfi_endproc
+
+
+.globl rust_psm_replace_stack
+.p2align 2
+.type rust_psm_replace_stack,@function
+rust_psm_replace_stack:
+/* extern "C" fn(%i0: usize, %i1: extern "C" fn(usize), %i2: *mut u8) */
+.cfi_startproc
+ .cfi_def_cfa 0, 0
+ .cfi_return_column 0
+ jmpl %o1, %g0
+ /* WEIRD: Why is the LSB set for the %sp and %fp on SPARC?? */
+ add %o2, -0x7ff, %o6
+.rust_psm_replace_stack_end:
+.size rust_psm_replace_stack,.rust_psm_replace_stack_end-rust_psm_replace_stack
+.cfi_endproc
+
+
+.globl rust_psm_on_stack
+.p2align 2
+.type rust_psm_on_stack,@function
+rust_psm_on_stack:
+/* extern "C" fn(%i0: usize, %i1: usize, %i2: extern "C" fn(usize, usize), %i3: *mut u8) */
+.cfi_startproc
+ /* The fact that locals and saved register windows are offset by 2kB is a
+ very nasty property of the SPARC architecture and ABI. In this case it forces us to slice off
+ 2kB of the stack space outright for no good reason other than adapting to a botched design.
+ */
+ save %o3, -0x87f, %o6
+ .cfi_def_cfa_register %fp
+ .cfi_window_save
+ .cfi_register %r15, %r31
+ mov %i1, %o1
+ jmpl %i2, %o7
+ mov %i0, %o0
+ ret
+ restore
+.rust_psm_on_stack_end:
+.size rust_psm_on_stack,.rust_psm_on_stack_end-rust_psm_on_stack
+.cfi_endproc
diff --git a/vendor/psm/src/arch/sparc_sysv.s b/vendor/psm/src/arch/sparc_sysv.s
new file mode 100644
index 000000000..27d95e933
--- /dev/null
+++ b/vendor/psm/src/arch/sparc_sysv.s
@@ -0,0 +1,65 @@
+#include "psm.h"
+
+/* FIXME: this ABI has definitely not been verified at all */
+
+.text
+.globl rust_psm_stack_direction
+.p2align 2
+.type rust_psm_stack_direction,@function
+rust_psm_stack_direction:
+/* extern "C" fn() -> u8 */
+.cfi_startproc
+ jmpl %o7 + 8, %g0
+ mov STACK_DIRECTION_DESCENDING, %o0
+.rust_psm_stack_direction_end:
+.size rust_psm_stack_direction,.rust_psm_stack_direction_end-rust_psm_stack_direction
+.cfi_endproc
+
+
+.globl rust_psm_stack_pointer
+.p2align 2
+.type rust_psm_stack_pointer,@function
+rust_psm_stack_pointer:
+/* extern "C" fn() -> *mut u8 */
+.cfi_startproc
+ jmpl %o7 + 8, %g0
+ mov %o6, %o0
+.rust_psm_stack_pointer_end:
+.size rust_psm_stack_pointer,.rust_psm_stack_pointer_end-rust_psm_stack_pointer
+.cfi_endproc
+
+
+.globl rust_psm_replace_stack
+.p2align 2
+.type rust_psm_replace_stack,@function
+rust_psm_replace_stack:
+/* extern "C" fn(%i0: usize, %i1: extern "C" fn(usize), %i2: *mut u8) */
+.cfi_startproc
+ .cfi_def_cfa 0, 0
+ .cfi_return_column 0
+ jmpl %o1, %g0
+ /* WEIRD: Why is the LSB set for the %sp and %fp on SPARC?? */
+ add %o2, -0x3ff, %o6
+.rust_psm_replace_stack_end:
+.size rust_psm_replace_stack,.rust_psm_replace_stack_end-rust_psm_replace_stack
+.cfi_endproc
+
+
+.globl rust_psm_on_stack
+.p2align 2
+.type rust_psm_on_stack,@function
+rust_psm_on_stack:
+/* extern "C" fn(%i0: usize, %i1: usize, %i2: extern "C" fn(usize, usize), %i3: *mut u8) */
+.cfi_startproc
+ save %o3, -0x43f, %o6
+ .cfi_def_cfa_register %fp
+ .cfi_window_save
+ .cfi_register %r15, %r31
+ mov %i1, %o1
+ jmpl %i2, %o7
+ mov %i0, %o0
+ ret
+ restore
+.rust_psm_on_stack_end:
+.size rust_psm_on_stack,.rust_psm_on_stack_end-rust_psm_on_stack
+.cfi_endproc
diff --git a/vendor/psm/src/arch/wasm32.s b/vendor/psm/src/arch/wasm32.s
new file mode 100644
index 000000000..e3364e7af
--- /dev/null
+++ b/vendor/psm/src/arch/wasm32.s
@@ -0,0 +1,60 @@
+#include "psm.h"
+
+# Note that this function is not compiled when this package is uploaded to
+# crates.io; this source is only here as a reference for how the corresponding
+# wasm32.o was generated. This file can be compiled with:
+#
+# cpp psm/src/arch/wasm32.s | llvm-mc -o psm/src/arch/wasm32.o --arch=wasm32 -filetype=obj
+#
+# where you'll want to ensure that `llvm-mc` is from a relatively recent
+# version of LLVM.
+
+.globaltype __stack_pointer, i32
+
+.globl rust_psm_stack_direction
+.type rust_psm_stack_direction,@function
+rust_psm_stack_direction:
+.functype rust_psm_stack_direction () -> (i32)
+ i32.const STACK_DIRECTION_DESCENDING
+ end_function
+
+.globl rust_psm_stack_pointer
+.type rust_psm_stack_pointer,@function
+rust_psm_stack_pointer:
+.functype rust_psm_stack_pointer () -> (i32)
+ global.get __stack_pointer
+ end_function
+
+.globl rust_psm_on_stack
+.type rust_psm_on_stack,@function
+rust_psm_on_stack:
+.functype rust_psm_on_stack (i32, i32, i32, i32) -> ()
+ # get our new stack argument, then save the old stack
+ # pointer into that local
+ local.get 3
+ global.get __stack_pointer
+ local.set 3
+ global.set __stack_pointer
+
+ # Call our indirect function specified
+ local.get 0
+ local.get 1
+ local.get 2
+ call_indirect (i32, i32) -> ()
+
+ # restore the stack pointer before returning
+ local.get 3
+ global.set __stack_pointer
+ end_function
+
+.globl rust_psm_replace_stack
+.type rust_psm_replace_stack,@function
+rust_psm_replace_stack:
+.functype rust_psm_replace_stack (i32, i32, i32) -> ()
+ local.get 2
+ global.set __stack_pointer
+ local.get 0
+ local.get 1
+ call_indirect (i32) -> ()
+ unreachable
+ end_function
diff --git a/vendor/psm/src/arch/x86.s b/vendor/psm/src/arch/x86.s
new file mode 100644
index 000000000..2e388760d
--- /dev/null
+++ b/vendor/psm/src/arch/x86.s
@@ -0,0 +1,91 @@
+#include "psm.h"
+/* NOTE: fastcall calling convention used on all x86 targets */
+
+.text
+
+#if CFG_TARGET_OS_darwin || CFG_TARGET_OS_macos || CFG_TARGET_OS_ios
+
+#define GLOBL(fnname) .globl _##fnname
+#define TYPE(fnname)
+#define FUNCTION(fnname) _##fnname
+#define SIZE(fnname,endlabel)
+
+#else
+
+#define GLOBL(fnname) .globl fnname
+#define TYPE(fnname) .type fnname,@function
+#define FUNCTION(fnname) fnname
+#define SIZE(fnname,endlabel) .size fnname,endlabel-fnname
+
+#endif
+
+
+GLOBL(rust_psm_stack_direction)
+.p2align 4
+TYPE(rust_psm_stack_direction)
+FUNCTION(rust_psm_stack_direction):
+/* extern "fastcall" fn() -> u8 (%al) */
+.cfi_startproc
+ movb $STACK_DIRECTION_DESCENDING, %al # always descending on x86
+ retl
+.rust_psm_stack_direction_end:
+SIZE(rust_psm_stack_direction,.rust_psm_stack_direction_end)
+.cfi_endproc
+
+
+GLOBL(rust_psm_stack_pointer)
+.p2align 4
+TYPE(rust_psm_stack_pointer)
+FUNCTION(rust_psm_stack_pointer):
+/* extern "fastcall" fn() -> *mut u8 (%rax) */
+.cfi_startproc
+ leal 4(%esp), %eax
+ retl
+.rust_psm_stack_pointer_end:
+SIZE(rust_psm_stack_pointer,.rust_psm_stack_pointer_end)
+.cfi_endproc
+
+
+GLOBL(rust_psm_replace_stack)
+.p2align 4
+TYPE(rust_psm_replace_stack)
+FUNCTION(rust_psm_replace_stack):
+/* extern "fastcall" fn(%ecx: usize, %edx: extern "fastcall" fn(usize), 4(%esp): *mut u8) */
+.cfi_startproc
+/*
+ All we gotta do is set the stack pointer to 4(%esp) & tail-call the callback in %edx
+
+ Note that the callee expects the stack to be offset by 4 bytes (normally, a return address
+ would be stored there) off the required stack alignment on entry. To offset the stack in such a
+ way we use the `calll` instruction; it would also be possible to use a plain `jmpl`, but that
+ would require adjusting the stack manually, which cannot be done easily, because the stack
+ pointer argument is already stored in memory.
+ */
+ movl 4(%esp), %esp
+ calll *%edx
+ ud2
+.rust_psm_replace_stack_end:
+SIZE(rust_psm_replace_stack,.rust_psm_replace_stack_end)
+.cfi_endproc
+
+
+GLOBL(rust_psm_on_stack)
+.p2align 4
+TYPE(rust_psm_on_stack)
+FUNCTION(rust_psm_on_stack):
+/* extern "fastcall" fn(%ecx: usize, %edx: usize, 4(%esp): extern "fastcall" fn(usize, usize), 8(%esp): *mut u8) */
+.cfi_startproc
+ pushl %ebp
+ .cfi_def_cfa %esp, 8
+ .cfi_offset %ebp, -8
+ movl %esp, %ebp
+ .cfi_def_cfa_register %ebp
+ movl 12(%ebp), %esp
+ calll *8(%ebp)
+ movl %ebp, %esp
+ popl %ebp
+ .cfi_def_cfa %esp, 4
+ retl $8
+.rust_psm_on_stack_end:
+SIZE(rust_psm_on_stack,.rust_psm_on_stack_end)
+.cfi_endproc
diff --git a/vendor/psm/src/arch/x86_64.s b/vendor/psm/src/arch/x86_64.s
new file mode 100644
index 000000000..5f5ece5b1
--- /dev/null
+++ b/vendor/psm/src/arch/x86_64.s
@@ -0,0 +1,87 @@
+#include "psm.h"
+/* NOTE: sysv64 calling convention is used on all x86_64 targets, including Windows! */
+
+.text
+
+#if CFG_TARGET_OS_darwin || CFG_TARGET_OS_macos || CFG_TARGET_OS_ios
+
+#define GLOBL(fnname) .globl _##fnname
+#define TYPE(fnname)
+#define FUNCTION(fnname) _##fnname
+#define SIZE(fnname,endlabel)
+
+#else
+
+#define GLOBL(fnname) .globl fnname
+#define TYPE(fnname) .type fnname,@function
+#define FUNCTION(fnname) fnname
+#define SIZE(fnname,endlabel) .size fnname,endlabel-fnname
+
+#endif
+
+
+GLOBL(rust_psm_stack_direction)
+.p2align 4
+TYPE(rust_psm_stack_direction)
+FUNCTION(rust_psm_stack_direction):
+/* extern "sysv64" fn() -> u8 (%al) */
+.cfi_startproc
+ movb $STACK_DIRECTION_DESCENDING, %al # always descending on x86_64
+ retq
+.rust_psm_stack_direction_end:
+SIZE(rust_psm_stack_direction,.rust_psm_stack_direction_end)
+.cfi_endproc
+
+
+GLOBL(rust_psm_stack_pointer)
+.p2align 4
+TYPE(rust_psm_stack_pointer)
+FUNCTION(rust_psm_stack_pointer):
+/* extern "sysv64" fn() -> *mut u8 (%rax) */
+.cfi_startproc
+ leaq 8(%rsp), %rax
+ retq
+.rust_psm_stack_pointer_end:
+SIZE(rust_psm_stack_pointer,.rust_psm_stack_pointer_end)
+.cfi_endproc
+
+
+GLOBL(rust_psm_replace_stack)
+.p2align 4
+TYPE(rust_psm_replace_stack)
+FUNCTION(rust_psm_replace_stack):
+/* extern "sysv64" fn(%rdi: usize, %rsi: extern "sysv64" fn(usize), %rdx: *mut u8) */
+.cfi_startproc
+/*
+ All we gotta do is set the stack pointer to %rdx & tail-call the callback in %rsi.
+
+ The 8-byte offset is necessary to account for the "return" pointer that would otherwise be placed
+ onto the stack by a regular call.
+*/
+ leaq -8(%rdx), %rsp
+ jmpq *%rsi
+.rust_psm_replace_stack_end:
+SIZE(rust_psm_replace_stack,.rust_psm_replace_stack_end)
+.cfi_endproc
+
+
+GLOBL(rust_psm_on_stack)
+.p2align 4
+TYPE(rust_psm_on_stack)
+FUNCTION(rust_psm_on_stack):
+/* extern "sysv64" fn(%rdi: usize, %rsi: usize, %rdx: extern "sysv64" fn(usize, usize), %rcx: *mut u8) */
+.cfi_startproc
+ pushq %rbp
+ .cfi_def_cfa %rsp, 16
+ .cfi_offset %rbp, -16
+ movq %rsp, %rbp
+ .cfi_def_cfa_register %rbp
+ movq %rcx, %rsp
+ callq *%rdx
+ movq %rbp, %rsp
+ popq %rbp
+ .cfi_def_cfa %rsp, 8
+ retq
+.rust_psm_on_stack_end:
+SIZE(rust_psm_on_stack,.rust_psm_on_stack_end)
+.cfi_endproc
diff --git a/vendor/psm/src/arch/x86_64_msvc.asm b/vendor/psm/src/arch/x86_64_msvc.asm
new file mode 100644
index 000000000..67d72832f
--- /dev/null
+++ b/vendor/psm/src/arch/x86_64_msvc.asm
@@ -0,0 +1,61 @@
+PUBLIC rust_psm_stack_direction
+PUBLIC rust_psm_stack_pointer
+PUBLIC rust_psm_replace_stack
+PUBLIC rust_psm_on_stack
+
+_TEXT SEGMENT
+
+; extern "sysv64" fn() -> u8 (%al)
+rust_psm_stack_direction PROC
+ mov al, 2
+ ret
+rust_psm_stack_direction ENDP
+
+; extern "sysv64" fn() -> *mut u8 (%rax)
+rust_psm_stack_pointer PROC
+ lea rax, [rsp + 8]
+ ret
+rust_psm_stack_pointer ENDP
+
+; extern "sysv64" fn(%rdi: usize, %rsi: extern "sysv64" fn(usize), %rdx: *mut u8, %rcx: *mut u8)
+rust_psm_replace_stack PROC
+ mov gs:[08h], rdx
+ mov gs:[10h], rcx
+ lea rsp, [rdx - 8]
+ jmp rsi
+rust_psm_replace_stack ENDP
+
+; extern "sysv64" fn(%rdi: usize, %rsi: usize,
+; %rdx: extern "sysv64" fn(usize, usize), %rcx: *mut u8, %r8: *mut u8)
+;
+; NB: on Windows, for SEH to work at all, the pointers in the TIB (thread information block) need to
+; be fixed up. Otherwise, it seems that the exception mechanism on Windows will not bother looking
+; for exception handlers at *all* if they happen to fall outside the area specified in the TIB.
+;
+; This necessitates an API difference from the usual 4-argument signature used elsewhere.
+;
+; FIXME: this needs a catch-all exception handler that aborts in case somebody unwinds into here.
+rust_psm_on_stack PROC FRAME
+ push rbp
+ .pushreg rbp
+ mov rbp, rsp
+ .setframe rbp, 0
+ .endprolog
+
+ push gs:[08h]
+ mov gs:[08h], rcx
+ push gs:[10h]
+ mov gs:[10h], r8
+ mov rsp, rcx
+ call rdx
+ lea rsp, [rbp - 010h]
+ pop gs:[10h]
+ pop gs:[08h]
+
+ pop rbp
+ ret
+rust_psm_on_stack ENDP
+
+_TEXT ENDS
+
+END
diff --git a/vendor/psm/src/arch/x86_64_windows_gnu.s b/vendor/psm/src/arch/x86_64_windows_gnu.s
new file mode 100644
index 000000000..8f1258356
--- /dev/null
+++ b/vendor/psm/src/arch/x86_64_windows_gnu.s
@@ -0,0 +1,95 @@
+.text
+
+.def rust_psm_stack_direction
+.scl 2
+.type 32
+.endef
+.globl rust_psm_stack_direction
+.p2align 4
+rust_psm_stack_direction:
+/* extern "sysv64" fn() -> u8 (%al) */
+.cfi_startproc
+ movb $2, %al # always descending on x86_64
+ retq
+.cfi_endproc
+
+.def rust_psm_stack_pointer
+.scl 2
+.type 32
+.endef
+.globl rust_psm_stack_pointer
+.p2align 4
+rust_psm_stack_pointer:
+/* extern "sysv64" fn() -> *mut u8 (%rax) */
+.cfi_startproc
+ leaq 8(%rsp), %rax
+ retq
+.cfi_endproc
+
+
+.def rust_psm_replace_stack
+.scl 2
+.type 32
+.endef
+.globl rust_psm_replace_stack
+.p2align 4
+rust_psm_replace_stack:
+/* extern "sysv64" fn(%rdi: usize, %rsi: extern "sysv64" fn(usize), %rdx: *mut u8, %rcx: *mut u8) */
+.cfi_startproc
+/*
+ All we gotta do is set the stack pointer to %rdx & tail-call the callback in %rsi.
+
+ The 8-byte offset is necessary to account for the "return" pointer that would otherwise be placed
+ onto the stack by a regular call.
+*/
+ movq %gs:0x08, %rdx
+ movq %gs:0x10, %rcx
+ leaq -8(%rdx), %rsp
+ jmpq *%rsi
+.cfi_endproc
+
+
+.def rust_psm_on_stack
+.scl 2
+.type 32
+.endef
+.globl rust_psm_on_stack
+.p2align 4
+rust_psm_on_stack:
+/*
+extern "sysv64" fn(%rdi: usize, %rsi: usize,
+ %rdx: extern "sysv64" fn(usize, usize), %rcx: *mut u8, %r8: *mut u8)
+
+NB: on Windows, for SEH to work at all, the pointers in the TIB (thread information block) need to
+be fixed up. Otherwise, it seems that the exception mechanism on Windows will not bother looking
+for exception handlers at *all* if they happen to fall outside the area specified in the TIB.
+
+This necessitates an API difference from the usual 4-argument signature used elsewhere.
+
+FIXME: this needs a catch-all exception handler that aborts in case somebody unwinds into here.
+*/
+.cfi_startproc
+ pushq %rbp
+ .cfi_def_cfa %rsp, 16
+ .cfi_offset %rbp, -16
+ pushq %gs:0x08
+ .cfi_def_cfa %rsp, 24
+ pushq %gs:0x10
+ .cfi_def_cfa %rsp, 32
+
+ movq %rsp, %rbp
+ .cfi_def_cfa_register %rbp
+ movq %rcx, %gs:0x08
+ movq %r8, %gs:0x10
+ movq %rcx, %rsp
+ callq *%rdx
+ movq %rbp, %rsp
+
+ popq %gs:0x10
+ .cfi_def_cfa %rsp, 24
+ popq %gs:0x08
+ .cfi_def_cfa %rsp, 16
+ popq %rbp
+ .cfi_def_cfa %rsp, 8
+ retq
+.cfi_endproc
diff --git a/vendor/psm/src/arch/x86_msvc.asm b/vendor/psm/src/arch/x86_msvc.asm
new file mode 100644
index 000000000..3838759f7
--- /dev/null
+++ b/vendor/psm/src/arch/x86_msvc.asm
@@ -0,0 +1,70 @@
+; FIXME: this is weird, this works locally but not on appveyor?!??!
+
+.386
+.model flat
+
+ASSUME FS:NOTHING
+
+; WTF: PUBLIC conflicts with "SYSCALL", but "SYSCALL" is the only way to stop MASM from mangling the
+; symbol names?
+;
+; PUBLIC @rust_psm_stack_direction@0
+; PUBLIC @rust_psm_stack_pointer@0
+; PUBLIC @rust_psm_replace_stack@12
+; PUBLIC @rust_psm_on_stack@16
+
+_TEXT SEGMENT
+
+; extern "fastcall" fn() -> u8 (%al)
+@rust_psm_stack_direction@0 PROC SYSCALL
+ mov al, 2
+ ret
+@rust_psm_stack_direction@0 ENDP
+
+; extern "fastcall" fn() -> *mut u8 (%rax)
+@rust_psm_stack_pointer@0 PROC SYSCALL
+ lea eax, [esp + 4]
+ ret
+@rust_psm_stack_pointer@0 ENDP
+
+; extern "fastcall" fn(%ecx: usize, %edx: extern "fastcall" fn(usize),
+; 4(%esp): *mut u8, 8(%esp): *mut u8)
+@rust_psm_replace_stack@16 PROC SYSCALL
+ mov eax, dword ptr [esp + 8]
+ mov fs:[08h], eax
+ mov esp, dword ptr [esp + 4]
+ mov fs:[04h], esp
+ jmp edx
+@rust_psm_replace_stack@16 ENDP
+
+; extern "fastcall" fn(%ecx: usize, %edx: usize, 4(%esp): extern "fastcall" fn(usize, usize),
+; 8(%esp): *mut u8, 12(%esp): *mut u8)
+;
+; NB: on Windows, for SEH to work at all, the pointers in the TIB (thread information block) need to
+; be fixed up. Otherwise, it seems that the exception mechanism on Windows will not bother looking
+; for exception handlers at *all* if they happen to fall outside the area specified in the TIB.
+;
+; This necessitates an API difference from the usual 4-argument signature used elsewhere.
+@rust_psm_on_stack@20 PROC SYSCALL
+ push ebp
+ mov ebp, esp
+
+ push fs:[0E0Ch]
+ push fs:[08h]
+ mov eax, dword ptr [ebp + 4 + 12]
+ mov dword ptr fs:[08h], eax
+ mov dword ptr fs:[0E0Ch], eax
+ push fs:[04h]
+ mov esp, dword ptr [ebp + 4 + 8]
+ mov dword ptr fs:[04h], esp
+ call dword ptr [ebp + 4 + 4]
+
+ lea esp, [ebp - 12]
+ pop fs:[04h]
+ pop fs:[08h]
+ pop fs:[0E0Ch]
+ pop ebp
+ ret 12
+@rust_psm_on_stack@20 ENDP
+
+END
diff --git a/vendor/psm/src/arch/x86_windows_gnu.s b/vendor/psm/src/arch/x86_windows_gnu.s
new file mode 100644
index 000000000..474d4168a
--- /dev/null
+++ b/vendor/psm/src/arch/x86_windows_gnu.s
@@ -0,0 +1,94 @@
+/* FIXME: this works locally but not on appveyor??!? */
+/* NOTE: fastcall calling convention used on all x86 targets */
+.text
+
+.def @rust_psm_stack_direction@0
+.scl 2
+.type 32
+.endef
+.globl @rust_psm_stack_direction@0
+.p2align 4
+@rust_psm_stack_direction@0:
+/* extern "fastcall" fn() -> u8 (%al) */
+.cfi_startproc
+ movb $2, %al # always descending on x86
+ retl
+.cfi_endproc
+
+
+.def @rust_psm_stack_pointer@0
+.scl 2
+.type 32
+.endef
+.globl @rust_psm_stack_pointer@0
+.p2align 4
+@rust_psm_stack_pointer@0:
+/* extern "fastcall" fn() -> *mut u8 (%rax) */
+.cfi_startproc
+ leal 4(%esp), %eax
+ retl
+.cfi_endproc
+
+
+.def @rust_psm_replace_stack@16
+.scl 2
+.type 32
+.endef
+.globl @rust_psm_replace_stack@16
+.p2align 4
+@rust_psm_replace_stack@16:
+/* extern "fastcall" fn(%ecx: usize, %edx: extern "fastcall" fn(usize), 4(%esp): *mut u8) */
+.cfi_startproc
+/*
+ All we gotta do is set the stack pointer to 4(%esp) & tail-call the callback in %edx
+
+ Note that the callee expects the stack to be offset by 4 bytes (normally, a return address
+ would be stored there) off the required stack alignment on entry. To offset the stack in such a
+ way we use the `calll` instruction; it would also be possible to use a plain `jmpl`, but that
+ would require adjusting the stack manually, which cannot be done easily, because the stack
+ pointer argument is already stored in memory.
+ */
+ movl 8(%esp), %eax
+ mov %eax, %fs:0x08
+ movl 4(%esp), %esp
+ mov %esp, %fs:0x04
+ calll *%edx
+ ud2
+.cfi_endproc
+
+
+.def @rust_psm_on_stack@16
+.scl 2
+.type 32
+.endef
+.globl @rust_psm_on_stack@16
+.p2align 4
+@rust_psm_on_stack@16:
+/* extern "fastcall" fn(%ecx: usize, %edx: usize, 4(%esp): extern "fastcall" fn(usize, usize), 8(%esp): *mut u8) */
+.cfi_startproc
+ pushl %ebp
+ .cfi_def_cfa %esp, 8
+ .cfi_offset %ebp, -8
+ pushl %fs:0x04
+ .cfi_def_cfa %esp, 12
+ pushl %fs:0x08
+ .cfi_def_cfa %esp, 16
+ movl %esp, %ebp
+ .cfi_def_cfa_register %ebp
+
+ movl 24(%ebp), %eax
+ movl %eax, %fs:0x08
+ movl 20(%ebp), %esp
+ movl %esp, %fs:0x04
+ calll *16(%ebp)
+
+ movl %ebp, %esp
+ popl %fs:0x08
+ .cfi_def_cfa %esp, 12
+ popl %fs:0x04
+ .cfi_def_cfa %esp, 8
+ popl %ebp
+ .cfi_def_cfa %esp, 4
+ retl $12
+.cfi_endproc
+
diff --git a/vendor/psm/src/arch/zseries_linux.s b/vendor/psm/src/arch/zseries_linux.s
new file mode 100644
index 000000000..e2536a1c9
--- /dev/null
+++ b/vendor/psm/src/arch/zseries_linux.s
@@ -0,0 +1,75 @@
+/* Implementation of stack switching routines for the zSeries LINUX ABI.
+
+ This ABI is used by the s390x-unknown-linux-gnu target.
+
+ Documents used:
+
+ * LINUX for zSeries: ELF Application Binary Interface Supplement (1st ed., 2001) (LNUX-1107-01)
+ * z/Architecture: Principles of Operation (4th ed., 2004) (SA22-7832-03)
+*/
+
+#include "psm.h"
+
+.text
+
+
+.globl rust_psm_stack_direction
+.p2align 4
+.type rust_psm_stack_direction,@function
+rust_psm_stack_direction:
+/* extern "C" fn() -> u8 */
+.cfi_startproc
+ lghi %r2, STACK_DIRECTION_DESCENDING
+ br %r14
+.rust_psm_stack_direction_end:
+.size rust_psm_stack_direction,.rust_psm_stack_direction_end-rust_psm_stack_direction
+.cfi_endproc
+
+
+.globl rust_psm_stack_pointer
+.p2align 4
+.type rust_psm_stack_pointer,@function
+rust_psm_stack_pointer:
+/* extern "C" fn() -> *mut u8 */
+.cfi_startproc
+ la %r2, 0(%r15)
+ br %r14
+.rust_psm_stack_pointer_end:
+.size rust_psm_stack_pointer,.rust_psm_stack_pointer_end-rust_psm_stack_pointer
+.cfi_endproc
+
+
+.globl rust_psm_replace_stack
+.p2align 4
+.type rust_psm_replace_stack,@function
+rust_psm_replace_stack:
+/* extern "C" fn(r2: usize, r3: extern "C" fn(usize), r4: *mut u8) */
+.cfi_startproc
+ /* FIXME: backtrace does not terminate cleanly for some reason */
+ lay %r15, -160(%r4)
+ /* FIXME: this is `basr` instead of `br` purely to remove the backtrace link to the caller */
+ basr %r14, %r3
+.rust_psm_replace_stack_end:
+.size rust_psm_replace_stack,.rust_psm_replace_stack_end-rust_psm_replace_stack
+.cfi_endproc
+
+
+.globl rust_psm_on_stack
+.p2align 4
+.type rust_psm_on_stack,@function
+rust_psm_on_stack:
+/* extern "C" fn(r2: usize, r3: usize, r4: extern "C" fn(usize, usize), r5: *mut u8) */
+.cfi_startproc
+ stmg %r14, %r15, -16(%r5)
+ lay %r15, -176(%r5)
+ .cfi_def_cfa %r15, 176
+ .cfi_offset %r14, -16
+ .cfi_offset %r15, -8
+ basr %r14, %r4
+ lmg %r14, %r15, 160(%r15)
+ .cfi_restore %r14
+ .cfi_restore %r15
+ br %r14
+.rust_psm_on_stack_end:
+.size rust_psm_on_stack,.rust_psm_on_stack_end-rust_psm_on_stack
+.cfi_endproc
diff --git a/vendor/psm/src/lib.rs b/vendor/psm/src/lib.rs
new file mode 100644
index 000000000..61ab9a85b
--- /dev/null
+++ b/vendor/psm/src/lib.rs
@@ -0,0 +1,406 @@
+//! # **P**ortable **S**tack **M**anipulation
+//! This crate provides portable functions to control the stack pointer and inspect the properties
+//! of the stack. This crate does not attempt to provide safe abstractions to any operations, the
+//! only goals are correctness, portability and efficiency (in that exact order). As a consequence
+//! most functions you will find in this crate are unsafe.
+//!
+//! Note that the stack allocation is left up to the user. Unless you’re writing a safe
+//! abstraction over stack manipulation, this is unlikely to be the crate you want. Instead,
+//! consider one of the safe abstractions over this crate, such as `stacker`. Another good place to
+//! look is this crate’s reverse dependency list on crates.io.
+
+#![allow(unused_macros)]
+#![no_std]
+
+macro_rules! extern_item {
+ (unsafe $($toks: tt)+) => {
+ unsafe extern "C" $($toks)+
+ };
+ ($($toks: tt)+) => {
+ extern "C" $($toks)+
+ };
+}
+
+// Surprising: it turns out that subsequent macro_rules! definitions override previous ones,
+// instead of erroring? Convenient for us in this case, though.
+#[cfg(target_arch = "x86_64")]
+macro_rules! extern_item {
+ (unsafe $($toks: tt)+) => {
+ unsafe extern "sysv64" $($toks)+
+ };
+ ($($toks: tt)+) => {
+ extern "sysv64" $($toks)+
+ };
+}
+
+#[cfg(target_arch = "x86")]
+macro_rules! extern_item {
+ (unsafe $($toks: tt)+) => {
+ unsafe extern "fastcall" $($toks)+
+ };
+ ($($toks: tt)+) => {
+ extern "fastcall" $($toks)+
+ };
+}
+
+#[cfg(target_arch = "arm")]
+macro_rules! extern_item {
+ (unsafe $($toks: tt)+) => {
+ unsafe extern "aapcs" $($toks)+
+ };
+ ($($toks: tt)+) => {
+ extern "aapcs" $($toks)+
+ };
+}
+
+// NB: this could be nicer across multiple blocks but we cannot do it because of
+// https://github.com/rust-lang/rust/issues/65847
+extern_item! { {
+ #![link(name="psm_s")]
+
+ #[cfg(asm)]
+ fn rust_psm_stack_direction() -> u8;
+ #[cfg(asm)]
+ fn rust_psm_stack_pointer() -> *mut u8;
+
+ #[cfg(all(switchable_stack, not(target_os = "windows")))]
+ #[link_name="rust_psm_replace_stack"]
+ fn _rust_psm_replace_stack(
+ data: usize,
+ callback: extern_item!(unsafe fn(usize) -> !),
+ sp: *mut u8
+ ) -> !;
+ #[cfg(all(switchable_stack, not(target_os = "windows")))]
+ #[link_name="rust_psm_on_stack"]
+ fn _rust_psm_on_stack(
+ data: usize,
+ return_ptr: usize,
+ callback: extern_item!(unsafe fn(usize, usize)),
+ sp: *mut u8,
+ );
+ #[cfg(all(switchable_stack, target_os = "windows"))]
+ fn rust_psm_replace_stack(
+ data: usize,
+ callback: extern_item!(unsafe fn(usize) -> !),
+ sp: *mut u8,
+ stack_base: *mut u8
+ ) -> !;
+ #[cfg(all(switchable_stack, target_os = "windows"))]
+ fn rust_psm_on_stack(
+ data: usize,
+ return_ptr: usize,
+ callback: extern_item!(unsafe fn(usize, usize)),
+ sp: *mut u8,
+ stack_base: *mut u8
+ );
+} }
+
+#[cfg(all(switchable_stack, not(target_os = "windows")))]
+#[inline(always)]
+unsafe fn rust_psm_replace_stack(
+ data: usize,
+ callback: extern_item!(unsafe fn(usize) -> !),
+ sp: *mut u8,
+ _: *mut u8,
+) -> ! {
+ _rust_psm_replace_stack(data, callback, sp)
+}
+
+#[cfg(all(switchable_stack, not(target_os = "windows")))]
+#[inline(always)]
+unsafe fn rust_psm_on_stack(
+ data: usize,
+ return_ptr: usize,
+ callback: extern_item!(unsafe fn(usize, usize)),
+ sp: *mut u8,
+ _: *mut u8,
+) {
+ _rust_psm_on_stack(data, return_ptr, callback, sp)
+}
+
+/// Run the closure on the provided stack.
+///
+/// Once the closure completes its execution, the original stack pointer is restored and execution
+/// returns to the caller.
+///
+/// The `base` address must be the low address of the stack memory region, regardless of the stack
+/// growth direction. It is not necessary for the whole region `[base; base + size]` to be usable
+/// at the time this function is called, however it is required that at least the following hold:
+///
+/// * Both `base` and `base + size` are aligned up to the target-specific requirements;
+/// * Depending on the `StackDirection` value for the platform, the end of the stack memory region,
+/// which would end up containing the first frame(s), must have a sufficient number of pages
+/// allocated to execute code until more pages are committed. The other end should contain a guard
+/// page (not writable, readable or executable) to ensure Rust’s soundness guarantees.
+///
+/// Note that some or all of these considerations may be irrelevant to some applications. For
+/// example, Rust’s soundness story relies on all stacks having a guard page; however, if the user
+/// is able to guarantee that the memory region used for the stack cannot be exceeded, a guard page
+/// may end up being an unnecessary expense.
+///
+/// The previous stack may not be deallocated. If the ability to deallocate the old stack is
+/// desired, consider `replace_stack` instead.
+///
+/// # Guidelines
+///
+/// Memory regions that are aligned to a single page (usually 4kB) are an extremely portable choice
+/// for stacks.
+///
+/// Allocate at least 4kB of stack. Some architectures (such as SPARC) consume stack memory
+/// significantly faster than the more usual architectures such as x86 or ARM. Allocating
+/// less than 4kB of memory may make it impossible to commit more pages without overflowing the
+/// stack later on.
+///
+/// # Unsafety
+///
+/// The stack `base` address must be aligned as appropriate for the target.
+///
+/// The stack `size` must be a multiple of the stack alignment required by the target.
+///
+/// The `size` must not overflow `isize`.
+///
+/// `callback` must not unwind or return control flow by any means other than directly returning.
+///
+/// # Examples
+///
+/// ```
+/// use std::alloc;
+/// const STACK_ALIGN: usize = 4096;
+/// const STACK_SIZE: usize = 4096;
+/// unsafe {
+/// let layout = alloc::Layout::from_size_align(STACK_SIZE, STACK_ALIGN).unwrap();
+/// let new_stack = alloc::alloc(layout);
+/// assert!(!new_stack.is_null(), "allocations must succeed!");
+/// let (stack, result) = psm::on_stack(new_stack, STACK_SIZE, || {
+/// (psm::stack_pointer(), 4 + 4)
+/// });
+/// println!("4 + 4 = {} has been calculated on stack {:p}", result, stack);
+/// }
+/// ```
+#[cfg(switchable_stack)]
+pub unsafe fn on_stack<R, F: FnOnce() -> R>(base: *mut u8, size: usize, callback: F) -> R {
+ use core::mem::MaybeUninit;
+
+ extern_item! {
+ unsafe fn with_on_stack<R, F: FnOnce() -> R>(callback_ptr: usize, return_ptr: usize) {
+ let return_ptr = (*(return_ptr as *mut MaybeUninit<R>)).as_mut_ptr();
+ let callback = (*(callback_ptr as *mut MaybeUninit<F>)).as_ptr();
+ // Safe to move out of `F`, because the closure is forgotten in `on_stack` and dropping
+ // only occurs in this callback.
+ return_ptr.write((callback.read())());
+ }
+ }
+ let sp = match StackDirection::new() {
+ StackDirection::Ascending => base,
+ StackDirection::Descending => base.offset(size as isize),
+ };
+ let mut callback: MaybeUninit<F> = MaybeUninit::new(callback);
+ let mut return_value: MaybeUninit<R> = MaybeUninit::uninit();
+ rust_psm_on_stack(
+ &mut callback as *mut MaybeUninit<F> as usize,
+ &mut return_value as *mut MaybeUninit<R> as usize,
+ with_on_stack::<R, F>,
+ sp,
+ base,
+ );
+ return return_value.assume_init();
+}
+
+/// Run the provided non-terminating computation on an entirely new stack.
+///
+/// The `base` address must be the low address of the stack memory region, regardless of the stack
+/// growth direction. It is not necessary for the whole region `[base; base + size]` to be usable
+/// at the time this function is called, however it is required that at least the following hold:
+///
+/// * Both `base` and `base + size` are aligned up to the target-specific requirements;
+/// * Depending on the `StackDirection` value for the platform, the end of the stack memory region,
+/// which would end up containing the first frame(s), must have a sufficient number of pages
+/// allocated to execute code until more pages are committed. The other end should contain a guard
+/// page (not writable, readable or executable) to ensure Rust’s soundness guarantees.
+///
+/// Note that some or all of these considerations may be irrelevant to some applications. For
+/// example, Rust’s soundness story relies on all stacks having a guard page; however, if the user
+/// is able to guarantee that the memory region used for the stack cannot be exceeded, a guard page
+/// may end up being an unnecessary expense.
+///
+/// The previous stack is not deallocated, and must not be deallocated unless the data on the old
+/// stack is no longer referenced in any way (e.g. by the `callback` closure).
+///
+/// On platforms where multiple stack pointers are available, the “current” stack pointer is
+/// replaced.
+///
+/// # Guidelines
+///
+/// Memory regions that are aligned to a single page (usually 4kB) are an extremely portable choice
+/// for stacks.
+///
+/// Allocate at least 4kB of stack. Some architectures (such as SPARC) consume stack memory
+/// significantly faster than the more usual architectures such as x86 or ARM. Allocating
+/// less than 4kB of memory may make it impossible to commit more pages without overflowing the
+/// stack later on.
+///
+/// # Unsafety
+///
+/// The stack `base` address must be aligned as appropriate for the target.
+///
+/// The stack `size` must be a multiple of the stack alignment required by the target.
+///
+/// The `size` must not overflow `isize`.
+///
+/// `callback` must not return (not currently enforced by the type system because `!` is unstable),
+/// unwind, or otherwise return control flow to any of the previous frames.
+#[cfg(switchable_stack)]
+pub unsafe fn replace_stack<F: FnOnce()>(base: *mut u8, size: usize, callback: F) -> ! {
+ extern_item! { unsafe fn with_replaced_stack<F: FnOnce()>(d: usize) -> ! {
+ // Safe to move out, because the closure is essentially forgotten by
+ // this being required to never return...
+ ::core::ptr::read(d as *const F)();
+ ::core::hint::unreachable_unchecked();
+ } }
+ let sp = match StackDirection::new() {
+ StackDirection::Ascending => base,
+ StackDirection::Descending => base.offset(size as isize),
+ };
+ rust_psm_replace_stack(
+ &callback as *const F as usize,
+ with_replaced_stack::<F>,
+ sp,
+ base,
+ );
+}
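As a rough usage sketch (hedged: the heap-backed stack region, its size, and ending the process via `std::process::exit` are illustrative choices, not requirements of the API), calling `replace_stack` on a target where stack switching is available might look like this:

```rust
use std::alloc;

const STACK_ALIGN: usize = 4096;
const STACK_SIZE: usize = 16 * 4096;

fn main() {
    unsafe {
        // Allocate a fresh, page-aligned region to serve as the replacement stack.
        let layout = alloc::Layout::from_size_align(STACK_SIZE, STACK_ALIGN).unwrap();
        let new_stack = alloc::alloc(layout);
        assert!(!new_stack.is_null(), "allocations must succeed!");
        // The callback must never return control to the previous frames, so the
        // process is terminated once the work is done.
        psm::replace_stack(new_stack, STACK_SIZE, || {
            println!("running on a replacement stack at {:p}", psm::stack_pointer());
            std::process::exit(0);
        });
    }
}
```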
+
+/// The direction into which stack grows as stack frames are made.
+///
+/// This is a target-specific property that can be obtained at runtime by calling
+/// `StackDirection::new()`.
+#[derive(Clone, Copy, PartialEq, Eq, Debug)]
+pub enum StackDirection {
+ Ascending = 1,
+ Descending = 2,
+}
+
+impl StackDirection {
+ /// Obtain the stack growth direction.
+ #[cfg(asm)]
+ pub fn new() -> StackDirection {
+ const ASC: u8 = StackDirection::Ascending as u8;
+ const DSC: u8 = StackDirection::Descending as u8;
+ unsafe {
+ match rust_psm_stack_direction() {
+ ASC => StackDirection::Ascending,
+ DSC => StackDirection::Descending,
+ _ => ::core::hint::unreachable_unchecked(),
+ }
+ }
+ }
+}
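To show how the direction is typically consumed (this simply mirrors the `sp` computation inside `on_stack` and `replace_stack` above; the helper name `initial_stack_pointer` is invented for illustration):

```rust
/// Pick the address a fresh stack should start at for a region `[base, base + size)`,
/// depending on the platform's stack growth direction.
fn initial_stack_pointer(base: *mut u8, size: usize) -> *mut u8 {
    match psm::StackDirection::new() {
        // An ascending stack starts at the low end of the region...
        psm::StackDirection::Ascending => base,
        // ...while a descending stack starts at the high end of it.
        psm::StackDirection::Descending => base.wrapping_add(size),
    }
}
```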
+
+/// Returns current stack pointer.
+///
+/// Note that the stack pointer returned is from the perspective of the caller. From the
+/// perspective of the `stack_pointer` function, the pointer returned is its frame pointer.
+///
+/// While it is a goal to minimize the amount of stack used by this function, implementations for
+/// some targets may be unable to avoid allocating a stack frame. This makes this function
+/// suitable for stack exhaustion detection only in conjunction with sufficient padding.
+///
+/// Using `stack_pointer` to check for stack exhaustion is tricky to get right. It is impossible to
+/// know the callee’s frame size, therefore such a value must be derived in some other way. A common
+/// approach is to use stack padding (reserve enough stack space for any function to be called) and
+/// check against the padded threshold. If the padding is chosen incorrectly, a situation similar to
+/// the one described below may occur:
+///
+/// 1. For stack exhaustion check, remaining stack is checked against `stack_pointer` with the
+/// padding applied;
+/// 2. Callee allocates more stack than was accounted for with padding, and accesses pages outside
+/// the stack, invalidating the execution (by e.g. crashing).
+#[cfg(asm)]
+pub fn stack_pointer() -> *mut u8 {
+ unsafe { rust_psm_stack_pointer() }
+}
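A sketch of the padded-threshold check described above; the helper name, the way the stack bounds are passed in, and the 64 kB padding mentioned in the usage comment are assumptions made for illustration:

```rust
/// Returns true when fewer than `padding` bytes remain between the current stack
/// pointer and the far end of the stack region `[base, base + size)`.
fn stack_nearly_exhausted(base: *mut u8, size: usize, padding: usize) -> bool {
    let sp = psm::stack_pointer() as usize;
    let low = base as usize;
    let high = low + size;
    let remaining = match psm::StackDirection::new() {
        // An ascending stack grows from `low` towards `high`...
        psm::StackDirection::Ascending => high.saturating_sub(sp),
        // ...while a descending stack grows from `high` towards `low`.
        psm::StackDirection::Descending => sp.saturating_sub(low),
    };
    remaining < padding
}

// e.g. check `stack_nearly_exhausted(stack_base, stack_size, 64 * 1024)` before a deep
// recursive call, where the 64 kB padding is a caller-chosen estimate of the callee's needs.
```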
+
+/// Macro that outputs its tokens only if `psm::on_stack` and `psm::replace_stack` are available.
+///
+/// # Examples
+///
+/// ```
+/// # use psm::psm_stack_manipulation;
+/// psm_stack_manipulation! {
+/// yes {
+/// /* Functions `on_stack` and `replace_stack` are available here */
+/// }
+/// no {
+/// /* Functions `on_stack` and `replace_stack` are not available here */
+/// }
+/// }
+/// ```
+#[cfg(switchable_stack)]
+#[macro_export]
+macro_rules! psm_stack_manipulation {
+ (yes { $($yes: tt)* } no { $($no: tt)* }) => { $($yes)* };
+}
+
+/// Macro that outputs its tokens only if `psm::on_stack` and `psm::replace_stack` are available.
+///
+/// # Examples
+///
+/// ```
+/// # use psm::psm_stack_manipulation;
+/// psm_stack_manipulation! {
+/// yes {
+/// /* Functions `on_stack` and `replace_stack` are available here */
+/// }
+/// no {
+/// /* Functions `on_stack` and `replace_stack` are not available here */
+/// }
+/// }
+/// ```
+#[cfg(not(switchable_stack))]
+#[macro_export]
+macro_rules! psm_stack_manipulation {
+ (yes { $($yes: tt)* } no { $($no: tt)* }) => { $($no)* };
+}
+
+/// Macro that outputs its tokens only if `psm::stack_pointer` and `psm::StackDirection::new` are
+/// available.
+///
+/// # Examples
+///
+/// ```
+/// # use psm::psm_stack_information;
+/// psm_stack_information! {
+/// yes {
+/// /* `psm::stack_pointer` and `psm::StackDirection::new` are available here */
+/// }
+/// no {
+/// /* `psm::stack_pointer` and `psm::StackDirection::new` are not available here */
+/// }
+/// }
+/// ```
+#[cfg(asm)]
+#[macro_export]
+macro_rules! psm_stack_information {
+ (yes { $($yes: tt)* } no { $($no: tt)* }) => { $($yes)* };
+}
+
+/// Macro that outputs its tokens only if `psm::stack_pointer` and `psm::StackDirection::new` are
+/// available.
+///
+/// # Examples
+///
+/// ```
+/// # use psm::psm_stack_information;
+/// psm_stack_information! {
+/// yes {
+/// /* `psm::stack_pointer` and `psm::StackDirection::new` are available here */
+/// }
+/// no {
+/// /* `psm::stack_pointer` and `psm::StackDirection::new` are not available here */
+/// }
+/// }
+/// ```
+#[cfg(not(asm))]
+#[macro_export]
+macro_rules! psm_stack_information {
+ (yes { $($yes: tt)* } no { $($no: tt)* }) => { $($no)* };
+}
diff --git a/vendor/psm/tests/stack_direction.rs b/vendor/psm/tests/stack_direction.rs
new file mode 100644
index 000000000..609decbfe
--- /dev/null
+++ b/vendor/psm/tests/stack_direction.rs
@@ -0,0 +1,6 @@
+extern crate psm;
+
+#[test]
+fn always_equal() {
+ assert_eq!(psm::StackDirection::new(), psm::StackDirection::new());
+}
diff --git a/vendor/psm/tests/stack_direction_2.rs b/vendor/psm/tests/stack_direction_2.rs
new file mode 100644
index 000000000..dd0679074
--- /dev/null
+++ b/vendor/psm/tests/stack_direction_2.rs
@@ -0,0 +1,29 @@
+extern crate psm;
+
+#[inline(never)]
+fn test_direction(previous_sp: *mut u8) {
+ let current_sp = psm::stack_pointer();
+ match psm::StackDirection::new() {
+ psm::StackDirection::Ascending => {
+ assert!(
+ current_sp > previous_sp,
+ "the stack pointer is not ascending! current = {:p}, previous = {:p}",
+ current_sp,
+ previous_sp
+ );
+ }
+ psm::StackDirection::Descending => {
+ assert!(
+ current_sp < previous_sp,
+ "the stack pointer is not descending! current = {:p}, previous = {:p}",
+ current_sp,
+ previous_sp
+ );
+ }
+ }
+}
+
+#[test]
+fn direction_right() {
+ test_direction(psm::stack_pointer());
+}