summaryrefslogtreecommitdiffstats
path: root/third_party/rust/goblin/src/elf
diff options
context:
space:
mode:
authorDaniel Baumann <daniel.baumann@progress-linux.org>2024-04-19 00:47:55 +0000
committerDaniel Baumann <daniel.baumann@progress-linux.org>2024-04-19 00:47:55 +0000
commit26a029d407be480d791972afb5975cf62c9360a6 (patch)
treef435a8308119effd964b339f76abb83a57c29483 /third_party/rust/goblin/src/elf
parentInitial commit. (diff)
downloadfirefox-26a029d407be480d791972afb5975cf62c9360a6.tar.xz
firefox-26a029d407be480d791972afb5975cf62c9360a6.zip
Adding upstream version 124.0.1.upstream/124.0.1
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'third_party/rust/goblin/src/elf')
-rw-r--r--third_party/rust/goblin/src/elf/compression_header.rs275
-rw-r--r--third_party/rust/goblin/src/elf/constants_header.rs563
-rw-r--r--third_party/rust/goblin/src/elf/constants_relocation.rs1417
-rw-r--r--third_party/rust/goblin/src/elf/dynamic.rs807
-rw-r--r--third_party/rust/goblin/src/elf/gnu_hash.rs220
-rw-r--r--third_party/rust/goblin/src/elf/header.rs630
-rw-r--r--third_party/rust/goblin/src/elf/mod.rs571
-rw-r--r--third_party/rust/goblin/src/elf/note.rs319
-rw-r--r--third_party/rust/goblin/src/elf/program_header.rs430
-rw-r--r--third_party/rust/goblin/src/elf/reloc.rs522
-rw-r--r--third_party/rust/goblin/src/elf/section_header.rs581
-rw-r--r--third_party/rust/goblin/src/elf/sym.rs607
-rw-r--r--third_party/rust/goblin/src/elf/symver.rs880
13 files changed, 7822 insertions, 0 deletions
diff --git a/third_party/rust/goblin/src/elf/compression_header.rs b/third_party/rust/goblin/src/elf/compression_header.rs
new file mode 100644
index 0000000000..1dd7aa1362
--- /dev/null
+++ b/third_party/rust/goblin/src/elf/compression_header.rs
@@ -0,0 +1,275 @@
+macro_rules! elf_compression_header {
+ () => {
+ use plain;
+ // Declare that this is a plain type.
+ unsafe impl plain::Plain for CompressionHeader {}
+
+ impl ::core::fmt::Debug for CompressionHeader {
+ fn fmt(&self, f: &mut ::core::fmt::Formatter) -> ::core::fmt::Result {
+ f.debug_struct("CompressionHeader")
+ .field("ch_type", &self.ch_type)
+ .field("ch_size", &format_args!("0x{:x}", self.ch_size))
+ .field("ch_addralign", &format_args!("0x{:x}", self.ch_addralign))
+ .finish()
+ }
+ }
+ };
+}
+
+/// ZLIB/DEFLATE algorithm.
+pub const ELFCOMPRESS_ZLIB: u32 = 1;
+/// Start of OS-specific.
+pub const ELFCOMPRESS_LOOS: u32 = 0x6000_0000;
+/// End of OS-specific.
+pub const ELFCOMPRESS_HIOS: u32 = 0x6fff_ffff;
+/// Start of processor-specific.
+pub const ELFCOMPRESS_LOPROC: u32 = 0x7000_0000;
+/// End of processor-specific.
+pub const ELFCOMPRESS_HIPROC: u32 = 0x7fff_ffff;
+
+macro_rules! elf_compression_header_std_impl {
+ ($size:ty) => {
+ #[cfg(test)]
+ mod tests {
+ use super::*;
+ #[test]
+ fn size_of() {
+ assert_eq!(::std::mem::size_of::<CompressionHeader>(), SIZEOF_CHDR);
+ }
+ }
+
+ if_alloc! {
+ use crate::elf::compression_header::CompressionHeader as ElfCompressionHeader;
+
+ use plain::Plain;
+
+ if_std! {
+ use crate::error::Result;
+
+ use std::fs::File;
+ use std::io::{Read, Seek};
+ use std::io::SeekFrom::Start;
+ }
+
+ impl From<CompressionHeader> for ElfCompressionHeader {
+ fn from(ch: CompressionHeader) -> Self {
+ ElfCompressionHeader {
+ ch_type: ch.ch_type,
+ ch_size: u64::from(ch.ch_size),
+ ch_addralign: u64::from(ch.ch_addralign),
+ }
+ }
+ }
+
+ impl CompressionHeader {
+ pub fn from_bytes(bytes: &[u8]) -> CompressionHeader {
+ let mut chdr = CompressionHeader::default();
+ chdr.copy_from_bytes(bytes).expect("buffer is too short for header");
+ chdr
+ }
+
+ #[cfg(feature = "std")]
+ pub fn from_fd(fd: &mut File, offset: u64) -> Result<CompressionHeader> {
+ let mut chdr = CompressionHeader::default();
+ fd.seek(Start(offset))?;
+ unsafe {
+ fd.read_exact(plain::as_mut_bytes(&mut chdr))?;
+ }
+ Ok(chdr)
+ }
+ }
+ } // end if_alloc
+ };
+}
+
+#[cfg(feature = "alloc")]
+use scroll::{Pread, Pwrite, SizeWith};
+
+pub mod compression_header32 {
+ pub use crate::elf::compression_header::*;
+
+ #[repr(C)]
+ #[derive(Copy, Clone, Eq, PartialEq, Default)]
+ #[cfg_attr(feature = "alloc", derive(Pread, Pwrite, SizeWith))]
+ /// The compression header is used at the start of SHF_COMPRESSED sections
+ pub struct CompressionHeader {
+ /// Compression format
+ pub ch_type: u32,
+ /// Uncompressed data size
+ pub ch_size: u32,
+ /// Uncompressed data alignment
+ pub ch_addralign: u32,
+ }
+
+ elf_compression_header!();
+
+ pub const SIZEOF_CHDR: usize = 12;
+
+ elf_compression_header_std_impl!(u32);
+
+ if_alloc! {
+ impl From<ElfCompressionHeader> for CompressionHeader {
+ fn from(ch: ElfCompressionHeader) -> Self {
+ CompressionHeader {
+ ch_type: ch.ch_type,
+ ch_size: ch.ch_size as u32,
+ ch_addralign: ch.ch_addralign as u32,
+ }
+ }
+ }
+ }
+}
+
+pub mod compression_header64 {
+ pub use crate::elf::compression_header::*;
+
+ #[repr(C)]
+ #[derive(Copy, Clone, Eq, PartialEq, Default)]
+ #[cfg_attr(feature = "alloc", derive(Pread, Pwrite, SizeWith))]
+ /// The compression header is used at the start of SHF_COMPRESSED sections
+ pub struct CompressionHeader {
+ /// Compression format
+ pub ch_type: u32,
+ pub ch_reserved: u32,
+ /// Uncompressed data size
+ pub ch_size: u64,
+ /// Uncompressed data alignment
+ pub ch_addralign: u64,
+ }
+
+ elf_compression_header!();
+
+ pub const SIZEOF_CHDR: usize = 24;
+
+ elf_compression_header_std_impl!(u64);
+
+ if_alloc! {
+ impl From<ElfCompressionHeader> for CompressionHeader {
+ fn from(ch: ElfCompressionHeader) -> Self {
+ CompressionHeader {
+ ch_type: ch.ch_type,
+ ch_reserved: 0,
+ ch_size: ch.ch_size as u64,
+ ch_addralign: ch.ch_addralign as u64,
+ }
+ }
+ }
+ }
+}
+
+///////////////////////////////
+// Std/analysis/Unified Structs
+///////////////////////////////
+
+if_alloc! {
+ #[cfg(feature = "endian_fd")]
+ use crate::error;
+ use core::fmt;
+ use core::result;
+ use scroll::ctx;
+ use crate::container::{Container, Ctx};
+
+ #[derive(Default, PartialEq, Clone)]
+    /// A unified CompressionHeader - convertible to and from 32-bit and 64-bit variants
+ pub struct CompressionHeader {
+ /// Compression format
+ pub ch_type: u32,
+ /// Uncompressed data size
+ pub ch_size: u64,
+ /// Uncompressed data alignment
+ pub ch_addralign: u64,
+ }
+
+ impl CompressionHeader {
+ /// Return the size of the underlying compression header, given a `container`
+ #[inline]
+ pub fn size(ctx: Ctx) -> usize {
+ use scroll::ctx::SizeWith;
+ Self::size_with(&ctx)
+ }
+ pub fn new() -> Self {
+ CompressionHeader {
+ ch_type: 0,
+ ch_size: 0,
+ ch_addralign: 2 << 8,
+ }
+ }
+ /// Parse a compression header from `bytes` at `offset`, using the given `ctx`
+ #[cfg(feature = "endian_fd")]
+ pub fn parse(bytes: &[u8], mut offset: usize, ctx: Ctx) -> error::Result<CompressionHeader> {
+ use scroll::Pread;
+ bytes.gread_with(&mut offset, ctx)
+ }
+ }
+
+ impl fmt::Debug for CompressionHeader {
+ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+ f.debug_struct("CompressionHeader")
+ .field("ch_type", &self.ch_type)
+ .field("ch_size", &format_args!("0x{:x}", self.ch_size))
+ .field("ch_addralign", &format_args!("0x{:x}", self.ch_addralign))
+ .finish()
+ }
+ }
+
+ impl ctx::SizeWith<Ctx> for CompressionHeader {
+ fn size_with( &Ctx { container, .. }: &Ctx) -> usize {
+ match container {
+ Container::Little => {
+ compression_header32::SIZEOF_CHDR
+ },
+ Container::Big => {
+ compression_header64::SIZEOF_CHDR
+ },
+ }
+ }
+ }
+
+ impl<'a> ctx::TryFromCtx<'a, Ctx> for CompressionHeader {
+ type Error = crate::error::Error;
+ fn try_from_ctx(bytes: &'a [u8], Ctx {container, le}: Ctx) -> result::Result<(Self, usize), Self::Error> {
+ use scroll::Pread;
+ let res = match container {
+ Container::Little => {
+ (bytes.pread_with::<compression_header32::CompressionHeader>(0, le)?.into(), compression_header32::SIZEOF_CHDR)
+ },
+ Container::Big => {
+ (bytes.pread_with::<compression_header64::CompressionHeader>(0, le)?.into(), compression_header64::SIZEOF_CHDR)
+ }
+ };
+ Ok(res)
+ }
+ }
+
+ impl ctx::TryIntoCtx<Ctx> for CompressionHeader {
+ type Error = crate::error::Error;
+ fn try_into_ctx(self, bytes: &mut [u8], Ctx {container, le}: Ctx) -> result::Result<usize, Self::Error> {
+ use scroll::Pwrite;
+ match container {
+ Container::Little => {
+ let chdr: compression_header32::CompressionHeader = self.into();
+ Ok(bytes.pwrite_with(chdr, 0, le)?)
+ },
+ Container::Big => {
+ let chdr: compression_header64::CompressionHeader = self.into();
+ Ok(bytes.pwrite_with(chdr, 0, le)?)
+ }
+ }
+ }
+ }
+ impl ctx::IntoCtx<Ctx> for CompressionHeader {
+ fn into_ctx(self, bytes: &mut [u8], Ctx {container, le}: Ctx) {
+ use scroll::Pwrite;
+ match container {
+ Container::Little => {
+ let chdr: compression_header32::CompressionHeader = self.into();
+ bytes.pwrite_with(chdr, 0, le).unwrap();
+ },
+ Container::Big => {
+ let chdr: compression_header64::CompressionHeader = self.into();
+ bytes.pwrite_with(chdr, 0, le).unwrap();
+ }
+ }
+ }
+ }
+} // end if_alloc
diff --git a/third_party/rust/goblin/src/elf/constants_header.rs b/third_party/rust/goblin/src/elf/constants_header.rs
new file mode 100644
index 0000000000..9b19c84c2c
--- /dev/null
+++ b/third_party/rust/goblin/src/elf/constants_header.rs
@@ -0,0 +1,563 @@
+// sweet emacs regexp
+// pub const \([[:word:]|_]*\)[[:space:]]*\([[:digit:]]+\)[[:space:]]*/\*\(.*\) \*/
+// \\\\3 C-q C-j pub const \1: u32 = \2;
+
+/// TODO: use Enum with explicit discriminant and get debug printer for free?
+
+/// No machine
+pub const EM_NONE: u16 = 0;
+/// AT&T WE 32100
+pub const EM_M32: u16 = 1;
+/// SUN SPARC
+pub const EM_SPARC: u16 = 2;
+/// Intel 80386
+pub const EM_386: u16 = 3;
+/// Motorola m68k family
+pub const EM_68K: u16 = 4;
+/// Motorola m88k family
+pub const EM_88K: u16 = 5;
+/// Intel MCU
+pub const EM_IAMCU: u16 = 6;
+/// Intel 80860
+pub const EM_860: u16 = 7;
+/// MIPS R3000 big-endian
+pub const EM_MIPS: u16 = 8;
+/// IBM System/370
+pub const EM_S370: u16 = 9;
+/// MIPS R3000 little-endian
+pub const EM_MIPS_RS3_LE: u16 = 10;
+// reserved 11-14
+/// HPPA
+pub const EM_PARISC: u16 = 15;
+// reserved 16
+/// Fujitsu VPP500
+pub const EM_VPP500: u16 = 17;
+/// Sun's "v8plus"
+pub const EM_SPARC32PLUS: u16 = 18;
+/// Intel 80960
+pub const EM_960: u16 = 19;
+/// PowerPC
+pub const EM_PPC: u16 = 20;
+/// PowerPC 64-bit
+pub const EM_PPC64: u16 = 21;
+/// IBM S390
+pub const EM_S390: u16 = 22;
+/// IBM SPU/SPC
+pub const EM_SPU: u16 = 23;
+// reserved 24-35
+/// NEC V800 series
+pub const EM_V800: u16 = 36;
+/// Fujitsu FR20
+pub const EM_FR20: u16 = 37;
+/// TRW RH-32
+pub const EM_RH32: u16 = 38;
+/// Motorola RCE
+pub const EM_RCE: u16 = 39;
+/// ARM
+pub const EM_ARM: u16 = 40;
+/// Digital Alpha
+pub const EM_FAKE_ALPHA: u16 = 41;
+/// Hitachi SH
+pub const EM_SH: u16 = 42;
+/// SPARC v9 64-bit
+pub const EM_SPARCV9: u16 = 43;
+/// Siemens Tricore
+pub const EM_TRICORE: u16 = 44;
+/// Argonaut RISC Core
+pub const EM_ARC: u16 = 45;
+/// Hitachi H8/300
+pub const EM_H8_300: u16 = 46;
+/// Hitachi H8/300H
+pub const EM_H8_300H: u16 = 47;
+/// Hitachi H8S
+pub const EM_H8S: u16 = 48;
+/// Hitachi H8/500
+pub const EM_H8_500: u16 = 49;
+/// Intel Merced
+pub const EM_IA_64: u16 = 50;
+/// Stanford MIPS-X
+pub const EM_MIPS_X: u16 = 51;
+/// Motorola Coldfire
+pub const EM_COLDFIRE: u16 = 52;
+/// Motorola M68HC12
+pub const EM_68HC12: u16 = 53;
+/// Fujitsu MMA Multimedia Accelerator
+pub const EM_MMA: u16 = 54;
+/// Siemens PCP
+pub const EM_PCP: u16 = 55;
+/// Sony nCPU embedded RISC
+pub const EM_NCPU: u16 = 56;
+/// Denso NDR1 microprocessor
+pub const EM_NDR1: u16 = 57;
+/// Motorola Start*Core processor
+pub const EM_STARCORE: u16 = 58;
+/// Toyota ME16 processor
+pub const EM_ME16: u16 = 59;
+/// STMicroelectronic ST100 processor
+pub const EM_ST100: u16 = 60;
+/// Advanced Logic Corp. Tinyj emb.fam
+pub const EM_TINYJ: u16 = 61;
+/// AMD x86-64 architecture
+pub const EM_X86_64: u16 = 62;
+/// Sony DSP Processor
+pub const EM_PDSP: u16 = 63;
+/// Digital PDP-10
+pub const EM_PDP10: u16 = 64;
+/// Digital PDP-11
+pub const EM_PDP11: u16 = 65;
+/// Siemens FX66 microcontroller
+pub const EM_FX66: u16 = 66;
+/// STMicroelectronics ST9+ 8/16 mc
+pub const EM_ST9PLUS: u16 = 67;
+/// STmicroelectronics ST7 8 bit mc
+pub const EM_ST7: u16 = 68;
+/// Motorola MC68HC16 microcontroller
+pub const EM_68HC16: u16 = 69;
+/// Motorola MC68HC11 microcontroller
+pub const EM_68HC11: u16 = 70;
+/// Motorola MC68HC08 microcontroller
+pub const EM_68HC08: u16 = 71;
+/// Motorola MC68HC05 microcontroller
+pub const EM_68HC05: u16 = 72;
+/// Silicon Graphics SVx
+pub const EM_SVX: u16 = 73;
+/// STMicroelectronics ST19 8 bit mc
+pub const EM_ST19: u16 = 74;
+/// Digital VAX
+pub const EM_VAX: u16 = 75;
+/// Axis Communications 32-bit emb.proc
+pub const EM_CRIS: u16 = 76;
+/// Infineon Technologies 32-bit emb.proc
+pub const EM_JAVELIN: u16 = 77;
+/// Element 14 64-bit DSP Processor
+pub const EM_FIREPATH: u16 = 78;
+/// LSI Logic 16-bit DSP Processor
+pub const EM_ZSP: u16 = 79;
+/// Donald Knuth's educational 64-bit proc
+pub const EM_MMIX: u16 = 80;
+/// Harvard University machine-independent object files
+pub const EM_HUANY: u16 = 81;
+/// SiTera Prism
+pub const EM_PRISM: u16 = 82;
+/// Atmel AVR 8-bit microcontroller
+pub const EM_AVR: u16 = 83;
+/// Fujitsu FR30
+pub const EM_FR30: u16 = 84;
+/// Mitsubishi D10V
+pub const EM_D10V: u16 = 85;
+/// Mitsubishi D30V
+pub const EM_D30V: u16 = 86;
+/// NEC v850
+pub const EM_V850: u16 = 87;
+/// Mitsubishi M32R
+pub const EM_M32R: u16 = 88;
+/// Matsushita MN10300
+pub const EM_MN10300: u16 = 89;
+/// Matsushita MN10200
+pub const EM_MN10200: u16 = 90;
+/// picoJava
+pub const EM_PJ: u16 = 91;
+/// OpenRISC 32-bit embedded processor
+pub const EM_OPENRISC: u16 = 92;
+/// ARC International ARCompact
+pub const EM_ARC_COMPACT: u16 = 93;
+/// Tensilica Xtensa Architecture
+pub const EM_XTENSA: u16 = 94;
+/// Alphamosaic VideoCore
+pub const EM_VIDEOCORE: u16 = 95;
+/// Thompson Multimedia General Purpose Proc
+pub const EM_TMM_GPP: u16 = 96;
+/// National Semi. 32000
+pub const EM_NS32K: u16 = 97;
+/// Tenor Network TPC
+pub const EM_TPC: u16 = 98;
+/// Trebia SNP 1000
+pub const EM_SNP1K: u16 = 99;
+/// STMicroelectronics ST200
+pub const EM_ST200: u16 = 100;
+/// Ubicom IP2xxx
+pub const EM_IP2K: u16 = 101;
+/// MAX processor
+pub const EM_MAX: u16 = 102;
+/// National Semi. CompactRISC
+pub const EM_CR: u16 = 103;
+/// Fujitsu F2MC16
+pub const EM_F2MC16: u16 = 104;
+/// Texas Instruments msp430
+pub const EM_MSP430: u16 = 105;
+/// Analog Devices Blackfin DSP
+pub const EM_BLACKFIN: u16 = 106;
+/// Seiko Epson S1C33 family
+pub const EM_SE_C33: u16 = 107;
+/// Sharp embedded microprocessor
+pub const EM_SEP: u16 = 108;
+/// Arca RISC
+pub const EM_ARCA: u16 = 109;
+/// PKU-Unity & MPRC Peking Uni. mc series
+pub const EM_UNICORE: u16 = 110;
+/// eXcess configurable cpu
+pub const EM_EXCESS: u16 = 111;
+/// Icera Semi. Deep Execution Processor
+pub const EM_DXP: u16 = 112;
+/// Altera Nios II
+pub const EM_ALTERA_NIOS2: u16 = 113;
+/// National Semi. CompactRISC CRX
+pub const EM_CRX: u16 = 114;
+/// Motorola XGATE
+pub const EM_XGATE: u16 = 115;
+/// Infineon C16x/XC16x
+pub const EM_C166: u16 = 116;
+/// Renesas M16C
+pub const EM_M16C: u16 = 117;
+/// Microchip Technology dsPIC30F
+pub const EM_DSPIC30F: u16 = 118;
+/// Freescale Communication Engine RISC
+pub const EM_CE: u16 = 119;
+/// Renesas M32C
+pub const EM_M32C: u16 = 120;
+// reserved 121-130
+/// Altium TSK3000
+pub const EM_TSK3000: u16 = 131;
+/// Freescale RS08
+pub const EM_RS08: u16 = 132;
+/// Analog Devices SHARC family
+pub const EM_SHARC: u16 = 133;
+/// Cyan Technology eCOG2
+pub const EM_ECOG2: u16 = 134;
+/// Sunplus S+core7 RISC
+pub const EM_SCORE7: u16 = 135;
+/// New Japan Radio (NJR) 24-bit DSP
+pub const EM_DSP24: u16 = 136;
+/// Broadcom VideoCore III
+pub const EM_VIDEOCORE3: u16 = 137;
+/// RISC for Lattice FPGA
+pub const EM_LATTICEMICO32: u16 = 138;
+/// Seiko Epson C17
+pub const EM_SE_C17: u16 = 139;
+/// Texas Instruments TMS320C6000 DSP
+pub const EM_TI_C6000: u16 = 140;
+/// Texas Instruments TMS320C2000 DSP
+pub const EM_TI_C2000: u16 = 141;
+/// Texas Instruments TMS320C55x DSP
+pub const EM_TI_C5500: u16 = 142;
+/// Texas Instruments App. Specific RISC
+pub const EM_TI_ARP32: u16 = 143;
+/// Texas Instruments Prog. Realtime Unit
+pub const EM_TI_PRU: u16 = 144;
+// reserved 145-159
+/// STMicroelectronics 64bit VLIW DSP
+pub const EM_MMDSP_PLUS: u16 = 160;
+/// Cypress M8C
+pub const EM_CYPRESS_M8C: u16 = 161;
+/// Renesas R32C
+pub const EM_R32C: u16 = 162;
+/// NXP Semi. TriMedia
+pub const EM_TRIMEDIA: u16 = 163;
+/// QUALCOMM DSP6
+pub const EM_QDSP6: u16 = 164;
+/// Intel 8051 and variants
+pub const EM_8051: u16 = 165;
+/// STMicroelectronics STxP7x
+pub const EM_STXP7X: u16 = 166;
+/// Andes Tech. compact code emb. RISC
+pub const EM_NDS32: u16 = 167;
+/// Cyan Technology eCOG1X
+pub const EM_ECOG1X: u16 = 168;
+/// Dallas Semi. MAXQ30 mc
+pub const EM_MAXQ30: u16 = 169;
+/// New Japan Radio (NJR) 16-bit DSP
+pub const EM_XIMO16: u16 = 170;
+/// M2000 Reconfigurable RISC
+pub const EM_MANIK: u16 = 171;
+/// Cray NV2 vector architecture
+pub const EM_CRAYNV2: u16 = 172;
+/// Renesas RX
+pub const EM_RX: u16 = 173;
+/// Imagination Tech. META
+pub const EM_METAG: u16 = 174;
+/// MCST Elbrus
+pub const EM_MCST_ELBRUS: u16 = 175;
+/// Cyan Technology eCOG16
+pub const EM_ECOG16: u16 = 176;
+/// National Semi. CompactRISC CR16
+pub const EM_CR16: u16 = 177;
+/// Freescale Extended Time Processing Unit
+pub const EM_ETPU: u16 = 178;
+/// Infineon Tech. SLE9X
+pub const EM_SLE9X: u16 = 179;
+/// Intel L10M
+pub const EM_L10M: u16 = 180;
+/// Intel K10M
+pub const EM_K10M: u16 = 181;
+// reserved 182
+/// ARM AARCH64
+pub const EM_AARCH64: u16 = 183;
+// reserved 184
+/// Atmel 32-bit microprocessor
+pub const EM_AVR32: u16 = 185;
+/// STMicroelectronics STM8
+pub const EM_STM8: u16 = 186;
+/// Tilera TILE64
+pub const EM_TILE64: u16 = 187;
+/// Tilera TILEPro
+pub const EM_TILEPRO: u16 = 188;
+/// Xilinx MicroBlaze
+pub const EM_MICROBLAZE: u16 = 189;
+/// NVIDIA CUDA
+pub const EM_CUDA: u16 = 190;
+/// Tilera TILE-Gx
+pub const EM_TILEGX: u16 = 191;
+/// CloudShield
+pub const EM_CLOUDSHIELD: u16 = 192;
+/// KIPO-KAIST Core-A 1st gen.
+pub const EM_COREA_1ST: u16 = 193;
+/// KIPO-KAIST Core-A 2nd gen.
+pub const EM_COREA_2ND: u16 = 194;
+/// Synopsys ARCompact V2
+pub const EM_ARC_COMPACT2: u16 = 195;
+/// Open8 RISC
+pub const EM_OPEN8: u16 = 196;
+/// Renesas RL78
+pub const EM_RL78: u16 = 197;
+/// Broadcom VideoCore V
+pub const EM_VIDEOCORE5: u16 = 198;
+/// Renesas 78KOR
+pub const EM_78KOR: u16 = 199;
+/// Freescale 56800EX DSC
+pub const EM_56800EX: u16 = 200;
+/// Beyond BA1
+pub const EM_BA1: u16 = 201;
+/// Beyond BA2
+pub const EM_BA2: u16 = 202;
+/// XMOS xCORE
+pub const EM_XCORE: u16 = 203;
+/// Microchip 8-bit PIC(r)
+pub const EM_MCHP_PIC: u16 = 204;
+/// Intel Graphics Technology
+pub const EM_INTELGT: u16 = 205;
+// reserved 206-209
+/// KM211 KM32
+pub const EM_KM32: u16 = 210;
+/// KM211 KMX32
+pub const EM_KMX32: u16 = 211;
+/// KM211 KMX16
+pub const EM_EMX16: u16 = 212;
+/// KM211 KMX8
+pub const EM_EMX8: u16 = 213;
+/// KM211 KVARC
+pub const EM_KVARC: u16 = 214;
+/// Paneve CDP
+pub const EM_CDP: u16 = 215;
+/// Cognitive Smart Memory Processor
+pub const EM_COGE: u16 = 216;
+/// Bluechip CoolEngine
+pub const EM_COOL: u16 = 217;
+/// Nanoradio Optimized RISC
+pub const EM_NORC: u16 = 218;
+/// CSR Kalimba
+pub const EM_CSR_KALIMBA: u16 = 219;
+/// Zilog Z80
+pub const EM_Z80: u16 = 220;
+/// Controls and Data Services VISIUMcore
+pub const EM_VISIUM: u16 = 221;
+/// FTDI Chip FT32
+pub const EM_FT32: u16 = 222;
+/// Moxie processor
+pub const EM_MOXIE: u16 = 223;
+/// AMD GPU
+pub const EM_AMDGPU: u16 = 224;
+// reserved 225-242
+/// RISC-V
+pub const EM_RISCV: u16 = 243;
+
+/// Linux BPF -- in-kernel virtual machine
+pub const EM_BPF: u16 = 247;
+
+/// C-SKY
+pub const EM_CSKY: u16 = 252;
+
+pub const EM_NUM: u16 = 248;
+
+/// Convert machine to str representation
+pub fn machine_to_str (machine: u16) -> &'static str {
+ match machine {
+ EM_M32 => "M32",
+ EM_SPARC => "SPARC",
+ EM_386 => "386",
+ EM_68K => "68K",
+ EM_88K => "88K",
+ EM_IAMCU => "IAMCU",
+ EM_860 => "860",
+ EM_MIPS => "MIPS",
+ EM_S370 => "S370",
+ EM_MIPS_RS3_LE => "MIPS_RS3_LE",
+ EM_PARISC => "PARISC",
+ EM_VPP500 => "VPP500",
+ EM_SPARC32PLUS => "SPARC32PLUS",
+ EM_960 => "960",
+ EM_PPC => "PPC",
+ EM_PPC64 => "PPC64",
+ EM_S390 => "S390",
+ EM_SPU => "SPU",
+ EM_V800 => "V800",
+ EM_FR20 => "FR20",
+ EM_RH32 => "RH32",
+ EM_RCE => "RCE",
+ EM_ARM => "ARM",
+ EM_FAKE_ALPHA => "FAKE_ALPHA",
+ EM_SH => "SH",
+ EM_SPARCV9 => "SPARCV9",
+ EM_TRICORE => "TRICORE",
+ EM_ARC => "ARC",
+ EM_H8_300 => "H8_300",
+ EM_H8_300H => "H8_300H",
+ EM_H8S => "H8S",
+ EM_H8_500 => "H8_500",
+ EM_IA_64 => "IA_64",
+ EM_MIPS_X => "MIPS_X",
+ EM_COLDFIRE => "COLDFIRE",
+ EM_68HC12 => "68HC12",
+ EM_MMA => "MMA",
+ EM_PCP => "PCP",
+ EM_NCPU => "NCPU",
+ EM_NDR1 => "NDR1",
+ EM_STARCORE => "STARCORE",
+ EM_ME16 => "ME16",
+ EM_ST100 => "ST100",
+ EM_TINYJ => "TINYJ",
+ EM_X86_64 => "X86_64",
+ EM_PDSP => "PDSP",
+ EM_PDP10 => "PDP10",
+ EM_PDP11 => "PDP11",
+ EM_FX66 => "FX66",
+ EM_ST9PLUS => "ST9PLUS",
+ EM_ST7 => "ST7",
+ EM_68HC16 => "68HC16",
+ EM_68HC11 => "68HC11",
+ EM_68HC08 => "68HC08",
+ EM_68HC05 => "68HC05",
+ EM_SVX => "SVX",
+ EM_ST19 => "ST19",
+ EM_VAX => "VAX",
+ EM_CRIS => "CRIS",
+ EM_JAVELIN => "JAVELIN",
+ EM_FIREPATH => "FIREPATH",
+ EM_ZSP => "ZSP",
+ EM_MMIX => "MMIX",
+ EM_HUANY => "HUANY",
+ EM_PRISM => "PRISM",
+ EM_AVR => "AVR",
+ EM_FR30 => "FR30",
+ EM_D10V => "D10V",
+ EM_D30V => "D30V",
+ EM_V850 => "V850",
+ EM_M32R => "M32R",
+ EM_MN10300 => "MN10300",
+ EM_MN10200 => "MN10200",
+ EM_PJ => "PJ",
+ EM_OPENRISC => "OPENRISC",
+ EM_ARC_COMPACT => "ARC_COMPACT",
+ EM_XTENSA => "XTENSA",
+ EM_VIDEOCORE => "VIDEOCORE",
+ EM_TMM_GPP => "TMM_GPP",
+ EM_NS32K => "NS32K",
+ EM_TPC => "TPC",
+ EM_SNP1K => "SNP1K",
+ EM_ST200 => "ST200",
+ EM_IP2K => "IP2K",
+ EM_MAX => "MAX",
+ EM_CR => "CR",
+ EM_F2MC16 => "F2MC16",
+ EM_MSP430 => "MSP430",
+ EM_BLACKFIN => "BLACKFIN",
+ EM_SE_C33 => "SE_C33",
+ EM_SEP => "SEP",
+ EM_ARCA => "ARCA",
+ EM_UNICORE => "UNICORE",
+ EM_EXCESS => "EXCESS",
+ EM_DXP => "DXP",
+ EM_ALTERA_NIOS2 => "ALTERA_NIOS2",
+ EM_CRX => "CRX",
+ EM_XGATE => "XGATE",
+ EM_C166 => "C166",
+ EM_M16C => "M16C",
+ EM_DSPIC30F => "DSPIC30F",
+ EM_CE => "CE",
+ EM_M32C => "M32C",
+ EM_TSK3000 => "TSK3000",
+ EM_RS08 => "RS08",
+ EM_SHARC => "SHARC",
+ EM_ECOG2 => "ECOG2",
+ EM_SCORE7 => "SCORE7",
+ EM_DSP24 => "DSP24",
+ EM_VIDEOCORE3 => "VIDEOCORE3",
+ EM_LATTICEMICO32 => "LATTICEMICO32",
+ EM_SE_C17 => "SE_C17",
+ EM_TI_C6000 => "TI_C6000",
+ EM_TI_C2000 => "TI_C2000",
+ EM_TI_C5500 => "TI_C5500",
+ EM_TI_ARP32 => "TI_ARP32",
+ EM_TI_PRU => "TI_PRU",
+ EM_MMDSP_PLUS => "MMDSP_PLUS",
+ EM_CYPRESS_M8C => "CYPRESS_M8C",
+ EM_R32C => "R32C",
+ EM_TRIMEDIA => "TRIMEDIA",
+ EM_QDSP6 => "QDSP6",
+ EM_8051 => "8051",
+ EM_STXP7X => "STXP7X",
+ EM_NDS32 => "NDS32",
+ EM_ECOG1X => "ECOG1X",
+ EM_MAXQ30 => "MAXQ30",
+ EM_XIMO16 => "XIMO16",
+ EM_MANIK => "MANIK",
+ EM_CRAYNV2 => "CRAYNV2",
+ EM_RX => "RX",
+ EM_METAG => "METAG",
+ EM_MCST_ELBRUS => "MCST_ELBRUS",
+ EM_ECOG16 => "ECOG16",
+ EM_CR16 => "CR16",
+ EM_ETPU => "ETPU",
+ EM_SLE9X => "SLE9X",
+ EM_L10M => "L10M",
+ EM_K10M => "K10M",
+ EM_AARCH64 => "AARCH64",
+ EM_AVR32 => "AVR32",
+ EM_STM8 => "STM8",
+ EM_TILE64 => "TILE64",
+ EM_TILEPRO => "TILEPRO",
+ EM_MICROBLAZE => "MICROBLAZE",
+ EM_CUDA => "CUDA",
+ EM_TILEGX => "TILEGX",
+ EM_CLOUDSHIELD => "CLOUDSHIELD",
+ EM_COREA_1ST => "COREA_1ST",
+ EM_COREA_2ND => "COREA_2ND",
+ EM_ARC_COMPACT2 => "ARC_COMPACT2",
+ EM_OPEN8 => "OPEN8",
+ EM_RL78 => "RL78",
+ EM_VIDEOCORE5 => "VIDEOCORE5",
+ EM_78KOR => "78KOR",
+ EM_56800EX => "56800EX",
+ EM_BA1 => "BA1",
+ EM_BA2 => "BA2",
+ EM_XCORE => "XCORE",
+ EM_MCHP_PIC => "MCHP_PIC",
+ EM_KM32 => "KM32",
+ EM_KMX32 => "KMX32",
+ EM_EMX16 => "EMX16",
+ EM_EMX8 => "EMX8",
+ EM_KVARC => "KVARC",
+ EM_CDP => "CDP",
+ EM_COGE => "COGE",
+ EM_COOL => "COOL",
+ EM_NORC => "NORC",
+ EM_CSR_KALIMBA => "CSR_KALIMBA",
+ EM_Z80 => "Z80",
+ EM_VISIUM => "VISIUM",
+ EM_FT32 => "FT32",
+ EM_MOXIE => "MOXIE",
+ EM_AMDGPU => "AMDGPU",
+ EM_RISCV => "RISCV",
+ EM_BPF => "BPF",
+ _val => "EM_UNKNOWN",
+ }
+}
diff --git a/third_party/rust/goblin/src/elf/constants_relocation.rs b/third_party/rust/goblin/src/elf/constants_relocation.rs
new file mode 100644
index 0000000000..6339de5434
--- /dev/null
+++ b/third_party/rust/goblin/src/elf/constants_relocation.rs
@@ -0,0 +1,1417 @@
+// x86_64 relocations
+/// No reloc.
+pub const R_X86_64_NONE: u32 = 0;
+/// Direct 64 bit.
+pub const R_X86_64_64: u32 = 1;
+/// PC relative 32 bit signed.
+pub const R_X86_64_PC32: u32 = 2;
+/// 32 bit GOT entry.
+pub const R_X86_64_GOT32: u32 = 3;
+/// 32 bit PLT address.
+pub const R_X86_64_PLT32: u32 = 4;
+/// Copy symbol at runtime.
+pub const R_X86_64_COPY: u32 = 5;
+/// Create GOT entry.
+pub const R_X86_64_GLOB_DAT: u32 = 6;
+/// Create PLT entry.
+pub const R_X86_64_JUMP_SLOT: u32 = 7;
+/// Adjust by program base.
+pub const R_X86_64_RELATIVE: u32 = 8;
+/// 32 bit signed PC relative offset to GOT.
+pub const R_X86_64_GOTPCREL: u32 = 9;
+/// Direct 32 bit zero extended.
+pub const R_X86_64_32: u32 = 10;
+/// Direct 32 bit sign extended.
+pub const R_X86_64_32S: u32 = 11;
+/// Direct 16 bit zero extended.
+pub const R_X86_64_16: u32 = 12;
+/// 16 bit sign extended pc relative.
+pub const R_X86_64_PC16: u32 = 13;
+/// Direct 8 bit sign extended.
+pub const R_X86_64_8: u32 = 14;
+/// 8 bit sign extended pc relative.
+pub const R_X86_64_PC8: u32 = 15;
+/// ID of module containing symbol.
+pub const R_X86_64_DTPMOD64: u32 = 16;
+/// Offset in module's TLS block.
+pub const R_X86_64_DTPOFF64: u32 = 17;
+/// Offset in initial TLS block.
+pub const R_X86_64_TPOFF64: u32 = 18;
+/// 32 bit signed PC relative offset to two GOT entries for GD symbol.
+pub const R_X86_64_TLSGD: u32 = 19;
+/// 32 bit signed PC relative offset to two GOT entries for LD symbol.
+pub const R_X86_64_TLSLD: u32 = 20;
+/// Offset in TLS block.
+pub const R_X86_64_DTPOFF32: u32 = 21;
+/// 32 bit signed PC relative offset to GOT entry for IE symbol.
+pub const R_X86_64_GOTTPOFF: u32 = 22;
+/// Offset in initial TLS block.
+pub const R_X86_64_TPOFF32: u32 = 23;
+/// PC relative 64 bit.
+pub const R_X86_64_PC64: u32 = 24;
+/// 64 bit offset to GOT.
+pub const R_X86_64_GOTOFF64: u32 = 25;
+/// 32 bit signed pc relative offset to GOT.
+pub const R_X86_64_GOTPC32: u32 = 26;
+/// 64-bit GOT entry offset.
+pub const R_X86_64_GOT64: u32 = 27;
+/// 64-bit PC relative offset to GOT entry.
+pub const R_X86_64_GOTPCREL64: u32 = 28;
+/// 64-bit PC relative offset to GOT.
+pub const R_X86_64_GOTPC64: u32 = 29;
+/// like GOT64, says PLT entry needed.
+pub const R_X86_64_GOTPLT64: u32 = 30;
+/// 64-bit GOT relative offset to PLT entry.
+pub const R_X86_64_PLTOFF64: u32 = 31;
+/// Size of symbol plus 32-bit addend.
+pub const R_X86_64_SIZE32: u32 = 32;
+/// Size of symbol plus 64-bit addend.
+pub const R_X86_64_SIZE64: u32 = 33;
+/// GOT offset for TLS descriptor.
+pub const R_X86_64_GOTPC32_TLSDESC: u32 = 34;
+/// Marker for call through TLS descriptor.
+pub const R_X86_64_TLSDESC_CALL: u32 = 35;
+/// TLS descriptor.
+pub const R_X86_64_TLSDESC: u32 = 36;
+/// Adjust indirectly by program base.
+pub const R_X86_64_IRELATIVE: u32 = 37;
+/// 64-bit adjust by program base.
+pub const R_X86_64_RELATIVE64: u32 = 38;
+/// Load from 32 bit signed pc relative offset to GOT entry without REX prefix, relaxable.
+pub const R_X86_64_GOTPCRELX: u32 = 41;
+/// Load from 32 bit signed pc relative offset to GOT entry with REX prefix, relaxable.
+pub const R_X86_64_REX_GOTPCRELX: u32 = 42;
+pub const R_X86_64_NUM: u32 = 43;
+
+// Intel 80386 specific definitions
+
+// i386 relocs
+/// No reloc
+pub const R_386_NONE: u32 = 0;
+/// Direct 32 bit
+pub const R_386_32: u32 = 1;
+/// PC relative 32 bit
+pub const R_386_PC32: u32 = 2;
+/// 32 bit GOT entry
+pub const R_386_GOT32: u32 = 3;
+/// 32 bit PLT address
+pub const R_386_PLT32: u32 = 4;
+/// Copy symbol at runtime
+pub const R_386_COPY: u32 = 5;
+/// Create GOT entry
+pub const R_386_GLOB_DAT: u32 = 6;
+/// Create PLT entry
+pub const R_386_JMP_SLOT: u32 = 7;
+/// Adjust by program base
+pub const R_386_RELATIVE: u32 = 8;
+/// 32 bit offset to GOT
+pub const R_386_GOTOFF: u32 = 9;
+/// 32 bit PC relative offset to GOT
+pub const R_386_GOTPC: u32 = 10;
+pub const R_386_32PLT: u32 = 11;
+/// Offset in static TLS block
+pub const R_386_TLS_TPOFF: u32 = 14;
+/// Address of GOT entry for static TLS block offset
+pub const R_386_TLS_IE: u32 = 15;
+/// GOT entry for static TLS block offset
+pub const R_386_TLS_GOTIE: u32 = 16;
+/// Offset relative to static TLS block
+pub const R_386_TLS_LE: u32 = 17;
+/// Direct 32 bit for GNU version of general dynamic thread local data
+pub const R_386_TLS_GD: u32 = 18;
+/// Direct 32 bit for GNU version of local dynamic thread local data in LE code
+pub const R_386_TLS_LDM: u32 = 19;
+pub const R_386_16: u32 = 20;
+pub const R_386_PC16: u32 = 21;
+pub const R_386_8: u32 = 22;
+pub const R_386_PC8: u32 = 23;
+/// Direct 32 bit for general dynamic thread local data
+pub const R_386_TLS_GD_32: u32 = 24;
+/// Tag for pushl in GD TLS code
+pub const R_386_TLS_GD_PUSH: u32 = 25;
+/// Relocation for call to __tls_get_addr()
+pub const R_386_TLS_GD_CALL: u32 = 26;
+/// Tag for popl in GD TLS code
+pub const R_386_TLS_GD_POP: u32 = 27;
+/// Direct 32 bit for local dynamic thread local data in LE code
+pub const R_386_TLS_LDM_32: u32 = 28;
+/// Tag for pushl in LDM TLS code
+pub const R_386_TLS_LDM_PUSH: u32 = 29;
+/// Relocation for call to __tls_get_addr() in LDM code
+pub const R_386_TLS_LDM_CALL: u32 = 30;
+/// Tag for popl in LDM TLS code
+pub const R_386_TLS_LDM_POP: u32 = 31;
+/// Offset relative to TLS block
+pub const R_386_TLS_LDO_32: u32 = 32;
+/// GOT entry for negated static TLS block offset
+pub const R_386_TLS_IE_32: u32 = 33;
+/// Negated offset relative to static TLS block
+pub const R_386_TLS_LE_32: u32 = 34;
+/// ID of module containing symbol
+pub const R_386_TLS_DTPMOD32: u32 = 35;
+/// Offset in TLS block
+pub const R_386_TLS_DTPOFF32: u32 = 36;
+/// Negated offset in static TLS block
+pub const R_386_TLS_TPOFF32: u32 = 37;
+/// 32-bit symbol size
+pub const R_386_SIZE32: u32 = 38;
+/// GOT offset for TLS descriptor.
+pub const R_386_TLS_GOTDESC: u32 = 39;
+/// Marker of call through TLS descriptor for relaxation
+pub const R_386_TLS_DESC_CALL: u32 = 40;
+/// TLS descriptor containing pointer to code and to argument, returning the TLS offset for the symbol
+pub const R_386_TLS_DESC: u32 = 41;
+/// Adjust indirectly by program base
+pub const R_386_IRELATIVE: u32 = 42;
+/// Load from 32 bit GOT entry, relaxable
+pub const R_386_GOT32X: u32 = 43;
+/// Keep this the last entry
+pub const R_386_NUM: u32 = 44;
+
+// AArch64 relocs
+/// No relocation
+pub const R_AARCH64_NONE: u32 = 0;
+
+// ILP32 AArch64 relocs
+/// Direct 32 bit
+pub const R_AARCH64_P32_ABS32: u32 = 1;
+/// Copy symbol at runtime
+pub const R_AARCH64_P32_COPY: u32 = 180;
+/// Create GOT entry
+pub const R_AARCH64_P32_GLOB_DAT: u32 = 181;
+/// Create PLT entry
+pub const R_AARCH64_P32_JUMP_SLOT: u32 = 182;
+/// Adjust by program base
+pub const R_AARCH64_P32_RELATIVE: u32 = 183;
+/// Module number, 32 bit
+pub const R_AARCH64_P32_TLS_DTPMOD: u32 = 184;
+/// Module-relative offset, 32 bit
+pub const R_AARCH64_P32_TLS_DTPREL: u32 = 185;
+/// TP-relative offset, 32 bit
+pub const R_AARCH64_P32_TLS_TPREL: u32 = 186;
+/// TLS Descriptor
+pub const R_AARCH64_P32_TLSDESC: u32 = 187;
+/// STT_GNU_IFUNC relocation
+pub const R_AARCH64_P32_IRELATIVE: u32 = 188;
+
+// LP64 AArch64 relocs
+/// Direct 64 bit
+pub const R_AARCH64_ABS64: u32 = 257;
+/// Direct 32 bit
+pub const R_AARCH64_ABS32: u32 = 258;
+/// Direct 16-bit
+pub const R_AARCH64_ABS16: u32 = 259;
+/// PC-relative 64-bit
+pub const R_AARCH64_PREL64: u32 = 260;
+/// PC-relative 32-bit
+pub const R_AARCH64_PREL32: u32 = 261;
+/// PC-relative 16-bit
+pub const R_AARCH64_PREL16: u32 = 262;
+/// Dir. MOVZ imm. from bits 15:0
+pub const R_AARCH64_MOVW_UABS_G0: u32 = 263;
+/// Likewise for MOVK; no check
+pub const R_AARCH64_MOVW_UABS_G0_NC: u32 = 264;
+/// Dir. MOVZ imm. from bits 31:16
+pub const R_AARCH64_MOVW_UABS_G1: u32 = 265;
+/// Likewise for MOVK; no check
+pub const R_AARCH64_MOVW_UABS_G1_NC: u32 = 266;
+/// Dir. MOVZ imm. from bits 47:32
+pub const R_AARCH64_MOVW_UABS_G2: u32 = 267;
+/// Likewise for MOVK; no check
+pub const R_AARCH64_MOVW_UABS_G2_NC: u32 = 268;
+/// Dir. MOV{K,Z} imm. from 63:48
+pub const R_AARCH64_MOVW_UABS_G3: u32 = 269;
+/// Dir. MOV{N,Z} imm. from 15:0
+pub const R_AARCH64_MOVW_SABS_G0: u32 = 270;
+/// Dir. MOV{N,Z} imm. from 31:16
+pub const R_AARCH64_MOVW_SABS_G1: u32 = 271;
+/// Dir. MOV{N,Z} imm. from 47:32
+pub const R_AARCH64_MOVW_SABS_G2: u32 = 272;
+/// PC-rel. LD imm. from bits 20:2
+pub const R_AARCH64_LD_PREL_LO19: u32 = 273;
+/// PC-rel. ADR imm. from bits 20:0
+pub const R_AARCH64_ADR_PREL_LO21: u32 = 274;
+/// Page-rel. ADRP imm. from 32:12
+pub const R_AARCH64_ADR_PREL_PG_HI21: u32 = 275;
+/// Likewise; no overflow check
+pub const R_AARCH64_ADR_PREL_PG_HI21_NC: u32 = 276;
+/// Dir. ADD imm. from bits 11:0
+pub const R_AARCH64_ADD_ABS_LO12_NC: u32 = 277;
+/// Likewise for LD/ST; no check.
+pub const R_AARCH64_LDST8_ABS_LO12_NC: u32 = 278;
+/// PC-rel. TBZ/TBNZ imm. from 15:2
+pub const R_AARCH64_TSTBR14: u32 = 279;
+/// PC-rel. cond. br. imm. from 20:2.
+pub const R_AARCH64_CONDBR19: u32 = 280;
+/// PC-rel. B imm. from bits 27:2
+pub const R_AARCH64_JUMP26: u32 = 282;
+/// Likewise for CALL
+pub const R_AARCH64_CALL26: u32 = 283;
+/// Likewise for bits 11:1
+pub const R_AARCH64_LDST16_ABS_LO12_NC: u32 = 284;
+/// Likewise for bits 11:2
+pub const R_AARCH64_LDST32_ABS_LO12_NC: u32 = 285;
+/// Likewise for bits 11:3
+pub const R_AARCH64_LDST64_ABS_LO12_NC: u32 = 286;
+/// PC-rel. MOV{N,Z} imm. from 15:0
+pub const R_AARCH64_MOVW_PREL_G0: u32 = 287;
+/// Likewise for MOVK; no check
+pub const R_AARCH64_MOVW_PREL_G0_NC: u32 = 288;
+/// PC-rel. MOV{N,Z} imm. from 31:16.
+pub const R_AARCH64_MOVW_PREL_G1: u32 = 289;
+/// Likewise for MOVK; no check
+pub const R_AARCH64_MOVW_PREL_G1_NC: u32 = 290;
+/// PC-rel. MOV{N,Z} imm. from 47:32.
+pub const R_AARCH64_MOVW_PREL_G2: u32 = 291;
+/// Likewise for MOVK; no check
+pub const R_AARCH64_MOVW_PREL_G2_NC: u32 = 292;
+/// PC-rel. MOV{N,Z} imm. from 63:48.
+pub const R_AARCH64_MOVW_PREL_G3: u32 = 293;
+/// Dir. ADD imm. from bits 11:4
+pub const R_AARCH64_LDST128_ABS_LO12_NC: u32 = 299;
+/// GOT-rel. off. MOV{N,Z} imm. 15:0.
+pub const R_AARCH64_MOVW_GOTOFF_G0: u32 = 300;
+/// Likewise for MOVK; no check
+pub const R_AARCH64_MOVW_GOTOFF_G0_NC: u32 = 301;
+/// GOT-rel. o. MOV{N,Z} imm. 31:16
+pub const R_AARCH64_MOVW_GOTOFF_G1: u32 = 302;
+/// Likewise for MOVK; no check
+pub const R_AARCH64_MOVW_GOTOFF_G1_NC: u32 = 303;
+/// GOT-rel. o. MOV{N,Z} imm. 47:32
+pub const R_AARCH64_MOVW_GOTOFF_G2: u32 = 304;
+/// Likewise for MOVK; no check
+pub const R_AARCH64_MOVW_GOTOFF_G2_NC: u32 = 305;
+/// GOT-rel. o. MOV{N,Z} imm. 63:48
+pub const R_AARCH64_MOVW_GOTOFF_G3: u32 = 306;
+/// GOT-relative 64-bit
+pub const R_AARCH64_GOTREL64: u32 = 307;
+/// GOT-relative 32-bit
+pub const R_AARCH64_GOTREL32: u32 = 308;
+/// PC-rel. GOT off. load imm. 20:2
+pub const R_AARCH64_GOT_LD_PREL19: u32 = 309;
+/// GOT-rel. off. LD/ST imm. 14:3
+pub const R_AARCH64_LD64_GOTOFF_LO15: u32 = 310;
+/// P-page-rel. GOT off. ADRP 32:12
+pub const R_AARCH64_ADR_GOT_PAGE: u32 = 311;
+/// Dir. GOT off. LD/ST imm. 11:3
+pub const R_AARCH64_LD64_GOT_LO12_NC: u32 = 312;
+/// GOT-page-rel. GOT off. LD/ST 14:3
+pub const R_AARCH64_LD64_GOTPAGE_LO15: u32 = 313;
+/// PC-relative ADR imm. 20:0
+pub const R_AARCH64_TLSGD_ADR_PREL21: u32 = 512;
+/// page-rel. ADRP imm. 32:12
+pub const R_AARCH64_TLSGD_ADR_PAGE21: u32 = 513;
+/// direct ADD imm. from 11:0
+pub const R_AARCH64_TLSGD_ADD_LO12_NC: u32 = 514;
+/// GOT-rel. MOV{N,Z} 31:16
+pub const R_AARCH64_TLSGD_MOVW_G1: u32 = 515;
+/// GOT-rel. MOVK imm. 15:0
+pub const R_AARCH64_TLSGD_MOVW_G0_NC: u32 = 516;
+/// Like 512; local dynamic model
+pub const R_AARCH64_TLSLD_ADR_PREL21: u32 = 517;
+/// Like 513; local dynamic model
+pub const R_AARCH64_TLSLD_ADR_PAGE21: u32 = 518;
+/// Like 514; local dynamic model
+pub const R_AARCH64_TLSLD_ADD_LO12_NC: u32 = 519;
+/// Like 515; local dynamic model
+pub const R_AARCH64_TLSLD_MOVW_G1: u32 = 520;
+/// Like 516; local dynamic model
+pub const R_AARCH64_TLSLD_MOVW_G0_NC: u32 = 521;
+/// TLS PC-rel. load imm. 20:2
+pub const R_AARCH64_TLSLD_LD_PREL19: u32 = 522;
+/// TLS DTP-rel. MOV{N,Z} 47:32
+pub const R_AARCH64_TLSLD_MOVW_DTPREL_G2: u32 = 523;
+/// TLS DTP-rel. MOV{N,Z} 31:16
+pub const R_AARCH64_TLSLD_MOVW_DTPREL_G1: u32 = 524;
+/// Likewise; MOVK; no check
+pub const R_AARCH64_TLSLD_MOVW_DTPREL_G1_NC: u32 = 525;
+/// TLS DTP-rel. MOV{N,Z} 15:0
+pub const R_AARCH64_TLSLD_MOVW_DTPREL_G0: u32 = 526;
+/// Likewise; MOVK; no check
+pub const R_AARCH64_TLSLD_MOVW_DTPREL_G0_NC: u32 = 527;
+/// DTP-rel. ADD imm. from 23:12.
+pub const R_AARCH64_TLSLD_ADD_DTPREL_HI12: u32 = 528;
+/// DTP-rel. ADD imm. from 11:0
+pub const R_AARCH64_TLSLD_ADD_DTPREL_LO12: u32 = 529;
+/// Likewise; no ovfl. check
+pub const R_AARCH64_TLSLD_ADD_DTPREL_LO12_NC: u32 = 530;
+/// DTP-rel. LD/ST imm. 11:0
+pub const R_AARCH64_TLSLD_LDST8_DTPREL_LO12: u32 = 531;
+/// Likewise; no check
+pub const R_AARCH64_TLSLD_LDST8_DTPREL_LO12_NC: u32 = 532;
+/// DTP-rel. LD/ST imm. 11:1
+pub const R_AARCH64_TLSLD_LDST16_DTPREL_LO12: u32 = 533;
+/// Likewise; no check
+pub const R_AARCH64_TLSLD_LDST16_DTPREL_LO12_NC: u32 = 534;
+/// DTP-rel. LD/ST imm. 11:2
+pub const R_AARCH64_TLSLD_LDST32_DTPREL_LO12: u32 = 535;
+/// Likewise; no check
+pub const R_AARCH64_TLSLD_LDST32_DTPREL_LO12_NC: u32 = 536;
+/// DTP-rel. LD/ST imm. 11:3
+pub const R_AARCH64_TLSLD_LDST64_DTPREL_LO12: u32 = 537;
+/// Likewise; no check
+pub const R_AARCH64_TLSLD_LDST64_DTPREL_LO12_NC: u32 = 538;
+/// GOT-rel. MOV{N,Z} 31:16
+pub const R_AARCH64_TLSIE_MOVW_GOTTPREL_G1: u32 = 539;
+/// GOT-rel. MOVK 15:0
+pub const R_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC: u32 = 540;
+/// Page-rel. ADRP 32:12
+pub const R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21: u32 = 541;
+/// Direct LD off. 11:3
+pub const R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC: u32 = 542;
+/// PC-rel. load imm. 20:2
+pub const R_AARCH64_TLSIE_LD_GOTTPREL_PREL19: u32 = 543;
+/// TLS TP-rel. MOV{N,Z} 47:32
+pub const R_AARCH64_TLSLE_MOVW_TPREL_G2: u32 = 544;
+/// TLS TP-rel. MOV{N,Z} 31:16
+pub const R_AARCH64_TLSLE_MOVW_TPREL_G1: u32 = 545;
+/// Likewise; MOVK; no check
+pub const R_AARCH64_TLSLE_MOVW_TPREL_G1_NC: u32 = 546;
+/// TLS TP-rel. MOV{N,Z} 15:0
+pub const R_AARCH64_TLSLE_MOVW_TPREL_G0: u32 = 547;
+/// Likewise; MOVK; no check
+pub const R_AARCH64_TLSLE_MOVW_TPREL_G0_NC: u32 = 548;
+/// TP-rel. ADD imm. 23:12
+pub const R_AARCH64_TLSLE_ADD_TPREL_HI12: u32 = 549;
+/// TP-rel. ADD imm. 11:0
+pub const R_AARCH64_TLSLE_ADD_TPREL_LO12: u32 = 550;
+/// Likewise; no ovfl. check
+pub const R_AARCH64_TLSLE_ADD_TPREL_LO12_NC: u32 = 551;
+/// TP-rel. LD/ST off. 11:0
+pub const R_AARCH64_TLSLE_LDST8_TPREL_LO12: u32 = 552;
+/// Likewise; no ovfl. check.
+pub const R_AARCH64_TLSLE_LDST8_TPREL_LO12_NC: u32 = 553;
+/// TP-rel. LD/ST off. 11:1
+pub const R_AARCH64_TLSLE_LDST16_TPREL_LO12: u32 = 554;
+/// Likewise; no check
+pub const R_AARCH64_TLSLE_LDST16_TPREL_LO12_NC: u32 = 555;
+/// TP-rel. LD/ST off. 11:2
+pub const R_AARCH64_TLSLE_LDST32_TPREL_LO12: u32 = 556;
+/// Likewise; no check
+pub const R_AARCH64_TLSLE_LDST32_TPREL_LO12_NC: u32 = 557;
+/// TP-rel. LD/ST off. 11:3
+pub const R_AARCH64_TLSLE_LDST64_TPREL_LO12: u32 = 558;
+/// Likewise; no check
+pub const R_AARCH64_TLSLE_LDST64_TPREL_LO12_NC: u32 = 559;
+/// PC-rel. load immediate 20:2
+pub const R_AARCH64_TLSDESC_LD_PREL19: u32 = 560;
+/// PC-rel. ADR immediate 20:0
+pub const R_AARCH64_TLSDESC_ADR_PREL21: u32 = 561;
+/// Page-rel. ADRP imm. 32:12
+pub const R_AARCH64_TLSDESC_ADR_PAGE21: u32 = 562;
+/// Direct LD off. from 11:3
+pub const R_AARCH64_TLSDESC_LD64_LO12: u32 = 563;
+/// Direct ADD imm. from 11:0
+pub const R_AARCH64_TLSDESC_ADD_LO12: u32 = 564;
+/// GOT-rel. MOV{N,Z} imm. 31:16
+pub const R_AARCH64_TLSDESC_OFF_G1: u32 = 565;
+/// GOT-rel. MOVK imm. 15:0; no ck
+pub const R_AARCH64_TLSDESC_OFF_G0_NC: u32 = 566;
+/// Relax LDR
+pub const R_AARCH64_TLSDESC_LDR: u32 = 567;
+/// Relax ADD
+pub const R_AARCH64_TLSDESC_ADD: u32 = 568;
+/// Relax BLR
+pub const R_AARCH64_TLSDESC_CALL: u32 = 569;
+/// TP-rel. LD/ST off. 11:4
+pub const R_AARCH64_TLSLE_LDST128_TPREL_LO12: u32 = 570;
+/// Likewise; no check
+pub const R_AARCH64_TLSLE_LDST128_TPREL_LO12_NC: u32 = 571;
+/// DTP-rel. LD/ST imm. 11:4.
+pub const R_AARCH64_TLSLD_LDST128_DTPREL_LO12: u32 = 572;
+/// Likewise; no check
+pub const R_AARCH64_TLSLD_LDST128_DTPREL_LO12_NC: u32 = 573;
+/// Copy symbol at runtime
+pub const R_AARCH64_COPY: u32 = 1024;
+/// Create GOT entry
+pub const R_AARCH64_GLOB_DAT: u32 = 1025;
+/// Create PLT entry
+pub const R_AARCH64_JUMP_SLOT: u32 = 1026;
+/// Adjust by program base
+pub const R_AARCH64_RELATIVE: u32 = 1027;
+/// Module number, 64 bit
+pub const R_AARCH64_TLS_DTPMOD: u32 = 1028;
+/// Module-relative offset, 64 bit
+pub const R_AARCH64_TLS_DTPREL: u32 = 1029;
+/// TP-relative offset, 64 bit
+pub const R_AARCH64_TLS_TPREL: u32 = 1030;
+/// TLS Descriptor
+pub const R_AARCH64_TLSDESC: u32 = 1031;
+/// STT_GNU_IFUNC relocation
+pub const R_AARCH64_IRELATIVE: u32 = 1032;
+
+// ARM relocs
+/// No reloc
+pub const R_ARM_NONE: u32 = 0;
+/// Deprecated PC relative 26 bit branch
+pub const R_ARM_PC24: u32 = 1;
+/// Direct 32 bit
+pub const R_ARM_ABS32: u32 = 2;
+/// PC relative 32 bit
+pub const R_ARM_REL32: u32 = 3;
+pub const R_ARM_PC13: u32 = 4;
+/// Direct 16 bit
+pub const R_ARM_ABS16: u32 = 5;
+/// Direct 12 bit
+pub const R_ARM_ABS12: u32 = 6;
+/// Direct & 0x7C (LDR, STR)
+pub const R_ARM_THM_ABS5: u32 = 7;
+/// Direct 8 bit
+pub const R_ARM_ABS8: u32 = 8;
+pub const R_ARM_SBREL32: u32 = 9;
+/// PC relative 24 bit (Thumb32 BL)
+pub const R_ARM_THM_PC22: u32 = 10;
+/// PC relative & 0x3FC (Thumb16 LDR, ADD, ADR).
+pub const R_ARM_THM_PC8: u32 = 11;
+pub const R_ARM_AMP_VCALL9: u32 = 12;
+/// Obsolete static relocation
+pub const R_ARM_SWI24: u32 = 13;
+/// Dynamic relocation
+pub const R_ARM_TLS_DESC: u32 = 13;
+/// Reserved
+pub const R_ARM_THM_SWI8: u32 = 14;
+/// Reserved
+pub const R_ARM_XPC25: u32 = 15;
+/// Reserved
+pub const R_ARM_THM_XPC22: u32 = 16;
+/// ID of module containing symbol
+pub const R_ARM_TLS_DTPMOD32: u32 = 17;
+/// Offset in TLS block
+pub const R_ARM_TLS_DTPOFF32: u32 = 18;
+/// Offset in static TLS block
+pub const R_ARM_TLS_TPOFF32: u32 = 19;
+/// Copy symbol at runtime
+pub const R_ARM_COPY: u32 = 20;
+/// Create GOT entry
+pub const R_ARM_GLOB_DAT: u32 = 21;
+/// Create PLT entry
+pub const R_ARM_JUMP_SLOT: u32 = 22;
+/// Adjust by program base
+pub const R_ARM_RELATIVE: u32 = 23;
+/// 32 bit offset to GOT
+pub const R_ARM_GOTOFF: u32 = 24;
+/// 32 bit PC relative offset to GOT
+pub const R_ARM_GOTPC: u32 = 25;
+/// 32 bit GOT entry
+pub const R_ARM_GOT32: u32 = 26;
+/// Deprecated, 32 bit PLT address
+pub const R_ARM_PLT32: u32 = 27;
+/// PC relative 24 bit (BL, BLX)
+pub const R_ARM_CALL: u32 = 28;
+/// PC relative 24 bit (B, BL<cond>)
+pub const R_ARM_JUMP24: u32 = 29;
+/// PC relative 24 bit (Thumb32 B.W)
+pub const R_ARM_THM_JUMP24: u32 = 30;
+/// Adjust by program base
+pub const R_ARM_BASE_ABS: u32 = 31;
+/// Obsolete
+pub const R_ARM_ALU_PCREL_7_0: u32 = 32;
+/// Obsolete
+pub const R_ARM_ALU_PCREL_15_8: u32 = 33;
+/// Obsolete
+pub const R_ARM_ALU_PCREL_23_15: u32 = 34;
+/// Deprecated, prog. base relative
+pub const R_ARM_LDR_SBREL_11_0: u32 = 35;
+/// Deprecated, prog. base relative
+pub const R_ARM_ALU_SBREL_19_12: u32 = 36;
+/// Deprecated, prog. base relative
+pub const R_ARM_ALU_SBREL_27_20: u32 = 37;
+pub const R_ARM_TARGET1: u32 = 38;
+/// Program base relative
+pub const R_ARM_SBREL31: u32 = 39;
+pub const R_ARM_V4BX: u32 = 40;
+pub const R_ARM_TARGET2: u32 = 41;
+/// 32 bit PC relative
+pub const R_ARM_PREL31: u32 = 42;
+/// Direct 16-bit (MOVW)
+pub const R_ARM_MOVW_ABS_NC: u32 = 43;
+/// Direct high 16-bit (MOVT)
+pub const R_ARM_MOVT_ABS: u32 = 44;
+/// PC relative 16-bit (MOVW)
+pub const R_ARM_MOVW_PREL_NC: u32 = 45;
+/// PC relative (MOVT)
+pub const R_ARM_MOVT_PREL: u32 = 46;
+/// Direct 16 bit (Thumb32 MOVW)
+pub const R_ARM_THM_MOVW_ABS_NC: u32 = 47;
+/// Direct high 16 bit (Thumb32 MOVT)
+pub const R_ARM_THM_MOVT_ABS: u32 = 48;
+/// PC relative 16 bit (Thumb32 MOVW)
+pub const R_ARM_THM_MOVW_PREL_NC: u32 = 49;
+/// PC relative high 16 bit (Thumb32 MOVT)
+pub const R_ARM_THM_MOVT_PREL: u32 = 50;
+/// PC relative 20 bit (Thumb32 B<cond>.W)
+pub const R_ARM_THM_JUMP19: u32 = 51;
+/// PC relative X & 0x7E (Thumb16 CBZ, CBNZ)
+pub const R_ARM_THM_JUMP6: u32 = 52;
+/// PC relative 12 bit (Thumb32 ADR.W)
+pub const R_ARM_THM_ALU_PREL_11_0: u32 = 53;
+/// PC relative 12 bit (Thumb32 LDR{D,SB,H,SH})
+pub const R_ARM_THM_PC12: u32 = 54;
+/// Direct 32-bit
+pub const R_ARM_ABS32_NOI: u32 = 55;
+/// PC relative 32-bit
+pub const R_ARM_REL32_NOI: u32 = 56;
+/// PC relative (ADD, SUB)
+pub const R_ARM_ALU_PC_G0_NC: u32 = 57;
+/// PC relative (ADD, SUB)
+pub const R_ARM_ALU_PC_G0: u32 = 58;
+/// PC relative (ADD, SUB)
+pub const R_ARM_ALU_PC_G1_NC: u32 = 59;
+/// PC relative (ADD, SUB)
+pub const R_ARM_ALU_PC_G1: u32 = 60;
+/// PC relative (ADD, SUB)
+pub const R_ARM_ALU_PC_G2: u32 = 61;
+/// PC relative (LDR,STR,LDRB,STRB)
+pub const R_ARM_LDR_PC_G1: u32 = 62;
+/// PC relative (LDR,STR,LDRB,STRB)
+pub const R_ARM_LDR_PC_G2: u32 = 63;
+/// PC relative (STR{D,H},LDR{D,SB,H,SH})
+pub const R_ARM_LDRS_PC_G0: u32 = 64;
+/// PC relative (STR{D,H},LDR{D,SB,H,SH})
+pub const R_ARM_LDRS_PC_G1: u32 = 65;
+/// PC relative (STR{D,H},LDR{D,SB,H,SH})
+pub const R_ARM_LDRS_PC_G2: u32 = 66;
+/// PC relative (LDC, STC)
+pub const R_ARM_LDC_PC_G0: u32 = 67;
+/// PC relative (LDC, STC)
+pub const R_ARM_LDC_PC_G1: u32 = 68;
+/// PC relative (LDC, STC)
+pub const R_ARM_LDC_PC_G2: u32 = 69;
+/// Program base relative (ADD,SUB)
+pub const R_ARM_ALU_SB_G0_NC: u32 = 70;
+/// Program base relative (ADD,SUB)
+pub const R_ARM_ALU_SB_G0: u32 = 71;
+/// Program base relative (ADD,SUB)
+pub const R_ARM_ALU_SB_G1_NC: u32 = 72;
+/// Program base relative (ADD,SUB)
+pub const R_ARM_ALU_SB_G1: u32 = 73;
+/// Program base relative (ADD,SUB)
+pub const R_ARM_ALU_SB_G2: u32 = 74;
+/// Program base relative (LDR,STR, LDRB, STRB)
+pub const R_ARM_LDR_SB_G0: u32 = 75;
+/// Program base relative (LDR, STR, LDRB, STRB)
+pub const R_ARM_LDR_SB_G1: u32 = 76;
+/// Program base relative (LDR, STR, LDRB, STRB)
+pub const R_ARM_LDR_SB_G2: u32 = 77;
+/// Program base relative (LDR, STR, LDRB, STRB)
+pub const R_ARM_LDRS_SB_G0: u32 = 78;
+/// Program base relative (LDR, STR, LDRB, STRB)
+pub const R_ARM_LDRS_SB_G1: u32 = 79;
+/// Program base relative (LDR, STR, LDRB, STRB)
+pub const R_ARM_LDRS_SB_G2: u32 = 80;
+/// Program base relative (LDC,STC)
+pub const R_ARM_LDC_SB_G0: u32 = 81;
+/// Program base relative (LDC,STC)
+pub const R_ARM_LDC_SB_G1: u32 = 82;
+/// Program base relative (LDC,STC)
+pub const R_ARM_LDC_SB_G2: u32 = 83;
+/// Program base relative 16 bit (MOVW)
+pub const R_ARM_MOVW_BREL_NC: u32 = 84;
+/// Program base relative high 16 bit (MOVT)
+pub const R_ARM_MOVT_BREL: u32 = 85;
+/// Program base relative 16 bit (MOVW)
+pub const R_ARM_MOVW_BREL: u32 = 86;
+/// Program base relative 16 bit (Thumb32 MOVW)
+pub const R_ARM_THM_MOVW_BREL_NC: u32 = 87;
+/// Program base relative high 16 bit (Thumb32 MOVT)
+pub const R_ARM_THM_MOVT_BREL: u32 = 88;
+/// Program base relative 16 bit (Thumb32 MOVW)
+pub const R_ARM_THM_MOVW_BREL: u32 = 89;
+pub const R_ARM_TLS_GOTDESC: u32 = 90;
+pub const R_ARM_TLS_CALL: u32 = 91;
+/// TLS relaxation
+pub const R_ARM_TLS_DESCSEQ: u32 = 92;
+pub const R_ARM_THM_TLS_CALL: u32 = 93;
+pub const R_ARM_PLT32_ABS: u32 = 94;
+/// GOT entry
+pub const R_ARM_GOT_ABS: u32 = 95;
+/// PC relative GOT entry
+pub const R_ARM_GOT_PREL: u32 = 96;
+/// GOT entry relative to GOT origin (LDR)
+pub const R_ARM_GOT_BREL12: u32 = 97;
+/// 12 bit, GOT entry relative to GOT origin (LDR, STR)
+pub const R_ARM_GOTOFF12: u32 = 98;
+pub const R_ARM_GOTRELAX: u32 = 99;
+pub const R_ARM_GNU_VTENTRY: u32 = 100;
+pub const R_ARM_GNU_VTINHERIT: u32 = 101;
+/// PC relative & 0xFFE (Thumb16 B)
+pub const R_ARM_THM_PC11: u32 = 102;
+/// PC relative & 0x1FE (Thumb16 B/B<cond>)
+pub const R_ARM_THM_PC9: u32 = 103;
+/// PC-rel 32 bit for global dynamic thread local data
+pub const R_ARM_TLS_GD32: u32 = 104;
+/// PC-rel 32 bit for local dynamic thread local data
+pub const R_ARM_TLS_LDM32: u32 = 105;
+/// 32 bit offset relative to TLS block
+pub const R_ARM_TLS_LDO32: u32 = 106;
+/// PC-rel 32 bit for GOT entry of static TLS block offset
+pub const R_ARM_TLS_IE32: u32 = 107;
+/// 32 bit offset relative to static TLS block
+pub const R_ARM_TLS_LE32: u32 = 108;
+/// 12 bit relative to TLS block (LDR, STR)
+pub const R_ARM_TLS_LDO12: u32 = 109;
+/// 12 bit relative to static TLS block (LDR, STR)
+pub const R_ARM_TLS_LE12: u32 = 110;
+/// 12 bit GOT entry relative to GOT origin (LDR)
+pub const R_ARM_TLS_IE12GP: u32 = 111;
+/// Obsolete
+pub const R_ARM_ME_TOO: u32 = 128;
+pub const R_ARM_THM_TLS_DESCSEQ: u32 = 129;
+pub const R_ARM_THM_TLS_DESCSEQ16: u32 = 129;
+pub const R_ARM_THM_TLS_DESCSEQ32: u32 = 130;
+/// GOT entry relative to GOT origin, 12 bit (Thumb32 LDR)
+pub const R_ARM_THM_GOT_BREL12: u32 = 131;
+pub const R_ARM_IRELATIVE: u32 = 160;
+pub const R_ARM_RXPC25: u32 = 249;
+pub const R_ARM_RSBREL32: u32 = 250;
+pub const R_ARM_THM_RPC22: u32 = 251;
+pub const R_ARM_RREL32: u32 = 252;
+pub const R_ARM_RABS22: u32 = 253;
+pub const R_ARM_RPC24: u32 = 254;
+pub const R_ARM_RBASE: u32 = 255;
+/// Keep this the last entry
+pub const R_ARM_NUM: u32 = 256;
+
+///////////////////
+// OpenRisc
+///////////////////
+pub const R_OR1K_NONE: u32 = 0;
+pub const R_OR1K_32: u32 = 1;
+pub const R_OR1K_16: u32 = 2;
+pub const R_OR1K_8: u32 = 3;
+pub const R_OR1K_LO_16_IN_INSN: u32 = 4;
+pub const R_OR1K_HI_16_IN_INSN: u32 = 5;
+pub const R_OR1K_INSN_REL_26: u32 = 6;
+pub const R_OR1K_GNU_VTENTRY: u32 = 7;
+pub const R_OR1K_GNU_VTINHERIT: u32 = 8;
+pub const R_OR1K_32_PCREL: u32 = 9;
+pub const R_OR1K_16_PCREL: u32 = 10;
+pub const R_OR1K_8_PCREL: u32 = 11;
+pub const R_OR1K_GOTPC_HI16: u32 = 12;
+pub const R_OR1K_GOTPC_LO16: u32 = 13;
+pub const R_OR1K_GOT16: u32 = 14;
+pub const R_OR1K_PLT26: u32 = 15;
+pub const R_OR1K_GOTOFF_HI16: u32 = 16;
+pub const R_OR1K_GOTOFF_LO16: u32 = 17;
+pub const R_OR1K_COPY: u32 = 18;
+pub const R_OR1K_GLOB_DAT: u32 = 19;
+pub const R_OR1K_JMP_SLOT: u32 = 20;
+pub const R_OR1K_RELATIVE: u32 = 21;
+pub const R_OR1K_TLS_GD_HI16: u32 = 22;
+pub const R_OR1K_TLS_GD_LO16: u32 = 23;
+pub const R_OR1K_TLS_LDM_HI16: u32 = 24;
+pub const R_OR1K_TLS_LDM_LO16: u32 = 25;
+pub const R_OR1K_TLS_LDO_HI16: u32 = 26;
+pub const R_OR1K_TLS_LDO_LO16: u32 = 27;
+pub const R_OR1K_TLS_IE_HI16: u32 = 28;
+pub const R_OR1K_TLS_IE_LO16: u32 = 29;
+pub const R_OR1K_TLS_LE_HI16: u32 = 30;
+pub const R_OR1K_TLS_LE_LO16: u32 = 31;
+pub const R_OR1K_TLS_TPOFF: u32 = 32;
+pub const R_OR1K_TLS_DTPOFF: u32 = 33;
+pub const R_OR1K_TLS_DTPMOD: u32 = 34;
+pub const R_OR1K_NUM: u32 = 35;
+
+/////////////////////
+// MIPS
+/////////////////////
+/// No reloc
+pub const R_MIPS_NONE: u32 = 0;
+/// Direct 16 bit
+pub const R_MIPS_16: u32 = 1;
+/// Direct 32 bit
+pub const R_MIPS_32: u32 = 2;
+/// PC relative 32 bit
+pub const R_MIPS_REL32: u32 = 3;
+/// Direct 26 bit shifted
+pub const R_MIPS_26: u32 = 4;
+/// High 16 bit
+pub const R_MIPS_HI16: u32 = 5;
+/// Low 16 bit
+pub const R_MIPS_LO16: u32 = 6;
+/// GP relative 16 bit
+pub const R_MIPS_GPREL16: u32 = 7;
+/// 16 bit literal entry
+pub const R_MIPS_LITERAL: u32 = 8;
+/// 16 bit GOT entry
+pub const R_MIPS_GOT16: u32 = 9;
+/// PC relative 16 bit
+pub const R_MIPS_PC16: u32 = 10;
+/// 16 bit GOT entry for function
+pub const R_MIPS_CALL16: u32 = 11;
+/// GP relative 32 bit
+pub const R_MIPS_GPREL32: u32 = 12;
+pub const R_MIPS_SHIFT5: u32 = 16;
+pub const R_MIPS_SHIFT6: u32 = 17;
+pub const R_MIPS_64: u32 = 18;
+pub const R_MIPS_GOT_DISP: u32 = 19;
+pub const R_MIPS_GOT_PAGE: u32 = 20;
+pub const R_MIPS_GOT_OFST: u32 = 21;
+pub const R_MIPS_GOT_HI16: u32 = 22;
+pub const R_MIPS_GOT_LO16: u32 = 23;
+pub const R_MIPS_SUB: u32 = 24;
+pub const R_MIPS_INSERT_A: u32 = 25;
+pub const R_MIPS_INSERT_B: u32 = 26;
+pub const R_MIPS_DELETE: u32 = 27;
+pub const R_MIPS_HIGHER: u32 = 28;
+pub const R_MIPS_HIGHEST: u32 = 29;
+pub const R_MIPS_CALL_HI16: u32 = 30;
+pub const R_MIPS_CALL_LO16: u32 = 31;
+pub const R_MIPS_SCN_DISP: u32 = 32;
+pub const R_MIPS_REL16: u32 = 33;
+pub const R_MIPS_ADD_IMMEDIATE: u32 = 34;
+pub const R_MIPS_PJUMP: u32 = 35;
+pub const R_MIPS_RELGOT: u32 = 36;
+pub const R_MIPS_JALR: u32 = 37;
+/// Module number 32 bit
+pub const R_MIPS_TLS_DTPMOD32: u32 = 38;
+/// Module-relative offset 32 bit
+pub const R_MIPS_TLS_DTPREL32: u32 = 39;
+/// Module number 64 bit
+pub const R_MIPS_TLS_DTPMOD64: u32 = 40;
+/// Module-relative offset 64 bit
+pub const R_MIPS_TLS_DTPREL64: u32 = 41;
+/// 16 bit GOT offset for GD
+pub const R_MIPS_TLS_GD: u32 = 42;
+/// 16 bit GOT offset for LDM
+pub const R_MIPS_TLS_LDM: u32 = 43;
+/// Module-relative offset, high 16 bits
+pub const R_MIPS_TLS_DTPREL_HI16: u32 = 44;
+/// Module-relative offset, low 16 bits
+pub const R_MIPS_TLS_DTPREL_LO16: u32 = 45;
+/// 16 bit GOT offset for IE
+pub const R_MIPS_TLS_GOTTPREL: u32 = 46;
+/// TP-relative offset, 32 bit
+pub const R_MIPS_TLS_TPREL32: u32 = 47;
+/// TP-relative offset, 64 bit
+pub const R_MIPS_TLS_TPREL64: u32 = 48;
+/// TP-relative offset, high 16 bits
+pub const R_MIPS_TLS_TPREL_HI16: u32 = 49;
+/// TP-relative offset, low 16 bits
+pub const R_MIPS_TLS_TPREL_LO16: u32 = 50;
+pub const R_MIPS_GLOB_DAT: u32 = 51;
+pub const R_MIPS_COPY: u32 = 126;
+pub const R_MIPS_JUMP_SLOT: u32 = 127;
+pub const R_MIPS_NUM: u32 = 128;
+
+///////////////////
+// RISC-V
+// See https://github.com/riscv/riscv-elf-psabi-doc
+///////////////////
+/// None
+pub const R_RISCV_NONE: u32 = 0;
+/// Runtime relocation: word32 = S + A
+pub const R_RISCV_32: u32 = 1;
+/// Runtime relocation: word64 = S + A
+pub const R_RISCV_64: u32 = 2;
+/// Runtime relocation: word32,64 = B + A
+pub const R_RISCV_RELATIVE: u32 = 3;
+/// Runtime relocation: must be in executable, not allowed in shared library
+pub const R_RISCV_COPY: u32 = 4;
+/// Runtime relocation: word32,64 = S; handled by PLT unless LD_BIND_NOW
+pub const R_RISCV_JUMP_SLOT: u32 = 5;
+/// TLS relocation: word32 = S->TLSINDEX
+pub const R_RISCV_TLS_DTPMOD32: u32 = 6;
+/// TLS relocation: word64 = S->TLSINDEX
+pub const R_RISCV_TLS_DTPMOD64: u32 = 7;
+/// TLS relocation: word32 = TLS + S + A - TLS_TP_OFFSET
+pub const R_RISCV_TLS_DTPREL32: u32 = 8;
+/// TLS relocation: word64 = TLS + S + A - TLS_TP_OFFSET
+pub const R_RISCV_TLS_DTPREL64: u32 = 9;
+/// TLS relocation: word32 = TLS + S + A + S_TLS_OFFSET - TLS_DTV_OFFSET
+pub const R_RISCV_TLS_TPREL32: u32 = 10;
+/// TLS relocation: word64 = TLS + S + A + S_TLS_OFFSET - TLS_DTV_OFFSET
+pub const R_RISCV_TLS_TPREL64: u32 = 11;
+// Relocation numbers 12-15 are intentionally absent (reserved in the RISC-V psABI).
+/// PC-relative branch (SB-Type)
+pub const R_RISCV_BRANCH: u32 = 16;
+/// PC-relative jump (UJ-Type)
+pub const R_RISCV_JAL: u32 = 17;
+/// PC-relative call: MACRO call,tail (auipc+jalr pair)
+pub const R_RISCV_CALL: u32 = 18;
+/// PC-relative call (PLT): MACRO call,tail (auipc+jalr pair) PIC
+pub const R_RISCV_CALL_PLT: u32 = 19;
+/// PC-relative GOT reference: MACRO la
+pub const R_RISCV_GOT_HI20: u32 = 20;
+/// PC-relative TLS IE GOT offset: MACRO la.tls.ie
+pub const R_RISCV_TLS_GOT_HI20: u32 = 21;
+/// PC-relative TLS GD reference: MACRO la.tls.gd
+pub const R_RISCV_TLS_GD_HI20: u32 = 22;
+/// PC-relative reference: %pcrel_hi(symbol) (U-Type)
+pub const R_RISCV_PCREL_HI20: u32 = 23;
+/// PC-relative reference: %pcrel_lo(symbol) (I-Type)
+pub const R_RISCV_PCREL_LO12_I: u32 = 24;
+/// PC-relative reference: %pcrel_lo(symbol) (S-Type)
+pub const R_RISCV_PCREL_LO12_S: u32 = 25;
+/// Absolute address: %hi(symbol) (U-Type)
+pub const R_RISCV_HI20: u32 = 26;
+/// Absolute address: %lo(symbol) (I-Type)
+pub const R_RISCV_LO12_I: u32 = 27;
+/// Absolute address: %lo(symbol) (S-Type)
+pub const R_RISCV_LO12_S: u32 = 28;
+/// TLS LE thread offset: %tprel_hi(symbol) (U-Type)
+pub const R_RISCV_TPREL_HI20: u32 = 29;
+/// TLS LE thread offset: %tprel_lo(symbol) (I-Type)
+pub const R_RISCV_TPREL_LO12_I: u32 = 30;
+/// TLS LE thread offset: %tprel_lo(symbol) (S-Type)
+pub const R_RISCV_TPREL_LO12_S: u32 = 31;
+/// TLS LE thread usage: %tprel_add(symbol)
+pub const R_RISCV_TPREL_ADD: u32 = 32;
+/// 8-bit label addition: word8 = S + A
+pub const R_RISCV_ADD8: u32 = 33;
+/// 16-bit label addition: word16 = S + A
+pub const R_RISCV_ADD16: u32 = 34;
+/// 32-bit label addition: word32 = S + A
+pub const R_RISCV_ADD32: u32 = 35;
+/// 64-bit label addition: word64 = S + A
+pub const R_RISCV_ADD64: u32 = 36;
+/// 8-bit label subtraction: word8 = S - A
+pub const R_RISCV_SUB8: u32 = 37;
+/// 16-bit label subtraction: word16 = S - A
+pub const R_RISCV_SUB16: u32 = 38;
+/// 32-bit label subtraction: word32 = S - A
+pub const R_RISCV_SUB32: u32 = 39;
+/// 64-bit label subtraction: word64 = S - A
+pub const R_RISCV_SUB64: u32 = 40;
+/// GNU C++ vtable hierarchy
+pub const R_RISCV_GNU_VTINHERIT: u32 = 41;
+/// GNU C++ vtable member usage
+pub const R_RISCV_GNU_VTENTRY: u32 = 42;
+/// Alignment statement
+pub const R_RISCV_ALIGN: u32 = 43;
+/// PC-relative branch offset (CB-Type)
+pub const R_RISCV_RVC_BRANCH: u32 = 44;
+/// PC-relative jump offset (CJ-Type)
+pub const R_RISCV_RVC_JUMP: u32 = 45;
+/// Absolute address (CI-Type)
+pub const R_RISCV_RVC_LUI: u32 = 46;
+/// GP-relative reference (I-Type)
+pub const R_RISCV_GPREL_I: u32 = 47;
+/// GP-relative reference (S-Type)
+pub const R_RISCV_GPREL_S: u32 = 48;
+/// TP-relative TLS LE load (I-Type)
+pub const R_RISCV_TPREL_I: u32 = 49;
+/// TP-relative TLS LE store (S-Type)
+pub const R_RISCV_TPREL_S: u32 = 50;
+/// Instruction pair can be relaxed
+pub const R_RISCV_RELAX: u32 = 51;
+/// Local label subtraction
+pub const R_RISCV_SUB6: u32 = 52;
+/// Local label assignment: word6 = S + A (per RISC-V psABI)
+pub const R_RISCV_SET6: u32 = 53;
+/// Local label assignment: word8 = S + A (per RISC-V psABI)
+pub const R_RISCV_SET8: u32 = 54;
+/// Local label assignment: word16 = S + A (per RISC-V psABI)
+pub const R_RISCV_SET16: u32 = 55;
+/// Local label assignment: word32 = S + A (per RISC-V psABI)
+pub const R_RISCV_SET32: u32 = 56;
+
+/// Converts a relocation type `typ` for the machine `machine` to its string
+/// representation.
+///
+/// Naming is per-architecture: x86, x86_64, OR1K, AArch64 and ARM names drop
+/// the `R_` prefix, while MIPS and RISC-V names keep it. Unrecognized
+/// relocation numbers yield an `R_UNKNOWN_<arch>` placeholder, and
+/// unsupported machines yield `"R_UNKNOWN"`.
+#[inline]
+pub fn r_to_str(typ: u32, machine: u16) -> &'static str {
+ use crate::elf::header::*;
+ match machine {
+ // x86
+ EM_386 => { match typ {
+ R_386_NONE => "386_NONE",
+ R_386_32 => "386_32",
+ R_386_PC32 => "386_PC32",
+ R_386_GOT32 => "386_GOT32",
+ R_386_PLT32 => "386_PLT32",
+ R_386_COPY => "386_COPY",
+ R_386_GLOB_DAT => "386_GLOB_DAT",
+ R_386_JMP_SLOT => "386_JMP_SLOT",
+ R_386_RELATIVE => "386_RELATIVE",
+ R_386_GOTOFF => "386_GOTOFF",
+ R_386_GOTPC => "386_GOTPC",
+ R_386_32PLT => "386_32PLT",
+ R_386_TLS_TPOFF => "386_TLS_TPOFF",
+ R_386_TLS_IE => "386_TLS_IE",
+ R_386_TLS_GOTIE => "386_TLS_GOTIE",
+ R_386_TLS_LE => "386_TLS_LE",
+ R_386_TLS_GD => "386_TLS_GD",
+ R_386_TLS_LDM => "386_TLS_LDM",
+ R_386_16 => "386_16",
+ R_386_PC16 => "386_PC16",
+ R_386_8 => "386_8",
+ R_386_PC8 => "386_PC8",
+ R_386_TLS_GD_32 => "386_TLS_GD_32",
+ R_386_TLS_GD_PUSH => "386_TLS_GD_PUSH",
+ R_386_TLS_GD_CALL => "386_TLS_GD_CALL",
+ R_386_TLS_GD_POP => "386_TLS_GD_POP",
+ R_386_TLS_LDM_32 => "386_TLS_LDM_32",
+ R_386_TLS_LDM_PUSH => "386_TLS_LDM_PUSH",
+ R_386_TLS_LDM_CALL => "386_TLS_LDM_CALL",
+ R_386_TLS_LDM_POP => "386_TLS_LDM_POP",
+ R_386_TLS_LDO_32 => "386_TLS_LDO_32",
+ R_386_TLS_IE_32 => "386_TLS_IE_32",
+ R_386_TLS_LE_32 => "386_TLS_LE_32",
+ R_386_TLS_DTPMOD32 => "386_TLS_DTPMOD32",
+ R_386_TLS_DTPOFF32 => "386_TLS_DTPOFF32",
+ R_386_TLS_TPOFF32 => "386_TLS_TPOFF32",
+ R_386_SIZE32 => "386_SIZE32",
+ R_386_TLS_GOTDESC => "386_TLS_GOTDESC",
+ R_386_TLS_DESC_CALL => "386_TLS_DESC_CALL",
+ R_386_TLS_DESC => "386_TLS_DESC",
+ R_386_IRELATIVE => "386_IRELATIVE",
+ R_386_GOT32X => "386_GOT32X",
+ _ => "R_UNKNOWN_386",
+ }},
+ EM_X86_64 => { match typ {
+ R_X86_64_64 => "X86_64_64",
+ R_X86_64_PC32 => "X86_64_PC32",
+ R_X86_64_GOT32 => "X86_64_GOT32",
+ R_X86_64_PLT32 => "X86_64_PLT32",
+ R_X86_64_COPY => "X86_64_COPY",
+ R_X86_64_GLOB_DAT => "X86_64_GLOB_DAT",
+ R_X86_64_JUMP_SLOT => "X86_64_JUMP_SLOT",
+ R_X86_64_RELATIVE => "X86_64_RELATIVE",
+ R_X86_64_GOTPCREL => "X86_64_GOTPCREL",
+ R_X86_64_32 => "X86_64_32",
+ R_X86_64_32S => "X86_64_32S",
+ R_X86_64_16 => "X86_64_16",
+ R_X86_64_PC16 => "X86_64_PC16",
+ R_X86_64_8 => "X86_64_8",
+ R_X86_64_PC8 => "X86_64_PC8",
+ R_X86_64_DTPMOD64 => "X86_64_DTPMOD64",
+ R_X86_64_DTPOFF64 => "X86_64_DTPOFF64",
+ R_X86_64_TPOFF64 => "X86_64_TPOFF64",
+ R_X86_64_TLSGD => "X86_64_TLSGD",
+ R_X86_64_TLSLD => "X86_64_TLSLD",
+ R_X86_64_DTPOFF32 => "X86_64_DTPOFF32",
+ R_X86_64_GOTTPOFF => "X86_64_GOTTPOFF",
+ R_X86_64_TPOFF32 => "X86_64_TPOFF32",
+ R_X86_64_PC64 => "X86_64_PC64",
+ R_X86_64_GOTOFF64 => "X86_64_GOTOFF64",
+ R_X86_64_GOTPC32 => "X86_64_GOTPC32",
+ R_X86_64_GOT64 => "X86_64_GOT64",
+ R_X86_64_GOTPCREL64 => "X86_64_GOTPCREL64",
+ R_X86_64_GOTPC64 => "X86_64_GOTPC64",
+ R_X86_64_GOTPLT64 => "X86_64_GOTPLT64",
+ R_X86_64_PLTOFF64 => "X86_64_PLTOFF64",
+ R_X86_64_SIZE32 => "X86_64_SIZE32",
+ R_X86_64_SIZE64 => "X86_64_SIZE64",
+ R_X86_64_GOTPC32_TLSDESC => "X86_64_GOTPC32_TLSDESC",
+ R_X86_64_TLSDESC_CALL => "X86_64_TLSDESC_CALL",
+ R_X86_64_TLSDESC => "X86_64_TLSDESC",
+ R_X86_64_IRELATIVE => "X86_64_IRELATIVE",
+ R_X86_64_RELATIVE64 => "X86_64_RELATIVE64",
+ // Drop the `R_` prefix like every other arm in this match; the
+ // previous strings ("R_X86_64_GOTPCRELX"/"R_X86_64_REX_GOTPCRELX")
+ // were inconsistent outliers.
+ R_X86_64_GOTPCRELX => "X86_64_GOTPCRELX",
+ R_X86_64_REX_GOTPCRELX => "X86_64_REX_GOTPCRELX",
+ _ => "R_UNKNOWN_X86_64",
+ }},
+ // openrisc
+ EM_OPENRISC => { match typ {
+ R_OR1K_NONE => "OR1K_NONE",
+ R_OR1K_32 => "OR1K_32",
+ R_OR1K_16 => "OR1K_16",
+ R_OR1K_8 => "OR1K_8",
+ R_OR1K_LO_16_IN_INSN => "OR1K_LO_16_IN_INSN",
+ R_OR1K_HI_16_IN_INSN => "OR1K_HI_16_IN_INSN",
+ R_OR1K_INSN_REL_26 => "OR1K_INSN_REL_26",
+ R_OR1K_GNU_VTENTRY => "OR1K_GNU_VTENTRY",
+ R_OR1K_GNU_VTINHERIT => "OR1K_GNU_VTINHERIT",
+ R_OR1K_32_PCREL => "OR1K_32_PCREL",
+ R_OR1K_16_PCREL => "OR1K_16_PCREL",
+ R_OR1K_8_PCREL => "OR1K_8_PCREL",
+ R_OR1K_GOTPC_HI16 => "OR1K_GOTPC_HI16",
+ R_OR1K_GOTPC_LO16 => "OR1K_GOTPC_LO16",
+ R_OR1K_GOT16 => "OR1K_GOT16",
+ R_OR1K_PLT26 => "OR1K_PLT26",
+ R_OR1K_GOTOFF_HI16 => "OR1K_GOTOFF_HI16",
+ R_OR1K_GOTOFF_LO16 => "OR1K_GOTOFF_LO16",
+ R_OR1K_COPY => "OR1K_COPY",
+ R_OR1K_GLOB_DAT => "OR1K_GLOB_DAT",
+ R_OR1K_JMP_SLOT => "OR1K_JMP_SLOT",
+ R_OR1K_RELATIVE => "OR1K_RELATIVE",
+ R_OR1K_TLS_GD_HI16 => "OR1K_TLS_GD_HI16",
+ R_OR1K_TLS_GD_LO16 => "OR1K_TLS_GD_LO16",
+ R_OR1K_TLS_LDM_HI16 => "OR1K_TLS_LDM_HI16",
+ R_OR1K_TLS_LDM_LO16 => "OR1K_TLS_LDM_LO16",
+ R_OR1K_TLS_LDO_HI16 => "OR1K_TLS_LDO_HI16",
+ R_OR1K_TLS_LDO_LO16 => "OR1K_TLS_LDO_LO16",
+ R_OR1K_TLS_IE_HI16 => "OR1K_TLS_IE_HI16",
+ R_OR1K_TLS_IE_LO16 => "OR1K_TLS_IE_LO16",
+ R_OR1K_TLS_LE_HI16 => "OR1K_TLS_LE_HI16",
+ R_OR1K_TLS_LE_LO16 => "OR1K_TLS_LE_LO16",
+ R_OR1K_TLS_TPOFF => "OR1K_TLS_TPOFF",
+ R_OR1K_TLS_DTPOFF => "OR1K_TLS_DTPOFF",
+ R_OR1K_TLS_DTPMOD => "OR1K_TLS_DTPMOD",
+ _ => "R_UNKNOWN_OR1K",
+ }},
+ // arm64
+ EM_AARCH64 => { match typ {
+ R_AARCH64_P32_ABS32 => "AARCH64_P32_ABS32",
+ R_AARCH64_P32_COPY => "AARCH64_P32_COPY",
+ R_AARCH64_P32_GLOB_DAT => "AARCH64_P32_GLOB_DAT",
+ R_AARCH64_P32_JUMP_SLOT => "AARCH64_P32_JUMP_SLOT",
+ R_AARCH64_P32_RELATIVE => "AARCH64_P32_RELATIVE",
+ R_AARCH64_P32_TLS_DTPMOD => "AARCH64_P32_TLS_DTPMOD",
+ R_AARCH64_P32_TLS_DTPREL => "AARCH64_P32_TLS_DTPREL",
+ R_AARCH64_P32_TLS_TPREL => "AARCH64_P32_TLS_TPREL",
+ R_AARCH64_P32_TLSDESC => "AARCH64_P32_TLSDESC",
+ R_AARCH64_P32_IRELATIVE => "AARCH64_P32_IRELATIVE",
+ R_AARCH64_ABS64 => "AARCH64_ABS64",
+ R_AARCH64_ABS32 => "AARCH64_ABS32",
+ R_AARCH64_ABS16 => "AARCH64_ABS16",
+ R_AARCH64_PREL64 => "AARCH64_PREL64",
+ R_AARCH64_PREL32 => "AARCH64_PREL32",
+ R_AARCH64_PREL16 => "AARCH64_PREL16",
+ R_AARCH64_MOVW_UABS_G0 => "AARCH64_MOVW_UABS_G0",
+ R_AARCH64_MOVW_UABS_G0_NC => "AARCH64_MOVW_UABS_G0_NC",
+ R_AARCH64_MOVW_UABS_G1 => "AARCH64_MOVW_UABS_G1",
+ R_AARCH64_MOVW_UABS_G1_NC => "AARCH64_MOVW_UABS_G1_NC",
+ R_AARCH64_MOVW_UABS_G2 => "AARCH64_MOVW_UABS_G2",
+ R_AARCH64_MOVW_UABS_G2_NC => "AARCH64_MOVW_UABS_G2_NC",
+ R_AARCH64_MOVW_UABS_G3 => "AARCH64_MOVW_UABS_G3",
+ R_AARCH64_MOVW_SABS_G0 => "AARCH64_MOVW_SABS_G0",
+ R_AARCH64_MOVW_SABS_G1 => "AARCH64_MOVW_SABS_G1",
+ R_AARCH64_MOVW_SABS_G2 => "AARCH64_MOVW_SABS_G2",
+ R_AARCH64_LD_PREL_LO19 => "AARCH64_LD_PREL_LO19",
+ R_AARCH64_ADR_PREL_LO21 => "AARCH64_ADR_PREL_LO21",
+ R_AARCH64_ADR_PREL_PG_HI21 => "AARCH64_ADR_PREL_PG_HI21",
+ R_AARCH64_ADR_PREL_PG_HI21_NC => "AARCH64_ADR_PREL_PG_HI21_NC",
+ R_AARCH64_ADD_ABS_LO12_NC => "AARCH64_ADD_ABS_LO12_NC",
+ R_AARCH64_LDST8_ABS_LO12_NC => "AARCH64_LDST8_ABS_LO12_NC",
+ R_AARCH64_TSTBR14 => "AARCH64_TSTBR14",
+ R_AARCH64_CONDBR19 => "AARCH64_CONDBR19",
+ R_AARCH64_JUMP26 => "AARCH64_JUMP26",
+ R_AARCH64_CALL26 => "AARCH64_CALL26",
+ R_AARCH64_LDST16_ABS_LO12_NC => "AARCH64_LDST16_ABS_LO12_NC",
+ R_AARCH64_LDST32_ABS_LO12_NC => "AARCH64_LDST32_ABS_LO12_NC",
+ R_AARCH64_LDST64_ABS_LO12_NC => "AARCH64_LDST64_ABS_LO12_NC",
+ R_AARCH64_MOVW_PREL_G0 => "AARCH64_MOVW_PREL_G0",
+ R_AARCH64_MOVW_PREL_G0_NC => "AARCH64_MOVW_PREL_G0_NC",
+ R_AARCH64_MOVW_PREL_G1 => "AARCH64_MOVW_PREL_G1",
+ R_AARCH64_MOVW_PREL_G1_NC => "AARCH64_MOVW_PREL_G1_NC",
+ R_AARCH64_MOVW_PREL_G2 => "AARCH64_MOVW_PREL_G2",
+ R_AARCH64_MOVW_PREL_G2_NC => "AARCH64_MOVW_PREL_G2_NC",
+ R_AARCH64_MOVW_PREL_G3 => "AARCH64_MOVW_PREL_G3",
+ R_AARCH64_LDST128_ABS_LO12_NC => "AARCH64_LDST128_ABS_LO12_NC",
+ R_AARCH64_MOVW_GOTOFF_G0 => "AARCH64_MOVW_GOTOFF_G0",
+ R_AARCH64_MOVW_GOTOFF_G0_NC => "AARCH64_MOVW_GOTOFF_G0_NC",
+ R_AARCH64_MOVW_GOTOFF_G1 => "AARCH64_MOVW_GOTOFF_G1",
+ R_AARCH64_MOVW_GOTOFF_G1_NC => "AARCH64_MOVW_GOTOFF_G1_NC",
+ R_AARCH64_MOVW_GOTOFF_G2 => "AARCH64_MOVW_GOTOFF_G2",
+ R_AARCH64_MOVW_GOTOFF_G2_NC => "AARCH64_MOVW_GOTOFF_G2_NC",
+ R_AARCH64_MOVW_GOTOFF_G3 => "AARCH64_MOVW_GOTOFF_G3",
+ R_AARCH64_GOTREL64 => "AARCH64_GOTREL64",
+ R_AARCH64_GOTREL32 => "AARCH64_GOTREL32",
+ R_AARCH64_GOT_LD_PREL19 => "AARCH64_GOT_LD_PREL19",
+ R_AARCH64_LD64_GOTOFF_LO15 => "AARCH64_LD64_GOTOFF_LO15",
+ R_AARCH64_ADR_GOT_PAGE => "AARCH64_ADR_GOT_PAGE",
+ R_AARCH64_LD64_GOT_LO12_NC => "AARCH64_LD64_GOT_LO12_NC",
+ R_AARCH64_LD64_GOTPAGE_LO15 => "AARCH64_LD64_GOTPAGE_LO15",
+ R_AARCH64_TLSGD_ADR_PREL21 => "AARCH64_TLSGD_ADR_PREL21",
+ R_AARCH64_TLSGD_ADR_PAGE21 => "AARCH64_TLSGD_ADR_PAGE21",
+ R_AARCH64_TLSGD_ADD_LO12_NC => "AARCH64_TLSGD_ADD_LO12_NC",
+ R_AARCH64_TLSGD_MOVW_G1 => "AARCH64_TLSGD_MOVW_G1",
+ R_AARCH64_TLSGD_MOVW_G0_NC => "AARCH64_TLSGD_MOVW_G0_NC",
+ R_AARCH64_TLSLD_ADR_PREL21 => "AARCH64_TLSLD_ADR_PREL21",
+ R_AARCH64_TLSLD_ADR_PAGE21 => "AARCH64_TLSLD_ADR_PAGE21",
+ R_AARCH64_TLSLD_ADD_LO12_NC => "AARCH64_TLSLD_ADD_LO12_NC",
+ R_AARCH64_TLSLD_MOVW_G1 => "AARCH64_TLSLD_MOVW_G1",
+ R_AARCH64_TLSLD_MOVW_G0_NC => "AARCH64_TLSLD_MOVW_G0_NC",
+ R_AARCH64_TLSLD_LD_PREL19 => "AARCH64_TLSLD_LD_PREL19",
+ R_AARCH64_TLSLD_MOVW_DTPREL_G2 => "AARCH64_TLSLD_MOVW_DTPREL_G2",
+ R_AARCH64_TLSLD_MOVW_DTPREL_G1 => "AARCH64_TLSLD_MOVW_DTPREL_G1",
+ R_AARCH64_TLSLD_MOVW_DTPREL_G1_NC => "AARCH64_TLSLD_MOVW_DTPREL_G1_NC",
+ R_AARCH64_TLSLD_MOVW_DTPREL_G0 => "AARCH64_TLSLD_MOVW_DTPREL_G0",
+ R_AARCH64_TLSLD_MOVW_DTPREL_G0_NC => "AARCH64_TLSLD_MOVW_DTPREL_G0_NC",
+ R_AARCH64_TLSLD_ADD_DTPREL_HI12 => "AARCH64_TLSLD_ADD_DTPREL_HI12",
+ R_AARCH64_TLSLD_ADD_DTPREL_LO12 => "AARCH64_TLSLD_ADD_DTPREL_LO12",
+ R_AARCH64_TLSLD_ADD_DTPREL_LO12_NC => "AARCH64_TLSLD_ADD_DTPREL_LO12_NC",
+ R_AARCH64_TLSLD_LDST8_DTPREL_LO12 => "AARCH64_TLSLD_LDST8_DTPREL_LO12",
+ R_AARCH64_TLSLD_LDST8_DTPREL_LO12_NC => "AARCH64_TLSLD_LDST8_DTPREL_LO12_NC",
+ R_AARCH64_TLSLD_LDST16_DTPREL_LO12 => "AARCH64_TLSLD_LDST16_DTPREL_LO12",
+ R_AARCH64_TLSLD_LDST16_DTPREL_LO12_NC => "AARCH64_TLSLD_LDST16_DTPREL_LO12_NC",
+ R_AARCH64_TLSLD_LDST32_DTPREL_LO12 => "AARCH64_TLSLD_LDST32_DTPREL_LO12",
+ R_AARCH64_TLSLD_LDST32_DTPREL_LO12_NC => "AARCH64_TLSLD_LDST32_DTPREL_LO12_NC",
+ R_AARCH64_TLSLD_LDST64_DTPREL_LO12 => "AARCH64_TLSLD_LDST64_DTPREL_LO12",
+ R_AARCH64_TLSLD_LDST64_DTPREL_LO12_NC => "AARCH64_TLSLD_LDST64_DTPREL_LO12_NC",
+ R_AARCH64_TLSIE_MOVW_GOTTPREL_G1 => "AARCH64_TLSIE_MOVW_GOTTPREL_G1",
+ R_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC => "AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC",
+ R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21 => "AARCH64_TLSIE_ADR_GOTTPREL_PAGE21",
+ R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC => "AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC",
+ R_AARCH64_TLSIE_LD_GOTTPREL_PREL19 => "AARCH64_TLSIE_LD_GOTTPREL_PREL19",
+ R_AARCH64_TLSLE_MOVW_TPREL_G2 => "AARCH64_TLSLE_MOVW_TPREL_G2",
+ R_AARCH64_TLSLE_MOVW_TPREL_G1 => "AARCH64_TLSLE_MOVW_TPREL_G1",
+ R_AARCH64_TLSLE_MOVW_TPREL_G1_NC => "AARCH64_TLSLE_MOVW_TPREL_G1_NC",
+ R_AARCH64_TLSLE_MOVW_TPREL_G0 => "AARCH64_TLSLE_MOVW_TPREL_G0",
+ R_AARCH64_TLSLE_MOVW_TPREL_G0_NC => "AARCH64_TLSLE_MOVW_TPREL_G0_NC",
+ R_AARCH64_TLSLE_ADD_TPREL_HI12 => "AARCH64_TLSLE_ADD_TPREL_HI12",
+ R_AARCH64_TLSLE_ADD_TPREL_LO12 => "AARCH64_TLSLE_ADD_TPREL_LO12",
+ R_AARCH64_TLSLE_ADD_TPREL_LO12_NC => "AARCH64_TLSLE_ADD_TPREL_LO12_NC",
+ R_AARCH64_TLSLE_LDST8_TPREL_LO12 => "AARCH64_TLSLE_LDST8_TPREL_LO12",
+ R_AARCH64_TLSLE_LDST8_TPREL_LO12_NC => "AARCH64_TLSLE_LDST8_TPREL_LO12_NC",
+ R_AARCH64_TLSLE_LDST16_TPREL_LO12 => "AARCH64_TLSLE_LDST16_TPREL_LO12",
+ R_AARCH64_TLSLE_LDST16_TPREL_LO12_NC => "AARCH64_TLSLE_LDST16_TPREL_LO12_NC",
+ R_AARCH64_TLSLE_LDST32_TPREL_LO12 => "AARCH64_TLSLE_LDST32_TPREL_LO12",
+ R_AARCH64_TLSLE_LDST32_TPREL_LO12_NC => "AARCH64_TLSLE_LDST32_TPREL_LO12_NC",
+ R_AARCH64_TLSLE_LDST64_TPREL_LO12 => "AARCH64_TLSLE_LDST64_TPREL_LO12",
+ R_AARCH64_TLSLE_LDST64_TPREL_LO12_NC => "AARCH64_TLSLE_LDST64_TPREL_LO12_NC",
+ R_AARCH64_TLSDESC_LD_PREL19 => "AARCH64_TLSDESC_LD_PREL19",
+ R_AARCH64_TLSDESC_ADR_PREL21 => "AARCH64_TLSDESC_ADR_PREL21",
+ R_AARCH64_TLSDESC_ADR_PAGE21 => "AARCH64_TLSDESC_ADR_PAGE21",
+ R_AARCH64_TLSDESC_LD64_LO12 => "AARCH64_TLSDESC_LD64_LO12",
+ R_AARCH64_TLSDESC_ADD_LO12 => "AARCH64_TLSDESC_ADD_LO12",
+ R_AARCH64_TLSDESC_OFF_G1 => "AARCH64_TLSDESC_OFF_G1",
+ R_AARCH64_TLSDESC_OFF_G0_NC => "AARCH64_TLSDESC_OFF_G0_NC",
+ R_AARCH64_TLSDESC_LDR => "AARCH64_TLSDESC_LDR",
+ R_AARCH64_TLSDESC_ADD => "AARCH64_TLSDESC_ADD",
+ R_AARCH64_TLSDESC_CALL => "AARCH64_TLSDESC_CALL",
+ R_AARCH64_TLSLE_LDST128_TPREL_LO12 => "AARCH64_TLSLE_LDST128_TPREL_LO12",
+ R_AARCH64_TLSLE_LDST128_TPREL_LO12_NC => "AARCH64_TLSLE_LDST128_TPREL_LO12_NC",
+ R_AARCH64_TLSLD_LDST128_DTPREL_LO12 => "AARCH64_TLSLD_LDST128_DTPREL_LO12",
+ R_AARCH64_TLSLD_LDST128_DTPREL_LO12_NC => "AARCH64_TLSLD_LDST128_DTPREL_LO12_NC",
+ R_AARCH64_COPY => "AARCH64_COPY",
+ R_AARCH64_GLOB_DAT => "AARCH64_GLOB_DAT",
+ R_AARCH64_JUMP_SLOT => "AARCH64_JUMP_SLOT",
+ R_AARCH64_RELATIVE => "AARCH64_RELATIVE",
+ R_AARCH64_TLS_DTPMOD => "AARCH64_TLS_DTPMOD",
+ R_AARCH64_TLS_DTPREL => "AARCH64_TLS_DTPREL",
+ R_AARCH64_TLS_TPREL => "AARCH64_TLS_TPREL",
+ R_AARCH64_TLSDESC => "AARCH64_TLSDESC",
+ R_AARCH64_IRELATIVE => "AARCH64_IRELATIVE",
+ _ => "R_UNKNOWN_AARCH64",
+ }},
+ // arm
+ EM_ARM => { match typ {
+ R_ARM_PC24 => "ARM_PC24",
+ R_ARM_ABS32 => "ARM_ABS32",
+ R_ARM_REL32 => "ARM_REL32",
+ R_ARM_PC13 => "ARM_PC13",
+ R_ARM_ABS16 => "ARM_ABS16",
+ R_ARM_ABS12 => "ARM_ABS12",
+ R_ARM_THM_ABS5 => "ARM_THM_ABS5",
+ R_ARM_ABS8 => "ARM_ABS8",
+ R_ARM_SBREL32 => "ARM_SBREL32",
+ R_ARM_THM_PC22 => "ARM_THM_PC22",
+ R_ARM_THM_PC8 => "ARM_THM_PC8",
+ R_ARM_AMP_VCALL9 => "ARM_AMP_VCALL9",
+ R_ARM_TLS_DESC => "ARM_TLS_DESC",
+ R_ARM_THM_SWI8 => "ARM_THM_SWI8",
+ R_ARM_XPC25 => "ARM_XPC25",
+ R_ARM_THM_XPC22 => "ARM_THM_XPC22",
+ R_ARM_TLS_DTPMOD32 => "ARM_TLS_DTPMOD32",
+ R_ARM_TLS_DTPOFF32 => "ARM_TLS_DTPOFF32",
+ R_ARM_TLS_TPOFF32 => "ARM_TLS_TPOFF32",
+ R_ARM_COPY => "ARM_COPY",
+ R_ARM_GLOB_DAT => "ARM_GLOB_DAT",
+ R_ARM_JUMP_SLOT => "ARM_JUMP_SLOT",
+ R_ARM_RELATIVE => "ARM_RELATIVE",
+ R_ARM_GOTOFF => "ARM_GOTOFF",
+ R_ARM_GOTPC => "ARM_GOTPC",
+ R_ARM_GOT32 => "ARM_GOT32",
+ R_ARM_PLT32 => "ARM_PLT32",
+ R_ARM_CALL => "ARM_CALL",
+ R_ARM_JUMP24 => "ARM_JUMP24",
+ R_ARM_THM_JUMP24 => "ARM_THM_JUMP24",
+ R_ARM_BASE_ABS => "ARM_BASE_ABS",
+ R_ARM_ALU_PCREL_7_0 => "ARM_ALU_PCREL_7_0",
+ R_ARM_ALU_PCREL_15_8 => "ARM_ALU_PCREL_15_8",
+ R_ARM_ALU_PCREL_23_15 => "ARM_ALU_PCREL_23_15",
+ R_ARM_LDR_SBREL_11_0 => "ARM_LDR_SBREL_11_0",
+ R_ARM_ALU_SBREL_19_12 => "ARM_ALU_SBREL_19_12",
+ R_ARM_ALU_SBREL_27_20 => "ARM_ALU_SBREL_27_20",
+ R_ARM_TARGET1 => "ARM_TARGET1",
+ R_ARM_SBREL31 => "ARM_SBREL31",
+ R_ARM_V4BX => "ARM_V4BX",
+ R_ARM_TARGET2 => "ARM_TARGET2",
+ R_ARM_PREL31 => "ARM_PREL31",
+ R_ARM_MOVW_ABS_NC => "ARM_MOVW_ABS_NC",
+ R_ARM_MOVT_ABS => "ARM_MOVT_ABS",
+ R_ARM_MOVW_PREL_NC => "ARM_MOVW_PREL_NC",
+ R_ARM_MOVT_PREL => "ARM_MOVT_PREL",
+ R_ARM_THM_MOVW_ABS_NC => "ARM_THM_MOVW_ABS_NC",
+ R_ARM_THM_MOVT_ABS => "ARM_THM_MOVT_ABS",
+ R_ARM_THM_MOVW_PREL_NC => "ARM_THM_MOVW_PREL_NC",
+ R_ARM_THM_MOVT_PREL => "ARM_THM_MOVT_PREL",
+ R_ARM_THM_JUMP19 => "ARM_THM_JUMP19",
+ R_ARM_THM_JUMP6 => "ARM_THM_JUMP6",
+ R_ARM_THM_ALU_PREL_11_0 => "ARM_THM_ALU_PREL_11_0",
+ R_ARM_THM_PC12 => "ARM_THM_PC12",
+ R_ARM_ABS32_NOI => "ARM_ABS32_NOI",
+ R_ARM_REL32_NOI => "ARM_REL32_NOI",
+ R_ARM_ALU_PC_G0_NC => "ARM_ALU_PC_G0_NC",
+ R_ARM_ALU_PC_G0 => "ARM_ALU_PC_G0",
+ R_ARM_ALU_PC_G1_NC => "ARM_ALU_PC_G1_NC",
+ R_ARM_ALU_PC_G1 => "ARM_ALU_PC_G1",
+ R_ARM_ALU_PC_G2 => "ARM_ALU_PC_G2",
+ R_ARM_LDR_PC_G1 => "ARM_LDR_PC_G1",
+ R_ARM_LDR_PC_G2 => "ARM_LDR_PC_G2",
+ R_ARM_LDRS_PC_G0 => "ARM_LDRS_PC_G0",
+ R_ARM_LDRS_PC_G1 => "ARM_LDRS_PC_G1",
+ R_ARM_LDRS_PC_G2 => "ARM_LDRS_PC_G2",
+ R_ARM_LDC_PC_G0 => "ARM_LDC_PC_G0",
+ R_ARM_LDC_PC_G1 => "ARM_LDC_PC_G1",
+ R_ARM_LDC_PC_G2 => "ARM_LDC_PC_G2",
+ R_ARM_ALU_SB_G0_NC => "ARM_ALU_SB_G0_NC",
+ R_ARM_ALU_SB_G0 => "ARM_ALU_SB_G0",
+ R_ARM_ALU_SB_G1_NC => "ARM_ALU_SB_G1_NC",
+ R_ARM_ALU_SB_G1 => "ARM_ALU_SB_G1",
+ R_ARM_ALU_SB_G2 => "ARM_ALU_SB_G2",
+ R_ARM_LDR_SB_G0 => "ARM_LDR_SB_G0",
+ R_ARM_LDR_SB_G1 => "ARM_LDR_SB_G1",
+ R_ARM_LDR_SB_G2 => "ARM_LDR_SB_G2",
+ R_ARM_LDRS_SB_G0 => "ARM_LDRS_SB_G0",
+ R_ARM_LDRS_SB_G1 => "ARM_LDRS_SB_G1",
+ R_ARM_LDRS_SB_G2 => "ARM_LDRS_SB_G2",
+ R_ARM_LDC_SB_G0 => "ARM_LDC_SB_G0",
+ R_ARM_LDC_SB_G1 => "ARM_LDC_SB_G1",
+ R_ARM_LDC_SB_G2 => "ARM_LDC_SB_G2",
+ R_ARM_MOVW_BREL_NC => "ARM_MOVW_BREL_NC",
+ R_ARM_MOVT_BREL => "ARM_MOVT_BREL",
+ R_ARM_MOVW_BREL => "ARM_MOVW_BREL",
+ R_ARM_THM_MOVW_BREL_NC => "ARM_THM_MOVW_BREL_NC",
+ R_ARM_THM_MOVT_BREL => "ARM_THM_MOVT_BREL",
+ R_ARM_THM_MOVW_BREL => "ARM_THM_MOVW_BREL",
+ R_ARM_TLS_GOTDESC => "ARM_TLS_GOTDESC",
+ R_ARM_TLS_CALL => "ARM_TLS_CALL",
+ R_ARM_TLS_DESCSEQ => "ARM_TLS_DESCSEQ",
+ R_ARM_THM_TLS_CALL => "ARM_THM_TLS_CALL",
+ R_ARM_PLT32_ABS => "ARM_PLT32_ABS",
+ R_ARM_GOT_ABS => "ARM_GOT_ABS",
+ R_ARM_GOT_PREL => "ARM_GOT_PREL",
+ R_ARM_GOT_BREL12 => "ARM_GOT_BREL12",
+ R_ARM_GOTOFF12 => "ARM_GOTOFF12",
+ R_ARM_GOTRELAX => "ARM_GOTRELAX",
+ R_ARM_GNU_VTENTRY => "ARM_GNU_VTENTRY",
+ R_ARM_GNU_VTINHERIT => "ARM_GNU_VTINHERIT",
+ R_ARM_THM_PC11 => "ARM_THM_PC11",
+ R_ARM_THM_PC9 => "ARM_THM_PC9",
+ R_ARM_TLS_GD32 => "ARM_TLS_GD32",
+ R_ARM_TLS_LDM32 => "ARM_TLS_LDM32",
+ R_ARM_TLS_LDO32 => "ARM_TLS_LDO32",
+ R_ARM_TLS_IE32 => "ARM_TLS_IE32",
+ R_ARM_TLS_LE32 => "ARM_TLS_LE32",
+ R_ARM_TLS_LDO12 => "ARM_TLS_LDO12",
+ R_ARM_TLS_LE12 => "ARM_TLS_LE12",
+ R_ARM_TLS_IE12GP => "ARM_TLS_IE12GP",
+ R_ARM_ME_TOO => "ARM_ME_TOO",
+ R_ARM_THM_TLS_DESCSEQ16 => "ARM_THM_TLS_DESCSEQ16",
+ R_ARM_THM_TLS_DESCSEQ32 => "ARM_THM_TLS_DESCSEQ32",
+ R_ARM_THM_GOT_BREL12 => "ARM_THM_GOT_BREL12",
+ R_ARM_IRELATIVE => "ARM_IRELATIVE",
+ R_ARM_RXPC25 => "ARM_RXPC25",
+ R_ARM_RSBREL32 => "ARM_RSBREL32",
+ R_ARM_THM_RPC22 => "ARM_THM_RPC22",
+ R_ARM_RREL32 => "ARM_RREL32",
+ R_ARM_RABS22 => "ARM_RABS22",
+ R_ARM_RPC24 => "ARM_RPC24",
+ R_ARM_RBASE => "ARM_RBASE",
+ _ => "R_UNKNOWN_ARM",
+ }},
+ // MIPS
+ EM_MIPS | EM_MIPS_RS3_LE | EM_MIPS_X => { match typ {
+ R_MIPS_NONE => "R_MIPS_NONE",
+ R_MIPS_16 => "R_MIPS_16",
+ R_MIPS_32 => "R_MIPS_32",
+ R_MIPS_REL32 => "R_MIPS_REL32",
+ R_MIPS_26 => "R_MIPS_26",
+ R_MIPS_HI16 => "R_MIPS_HI16",
+ R_MIPS_LO16 => "R_MIPS_LO16",
+ R_MIPS_GPREL16 => "R_MIPS_GPREL16",
+ R_MIPS_LITERAL => "R_MIPS_LITERAL",
+ R_MIPS_GOT16 => "R_MIPS_GOT16",
+ R_MIPS_PC16 => "R_MIPS_PC16",
+ R_MIPS_CALL16 => "R_MIPS_CALL16",
+ R_MIPS_GPREL32 => "R_MIPS_GPREL32",
+ R_MIPS_SHIFT5 => "R_MIPS_SHIFT5",
+ R_MIPS_SHIFT6 => "R_MIPS_SHIFT6",
+ R_MIPS_64 => "R_MIPS_64",
+ R_MIPS_GOT_DISP => "R_MIPS_GOT_DISP",
+ R_MIPS_GOT_PAGE => "R_MIPS_GOT_PAGE",
+ R_MIPS_GOT_OFST => "R_MIPS_GOT_OFST",
+ R_MIPS_GOT_HI16 => "R_MIPS_GOT_HI16",
+ R_MIPS_GOT_LO16 => "R_MIPS_GOT_LO16",
+ R_MIPS_SUB => "R_MIPS_SUB",
+ R_MIPS_INSERT_A => "R_MIPS_INSERT_A",
+ R_MIPS_INSERT_B => "R_MIPS_INSERT_B",
+ R_MIPS_DELETE => "R_MIPS_DELETE",
+ R_MIPS_HIGHER => "R_MIPS_HIGHER",
+ R_MIPS_HIGHEST => "R_MIPS_HIGHEST",
+ R_MIPS_CALL_HI16 => "R_MIPS_CALL_HI16",
+ R_MIPS_CALL_LO16 => "R_MIPS_CALL_LO16",
+ R_MIPS_SCN_DISP => "R_MIPS_SCN_DISP",
+ R_MIPS_REL16 => "R_MIPS_REL16",
+ R_MIPS_ADD_IMMEDIATE => "R_MIPS_ADD_IMMEDIATE",
+ R_MIPS_PJUMP => "R_MIPS_PJUMP",
+ R_MIPS_RELGOT => "R_MIPS_RELGOT",
+ R_MIPS_JALR => "R_MIPS_JALR",
+ R_MIPS_TLS_DTPMOD32 => "R_MIPS_TLS_DTPMOD32",
+ R_MIPS_TLS_DTPREL32 => "R_MIPS_TLS_DTPREL32",
+ R_MIPS_TLS_DTPMOD64 => "R_MIPS_TLS_DTPMOD64",
+ R_MIPS_TLS_DTPREL64 => "R_MIPS_TLS_DTPREL64",
+ R_MIPS_TLS_GD => "R_MIPS_TLS_GD",
+ R_MIPS_TLS_LDM => "R_MIPS_TLS_LDM",
+ R_MIPS_TLS_DTPREL_HI16 => "R_MIPS_TLS_DTPREL_HI16",
+ R_MIPS_TLS_DTPREL_LO16 => "R_MIPS_TLS_DTPREL_LO16",
+ R_MIPS_TLS_GOTTPREL => "R_MIPS_TLS_GOTTPREL",
+ R_MIPS_TLS_TPREL32 => "R_MIPS_TLS_TPREL32",
+ R_MIPS_TLS_TPREL64 => "R_MIPS_TLS_TPREL64",
+ R_MIPS_TLS_TPREL_HI16 => "R_MIPS_TLS_TPREL_HI16",
+ R_MIPS_TLS_TPREL_LO16 => "R_MIPS_TLS_TPREL_LO16",
+ R_MIPS_GLOB_DAT => "R_MIPS_GLOB_DAT",
+ R_MIPS_COPY => "R_MIPS_COPY",
+ R_MIPS_JUMP_SLOT => "R_MIPS_JUMP_SLOT",
+ _ => "R_UNKNOWN_MIPS",
+ }},
+ // RISC-V
+ EM_RISCV => { match typ {
+ R_RISCV_NONE => "R_RISCV_NONE",
+ R_RISCV_32 => "R_RISCV_32",
+ R_RISCV_64 => "R_RISCV_64",
+ R_RISCV_RELATIVE => "R_RISCV_RELATIVE",
+ R_RISCV_COPY => "R_RISCV_COPY",
+ R_RISCV_JUMP_SLOT => "R_RISCV_JUMP_SLOT",
+ R_RISCV_TLS_DTPMOD32 => "R_RISCV_TLS_DTPMOD32",
+ R_RISCV_TLS_DTPMOD64 => "R_RISCV_TLS_DTPMOD64",
+ R_RISCV_TLS_DTPREL32 => "R_RISCV_TLS_DTPREL32",
+ R_RISCV_TLS_DTPREL64 => "R_RISCV_TLS_DTPREL64",
+ R_RISCV_TLS_TPREL32 => "R_RISCV_TLS_TPREL32",
+ R_RISCV_TLS_TPREL64 => "R_RISCV_TLS_TPREL64",
+ R_RISCV_BRANCH => "R_RISCV_BRANCH",
+ R_RISCV_JAL => "R_RISCV_JAL",
+ R_RISCV_CALL => "R_RISCV_CALL",
+ R_RISCV_CALL_PLT => "R_RISCV_CALL_PLT",
+ R_RISCV_GOT_HI20 => "R_RISCV_GOT_HI20",
+ R_RISCV_TLS_GOT_HI20 => "R_RISCV_TLS_GOT_HI20",
+ R_RISCV_TLS_GD_HI20 => "R_RISCV_TLS_GD_HI20",
+ R_RISCV_PCREL_HI20 => "R_RISCV_PCREL_HI20",
+ R_RISCV_PCREL_LO12_I => "R_RISCV_PCREL_LO12_I",
+ R_RISCV_PCREL_LO12_S => "R_RISCV_PCREL_LO12_S",
+ R_RISCV_HI20 => "R_RISCV_HI20",
+ R_RISCV_LO12_I => "R_RISCV_LO12_I",
+ R_RISCV_LO12_S => "R_RISCV_LO12_S",
+ R_RISCV_TPREL_HI20 => "R_RISCV_TPREL_HI20",
+ R_RISCV_TPREL_LO12_I => "R_RISCV_TPREL_LO12_I",
+ R_RISCV_TPREL_LO12_S => "R_RISCV_TPREL_LO12_S",
+ R_RISCV_TPREL_ADD => "R_RISCV_TPREL_ADD",
+ R_RISCV_ADD8 => "R_RISCV_ADD8",
+ R_RISCV_ADD16 => "R_RISCV_ADD16",
+ R_RISCV_ADD32 => "R_RISCV_ADD32",
+ R_RISCV_ADD64 => "R_RISCV_ADD64",
+ R_RISCV_SUB8 => "R_RISCV_SUB8",
+ R_RISCV_SUB16 => "R_RISCV_SUB16",
+ R_RISCV_SUB32 => "R_RISCV_SUB32",
+ R_RISCV_SUB64 => "R_RISCV_SUB64",
+ R_RISCV_GNU_VTINHERIT => "R_RISCV_GNU_VTINHERIT",
+ R_RISCV_GNU_VTENTRY => "R_RISCV_GNU_VTENTRY",
+ R_RISCV_ALIGN => "R_RISCV_ALIGN",
+ R_RISCV_RVC_BRANCH => "R_RISCV_RVC_BRANCH",
+ R_RISCV_RVC_JUMP => "R_RISCV_RVC_JUMP",
+ R_RISCV_RVC_LUI => "R_RISCV_RVC_LUI",
+ R_RISCV_GPREL_I => "R_RISCV_GPREL_I",
+ R_RISCV_GPREL_S => "R_RISCV_GPREL_S",
+ R_RISCV_TPREL_I => "R_RISCV_TPREL_I",
+ R_RISCV_TPREL_S => "R_RISCV_TPREL_S",
+ R_RISCV_RELAX => "R_RISCV_RELAX",
+ R_RISCV_SUB6 => "R_RISCV_SUB6",
+ R_RISCV_SET6 => "R_RISCV_SET6",
+ R_RISCV_SET8 => "R_RISCV_SET8",
+ R_RISCV_SET16 => "R_RISCV_SET16",
+ R_RISCV_SET32 => "R_RISCV_SET32",
+ _ => "R_UNKNOWN_RISCV",
+ }},
+ _ => "R_UNKNOWN",
+ }
+}
diff --git a/third_party/rust/goblin/src/elf/dynamic.rs b/third_party/rust/goblin/src/elf/dynamic.rs
new file mode 100644
index 0000000000..c410469c6a
--- /dev/null
+++ b/third_party/rust/goblin/src/elf/dynamic.rs
@@ -0,0 +1,807 @@
+// Generates an ELF `Dyn` dynamic-array entry struct whose fields are sized by
+// `$size` (`u32` for ELF32, `u64` for ELF64).
+macro_rules! elf_dyn {
+ ($size:ty) => {
+ // XXX: Do not import scroll traits here.
+ // See: https://github.com/rust-lang/rust/issues/65090#issuecomment-538668155
+
+ #[repr(C)]
+ #[derive(Copy, Clone, PartialEq, Default)]
+ #[cfg_attr(
+ feature = "alloc",
+ derive(scroll::Pread, scroll::Pwrite, scroll::SizeWith)
+ )]
+ /// An entry in the dynamic array
+ pub struct Dyn {
+ /// Dynamic entry type
+ pub d_tag: $size,
+ /// Integer value
+ pub d_val: $size,
+ }
+
+ use plain;
+ // Declares that `Dyn` may be reinterpreted from raw bytes: it is
+ // `#[repr(C)]` and contains only plain integer fields (see the `plain`
+ // crate's `Plain` trait safety requirements).
+ unsafe impl plain::Plain for Dyn {}
+ };
+}
+
+// TODO: decide on the friendliest + safest API choice here — u32 or u64 tag constants.
+// Remember that d_tag is "pointer sized" and is sometimes used as an address.
+// Original rationale: u64 was chosen over u32 because, for pattern-matching use
+// cases, it seems safer to widen an ELF32 d_tag from u32 to u64 at runtime than
+// to truncate an ELF64 d_tag from u64 to u32.
+
+/// Marks end of dynamic section
+pub const DT_NULL: u64 = 0;
+/// Name of needed library
+pub const DT_NEEDED: u64 = 1;
+/// Size in bytes of PLT relocs
+pub const DT_PLTRELSZ: u64 = 2;
+/// Processor defined value
+pub const DT_PLTGOT: u64 = 3;
+/// Address of symbol hash table
+pub const DT_HASH: u64 = 4;
+/// Address of string table
+pub const DT_STRTAB: u64 = 5;
+/// Address of symbol table
+pub const DT_SYMTAB: u64 = 6;
+/// Address of Rela relocs
+pub const DT_RELA: u64 = 7;
+/// Total size of Rela relocs
+pub const DT_RELASZ: u64 = 8;
+/// Size of one Rela reloc
+pub const DT_RELAENT: u64 = 9;
+/// Size of string table
+pub const DT_STRSZ: u64 = 10;
+/// Size of one symbol table entry
+pub const DT_SYMENT: u64 = 11;
+/// Address of init function
+pub const DT_INIT: u64 = 12;
+/// Address of termination function
+pub const DT_FINI: u64 = 13;
+/// Name of shared object
+pub const DT_SONAME: u64 = 14;
+/// Library search path (deprecated)
+pub const DT_RPATH: u64 = 15;
+/// Start symbol search here
+pub const DT_SYMBOLIC: u64 = 16;
+/// Address of Rel relocs
+pub const DT_REL: u64 = 17;
+/// Total size of Rel relocs
+pub const DT_RELSZ: u64 = 18;
+/// Size of one Rel reloc
+pub const DT_RELENT: u64 = 19;
+/// Type of reloc in PLT
+pub const DT_PLTREL: u64 = 20;
+/// For debugging; unspecified
+pub const DT_DEBUG: u64 = 21;
+/// Reloc might modify .text
+pub const DT_TEXTREL: u64 = 22;
+/// Address of PLT relocs
+pub const DT_JMPREL: u64 = 23;
+/// Process relocations of object
+pub const DT_BIND_NOW: u64 = 24;
+/// Array with addresses of init fct
+pub const DT_INIT_ARRAY: u64 = 25;
+/// Array with addresses of fini fct
+pub const DT_FINI_ARRAY: u64 = 26;
+/// Size in bytes of DT_INIT_ARRAY
+pub const DT_INIT_ARRAYSZ: u64 = 27;
+/// Size in bytes of DT_FINI_ARRAY
+pub const DT_FINI_ARRAYSZ: u64 = 28;
+/// Library search path
+pub const DT_RUNPATH: u64 = 29;
+/// Flags for the object being loaded
+pub const DT_FLAGS: u64 = 30;
+// NOTE: DT_ENCODING and DT_PREINIT_ARRAY deliberately share the value 32.
+/// Start of encoded range
+pub const DT_ENCODING: u64 = 32;
+/// Array with addresses of preinit fct
+pub const DT_PREINIT_ARRAY: u64 = 32;
+/// size in bytes of DT_PREINIT_ARRAY
+pub const DT_PREINIT_ARRAYSZ: u64 = 33;
+/// Number of generic tag values defined above (a count, not itself a tag)
+pub const DT_NUM: u64 = 34;
+/// Start of OS-specific
+pub const DT_LOOS: u64 = 0x6000_000d;
+/// End of OS-specific
+pub const DT_HIOS: u64 = 0x6fff_f000;
+/// Start of processor-specific
+pub const DT_LOPROC: u64 = 0x7000_0000;
+/// End of processor-specific
+pub const DT_HIPROC: u64 = 0x7fff_ffff;
+// Most used by any processor
+// pub const DT_PROCNUM: u64 = DT_MIPS_NUM;
+
+/// DT_* entries which fall between DT_ADDRRNGHI & DT_ADDRRNGLO use the
+/// Dyn.d_un.d_ptr field of the Elf*_Dyn structure.
+///
+/// If any adjustment is made to the ELF object after it has been
+/// built these entries will need to be adjusted.
+pub const DT_ADDRRNGLO: u64 = 0x6fff_fe00;
+/// GNU-style hash table
+pub const DT_GNU_HASH: u64 = 0x6fff_fef5;
+/// Location of PLT entry for TLS descriptor resolver calls
+pub const DT_TLSDESC_PLT: u64 = 0x6fff_fef6;
+/// Location of GOT entry used by TLS descriptor resolver PLT entry
+pub const DT_TLSDESC_GOT: u64 = 0x6fff_fef7;
+/// Start of conflict section
+pub const DT_GNU_CONFLICT: u64 = 0x6fff_fef8;
+/// Library list
+pub const DT_GNU_LIBLIST: u64 = 0x6fff_fef9;
+/// Configuration information
+pub const DT_CONFIG: u64 = 0x6fff_fefa;
+/// Dependency auditing
+pub const DT_DEPAUDIT: u64 = 0x6fff_fefb;
+/// Object auditing
+pub const DT_AUDIT: u64 = 0x6fff_fefc;
+/// PLT padding
+pub const DT_PLTPAD: u64 = 0x6fff_fefd;
+/// Move table
+pub const DT_MOVETAB: u64 = 0x6fff_fefe;
+/// Syminfo table
+pub const DT_SYMINFO: u64 = 0x6fff_feff;
+/// End of the `d_ptr` address range (same value as DT_SYMINFO)
+pub const DT_ADDRRNGHI: u64 = 0x6fff_feff;
+
+//DT_ADDRTAGIDX(tag) (DT_ADDRRNGHI - (tag)) /* Reverse order! */
+/// Number of `DT_ADDRRNG*`-range tags in use
+pub const DT_ADDRNUM: u64 = 11;
+
+/// The versioning entry types. The next are defined as part of the GNU extension
+pub const DT_VERSYM: u64 = 0x6fff_fff0;
+/// Count of relative (`*_RELATIVE`) entries in the Rela relocation table
+pub const DT_RELACOUNT: u64 = 0x6fff_fff9;
+/// Count of relative (`*_RELATIVE`) entries in the Rel relocation table
+pub const DT_RELCOUNT: u64 = 0x6fff_fffa;
+/// State flags, see DF_1_* below
+pub const DT_FLAGS_1: u64 = 0x6fff_fffb;
+/// Address of version definition table
+pub const DT_VERDEF: u64 = 0x6fff_fffc;
+/// Number of version definitions
+pub const DT_VERDEFNUM: u64 = 0x6fff_fffd;
+/// Address of table with needed versions
+pub const DT_VERNEED: u64 = 0x6fff_fffe;
+/// Number of needed versions
+pub const DT_VERNEEDNUM: u64 = 0x6fff_ffff;
+
+/// Converts a dynamic tag to its string representation.
+///
+/// Returns `"UNKNOWN_TAG"` for values with no dedicated constant. Tags that
+/// alias another tag's value (`DT_ENCODING` == `DT_PREINIT_ARRAY`,
+/// `DT_ADDRRNGHI` == `DT_SYMINFO`) are reported under the name listed below.
+#[inline]
+pub fn tag_to_str(tag: u64) -> &'static str {
+ match tag {
+ DT_NULL => "DT_NULL",
+ DT_NEEDED => "DT_NEEDED",
+ DT_PLTRELSZ => "DT_PLTRELSZ",
+ DT_PLTGOT => "DT_PLTGOT",
+ DT_HASH => "DT_HASH",
+ DT_STRTAB => "DT_STRTAB",
+ DT_SYMTAB => "DT_SYMTAB",
+ DT_RELA => "DT_RELA",
+ DT_RELASZ => "DT_RELASZ",
+ DT_RELAENT => "DT_RELAENT",
+ DT_STRSZ => "DT_STRSZ",
+ DT_SYMENT => "DT_SYMENT",
+ DT_INIT => "DT_INIT",
+ DT_FINI => "DT_FINI",
+ DT_SONAME => "DT_SONAME",
+ DT_RPATH => "DT_RPATH",
+ DT_SYMBOLIC => "DT_SYMBOLIC",
+ DT_REL => "DT_REL",
+ DT_RELSZ => "DT_RELSZ",
+ DT_RELENT => "DT_RELENT",
+ DT_PLTREL => "DT_PLTREL",
+ DT_DEBUG => "DT_DEBUG",
+ DT_TEXTREL => "DT_TEXTREL",
+ DT_JMPREL => "DT_JMPREL",
+ DT_BIND_NOW => "DT_BIND_NOW",
+ DT_INIT_ARRAY => "DT_INIT_ARRAY",
+ DT_FINI_ARRAY => "DT_FINI_ARRAY",
+ DT_INIT_ARRAYSZ => "DT_INIT_ARRAYSZ",
+ DT_FINI_ARRAYSZ => "DT_FINI_ARRAYSZ",
+ DT_RUNPATH => "DT_RUNPATH",
+ DT_FLAGS => "DT_FLAGS",
+ // DT_ENCODING shares this value with DT_PREINIT_ARRAY.
+ DT_PREINIT_ARRAY => "DT_PREINIT_ARRAY",
+ DT_PREINIT_ARRAYSZ => "DT_PREINIT_ARRAYSZ",
+ DT_NUM => "DT_NUM",
+ DT_LOOS => "DT_LOOS",
+ DT_HIOS => "DT_HIOS",
+ DT_LOPROC => "DT_LOPROC",
+ DT_HIPROC => "DT_HIPROC",
+ DT_ADDRRNGLO => "DT_ADDRRNGLO",
+ DT_GNU_HASH => "DT_GNU_HASH",
+ DT_TLSDESC_PLT => "DT_TLSDESC_PLT",
+ DT_TLSDESC_GOT => "DT_TLSDESC_GOT",
+ DT_GNU_CONFLICT => "DT_GNU_CONFLICT",
+ DT_GNU_LIBLIST => "DT_GNU_LIBLIST",
+ DT_CONFIG => "DT_CONFIG",
+ DT_DEPAUDIT => "DT_DEPAUDIT",
+ DT_AUDIT => "DT_AUDIT",
+ DT_PLTPAD => "DT_PLTPAD",
+ DT_MOVETAB => "DT_MOVETAB",
+ // DT_ADDRRNGHI shares this value with DT_SYMINFO.
+ DT_SYMINFO => "DT_SYMINFO",
+ DT_VERSYM => "DT_VERSYM",
+ DT_RELACOUNT => "DT_RELACOUNT",
+ DT_RELCOUNT => "DT_RELCOUNT",
+ DT_VERDEF => "DT_VERDEF",
+ DT_VERDEFNUM => "DT_VERDEFNUM",
+ DT_VERNEED => "DT_VERNEED",
+ DT_VERNEEDNUM => "DT_VERNEEDNUM",
+ DT_FLAGS_1 => "DT_FLAGS_1",
+ _ => "UNKNOWN_TAG",
+ }
+}
+
+// Values of `d_un.d_val` in the DT_FLAGS entry
+/// Object may use the $ORIGIN substitution string.
+pub const DF_ORIGIN: u64 = 0x0000_0001;
+/// Symbol resolution starts here.
+pub const DF_SYMBOLIC: u64 = 0x0000_0002;
+/// Object contains text relocations.
+pub const DF_TEXTREL: u64 = 0x0000_0004;
+/// No lazy binding for this object.
+pub const DF_BIND_NOW: u64 = 0x0000_0008;
+/// Module uses the static TLS model.
+pub const DF_STATIC_TLS: u64 = 0x0000_0010;
+
+/// Converts a single `DF_*` flag value (from `DT_FLAGS`) to its string
+/// representation, or `"UNKNOWN_TAG"` if it is not exactly one known flag.
+pub fn df_tag_to_str(tag: u64) -> &'static str {
+ // Flag/name pairs; values are distinct bits, so at most one entry matches.
+ const FLAGS: [(u64, &'static str); 5] = [
+ (DF_ORIGIN, "DF_ORIGIN"),
+ (DF_SYMBOLIC, "DF_SYMBOLIC"),
+ (DF_TEXTREL, "DF_TEXTREL"),
+ (DF_BIND_NOW, "DF_BIND_NOW"),
+ (DF_STATIC_TLS, "DF_STATIC_TLS"),
+ ];
+ FLAGS
+ .iter()
+ .find(|&&(value, _)| value == tag)
+ .map_or("UNKNOWN_TAG", |&(_, name)| name)
+}
+
+// === State flags ===
+// Selectable in the `d_un.d_val` element of the DT_FLAGS_1 entry in the dynamic section.
+//
+/// Set RTLD_NOW for this object.
+pub const DF_1_NOW: u64 = 0x0000_0001;
+/// Set RTLD_GLOBAL for this object.
+pub const DF_1_GLOBAL: u64 = 0x0000_0002;
+/// Set RTLD_GROUP for this object.
+pub const DF_1_GROUP: u64 = 0x0000_0004;
+/// Set RTLD_NODELETE for this object.
+pub const DF_1_NODELETE: u64 = 0x0000_0008;
+/// Trigger filtee loading at runtime.
+pub const DF_1_LOADFLTR: u64 = 0x0000_0010;
+/// Set RTLD_INITFIRST for this object.
+pub const DF_1_INITFIRST: u64 = 0x0000_0020;
+/// Set RTLD_NOOPEN for this object.
+pub const DF_1_NOOPEN: u64 = 0x0000_0040;
+/// $ORIGIN must be handled.
+pub const DF_1_ORIGIN: u64 = 0x0000_0080;
+/// Direct binding enabled.
+pub const DF_1_DIRECT: u64 = 0x0000_0100;
+/// (Solaris-defined; semantics per the Solaris Linker and Libraries Guide.)
+pub const DF_1_TRANS: u64 = 0x0000_0200;
+/// Object is used to interpose.
+pub const DF_1_INTERPOSE: u64 = 0x0000_0400;
+/// Ignore default lib search path.
+pub const DF_1_NODEFLIB: u64 = 0x0000_0800;
+/// Object can't be dldump'ed.
+pub const DF_1_NODUMP: u64 = 0x0000_1000;
+/// Configuration alternative created.
+pub const DF_1_CONFALT: u64 = 0x0000_2000;
+/// Filtee terminates filters search.
+pub const DF_1_ENDFILTEE: u64 = 0x0000_4000;
+/// Disp reloc applied at build time.
+pub const DF_1_DISPRELDNE: u64 = 0x0000_8000;
+/// Disp reloc applied at run-time.
+pub const DF_1_DISPRELPND: u64 = 0x0001_0000;
+/// Object has no-direct binding.
+pub const DF_1_NODIRECT: u64 = 0x0002_0000;
+/// (Solaris-internal; see the Solaris Linker and Libraries Guide.)
+pub const DF_1_IGNMULDEF: u64 = 0x0004_0000;
+/// (Solaris-internal; see the Solaris Linker and Libraries Guide.)
+pub const DF_1_NOKSYMS: u64 = 0x0008_0000;
+/// (Solaris-internal; see the Solaris Linker and Libraries Guide.)
+pub const DF_1_NOHDR: u64 = 0x0010_0000;
+/// Object is modified after built.
+pub const DF_1_EDITED: u64 = 0x0020_0000;
+/// (Solaris-internal; see the Solaris Linker and Libraries Guide.)
+pub const DF_1_NORELOC: u64 = 0x0040_0000;
+/// Object has individual interposers.
+pub const DF_1_SYMINTPOSE: u64 = 0x0080_0000;
+/// Global auditing required.
+pub const DF_1_GLOBAUDIT: u64 = 0x0100_0000;
+/// Singleton symbols are used.
+pub const DF_1_SINGLETON: u64 = 0x0200_0000;
+/// Object is a Position Independent Executable (PIE).
+pub const DF_1_PIE: u64 = 0x0800_0000;
+
+pub fn df_1_tag_to_str(tag: u64) -> &'static str {
+ match tag {
+ DF_1_NOW => "DF_1_NOW",
+ DF_1_GLOBAL => "DF_1_GLOBAL",
+ DF_1_GROUP => "DF_1_GROUP",
+ DF_1_NODELETE => "DF_1_NODELETE",
+ DF_1_LOADFLTR => "DF_1_LOADFLTR",
+ DF_1_INITFIRST => "DF_1_INITFIRST",
+ DF_1_NOOPEN => "DF_1_NOOPEN",
+ DF_1_ORIGIN => "DF_1_ORIGIN",
+ DF_1_DIRECT => "DF_1_DIRECT",
+ DF_1_TRANS => "DF_1_TRANS",
+ DF_1_INTERPOSE => "DF_1_INTERPOSE",
+ DF_1_NODEFLIB => "DF_1_NODEFLIB",
+ DF_1_NODUMP => "DF_1_NODUMP",
+ DF_1_CONFALT => "DF_1_CONFALT",
+ DF_1_ENDFILTEE => "DF_1_ENDFILTEE",
+ DF_1_DISPRELDNE => "DF_1_DISPRELDNE",
+ DF_1_DISPRELPND => "DF_1_DISPRELPND",
+ DF_1_NODIRECT => "DF_1_NODIRECT",
+ DF_1_IGNMULDEF => "DF_1_IGNMULDEF",
+ DF_1_NOKSYMS => "DF_1_NOKSYMS",
+ DF_1_NOHDR => "DF_1_NOHDR",
+ DF_1_EDITED => "DF_1_EDITED",
+ DF_1_NORELOC => "DF_1_NORELOC",
+ DF_1_SYMINTPOSE => "DF_1_SYMINTPOSE",
+ DF_1_GLOBAUDIT => "DF_1_GLOBAUDIT",
+ DF_1_SINGLETON => "DF_1_SINGLETON",
+ DF_1_PIE => "DF_1_PIE",
+ _ => "UNKNOWN_TAG",
+ }
+}
+
+if_alloc! {
+ use core::fmt;
+ use scroll::ctx;
+ use core::result;
+ use crate::container::{Ctx, Container};
+ use crate::strtab::Strtab;
+ use alloc::vec::Vec;
+
+ #[derive(Default, PartialEq, Clone)]
+ pub struct Dyn {
+ pub d_tag: u64,
+ pub d_val: u64,
+ }
+
+ impl Dyn {
+ #[inline]
+ pub fn size(container: Container) -> usize {
+ use scroll::ctx::SizeWith;
+ Self::size_with(&Ctx::from(container))
+ }
+ }
+
+ impl fmt::Debug for Dyn {
+ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+ f.debug_struct("Dyn")
+ .field("d_tag", &tag_to_str(self.d_tag))
+ .field("d_val", &format_args!("0x{:x}", self.d_val))
+ .finish()
+ }
+ }
+
+ impl ctx::SizeWith<Ctx> for Dyn {
+ fn size_with(&Ctx { container, .. }: &Ctx) -> usize {
+ match container {
+ Container::Little => {
+ dyn32::SIZEOF_DYN
+ },
+ Container::Big => {
+ dyn64::SIZEOF_DYN
+ },
+ }
+ }
+ }
+
+ impl<'a> ctx::TryFromCtx<'a, Ctx> for Dyn {
+ type Error = crate::error::Error;
+ fn try_from_ctx(bytes: &'a [u8], Ctx { container, le}: Ctx) -> result::Result<(Self, usize), Self::Error> {
+ use scroll::Pread;
+ let dynamic = match container {
+ Container::Little => {
+ (bytes.pread_with::<dyn32::Dyn>(0, le)?.into(), dyn32::SIZEOF_DYN)
+ },
+ Container::Big => {
+ (bytes.pread_with::<dyn64::Dyn>(0, le)?.into(), dyn64::SIZEOF_DYN)
+ }
+ };
+ Ok(dynamic)
+ }
+ }
+
+ impl ctx::TryIntoCtx<Ctx> for Dyn {
+ type Error = crate::error::Error;
+ fn try_into_ctx(self, bytes: &mut [u8], Ctx { container, le}: Ctx) -> result::Result<usize, Self::Error> {
+ use scroll::Pwrite;
+ match container {
+ Container::Little => {
+ let dynamic: dyn32::Dyn = self.into();
+ Ok(bytes.pwrite_with(dynamic, 0, le)?)
+ },
+ Container::Big => {
+ let dynamic: dyn64::Dyn = self.into();
+ Ok(bytes.pwrite_with(dynamic, 0, le)?)
+ }
+ }
+ }
+ }
+
+ #[derive(Debug)]
+ pub struct Dynamic {
+ pub dyns: Vec<Dyn>,
+ pub info: DynamicInfo,
+ }
+
+ impl Dynamic {
+ #[cfg(feature = "endian_fd")]
+ /// Returns a vector of dynamic entries from the underlying byte `bytes`, with `endianness`, using the provided `phdrs`
+ pub fn parse(bytes: &[u8], phdrs: &[crate::elf::program_header::ProgramHeader], ctx: Ctx) -> crate::error::Result<Option<Self>> {
+ use scroll::ctx::SizeWith;
+ use scroll::Pread;
+ use crate::elf::program_header;
+ for phdr in phdrs {
+ if phdr.p_type == program_header::PT_DYNAMIC {
+ let offset = phdr.p_offset as usize;
+ let filesz = phdr.p_filesz as usize;
+ // Ensure offset and filesz are valid.
+ let bytes = if filesz > 0 {
+ bytes
+ .pread_with::<&[u8]>(offset, filesz)
+ .map_err(|_| crate::error::Error::Malformed(format!("Invalid PT_DYNAMIC size (offset {:#x}, filesz {:#x})",
+ offset, filesz)))?
+ } else {
+ &[]
+ };
+ let size = Dyn::size_with(&ctx);
+ // the validity of `count` was implicitly checked by reading `bytes`.
+ let count = filesz / size;
+ let mut dyns = Vec::with_capacity(count);
+ let mut offset = 0;
+ for _ in 0..count {
+ let dynamic = bytes.gread_with::<Dyn>(&mut offset, ctx)?;
+ let tag = dynamic.d_tag;
+ dyns.push(dynamic);
+ if tag == DT_NULL { break }
+ }
+ let mut info = DynamicInfo::default();
+ for dynamic in &dyns {
+ info.update(phdrs, dynamic);
+ }
+ return Ok(Some(Dynamic { dyns: dyns, info: info, }));
+ }
+ }
+ Ok(None)
+ }
+
+ pub fn get_libraries<'a>(&self, strtab: &Strtab<'a>) -> Vec<&'a str> {
+ use log::warn;
+ let count = self.info.needed_count.min(self.dyns.len());
+ let mut needed = Vec::with_capacity(count);
+ for dynamic in &self.dyns {
+ if dynamic.d_tag as u64 == DT_NEEDED {
+ if let Some(lib) = strtab.get_at(dynamic.d_val as usize) {
+ needed.push(lib)
+ } else {
+ warn!("Invalid DT_NEEDED {}", dynamic.d_val)
+ }
+ }
+ }
+ needed
+ }
+ }
+}
+
+macro_rules! elf_dyn_std_impl {
+ ($size:ident, $phdr:ty) => {
+
+ #[cfg(test)]
+ mod tests {
+ use super::*;
+ #[test]
+ fn size_of() {
+ assert_eq!(::std::mem::size_of::<Dyn>(), SIZEOF_DYN);
+ }
+ }
+
+ if_alloc! {
+ use core::fmt;
+ use core::slice;
+ use alloc::vec::Vec;
+
+ use crate::elf::program_header::{PT_DYNAMIC};
+ use crate::strtab::Strtab;
+
+ use crate::elf::dynamic::Dyn as ElfDyn;
+
+ if_std! {
+ use std::fs::File;
+ use std::io::{Read, Seek};
+ use std::io::SeekFrom::Start;
+ use crate::error::Result;
+ }
+
+ impl From<ElfDyn> for Dyn {
+ fn from(dynamic: ElfDyn) -> Self {
+ Dyn {
+ d_tag: dynamic.d_tag as $size,
+ d_val: dynamic.d_val as $size,
+ }
+ }
+ }
+ impl From<Dyn> for ElfDyn {
+ fn from(dynamic: Dyn) -> Self {
+ ElfDyn {
+ d_tag: u64::from(dynamic.d_tag),
+ d_val: u64::from(dynamic.d_val),
+ }
+ }
+ }
+
+ impl fmt::Debug for Dyn {
+ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+ f.debug_struct("Dyn")
+ .field("d_tag", &tag_to_str(u64::from(self.d_tag)))
+ .field("d_val", &format_args!("0x{:x}", self.d_val))
+ .finish()
+ }
+ }
+
+ /// Returns a vector of dynamic entries from the given fd and program headers
+ #[cfg(feature = "std")]
+ pub fn from_fd(mut fd: &File, phdrs: &[$phdr]) -> Result<Option<Vec<Dyn>>> {
+ for phdr in phdrs {
+ if phdr.p_type == PT_DYNAMIC {
+ // FIXME: validate filesz before allocating
+ let filesz = phdr.p_filesz as usize;
+ let dync = filesz / SIZEOF_DYN;
+ let mut dyns = vec![Dyn::default(); dync];
+ fd.seek(Start(u64::from(phdr.p_offset)))?;
+ unsafe {
+ fd.read_exact(plain::as_mut_bytes(&mut *dyns))?;
+ }
+ dyns.dedup();
+ return Ok(Some(dyns));
+ }
+ }
+ Ok(None)
+ }
+
+ /// Given a bias and a memory address (typically for a _correctly_ mmap'd binary in memory), returns the `_DYNAMIC` array as a slice of that memory
+ pub unsafe fn from_raw<'a>(bias: usize, vaddr: usize) -> &'a [Dyn] {
+ let dynp = vaddr.wrapping_add(bias) as *const Dyn;
+ let mut idx = 0;
+ while u64::from((*dynp.offset(idx)).d_tag) != DT_NULL {
+ idx += 1;
+ }
+ slice::from_raw_parts(dynp, idx as usize)
+ }
+
+ // TODO: these bare functions have always seemed awkward, but not sure where they should go...
+ /// Maybe gets and returns the dynamic array with the same lifetime as the [phdrs], using the provided bias with wrapping addition.
+ /// If the bias is wrong, it will either segfault or give you incorrect values, beware
+ pub unsafe fn from_phdrs(bias: usize, phdrs: &[$phdr]) -> Option<&[Dyn]> {
+ for phdr in phdrs {
+ // FIXME: change to casting to u64 similar to DT_*?
+ if phdr.p_type as u32 == PT_DYNAMIC {
+ return Some(from_raw(bias, phdr.p_vaddr as usize));
+ }
+ }
+ None
+ }
+
+ /// Gets the needed libraries from the `_DYNAMIC` array, with the str slices lifetime tied to the dynamic array/strtab's lifetime(s)
+ pub unsafe fn get_needed<'a>(dyns: &[Dyn], strtab: *const Strtab<'a>, count: usize) -> Vec<&'a str> {
+ let mut needed = Vec::with_capacity(count.min(dyns.len()));
+ for dynamic in dyns {
+ if u64::from(dynamic.d_tag) == DT_NEEDED {
+ let lib = &(*strtab)[dynamic.d_val as usize];
+ needed.push(lib);
+ }
+ }
+ needed
+ }
+ }
+ };
+}
+
+macro_rules! elf_dynamic_info_std_impl {
+ ($size:ident, $phdr:ty) => {
+ /// Convert a virtual memory address to a file offset
+ fn vm_to_offset(phdrs: &[$phdr], address: $size) -> Option<$size> {
+ for ph in phdrs {
+ if ph.p_type == crate::elf::program_header::PT_LOAD && address >= ph.p_vaddr {
+ let offset = address - ph.p_vaddr;
+ if offset < ph.p_memsz {
+ return ph.p_offset.checked_add(offset);
+ }
+ }
+ }
+ None
+ }
+
+ /// Important dynamic linking info generated via a single pass through the `_DYNAMIC` array
+ #[derive(Default, PartialEq)]
+ pub struct DynamicInfo {
+ pub rela: usize,
+ pub relasz: usize,
+ pub relaent: $size,
+ pub relacount: usize,
+ pub rel: usize,
+ pub relsz: usize,
+ pub relent: $size,
+ pub relcount: usize,
+ pub gnu_hash: Option<$size>,
+ pub hash: Option<$size>,
+ pub strtab: usize,
+ pub strsz: usize,
+ pub symtab: usize,
+ pub syment: usize,
+ pub pltgot: Option<$size>,
+ pub pltrelsz: usize,
+ pub pltrel: $size,
+ pub jmprel: usize,
+ pub verdef: $size,
+ pub verdefnum: $size,
+ pub verneed: $size,
+ pub verneednum: $size,
+ pub versym: $size,
+ pub init: $size,
+ pub fini: $size,
+ pub init_array: $size,
+ pub init_arraysz: usize,
+ pub fini_array: $size,
+ pub fini_arraysz: usize,
+ pub needed_count: usize,
+ pub flags: $size,
+ pub flags_1: $size,
+ pub soname: usize,
+ pub textrel: bool,
+ }
+
+ impl DynamicInfo {
+ #[inline]
+ pub fn update(&mut self, phdrs: &[$phdr], dynamic: &Dyn) {
+ match u64::from(dynamic.d_tag) {
+ DT_RELA => self.rela = vm_to_offset(phdrs, dynamic.d_val).unwrap_or(0) as usize, // .rela.dyn
+ DT_RELASZ => self.relasz = dynamic.d_val as usize,
+ DT_RELAENT => self.relaent = dynamic.d_val as _,
+ DT_RELACOUNT => self.relacount = dynamic.d_val as usize,
+ DT_REL => self.rel = vm_to_offset(phdrs, dynamic.d_val).unwrap_or(0) as usize, // .rel.dyn
+ DT_RELSZ => self.relsz = dynamic.d_val as usize,
+ DT_RELENT => self.relent = dynamic.d_val as _,
+ DT_RELCOUNT => self.relcount = dynamic.d_val as usize,
+ DT_GNU_HASH => self.gnu_hash = vm_to_offset(phdrs, dynamic.d_val),
+ DT_HASH => self.hash = vm_to_offset(phdrs, dynamic.d_val),
+ DT_STRTAB => {
+ self.strtab = vm_to_offset(phdrs, dynamic.d_val).unwrap_or(0) as usize
+ }
+ DT_STRSZ => self.strsz = dynamic.d_val as usize,
+ DT_SYMTAB => {
+ self.symtab = vm_to_offset(phdrs, dynamic.d_val).unwrap_or(0) as usize
+ }
+ DT_SYMENT => self.syment = dynamic.d_val as usize,
+ DT_PLTGOT => self.pltgot = vm_to_offset(phdrs, dynamic.d_val),
+ DT_PLTRELSZ => self.pltrelsz = dynamic.d_val as usize,
+ DT_PLTREL => self.pltrel = dynamic.d_val as _,
+ DT_JMPREL => {
+ self.jmprel = vm_to_offset(phdrs, dynamic.d_val).unwrap_or(0) as usize
+ } // .rela.plt
+ DT_VERDEF => self.verdef = vm_to_offset(phdrs, dynamic.d_val).unwrap_or(0),
+ DT_VERDEFNUM => self.verdefnum = vm_to_offset(phdrs, dynamic.d_val).unwrap_or(0),
+ DT_VERNEED => self.verneed = vm_to_offset(phdrs, dynamic.d_val).unwrap_or(0),
+ DT_VERNEEDNUM => self.verneednum = dynamic.d_val as _,
+ DT_VERSYM => self.versym = vm_to_offset(phdrs, dynamic.d_val).unwrap_or(0),
+ DT_INIT => self.init = vm_to_offset(phdrs, dynamic.d_val).unwrap_or(0),
+ DT_FINI => self.fini = vm_to_offset(phdrs, dynamic.d_val).unwrap_or(0),
+ DT_INIT_ARRAY => {
+ self.init_array = vm_to_offset(phdrs, dynamic.d_val).unwrap_or(0)
+ }
+ DT_INIT_ARRAYSZ => self.init_arraysz = dynamic.d_val as _,
+ DT_FINI_ARRAY => {
+ self.fini_array = vm_to_offset(phdrs, dynamic.d_val).unwrap_or(0)
+ }
+ DT_FINI_ARRAYSZ => self.fini_arraysz = dynamic.d_val as _,
+ DT_NEEDED => self.needed_count += 1,
+ DT_FLAGS => self.flags = dynamic.d_val as _,
+ DT_FLAGS_1 => self.flags_1 = dynamic.d_val as _,
+ DT_SONAME => self.soname = dynamic.d_val as _,
+ DT_TEXTREL => self.textrel = true,
+ _ => (),
+ }
+ }
+ pub fn new(dynamic: &[Dyn], phdrs: &[$phdr]) -> DynamicInfo {
+ let mut info = DynamicInfo::default();
+ for dyna in dynamic {
+ info.update(phdrs, &dyna);
+ }
+ info
+ }
+ }
+
+ if_alloc! {
+ impl fmt::Debug for DynamicInfo {
+ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+ let gnu_hash = self.gnu_hash.unwrap_or(0);
+ let hash = self.hash.unwrap_or(0);
+ let pltgot = self.pltgot.unwrap_or(0);
+
+ let flags: Vec<&'static str> = [DF_ORIGIN, DF_SYMBOLIC, DF_TEXTREL, DF_BIND_NOW, DF_STATIC_TLS,][..]
+ .iter()
+ .filter(|f| (self.flags as u64 & *f) != 0)
+ .map(|f| df_tag_to_str(*f))
+ .collect();
+
+ let flags_1: Vec<&'static str> = [
+ DF_1_NOW,
+ DF_1_GLOBAL,
+ DF_1_GROUP,
+ DF_1_NODELETE,
+ DF_1_LOADFLTR,
+ DF_1_INITFIRST,
+ DF_1_NOOPEN,
+ DF_1_ORIGIN,
+ DF_1_DIRECT,
+ DF_1_TRANS,
+ DF_1_INTERPOSE,
+ DF_1_NODEFLIB,
+ DF_1_NODUMP,
+ DF_1_CONFALT,
+ DF_1_ENDFILTEE,
+ DF_1_DISPRELDNE,
+ DF_1_DISPRELPND,
+ DF_1_NODIRECT,
+ DF_1_IGNMULDEF,
+ DF_1_NOKSYMS,
+ DF_1_NOHDR,
+ DF_1_EDITED,
+ DF_1_NORELOC,
+ DF_1_SYMINTPOSE,
+ DF_1_GLOBAUDIT,
+ DF_1_SINGLETON,
+ DF_1_PIE,
+ ][..]
+ .iter()
+ .filter(|f| (self.flags_1 as u64 & *f) != 0)
+ .map(|f| df_1_tag_to_str(*f))
+ .collect();
+
+ f.debug_struct("DynamicInfo")
+ .field("rela", &format_args!("0x{:x}", self.rela))
+ .field("relasz", &self.relasz)
+ .field("relaent", &self.relaent)
+ .field("relacount", &self.relacount)
+ .field("gnu_hash", &format_args!("0x{:x}", gnu_hash))
+ .field("hash", &format_args!("0x{:x}", hash))
+ .field("strtab", &format_args!("0x{:x}", self.strtab))
+ .field("strsz", &self.strsz)
+ .field("symtab", &format_args!("0x{:x}", self.symtab))
+ .field("syment", &self.syment)
+ .field("pltgot", &format_args!("0x{:x}", pltgot))
+ .field("pltrelsz", &self.pltrelsz)
+ .field("pltrel", &self.pltrel)
+ .field("jmprel", &format_args!("0x{:x}", self.jmprel))
+ .field("verdef", &format_args!("0x{:x}", self.verdef))
+ .field("verdefnum", &self.verdefnum)
+ .field("verneed", &format_args!("0x{:x}", self.verneed))
+ .field("verneednum", &self.verneednum)
+ .field("versym", &format_args!("0x{:x}", self.versym))
+ .field("init", &format_args!("0x{:x}", self.init))
+ .field("fini", &format_args!("0x{:x}", self.fini))
+ .field("init_array", &format_args!("{:#x}", self.init_array))
+ .field("init_arraysz", &self.init_arraysz)
+ .field("needed_count", &self.needed_count)
+ .field("flags", &format_args!("{:#0width$x} {:?}", self.flags, flags, width = core::mem::size_of_val(&self.flags)))
+ .field("flags_1", &format_args!("{:#0width$x} {:?}", self.flags_1, flags_1, width = core::mem::size_of_val(&self.flags_1)))
+ .field("soname", &self.soname)
+ .field("textrel", &self.textrel)
+ .finish()
+ }
+ }
+ }
+ };
+}
+
+if_alloc! {
+ elf_dynamic_info_std_impl!(u64, crate::elf::program_header::ProgramHeader);
+}
+
+pub mod dyn32 {
+ pub use crate::elf::dynamic::*;
+
+ elf_dyn!(u32);
+
+ pub const SIZEOF_DYN: usize = 8;
+
+ elf_dyn_std_impl!(u32, crate::elf32::program_header::ProgramHeader);
+ elf_dynamic_info_std_impl!(
+ u32,
+ crate::elf::program_header::program_header32::ProgramHeader
+ );
+}
+
+pub mod dyn64 {
+ pub use crate::elf::dynamic::*;
+
+ elf_dyn!(u64);
+
+ pub const SIZEOF_DYN: usize = 16;
+
+ elf_dyn_std_impl!(u64, crate::elf64::program_header::ProgramHeader);
+ elf_dynamic_info_std_impl!(
+ u64,
+ crate::elf::program_header::program_header64::ProgramHeader
+ );
+}
diff --git a/third_party/rust/goblin/src/elf/gnu_hash.rs b/third_party/rust/goblin/src/elf/gnu_hash.rs
new file mode 100644
index 0000000000..d785382c26
--- /dev/null
+++ b/third_party/rust/goblin/src/elf/gnu_hash.rs
@@ -0,0 +1,220 @@
+//! A GNU Hash table has 4 sections:
+//!
+//! 1. Header
+//! 2. Bloom Filter
+//! 3. Hash Buckets
+//! 4. Chains
+//!
+//! The header is an array of four `u32`s:
+//!
+//! 1. nbuckets
+//! 2. symndx
+//! 3. maskwords
+//! 4. shift2
+//!
+//! See more:
+//! * http://www.linker-aliens.org/blogs/ali/entry/gnu_hash_elf_sections
+//! or https://blogs.oracle.com/solaris/gnu-hash-elf-sections-v2
+//! * https://flapenguin.me/2017/05/10/elf-lookup-dt-gnu-hash/
+
+/// GNU hash function: accepts a symbol name and returns a value that may be
+/// used to compute a bucket index.
+///
+/// Consequently, if the hashing function returns the value `x` for some name,
+/// `buckets[x % nbuckets]` gives an index, `y`, into both the symbol table
+/// and the chain table.
+pub fn hash(symbol: &str) -> u32 {
+ const HASH_SEED: u32 = 5381;
+ symbol.bytes().fold(HASH_SEED, |hash, b| {
+ hash.wrapping_mul(33).wrapping_add(u32::from(b))
+ })
+}
+
+#[cfg(test)]
+mod tests {
+ use super::hash;
+ #[test]
+ fn test_hash() {
+ assert_eq!(hash(""), 0x0000_1505);
+ assert_eq!(hash("printf"), 0x156b_2bb8);
+ assert_eq!(hash("exit"), 0x7c96_7e3f);
+ assert_eq!(hash("syscall"), 0xbac2_12a0);
+ assert_eq!(hash("flapenguin.me"), 0x8ae9_f18e);
+ }
+}
+
+macro_rules! elf_gnu_hash_impl {
+ ($IntTy:ty) => {
+ use crate::elf::sym::Sym;
+ use crate::strtab::Strtab;
+ use core::fmt;
+ use core::mem;
+ use core::slice;
+
+ const INT_SIZE: usize = mem::size_of::<$IntTy>();
+ const U32_SIZE: usize = mem::size_of::<u32>();
+ /// Size of a bits mask in bloom filter
+ const ELFCLASS_BITS: u32 = INT_SIZE as u32 * 8;
+
+ /// A better hash table for the ELF used by GNU systems in GNU-compatible software.
+ pub struct GnuHash<'a> {
+ /// Index of the first symbol in the `.dynsym` table which is accessible with
+ /// the hash table
+ symindex: u32,
+ /// Shift count used in the bloom filter
+ shift2: u32,
+ /// 2 bit bloom filter on `chains`
+ // Either 32 or 64-bit depending on the class of object
+ bloom_filter: &'a [$IntTy],
+ /// GNU hash table bucket array; indexes start at 0. This array holds symbol
+ /// table indexes and contains the index of hashes in `chains`
+ buckets: &'a [u32],
+ /// Hash values; indexes start at 0. This array holds symbol table indexes.
+ chains: &'a [u32], // => chains[dynsyms.len() - symindex]
+ dynsyms: &'a [Sym],
+ }
+
+ impl fmt::Debug for GnuHash<'_> {
+ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+ f.debug_struct("GnuHash")
+ .field("nbuckets", &self.buckets.len())
+ .field("symindex", &self.symindex)
+ .field("maskwords", &(self.bloom_filter.len() - 1))
+ .field("shift2", &self.shift2)
+ .field("bloom_filter", &self.bloom_filter.as_ptr())
+ .field("bucket", &self.buckets.as_ptr())
+ .field("chains", &self.chains.as_ptr())
+ .finish()
+ }
+ }
+
+ impl<'a> GnuHash<'a> {
+ /// Initialize a GnuHash from a byte slice over the `.gnu.hash` section
+ /// and total number of dynamic symbols.
+ /// # Safety
+ ///
+ /// This function creates a `GnuHash` directly from a raw pointer
+ pub unsafe fn from_raw_table(
+ hashtab: &'a [u8],
+ dynsyms: &'a [Sym],
+ ) -> Result<Self, &'static str> {
+ if hashtab.as_ptr() as usize % INT_SIZE != 0 {
+ return Err("hashtab is not aligned with 64-bit");
+ }
+
+ if hashtab.len() <= 16 {
+ return Err("failed to read in number of buckets");
+ }
+
+ let [nbuckets, symindex, maskwords, shift2] =
+ (hashtab.as_ptr() as *const u32 as *const [u32; 4]).read();
+
+ if !maskwords.is_power_of_two() {
+ return Err("maskwords must be a power of two");
+ }
+
+ let hashtab = &hashtab[16..];
+ {
+ // SAFETY: Condition to check for an overflow
+ // size_of(chains) + size_of(buckets) + size_of(bloom_filter) == size_of(hashtab)
+
+ if dynsyms.len() <= symindex as usize {
+ return Err("symindex must be smaller than dynsyms.len()");
+ }
+ let chains_size = (dynsyms.len() - symindex as usize).checked_mul(U32_SIZE);
+ let buckets_size = (nbuckets as usize).checked_mul(U32_SIZE);
+ let bloom_size = (maskwords as usize).checked_mul(INT_SIZE);
+
+ let total_size = match (chains_size, buckets_size, bloom_size) {
+ (Some(a), Some(b), Some(c)) => {
+ a.checked_add(b).and_then(|t| t.checked_add(c))
+ }
+ _ => None,
+ };
+ match total_size {
+ Some(size) if size == hashtab.len() => {}
+ _ => return Err("index out of bound or non-complete hash section"),
+ }
+ }
+
+ let bloom_filter_ptr = hashtab.as_ptr() as *const $IntTy;
+ let buckets_ptr = bloom_filter_ptr.add(maskwords as usize) as *const u32;
+ let chains_ptr = buckets_ptr.add(nbuckets as usize);
+ let bloom_filter = slice::from_raw_parts(bloom_filter_ptr, maskwords as usize);
+ let buckets = slice::from_raw_parts(buckets_ptr, nbuckets as usize);
+ let chains = slice::from_raw_parts(chains_ptr, dynsyms.len() - symindex as usize);
+ Ok(Self {
+ symindex,
+ shift2,
+ bloom_filter,
+ buckets,
+ chains,
+ dynsyms,
+ })
+ }
+
+ /// Locate the hash chain, and corresponding hash value element.
+ #[cold]
+ fn lookup(&self, symbol: &str, hash: u32, dynstrtab: &Strtab) -> Option<&'a Sym> {
+ const MASK_LOWEST_BIT: u32 = 0xffff_fffe;
+ let bucket = self.buckets[hash as usize % self.buckets.len()];
+
+ // Empty hash chain, symbol not present
+ if bucket < self.symindex {
+ return None;
+ }
+ // Walk the chain until the symbol is found or the chain is exhausted.
+ let chain_idx = bucket - self.symindex;
+ let hash = hash & MASK_LOWEST_BIT;
+ let chains = &self.chains.get((chain_idx as usize)..)?;
+ let dynsyms = &self.dynsyms.get((bucket as usize)..)?;
+ for (hash2, symb) in chains.iter().zip(dynsyms.iter()) {
+ if (hash == (hash2 & MASK_LOWEST_BIT))
+ && (symbol == &dynstrtab[symb.st_name as usize])
+ {
+ return Some(symb);
+ }
+ // Chain ends with an element with the lowest bit set to 1.
+ if hash2 & 1 == 1 {
+ break;
+ }
+ }
+ None
+ }
+
+ /// Check if symbol maybe is in the hash table, or definitely not in it.
+ #[inline]
+ fn check_maybe_match(&self, hash: u32) -> bool {
+ const MASK: u32 = ELFCLASS_BITS - 1;
+ let hash2 = hash >> self.shift2;
+ // `x & (N - 1)` is equivalent to `x % N` iff `N = 2^y`.
+ let bitmask: $IntTy = 1 << (hash & (MASK)) | 1 << (hash2 & MASK);
+ let bloom_idx = (hash / ELFCLASS_BITS) & (self.bloom_filter.len() as u32 - 1);
+ let bitmask_word = self.bloom_filter[bloom_idx as usize];
+ (bitmask_word & bitmask) == bitmask
+ }
+
+ /// Given a symbol, a hash of that symbol, a dynamic string table and
+ /// a `dynstrtab` to cross-reference names, maybe returns a Sym.
+ pub fn find(&self, symbol: &str, dynstrtab: &Strtab) -> Option<&'a Sym> {
+ let hash = self::hash(symbol);
+ self.find_with_hash(symbol, hash, dynstrtab)
+ }
+
+ /// This function will not check if the passed `hash` is really
+ /// the hash of `symbol`
+ pub fn find_with_hash(
+ &self,
+ symbol: &str,
+ hash: u32,
+ dynstrtab: &Strtab,
+ ) -> Option<&'a Sym> {
+ if self.check_maybe_match(hash) {
+ self.lookup(symbol, hash, dynstrtab)
+ } else {
+ None
+ }
+ }
+ }
+ };
+}
diff --git a/third_party/rust/goblin/src/elf/header.rs b/third_party/rust/goblin/src/elf/header.rs
new file mode 100644
index 0000000000..2c05e8ffc6
--- /dev/null
+++ b/third_party/rust/goblin/src/elf/header.rs
@@ -0,0 +1,630 @@
+include!("constants_header.rs");
+
+macro_rules! elf_header {
+ ($size:ident) => {
+ use core::fmt;
+
+ #[repr(C)]
+ #[derive(Clone, Copy, Default, PartialEq)]
+ pub struct Header {
+ /// Magic number and other info
+ pub e_ident: [u8; SIZEOF_IDENT],
+ /// Object file type
+ pub e_type: u16,
+ /// Architecture
+ pub e_machine: u16,
+ /// Object file version
+ pub e_version: u32,
+ /// Entry point virtual address
+ pub e_entry: $size,
+ /// Program header table file offset
+ pub e_phoff: $size,
+ /// Section header table file offset
+ pub e_shoff: $size,
+ /// Processor-specific flags
+ pub e_flags: u32,
+ /// ELF header size in bytes
+ pub e_ehsize: u16,
+ /// Program header table entry size
+ pub e_phentsize: u16,
+ /// Program header table entry count
+ pub e_phnum: u16,
+ /// Section header table entry size
+ pub e_shentsize: u16,
+ /// Section header table entry count
+ pub e_shnum: u16,
+ /// Section header string table index
+ pub e_shstrndx: u16,
+ }
+
+ use plain;
+ // Declare that this is a plain type.
+ unsafe impl plain::Plain for Header {}
+
+ impl Header {
+ /// Returns the corresponding ELF header from the given byte array.
+ pub fn from_bytes(bytes: &[u8; SIZEOF_EHDR]) -> &Header {
+ // FIXME: Length is ensured correct because it's encoded in the type,
+ // but it can still panic due to invalid alignment.
+ plain::from_bytes(bytes).unwrap()
+ }
+ }
+ impl fmt::Debug for Header {
+ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+ f.debug_struct("Header")
+ .field("e_ident", &format_args!("{:?}", self.e_ident))
+ .field("e_type", &et_to_str(self.e_type))
+ .field("e_machine", &format_args!("0x{:x}", self.e_machine))
+ .field("e_version", &format_args!("0x{:x}", self.e_version))
+ .field("e_entry", &format_args!("0x{:x}", self.e_entry))
+ .field("e_phoff", &format_args!("0x{:x}", self.e_phoff))
+ .field("e_shoff", &format_args!("0x{:x}", self.e_shoff))
+ .field("e_flags", &format_args!("{:x}", self.e_flags))
+ .field("e_ehsize", &self.e_ehsize)
+ .field("e_phentsize", &self.e_phentsize)
+ .field("e_phnum", &self.e_phnum)
+ .field("e_shentsize", &self.e_shentsize)
+ .field("e_shnum", &self.e_shnum)
+ .field("e_shstrndx", &self.e_shstrndx)
+ .finish()
+ }
+ }
+ };
+}
+
+/// No file type.
+pub const ET_NONE: u16 = 0;
+/// Relocatable file.
+pub const ET_REL: u16 = 1;
+/// Executable file.
+pub const ET_EXEC: u16 = 2;
+/// Shared object file.
+pub const ET_DYN: u16 = 3;
+/// Core file.
+pub const ET_CORE: u16 = 4;
+/// Number of defined types.
+pub const ET_NUM: u16 = 5;
+/// OS-specific range start
+pub const ET_LOOS: u16 = 0xfe00;
+/// OS-specific range end
+pub const ET_HIOS: u16 = 0xfeff;
+/// Processor-specific range start
+pub const ET_LOPROC: u16 = 0xff00;
+/// Processor-specific range end
+pub const ET_HIPROC: u16 = 0xffff;
+
+/// The ELF magic number.
+pub const ELFMAG: &[u8; 4] = b"\x7FELF";
+/// Sizeof ELF magic number.
+pub const SELFMAG: usize = 4;
+
+/// File class byte index.
+pub const EI_CLASS: usize = 4;
+/// Invalid class.
+pub const ELFCLASSNONE: u8 = 0;
+/// 32-bit objects.
+pub const ELFCLASS32: u8 = 1;
+/// 64-bit objects.
+pub const ELFCLASS64: u8 = 2;
+/// ELF class number.
+pub const ELFCLASSNUM: u8 = 3;
+
+/// Data encoding byte index.
+pub const EI_DATA: usize = 5;
+/// Invalid data encoding.
+pub const ELFDATANONE: u8 = 0;
+/// 2's complement, little endian.
+pub const ELFDATA2LSB: u8 = 1;
+/// 2's complement, big endian.
+pub const ELFDATA2MSB: u8 = 2;
+
+/// File version byte index.
+pub const EI_VERSION: usize = 6;
+/// Current ELF version.
+pub const EV_CURRENT: u8 = 1;
+
+/// OS ABI byte index.
+pub const EI_OSABI: usize = 7;
+/// UNIX System V ABI.
+pub const ELFOSABI_NONE: u8 = 0;
+/// UNIX System V ABI.
+///
+/// Alias.
+pub const ELFOSABI_SYSV: u8 = ELFOSABI_NONE;
+/// HP-UX.
+pub const ELFOSABI_HPUX: u8 = 1;
+/// NetBSD.
+pub const ELFOSABI_NETBSD: u8 = 2;
+/// Object uses GNU ELF extensions.
+pub const ELFOSABI_GNU: u8 = 3;
+/// Object uses GNU ELF extensions.
+///
+/// Alias.
+pub const ELFOSABI_LINUX: u8 = ELFOSABI_GNU;
+/// Sun Solaris.
+pub const ELFOSABI_SOLARIS: u8 = 6;
+/// IBM AIX.
+pub const ELFOSABI_AIX: u8 = 7;
+/// SGI Irix.
+pub const ELFOSABI_IRIX: u8 = 8;
+/// FreeBSD
+pub const ELFOSABI_FREEBSD: u8 = 9;
+/// Compaq TRU64 UNIX.
+pub const ELFOSABI_TRU64: u8 = 10;
+/// Novell Modesto.
+pub const ELFOSABI_MODESTO: u8 = 11;
+/// OpenBSD.
+pub const ELFOSABI_OPENBSD: u8 = 12;
+/// ARM EABI.
+pub const ELFOSABI_ARM_AEABI: u8 = 64;
+/// ARM.
+pub const ELFOSABI_ARM: u8 = 97;
+/// Standalone (embedded) application.
+pub const ELFOSABI_STANDALONE: u8 = 255;
+
+/// ABI version byte index.
+pub const EI_ABIVERSION: usize = 8;
+
+/// Number of bytes in an identifier.
+pub const SIZEOF_IDENT: usize = 16;
+
+/// Convert a ELF class byte to the associated string.
+#[inline]
+pub fn class_to_str(et: u8) -> &'static str {
+ match et {
+ ELFCLASSNONE => "NONE",
+ ELFCLASS32 => "ELF32",
+ ELFCLASS64 => "ELF64",
+ _ => "UNKNOWN_CLASS",
+ }
+}
+
+/// Convert an ET value to their associated string.
+#[inline]
+pub fn et_to_str(et: u16) -> &'static str {
+ match et {
+ ET_NONE => "NONE",
+ ET_REL => "REL",
+ ET_EXEC => "EXEC",
+ ET_DYN => "DYN",
+ ET_CORE => "CORE",
+ ET_NUM => "NUM",
+ _ => "UNKNOWN_ET",
+ }
+}
+
+if_alloc! {
+ use crate::error;
+ use scroll::{ctx, Endian};
+ use core::fmt;
+ use crate::container::{Ctx, Container};
+ use alloc::string::ToString;
+
+ #[derive(Copy, Clone, PartialEq)]
+ /// An ELF header
+ pub struct Header {
+ pub e_ident : [u8; SIZEOF_IDENT],
+ pub e_type : u16,
+ pub e_machine : u16,
+ pub e_version : u32,
+ pub e_entry : u64,
+ pub e_phoff : u64,
+ pub e_shoff : u64,
+ pub e_flags : u32,
+ pub e_ehsize : u16,
+ pub e_phentsize : u16,
+ pub e_phnum : u16,
+ pub e_shentsize : u16,
+ pub e_shnum : u16,
+ pub e_shstrndx : u16,
+ }
+
+ impl Header {
+ /// Return the size of the underlying program header, given a `container`
+ #[inline]
+ pub fn size(ctx: Ctx) -> usize {
+ use scroll::ctx::SizeWith;
+ Self::size_with(&ctx)
+ }
+ /// Returns the container type this header specifies
+ pub fn container(&self) -> error::Result<Container> {
+ use crate::error::Error;
+ match self.e_ident[EI_CLASS] {
+ ELFCLASS32 => { Ok(Container::Little) },
+ ELFCLASS64 => { Ok(Container::Big) },
+ class => Err(Error::Malformed(format!("Invalid class in Header: {}", class)))
+ }
+ }
+ /// Returns the byte order this header specifies
+ pub fn endianness(&self) -> error::Result<scroll::Endian> {
+ use crate::error::Error;
+ match self.e_ident[EI_DATA] {
+ ELFDATA2LSB => { Ok(scroll::LE) },
+ ELFDATA2MSB => { Ok(scroll::BE) },
+ class => Err(Error::Malformed(format!("Invalid endianness in Header: {}", class)))
+ }
+ }
+ pub fn new(ctx: Ctx) -> Self {
+ use crate::elf32;
+ use crate::elf64;
+ let (typ, ehsize, phentsize, shentsize) = match ctx.container {
+ Container::Little => {
+ (ELFCLASS32, header32::SIZEOF_EHDR,
+ elf32::program_header::SIZEOF_PHDR,
+ elf32::section_header::SIZEOF_SHDR)
+ },
+ Container::Big => {
+ (ELFCLASS64, header64::SIZEOF_EHDR,
+ elf64::program_header::SIZEOF_PHDR,
+ elf64::section_header::SIZEOF_SHDR)
+ }
+ };
+ let byteorder = match ctx.le { Endian::Little => ELFDATA2LSB, Endian::Big => ELFDATA2MSB };
+ Header {
+ e_ident: [
+ 127,
+ 69,
+ 76,
+ 70,
+ typ,
+ byteorder,
+ EV_CURRENT,
+ ELFOSABI_NONE,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0
+ ],
+ e_type: ET_DYN,
+ e_machine: EM_NONE,
+ e_version: 1,
+ e_entry: 0x0,
+ e_phoff: 0x0,
+ e_shoff: 0x0,
+ e_flags: 0,
+ e_ehsize: ehsize as u16,
+ e_phentsize: phentsize as u16,
+ e_phnum: 0,
+ e_shentsize: shentsize as u16,
+ e_shnum: 0,
+ e_shstrndx: 0,
+ }
+ }
+ }
+
+ impl fmt::Debug for Header {
+ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+ f.debug_struct("Header")
+ .field("e_ident", &format_args!("{:?}", self.e_ident))
+ .field("e_type", &et_to_str(self.e_type))
+ .field("e_machine", &format_args!("0x{:x}", self.e_machine))
+ .field("e_version", &format_args!("0x{:x}", self.e_version))
+ .field("e_entry", &format_args!("0x{:x}", self.e_entry))
+ .field("e_phoff", &format_args!("0x{:x}", self.e_phoff))
+ .field("e_shoff", &format_args!("0x{:x}", self.e_shoff))
+ .field("e_flags", &format_args!("{:x}", self.e_flags))
+ .field("e_ehsize", &self.e_ehsize)
+ .field("e_phentsize", &self.e_phentsize)
+ .field("e_phnum", &self.e_phnum)
+ .field("e_shentsize", &self.e_shentsize)
+ .field("e_shnum", &self.e_shnum)
+ .field("e_shstrndx", &self.e_shstrndx)
+ .finish()
+ }
+ }
+
+ impl ctx::SizeWith<crate::container::Ctx> for Header {
+ fn size_with(ctx: &crate::container::Ctx) -> usize {
+ match ctx.container {
+ Container::Little => {
+ header32::SIZEOF_EHDR
+ },
+ Container::Big => {
+ header64::SIZEOF_EHDR
+ },
+ }
+ }
+ }
+
+ impl<'a> ctx::TryFromCtx<'a, scroll::Endian> for Header {
+ type Error = crate::error::Error;
+ fn try_from_ctx(bytes: &'a [u8], _ctx: scroll::Endian) -> error::Result<(Self, usize)> {
+ use scroll::Pread;
+ if bytes.len() < SIZEOF_IDENT {
+ return Err(error::Error::Malformed("Too small".to_string()));
+ }
+ let ident: &[u8] = &bytes[..SIZEOF_IDENT];
+ if &ident[0..SELFMAG] != ELFMAG {
+ let magic: u64 = ident.pread_with(0, scroll::LE)?;
+ return Err(error::Error::BadMagic(magic));
+ }
+ let class = ident[EI_CLASS];
+ match class {
+ ELFCLASS32 => {
+ Ok((Header::from(bytes.pread::<header32::Header>(0)?), header32::SIZEOF_EHDR))
+ },
+ ELFCLASS64 => {
+ Ok((Header::from(bytes.pread::<header64::Header>(0)?), header64::SIZEOF_EHDR))
+ },
+ _ => {
+ Err(error::Error::Malformed(format!("invalid ELF class {:x}", class)))
+ }
+ }
+ }
+ }
+
+ impl ctx::TryIntoCtx<scroll::Endian> for Header {
+ type Error = crate::error::Error;
+ fn try_into_ctx(self, bytes: &mut [u8], _ctx: scroll::Endian) -> Result<usize, Self::Error> {
+ use scroll::Pwrite;
+ match self.container()? {
+ Container::Little => {
+ bytes.pwrite(header32::Header::from(self), 0)
+ },
+ Container::Big => {
+ bytes.pwrite(header64::Header::from(self), 0)
+ }
+ }
+ }
+ }
+ impl ctx::IntoCtx<crate::container::Ctx> for Header {
+ fn into_ctx(self, bytes: &mut [u8], ctx: crate::container::Ctx) {
+ use scroll::Pwrite;
+ match ctx.container {
+ Container::Little => {
+ bytes.pwrite_with(header32::Header::from(self), 0, ctx.le).unwrap()
+ },
+ Container::Big => {
+ bytes.pwrite_with(header64::Header::from(self), 0, ctx.le).unwrap()
+ }
+ };
+ }
+ }
+} // end if_alloc
+
+macro_rules! elf_header_std_impl {
+ ($size:expr, $width:ty) => {
+
+ if_alloc! {
+ use crate::elf::header::Header as ElfHeader;
+ use crate::error::Error;
+ #[cfg(any(feature = "std", feature = "endian_fd"))]
+ use crate::error::Result;
+
+ use scroll::{ctx, Pread};
+
+ use core::result;
+
+ if_std! {
+ use std::fs::File;
+ use std::io::{Read};
+ }
+
+ impl From<ElfHeader> for Header {
+ fn from(eh: ElfHeader) -> Self {
+ Header {
+ e_ident: eh.e_ident,
+ e_type: eh.e_type,
+ e_machine: eh.e_machine,
+ e_version: eh.e_version,
+ e_entry: eh.e_entry as $width,
+ e_phoff: eh.e_phoff as $width,
+ e_shoff: eh.e_shoff as $width,
+ e_flags: eh.e_flags,
+ e_ehsize: eh.e_ehsize,
+ e_phentsize: eh.e_phentsize,
+ e_phnum: eh.e_phnum,
+ e_shentsize: eh.e_shentsize,
+ e_shnum: eh.e_shnum,
+ e_shstrndx: eh.e_shstrndx,
+ }
+ }
+ }
+
+ impl From<Header> for ElfHeader {
+ fn from(eh: Header) -> Self {
+ ElfHeader {
+ e_ident: eh.e_ident,
+ e_type: eh.e_type,
+ e_machine: eh.e_machine,
+ e_version: eh.e_version,
+ e_entry: u64::from(eh.e_entry),
+ e_phoff: u64::from(eh.e_phoff),
+ e_shoff: u64::from(eh.e_shoff),
+ e_flags: eh.e_flags,
+ e_ehsize: eh.e_ehsize,
+ e_phentsize: eh.e_phentsize,
+ e_phnum: eh.e_phnum,
+ e_shentsize: eh.e_shentsize,
+ e_shnum: eh.e_shnum,
+ e_shstrndx: eh.e_shstrndx,
+ }
+ }
+ }
+
+ impl<'a> ctx::TryFromCtx<'a, scroll::Endian> for Header {
+ type Error = crate::error::Error;
+ fn try_from_ctx(bytes: &'a [u8], _: scroll::Endian) -> result::Result<(Self, usize), Self::Error> {
+ let mut elf_header = Header::default();
+ let offset = &mut 0;
+ bytes.gread_inout(offset, &mut elf_header.e_ident)?;
+ let endianness =
+ match elf_header.e_ident[EI_DATA] {
+ ELFDATA2LSB => scroll::LE,
+ ELFDATA2MSB => scroll::BE,
+ d => return Err(Error::Malformed(format!("invalid ELF endianness DATA type {:x}", d)).into()),
+ };
+ elf_header.e_type = bytes.gread_with(offset, endianness)?;
+ elf_header.e_machine = bytes.gread_with(offset, endianness)?;
+ elf_header.e_version = bytes.gread_with(offset, endianness)?;
+ elf_header.e_entry = bytes.gread_with(offset, endianness)?;
+ elf_header.e_phoff = bytes.gread_with(offset, endianness)?;
+ elf_header.e_shoff = bytes.gread_with(offset, endianness)?;
+ elf_header.e_flags = bytes.gread_with(offset, endianness)?;
+ elf_header.e_ehsize = bytes.gread_with(offset, endianness)?;
+ elf_header.e_phentsize = bytes.gread_with(offset, endianness)?;
+ elf_header.e_phnum = bytes.gread_with(offset, endianness)?;
+ elf_header.e_shentsize = bytes.gread_with(offset, endianness)?;
+ elf_header.e_shnum = bytes.gread_with(offset, endianness)?;
+ elf_header.e_shstrndx = bytes.gread_with(offset, endianness)?;
+ Ok((elf_header, SIZEOF_EHDR))
+ }
+ }
+
+ impl ctx::TryIntoCtx<scroll::Endian> for Header {
+ type Error = crate::error::Error;
+ /// a Pwrite impl for Header: **note** we use the endianness value in the header, and not a parameter
+ fn try_into_ctx(self, bytes: &mut [u8], _endianness: scroll::Endian) -> result::Result<usize, Self::Error> {
+ use scroll::{Pwrite};
+ let offset = &mut 0;
+ let endianness =
+ match self.e_ident[EI_DATA] {
+ ELFDATA2LSB => scroll::LE,
+ ELFDATA2MSB => scroll::BE,
+ d => return Err(Error::Malformed(format!("invalid ELF DATA type {:x}", d)).into()),
+ };
+ for i in 0..self.e_ident.len() {
+ bytes.gwrite(self.e_ident[i], offset)?;
+ }
+ bytes.gwrite_with(self.e_type , offset, endianness)?;
+ bytes.gwrite_with(self.e_machine , offset, endianness)?;
+ bytes.gwrite_with(self.e_version , offset, endianness)?;
+ bytes.gwrite_with(self.e_entry , offset, endianness)?;
+ bytes.gwrite_with(self.e_phoff , offset, endianness)?;
+ bytes.gwrite_with(self.e_shoff , offset, endianness)?;
+ bytes.gwrite_with(self.e_flags , offset, endianness)?;
+ bytes.gwrite_with(self.e_ehsize , offset, endianness)?;
+ bytes.gwrite_with(self.e_phentsize , offset, endianness)?;
+ bytes.gwrite_with(self.e_phnum , offset, endianness)?;
+ bytes.gwrite_with(self.e_shentsize , offset, endianness)?;
+ bytes.gwrite_with(self.e_shnum , offset, endianness)?;
+ bytes.gwrite_with(self.e_shstrndx , offset, endianness)?;
+ Ok(SIZEOF_EHDR)
+ }
+ }
+
+ impl Header {
+ /// Load a header from a file. **You must** ensure the seek is at the correct position.
+ #[cfg(feature = "std")]
+ pub fn from_fd(bytes: &mut File) -> Result<Header> {
+ let mut elf_header = [0; $size];
+ bytes.read_exact(&mut elf_header)?;
+ Ok(*Header::from_bytes(&elf_header))
+ }
+
+ #[cfg(feature = "endian_fd")]
+ /// Parses an ELF header from the given bytes
+ pub fn parse(bytes: &[u8]) -> Result<Header> {
+ use super::{EI_DATA, ELFDATA2LSB, ELFDATA2MSB, SIZEOF_IDENT};
+
+ let mut elf_header = Header::default();
+ let mut offset = &mut 0;
+ for i in 0..SIZEOF_IDENT {
+ elf_header.e_ident[i] = bytes.gread(&mut offset)?;
+ }
+ let endianness =
+ match elf_header.e_ident[EI_DATA] {
+ ELFDATA2LSB => scroll::LE,
+ ELFDATA2MSB => scroll::BE,
+ d => return Err(Error::Malformed(format!("invalid ELF DATA type {:x}", d)).into()),
+ };
+ elf_header.e_type = bytes.gread_with(offset, endianness)?;
+ elf_header.e_machine = bytes.gread_with(offset, endianness)?;
+ elf_header.e_version = bytes.gread_with(offset, endianness)?;
+ elf_header.e_entry = bytes.gread_with(offset, endianness)?;
+ elf_header.e_phoff = bytes.gread_with(offset, endianness)?;
+ elf_header.e_shoff = bytes.gread_with(offset, endianness)?;
+ elf_header.e_flags = bytes.gread_with(offset, endianness)?;
+ elf_header.e_ehsize = bytes.gread_with(offset, endianness)?;
+ elf_header.e_phentsize = bytes.gread_with(offset, endianness)?;
+ elf_header.e_phnum = bytes.gread_with(offset, endianness)?;
+ elf_header.e_shentsize = bytes.gread_with(offset, endianness)?;
+ elf_header.e_shnum = bytes.gread_with(offset, endianness)?;
+ elf_header.e_shstrndx = bytes.gread_with(offset, endianness)?;
+ Ok(elf_header)
+ }
+ }
+ } // end if_alloc
+ };
+}
+
+// tests
+
+macro_rules! elf_header_test {
+ ($class:expr) => {
+ #[cfg(test)]
+ mod tests {
+ use super::*;
+ use crate::container::{Container, Ctx};
+ use crate::elf::header::Header as ElfHeader;
+ use alloc::vec::Vec;
+ use scroll::{Pread, Pwrite};
+ #[test]
+ fn size_of() {
+ assert_eq!(::std::mem::size_of::<Header>(), SIZEOF_EHDR);
+ }
+ #[test]
+ fn header_read_write() {
+ let crt1: Vec<u8> = if $class == ELFCLASS64 {
+ include!("../../etc/crt1.rs")
+ } else {
+ include!("../../etc/crt132.rs")
+ };
+ let header: Header = crt1.pread(0).unwrap();
+ assert_eq!(header.e_type, ET_REL);
+ println!("header: {:?}", &header);
+ let mut bytes = [0u8; SIZEOF_EHDR];
+ bytes.pwrite(header, 0).unwrap();
+ let header2: Header = bytes.pread(0).unwrap();
+ assert_eq!(header, header2);
+ }
+ #[test]
+ fn elfheader_read_write() {
+ let (container, crt1): (Container, Vec<u8>) = if $class == ELFCLASS64 {
+ (Container::Big, include!("../../etc/crt1.rs"))
+ } else {
+ (Container::Little, include!("../../etc/crt132.rs"))
+ };
+ let header: Header = crt1.pread(0).unwrap();
+ assert_eq!(header.e_type, ET_REL);
+ println!("header: {:?}", &header);
+ let mut bytes = [0u8; SIZEOF_EHDR];
+ let header_ = Header::from(header.clone());
+ bytes.pwrite(header_, 0).unwrap();
+ let header2: Header = bytes.pread(0).unwrap();
+ assert_eq!(header, header2);
+ let header = ElfHeader::new(Ctx::from(container));
+ println!("header: {:?}", &header);
+
+ let mut bytes = vec![0; 100];
+ bytes.pwrite(header, 0).unwrap();
+ }
+ }
+ };
+}
+
+pub mod header32 {
+ pub use super::*;
+
+ pub const SIZEOF_EHDR: usize = 52;
+ pub const ELFCLASS: u8 = ELFCLASS32;
+
+ elf_header!(u32);
+ elf_header_std_impl!(SIZEOF_EHDR, u32);
+ elf_header_test!(ELFCLASS);
+}
+
+pub mod header64 {
+ pub use super::*;
+
+ pub const SIZEOF_EHDR: usize = 64;
+ pub const ELFCLASS: u8 = ELFCLASS64;
+
+ elf_header!(u64);
+ elf_header_std_impl!(SIZEOF_EHDR, u64);
+ elf_header_test!(ELFCLASS);
+}
diff --git a/third_party/rust/goblin/src/elf/mod.rs b/third_party/rust/goblin/src/elf/mod.rs
new file mode 100644
index 0000000000..70c49146ff
--- /dev/null
+++ b/third_party/rust/goblin/src/elf/mod.rs
@@ -0,0 +1,571 @@
+//! The generic ELF module, which gives access to ELF constants and other helper functions, which are independent of ELF bithood. Also defines an `Elf` struct which implements a unified parser that returns a wrapped `Elf64` or `Elf32` binary.
+//!
+//! To access the exact 32-bit or 64-bit versions, use [goblin::elf32::Header](header/header32/struct.Header.html)/[goblin::elf64::Header](header/header64/struct.Header.html), etc., for the various 32/64-bit structs.
+//!
+//! # Example
+//!
+//! ```rust
+//! use std::fs::File;
+//!
+//! pub fn read (bytes: &[u8]) {
+//! match goblin::elf::Elf::parse(&bytes) {
+//! Ok(binary) => {
+//! let entry = binary.entry;
+//! for ph in binary.program_headers {
+//! if ph.p_type == goblin::elf::program_header::PT_LOAD {
+//! // TODO: you should validate p_filesz before allocating.
+//! let mut _buf = vec![0u8; ph.p_filesz as usize];
+//! // read responsibly
+//! }
+//! }
+//! },
+//! Err(_) => ()
+//! }
+//! }
+//! ```
+//!
+//! This will properly access the underlying 32-bit or 64-bit binary automatically. Note that since
+//! 32-bit binaries typically have shorter 32-bit values in some cases (specifically for addresses and pointer
+//! values), these values are upcasted to u64/i64s when appropriate.
+//!
+//! See [goblin::elf::Elf](struct.Elf.html) for more information.
+//!
+//! You are still free to use the specific 32-bit or 64-bit versions by accessing them through `goblin::elf64`, etc., but you will have to parse and/or construct the various components yourself.
+//! In other words, there is no unified 32/64-bit `Elf` struct.
+//!
+//! # Note
+//! To use the automagic ELF datatype union parser, you _must_ enable/opt-in to the `elf64`, `elf32`, and
+//! `endian_fd` features if you disable `default`.
+
+#[macro_use]
+pub(crate) mod gnu_hash;
+
+// These are shareable values for the 32/64 bit implementations.
+//
+// They are publicly re-exported by the pub-using module
+pub mod compression_header;
+pub mod header;
+pub mod program_header;
+pub mod section_header;
+#[macro_use]
+pub mod sym;
+pub mod dynamic;
+#[macro_use]
+pub mod reloc;
+pub mod note;
+#[cfg(all(any(feature = "elf32", feature = "elf64"), feature = "alloc"))]
+pub mod symver;
+
+macro_rules! if_sylvan {
+ ($($i:item)*) => ($(
+ #[cfg(all(feature = "elf32", feature = "elf64", feature = "endian_fd"))]
+ $i
+ )*)
+}
+
+if_sylvan! {
+ use scroll::{ctx, Pread, Endian};
+ use crate::strtab::Strtab;
+ use crate::error;
+ use crate::container::{Container, Ctx};
+ use alloc::vec::Vec;
+ use core::cmp;
+
+ pub use header::Header;
+ pub use program_header::ProgramHeader;
+ pub use section_header::SectionHeader;
+ pub use sym::Symtab;
+ pub use sym::Sym;
+ pub use dynamic::Dyn;
+ pub use dynamic::Dynamic;
+ pub use reloc::Reloc;
+ pub use reloc::RelocSection;
+ pub use symver::{VersymSection, VerdefSection, VerneedSection};
+
+ pub type ProgramHeaders = Vec<ProgramHeader>;
+ pub type SectionHeaders = Vec<SectionHeader>;
+ pub type ShdrIdx = usize;
+
+ #[derive(Debug)]
+ /// An ELF binary. The underlying data structures are read according to the header's byte order and container size (32 or 64).
+ pub struct Elf<'a> {
+ /// The ELF header, which provides a rudimentary index into the rest of the binary
+ pub header: Header,
+ /// The program headers; they primarily tell the kernel and the dynamic linker
+ /// how to load this binary
+ pub program_headers: ProgramHeaders,
+ /// The sections headers. These are strippable, never count on them being
+ /// here unless you're a static linker!
+ pub section_headers: SectionHeaders,
+ /// The section header string table
+ pub shdr_strtab: Strtab<'a>,
+ /// The string table for the dynamically accessible symbols
+ pub dynstrtab: Strtab<'a>,
+ /// The dynamically accessible symbols, i.e., exports, imports.
+ /// This is what the dynamic linker uses to dynamically load and link your binary,
+ /// or find imported symbols for binaries which dynamically link against your library
+ pub dynsyms: Symtab<'a>,
+ /// The debugging symbol table
+ pub syms: Symtab<'a>,
+ /// The string table for the symbol table
+ pub strtab: Strtab<'a>,
+ /// Contains dynamic linking information, with the _DYNAMIC array + a preprocessed DynamicInfo for that array
+ pub dynamic: Option<Dynamic>,
+ /// The dynamic relocation entries (strings, copy-data, etc.) with an addend
+ pub dynrelas: RelocSection<'a>,
+ /// The dynamic relocation entries without an addend
+ pub dynrels: RelocSection<'a>,
+ /// The plt relocation entries (procedure linkage table). For 32-bit binaries these are usually Rel (no addend)
+ pub pltrelocs: RelocSection<'a>,
+ /// Section relocations by section index (only present if this is a relocatable object file)
+ pub shdr_relocs: Vec<(ShdrIdx, RelocSection<'a>)>,
+ /// The binary's soname, if it has one
+ pub soname: Option<&'a str>,
+ /// The binary's program interpreter (e.g., dynamic linker), if it has one
+ pub interpreter: Option<&'a str>,
+ /// A list of this binary's dynamic libraries it uses, if there are any
+ pub libraries: Vec<&'a str>,
+ /// A list of runtime search paths for this binary's dynamic libraries it uses, if there
+ /// are any. (deprecated)
+ pub rpaths: Vec<&'a str>,
+ /// A list of runtime search paths for this binary's dynamic libraries it uses, if there
+ /// are any.
+ pub runpaths: Vec<&'a str>,
+ /// Whether this is a 64-bit elf or not
+ pub is_64: bool,
+ /// Whether this is a shared object or not
+ pub is_lib: bool,
+ /// The binaries entry point address, if it has one
+ pub entry: u64,
+ /// Whether the binary is little endian or not
+ pub little_endian: bool,
+ /// Contains the symbol version information from the optional section
+ /// [`SHT_GNU_VERSYM`][section_header::SHT_GNU_VERSYM] (GNU extension).
+ pub versym : Option<VersymSection<'a>>,
+ /// Contains the version definition information from the optional section
+ /// [`SHT_GNU_VERDEF`][section_header::SHT_GNU_VERDEF] (GNU extenstion).
+ pub verdef : Option<VerdefSection<'a>>,
+ /// Contains the version needed information from the optional section
+ /// [`SHT_GNU_VERNEED`][section_header::SHT_GNU_VERNEED] (GNU extenstion).
+ pub verneed : Option<VerneedSection<'a>>,
+ ctx: Ctx,
+ }
+
+ impl<'a> Elf<'a> {
+ /// Try to iterate notes in PT_NOTE program headers; returns `None` if there aren't any note headers in this binary
+ pub fn iter_note_headers(&self, data: &'a [u8]) -> Option<note::NoteIterator<'a>> {
+ let mut iters = vec![];
+ for phdr in &self.program_headers {
+ if phdr.p_type == program_header::PT_NOTE {
+ let offset = phdr.p_offset as usize;
+ let alignment = phdr.p_align as usize;
+
+ iters.push(note::NoteDataIterator {
+ data,
+ offset,
+ size: offset.saturating_add(phdr.p_filesz as usize),
+ ctx: (alignment, self.ctx)
+ });
+ }
+ }
+
+ if iters.is_empty() {
+ None
+ } else {
+ Some(note::NoteIterator {
+ iters: iters,
+ index: 0,
+ })
+ }
+ }
+ /// Try to iterate notes in SHT_NOTE sections; returns `None` if there aren't any note sections in this binary
+ ///
+ /// If a section_name is given, only the section with the according name is iterated.
+ pub fn iter_note_sections(
+ &self,
+ data: &'a [u8],
+ section_name: Option<&str>,
+ ) -> Option<note::NoteIterator<'a>> {
+ let mut iters = vec![];
+ for sect in &self.section_headers {
+ if sect.sh_type != section_header::SHT_NOTE {
+ continue;
+ }
+
+ if section_name.is_some() && self.shdr_strtab.get_at(sect.sh_name) != section_name {
+ continue;
+ }
+
+ let offset = sect.sh_offset as usize;
+ let alignment = sect.sh_addralign as usize;
+ iters.push(note::NoteDataIterator {
+ data,
+ offset,
+ size: offset.saturating_add(sect.sh_size as usize),
+ ctx: (alignment, self.ctx)
+ });
+ }
+
+ if iters.is_empty() {
+ None
+ } else {
+ Some(note::NoteIterator {
+ iters: iters,
+ index: 0,
+ })
+ }
+ }
+ pub fn is_object_file(&self) -> bool {
+ self.header.e_type == header::ET_REL
+ }
+
+ /// Parses the contents to get the Header only. This `bytes` buffer should contain at least the length for parsing Header.
+ pub fn parse_header(bytes: &'a [u8]) -> error::Result<Header> {
+ bytes.pread::<Header>(0)
+ }
+
+ /// Lazy parse the ELF contents. This function mainly just assembles an Elf struct. Once we have the struct, we can choose to parse whatever we want.
+ pub fn lazy_parse(header: Header) -> error::Result<Self> {
+ let misc = parse_misc(&header)?;
+
+ Ok(Elf {
+ header,
+ program_headers: vec![],
+ section_headers: Default::default(),
+ shdr_strtab: Default::default(),
+ dynamic: None,
+ dynsyms: Default::default(),
+ dynstrtab: Strtab::default(),
+ syms: Default::default(),
+ strtab: Default::default(),
+ dynrelas: Default::default(),
+ dynrels: Default::default(),
+ pltrelocs: Default::default(),
+ shdr_relocs: Default::default(),
+ soname: None,
+ interpreter: None,
+ libraries: vec![],
+ rpaths: vec![],
+ runpaths: vec![],
+ is_64: misc.is_64,
+ is_lib: misc.is_lib,
+ entry: misc.entry,
+ little_endian: misc.little_endian,
+ ctx: misc.ctx,
+ versym: None,
+ verdef: None,
+ verneed: None,
+ })
+ }
+
+ /// Parses the contents of the byte stream in `bytes`, and maybe returns a unified binary
+ pub fn parse(bytes: &'a [u8]) -> error::Result<Self> {
+ let header = Self::parse_header(bytes)?;
+ let misc = parse_misc(&header)?;
+ let ctx = misc.ctx;
+
+ let program_headers = ProgramHeader::parse(bytes, header.e_phoff as usize, header.e_phnum as usize, ctx)?;
+
+ let mut interpreter = None;
+ for ph in &program_headers {
+ if ph.p_type == program_header::PT_INTERP && ph.p_filesz != 0 {
+ let count = (ph.p_filesz - 1) as usize;
+ let offset = ph.p_offset as usize;
+ interpreter = bytes.pread_with::<&str>(offset, ::scroll::ctx::StrCtx::Length(count)).ok();
+ }
+ }
+
+ let section_headers = SectionHeader::parse(bytes, header.e_shoff as usize, header.e_shnum as usize, ctx)?;
+
+ let get_strtab = |section_headers: &[SectionHeader], mut section_idx: usize| {
+ if section_idx == section_header::SHN_XINDEX as usize {
+ if section_headers.is_empty() {
+ return Ok(Strtab::default())
+ }
+ section_idx = section_headers[0].sh_link as usize;
+ }
+
+ if section_idx >= section_headers.len() {
+ // FIXME: warn! here
+ Ok(Strtab::default())
+ } else {
+ let shdr = &section_headers[section_idx];
+ shdr.check_size(bytes.len())?;
+ Strtab::parse(bytes, shdr.sh_offset as usize, shdr.sh_size as usize, 0x0)
+ }
+ };
+
+ let strtab_idx = header.e_shstrndx as usize;
+ let shdr_strtab = get_strtab(&section_headers, strtab_idx)?;
+
+ let mut syms = Symtab::default();
+ let mut strtab = Strtab::default();
+ if let Some(shdr) = section_headers.iter().rfind(|shdr| shdr.sh_type as u32 == section_header::SHT_SYMTAB) {
+ let size = shdr.sh_entsize;
+ let count = if size == 0 { 0 } else { shdr.sh_size / size };
+ syms = Symtab::parse(bytes, shdr.sh_offset as usize, count as usize, ctx)?;
+ strtab = get_strtab(&section_headers, shdr.sh_link as usize)?;
+ }
+
+ let mut is_pie = false;
+ let mut soname = None;
+ let mut libraries = vec![];
+ let mut rpaths = vec![];
+ let mut runpaths = vec![];
+ let mut dynsyms = Symtab::default();
+ let mut dynrelas = RelocSection::default();
+ let mut dynrels = RelocSection::default();
+ let mut pltrelocs = RelocSection::default();
+ let mut dynstrtab = Strtab::default();
+ let dynamic = Dynamic::parse(bytes, &program_headers, ctx)?;
+ if let Some(ref dynamic) = dynamic {
+ let dyn_info = &dynamic.info;
+
+ is_pie = dyn_info.flags_1 & dynamic::DF_1_PIE != 0;
+ dynstrtab = Strtab::parse(bytes,
+ dyn_info.strtab,
+ dyn_info.strsz,
+ 0x0)?;
+
+ if dyn_info.soname != 0 {
+ // FIXME: warn! here
+ soname = dynstrtab.get_at(dyn_info.soname);
+ }
+ if dyn_info.needed_count > 0 {
+ libraries = dynamic.get_libraries(&dynstrtab);
+ }
+ for dyn_ in &dynamic.dyns {
+ if dyn_.d_tag == dynamic::DT_RPATH {
+ if let Some(path) = dynstrtab.get_at(dyn_.d_val as usize) {
+ rpaths.push(path);
+ }
+ } else if dyn_.d_tag == dynamic::DT_RUNPATH {
+ if let Some(path) = dynstrtab.get_at(dyn_.d_val as usize) {
+ runpaths.push(path);
+ }
+ }
+ }
+ // parse the dynamic relocations
+ dynrelas = RelocSection::parse(bytes, dyn_info.rela, dyn_info.relasz, true, ctx)?;
+ dynrels = RelocSection::parse(bytes, dyn_info.rel, dyn_info.relsz, false, ctx)?;
+ let is_rela = dyn_info.pltrel as u64 == dynamic::DT_RELA;
+ pltrelocs = RelocSection::parse(bytes, dyn_info.jmprel, dyn_info.pltrelsz, is_rela, ctx)?;
+
+ let mut num_syms = if let Some(gnu_hash) = dyn_info.gnu_hash {
+ gnu_hash_len(bytes, gnu_hash as usize, ctx)?
+ } else if let Some(hash) = dyn_info.hash {
+ hash_len(bytes, hash as usize, header.e_machine, ctx)?
+ } else {
+ 0
+ };
+ let max_reloc_sym = dynrelas.iter()
+ .chain(dynrels.iter())
+ .chain(pltrelocs.iter())
+ .fold(0, |num, reloc| cmp::max(num, reloc.r_sym));
+ if max_reloc_sym != 0 {
+ num_syms = cmp::max(num_syms, max_reloc_sym + 1);
+ }
+ dynsyms = Symtab::parse(bytes, dyn_info.symtab, num_syms, ctx)?;
+ }
+
+ let mut shdr_relocs = vec![];
+ for (idx, section) in section_headers.iter().enumerate() {
+ let is_rela = section.sh_type == section_header::SHT_RELA;
+ if is_rela || section.sh_type == section_header::SHT_REL {
+ section.check_size(bytes.len())?;
+ let sh_relocs = RelocSection::parse(bytes, section.sh_offset as usize, section.sh_size as usize, is_rela, ctx)?;
+ shdr_relocs.push((idx, sh_relocs));
+ }
+ }
+
+ let versym = symver::VersymSection::parse(bytes, &section_headers, ctx)?;
+ let verdef = symver::VerdefSection::parse(bytes, &section_headers, ctx)?;
+ let verneed = symver::VerneedSection::parse(bytes, &section_headers, ctx)?;
+
+ let is_lib = misc.is_lib && !is_pie;
+
+ Ok(Elf {
+ header,
+ program_headers,
+ section_headers,
+ shdr_strtab,
+ dynamic,
+ dynsyms,
+ dynstrtab,
+ syms,
+ strtab,
+ dynrelas,
+ dynrels,
+ pltrelocs,
+ shdr_relocs,
+ soname,
+ interpreter,
+ libraries,
+ rpaths,
+ runpaths,
+ is_64: misc.is_64,
+ is_lib,
+ entry: misc.entry,
+ little_endian: misc.little_endian,
+ ctx: ctx,
+ versym,
+ verdef,
+ verneed,
+ })
+ }
+ }
+
+ impl<'a> ctx::TryFromCtx<'a, (usize, Endian)> for Elf<'a> {
+ type Error = crate::error::Error;
+ fn try_from_ctx(src: &'a [u8], (_, _): (usize, Endian)) -> Result<(Elf<'a>, usize), Self::Error> {
+ let elf = Elf::parse(src)?;
+ Ok((elf, src.len()))
+ }
+ }
+
+ fn gnu_hash_len(bytes: &[u8], offset: usize, ctx: Ctx) -> error::Result<usize> {
+ let buckets_num = bytes.pread_with::<u32>(offset, ctx.le)? as usize;
+ let min_chain = bytes.pread_with::<u32>(offset + 4, ctx.le)? as usize;
+ let bloom_size = bytes.pread_with::<u32>(offset + 8, ctx.le)? as usize;
+ // We could handle min_chain==0 if we really had to, but it shouldn't happen.
+ if buckets_num == 0 || min_chain == 0 || bloom_size == 0 {
+ return Err(error::Error::Malformed(format!("Invalid DT_GNU_HASH: buckets_num={} min_chain={} bloom_size={}",
+ buckets_num, min_chain, bloom_size)));
+ }
+ // Find the last bucket.
+ let buckets_offset = offset + 16 + bloom_size * if ctx.container.is_big() { 8 } else { 4 };
+ let mut max_chain = 0;
+ for bucket in 0..buckets_num {
+ let chain = bytes.pread_with::<u32>(buckets_offset + bucket * 4, ctx.le)? as usize;
+ if max_chain < chain {
+ max_chain = chain;
+ }
+ }
+ if max_chain < min_chain {
+ return Ok(0);
+ }
+ // Find the last chain within the bucket.
+ let mut chain_offset = buckets_offset + buckets_num * 4 + (max_chain - min_chain) * 4;
+ loop {
+ let hash = bytes.pread_with::<u32>(chain_offset, ctx.le)?;
+ max_chain += 1;
+ chain_offset += 4;
+ if hash & 1 != 0 {
+ return Ok(max_chain);
+ }
+ }
+ }
+
+ fn hash_len(bytes: &[u8], offset: usize, machine: u16, ctx: Ctx) -> error::Result<usize> {
+ // Based on readelf code.
+ let nchain = if (machine == header::EM_FAKE_ALPHA || machine == header::EM_S390) && ctx.container.is_big() {
+ bytes.pread_with::<u64>(offset.saturating_add(4), ctx.le)? as usize
+ } else {
+ bytes.pread_with::<u32>(offset.saturating_add(4), ctx.le)? as usize
+ };
+ Ok(nchain)
+ }
+
+ struct Misc {
+ is_64: bool,
+ is_lib: bool,
+ entry: u64,
+ little_endian: bool,
+ ctx: Ctx,
+ }
+
+ fn parse_misc(header: &Header) -> error::Result<Misc> {
+ let entry = header.e_entry as usize;
+ let is_lib = header.e_type == header::ET_DYN;
+ let is_lsb = header.e_ident[header::EI_DATA] == header::ELFDATA2LSB;
+ let endianness = scroll::Endian::from(is_lsb);
+ let class = header.e_ident[header::EI_CLASS];
+ if class != header::ELFCLASS64 && class != header::ELFCLASS32 {
+ return Err(error::Error::Malformed(format!("Unknown values in ELF ident header: class: {} endianness: {}",
+ class,
+ header.e_ident[header::EI_DATA])));
+ }
+ let is_64 = class == header::ELFCLASS64;
+ let container = if is_64 { Container::Big } else { Container::Little };
+ let ctx = Ctx::new(container, endianness);
+
+ Ok(Misc{
+ is_64,
+ is_lib,
+ entry: entry as u64,
+ little_endian:is_lsb,
+ ctx,
+ })
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ use super::*;
+
+ #[test]
+ fn parse_crt1_64bit() {
+ let crt1: Vec<u8> = include!("../../etc/crt1.rs");
+ match Elf::parse(&crt1) {
+ Ok(binary) => {
+ assert!(binary.is_64);
+ assert!(!binary.is_lib);
+ assert_eq!(binary.entry, 0);
+ assert!(binary.syms.get(1000).is_none());
+ assert!(binary.syms.get(5).is_some());
+ let syms = binary.syms.to_vec();
+ assert!(!binary.section_headers.is_empty());
+ for (i, sym) in syms.iter().enumerate() {
+ if i == 11 {
+ let symtab = binary.strtab;
+ println!("sym: {:?}", &sym);
+ assert_eq!(&symtab[sym.st_name], "_start");
+ break;
+ }
+ }
+ assert!(!syms.is_empty());
+ }
+ Err(err) => {
+ panic!("failed: {}", err);
+ }
+ }
+ }
+
+ #[test]
+ fn parse_crt1_32bit() {
+ let crt1: Vec<u8> = include!("../../etc/crt132.rs");
+ match Elf::parse(&crt1) {
+ Ok(binary) => {
+ assert!(!binary.is_64);
+ assert!(!binary.is_lib);
+ assert_eq!(binary.entry, 0);
+ assert!(binary.syms.get(1000).is_none());
+ assert!(binary.syms.get(5).is_some());
+ let syms = binary.syms.to_vec();
+ assert!(!binary.section_headers.is_empty());
+ for (i, sym) in syms.iter().enumerate() {
+ if i == 11 {
+ let symtab = binary.strtab;
+ println!("sym: {:?}", &sym);
+ assert_eq!(&symtab[sym.st_name], "__libc_csu_fini");
+ break;
+ }
+ }
+ assert!(!syms.is_empty());
+ }
+ Err(err) => {
+ panic!("failed: {}", err);
+ }
+ }
+ }
+
+ // See https://github.com/m4b/goblin/issues/257
+ #[test]
+ #[allow(unused)]
+ fn no_use_statement_conflict() {
+ use crate::elf::section_header::*;
+ use crate::elf::*;
+
+ fn f(_: SectionHeader) {}
+ }
+}
diff --git a/third_party/rust/goblin/src/elf/note.rs b/third_party/rust/goblin/src/elf/note.rs
new file mode 100644
index 0000000000..2e2864d25f
--- /dev/null
+++ b/third_party/rust/goblin/src/elf/note.rs
@@ -0,0 +1,319 @@
+// Defined note types for GNU systems.
+
+#[cfg(feature = "log")]
+use log::debug;
+#[cfg(feature = "alloc")]
+use scroll::{IOread, IOwrite, Pread, Pwrite, SizeWith};
+
+/// ABI information.
+///
+/// The descriptor consists of words:
+/// * word 0: OS descriptor
+/// * word 1: major version of the ABI
+/// * word 2: minor version of the ABI
+/// * word 3: subminor version of the ABI
+pub const NT_GNU_ABI_TAG: u32 = 1;
+
+/// Old name for [`NT_GNU_ABI_TAG`].
+pub const ELF_NOTE_ABI: u32 = NT_GNU_ABI_TAG;
+// Known OSes. These values can appear in word 0 of an
+// `NT_GNU_ABI_TAG` note section entry.
+pub const ELF_NOTE_OS_LINUX: u32 = 0;
+pub const ELF_NOTE_OS_GNU: u32 = 1;
+pub const ELF_NOTE_OS_SOLARIS2: u32 = 2;
+pub const ELF_NOTE_OS_FREEBSD: u32 = 3;
+
+/// Synthetic `hwcap` information.
+///
+/// The descriptor begins with two words:
+/// * word 0: number of entries
+/// * word 1: bitmask of enabled entries
+///
+/// Then follow variable-length entries, one byte followed by a '\0'-terminated
+/// `hwcap` name string. The byte gives the bit number to test if enabled,
+/// `(1U << bit) & bitmask`.
+pub const NT_GNU_HWCAP: u32 = 2;
+
+/// Build ID bits as generated by ld --build-id.
+///
+/// The descriptor consists of any nonzero number of bytes.
+pub const NT_GNU_BUILD_ID: u32 = 3;
+
+/// Version note generated by GNU gold containing a version string.
+pub const NT_GNU_GOLD_VERSION: u32 = 4;
+
+/// Program property note
+pub const NT_GNU_PROPERTY_TYPE_0: u32 = 5;
+
+// The core-dump note types below reuse small values (e.g. 1, 3) that collide
+// numerically with the GNU note types above; a note's type is interpreted
+// relative to its name field ("GNU" vs. "CORE"), so the overlap is harmless.
+
+/// Contains a copy of the `prstatus` struct.
+pub const NT_PRSTATUS: u32 = 1;
+
+/// Contains a copy of the `prpsinfo` struct.
+pub const NT_PRPSINFO: u32 = 3;
+
+/// Fields of `siginfo_t`.
+pub const NT_SIGINFO: u32 = 0x5349_4749;
+
+/// Description of mapped files.
+pub const NT_FILE: u32 = 0x4649_4c45;
+
+#[derive(Clone, Copy, Debug)]
+#[cfg_attr(feature = "alloc", derive(Pread, Pwrite, IOread, IOwrite, SizeWith))]
+#[repr(C)]
+/// Note section contents. Each entry in the note section begins with a header
+/// of a fixed form.
+///
+/// 32-bit variant (`Elf32_Nhdr`): every field is a 32-bit word.
+pub struct Nhdr32 {
+    /// Length of the note's name (includes the terminator)
+    pub n_namesz: u32,
+    /// Length of the note's descriptor
+    pub n_descsz: u32,
+    /// Type of the note
+    pub n_type: u32,
+}
+
+// Declare that this is a plain type.
+// SAFETY: `Nhdr32` is `#[repr(C)]` and consists solely of `u32` fields, so
+// every bit pattern is a valid value.
+unsafe impl plain::Plain for Nhdr32 {}
+
+#[derive(Clone, Copy, Debug)]
+#[cfg_attr(feature = "alloc", derive(Pread, Pwrite, IOread, IOwrite, SizeWith))]
+#[repr(C)]
+/// Note section contents. Each entry in the note section begins with a header
+/// of a fixed form.
+///
+/// 64-bit variant (`Elf64_Nhdr`): every field is a 64-bit word.
+pub struct Nhdr64 {
+    /// Length of the note's name (includes the terminator)
+    pub n_namesz: u64,
+    /// Length of the note's descriptor.
+    pub n_descsz: u64,
+    /// Type of the note.
+    pub n_type: u64,
+}
+
+// Declare that this is a plain type.
+// SAFETY: `Nhdr64` is `#[repr(C)]` and consists solely of `u64` fields, so
+// every bit pattern is a valid value.
+unsafe impl plain::Plain for Nhdr64 {}
+
+if_alloc! {
+ use crate::error;
+ use crate::container;
+ use scroll::ctx;
+ use alloc::vec::Vec;
+
+ /// An iterator over ELF binary notes in a note section or segment
+ pub struct NoteDataIterator<'a> {
+ pub data: &'a [u8],
+ pub size: usize,
+ pub offset: usize,
+ pub ctx: (usize, container::Ctx), // (alignment, ctx)
+ }
+
+ impl<'a> Iterator for NoteDataIterator<'a> {
+ type Item = error::Result<Note<'a>>;
+ fn next(&mut self) -> Option<Self::Item> {
+ if self.offset >= self.size {
+ None
+ } else {
+ debug!("NoteIterator - {:#x}", self.offset);
+ match self.data.gread_with(&mut self.offset, self.ctx) {
+ Ok(res) => Some(Ok(res)),
+ Err(e) => Some(Err(e))
+ }
+ }
+ }
+ }
+
+ /// An iterator over ELF binary notes
+ pub struct NoteIterator<'a> {
+ pub iters: Vec<NoteDataIterator<'a>>,
+ pub index: usize,
+ }
+
+ impl<'a> Iterator for NoteIterator<'a> {
+ type Item = error::Result<Note<'a>>;
+ fn next(&mut self) -> Option<Self::Item> {
+ while self.index < self.iters.len() {
+ if let Some(note_result) = self.iters[self.index].next() {
+ return Some(note_result);
+ }
+
+ self.index += 1;
+ }
+
+ None
+ }
+ }
+
+ #[derive(Debug)]
+ struct NoteHeader {
+ n_namesz: usize,
+ n_descsz: usize,
+ n_type: u32,
+ }
+
+ impl From<Nhdr32> for NoteHeader {
+ fn from(header: Nhdr32) -> Self {
+ NoteHeader {
+ n_namesz: header.n_namesz as usize,
+ n_descsz: header.n_descsz as usize,
+ n_type: header.n_type,
+ }
+ }
+ }
+
+ impl From<Nhdr64> for NoteHeader {
+ fn from(header: Nhdr64) -> Self {
+ NoteHeader {
+ n_namesz: header.n_namesz as usize,
+ n_descsz: header.n_descsz as usize,
+ n_type: header.n_type as u32,
+ }
+ }
+ }
+
+ fn align(alignment: usize, offset: &mut usize) {
+ let diff = *offset % alignment;
+ if diff != 0 {
+ *offset += alignment - diff;
+ }
+ }
+
+    /// A 32/64 bit Note struct, with the name and desc pre-parsed
+    #[derive(Debug)]
+    pub struct Note<'a> {
+        /// The type of this note
+        pub n_type: u32,
+        /// NUL terminated string, where `namesz` includes the terminator
+        pub name: &'a str, // needs padding such that namesz + padding % {wordsize} == 0
+        /// arbitrary data of length `descsz`
+        pub desc: &'a [u8], // needs padding such that descsz + padding % {wordsize} == 0
+    }
+
+    impl<'a> Note<'a> {
+        /// Returns a human-readable name for this note's type, or
+        /// `"NT_UNKNOWN"` for unrecognized values.
+        pub fn type_to_str(&self) -> &'static str {
+            match self.n_type {
+                NT_GNU_ABI_TAG => "NT_GNU_ABI_TAG",
+                NT_GNU_HWCAP => "NT_GNU_HWCAP",
+                NT_GNU_BUILD_ID => "NT_GNU_BUILD_ID",
+                NT_GNU_GOLD_VERSION => "NT_GNU_GOLD_VERSION",
+                // NOTE(review): the string omits "TYPE_" relative to the
+                // constant's name (NT_GNU_PROPERTY_TYPE_0) — matches upstream;
+                // confirm before "fixing", callers may compare this string.
+                NT_GNU_PROPERTY_TYPE_0 => "NT_GNU_PROPERTY_0",
+                _ => "NT_UNKNOWN"
+            }
+        }
+    }
+
+    impl<'a> ctx::TryFromCtx<'a, (usize, container::Ctx)> for Note<'a> {
+        type Error = error::Error;
+        // Parses one note entry: fixed header, then the NUL-terminated name,
+        // then the descriptor, each padded out to `alignment`.
+        fn try_from_ctx(bytes: &'a [u8], (alignment, ctx): (usize, container::Ctx)) -> Result<(Self, usize), Self::Error> {
+            let offset = &mut 0;
+            // Clamp to the minimum legal note alignment (also guards the
+            // modulo in `align` against zero).
+            let mut alignment = alignment;
+            if alignment < 4 {
+                alignment = 4;
+            }
+            let header: NoteHeader = {
+                match alignment {
+                    // Even 8-byte-aligned note segments are read with 32-bit
+                    // header fields here; that matches what toolchains emit
+                    // in practice. Any other alignment is rejected.
+                    4|8 => bytes.gread_with::<Nhdr32>(offset, ctx.le)?.into(),
+                    _ => return Err(error::Error::Malformed(format!("Notes has unimplemented alignment requirement: {:#x}", alignment)))
+                }
+            };
+            debug!("{:?} - {:#x}", header, *offset);
+            // -1 because includes \0 terminator
+            let name = bytes.gread_with::<&'a str>(offset, ctx::StrCtx::Length(header.n_namesz.saturating_sub(1)))?;
+            // Step over the NUL terminator itself (absent when namesz == 0).
+            if header.n_namesz > 0 {
+                *offset += 1;
+            }
+            align(alignment, offset);
+            debug!("note name {} - {:#x}", name, *offset);
+            let desc = bytes.gread_with::<&'a [u8]>(offset, header.n_descsz)?;
+            // Pad past the descriptor so the next note starts aligned.
+            align(alignment, offset);
+            debug!("desc {:?} - {:#x}", desc, *offset);
+            Ok((Note {
+                name,
+                desc,
+                n_type: header.n_type,
+            }, *offset))
+        }
+    }
+
+    #[cfg(test)]
+    mod tests {
+        use super::*;
+
+        // Three consecutive 4-byte-aligned notes, all named "GNU":
+        // bytes   0..32  — NT_GNU_ABI_TAG   (namesz=4, descsz=16)
+        // bytes  32..68  — NT_GNU_BUILD_ID  (namesz=4, descsz=20)
+        // bytes  68..132 — NT_GNU_PROPERTY_TYPE_0 (namesz=4, descsz=48)
+        static NOTE_DATA: [u8; 132] = [0x04, 0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00,
+                                       0x01, 0x00, 0x00, 0x00, 0x47, 0x4e, 0x55, 0x00,
+                                       0x00, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00,
+                                       0x06, 0x00, 0x00, 0x00, 0x20, 0x00, 0x00, 0x00,
+                                       0x04, 0x00, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00,
+                                       0x03, 0x00, 0x00, 0x00, 0x47, 0x4e, 0x55, 0x00,
+                                       0xbc, 0xfc, 0x66, 0xcd, 0xc7, 0xd5, 0x14, 0x7b,
+                                       0x53, 0xb1, 0x10, 0x11, 0x94, 0x86, 0x8e, 0xf9,
+                                       0x4f, 0xe8, 0xdd, 0xdb, 0x04, 0x00, 0x00, 0x00,
+                                       0x30, 0x00, 0x00, 0x00, 0x05, 0x00, 0x00, 0x00,
+                                       0x47, 0x4E, 0x55, 0x00, 0x02, 0x80, 0x00, 0xC0,
+                                       0x04, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00,
+                                       0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x01, 0xC0,
+                                       0x04, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00,
+                                       0x00, 0x00, 0x00, 0x00, 0x02, 0x00, 0x01, 0xC0,
+                                       0x04, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+                                       0x00, 0x00, 0x00, 0x00];
+
+        // Alignment 4, little-endian fields. The container size is irrelevant
+        // to note parsing (a 32-bit header is read for alignment 4 and 8).
+        static CONTEXT: (usize, container::Ctx) = (4, container::Ctx {
+            container: container::Container::Big,
+            le: ::scroll::Endian::Little,
+        });
+
+        // Builds a sub-iterator over NOTE_DATA[start..end].
+        fn make_note_iter(start: usize, end: usize) -> NoteDataIterator<'static> {
+            NoteDataIterator {
+                data: &NOTE_DATA,
+                size: end,
+                offset: start,
+                ctx: CONTEXT,
+            }
+        }
+
+        #[test]
+        fn iter_single_section() {
+            let mut notes = NoteIterator {
+                iters: vec![make_note_iter(0, 132)],
+                index: 0,
+            };
+
+            assert_eq!(notes.next().unwrap().unwrap().n_type, NT_GNU_ABI_TAG);
+            assert_eq!(notes.next().unwrap().unwrap().n_type, NT_GNU_BUILD_ID);
+            assert_eq!(notes.next().unwrap().unwrap().n_type, NT_GNU_PROPERTY_TYPE_0);
+            assert!(notes.next().is_none());
+        }
+
+        #[test]
+        fn iter_multiple_sections() {
+            let mut notes = NoteIterator {
+                iters: vec![make_note_iter(0, 32), make_note_iter(32, 68), make_note_iter(68, 132)],
+                index: 0,
+            };
+
+            assert_eq!(notes.next().unwrap().unwrap().n_type, NT_GNU_ABI_TAG);
+            assert_eq!(notes.next().unwrap().unwrap().n_type, NT_GNU_BUILD_ID);
+            assert_eq!(notes.next().unwrap().unwrap().n_type, NT_GNU_PROPERTY_TYPE_0);
+            assert!(notes.next().is_none());
+        }
+
+        // An empty sub-iterator (size == offset) must be skipped silently.
+        #[test]
+        fn skip_empty_sections() {
+            let mut notes = NoteIterator {
+                iters: vec![
+                    make_note_iter(0, 32),
+                    make_note_iter(0, 0),
+                    make_note_iter(32, 68),
+                ],
+                index: 0,
+            };
+
+            assert_eq!(notes.next().unwrap().unwrap().n_type, NT_GNU_ABI_TAG);
+            assert_eq!(notes.next().unwrap().unwrap().n_type, NT_GNU_BUILD_ID);
+            assert!(notes.next().is_none());
+        }
+
+        #[test]
+        fn ignore_no_sections() {
+            let mut notes = NoteIterator { iters: vec![], index: 0 };
+            assert!(notes.next().is_none());
+        }
+    }
+}
diff --git a/third_party/rust/goblin/src/elf/program_header.rs b/third_party/rust/goblin/src/elf/program_header.rs
new file mode 100644
index 0000000000..80d499ca68
--- /dev/null
+++ b/third_party/rust/goblin/src/elf/program_header.rs
@@ -0,0 +1,430 @@
+/* Legal values for p_type (segment type). */
+
+/// Program header table entry unused
+pub const PT_NULL: u32 = 0;
+/// Loadable program segment
+pub const PT_LOAD: u32 = 1;
+/// Dynamic linking information
+pub const PT_DYNAMIC: u32 = 2;
+/// Program interpreter
+pub const PT_INTERP: u32 = 3;
+/// Auxiliary information
+pub const PT_NOTE: u32 = 4;
+/// Reserved
+pub const PT_SHLIB: u32 = 5;
+/// Entry for header table itself
+pub const PT_PHDR: u32 = 6;
+/// Thread-local storage segment
+pub const PT_TLS: u32 = 7;
+/// Number of defined types
+pub const PT_NUM: u32 = 8;
+/// Start of OS-specific
+pub const PT_LOOS: u32 = 0x6000_0000;
+/// GCC .eh_frame_hdr segment
+pub const PT_GNU_EH_FRAME: u32 = 0x6474_e550;
+/// GNU property notes for linker and run-time loaders
+pub const PT_GNU_PROPERTY: u32 = 0x6474_e553;
+/// Indicates stack executability
+pub const PT_GNU_STACK: u32 = 0x6474_e551;
+/// Read-only after relocation
+pub const PT_GNU_RELRO: u32 = 0x6474_e552;
+/// Sun Specific segment (same value as PT_SUNWBSS)
+pub const PT_LOSUNW: u32 = 0x6fff_fffa;
+/// Sun Specific segment
+pub const PT_SUNWBSS: u32 = 0x6fff_fffa;
+/// Stack segment
+pub const PT_SUNWSTACK: u32 = 0x6fff_fffb;
+/// End of OS-specific (same value as PT_HIOS)
+pub const PT_HISUNW: u32 = 0x6fff_ffff;
+/// End of OS-specific
+pub const PT_HIOS: u32 = 0x6fff_ffff;
+/// Start of processor-specific
+pub const PT_LOPROC: u32 = 0x7000_0000;
+/// ARM unwind segment
+pub const PT_ARM_EXIDX: u32 = 0x7000_0001;
+/// End of processor-specific
+pub const PT_HIPROC: u32 = 0x7fff_ffff;
+
+/* Legal values for p_flags (segment flags). */
+
+/// Segment is executable
+pub const PF_X: u32 = 1;
+/// Segment is writable
+pub const PF_W: u32 = 1 << 1;
+/// Segment is readable
+pub const PF_R: u32 = 1 << 2;
+/// Bits reserved for OS-specific usage
+pub const PF_MASKOS: u32 = 0x0ff0_0000;
+/// Bits reserved for processor-specific usage
+pub const PF_MASKPROC: u32 = 0xf000_0000;
+
+/// Returns the symbolic name for a `p_type` value, or `"UNKNOWN_PT"`.
+///
+/// `PT_LOSUNW`/`PT_SUNWBSS` and `PT_HISUNW`/`PT_HIOS` are numerically equal,
+/// so only one arm per value can appear here (the other would be unreachable).
+pub fn pt_to_str(pt: u32) -> &'static str {
+    match pt {
+        PT_NULL => "PT_NULL",
+        PT_LOAD => "PT_LOAD",
+        PT_DYNAMIC => "PT_DYNAMIC",
+        PT_INTERP => "PT_INTERP",
+        PT_NOTE => "PT_NOTE",
+        PT_SHLIB => "PT_SHLIB",
+        PT_PHDR => "PT_PHDR",
+        PT_TLS => "PT_TLS",
+        PT_NUM => "PT_NUM",
+        PT_LOOS => "PT_LOOS",
+        PT_GNU_EH_FRAME => "PT_GNU_EH_FRAME",
+        PT_GNU_PROPERTY => "PT_GNU_PROPERTY",
+        PT_GNU_STACK => "PT_GNU_STACK",
+        PT_GNU_RELRO => "PT_GNU_RELRO",
+        PT_SUNWBSS => "PT_SUNWBSS",
+        PT_SUNWSTACK => "PT_SUNWSTACK",
+        PT_HIOS => "PT_HIOS",
+        PT_LOPROC => "PT_LOPROC",
+        PT_HIPROC => "PT_HIPROC",
+        PT_ARM_EXIDX => "PT_ARM_EXIDX",
+        _ => "UNKNOWN_PT",
+    }
+}
+
+if_alloc! {
+ use core::fmt;
+ use scroll::ctx;
+ use core::result;
+ use core::ops::Range;
+ use crate::container::{Ctx, Container};
+ use alloc::vec::Vec;
+
+    #[derive(Default, PartialEq, Clone)]
+    /// A unified ProgramHeader - convertable to and from 32-bit and 64-bit variants
+    pub struct ProgramHeader {
+        pub p_type : u32,
+        pub p_flags : u32,
+        pub p_offset: u64,
+        pub p_vaddr : u64,
+        pub p_paddr : u64,
+        pub p_filesz: u64,
+        pub p_memsz : u64,
+        pub p_align : u64,
+    }
+
+    impl ProgramHeader {
+        /// Return the size of the underlying program header, given a `Ctx`
+        #[inline]
+        pub fn size(ctx: Ctx) -> usize {
+            use scroll::ctx::SizeWith;
+            Self::size_with(&ctx)
+        }
+        /// Create a new `PT_LOAD` ELF program header
+        pub fn new() -> Self {
+            ProgramHeader {
+                p_type  : PT_LOAD,
+                p_flags : 0,
+                p_offset: 0,
+                p_vaddr : 0,
+                p_paddr : 0,
+                p_filesz: 0,
+                p_memsz : 0,
+                // 2 << 20 == 0x20_0000 (2 MiB alignment).
+                //TODO: check if this is true for 32-bit pt_load
+                p_align : 2 << 20,
+            }
+        }
+        /// Returns this program header's file offset range.
+        ///
+        /// `saturating_add` avoids wraparound on malformed headers; note the
+        /// `as usize` casts truncate on 32-bit hosts.
+        pub fn file_range(&self) -> Range<usize> {
+            self.p_offset as usize..self.p_offset.saturating_add(self.p_filesz) as usize
+        }
+        /// Returns this program header's virtual memory range
+        pub fn vm_range(&self) -> Range<usize> {
+            self.p_vaddr as usize..self.p_vaddr.saturating_add(self.p_memsz) as usize
+        }
+        /// Sets the executable flag
+        pub fn executable(&mut self) {
+            self.p_flags |= PF_X;
+        }
+        /// Sets the write flag
+        pub fn write(&mut self) {
+            self.p_flags |= PF_W;
+        }
+        /// Sets the read flag
+        pub fn read(&mut self) {
+            self.p_flags |= PF_R;
+        }
+        /// Whether this program header is executable
+        pub fn is_executable(&self) -> bool {
+            self.p_flags & PF_X != 0
+        }
+        /// Whether this program header is readable
+        pub fn is_read(&self) -> bool {
+            self.p_flags & PF_R != 0
+        }
+        /// Whether this program header is writable
+        pub fn is_write(&self) -> bool {
+            self.p_flags & PF_W != 0
+        }
+        /// Parses `count` program headers from `bytes` at `offset`, sized per
+        /// `ctx` (32- or 64-bit).
+        #[cfg(feature = "endian_fd")]
+        pub fn parse(bytes: &[u8], mut offset: usize, count: usize, ctx: Ctx) -> crate::error::Result<Vec<ProgramHeader>> {
+            use scroll::Pread;
+            // Sanity check to avoid OOM: an attacker-controlled `count` could
+            // otherwise drive a huge up-front allocation.
+            if count > bytes.len() / Self::size(ctx) {
+                return Err(crate::error::Error::BufferTooShort(count, "program headers"));
+            }
+            let mut program_headers = Vec::with_capacity(count);
+            for _ in 0..count {
+                let phdr = bytes.gread_with(&mut offset, ctx)?;
+                program_headers.push(phdr);
+            }
+            Ok(program_headers)
+        }
+    }
+
+    // Hex-formats address/size fields for readability; p_type is symbolic.
+    impl fmt::Debug for ProgramHeader {
+        fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+            f.debug_struct("ProgramHeader")
+                .field("p_type", &pt_to_str(self.p_type))
+                .field("p_flags", &format_args!("0x{:x}", self.p_flags))
+                .field("p_offset", &format_args!("0x{:x}", self.p_offset))
+                .field("p_vaddr", &format_args!("0x{:x}", self.p_vaddr))
+                .field("p_paddr", &format_args!("0x{:x}", self.p_paddr))
+                .field("p_filesz", &format_args!("0x{:x}", self.p_filesz))
+                .field("p_memsz", &format_args!("0x{:x}", self.p_memsz))
+                .field("p_align", &self.p_align)
+                .finish()
+        }
+    }
+
+    // On-disk size depends only on the container class (32- vs 64-bit).
+    impl ctx::SizeWith<Ctx> for ProgramHeader {
+        fn size_with(ctx: &Ctx) -> usize {
+            match ctx.container {
+                Container::Little => {
+                    program_header32::SIZEOF_PHDR
+                },
+                Container::Big => {
+                    program_header64::SIZEOF_PHDR
+                },
+            }
+        }
+    }
+
+    // Reads the concrete 32/64-bit header, then widens it into the unified
+    // struct; returns the number of bytes consumed alongside.
+    impl<'a> ctx::TryFromCtx<'a, Ctx> for ProgramHeader {
+        type Error = crate::error::Error;
+        fn try_from_ctx(bytes: &'a [u8], Ctx { container, le}: Ctx) -> result::Result<(Self, usize), Self::Error> {
+            use scroll::Pread;
+            let res = match container {
+                Container::Little => {
+                    (bytes.pread_with::<program_header32::ProgramHeader>(0, le)?.into(), program_header32::SIZEOF_PHDR)
+                },
+                Container::Big => {
+                    (bytes.pread_with::<program_header64::ProgramHeader>(0, le)?.into(), program_header64::SIZEOF_PHDR)
+                }
+            };
+            Ok(res)
+        }
+    }
+
+    // Narrows the unified struct to the concrete 32/64-bit layout and writes
+    // it with the requested endianness.
+    impl ctx::TryIntoCtx<Ctx> for ProgramHeader {
+        type Error = crate::error::Error;
+        fn try_into_ctx(self, bytes: &mut [u8], Ctx {container, le}: Ctx) -> result::Result<usize, Self::Error> {
+            use scroll::Pwrite;
+            match container {
+                Container::Little => {
+                    let phdr: program_header32::ProgramHeader = self.into();
+                    Ok(bytes.pwrite_with(phdr, 0, le)?)
+                },
+                Container::Big => {
+                    let phdr: program_header64::ProgramHeader = self.into();
+                    Ok(bytes.pwrite_with(phdr, 0, le)?)
+                }
+            }
+        }
+    }
+} // end if_alloc
+} // end if_alloc
+
+// Generates the impls shared by the concrete 32- and 64-bit `ProgramHeader`
+// modules below: conversions to/from the unified header, Debug, and the
+// parsing helpers. `$size` is the module's native field width (u32 or u64).
+macro_rules! elf_program_header_std_impl {
+    ($size:ty) => {
+        #[cfg(test)]
+        mod tests {
+            use super::*;
+            // Guards against layout drift: the in-memory struct must match
+            // the on-disk SIZEOF_PHDR exactly (required for `Plain` casts).
+            #[test]
+            fn size_of() {
+                assert_eq!(::std::mem::size_of::<ProgramHeader>(), SIZEOF_PHDR);
+            }
+        }
+
+        if_alloc! {
+
+
+            use crate::elf::program_header::ProgramHeader as ElfProgramHeader;
+            #[cfg(any(feature = "std", feature = "endian_fd"))]
+            use crate::error::Result;
+
+            use plain::Plain;
+
+            if_std! {
+                use std::fs::File;
+                use std::io::{Seek, Read};
+                use std::io::SeekFrom::Start;
+            }
+
+            // Widening conversion: concrete header -> unified (lossless).
+            impl From<ProgramHeader> for ElfProgramHeader {
+                fn from(ph: ProgramHeader) -> Self {
+                    ElfProgramHeader {
+                        p_type   : ph.p_type,
+                        p_flags  : ph.p_flags,
+                        p_offset : u64::from(ph.p_offset),
+                        p_vaddr  : u64::from(ph.p_vaddr),
+                        p_paddr  : u64::from(ph.p_paddr),
+                        p_filesz : u64::from(ph.p_filesz),
+                        p_memsz  : u64::from(ph.p_memsz),
+                        p_align  : u64::from(ph.p_align),
+                    }
+                }
+            }
+
+            // Narrowing conversion: unified -> concrete. The `as $size` casts
+            // truncate silently when targeting the 32-bit variant.
+            impl From<ElfProgramHeader> for ProgramHeader {
+                fn from(ph: ElfProgramHeader) -> Self {
+                    ProgramHeader {
+                        p_type   : ph.p_type,
+                        p_flags  : ph.p_flags,
+                        p_offset : ph.p_offset as $size,
+                        p_vaddr  : ph.p_vaddr as $size,
+                        p_paddr  : ph.p_paddr as $size,
+                        p_filesz : ph.p_filesz as $size,
+                        p_memsz  : ph.p_memsz as $size,
+                        p_align  : ph.p_align as $size,
+                    }
+                }
+            }
+        } // end if_alloc
+
+        use core::fmt;
+        use core::slice;
+
+        impl fmt::Debug for ProgramHeader {
+            fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+                f.debug_struct("ProgramHeader")
+                    .field("p_type", &pt_to_str(self.p_type))
+                    .field("p_flags", &format_args!("0x{:x}", self.p_flags))
+                    .field("p_offset", &format_args!("0x{:x}", self.p_offset))
+                    .field("p_vaddr", &format_args!("0x{:x}", self.p_vaddr))
+                    .field("p_paddr", &format_args!("0x{:x}", self.p_paddr))
+                    .field("p_filesz", &format_args!("0x{:x}", self.p_filesz))
+                    .field("p_memsz", &format_args!("0x{:x}", self.p_memsz))
+                    .field("p_align", &self.p_align)
+                    .finish()
+            }
+        }
+
+        impl ProgramHeader {
+            /// Parses `count` headers from `bytes` at `offset` with the given
+            /// endianness.
+            // NOTE(review): unlike the unified `ProgramHeader::parse`, this
+            // allocates `count` entries up-front with no buffer-size sanity
+            // check — confirm callers validate `count`.
+            #[cfg(feature = "endian_fd")]
+            pub fn parse(
+                bytes: &[u8],
+                mut offset: usize,
+                count: usize,
+                ctx: ::scroll::Endian,
+            ) -> Result<Vec<ProgramHeader>> {
+                use scroll::Pread;
+                let mut program_headers = vec![ProgramHeader::default(); count];
+                let offset = &mut offset;
+                bytes.gread_inout_with(offset, &mut program_headers, ctx)?;
+                Ok(program_headers)
+            }
+
+            /// Copies `phnum` headers out of `bytes` via `Plain`.
+            /// Panics (`expect`) if `bytes` is too short for `phnum` entries.
+            #[cfg(feature = "alloc")]
+            pub fn from_bytes(bytes: &[u8], phnum: usize) -> Vec<ProgramHeader> {
+                let mut phdrs = vec![ProgramHeader::default(); phnum];
+                phdrs
+                    .copy_from_bytes(bytes)
+                    .expect("buffer is too short for given number of entries");
+                phdrs
+            }
+
+            /// # Safety
+            ///
+            /// This function creates a `ProgramHeader` directly from a raw pointer
+            pub unsafe fn from_raw_parts<'a>(
+                phdrp: *const ProgramHeader,
+                phnum: usize,
+            ) -> &'a [ProgramHeader] {
+                slice::from_raw_parts(phdrp, phnum)
+            }
+
+            /// Seeks to `offset` and reads `count` headers straight into a
+            /// zero-initialized vector via `Plain` byte access.
+            #[cfg(feature = "std")]
+            pub fn from_fd(fd: &mut File, offset: u64, count: usize) -> Result<Vec<ProgramHeader>> {
+                let mut phdrs = vec![ProgramHeader::default(); count];
+                fd.seek(Start(offset))?;
+                unsafe {
+                    fd.read_exact(plain::as_mut_bytes(&mut *phdrs))?;
+                }
+                Ok(phdrs)
+            }
+        }
+    };
+}
+
+#[cfg(feature = "alloc")]
+use scroll::{Pread, Pwrite, SizeWith};
+
+pub mod program_header32 {
+    pub use crate::elf::program_header::*;
+
+    #[repr(C)]
+    #[derive(Copy, Clone, PartialEq, Default)]
+    #[cfg_attr(feature = "alloc", derive(Pread, Pwrite, SizeWith))]
+    /// A 32-bit ProgramHeader typically specifies how to map executable and data segments into memory
+    ///
+    /// Field order follows the ELF32 spec: `p_flags` comes *after* `p_memsz`
+    /// here, unlike the 64-bit layout where it follows `p_type`.
+    pub struct ProgramHeader {
+        /// Segment type
+        pub p_type: u32,
+        /// Segment file offset
+        pub p_offset: u32,
+        /// Segment virtual address
+        pub p_vaddr: u32,
+        /// Segment physical address
+        pub p_paddr: u32,
+        /// Segment size in file
+        pub p_filesz: u32,
+        /// Segment size in memory
+        pub p_memsz: u32,
+        /// Segment flags
+        pub p_flags: u32,
+        /// Segment alignment
+        pub p_align: u32,
+    }
+
+    // 8 × u32 fields.
+    pub const SIZEOF_PHDR: usize = 32;
+
+    // Declare that this is a plain type.
+    // SAFETY: `#[repr(C)]`, all-u32 fields — any byte pattern is valid.
+    unsafe impl plain::Plain for ProgramHeader {}
+
+    elf_program_header_std_impl!(u32);
+}
+
+pub mod program_header64 {
+    pub use crate::elf::program_header::*;
+
+    #[repr(C)]
+    #[derive(Copy, Clone, PartialEq, Default)]
+    #[cfg_attr(feature = "alloc", derive(Pread, Pwrite, SizeWith))]
+    /// A 64-bit ProgramHeader typically specifies how to map executable and data segments into memory
+    pub struct ProgramHeader {
+        /// Segment type
+        pub p_type: u32,
+        /// Segment flags
+        pub p_flags: u32,
+        /// Segment file offset
+        pub p_offset: u64,
+        /// Segment virtual address
+        pub p_vaddr: u64,
+        /// Segment physical address
+        pub p_paddr: u64,
+        /// Segment size in file
+        pub p_filesz: u64,
+        /// Segment size in memory
+        pub p_memsz: u64,
+        /// Segment alignment
+        pub p_align: u64,
+    }
+
+    // 2 × u32 + 6 × u64 fields.
+    pub const SIZEOF_PHDR: usize = 56;
+
+    // Declare that this is a plain type.
+    // SAFETY: `#[repr(C)]`, fields are u32/u64 — any byte pattern is valid.
+    unsafe impl plain::Plain for ProgramHeader {}
+
+    elf_program_header_std_impl!(u64);
+}
diff --git a/third_party/rust/goblin/src/elf/reloc.rs b/third_party/rust/goblin/src/elf/reloc.rs
new file mode 100644
index 0000000000..eeb3e1a2ac
--- /dev/null
+++ b/third_party/rust/goblin/src/elf/reloc.rs
@@ -0,0 +1,522 @@
+//! # Relocation computations
+//!
+//! The following notation is used to describe relocation computations
+//! specific to x86_64 ELF.
+//!
+//! * A: The addend used to compute the value of the relocatable field.
+//! * B: The base address at which a shared object is loaded into memory
+//! during execution. Generally, a shared object file is built with a
+//! base virtual address of 0. However, the execution address of the
+//! shared object is different.
+//! * G: The offset into the global offset table at which the address of
+//! the relocation entry's symbol resides during execution.
+//! * GOT: The address of the global offset table.
+//! * L: The section offset or address of the procedure linkage table entry
+//! for a symbol.
+//! * P: The section offset or address of the storage unit being relocated,
+//! computed using r_offset.
+//! * S: The value of the symbol whose index resides in the relocation entry.
+//! * Z: The size of the symbol whose index resides in the relocation entry.
+//!
+//! Below are some common x86_64 relocation computations you might find useful:
+//!
+//! | Relocation | Value | Size | Formula |
+//! |:--------------------------|:------|:----------|:------------------|
+//! | `R_X86_64_NONE` | 0 | NONE | NONE |
+//! | `R_X86_64_64` | 1 | 64 | S + A |
+//! | `R_X86_64_PC32` | 2 | 32 | S + A - P |
+//! | `R_X86_64_GOT32` | 3 | 32 | G + A |
+//! | `R_X86_64_PLT32` | 4 | 32 | L + A - P |
+//! | `R_X86_64_COPY` | 5 | NONE | NONE |
+//! | `R_X86_64_GLOB_DAT` | 6 | 64 | S |
+//! | `R_X86_64_JUMP_SLOT` | 7 | 64 | S |
+//! | `R_X86_64_RELATIVE` | 8 | 64 | B + A |
+//! | `R_X86_64_GOTPCREL` | 9 | 32 | G + GOT + A - P |
+//! | `R_X86_64_32` | 10 | 32 | S + A |
+//! | `R_X86_64_32S` | 11 | 32 | S + A |
+//! | `R_X86_64_16` | 12 | 16 | S + A |
+//! | `R_X86_64_PC16` | 13 | 16 | S + A - P |
+//! | `R_X86_64_8` | 14 | 8 | S + A |
+//! | `R_X86_64_PC8` | 15 | 8 | S + A - P |
+//! | `R_X86_64_DTPMOD64` | 16 | 64 | |
+//! | `R_X86_64_DTPOFF64` | 17 | 64 | |
+//! | `R_X86_64_TPOFF64` | 18 | 64 | |
+//! | `R_X86_64_TLSGD` | 19 | 32 | |
+//! | `R_X86_64_TLSLD` | 20 | 32 | |
+//! | `R_X86_64_DTPOFF32` | 21 | 32 | |
+//! | `R_X86_64_GOTTPOFF` | 22 | 32 | |
+//! | `R_X86_64_TPOFF32` | 23 | 32 | |
+//! | `R_X86_64_PC64` | 24 | 64 | S + A - P |
+//! | `R_X86_64_GOTOFF64` | 25 | 64 | S + A - GOT |
+//! | `R_X86_64_GOTPC32` | 26 | 32 | GOT + A - P |
+//! | `R_X86_64_SIZE32` | 32 | 32 | Z + A |
+//! | `R_X86_64_SIZE64` | 33 | 64 | Z + A |
+//! | `R_X86_64_GOTPC32_TLSDESC`| 34    | 32        |                   |
+//! | `R_X86_64_TLSDESC_CALL` | 35 | NONE | |
+//! | `R_X86_64_TLSDESC` | 36 | 64 × 2 | |
+//! | `R_X86_64_IRELATIVE` | 37 | 64 | indirect (B + A) |
+//!
+//! TLS information is at http://people.redhat.com/aoliva/writeups/TLS/RFC-TLSDESC-x86.txt
+//!
+//! `R_X86_64_IRELATIVE` is similar to `R_X86_64_RELATIVE` except that
+//! the value used in this relocation is the program address returned by the function,
+//! which takes no arguments, at the address of the result of the corresponding
+//! `R_X86_64_RELATIVE` relocation.
+//!
+//! Read more https://docs.oracle.com/cd/E23824_01/html/819-0690/chapter6-54839.html
+
+include!("constants_relocation.rs");
+
+// Generates the raw `Rela`/`Rel` structs for one word width: `$size` is the
+// unsigned offset/info type (u32/u64), `$isize` the signed addend type.
+macro_rules! elf_reloc {
+    ($size:ident, $isize:ty) => {
+        use core::fmt;
+        #[cfg(feature = "alloc")]
+        use scroll::{Pread, Pwrite, SizeWith};
+        #[repr(C)]
+        #[derive(Clone, Copy, PartialEq, Default)]
+        #[cfg_attr(feature = "alloc", derive(Pread, Pwrite, SizeWith))]
+        /// Relocation with an explicit addend
+        pub struct Rela {
+            /// Address
+            pub r_offset: $size,
+            /// Relocation type and symbol index
+            pub r_info: $size,
+            /// Addend
+            pub r_addend: $isize,
+        }
+        #[repr(C)]
+        #[derive(Clone, PartialEq, Default)]
+        #[cfg_attr(feature = "alloc", derive(Pread, Pwrite, SizeWith))]
+        /// Relocation without an addend
+        pub struct Rel {
+            /// address
+            pub r_offset: $size,
+            /// relocation type and symbol address
+            pub r_info: $size,
+        }
+        use plain;
+        // SAFETY: both structs are `#[repr(C)]` with only integer fields, so
+        // any byte pattern is a valid value.
+        unsafe impl plain::Plain for Rela {}
+        unsafe impl plain::Plain for Rel {}
+
+        // Debug unpacks r_info via the enclosing module's r_sym/r_type so the
+        // packed field is shown both raw and decoded.
+        impl fmt::Debug for Rela {
+            fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+                let sym = r_sym(self.r_info);
+                let typ = r_type(self.r_info);
+                f.debug_struct("Rela")
+                    .field("r_offset", &format_args!("{:x}", self.r_offset))
+                    .field("r_info", &format_args!("{:x}", self.r_info))
+                    .field("r_addend", &format_args!("{:x}", self.r_addend))
+                    .field("r_typ", &typ)
+                    .field("r_sym", &sym)
+                    .finish()
+            }
+        }
+        impl fmt::Debug for Rel {
+            fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+                let sym = r_sym(self.r_info);
+                let typ = r_type(self.r_info);
+                f.debug_struct("Rel")
+                    .field("r_offset", &format_args!("{:x}", self.r_offset))
+                    .field("r_info", &format_args!("{:x}", self.r_info))
+                    .field("r_typ", &typ)
+                    .field("r_sym", &sym)
+                    .finish()
+            }
+        }
+    };
+}
+
+// Generates conversions between the raw `Rela`/`Rel` of one word width and
+// the unified `Reloc`, plus raw-pointer and file-based readers.
+macro_rules! elf_rela_std_impl {
+    ($size:ident, $isize:ty) => {
+        if_alloc! {
+            use crate::elf::reloc::Reloc;
+
+            use core::slice;
+
+            if_std! {
+                use crate::error::Result;
+
+                use std::fs::File;
+                use std::io::{Read, Seek};
+                use std::io::SeekFrom::Start;
+            }
+
+            // Rela carries an explicit addend -> Some(addend).
+            impl From<Rela> for Reloc {
+                fn from(rela: Rela) -> Self {
+                    Reloc {
+                        r_offset: u64::from(rela.r_offset),
+                        r_addend: Some(i64::from(rela.r_addend)),
+                        r_sym: r_sym(rela.r_info) as usize,
+                        r_type: r_type(rela.r_info),
+                    }
+                }
+            }
+
+            // Rel has no addend field -> None.
+            impl From<Rel> for Reloc {
+                fn from(rel: Rel) -> Self {
+                    Reloc {
+                        r_offset: u64::from(rel.r_offset),
+                        r_addend: None,
+                        r_sym: r_sym(rel.r_info) as usize,
+                        r_type: r_type(rel.r_info),
+                    }
+                }
+            }
+
+            // A missing addend is written as 0 when converting back to Rela.
+            impl From<Reloc> for Rela {
+                fn from(rela: Reloc) -> Self {
+                    let r_info = r_info(rela.r_sym as $size, $size::from(rela.r_type));
+                    Rela {
+                        r_offset: rela.r_offset as $size,
+                        r_info: r_info,
+                        r_addend: rela.r_addend.unwrap_or(0) as $isize,
+                    }
+                }
+            }
+
+            // Converting to Rel silently drops any addend the Reloc carried.
+            impl From<Reloc> for Rel {
+                fn from(rel: Reloc) -> Self {
+                    let r_info = r_info(rel.r_sym as $size, $size::from(rel.r_type));
+                    Rel {
+                        r_offset: rel.r_offset as $size,
+                        r_info: r_info,
+                    }
+                }
+            }
+
+            /// Gets the rela entries given a rela pointer and the _size_ of the rela section in the binary,
+            /// in bytes.
+            /// Assumes the pointer is valid and can safely return a slice of memory pointing to the relas because:
+            /// 1. `ptr` points to memory received from the kernel (i.e., it loaded the executable), _or_
+            /// 2. The binary has already been mmapped (i.e., it's a `SharedObject`), and hence it's safe to return a slice of that memory.
+            /// 3. Or if you obtained the pointer in some other lawful manner
+            pub unsafe fn from_raw_rela<'a>(ptr: *const Rela, size: usize) -> &'a [Rela] {
+                slice::from_raw_parts(ptr, size / SIZEOF_RELA)
+            }
+
+            /// Gets the rel entries given a rel pointer and the _size_ of the rel section in the binary,
+            /// in bytes.
+            /// Assumes the pointer is valid and can safely return a slice of memory pointing to the rels because:
+            /// 1. `ptr` points to memory received from the kernel (i.e., it loaded the executable), _or_
+            /// 2. The binary has already been mmapped (i.e., it's a `SharedObject`), and hence it's safe to return a slice of that memory.
+            /// 3. Or if you obtained the pointer in some other lawful manner
+            pub unsafe fn from_raw_rel<'a>(ptr: *const Rel, size: usize) -> &'a [Rel] {
+                slice::from_raw_parts(ptr, size / SIZEOF_REL)
+            }
+
+            /// Reads `size / SIZEOF_RELA` Rela entries from `fd` at `offset`
+            /// via `Plain` byte access (host endianness).
+            #[cfg(feature = "std")]
+            pub fn from_fd(fd: &mut File, offset: usize, size: usize) -> Result<Vec<Rela>> {
+                let count = size / SIZEOF_RELA;
+                let mut relocs = vec![Rela::default(); count];
+                fd.seek(Start(offset as u64))?;
+                unsafe {
+                    fd.read_exact(plain::as_mut_bytes(&mut *relocs))?;
+                }
+                Ok(relocs)
+            }
+        } // end if_alloc
+    };
+}
+
+pub mod reloc32 {
+
+    pub use crate::elf::reloc::*;
+
+    elf_reloc!(u32, i32);
+
+    pub const SIZEOF_RELA: usize = 4 + 4 + 4;
+    pub const SIZEOF_REL: usize = 4 + 4;
+
+    // ELF32 packs r_info as: symbol index in the upper 24 bits, relocation
+    // type in the low 8 bits.
+    #[inline(always)]
+    pub fn r_sym(info: u32) -> u32 {
+        info >> 8
+    }
+
+    #[inline(always)]
+    pub fn r_type(info: u32) -> u32 {
+        info & 0xff
+    }
+
+    #[inline(always)]
+    pub fn r_info(sym: u32, typ: u32) -> u32 {
+        (sym << 8) + (typ & 0xff)
+    }
+
+    elf_rela_std_impl!(u32, i32);
+}
+
+pub mod reloc64 {
+    pub use crate::elf::reloc::*;
+
+    elf_reloc!(u64, i64);
+
+    pub const SIZEOF_RELA: usize = 8 + 8 + 8;
+    pub const SIZEOF_REL: usize = 8 + 8;
+
+    // ELF64 packs r_info as: symbol index in the upper 32 bits, relocation
+    // type in the low 32 bits.
+    #[inline(always)]
+    pub fn r_sym(info: u64) -> u32 {
+        (info >> 32) as u32
+    }
+
+    #[inline(always)]
+    pub fn r_type(info: u64) -> u32 {
+        (info & 0xffff_ffff) as u32
+    }
+
+    #[inline(always)]
+    pub fn r_info(sym: u64, typ: u64) -> u64 {
+        (sym << 32) + typ
+    }
+
+    elf_rela_std_impl!(u64, i64);
+}
+
+//////////////////////////////
+// Generic Reloc
+/////////////////////////////
+if_alloc! {
+ use scroll::{ctx, Pread};
+ use scroll::ctx::SizeWith;
+ use core::fmt;
+ use core::result;
+ use crate::container::{Ctx, Container};
+ use alloc::vec::Vec;
+
+    #[derive(Clone, Copy, PartialEq, Default)]
+    /// A unified ELF relocation structure
+    pub struct Reloc {
+        /// Address
+        pub r_offset: u64,
+        /// Addend (`None` for REL-style relocations, which have no addend field)
+        pub r_addend: Option<i64>,
+        /// The index into the corresponding symbol table - either dynamic or regular
+        pub r_sym: usize,
+        /// The relocation type
+        pub r_type: u32,
+    }
+
+    impl Reloc {
+        /// Returns the on-disk size of one entry: REL vs. RELA and the
+        /// container class together determine it.
+        pub fn size(is_rela: bool, ctx: Ctx) -> usize {
+            use scroll::ctx::SizeWith;
+            Reloc::size_with(&(is_rela, ctx))
+        }
+    }
+
+    // (is_rela, container ctx) — everything needed to pick the raw layout.
+    type RelocCtx = (bool, Ctx);
+
+    impl ctx::SizeWith<RelocCtx> for Reloc {
+        fn size_with( &(is_rela, Ctx { container, .. }): &RelocCtx) -> usize {
+            match container {
+                Container::Little => {
+                    if is_rela { reloc32::SIZEOF_RELA } else { reloc32::SIZEOF_REL }
+                },
+                Container::Big => {
+                    if is_rela { reloc64::SIZEOF_RELA } else { reloc64::SIZEOF_REL }
+                }
+            }
+        }
+    }
+
+ impl<'a> ctx::TryFromCtx<'a, RelocCtx> for Reloc {
+ type Error = crate::error::Error;
+ fn try_from_ctx(bytes: &'a [u8], (is_rela, Ctx { container, le }): RelocCtx) -> result::Result<(Self, usize), Self::Error> {
+ use scroll::Pread;
+ let reloc = match container {
+ Container::Little => {
+ if is_rela {
+ (bytes.pread_with::<reloc32::Rela>(0, le)?.into(), reloc32::SIZEOF_RELA)
+ } else {
+ (bytes.pread_with::<reloc32::Rel>(0, le)?.into(), reloc32::SIZEOF_REL)
+ }
+ },
+ Container::Big => {
+ if is_rela {
+ (bytes.pread_with::<reloc64::Rela>(0, le)?.into(), reloc64::SIZEOF_RELA)
+ } else {
+ (bytes.pread_with::<reloc64::Rel>(0, le)?.into(), reloc64::SIZEOF_REL)
+ }
+ }
+ };
+ Ok(reloc)
+ }
+ }
+
+ impl ctx::TryIntoCtx<RelocCtx> for Reloc {
+ type Error = crate::error::Error;
+ /// Writes the relocation into `bytes`
+ fn try_into_ctx(self, bytes: &mut [u8], (is_rela, Ctx {container, le}): RelocCtx) -> result::Result<usize, Self::Error> {
+ use scroll::Pwrite;
+ match container {
+ Container::Little => {
+ if is_rela {
+ let rela: reloc32::Rela = self.into();
+ Ok(bytes.pwrite_with(rela, 0, le)?)
+ } else {
+ let rel: reloc32::Rel = self.into();
+ Ok(bytes.pwrite_with(rel, 0, le)?)
+ }
+ },
+ Container::Big => {
+ if is_rela {
+ let rela: reloc64::Rela = self.into();
+ Ok(bytes.pwrite_with(rela, 0, le)?)
+ } else {
+ let rel: reloc64::Rel = self.into();
+ Ok(bytes.pwrite_with(rel, 0, le)?)
+ }
+ },
+ }
+ }
+ }
+
+    impl ctx::IntoCtx<(bool, Ctx)> for Reloc {
+        /// Writes the relocation into `bytes`
+        ///
+        /// Panics (via `unwrap`) if `bytes` is too small to hold the record;
+        /// use the `TryIntoCtx` impl for a fallible write.
+        fn into_ctx(self, bytes: &mut [u8], ctx: RelocCtx) {
+            use scroll::Pwrite;
+            bytes.pwrite_with(self, 0, ctx).unwrap();
+        }
+    }
+
+ impl fmt::Debug for Reloc {
+ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+ f.debug_struct("Reloc")
+ .field("r_offset", &format_args!("{:x}", self.r_offset))
+ .field("r_addend", &format_args!("{:x}", self.r_addend.unwrap_or(0)))
+ .field("r_sym", &self.r_sym)
+ .field("r_type", &self.r_type)
+ .finish()
+ }
+ }
+
+ #[derive(Default)]
+ /// An ELF section containing relocations, allowing lazy iteration over symbols.
+ pub struct RelocSection<'a> {
+ bytes: &'a [u8],
+ count: usize,
+ ctx: RelocCtx,
+ start: usize,
+ end: usize,
+ }
+
+ impl<'a> fmt::Debug for RelocSection<'a> {
+ fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
+ let len = self.bytes.len();
+ fmt.debug_struct("RelocSection")
+ .field("bytes", &len)
+ .field("range", &format!("{:#x}..{:#x}", self.start, self.end))
+ .field("count", &self.count)
+ .field("Relocations", &self.to_vec())
+ .finish()
+ }
+ }
+
+    impl<'a> RelocSection<'a> {
+        #[cfg(feature = "endian_fd")]
+        /// Parse a REL or RELA section of size `filesz` from `offset`.
+        pub fn parse(bytes: &'a [u8], offset: usize, filesz: usize, is_rela: bool, ctx: Ctx) -> crate::error::Result<RelocSection<'a>> {
+            // TODO: better error message when too large (see symtab implementation)
+            let bytes = if filesz != 0 {
+                bytes.pread_with::<&'a [u8]>(offset, filesz)?
+            } else {
+                &[]
+            };
+
+            Ok(RelocSection {
+                bytes: bytes,
+                // Floor division: trailing bytes smaller than one whole record are ignored.
+                count: filesz / Reloc::size(is_rela, ctx),
+                ctx: (is_rela, ctx),
+                start: offset,
+                end: offset + filesz,
+            })
+        }
+
+        /// Try to parse a single relocation from the binary, at `index`.
+        #[inline]
+        pub fn get(&self, index: usize) -> Option<Reloc> {
+            if index >= self.count {
+                None
+            } else {
+                // In-bounds by construction: `parse` guarantees `bytes` holds at
+                // least `count` whole records, so this read cannot fail.
+                Some(self.bytes.pread_with(index * Reloc::size_with(&self.ctx), self.ctx).unwrap())
+            }
+        }
+
+        /// The number of relocations in the section.
+        #[inline]
+        pub fn len(&self) -> usize {
+            self.count
+        }
+
+        /// Returns true if section has no relocations.
+        #[inline]
+        pub fn is_empty(&self) -> bool {
+            self.count == 0
+        }
+
+        /// Iterate over all relocations.
+        pub fn iter(&self) -> RelocIterator<'a> {
+            self.into_iter()
+        }
+
+        /// Parse all relocations into a vector.
+        pub fn to_vec(&self) -> Vec<Reloc> {
+            self.iter().collect()
+        }
+    }
+
+ impl<'a, 'b> IntoIterator for &'b RelocSection<'a> {
+ type Item = <RelocIterator<'a> as Iterator>::Item;
+ type IntoIter = RelocIterator<'a>;
+
+ #[inline]
+ fn into_iter(self) -> Self::IntoIter {
+ RelocIterator {
+ bytes: self.bytes,
+ offset: 0,
+ index: 0,
+ count: self.count,
+ ctx: self.ctx,
+ }
+ }
+ }
+
+ pub struct RelocIterator<'a> {
+ bytes: &'a [u8],
+ offset: usize,
+ index: usize,
+ count: usize,
+ ctx: RelocCtx,
+ }
+
+ impl<'a> fmt::Debug for RelocIterator<'a> {
+ fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
+ fmt.debug_struct("RelocIterator")
+ .field("bytes", &"<... redacted ...>")
+ .field("offset", &self.offset)
+ .field("index", &self.index)
+ .field("count", &self.count)
+ .field("ctx", &self.ctx)
+ .finish()
+ }
+ }
+
+    impl<'a> Iterator for RelocIterator<'a> {
+        type Item = Reloc;
+
+        #[inline]
+        fn next(&mut self) -> Option<Self::Item> {
+            if self.index >= self.count {
+                None
+            } else {
+                self.index += 1;
+                // `count` is derived from the section's byte length in
+                // `RelocSection::parse`, so for iterators obtained from a
+                // `RelocSection` this read stays in bounds and cannot panic.
+                Some(self.bytes.gread_with(&mut self.offset, self.ctx).unwrap())
+            }
+        }
+    }
+
+ impl<'a> ExactSizeIterator for RelocIterator<'a> {
+ #[inline]
+ fn len(&self) -> usize {
+ self.count - self.index
+ }
+ }
+} // end if_alloc
diff --git a/third_party/rust/goblin/src/elf/section_header.rs b/third_party/rust/goblin/src/elf/section_header.rs
new file mode 100644
index 0000000000..981706153c
--- /dev/null
+++ b/third_party/rust/goblin/src/elf/section_header.rs
@@ -0,0 +1,581 @@
+macro_rules! elf_section_header {
+ ($size:ident) => {
+ // XXX: Do not import scroll traits here.
+ // See: https://github.com/rust-lang/rust/issues/65090#issuecomment-538668155
+
+ #[repr(C)]
+ #[derive(Copy, Clone, Eq, PartialEq, Default)]
+ #[cfg_attr(
+ feature = "alloc",
+ derive(scroll::Pread, scroll::Pwrite, scroll::SizeWith)
+ )]
+ /// Section Headers are typically used by humans and static linkers for additional information or how to relocate the object
+ ///
+ /// **NOTE** section headers are strippable from a binary without any loss of portability/executability; _do not_ rely on them being there!
+ pub struct SectionHeader {
+ /// Section name (string tbl index)
+ pub sh_name: u32,
+ /// Section type
+ pub sh_type: u32,
+ /// Section flags
+ pub sh_flags: $size,
+ /// Section virtual addr at execution
+ pub sh_addr: $size,
+ /// Section file offset
+ pub sh_offset: $size,
+ /// Section size in bytes
+ pub sh_size: $size,
+ /// Link to another section
+ pub sh_link: u32,
+ /// Additional section information
+ pub sh_info: u32,
+ /// Section alignment
+ pub sh_addralign: $size,
+ /// Entry size if section holds table
+ pub sh_entsize: $size,
+ }
+
+ use plain;
+ // Declare that this is a plain type.
+ unsafe impl plain::Plain for SectionHeader {}
+
+ impl ::core::fmt::Debug for SectionHeader {
+ fn fmt(&self, f: &mut ::core::fmt::Formatter) -> ::core::fmt::Result {
+ f.debug_struct("SectionHeader")
+ .field("sh_name", &self.sh_name)
+ .field("sh_type", &sht_to_str(self.sh_type))
+ .field("sh_flags", &format_args!("0x{:x}", self.sh_flags))
+ .field("sh_addr", &format_args!("0x{:x}", self.sh_addr))
+ .field("sh_offset", &format_args!("0x{:x}", self.sh_offset))
+ .field("sh_size", &format_args!("0x{:x}", self.sh_size))
+ .field("sh_link", &format_args!("0x{:x}", self.sh_link))
+ .field("sh_info", &format_args!("0x{:x}", self.sh_info))
+ .field("sh_addralign", &format_args!("0x{:x}", self.sh_addralign))
+ .field("sh_entsize", &format_args!("0x{:x}", self.sh_entsize))
+ .finish()
+ }
+ }
+ };
+}
+
+/// Undefined section.
+pub const SHN_UNDEF: u32 = 0;
+/// Start of reserved indices.
+pub const SHN_LORESERVE: u32 = 0xff00;
+/// Start of processor-specific.
+pub const SHN_LOPROC: u32 = 0xff00;
+/// Order section before all others (Solaris).
+pub const SHN_BEFORE: u32 = 0xff00;
+/// Order section after all others (Solaris).
+pub const SHN_AFTER: u32 = 0xff01;
+/// End of processor-specific.
+pub const SHN_HIPROC: u32 = 0xff1f;
+/// Start of OS-specific.
+pub const SHN_LOOS: u32 = 0xff20;
+/// End of OS-specific.
+pub const SHN_HIOS: u32 = 0xff3f;
+/// Associated symbol is absolute.
+pub const SHN_ABS: u32 = 0xfff1;
+/// Associated symbol is common.
+pub const SHN_COMMON: u32 = 0xfff2;
+/// Index is in extra table.
+pub const SHN_XINDEX: u32 = 0xffff;
+/// End of reserved indices.
+pub const SHN_HIRESERVE: u32 = 0xffff;
+
+// === Legal values for sh_type (section type). ===
+/// Section header table entry unused.
+pub const SHT_NULL: u32 = 0;
+/// Program data.
+pub const SHT_PROGBITS: u32 = 1;
+/// Symbol table.
+pub const SHT_SYMTAB: u32 = 2;
+/// String table.
+pub const SHT_STRTAB: u32 = 3;
+/// Relocation entries with addends.
+pub const SHT_RELA: u32 = 4;
+/// Symbol hash table.
+pub const SHT_HASH: u32 = 5;
+/// Dynamic linking information.
+pub const SHT_DYNAMIC: u32 = 6;
+/// Notes.
+pub const SHT_NOTE: u32 = 7;
+/// Program space with no data (bss).
+pub const SHT_NOBITS: u32 = 8;
+/// Relocation entries, no addends.
+pub const SHT_REL: u32 = 9;
+/// Reserved.
+pub const SHT_SHLIB: u32 = 10;
+/// Dynamic linker symbol table.
+pub const SHT_DYNSYM: u32 = 11;
+/// Array of constructors.
+pub const SHT_INIT_ARRAY: u32 = 14;
+/// Array of destructors.
+pub const SHT_FINI_ARRAY: u32 = 15;
+/// Array of pre-constructors.
+pub const SHT_PREINIT_ARRAY: u32 = 16;
+/// Section group.
+pub const SHT_GROUP: u32 = 17;
+/// Extended section indeces.
+pub const SHT_SYMTAB_SHNDX: u32 = 18;
+/// Number of defined types.
+pub const SHT_NUM: u32 = 19;
+/// Start OS-specific.
+pub const SHT_LOOS: u32 = 0x6000_0000;
+/// Object attributes.
+pub const SHT_GNU_ATTRIBUTES: u32 = 0x6fff_fff5;
+/// GNU-style hash table.
+pub const SHT_GNU_HASH: u32 = 0x6fff_fff6;
+/// Prelink library list.
+pub const SHT_GNU_LIBLIST: u32 = 0x6fff_fff7;
+/// Checksum for DSO content.
+pub const SHT_CHECKSUM: u32 = 0x6fff_fff8;
+/// Sun-specific low bound.
+pub const SHT_LOSUNW: u32 = 0x6fff_fffa;
+pub const SHT_SUNW_MOVE: u32 = 0x6fff_fffa;
+pub const SHT_SUNW_COMDAT: u32 = 0x6fff_fffb;
+pub const SHT_SUNW_SYMINFO: u32 = 0x6fff_fffc;
+/// Version definition section.
+pub const SHT_GNU_VERDEF: u32 = 0x6fff_fffd;
+/// Version needs section.
+pub const SHT_GNU_VERNEED: u32 = 0x6fff_fffe;
+/// Version symbol table.
+pub const SHT_GNU_VERSYM: u32 = 0x6fff_ffff;
+/// Sun-specific high bound.
+pub const SHT_HISUNW: u32 = 0x6fff_ffff;
+/// End OS-specific type.
+pub const SHT_HIOS: u32 = 0x6fff_ffff;
+/// Start of processor-specific.
+pub const SHT_LOPROC: u32 = 0x7000_0000;
+/// X86-64 unwind information.
+pub const SHT_X86_64_UNWIND: u32 = 0x7000_0001;
+/// End of processor-specific.
+pub const SHT_HIPROC: u32 = 0x7fff_ffff;
+/// Start of application-specific.
+pub const SHT_LOUSER: u32 = 0x8000_0000;
+/// End of application-specific.
+pub const SHT_HIUSER: u32 = 0x8fff_ffff;
+
+// Legal values for sh_flags (section flags)
+/// Writable.
+pub const SHF_WRITE: u32 = 0x1;
+/// Occupies memory during execution.
+pub const SHF_ALLOC: u32 = 0x2;
+/// Executable.
+pub const SHF_EXECINSTR: u32 = 0x4;
+/// Might be merged.
+pub const SHF_MERGE: u32 = 0x10;
+/// Contains nul-terminated strings.
+pub const SHF_STRINGS: u32 = 0x20;
+/// `sh_info' contains SHT index.
+pub const SHF_INFO_LINK: u32 = 0x40;
+/// Preserve order after combining.
+pub const SHF_LINK_ORDER: u32 = 0x80;
+/// Non-standard OS specific handling required.
+pub const SHF_OS_NONCONFORMING: u32 = 0x100;
+/// Section is member of a group.
+pub const SHF_GROUP: u32 = 0x200;
+/// Section hold thread-local data.
+pub const SHF_TLS: u32 = 0x400;
+/// Section with compressed data.
+pub const SHF_COMPRESSED: u32 = 0x800;
+/// OS-specific..
+pub const SHF_MASKOS: u32 = 0x0ff0_0000;
+/// Processor-specific.
+pub const SHF_MASKPROC: u32 = 0xf000_0000;
+/// Special ordering requirement (Solaris).
+pub const SHF_ORDERED: u32 = 1 << 30;
+/// Number of "regular" section header flags
+pub const SHF_NUM_REGULAR_FLAGS: usize = 12;
+/// Section is excluded unless referenced or allocated (Solaris).
+pub const SHF_EXCLUDE: u32 = 0x80000000; // 1U << 31
+
+pub const SHF_FLAGS: [u32; SHF_NUM_REGULAR_FLAGS] = [
+ SHF_WRITE,
+ SHF_ALLOC,
+ SHF_EXECINSTR,
+ SHF_MERGE,
+ SHF_STRINGS,
+ SHF_INFO_LINK,
+ SHF_LINK_ORDER,
+ SHF_OS_NONCONFORMING,
+ SHF_GROUP,
+ SHF_TLS,
+ SHF_COMPRESSED,
+ SHF_ORDERED,
+];
+
+/// Convert an `sh_type` value into its symbolic `SHT_*` name,
+/// or `"UNKNOWN_SHT"` if the value is not recognized.
+pub fn sht_to_str(sht: u32) -> &'static str {
+    match sht {
+        SHT_NULL => "SHT_NULL",
+        SHT_PROGBITS => "SHT_PROGBITS",
+        SHT_SYMTAB => "SHT_SYMTAB",
+        SHT_STRTAB => "SHT_STRTAB",
+        SHT_RELA => "SHT_RELA",
+        SHT_HASH => "SHT_HASH",
+        SHT_DYNAMIC => "SHT_DYNAMIC",
+        SHT_NOTE => "SHT_NOTE",
+        SHT_NOBITS => "SHT_NOBITS",
+        SHT_REL => "SHT_REL",
+        SHT_SHLIB => "SHT_SHLIB",
+        SHT_DYNSYM => "SHT_DYNSYM",
+        SHT_INIT_ARRAY => "SHT_INIT_ARRAY",
+        SHT_FINI_ARRAY => "SHT_FINI_ARRAY",
+        SHT_PREINIT_ARRAY => "SHT_PREINIT_ARRAY",
+        SHT_GROUP => "SHT_GROUP",
+        SHT_SYMTAB_SHNDX => "SHT_SYMTAB_SHNDX",
+        SHT_NUM => "SHT_NUM",
+        SHT_LOOS => "SHT_LOOS",
+        SHT_GNU_ATTRIBUTES => "SHT_GNU_ATTRIBUTES",
+        SHT_GNU_HASH => "SHT_GNU_HASH",
+        SHT_GNU_LIBLIST => "SHT_GNU_LIBLIST",
+        SHT_CHECKSUM => "SHT_CHECKSUM",
+        SHT_SUNW_MOVE => "SHT_SUNW_MOVE",
+        SHT_SUNW_COMDAT => "SHT_SUNW_COMDAT",
+        SHT_SUNW_SYMINFO => "SHT_SUNW_SYMINFO",
+        SHT_GNU_VERDEF => "SHT_GNU_VERDEF",
+        SHT_GNU_VERNEED => "SHT_GNU_VERNEED",
+        // Note: SHT_GNU_VERSYM also covers the aliased values SHT_HISUNW/SHT_HIOS.
+        SHT_GNU_VERSYM => "SHT_GNU_VERSYM",
+        SHT_LOPROC => "SHT_LOPROC",
+        SHT_X86_64_UNWIND => "SHT_X86_64_UNWIND",
+        SHT_HIPROC => "SHT_HIPROC",
+        SHT_LOUSER => "SHT_LOUSER",
+        SHT_HIUSER => "SHT_HIUSER",
+        _ => "UNKNOWN_SHT",
+    }
+}
+
+/// Convert a single `SHF_*` flag value into its symbolic name.
+///
+/// This matches one exact flag value, not a bitmask — a combination of flags
+/// (e.g. `SHF_WRITE | SHF_ALLOC`) falls through to `"SHF_UNKNOWN"`.
+pub fn shf_to_str(shf: u32) -> &'static str {
+    match shf {
+        SHF_WRITE => "SHF_WRITE",
+        SHF_ALLOC => "SHF_ALLOC",
+        SHF_EXECINSTR => "SHF_EXECINSTR",
+        SHF_MERGE => "SHF_MERGE",
+        SHF_STRINGS => "SHF_STRINGS",
+        SHF_INFO_LINK => "SHF_INFO_LINK",
+        SHF_LINK_ORDER => "SHF_LINK_ORDER",
+        SHF_OS_NONCONFORMING => "SHF_OS_NONCONFORMING",
+        SHF_GROUP => "SHF_GROUP",
+        SHF_TLS => "SHF_TLS",
+        SHF_COMPRESSED => "SHF_COMPRESSED",
+        //SHF_MASKOS..SHF_MASKPROC => "SHF_OSFLAG",
+        SHF_ORDERED => "SHF_ORDERED",
+        _ => "SHF_UNKNOWN",
+    }
+}
+
+macro_rules! elf_section_header_std_impl { ($size:ty) => {
+
+ #[cfg(test)]
+ mod tests {
+ use super::*;
+ #[test]
+ fn size_of() {
+ assert_eq!(::std::mem::size_of::<SectionHeader>(), SIZEOF_SHDR);
+ }
+ }
+
+ if_alloc! {
+ use crate::elf::section_header::SectionHeader as ElfSectionHeader;
+
+ use plain::Plain;
+ use alloc::vec::Vec;
+
+ if_std! {
+ use crate::error::Result;
+
+ use std::fs::File;
+ use std::io::{Read, Seek};
+ use std::io::SeekFrom::Start;
+ }
+
+ impl From<SectionHeader> for ElfSectionHeader {
+ fn from(sh: SectionHeader) -> Self {
+ ElfSectionHeader {
+ sh_name: sh.sh_name as usize,
+ sh_type: sh.sh_type,
+ sh_flags: u64::from(sh.sh_flags),
+ sh_addr: u64::from(sh.sh_addr),
+ sh_offset: u64::from(sh.sh_offset),
+ sh_size: u64::from(sh.sh_size),
+ sh_link: sh.sh_link,
+ sh_info: sh.sh_info,
+ sh_addralign: u64::from(sh.sh_addralign),
+ sh_entsize: u64::from(sh.sh_entsize),
+ }
+ }
+ }
+ impl From<ElfSectionHeader> for SectionHeader {
+ fn from(sh: ElfSectionHeader) -> Self {
+ SectionHeader {
+ sh_name : sh.sh_name as u32,
+ sh_type : sh.sh_type,
+ sh_flags : sh.sh_flags as $size,
+ sh_addr : sh.sh_addr as $size,
+ sh_offset : sh.sh_offset as $size,
+ sh_size : sh.sh_size as $size,
+ sh_link : sh.sh_link,
+ sh_info : sh.sh_info,
+ sh_addralign: sh.sh_addralign as $size,
+ sh_entsize : sh.sh_entsize as $size,
+ }
+ }
+ }
+
+ impl SectionHeader {
+ // FIXME: > 65535 sections
+ pub fn from_bytes(bytes: &[u8], shnum: usize) -> Vec<SectionHeader> {
+ let mut shdrs = vec![SectionHeader::default(); shnum];
+ shdrs.copy_from_bytes(bytes).expect("buffer is too short for given number of entries");
+ shdrs
+ }
+
+ #[cfg(feature = "std")]
+ // FIXME: > 65535 sections
+ pub fn from_fd(fd: &mut File, offset: u64, shnum: usize) -> Result<Vec<SectionHeader>> {
+ let mut shdrs = vec![SectionHeader::default(); shnum];
+ fd.seek(Start(offset))?;
+ unsafe {
+ fd.read_exact(plain::as_mut_bytes(&mut *shdrs))?;
+ }
+ Ok(shdrs)
+ }
+ }
+ } // end if_alloc
+};}
+
+pub mod section_header32 {
+ pub use crate::elf::section_header::*;
+
+ elf_section_header!(u32);
+
+ pub const SIZEOF_SHDR: usize = 40;
+
+ elf_section_header_std_impl!(u32);
+}
+
+pub mod section_header64 {
+
+ pub use crate::elf::section_header::*;
+
+ elf_section_header!(u64);
+
+ pub const SIZEOF_SHDR: usize = 64;
+
+ elf_section_header_std_impl!(u64);
+}
+
+///////////////////////////////
+// Std/analysis/Unified Structs
+///////////////////////////////
+
+if_alloc! {
+ use crate::error;
+ use core::fmt;
+ use core::result;
+ use core::ops::Range;
+ use scroll::ctx;
+ use crate::container::{Container, Ctx};
+
+ #[cfg(feature = "endian_fd")]
+ use alloc::vec::Vec;
+
+ #[derive(Default, PartialEq, Clone)]
+ /// A unified SectionHeader - convertable to and from 32-bit and 64-bit variants
+ pub struct SectionHeader {
+ /// Section name (string tbl index)
+ pub sh_name: usize,
+ /// Section type
+ pub sh_type: u32,
+ /// Section flags
+ pub sh_flags: u64,
+ /// Section virtual addr at execution
+ pub sh_addr: u64,
+ /// Section file offset
+ pub sh_offset: u64,
+ /// Section size in bytes
+ pub sh_size: u64,
+ /// Link to another section
+ pub sh_link: u32,
+ /// Additional section information
+ pub sh_info: u32,
+ /// Section alignment
+ pub sh_addralign: u64,
+ /// Entry size if section holds table
+ pub sh_entsize: u64,
+ }
+
+    impl SectionHeader {
+        /// Return the size of the underlying section header, given a `container`
+        #[inline]
+        pub fn size(ctx: Ctx) -> usize {
+            use scroll::ctx::SizeWith;
+            Self::size_with(&ctx)
+        }
+        /// Create a new section header of type `SHT_PROGBITS` with `SHF_ALLOC`
+        /// flags and a default alignment of 512 bytes (`2 << 8`).
+        pub fn new() -> Self {
+            SectionHeader {
+                sh_name: 0,
+                sh_type: SHT_PROGBITS,
+                sh_flags: u64::from(SHF_ALLOC),
+                sh_addr: 0,
+                sh_offset: 0,
+                sh_size: 0,
+                sh_link: 0,
+                sh_info: 0,
+                sh_addralign: 2 << 8,
+                sh_entsize: 0,
+            }
+        }
+        /// Returns this section header's file offset range,
+        /// if the section occupies space in the file.
+        pub fn file_range(&self) -> Option<Range<usize>> {
+            // Sections with type SHT_NOBITS have no data in the file itself,
+            // they only exist in memory.
+            if self.sh_type == SHT_NOBITS {
+                None
+            } else {
+                Some(self.sh_offset as usize..(self.sh_offset as usize).saturating_add(self.sh_size as usize))
+            }
+        }
+        /// Returns this section header's virtual memory range
+        pub fn vm_range(&self) -> Range<usize> {
+            self.sh_addr as usize..(self.sh_addr as usize).saturating_add(self.sh_size as usize)
+        }
+        /// Parse `count` section headers from `bytes` at `offset`, using the given `ctx`
+        #[cfg(feature = "endian_fd")]
+        pub fn parse(bytes: &[u8], mut offset: usize, mut count: usize, ctx: Ctx) -> error::Result<Vec<SectionHeader>> {
+            use scroll::Pread;
+            // Zero offset means no section headers, not even the null section header.
+            if offset == 0 {
+                return Ok(Vec::new());
+            }
+            let empty_sh = bytes.gread_with::<SectionHeader>(&mut offset, ctx)?;
+            if count == 0 as usize {
+                // Zero count means either no section headers if offset is also zero (checked
+                // above), or the number of section headers overflows SHN_LORESERVE, in which
+                // case the count is stored in the sh_size field of the null section header.
+                count = empty_sh.sh_size as usize;
+            }
+
+            // Sanity check to avoid OOM
+            if count > bytes.len() / Self::size(ctx) {
+                return Err(error::Error::BufferTooShort(count, "section headers"));
+            }
+            let mut section_headers = Vec::with_capacity(count);
+            section_headers.push(empty_sh);
+            for _ in 1..count {
+                let shdr = bytes.gread_with(&mut offset, ctx)?;
+                section_headers.push(shdr);
+            }
+            Ok(section_headers)
+        }
+        /// Validate that this section's file extent fits in a file of `size` bytes.
+        ///
+        /// `SHT_NOBITS` and zero-size sections always pass since they occupy no
+        /// file space.
+        pub fn check_size(&self, size: usize) -> error::Result<()> {
+            if self.sh_type == SHT_NOBITS || self.sh_size == 0 {
+                return Ok(());
+            }
+            let (end, overflow) = self.sh_offset.overflowing_add(self.sh_size);
+            if overflow || end > size as u64 {
+                // NOTE(review): the labels read "size (...) + offset (...)" but the
+                // interpolated values are sh_offset then sh_size, so labels and
+                // values are swapped — confirm and fix the wording upstream.
+                let message = format!("Section {} size ({}) + offset ({}) is out of bounds. Overflowed: {}",
+                    self.sh_name, self.sh_offset, self.sh_size, overflow);
+                return Err(error::Error::Malformed(message));
+            }
+            let (_, overflow) = self.sh_addr.overflowing_add(self.sh_size);
+            if overflow {
+                // NOTE(review): same label/value swap as above (sh_addr vs. sh_size).
+                let message = format!("Section {} size ({}) + addr ({}) is out of bounds. Overflowed: {}",
+                    self.sh_name, self.sh_addr, self.sh_size, overflow);
+                return Err(error::Error::Malformed(message));
+            }
+            Ok(())
+        }
+        /// Whether this section holds relocation entries with addends.
+        ///
+        /// NOTE(review): only `SHT_RELA` is recognized; plain `SHT_REL` sections
+        /// return `false` — confirm this is the behavior callers expect.
+        pub fn is_relocation(&self) -> bool {
+            self.sh_type == SHT_RELA
+        }
+        /// Whether this section is allocated and executable.
+        // The `as u32` truncation is harmless: all defined SHF_* bits fit in 32 bits.
+        pub fn is_executable(&self) -> bool {
+            self.is_alloc() && self.sh_flags as u32 & SHF_EXECINSTR == SHF_EXECINSTR
+        }
+        /// Whether this section is allocated and writable.
+        pub fn is_writable(&self) -> bool {
+            self.is_alloc() && self.sh_flags as u32 & SHF_WRITE == SHF_WRITE
+        }
+        /// Whether this section occupies memory during execution (`SHF_ALLOC`).
+        pub fn is_alloc(&self) -> bool {
+            self.sh_flags as u32 & SHF_ALLOC == SHF_ALLOC
+        }
+    }
+
+ impl fmt::Debug for SectionHeader {
+ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+ f.debug_struct("SectionHeader")
+ .field("sh_name", &self.sh_name)
+ .field("sh_type", &sht_to_str(self.sh_type))
+ .field("sh_flags", &format_args!("0x{:x}", self.sh_flags))
+ .field("sh_addr", &format_args!("0x{:x}", self.sh_addr))
+ .field("sh_offset", &format_args!("0x{:x}", self.sh_offset))
+ .field("sh_size", &format_args!("0x{:x}", self.sh_size))
+ .field("sh_link", &format_args!("0x{:x}", self.sh_link))
+ .field("sh_info", &format_args!("0x{:x}", self.sh_info))
+ .field("sh_addralign", &format_args!("0x{:x}", self.sh_addralign))
+ .field("sh_entsize", &format_args!("0x{:x}", self.sh_entsize))
+ .finish()
+ }
+ }
+
+ impl ctx::SizeWith<Ctx> for SectionHeader {
+ fn size_with( &Ctx { container, .. }: &Ctx) -> usize {
+ match container {
+ Container::Little => {
+ section_header32::SIZEOF_SHDR
+ },
+ Container::Big => {
+ section_header64::SIZEOF_SHDR
+ },
+ }
+ }
+ }
+
+ impl<'a> ctx::TryFromCtx<'a, Ctx> for SectionHeader {
+ type Error = crate::error::Error;
+ fn try_from_ctx(bytes: &'a [u8], Ctx {container, le}: Ctx) -> result::Result<(Self, usize), Self::Error> {
+ use scroll::Pread;
+ let res = match container {
+ Container::Little => {
+ (bytes.pread_with::<section_header32::SectionHeader>(0, le)?.into(), section_header32::SIZEOF_SHDR)
+ },
+ Container::Big => {
+ (bytes.pread_with::<section_header64::SectionHeader>(0, le)?.into(), section_header64::SIZEOF_SHDR)
+ }
+ };
+ Ok(res)
+ }
+ }
+
+ impl ctx::TryIntoCtx<Ctx> for SectionHeader {
+ type Error = crate::error::Error;
+ fn try_into_ctx(self, bytes: &mut [u8], Ctx {container, le}: Ctx) -> result::Result<usize, Self::Error> {
+ use scroll::Pwrite;
+ match container {
+ Container::Little => {
+ let shdr: section_header32::SectionHeader = self.into();
+ Ok(bytes.pwrite_with(shdr, 0, le)?)
+ },
+ Container::Big => {
+ let shdr: section_header64::SectionHeader = self.into();
+ Ok(bytes.pwrite_with(shdr, 0, le)?)
+ }
+ }
+ }
+ }
+ impl ctx::IntoCtx<Ctx> for SectionHeader {
+ fn into_ctx(self, bytes: &mut [u8], Ctx {container, le}: Ctx) {
+ use scroll::Pwrite;
+ match container {
+ Container::Little => {
+ let shdr: section_header32::SectionHeader = self.into();
+ bytes.pwrite_with(shdr, 0, le).unwrap();
+ },
+ Container::Big => {
+ let shdr: section_header64::SectionHeader = self.into();
+ bytes.pwrite_with(shdr, 0, le).unwrap();
+ }
+ }
+ }
+ }
+} // end if_alloc
diff --git a/third_party/rust/goblin/src/elf/sym.rs b/third_party/rust/goblin/src/elf/sym.rs
new file mode 100644
index 0000000000..a13e7abcfd
--- /dev/null
+++ b/third_party/rust/goblin/src/elf/sym.rs
@@ -0,0 +1,607 @@
+/// === Sym bindings ===
+/// Local symbol.
+pub const STB_LOCAL: u8 = 0;
+/// Global symbol.
+pub const STB_GLOBAL: u8 = 1;
+/// Weak symbol.
+pub const STB_WEAK: u8 = 2;
+/// Number of defined types..
+pub const STB_NUM: u8 = 3;
+/// Start of OS-specific.
+pub const STB_LOOS: u8 = 10;
+/// Unique symbol..
+pub const STB_GNU_UNIQUE: u8 = 10;
+/// End of OS-specific.
+pub const STB_HIOS: u8 = 12;
+/// Start of processor-specific.
+pub const STB_LOPROC: u8 = 13;
+/// End of processor-specific.
+pub const STB_HIPROC: u8 = 15;
+
+/// === Sym types ===
+/// Symbol type is unspecified.
+pub const STT_NOTYPE: u8 = 0;
+/// Symbol is a data object.
+pub const STT_OBJECT: u8 = 1;
+/// Symbol is a code object.
+pub const STT_FUNC: u8 = 2;
+/// Symbol associated with a section.
+pub const STT_SECTION: u8 = 3;
+/// Symbol's name is file name.
+pub const STT_FILE: u8 = 4;
+/// Symbol is a common data object.
+pub const STT_COMMON: u8 = 5;
+/// Symbol is thread-local data object.
+pub const STT_TLS: u8 = 6;
+/// Number of defined types.
+pub const STT_NUM: u8 = 7;
+/// Start of OS-specific.
+pub const STT_LOOS: u8 = 10;
+/// Symbol is indirect code object.
+pub const STT_GNU_IFUNC: u8 = 10;
+/// End of OS-specific.
+pub const STT_HIOS: u8 = 12;
+/// Start of processor-specific.
+pub const STT_LOPROC: u8 = 13;
+/// End of processor-specific.
+pub const STT_HIPROC: u8 = 15;
+
+/// === Sym visibility ===
+/// Default: Visibility is specified by the symbol's binding type
+pub const STV_DEFAULT: u8 = 0;
+/// Internal: use of this attribute is currently reserved.
+pub const STV_INTERNAL: u8 = 1;
+/// Hidden: Not visible to other components, necessarily protected. Binding scope becomes local
+/// when the object is included in an executable or shared object.
+pub const STV_HIDDEN: u8 = 2;
+/// Protected: Symbol defined in current component is visible in other components, but cannot be preempted.
+/// Any reference from within the defining component must be resolved to the definition in that
+/// component.
+pub const STV_PROTECTED: u8 = 3;
+/// Exported: ensures a symbol remains global, cannot be demoted or eliminated by any other symbol
+/// visibility technique.
+pub const STV_EXPORTED: u8 = 4;
+/// Singleton: ensures a symbol remains global, and that a single instance of the definition is
+/// bound to by all references within a process. Cannot be demoted or eliminated.
+pub const STV_SINGLETON: u8 = 5;
+/// Eliminate: extends the hidden attribute. Not written in any symbol table of a dynamic
+/// executable or shared object.
+pub const STV_ELIMINATE: u8 = 6;
+
+/// Get the ST bind.
+///
+/// This is the first four bits of the "info" byte.
+#[inline]
+pub fn st_bind(info: u8) -> u8 {
+ info >> 4
+}
+
+/// Get the ST type.
+///
+/// This is the last four bits of the "info" byte.
+#[inline]
+pub fn st_type(info: u8) -> u8 {
+ info & 0xf
+}
+
+/// Get the ST visibility.
+///
+/// This is the last three bits of the "other" byte.
+#[inline]
+pub fn st_visibility(other: u8) -> u8 {
+ other & 0x7
+}
+
+/// Is this information defining an import?
+///
+/// NOTE(review): unlike `Sym::is_import`, only `STB_GLOBAL` binds count here —
+/// weak (`STB_WEAK`) undefined symbols are not treated as imports. Confirm intended.
+#[inline]
+pub fn is_import(info: u8, value: u64) -> bool {
+    let bind = st_bind(info);
+    bind == STB_GLOBAL && value == 0
+}
+
+/// Convenience function to get the &'static str type from the symbols `st_info`.
+#[inline]
+pub fn get_type(info: u8) -> &'static str {
+ type_to_str(st_type(info))
+}
+
+/// Get the string for some bind.
+#[inline]
+pub fn bind_to_str(typ: u8) -> &'static str {
+ match typ {
+ STB_LOCAL => "LOCAL",
+ STB_GLOBAL => "GLOBAL",
+ STB_WEAK => "WEAK",
+ STB_NUM => "NUM",
+ STB_GNU_UNIQUE => "GNU_UNIQUE",
+ _ => "UNKNOWN_STB",
+ }
+}
+
+/// Get the string for some type.
+#[inline]
+pub fn type_to_str(typ: u8) -> &'static str {
+ match typ {
+ STT_NOTYPE => "NOTYPE",
+ STT_OBJECT => "OBJECT",
+ STT_FUNC => "FUNC",
+ STT_SECTION => "SECTION",
+ STT_FILE => "FILE",
+ STT_COMMON => "COMMON",
+ STT_TLS => "TLS",
+ STT_NUM => "NUM",
+ STT_GNU_IFUNC => "GNU_IFUNC",
+ _ => "UNKNOWN_STT",
+ }
+}
+
+/// Get the string for some visibility
+#[inline]
+pub fn visibility_to_str(typ: u8) -> &'static str {
+ match typ {
+ STV_DEFAULT => "DEFAULT",
+ STV_INTERNAL => "INTERNAL",
+ STV_HIDDEN => "HIDDEN",
+ STV_PROTECTED => "PROTECTED",
+ STV_EXPORTED => "EXPORTED",
+ STV_SINGLETON => "SINGLETON",
+ STV_ELIMINATE => "ELIMINATE",
+ _ => "UNKNOWN_STV",
+ }
+}
+
+macro_rules! elf_sym_std_impl {
+ ($size:ty) => {
+ #[cfg(test)]
+ mod tests {
+ use super::*;
+ #[test]
+ fn size_of() {
+ assert_eq!(::std::mem::size_of::<Sym>(), SIZEOF_SYM);
+ }
+ }
+
+ use crate::elf::sym::Sym as ElfSym;
+
+ use core::fmt;
+ use core::slice;
+
+ impl Sym {
+ /// Checks whether this `Sym` has `STB_GLOBAL`/`STB_WEAK` bind and a `st_value` of 0
+ #[inline]
+ pub fn is_import(&self) -> bool {
+ let bind = self.st_info >> 4;
+ (bind == STB_GLOBAL || bind == STB_WEAK) && self.st_value == 0
+ }
+ /// Checks whether this `Sym` has type `STT_FUNC`
+ #[inline]
+ pub fn is_function(&self) -> bool {
+ st_type(self.st_info) == STT_FUNC
+ }
+ }
+
+ impl From<Sym> for ElfSym {
+ #[inline]
+ fn from(sym: Sym) -> Self {
+ ElfSym {
+ st_name: sym.st_name as usize,
+ st_info: sym.st_info,
+ st_other: sym.st_other,
+ st_shndx: sym.st_shndx as usize,
+ st_value: u64::from(sym.st_value),
+ st_size: u64::from(sym.st_size),
+ }
+ }
+ }
+
+ impl From<ElfSym> for Sym {
+ #[inline]
+ fn from(sym: ElfSym) -> Self {
+ Sym {
+ st_name: sym.st_name as u32,
+ st_info: sym.st_info,
+ st_other: sym.st_other,
+ st_shndx: sym.st_shndx as u16,
+ st_value: sym.st_value as $size,
+ st_size: sym.st_size as $size,
+ }
+ }
+ }
+
+ impl fmt::Debug for Sym {
+ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+ let bind = st_bind(self.st_info);
+ let typ = st_type(self.st_info);
+ let vis = st_visibility(self.st_other);
+ f.debug_struct("Sym")
+ .field("st_name", &self.st_name)
+ .field("st_value", &format_args!("{:x}", self.st_value))
+ .field("st_size", &self.st_size)
+ .field(
+ "st_info",
+ &format_args!(
+ "{:x} {} {}",
+ self.st_info,
+ bind_to_str(bind),
+ type_to_str(typ)
+ ),
+ )
+ .field(
+ "st_other",
+ &format_args!("{} {}", self.st_other, visibility_to_str(vis)),
+ )
+ .field("st_shndx", &self.st_shndx)
+ .finish()
+ }
+ }
+
+ /// # Safety
+ ///
+ /// This function creates a `Sym` slice directly from a raw pointer
+ #[inline]
+ pub unsafe fn from_raw<'a>(symp: *const Sym, count: usize) -> &'a [Sym] {
+ slice::from_raw_parts(symp, count)
+ }
+
+        if_std! {
+            use crate::error::Result;
+
+            use std::fs::File;
+            use std::io::{Read, Seek};
+            use std::io::SeekFrom::Start;
+
+            /// Read `count` symbols from `fd` starting at byte `offset`.
+            pub fn from_fd(fd: &mut File, offset: usize, count: usize) -> Result<Vec<Sym>> {
+                // TODO: AFAIK this shouldn't work, since i pass in a byte size...
+                let mut syms = vec![Sym::default(); count];
+                fd.seek(Start(offset as u64))?;
+                // SAFETY: `Sym` is `plain::Plain`, so any byte pattern is a valid
+                // value and filling the Vec's backing storage from the file is sound.
+                unsafe {
+                    fd.read_exact(plain::as_mut_bytes(&mut *syms))?;
+                }
+                // NOTE(review): `dedup` removes only *consecutive* duplicates —
+                // confirm this pruning is intended for symbol tables.
+                syms.dedup();
+                Ok(syms)
+            }
+        }
+ };
+}
+
+#[cfg(feature = "alloc")]
+use scroll::{Pread, Pwrite, SizeWith};
+
+pub mod sym32 {
+ pub use crate::elf::sym::*;
+
+ #[repr(C)]
+ #[derive(Clone, Copy, PartialEq, Default)]
+ #[cfg_attr(feature = "alloc", derive(Pread, Pwrite, SizeWith))]
+ /// 32-bit Sym - used for both static and dynamic symbol information in a binary
+ pub struct Sym {
+ /// Symbol name (string tbl index)
+ pub st_name: u32,
+ /// Symbol value
+ pub st_value: u32,
+ /// Symbol size
+ pub st_size: u32,
+ /// Symbol type and binding
+ pub st_info: u8,
+ /// Symbol visibility
+ pub st_other: u8,
+ /// Section index
+ pub st_shndx: u16,
+ }
+
+ // Declare that the type is plain.
+ unsafe impl plain::Plain for Sym {}
+
+ pub const SIZEOF_SYM: usize = 4 + 1 + 1 + 2 + 4 + 4;
+
+ elf_sym_std_impl!(u32);
+}
+
+pub mod sym64 {
+ pub use crate::elf::sym::*;
+
+ #[repr(C)]
+ #[derive(Clone, Copy, PartialEq, Default)]
+ #[cfg_attr(feature = "alloc", derive(Pread, Pwrite, SizeWith))]
+ /// 64-bit Sym - used for both static and dynamic symbol information in a binary
+ pub struct Sym {
+ /// Symbol name (string tbl index)
+ pub st_name: u32,
+ /// Symbol type and binding
+ pub st_info: u8,
+ /// Symbol visibility
+ pub st_other: u8,
+ /// Section index
+ pub st_shndx: u16,
+ /// Symbol value
+ pub st_value: u64,
+ /// Symbol size
+ pub st_size: u64,
+ }
+
+ // Declare that the type is plain.
+ unsafe impl plain::Plain for Sym {}
+
+ pub const SIZEOF_SYM: usize = 4 + 1 + 1 + 2 + 8 + 8;
+
+ elf_sym_std_impl!(u64);
+}
+
+use crate::container::{Container, Ctx};
+#[cfg(feature = "alloc")]
+use crate::error::Result;
+#[cfg(feature = "alloc")]
+use alloc::vec::Vec;
+use core::fmt;
+use scroll::ctx;
+use scroll::ctx::SizeWith;
+
+#[derive(Clone, Copy, PartialEq, Default)]
+/// A unified Sym definition - convertible to and from 32-bit and 64-bit variants
+pub struct Sym {
+ pub st_name: usize,
+ pub st_info: u8,
+ pub st_other: u8,
+ pub st_shndx: usize,
+ pub st_value: u64,
+ pub st_size: u64,
+}
+
+impl Sym {
+    /// Return the serialized byte size of a symbol for the given `container`.
+    #[inline]
+    pub fn size(container: Container) -> usize {
+        Self::size_with(&Ctx::from(container))
+    }
+    /// Checks whether this `Sym` has `STB_GLOBAL`/`STB_WEAK` bind and a `st_value` of 0
+    #[inline]
+    pub fn is_import(&self) -> bool {
+        let bind = self.st_bind();
+        (bind == STB_GLOBAL || bind == STB_WEAK) && self.st_value == 0
+    }
+    /// Checks whether this `Sym` has type `STT_FUNC`
+    #[inline]
+    pub fn is_function(&self) -> bool {
+        st_type(self.st_info) == STT_FUNC
+    }
+    /// Get the ST bind.
+    ///
+    /// This is the top four bits of the "info" byte.
+    #[inline]
+    pub fn st_bind(&self) -> u8 {
+        self.st_info >> 4
+    }
+    /// Get the ST type.
+    ///
+    /// This is the low four bits of the "info" byte.
+    #[inline]
+    pub fn st_type(&self) -> u8 {
+        st_type(self.st_info)
+    }
+    /// Get the ST visibility.
+    ///
+    /// This is the low three bits of the "other" byte.
+    #[inline]
+    pub fn st_visibility(&self) -> u8 {
+        st_visibility(self.st_other)
+    }
+    #[cfg(feature = "endian_fd")]
+    /// Parse `count` vector of ELF symbols from `offset`
+    pub fn parse(bytes: &[u8], mut offset: usize, count: usize, ctx: Ctx) -> Result<Vec<Sym>> {
+        // Reject counts that cannot possibly fit in `bytes`, so a corrupt
+        // header cannot trigger a huge up-front allocation.
+        if count > bytes.len() / Sym::size_with(&ctx) {
+            return Err(crate::error::Error::BufferTooShort(count, "symbols"));
+        }
+        let mut syms = Vec::with_capacity(count);
+        for _ in 0..count {
+            let sym = bytes.gread_with(&mut offset, ctx)?;
+            syms.push(sym);
+        }
+        Ok(syms)
+    }
+}
+
+// Human-friendly Debug: decodes `st_info`/`st_other` into bind/type/visibility
+// strings rather than printing raw bytes.
+impl fmt::Debug for Sym {
+ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+ let bind = self.st_bind();
+ let typ = self.st_type();
+ let vis = self.st_visibility();
+ f.debug_struct("Sym")
+ .field("st_name", &self.st_name)
+ .field(
+ "st_info",
+ &format_args!(
+ "0x{:x} {} {}",
+ self.st_info,
+ bind_to_str(bind),
+ type_to_str(typ)
+ ),
+ )
+ .field(
+ "st_other",
+ &format_args!("{} {}", self.st_other, visibility_to_str(vis)),
+ )
+ .field("st_shndx", &self.st_shndx)
+ .field("st_value", &format_args!("0x{:x}", self.st_value))
+ .field("st_size", &self.st_size)
+ .finish()
+ }
+}
+
+impl ctx::SizeWith<Ctx> for Sym {
+ #[inline]
+ fn size_with(&Ctx { container, .. }: &Ctx) -> usize {
+ // `Container` selects the object class here: `Little` = 32-bit layout,
+ // `Big` = 64-bit layout - it is unrelated to endianness.
+ match container {
+ Container::Little => sym32::SIZEOF_SYM,
+ Container::Big => sym64::SIZEOF_SYM,
+ }
+ }
+}
+
+if_alloc! {
+ use core::result;
+
+ // Read a unified `Sym` by parsing the raw 32- or 64-bit representation
+ // selected by the container class, then widening via `From`.
+ impl<'a> ctx::TryFromCtx<'a, Ctx> for Sym {
+ type Error = crate::error::Error;
+ #[inline]
+ fn try_from_ctx(bytes: &'a [u8], Ctx { container, le}: Ctx) -> result::Result<(Self, usize), Self::Error> {
+ let sym = match container {
+ Container::Little => {
+ (bytes.pread_with::<sym32::Sym>(0, le)?.into(), sym32::SIZEOF_SYM)
+ },
+ Container::Big => {
+ (bytes.pread_with::<sym64::Sym>(0, le)?.into(), sym64::SIZEOF_SYM)
+ }
+ };
+ Ok(sym)
+ }
+ }
+
+ // Fallible write: narrow the unified `Sym` to the container's raw layout and
+ // serialize it at offset 0 of `bytes`.
+ impl ctx::TryIntoCtx<Ctx> for Sym {
+ type Error = crate::error::Error;
+ #[inline]
+ fn try_into_ctx(self, bytes: &mut [u8], Ctx {container, le}: Ctx) -> result::Result<usize, Self::Error> {
+ match container {
+ Container::Little => {
+ let sym: sym32::Sym = self.into();
+ Ok(bytes.pwrite_with(sym, 0, le)?)
+ },
+ Container::Big => {
+ let sym: sym64::Sym = self.into();
+ Ok(bytes.pwrite_with(sym, 0, le)?)
+ }
+ }
+ }
+ }
+
+ // Infallible write variant; panics (via `unwrap`) if `bytes` is too small to
+ // hold the raw symbol entry.
+ impl ctx::IntoCtx<Ctx> for Sym {
+ #[inline]
+ fn into_ctx(self, bytes: &mut [u8], Ctx {container, le}: Ctx) {
+ match container {
+ Container::Little => {
+ let sym: sym32::Sym = self.into();
+ bytes.pwrite_with(sym, 0, le).unwrap();
+ },
+ Container::Big => {
+ let sym: sym64::Sym = self.into();
+ bytes.pwrite_with(sym, 0, le).unwrap();
+ }
+ }
+ }
+ }
+}
+
+if_alloc! {
+ #[derive(Default)]
+ /// An ELF symbol table, allowing lazy iteration over symbols
+ pub struct Symtab<'a> {
+ // Exactly `count * Sym::size_with(&ctx)` bytes of raw table data
+ // (sliced to size in `parse`), so in-bounds reads cannot fail.
+ bytes: &'a [u8],
+ // Number of symbol entries in `bytes`.
+ count: usize,
+ // Container class + endianness used to decode each entry.
+ ctx: Ctx,
+ // File offsets of the table; only used for the `Debug` representation.
+ start: usize,
+ end: usize,
+ }
+
+ impl<'a> fmt::Debug for Symtab<'a> {
+ fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
+ let len = self.bytes.len();
+ // Note: `to_vec()` eagerly parses every symbol, so debug-printing a
+ // large table is not cheap.
+ fmt.debug_struct("Symtab")
+ .field("bytes", &len)
+ .field("range", &format_args!("{:#x}..{:#x}", self.start, self.end))
+ .field("count", &self.count)
+ .field("Symbols", &self.to_vec())
+ .finish()
+ }
+ }
+
+ impl<'a> Symtab<'a> {
+ /// Parse a table of `count` ELF symbols from `offset`.
+ ///
+ /// Errors if `count * entry_size` overflows or the slice at
+ /// `offset..offset + size` is out of bounds.
+ pub fn parse(bytes: &'a [u8], offset: usize, count: usize, ctx: Ctx) -> Result<Symtab<'a>> {
+ // checked_mul guards against overflow from a corrupt `count`.
+ let size = count
+ .checked_mul(Sym::size_with(&ctx))
+ .ok_or_else(|| crate::error::Error::Malformed(
+ format!("Too many ELF symbols (offset {:#x}, count {})", offset, count)
+ ))?;
+ // TODO: make this a better error message when too large
+ let bytes = bytes.pread_with(offset, size)?;
+ Ok(Symtab { bytes, count, ctx, start: offset, end: offset+size })
+ }
+
+ /// Try to parse a single symbol from the binary, at `index`.
+ #[inline]
+ pub fn get(&self, index: usize) -> Option<Sym> {
+ if index >= self.count {
+ None
+ } else {
+ // unwrap is safe: `parse` sized `bytes` to hold exactly `count`
+ // entries and `index` was bounds-checked above.
+ Some(self.bytes.pread_with(index * Sym::size_with(&self.ctx), self.ctx).unwrap())
+ }
+ }
+
+ /// The number of symbols in the table.
+ #[inline]
+ pub fn len(&self) -> usize {
+ self.count
+ }
+
+ /// Returns true if table has no symbols.
+ #[inline]
+ pub fn is_empty(&self) -> bool {
+ self.count == 0
+ }
+
+ /// Iterate over all symbols.
+ #[inline]
+ pub fn iter(&self) -> SymIterator<'a> {
+ self.into_iter()
+ }
+
+ /// Parse all symbols into a vector.
+ pub fn to_vec(&self) -> Vec<Sym> {
+ self.iter().collect()
+ }
+ }
+
+ impl<'a, 'b> IntoIterator for &'b Symtab<'a> {
+ type Item = <SymIterator<'a> as Iterator>::Item;
+ type IntoIter = SymIterator<'a>;
+
+ #[inline]
+ fn into_iter(self) -> Self::IntoIter {
+ SymIterator {
+ bytes: self.bytes,
+ offset: 0,
+ index: 0,
+ count: self.count,
+ ctx: self.ctx,
+ }
+ }
+ }
+
+ /// An iterator over symbols in an ELF symbol table
+ pub struct SymIterator<'a> {
+ bytes: &'a [u8],
+ offset: usize,
+ index: usize,
+ count: usize,
+ ctx: Ctx,
+ }
+
+ impl<'a> Iterator for SymIterator<'a> {
+ type Item = Sym;
+
+ #[inline]
+ fn next(&mut self) -> Option<Self::Item> {
+ if self.index >= self.count {
+ None
+ } else {
+ self.index += 1;
+ // unwrap is safe: `Symtab::parse` sized `bytes` to exactly
+ // `count` entries, and `index` was bounds-checked above.
+ Some(self.bytes.gread_with(&mut self.offset, self.ctx).unwrap())
+ }
+ }
+ }
+
+ impl<'a> ExactSizeIterator for SymIterator<'a> {
+ #[inline]
+ fn len(&self) -> usize {
+ self.count - self.index
+ }
+ }
+} // end if_alloc
diff --git a/third_party/rust/goblin/src/elf/symver.rs b/third_party/rust/goblin/src/elf/symver.rs
new file mode 100644
index 0000000000..0dd55e3d25
--- /dev/null
+++ b/third_party/rust/goblin/src/elf/symver.rs
@@ -0,0 +1,880 @@
+//! Symbol versioning
+//!
+//! Implementation of the GNU symbol versioning extension according to
+//! [LSB Core Specification - Symbol Versioning][lsb-symver].
+//!
+//! # Examples
+//!
+//! List the dependencies of an ELF file that have [version needed][lsb-verneed] information along
+//! with the versions needed for each dependency.
+//! ```rust
+//! use goblin::error::Error;
+//!
+//! pub fn show_verneed(bytes: &[u8]) -> Result<(), Error> {
+//! let binary = goblin::elf::Elf::parse(&bytes)?;
+//!
+//! if let Some(verneed) = binary.verneed {
+//! for need_file in verneed.iter() {
+//! println!(
+//! "Depend on {:?} with version(s):",
+//! binary.dynstrtab.get_at(need_file.vn_file)
+//! );
+//! for need_ver in need_file.iter() {
+//! println!("{:?}", binary.dynstrtab.get_at(need_ver.vna_name));
+//! }
+//! }
+//! }
+//!
+//! Ok(())
+//! }
+//! ```
+//!
+//! List the [version defined][lsb-verdef] information of an ELF file, effectively listing the versions
+//! defined by this ELF file.
+//! ```rust
+//! use goblin::error::Error;
+//!
+//! pub fn show_verdef(bytes: &[u8]) -> Result<(), Error> {
+//! let binary = goblin::elf::Elf::parse(&bytes)?;
+//!
+//! if let Some(verdef) = &binary.verdef {
+//! for def in verdef.iter() {
+//! for (n, aux) in def.iter().enumerate() {
+//! let name = binary.dynstrtab.get_at(aux.vda_name);
+//! match n {
+//! 0 => print!("Name: {:?}", name),
+//! 1 => print!(" Parent: {:?}", name),
+//! _ => print!(", {:?}", name),
+//! }
+//! }
+//! print!("\n");
+//! }
+//! }
+//!
+//! Ok(())
+//! }
+//! ```
+//!
+//! [lsb-symver]: https://refspecs.linuxbase.org/LSB_5.0.0/LSB-Core-generic/LSB-Core-generic/symversion.html
+//! [lsb-verneed]: https://refspecs.linuxbase.org/LSB_5.0.0/LSB-Core-generic/LSB-Core-generic/symversion.html#SYMVERRQMTS
+//! [lsb-verdef]: https://refspecs.linuxbase.org/LSB_5.0.0/LSB-Core-generic/LSB-Core-generic/symversion.html#SYMVERDEFS
+
+use crate::container;
+use crate::elf::section_header::{SectionHeader, SHT_GNU_VERDEF, SHT_GNU_VERNEED, SHT_GNU_VERSYM};
+use crate::error::Result;
+use core::iter::FusedIterator;
+use scroll::Pread;
+
+/******************
+ * ELF Constants *
+ ******************/
+
+// Versym constants.
+//
+// A `vs_val` of 0 or 1 is local/global; bit 15 ([`VERSYM_HIDDEN`]) is a flag and
+// the remaining bits ([`VERSYM_VERSION`]) hold the version index.
+
+/// Constant describing a local symbol, see [`Versym::is_local`].
+pub const VER_NDX_LOCAL: u16 = 0;
+/// Constant describing a global symbol, see [`Versym::is_global`].
+pub const VER_NDX_GLOBAL: u16 = 1;
+/// Bitmask to check hidden bit, see [`Versym::is_hidden`].
+pub const VERSYM_HIDDEN: u16 = 0x8000;
+/// Bitmask to get version information, see [`Versym::version`].
+pub const VERSYM_VERSION: u16 = 0x7fff;
+
+// Verdef constants.
+// NOTE(review): per the LSB spec these flag bits are also used in
+// `Vernaux::vna_flags` - confirm before relying on that here.
+
+/// Bitmask to check `base` flag in [`Verdef::vd_flags`].
+pub const VER_FLG_BASE: u16 = 0x1;
+/// Bitmask to check `weak` flag in [`Verdef::vd_flags`].
+pub const VER_FLG_WEAK: u16 = 0x2;
+/// Bitmask to check `info` flag in [`Verdef::vd_flags`].
+pub const VER_FLG_INFO: u16 = 0x4;
+
+/********************
+ * ELF Structures *
+ ********************/
+
+// Raw on-disk layouts. These are read with `Pread` using only the endianness
+// from `Ctx.le`: every field has the same width in 32- and 64-bit ELF, so a
+// single struct serves both container classes.
+
+/// An ELF `Symbol Version` entry.
+///
+/// https://refspecs.linuxbase.org/LSB_5.0.0/LSB-Core-generic/LSB-Core-generic/symversion.html#SYMVERTBL
+#[repr(C)]
+#[derive(Debug, Pread)]
+struct ElfVersym {
+ vs_val: u16,
+}
+
+/// An ELF `Version Definition` entry Elfxx_Verdef.
+///
+/// `vd_aux` and `vd_next` are byte offsets relative to the start of this entry
+/// (see the offset arithmetic in `VerdefIter::next`).
+///
+/// https://refspecs.linuxbase.org/LSB_5.0.0/LSB-Core-generic/LSB-Core-generic/symversion.html#VERDEFENTRIES
+#[repr(C)]
+#[derive(Debug, Pread)]
+struct ElfVerdef {
+ vd_version: u16,
+ vd_flags: u16,
+ vd_ndx: u16,
+ vd_cnt: u16,
+ vd_hash: u32,
+ vd_aux: u32,
+ vd_next: u32,
+}
+
+/// An ELF `Version Definition Auxiliary` entry Elfxx_Verdaux.
+///
+/// https://refspecs.linuxbase.org/LSB_5.0.0/LSB-Core-generic/LSB-Core-generic/symversion.html#VERDEFEXTS
+#[repr(C)]
+#[derive(Debug, Pread)]
+struct ElfVerdaux {
+ vda_name: u32,
+ vda_next: u32,
+}
+
+/// An ELF `Version Need` entry Elfxx_Verneed.
+///
+/// `vn_aux` and `vn_next` are byte offsets relative to the start of this entry
+/// (see the offset arithmetic in `VerneedIter::next`).
+///
+/// https://refspecs.linuxbase.org/LSB_5.0.0/LSB-Core-generic/LSB-Core-generic/symversion.html#VERNEEDFIG
+#[repr(C)]
+#[derive(Debug, Pread)]
+struct ElfVerneed {
+ vn_version: u16,
+ vn_cnt: u16,
+ vn_file: u32,
+ vn_aux: u32,
+ vn_next: u32,
+}
+
+/// An ELF `Version Need Auxiliary` entry Elfxx_Vernaux.
+///
+/// https://refspecs.linuxbase.org/LSB_5.0.0/LSB-Core-generic/LSB-Core-generic/symversion.html#VERNEEDEXTFIG
+#[repr(C)]
+#[derive(Debug, Pread)]
+struct ElfVernaux {
+ vna_hash: u32,
+ vna_flags: u16,
+ vna_other: u16,
+ vna_name: u32,
+ vna_next: u32,
+}
+
+/********************
+ * Symbol Version *
+ ********************/
+
+/// Helper struct to iterate over [Symbol Version][Versym] entries.
+#[derive(Debug)]
+pub struct VersymSection<'a> {
+ // Raw contents of the SHT_GNU_versym section.
+ bytes: &'a [u8],
+ // Parsing context; only the endianness (`ctx.le`) is used for these entries.
+ ctx: container::Ctx,
+}
+
+impl<'a> VersymSection<'a> {
+ /// Try to parse the optional [`SHT_GNU_VERSYM`] section.
+ ///
+ /// Returns `Ok(None)` when the binary has no such section.
+ pub fn parse(
+ bytes: &'a [u8],
+ shdrs: &[SectionHeader],
+ ctx: container::Ctx,
+ ) -> Result<Option<VersymSection<'a>>> {
+ // Get fields needed from optional `symbol version` section.
+ let (offset, size) =
+ if let Some(shdr) = shdrs.iter().find(|shdr| shdr.sh_type == SHT_GNU_VERSYM) {
+ (shdr.sh_offset as usize, shdr.sh_size as usize)
+ } else {
+ return Ok(None);
+ };
+
+ // Get a slice of bytes of the `symbol version` section content.
+ let bytes: &'a [u8] = bytes.pread_with(offset, size)?;
+
+ Ok(Some(VersymSection { bytes, ctx }))
+ }
+
+ /// Get an iterator over the [`Versym`] entries.
+ #[inline]
+ pub fn iter(&'a self) -> VersymIter<'a> {
+ self.into_iter()
+ }
+
+ /// True if there are no [`Versym`] entries.
+ #[inline]
+ pub fn is_empty(&self) -> bool {
+ self.bytes.is_empty()
+ }
+
+ /// Number of [`Versym`] entries.
+ #[inline]
+ pub fn len(&self) -> usize {
+ let entsize = core::mem::size_of::<ElfVersym>();
+
+ self.bytes.len() / entsize
+ }
+
+ /// Get [`Versym`] entry at index.
+ ///
+ /// Returns `None` for an out-of-range index (or on offset overflow).
+ #[inline]
+ pub fn get_at(&self, idx: usize) -> Option<Versym> {
+ let entsize = core::mem::size_of::<ElfVersym>();
+ // checked_mul: a huge idx must yield None, not an overflow panic.
+ let offset = idx.checked_mul(entsize)?;
+
+ self.bytes
+ .pread_with::<ElfVersym>(offset, self.ctx.le)
+ .ok()
+ .map(Versym::from)
+ }
+}
+
+impl<'a> IntoIterator for &'_ VersymSection<'a> {
+ type Item = <VersymIter<'a> as Iterator>::Item;
+ type IntoIter = VersymIter<'a>;
+
+ fn into_iter(self) -> Self::IntoIter {
+ VersymIter {
+ bytes: self.bytes,
+ offset: 0,
+ ctx: self.ctx,
+ }
+ }
+}
+
+/// Iterator over the [`Versym`] entries from the [`SHT_GNU_VERSYM`] section.
+pub struct VersymIter<'a> {
+ bytes: &'a [u8],
+ offset: usize,
+ ctx: container::Ctx,
+}
+
+impl<'a> Iterator for VersymIter<'a> {
+ type Item = Versym;
+
+ fn next(&mut self) -> Option<Self::Item> {
+ if self.offset >= self.bytes.len() {
+ None
+ } else {
+ self.bytes
+ .gread_with::<ElfVersym>(&mut self.offset, self.ctx.le)
+ .ok()
+ .map(Versym::from)
+ .or_else(|| {
+ // self.bytes are not a multiple of ElfVersym.
+ // Clamp offset to the end so the iterator stays fused,
+ // continuing to yield None.
+ self.offset = self.bytes.len();
+ None
+ })
+ }
+ }
+
+ #[inline]
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ // Entries are fixed-size, so lower == upper - this is what makes the
+ // ExactSizeIterator impl below sound.
+ // NOTE(review): this divides by size_of::<Versym> (the public type), not
+ // size_of::<ElfVersym>; both are a single u16 today, but the hint would
+ // skew if the public type ever diverged from the on-disk layout.
+ let len = (self.bytes.len() - self.offset) / core::mem::size_of::<Self::Item>();
+ (len, Some(len))
+ }
+}
+
+impl ExactSizeIterator for VersymIter<'_> {}
+
+impl FusedIterator for VersymIter<'_> {}
+
+/// An ELF [Symbol Version][lsb-versym] entry.
+///
+/// [lsb-versym]: https://refspecs.linuxbase.org/LSB_5.0.0/LSB-Core-generic/LSB-Core-generic/symversion.html#SYMVERTBL
+#[derive(Debug)]
+pub struct Versym {
+ /// Raw value: hidden flag in bit 15, version index in the low 15 bits.
+ pub vs_val: u16,
+}
+
+impl Versym {
+ /// Returns true if the symbol is local and not available outside the object according to
+ /// [`VER_NDX_LOCAL`].
+ #[inline]
+ pub fn is_local(&self) -> bool {
+ self.vs_val == VER_NDX_LOCAL
+ }
+
+ /// Returns true if the symbol is defined in this object and globally available according
+ /// to [`VER_NDX_GLOBAL`].
+ #[inline]
+ pub fn is_global(&self) -> bool {
+ self.vs_val == VER_NDX_GLOBAL
+ }
+
+ /// Returns true if the `hidden` bit is set according to the [`VERSYM_HIDDEN`] bitmask.
+ #[inline]
+ pub fn is_hidden(&self) -> bool {
+ (self.vs_val & VERSYM_HIDDEN) == VERSYM_HIDDEN
+ }
+
+ /// Returns the symbol version index according to the [`VERSYM_VERSION`] bitmask
+ /// (i.e. `vs_val` with the hidden bit masked off).
+ #[inline]
+ pub fn version(&self) -> u16 {
+ self.vs_val & VERSYM_VERSION
+ }
+}
+
+// Trivial widening from the raw on-disk entry.
+impl From<ElfVersym> for Versym {
+ fn from(ElfVersym { vs_val }: ElfVersym) -> Self {
+ Versym { vs_val }
+ }
+}
+
+/************************
+ * Version Definition *
+ ************************/
+
+/// Helper struct to iterate over [Version Definition][Verdef] and [Version Definition
+/// Auxiliary][Verdaux] entries.
+#[derive(Debug)]
+pub struct VerdefSection<'a> {
+ /// Raw contents of the SHT_GNU_verdef section.
+ /// (Name strings are resolved by the caller against the dynamic string table.)
+ bytes: &'a [u8],
+ /// Number of ElfVerdef entries, taken from the section header's sh_info.
+ count: usize,
+ /// Parsing context; only the endianness (`ctx.le`) is used for these entries.
+ ctx: container::Ctx,
+}
+
+impl<'a> VerdefSection<'a> {
+ /// Try to parse the optional [`SHT_GNU_VERDEF`] section.
+ ///
+ /// Returns `Ok(None)` when the binary has no such section.
+ pub fn parse(
+ bytes: &'a [u8],
+ shdrs: &[SectionHeader],
+ ctx: container::Ctx,
+ ) -> Result<Option<VerdefSection<'a>>> {
+ // Get fields needed from optional `version definition` section.
+ let (offset, size, count) =
+ if let Some(shdr) = shdrs.iter().find(|shdr| shdr.sh_type == SHT_GNU_VERDEF) {
+ (
+ shdr.sh_offset as usize,
+ shdr.sh_size as usize,
+ shdr.sh_info as usize, // Encodes the number of ElfVerdef entries.
+ )
+ } else {
+ return Ok(None);
+ };
+
+ // Get a slice of bytes of the `version definition` section content.
+ let bytes: &'a [u8] = bytes.pread_with(offset, size)?;
+
+ Ok(Some(VerdefSection { bytes, count, ctx }))
+ }
+
+ /// Get an iterator over the [`Verdef`] entries.
+ #[inline]
+ pub fn iter(&'a self) -> VerdefIter<'a> {
+ self.into_iter()
+ }
+}
+
+impl<'a> IntoIterator for &'_ VerdefSection<'a> {
+ type Item = <VerdefIter<'a> as Iterator>::Item;
+ type IntoIter = VerdefIter<'a>;
+
+ #[inline]
+ fn into_iter(self) -> Self::IntoIter {
+ VerdefIter {
+ bytes: self.bytes,
+ count: self.count,
+ index: 0,
+ offset: 0,
+ ctx: self.ctx,
+ }
+ }
+}
+
+/// Iterator over the [`Verdef`] entries from the [`SHT_GNU_VERDEF`] section.
+pub struct VerdefIter<'a> {
+ bytes: &'a [u8],
+ // Upper bound on entries; iteration may stop earlier via vd_next == 0.
+ count: usize,
+ index: usize,
+ // Byte offset of the current ElfVerdef entry within `bytes`.
+ offset: usize,
+ ctx: container::Ctx,
+}
+
+// Entries form an offset-linked list: each ElfVerdef points to its Verdaux array
+// (vd_aux) and to the next ElfVerdef (vd_next), both relative to the entry start.
+// Any malformed offset permanently ends iteration (the iterator is fused).
+impl<'a> Iterator for VerdefIter<'a> {
+ type Item = Verdef<'a>;
+
+ fn next(&mut self) -> Option<Self::Item> {
+ if self.index >= self.count {
+ None
+ } else {
+ self.index += 1;
+
+ // Closure so any failed step can fall through to the or_else below.
+ let do_next = |iter: &mut Self| {
+ let ElfVerdef {
+ vd_version,
+ vd_flags,
+ vd_ndx,
+ vd_cnt,
+ vd_hash,
+ vd_aux,
+ vd_next,
+ } = iter.bytes.pread_with(iter.offset, iter.ctx.le).ok()?;
+
+ // Validate offset to first ElfVerdaux entry.
+ let offset = iter.offset.checked_add(vd_aux as usize)?;
+
+ // Validate if offset is valid index into bytes slice.
+ if offset >= iter.bytes.len() {
+ return None;
+ }
+
+ // Get a slice of bytes starting with the first ElfVerdaux entry.
+ let bytes: &'a [u8] = &iter.bytes[offset..];
+
+ // Bump the offset to the next ElfVerdef entry.
+ iter.offset = iter.offset.checked_add(vd_next as usize)?;
+
+ // Start yielding None on the next call if there is no next offset.
+ if vd_next == 0 {
+ iter.index = iter.count;
+ }
+
+ Some(Verdef {
+ vd_version,
+ vd_flags,
+ vd_ndx,
+ vd_cnt,
+ vd_hash,
+ vd_aux,
+ vd_next,
+ bytes,
+ ctx: iter.ctx,
+ })
+ };
+
+ do_next(self).or_else(|| {
+ // Adjust current index to count in case of an error.
+ self.index = self.count;
+ None
+ })
+ }
+ }
+
+ #[inline]
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ // Lower bound is 0 because the linked list may terminate early
+ // (vd_next == 0 or a malformed entry).
+ let len = self.count - self.index;
+ (0, Some(len))
+ }
+}
+
+// NOTE(review): `size_hint` has lower != upper, so the default
+// `ExactSizeIterator::len` (which asserts the two are equal) panics whenever
+// `count > index`. Verify `.len()` is never called on this iterator, or give
+// this impl an explicit `len` / drop it.
+impl ExactSizeIterator for VerdefIter<'_> {}
+
+impl FusedIterator for VerdefIter<'_> {}
+
+/// An ELF [Version Definition][lsb-verdef] entry.
+///
+/// [lsb-verdef]: https://refspecs.linuxbase.org/LSB_5.0.0/LSB-Core-generic/LSB-Core-generic/symversion.html#VERDEFENTRIES
+#[derive(Debug)]
+pub struct Verdef<'a> {
+ /// Version revision. This field shall be set to 1.
+ pub vd_version: u16,
+ /// Version information flag bitmask.
+ pub vd_flags: u16,
+ /// Version index numeric value referencing the SHT_GNU_versym section.
+ pub vd_ndx: u16,
+ /// Number of associated verdaux array entries.
+ pub vd_cnt: u16,
+ /// Version name hash value (ELF hash function).
+ pub vd_hash: u32,
+ /// Offset in bytes to a corresponding entry in an array of Elfxx_Verdaux structures.
+ pub vd_aux: u32,
+ /// Offset to the next verdef entry, in bytes.
+ pub vd_next: u32,
+
+ // Slice of the verdef section starting at this entry's first ElfVerdaux
+ // (set up by VerdefIter::next); backs the iter() below.
+ bytes: &'a [u8],
+ ctx: container::Ctx,
+}
+
+impl<'a> Verdef<'a> {
+ /// Get an iterator over the [`Verdaux`] entries of this [`Verdef`] entry.
+ #[inline]
+ pub fn iter(&'a self) -> VerdauxIter<'a> {
+ self.into_iter()
+ }
+}
+
+impl<'a> IntoIterator for &'_ Verdef<'a> {
+ type Item = <VerdauxIter<'a> as Iterator>::Item;
+ type IntoIter = VerdauxIter<'a>;
+
+ fn into_iter(self) -> Self::IntoIter {
+ VerdauxIter {
+ bytes: self.bytes,
+ count: self.vd_cnt,
+ index: 0,
+ offset: 0,
+ ctx: self.ctx,
+ }
+ }
+}
+
+/// Iterator over the [`Verdaux`] entries for a specific [`Verdef`] entry.
+pub struct VerdauxIter<'a> {
+ // Starts at the Verdef's first ElfVerdaux entry.
+ bytes: &'a [u8],
+ // Upper bound (vd_cnt); iteration may stop earlier via vda_next == 0.
+ count: u16,
+ index: u16,
+ // Byte offset of the current ElfVerdaux entry within `bytes`.
+ offset: usize,
+ ctx: container::Ctx,
+}
+
+// Entries form an offset-linked list: vda_next is relative to the current entry,
+// so `offset` accumulates. Any malformed entry permanently ends iteration.
+impl<'a> Iterator for VerdauxIter<'a> {
+ type Item = Verdaux;
+
+ fn next(&mut self) -> Option<Self::Item> {
+ if self.index >= self.count {
+ None
+ } else {
+ self.index += 1;
+
+ let do_next = |iter: &mut Self| {
+ let ElfVerdaux { vda_name, vda_next } =
+ iter.bytes.pread_with(iter.offset, iter.ctx.le).ok()?;
+
+ // Bump the offset to the next ElfVerdaux entry.
+ iter.offset = iter.offset.checked_add(vda_next as usize)?;
+
+ // Start yielding None on the next call if there is no next offset.
+ if vda_next == 0 {
+ iter.index = iter.count;
+ }
+
+ Some(Verdaux {
+ vda_name: vda_name as usize,
+ vda_next,
+ })
+ };
+
+ do_next(self).or_else(|| {
+ // Adjust current index to count in case of an error.
+ self.index = self.count;
+ None
+ })
+ }
+ }
+
+ #[inline]
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ // Lower bound is 0 because the linked list may terminate early.
+ let len = usize::from(self.count - self.index);
+ (0, Some(len))
+ }
+}
+
+// NOTE(review): same issue as VerdefIter - `size_hint` has lower != upper, so
+// the default `ExactSizeIterator::len` panics whenever `count > index`.
+impl ExactSizeIterator for VerdauxIter<'_> {}
+
+impl FusedIterator for VerdauxIter<'_> {}
+
+/// An ELF [Version Definition Auxiliary][lsb-verdaux] entry.
+///
+/// Widened from the raw [`ElfVerdaux`] by [`VerdauxIter`].
+///
+/// [lsb-verdaux]: https://refspecs.linuxbase.org/LSB_5.0.0/LSB-Core-generic/LSB-Core-generic/symversion.html#VERDEFEXTS
+#[derive(Debug)]
+pub struct Verdaux {
+ /// Offset to the version or dependency name string in the section header, in bytes.
+ pub vda_name: usize,
+ /// Offset to the next verdaux entry, in bytes.
+ pub vda_next: u32,
+}
+
+/**************************
+ * Version Requirements *
+ **************************/
+
+/// Helper struct to iterate over [Version Needed][Verneed] and [Version Needed
+/// Auxiliary][Vernaux] entries.
+#[derive(Debug)]
+pub struct VerneedSection<'a> {
+ // Raw contents of the SHT_GNU_verneed section.
+ bytes: &'a [u8],
+ // Number of ElfVerneed entries, taken from the section header's sh_info.
+ count: usize,
+ // Parsing context; only the endianness (`ctx.le`) is used for these entries.
+ ctx: container::Ctx,
+}
+
+impl<'a> VerneedSection<'a> {
+ /// Try to parse the optional [`SHT_GNU_VERNEED`] section.
+ ///
+ /// Returns `Ok(None)` when the binary has no such section.
+ pub fn parse(
+ bytes: &'a [u8],
+ shdrs: &[SectionHeader],
+ ctx: container::Ctx,
+ ) -> Result<Option<VerneedSection<'a>>> {
+ // Get fields needed from optional `version needed` section.
+ let (offset, size, count) =
+ if let Some(shdr) = shdrs.iter().find(|shdr| shdr.sh_type == SHT_GNU_VERNEED) {
+ (
+ shdr.sh_offset as usize,
+ shdr.sh_size as usize,
+ shdr.sh_info as usize, // Encodes the number of ElfVerneed entries.
+ )
+ } else {
+ return Ok(None);
+ };
+
+ // Get a slice of bytes of the `version needed` section content.
+ let bytes: &'a [u8] = bytes.pread_with(offset, size)?;
+
+ Ok(Some(VerneedSection { bytes, count, ctx }))
+ }
+
+ /// Get an iterator over the [`Verneed`] entries.
+ #[inline]
+ pub fn iter(&'a self) -> VerneedIter<'a> {
+ self.into_iter()
+ }
+}
+
+impl<'a> IntoIterator for &'_ VerneedSection<'a> {
+ type Item = <VerneedIter<'a> as Iterator>::Item;
+ type IntoIter = VerneedIter<'a>;
+
+ #[inline]
+ fn into_iter(self) -> Self::IntoIter {
+ VerneedIter {
+ bytes: self.bytes,
+ count: self.count,
+ index: 0,
+ offset: 0,
+ ctx: self.ctx,
+ }
+ }
+}
+
+/// Iterator over the [`Verneed`] entries from the [`SHT_GNU_VERNEED`] section.
+pub struct VerneedIter<'a> {
+ bytes: &'a [u8],
+ // Upper bound on entries; iteration may stop earlier via vn_next == 0.
+ count: usize,
+ index: usize,
+ // Byte offset of the current ElfVerneed entry within `bytes`.
+ offset: usize,
+ ctx: container::Ctx,
+}
+
+// Entries form an offset-linked list (mirrors VerdefIter): vn_aux points to the
+// Vernaux array and vn_next to the next ElfVerneed, both relative to the entry
+// start. Any malformed offset permanently ends iteration (the iterator is fused).
+impl<'a> Iterator for VerneedIter<'a> {
+ type Item = Verneed<'a>;
+
+ fn next(&mut self) -> Option<Self::Item> {
+ if self.index >= self.count {
+ None
+ } else {
+ self.index += 1;
+
+ // Closure so any failed step can fall through to the or_else below.
+ let do_next = |iter: &mut Self| {
+ let ElfVerneed {
+ vn_version,
+ vn_cnt,
+ vn_file,
+ vn_aux,
+ vn_next,
+ } = iter.bytes.pread_with(iter.offset, iter.ctx.le).ok()?;
+
+ // Validate offset to first ElfVernaux entry.
+ let offset = iter.offset.checked_add(vn_aux as usize)?;
+
+ // Validate if offset is valid index into bytes slice.
+ if offset >= iter.bytes.len() {
+ return None;
+ }
+
+ // Get a slice of bytes starting with the first ElfVernaux entry.
+ let bytes: &'a [u8] = &iter.bytes[offset..];
+
+ // Bump the offset to the next ElfVerneed entry.
+ iter.offset = iter.offset.checked_add(vn_next as usize)?;
+
+ // Start yielding None on the next call if there is no next offset.
+ if vn_next == 0 {
+ iter.index = iter.count;
+ }
+
+ Some(Verneed {
+ vn_version,
+ vn_cnt,
+ vn_file: vn_file as usize,
+ vn_aux,
+ vn_next,
+ bytes,
+ ctx: iter.ctx,
+ })
+ };
+
+ do_next(self).or_else(|| {
+ // Adjust current index to count in case of an error.
+ self.index = self.count;
+ None
+ })
+ }
+ }
+
+ #[inline]
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ // Lower bound is 0 because the linked list may terminate early.
+ let len = self.count - self.index;
+ (0, Some(len))
+ }
+}
+
+// NOTE(review): same issue as VerdefIter - `size_hint` has lower != upper, so
+// the default `ExactSizeIterator::len` panics whenever `count > index`.
+impl ExactSizeIterator for VerneedIter<'_> {}
+
+impl FusedIterator for VerneedIter<'_> {}
+
+/// An ELF [Version Need][lsb-verneed] entry.
+///
+/// [lsb-verneed]: https://refspecs.linuxbase.org/LSB_5.0.0/LSB-Core-generic/LSB-Core-generic/symversion.html#VERNEEDFIG
+#[derive(Debug)]
+pub struct Verneed<'a> {
+ /// Version of structure. This value is currently set to 1, and will be reset if the versioning
+ /// implementation is incompatibly altered.
+ pub vn_version: u16,
+ /// Number of associated verneed array entries.
+ pub vn_cnt: u16,
+ /// Offset to the file name string in the section header, in bytes.
+ pub vn_file: usize,
+ /// Offset to a corresponding entry in the vernaux array, in bytes.
+ pub vn_aux: u32,
+ /// Offset to the next verneed entry, in bytes.
+ pub vn_next: u32,
+
+ // Slice of the verneed section starting at this entry's first ElfVernaux
+ // (set up by VerneedIter::next); backs the iter() below.
+ bytes: &'a [u8],
+ ctx: container::Ctx,
+}
+
+impl<'a> Verneed<'a> {
+ /// Get an iterator over the [`Vernaux`] entries of this [`Verneed`] entry.
+ #[inline]
+ pub fn iter(&'a self) -> VernauxIter<'a> {
+ self.into_iter()
+ }
+}
+
+impl<'a> IntoIterator for &'_ Verneed<'a> {
+ type Item = <VernauxIter<'a> as Iterator>::Item;
+ type IntoIter = VernauxIter<'a>;
+
+ #[inline]
+ fn into_iter(self) -> Self::IntoIter {
+ VernauxIter {
+ bytes: self.bytes,
+ count: self.vn_cnt,
+ index: 0,
+ offset: 0,
+ ctx: self.ctx,
+ }
+ }
+}
+
+/// Iterator over the [`Vernaux`] entries for a specific [`Verneed`] entry.
+pub struct VernauxIter<'a> {
+ // Starts at the Verneed's first ElfVernaux entry.
+ bytes: &'a [u8],
+ // Upper bound (vn_cnt); iteration may stop earlier via vna_next == 0.
+ count: u16,
+ index: u16,
+ // Byte offset of the current ElfVernaux entry within `bytes`.
+ offset: usize,
+ ctx: container::Ctx,
+}
+
+// Entries form an offset-linked list: vna_next is relative to the current entry,
+// so `offset` accumulates. Any malformed entry permanently ends iteration.
+impl<'a> Iterator for VernauxIter<'a> {
+ type Item = Vernaux;
+
+ fn next(&mut self) -> Option<Self::Item> {
+ if self.index >= self.count {
+ None
+ } else {
+ self.index += 1;
+
+ let do_next = |iter: &mut Self| {
+ let ElfVernaux {
+ vna_hash,
+ vna_flags,
+ vna_other,
+ vna_name,
+ vna_next,
+ } = iter.bytes.pread_with(iter.offset, iter.ctx.le).ok()?;
+
+ // Bump the offset to the next ElfVernaux entry.
+ iter.offset = iter.offset.checked_add(vna_next as usize)?;
+
+ // Start yielding None on the next call if there is no next offset.
+ if vna_next == 0 {
+ iter.index = iter.count;
+ }
+
+ Some(Vernaux {
+ vna_hash,
+ vna_flags,
+ vna_other,
+ vna_name: vna_name as usize,
+ vna_next,
+ })
+ };
+
+ do_next(self).or_else(|| {
+ // Adjust current index to count in case of an error.
+ self.index = self.count;
+ None
+ })
+ }
+ }
+
+ #[inline]
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ // Lower bound is 0 because the linked list may terminate early.
+ let len = usize::from(self.count - self.index);
+ (0, Some(len))
+ }
+}
+
+// NOTE(review): same issue as VerdefIter - `size_hint` has lower != upper, so
+// the default `ExactSizeIterator::len` panics whenever `count > index`.
+impl ExactSizeIterator for VernauxIter<'_> {}
+
+impl FusedIterator for VernauxIter<'_> {}
+
+/// An ELF [Version Need Auxiliary][lsb-vernaux] entry.
+///
+/// Widened from the raw [`ElfVernaux`] by [`VernauxIter`].
+///
+/// [lsb-vernaux]: https://refspecs.linuxbase.org/LSB_5.0.0/LSB-Core-generic/LSB-Core-generic/symversion.html#VERNEEDEXTFIG
+#[derive(Debug)]
+pub struct Vernaux {
+ /// Dependency name hash value (ELF hash function).
+ pub vna_hash: u32,
+ /// Dependency information flag bitmask.
+ pub vna_flags: u16,
+ /// Object file version identifier used in the .gnu.version symbol version array. Bit number 15
+ /// controls whether or not the object is hidden; if this bit is set, the object cannot be used
+ /// and the static linker will ignore the symbol's presence in the object.
+ pub vna_other: u16,
+ /// Offset to the dependency name string in the section header, in bytes.
+ pub vna_name: usize,
+ /// Offset to the next vernaux entry, in bytes.
+ pub vna_next: u32,
+}
+
+#[cfg(test)]
+mod test {
+ use super::{ElfVerdaux, ElfVerdef, ElfVernaux, ElfVerneed, ElfVersym};
+ use super::{Versym, VERSYM_HIDDEN, VER_NDX_GLOBAL, VER_NDX_LOCAL};
+ use core::mem::size_of;
+
+ // Pin the raw struct sizes to the on-disk entry sizes mandated by the
+ // LSB symbol-versioning spec (identical for 32- and 64-bit ELF).
+ #[test]
+ fn check_size() {
+ assert_eq!(2, size_of::<ElfVersym>());
+ assert_eq!(20, size_of::<ElfVerdef>());
+ assert_eq!(8, size_of::<ElfVerdaux>());
+ assert_eq!(16, size_of::<ElfVerneed>());
+ assert_eq!(16, size_of::<ElfVernaux>());
+ }
+
+ // Exercise the Versym predicate/accessor helpers across local, global,
+ // hidden, and hidden-with-index values.
+ #[test]
+ fn check_versym() {
+ let local = Versym {
+ vs_val: VER_NDX_LOCAL,
+ };
+ assert_eq!(true, local.is_local());
+ assert_eq!(false, local.is_global());
+ assert_eq!(false, local.is_hidden());
+ assert_eq!(VER_NDX_LOCAL, local.version());
+
+ let global = Versym {
+ vs_val: VER_NDX_GLOBAL,
+ };
+ assert_eq!(false, global.is_local());
+ assert_eq!(true, global.is_global());
+ assert_eq!(false, global.is_hidden());
+ assert_eq!(VER_NDX_GLOBAL, global.version());
+
+ let hidden = Versym {
+ vs_val: VERSYM_HIDDEN,
+ };
+ assert_eq!(false, hidden.is_local());
+ assert_eq!(false, hidden.is_global());
+ assert_eq!(true, hidden.is_hidden());
+ assert_eq!(0, hidden.version());
+
+ // version() must mask the hidden bit off and keep the index.
+ let hidden = Versym {
+ vs_val: VERSYM_HIDDEN | 0x123,
+ };
+ assert_eq!(false, hidden.is_local());
+ assert_eq!(false, hidden.is_global());
+ assert_eq!(true, hidden.is_hidden());
+ assert_eq!(0x123, hidden.version());
+ }
+}