1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
|
use crate::errors::AndroidError;
use crate::maps_reader::MappingInfo;
use crate::ptrace_dumper::PtraceDumper;
use crate::thread_info::Pid;
use goblin::elf;
#[cfg(target_pointer_width = "32")]
use goblin::elf::dynamic::dyn32::{Dyn, SIZEOF_DYN};
#[cfg(target_pointer_width = "64")]
use goblin::elf::dynamic::dyn64::{Dyn, SIZEOF_DYN};
#[cfg(target_pointer_width = "32")]
use goblin::elf::header::header32 as elf_header;
#[cfg(target_pointer_width = "64")]
use goblin::elf::header::header64 as elf_header;
#[cfg(target_pointer_width = "32")]
use goblin::elf::program_header::program_header32::ProgramHeader;
#[cfg(target_pointer_width = "64")]
use goblin::elf::program_header::program_header64::ProgramHeader;
use std::ffi::c_void;
type Result<T> = std::result::Result<T, AndroidError>;
// From /usr/include/elf.h of the android SDK
// #define DT_ANDROID_REL (DT_LOOS + 2)
// #define DT_ANDROID_RELSZ (DT_LOOS + 3)
// #define DT_ANDROID_RELA (DT_LOOS + 4)
// #define DT_ANDROID_RELASZ (DT_LOOS + 5)
// Android-specific dynamic-section tags marking "packed" relocations.
// goblin doesn't define these, so they are reconstructed from DT_LOOS here.
// The d_tag width follows the native pointer width (Elf64_Sxword vs
// Elf32_Sword), hence the two cfg'd variants.
#[cfg(target_pointer_width = "64")]
const DT_ANDROID_REL: u64 = elf::dynamic::DT_LOOS + 2;
#[cfg(target_pointer_width = "64")]
const DT_ANDROID_RELA: u64 = elf::dynamic::DT_LOOS + 4;
#[cfg(target_pointer_width = "32")]
const DT_ANDROID_REL: u32 = (elf::dynamic::DT_LOOS + 2) as u32;
#[cfg(target_pointer_width = "32")]
const DT_ANDROID_RELA: u32 = (elf::dynamic::DT_LOOS + 4) as u32;
/// Addresses extracted from a loaded ELF image's program headers, used to
/// locate its dynamic section and compute its load bias.
struct DynVaddresses {
    // Smallest p_vaddr among PT_LOAD segments (usize::MAX sentinel if none).
    min_vaddr: usize,
    // p_vaddr of the PT_DYNAMIC segment (0 if none was found).
    dyn_vaddr: usize,
    // Number of Dyn entries in the dynamic segment (p_memsz / SIZEOF_DYN).
    dyn_count: usize,
}
/// Scans the dynamic section of the library loaded in `pid` for Android
/// packed relocation tags (`DT_ANDROID_REL` / `DT_ANDROID_RELA`).
///
/// Returns `Ok(())` as soon as one of the tags is seen; returns
/// `AndroidError::NoRelFound` if none of the `dyn_count` entries carries one.
/// Any ptrace read failure is propagated via `?`.
fn has_android_packed_relocations(pid: Pid, load_bias: usize, vaddrs: DynVaddresses) -> Result<()> {
    let table_base = load_bias + vaddrs.dyn_vaddr;
    for entry_idx in 0..vaddrs.dyn_count {
        let entry_addr = (table_base + entry_idx * SIZEOF_DYN) as *mut c_void;
        let raw = PtraceDumper::copy_from_process(pid, entry_addr, SIZEOF_DYN)?;
        // TODO: Couldn't find a nice way to use goblin for that, to avoid the unsafe-block
        let bytes: [u8; SIZEOF_DYN] = raw.as_slice().try_into()?;
        // SAFETY: `Dyn` consists solely of integer fields and is exactly
        // SIZEOF_DYN bytes, so any correctly-sized byte array is a valid
        // bit pattern for it.
        let dyn_entry = unsafe { std::mem::transmute::<[u8; SIZEOF_DYN], Dyn>(bytes) };
        if dyn_entry.d_tag == DT_ANDROID_REL || dyn_entry.d_tag == DT_ANDROID_RELA {
            return Ok(());
        }
    }
    Err(AndroidError::NoRelFound)
}
/// Computes the effective load bias for an ET_DYN library mapped at
/// `address` in `pid`.
///
/// Returns `address - min_vaddr` when the image has a non-zero minimum
/// PT_LOAD vaddr *and* carries Android packed relocations; otherwise
/// returns `address` unchanged.
fn get_effective_load_bias(pid: Pid, ehdr: &elf_header::Header, address: usize) -> usize {
    let ph = parse_loaded_elf_program_headers(pid, ehdr, address);
    // If |min_vaddr| is non-zero and we find Android packed relocation tags,
    // return the effective load bias.
    //
    // `checked_sub` guards against `min_vaddr > address` (e.g. the
    // `usize::MAX` sentinel when no PT_LOAD segment was found, or a bogus
    // header), which would otherwise underflow — panicking in debug builds
    // and silently wrapping in release builds.
    if ph.min_vaddr != 0 {
        if let Some(load_bias) = address.checked_sub(ph.min_vaddr) {
            if has_android_packed_relocations(pid, load_bias, ph).is_ok() {
                return load_bias;
            }
        }
    }
    // Either |min_vaddr| is zero, or it is non-zero but we did not find the
    // expected Android packed relocations tags.
    address
}
fn parse_loaded_elf_program_headers(
pid: Pid,
ehdr: &elf_header::Header,
address: usize,
) -> DynVaddresses {
let phdr_addr = address + ehdr.e_phoff as usize;
let mut min_vaddr = usize::MAX;
let mut dyn_vaddr = 0;
let mut dyn_count = 0;
let phdr_opt = PtraceDumper::copy_from_process(
pid,
phdr_addr as *mut c_void,
elf_header::SIZEOF_EHDR * ehdr.e_phnum as usize,
);
if let Ok(ph_data) = phdr_opt {
// TODO: The original C code doesn't have error-handling here at all.
// We silently ignore "not parsable" for now, but might bubble it up.
// TODO2: `from_bytes` might panic, `parse()` would return a Result<>, so maybe better
// to switch to that at some point.
for phdr in ProgramHeader::from_bytes(&ph_data, ehdr.e_phnum as usize) {
let p_vaddr = phdr.p_vaddr as usize;
if phdr.p_type == elf::program_header::PT_LOAD && p_vaddr < min_vaddr {
min_vaddr = p_vaddr;
}
if phdr.p_type == elf::program_header::PT_DYNAMIC {
dyn_vaddr = p_vaddr;
dyn_count = phdr.p_memsz as usize / SIZEOF_DYN;
}
}
}
DynVaddresses {
min_vaddr,
dyn_vaddr,
dyn_count,
}
}
/// Post-processes executable, file-backed mappings: for each mapped shared
/// library (ET_DYN), rewrites the mapping so `start_address` holds the
/// effective load bias instead of the raw start address, growing `size` to
/// compensate. Mappings whose ELF header cannot be read or parsed are
/// skipped silently.
pub fn late_process_mappings(pid: Pid, mappings: &mut [MappingInfo]) -> Result<()> {
    // Only consider exec mappings that indicate a file path was mapped, and
    // where the ELF header indicates a mapped shared library.
    for map in mappings
        .iter_mut()
        .filter(|m| m.executable && m.name.as_ref().map_or(false, |n| n.starts_with("/")))
    {
        // Pull the ELF file header from the start of the mapping; skip this
        // mapping on any read or parse failure.
        let header_bytes = match PtraceDumper::copy_from_process(
            pid,
            map.start_address as *mut c_void,
            elf_header::SIZEOF_EHDR,
        ) {
            Ok(bytes) => bytes,
            Err(_) => continue,
        };
        let ehdr = match elf_header::Header::parse(&header_bytes) {
            Ok(hdr) => hdr,
            Err(_) => continue,
        };
        if ehdr.e_type != elf_header::ET_DYN {
            continue;
        }
        // Compute the effective load bias for this mapped library, and update
        // the mapping to hold that rather than |start_addr|, at the same time
        // adjusting |size| to account for the change in |start_addr|. Where
        // the library does not contain Android packed relocations,
        // GetEffectiveLoadBias() returns |start_addr| and the mapping entry
        // is not changed.
        let load_bias = get_effective_load_bias(pid, &ehdr, map.start_address);
        map.size += map.start_address - load_bias;
        map.start_address = load_bias;
    }
    Ok(())
}
|