ELF动态加载器的核心工作流程可分为以下阶段:
技术要点:
设计模式:
关键技术:
优化原理:
关键机制:
安全特性:
安全机制:
实现类dlopen接口,支持符号延迟绑定
通过结合Rust的内存安全特性和ELF加载机制,可以构建既安全又高效的动态加载系统。类似的设计思路据称已在若干生产场景(如操作系统内核与边缘计算运行时)中得到应用,具体案例读者可自行查证。
关键机制:
技术要点:
关键技术:
汇编级实现要点:
注意事项:
调试技巧:
安全机制:
关键技术:
核心机制:
关键特性:
实现要点:
自举过程:
协作机制:
安全验证:
这些补充实现覆盖了ELF动态加载器开发中的深层技术细节
实际开发时还需注意:
建议结合Rust的FFI能力和C ABI的精确控制,在保证内存安全的同时实现完整的ELF动态加载规范
use goblin::elf::Elf;
struct
DynamicLoader {
elf: Elf<'
static
>,
base_addr: usize,
dependencies: Vec<Arc<DynamicLoader>>,
}
impl DynamicLoader {
pub fn load(path: &str) -> Result<Self> {
let data = fs::read(path)?;
let elf = Elf::parse(&data)?;
let base_addr = calculate_base_address(&elf);
Ok(Self { elf, base_addr, dependencies: vec![] })
}
}
// NOTE(review): byte-for-byte duplicate of the `DynamicLoader` block above
// (scrape artifact) — one copy should be deleted.
// Purpose: parsed ELF plus load base and DT_NEEDED dependencies.
// WARNING: `Elf<'static>` borrows `data`, which is dropped when `load`
// returns — dangling-borrow bug; the buffer must outlive the `Elf`.
use goblin::elf::Elf;
struct
DynamicLoader {
elf: Elf<'
static
>,
base_addr: usize,
dependencies: Vec<Arc<DynamicLoader>>,
}
impl DynamicLoader {
pub fn load(path: &str) -> Result<Self> {
let data = fs::read(path)?;
let elf = Elf::parse(&data)?;
let base_addr = calculate_base_address(&elf);
Ok(Self { elf, base_addr, dependencies: vec![] })
}
}
trait MemoryMapper {
fn map(&self, va: usize, size: usize, prot: Protection) -> Result<*mut u8>;
}
struct
LinuxMapper;
impl MemoryMapper
for
LinuxMapper {
fn map(&self, va: usize, size: usize, prot: Protection) -> Result<*mut u8> {
let ptr = unsafe {
libc::mmap(
va as *mut libc::c_void,
size,
prot.to_unix_flags(),
libc::MAP_ANONYMOUS | libc::MAP_PRIVATE,
-1,
0
)
};
}
}
// NOTE(review): duplicate of the MemoryMapper/LinuxMapper block above
// (scrape artifact).
// WARNING: `map` computes `ptr` but the block evaluates to `()` — the
// declared `Result<*mut u8>` is never returned, and MAP_FAILED is never
// checked.
trait MemoryMapper {
fn map(&self, va: usize, size: usize, prot: Protection) -> Result<*mut u8>;
}
struct
LinuxMapper;
impl MemoryMapper
for
LinuxMapper {
fn map(&self, va: usize, size: usize, prot: Protection) -> Result<*mut u8> {
let ptr = unsafe {
libc::mmap(
va as *mut libc::c_void,
size,
prot.to_unix_flags(),
libc::MAP_ANONYMOUS | libc::MAP_PRIVATE,
-1,
0
)
};
}
}
fn apply_relocations(&self) -> Result<()> {
for
reloc in &self.elf.dynrelas {
let sym = &self.elf.dynsyms[reloc.r_sym];
let target_addr = match sym.st_shndx {
SHN_UNDEF => self.resolve_symbol(&sym.name)?,
_ => self.base_addr + sym.st_value as usize,
};
let reloc_addr = self.base_addr + reloc.r_offset as usize;
unsafe {
match reloc.r_type {
R_X86_64_JUMP_SLOT => {
*(reloc_addr as *mut u64) = target_addr as u64;
}
}
}
}
Ok(())
}
// NOTE(review): duplicate of the apply_relocations block above (scrape
// artifact).
// WARNING: the inner `match reloc.r_type` covers only R_X86_64_JUMP_SLOT —
// a match on an integer must be exhaustive, so this does not compile, and
// other relocation kinds would be silently unhandled.
fn apply_relocations(&self) -> Result<()> {
for
reloc in &self.elf.dynrelas {
let sym = &self.elf.dynsyms[reloc.r_sym];
let target_addr = match sym.st_shndx {
SHN_UNDEF => self.resolve_symbol(&sym.name)?,
_ => self.base_addr + sym.st_value as usize,
};
let reloc_addr = self.base_addr + reloc.r_offset as usize;
unsafe {
match reloc.r_type {
R_X86_64_JUMP_SLOT => {
*(reloc_addr as *mut u64) = target_addr as u64;
}
}
}
}
Ok(())
}
/// One 16-byte Procedure Linkage Table stub plus the GOT slot it targets.
struct PLTEntry {
    /// x86-64 stub template: `jmp *disp32(%rip)` / `push imm32` /
    /// `jmp rel32`. Displacements are patched later, hence the zeros.
    code: [u8; 16],
    /// GOT slot this stub indirects through; null until wired up.
    got_entry: *mut u64,
}

impl PLTEntry {
    /// Builds an unpatched stub (all displacements zero, GOT slot null).
    fn new() -> Self {
        let template = [
            0xFF, 0x25, 0x00, 0x00, 0x00, 0x00, // jmp *disp32(%rip)
            0x68, 0x00, 0x00, 0x00, 0x00,       // push imm32 (reloc index)
            0xE9, 0x00, 0x00, 0x00, 0x00,       // jmp rel32 (back to PLT0)
        ];
        Self { code: template, got_entry: null_mut() }
    }
}
// NOTE(review): duplicate of the PLTEntry block above (scrape artifact).
// Purpose: a 16-byte PLT stub template (jmp/push/jmp, zero displacements)
// plus the GOT slot it will indirect through.
struct
PLTEntry {
code: [u8; 16],
got_entry: *mut u64,
}
impl PLTEntry {
fn
new
() -> Self {
Self {
code: [
0xFF, 0x25, 0x00, 0x00, 0x00, 0x00,
0x68, 0x00, 0x00, 0x00, 0x00,
0xE9, 0x00, 0x00, 0x00, 0x00,
],
got_entry: null_mut(),
}
}
}
/// Per-module thread-local-storage image (tdata followed by zeroed tbss).
struct TlsBlock {
    /// Module id handed out by the TLS allocator.
    module_id: u64,
    /// Initial TLS image: file bytes, then zeros up to p_memsz.
    data: Vec<u8>,
}

impl DynamicLoader {
    /// Builds this module's initial TLS image from its PT_TLS segment.
    fn init_tls(&mut self) -> Result<()> {
        let tls_phdr = self.elf.program_headers.iter()
            .find(|ph| ph.p_type == PT_TLS)
            .context("No TLS segment")?;
        // p_memsz >= p_filesz; the tail beyond filesz stays zero (tbss).
        let mut tls_data = vec![0u8; tls_phdr.p_memsz as usize];
        let filesz = tls_phdr.p_filesz as usize;
        let off = tls_phdr.p_offset as usize;
        // Bug fixed: copy_from_slice panics unless both slices have equal
        // length — the original copied from an open-ended `[p_offset..]`
        // slice into a `filesz`-long destination.
        tls_data[..filesz].copy_from_slice(&self.elf.data[off..off + filesz]);
        let module_id = allocate_tls_module();
        self.tls = Some(TlsBlock { module_id, data: tls_data });
        Ok(())
    }
}
// NOTE(review): duplicate of the TlsBlock/init_tls block above (scrape
// artifact).
// WARNING: `copy_from_slice` requires equal slice lengths; copying from
// the open-ended `[p_offset..]` slice will panic whenever the file tail is
// longer than p_filesz.
struct
TlsBlock {
module_id: u64,
data: Vec<u8>,
}
impl DynamicLoader {
fn init_tls(&mut self) -> Result<()> {
let tls_phdr = self.elf.program_headers.iter()
.find(|ph| ph.p_type == PT_TLS)
.context(
"No TLS segment"
)?;
let mut tls_data = vec![0u8; tls_phdr.p_memsz as usize];
tls_data[..tls_phdr.p_filesz as usize]
.copy_from_slice(&self.elf.data[tls_phdr.p_offset as usize..]);
let module_id = allocate_tls_module();
self.tls = Some(TlsBlock { module_id, data: tls_data });
Ok(())
}
}
/// RAII handle for one mmap'd program segment; unmaps on drop.
struct LoadedSegment {
    /// Start of the mapping (NonNull makes `Option<LoadedSegment>` cheap).
    ptr: NonNull<u8>,
    /// Mapping length in bytes, as passed to mmap/munmap.
    size: usize,
    /// Marks logical ownership of the mapped bytes for the drop checker.
    _marker: PhantomData<[u8]>,
}

impl Drop for LoadedSegment {
    fn drop(&mut self) {
        // SAFETY: ptr/size describe a mapping this handle owns.
        // munmap's return value is ignored — nothing useful can be done
        // about a failed unmap inside a destructor.
        unsafe {
            libc::munmap(self.ptr.as_ptr() as _, self.size);
        }
    }
}
// NOTE(review): duplicate of the LoadedSegment block above (scrape
// artifact). RAII wrapper that munmaps its segment on drop; the munmap
// result is silently discarded.
struct
LoadedSegment {
ptr: NonNull<u8>,
size: usize,
_marker: PhantomData<[u8]>,
}
impl Drop
for
LoadedSegment {
fn drop(&mut self) {
unsafe {
libc::munmap(self.ptr.as_ptr() as _, self.size);
}
}
}
fn enable_relro(&self) -> Result<()> {
let (start, size) = calculate_relro_range(&self.elf);
unsafe {
libc::mprotect(
start as *mut libc::c_void,
size,
libc::PROT_READ
)?;
}
Ok(())
}
// NOTE(review): duplicate of the enable_relro block above (scrape
// artifact).
// WARNING: `libc::mprotect(...)?` applies `?` to a raw c_int return value
// — mprotect does not return a Result, so this does not compile.
fn enable_relro(&self) -> Result<()> {
let (start, size) = calculate_relro_range(&self.elf);
unsafe {
libc::mprotect(
start as *mut libc::c_void,
size,
libc::PROT_READ
)?;
}
Ok(())
}
use packed_simd::u64x4;

/// Adds the per-lane base offsets to each address, four lanes at a time.
///
/// Bug fixed: `chunks_exact_mut(4)` leaves up to three trailing elements
/// untouched — the original silently skipped them. The remainder is now
/// handled with scalar wrapping adds using the corresponding lane of
/// `bases` (matches SIMD semantics when all lanes carry the same base —
/// TODO confirm intended behavior when the lanes differ).
fn batch_relocate(addrs: &mut [u64], bases: u64x4) {
    let mut chunks = addrs.chunks_exact_mut(4);
    for chunk in &mut chunks {
        let vals = u64x4::from_slice_unaligned(chunk);
        (vals + bases).write_to_slice_unaligned(chunk);
    }
    // Scalar tail for the 0..=3 leftover addresses.
    for (i, a) in chunks.into_remainder().iter_mut().enumerate() {
        *a = a.wrapping_add(bases.extract(i));
    }
}
// NOTE(review): duplicate of the batch_relocate block above (scrape
// artifact).
// WARNING: chunks_exact_mut(4) never yields the final partial chunk, so up
// to three trailing addresses are silently left unrelocated.
use packed_simd::u64x4;
fn batch_relocate(addrs: &mut [u64], bases: u64x4) {
for
chunk in addrs.chunks_exact_mut(4) {
let vals = u64x4::from_slice_unaligned(chunk);
(vals + bases).write_to_slice_unaligned(chunk);
}
}
// Usage example: open libc through the custom loader and call `puts`.
// SAFETY: the loaded code is trusted blindly, and the resolved pointer
// must really have this C signature.
let lib = unsafe { Dylib::open("libc.so.6")? };

// Resolve `puts` and view it as a C function over *const c_char.
let puts: extern "C" fn(*const c_char) = lib.sym("puts")?;

// The argument must be NUL-terminated — hence the embedded `\0`.
puts(b"Hello Rust Dynamic Loader!\0".as_ptr() as _);
// NOTE(review): duplicate of the usage snippet above (scrape artifact).
// Free-standing statements — valid only inside a function body.
let lib = unsafe { Dylib::open(
"libc.so.6"
)? };
let
puts
:
extern
fn(*
const
c_char) = lib.sym(
"puts"
)?;
puts
(b
"Hello Rust Dynamic Loader!\0"
.as_ptr() as _);
use goblin::elf::program_header::{PT_DYNAMIC, DT_NEEDED};
fn parse_dynamic(elf: &Elf) -> Result<Vec<String>> {
let dynamic_phdr = elf.program_headers
.iter()
.find(|ph| ph.p_type == PT_DYNAMIC)
.context(
"Missing dynamic segment"
)?;
let dynamic_data = &elf.data[dynamic_phdr.p_offset as usize..];
let mut deps = Vec::
new
();
let mut i = 0;
while
i < dynamic_data.len() {
let tag = unsafe {
*(dynamic_data.as_ptr().add(i) as u64
};
let val = unsafe {
*(dynamic_data.as_ptr().add(i + 8) as u64
};
match tag {
DT_NEEDED => {
let str_offset = elf.dynstrtab.get(val as usize)
.context(
"Invalid string offset"
)?;
deps.push(str_offset.to_string());
}
}
i += 16;
}
Ok(deps)
}
fn load_dependencies(&mut self) -> Result<()> {
for
dep in &self.dependencies {
let child = DynamicLoader::load(dep)?;
self.dependencies.push(Arc::
new
(child));
}
Ok(())
}
// NOTE(review): duplicate of the parse_dynamic block above (scrape
// artifact).
// WARNING: the `*(... as u64` casts are unbalanced and ill-typed (won't
// compile), the loop never stops at DT_NULL, and the match on `tag` is
// non-exhaustive.
use goblin::elf::program_header::{PT_DYNAMIC, DT_NEEDED};
fn parse_dynamic(elf: &Elf) -> Result<Vec<String>> {
let dynamic_phdr = elf.program_headers
.iter()
.find(|ph| ph.p_type == PT_DYNAMIC)
.context(
"Missing dynamic segment"
)?;
let dynamic_data = &elf.data[dynamic_phdr.p_offset as usize..];
let mut deps = Vec::
new
();
let mut i = 0;
while
i < dynamic_data.len() {
let tag = unsafe {
*(dynamic_data.as_ptr().add(i) as u64
};
let val = unsafe {
*(dynamic_data.as_ptr().add(i + 8) as u64
};
match tag {
DT_NEEDED => {
let str_offset = elf.dynstrtab.get(val as usize)
.context(
"Invalid string offset"
)?;
deps.push(str_offset.to_string());
}
}
i += 16;
}
Ok(deps)
}
// NOTE(review): duplicate of the load_dependencies block above (scrape
// artifact).
// WARNING: pushes into `self.dependencies` while iterating a shared borrow
// of it (borrow-checker error) and passes an Arc where `load` wants a path.
fn load_dependencies(&mut self) -> Result<()> {
for
dep in &self.dependencies {
let child = DynamicLoader::load(dep)?;
self.dependencies.push(Arc::
new
(child));
}
Ok(())
}
/// A symbol name paired with the version it was requested with
/// (GNU symbol versioning, `.gnu.version_r`).
#[derive(Debug)]
struct VersionedSymbol {
    name: String,
    version: Option<String>,
    hash: u32,
}

/// Parses `.gnu.version_r`-style records into a flat list.
///
/// NOTE(review): real Verneed/Vernaux records chain via vn_next/vna_next
/// offsets and name fields are offsets into .dynstr, not inline strings —
/// this linear "8-byte header + inline NUL name" walk matches the
/// article's simplified layout only; TODO confirm against the producer.
/// (Written with `&self` but outside an `impl` in the article.)
fn parse_version_requirements(&self) -> Result<Vec<VersionedSymbol>> {
    let versym = self.elf.sections
        .get_by_name(".gnu.version_r")
        .context("No version section")?;
    let data = versym.data();
    let mut symbols = Vec::new();
    let mut offset = 0;
    // Bug fixed: the original indexed data[offset+7] without checking the
    // remaining length, panicking on a truncated section.
    while offset + 8 <= data.len() {
        // Flags and index are currently unused; kept (underscored) so the
        // record layout stays documented in code.
        let _vna_flags = u16::from_le_bytes([data[offset], data[offset + 1]]);
        let _vna_ndx = u16::from_le_bytes([data[offset + 2], data[offset + 3]]);
        let vna_hash = u32::from_le_bytes([
            data[offset + 4], data[offset + 5],
            data[offset + 6], data[offset + 7],
        ]);
        let name_start = offset + 8;
        let name = CStr::from_bytes_until_nul(&data[name_start..])?
            .to_str()?;
        symbols.push(VersionedSymbol {
            name: name.to_string(),
            version: None,
            hash: vna_hash,
        });
        // 8-byte header + name bytes + NUL terminator.
        offset += name.len() + 9;
    }
    Ok(symbols)
}
/// Looks up `name` with exact version `ver` across loaded dependencies,
/// returning the first match's value (address).
/// (Written with `&self` but outside an `impl` in the article.)
fn find_symbol_with_version(&self, name: &str, ver: &str) -> Result<usize> {
    for lib in &self.dependencies {
        let hit = lib.exported_symbols.iter().find(|s| {
            s.name == name && s.version.as_deref() == Some(ver)
        });
        if let Some(sym) = hit {
            return Ok(sym.value);
        }
    }
    bail!("Symbol {}({}) not found", name, ver)
}
// NOTE(review): duplicate of the VersionedSymbol/parse_version_requirements
// block above (scrape artifact).
// WARNING: indexes data[offset+7] without a length check (panics on a
// truncated section); vna_flags/vna_ndx are unused.
#[derive(Debug)]
struct
VersionedSymbol {
name: String,
version: Option<String>,
hash: u32,
}
fn parse_version_requirements(&self) -> Result<Vec<VersionedSymbol>> {
let versym = self.elf.sections
.get_by_name(
".gnu.version_r"
)
.context(
"No version section"
)?;
let data = versym.data();
let mut symbols = Vec::
new
();
let mut offset = 0;
while
offset < data.len() {
let vna_flags = u16::from_le_bytes([data[offset], data[offset+1]]);
let vna_ndx = u16::from_le_bytes([data[offset+2], data[offset+3]]);
let vna_hash = u32::from_le_bytes([
data[offset+4], data[offset+5],
data[offset+6], data[offset+7]
]);
let name_ptr = offset + 8;
let name = CStr::from_bytes_until_nul(&data[name_ptr..])?
.to_str()?;
symbols.push(VersionedSymbol {
name: name.to_string(),
version: None,
hash: vna_hash,
});
offset += name.len() + 9;
}
Ok(symbols)
}
// NOTE(review): duplicate of the find_symbol_with_version block above
// (scrape artifact). First exact name+version match across dependencies.
fn find_symbol_with_version(&self, name: &str, ver: &str) -> Result<usize> {
for
lib in &self.dependencies {
if
let Some(sym) = lib.exported_symbols.iter()
.find(|s| s.name == name && s.version.as_deref() == Some(ver))
{
return
Ok(sym.value);
}
}
bail!(
"Symbol {}({}) not found"
, name, ver)
}
/// Registers this object's `.eh_frame` with the unwinder so panics and
/// exceptions can unwind through its frames.
///
/// NOTE(review): `__register_frame` is provided by libgcc/libunwind, not
/// by the `libc` crate — TODO confirm the extern binding exists.
/// (Written with `&self` but outside an `impl` in the article.)
fn register_eh_frame(&self) -> Result<()> {
    let eh_frame = self.elf.sections
        .get_by_name(".eh_frame")
        .context("No EH frame")?;
    let eh_frame_ptr = self.base_addr + eh_frame.sh_addr as usize;
    // SAFETY: the pointer lies inside our mapped image.
    unsafe {
        libc::__register_frame(eh_frame_ptr as *mut u8);
    }
    Ok(())
}
impl Drop for DynamicLoader {
    /// Mirror of `register_eh_frame`: deregister before the image goes
    /// away, otherwise the unwinder keeps a dangling frame-table pointer.
    fn drop(&mut self) {
        let section = self.elf.sections.get_by_name(".eh_frame");
        if let Some(eh_frame) = section {
            let ptr = self.base_addr + eh_frame.sh_addr as usize;
            // SAFETY: same pointer that was handed to __register_frame.
            unsafe {
                libc::__deregister_frame(ptr as *mut u8);
            }
        }
    }
}
// NOTE(review): duplicate of the register_eh_frame block above (scrape
// artifact). `__register_frame` comes from libgcc/libunwind, not the libc
// crate — presumably an extern declaration exists elsewhere; verify.
fn register_eh_frame(&self) -> Result<()> {
let eh_frame = self.elf.sections
.get_by_name(
".eh_frame"
)
.context(
"No EH frame"
)?;
let eh_frame_ptr = self.base_addr + eh_frame.sh_addr as usize;
unsafe {
libc::__register_frame(eh_frame_ptr as *mut u8);
}
Ok(())
}
// NOTE(review): duplicate of the Drop impl above (scrape artifact) —
// deregisters .eh_frame on teardown.
impl Drop
for
DynamicLoader {
fn drop(&mut self) {
if
let Some(eh_frame) = self.elf.sections.get_by_name(
".eh_frame"
) {
let ptr = self.base_addr + eh_frame.sh_addr as usize;
unsafe {
libc::__deregister_frame(ptr as *mut u8);
}
}
}
}
struct
LazyBindingState {
plt_got: HashMap<usize, SymbolResolver>,
resolver_fn: fn(*mut u8) -> u64,
}
impl LazyBindingState {
fn
new
() -> Self {
Self {
plt_got: HashMap::
new
(),
resolver_fn: default_resolver,
}
}
extern
"C"
fn lazy_resolver_stub(args: *mut u8) -> u64 {
let resolver = unsafe {
&mut *(args.offset(-16) as *mut LazyBindingState)
};
let index = unsafe { *(args.offset(-8) as *
const
u32) };
resolver.resolve_symbol(index)
}
fn resolve_symbol(&mut self, index: u32) -> u64 {
}
}
// NOTE(review): duplicate of the LazyBindingState block above (scrape
// artifact).
// WARNING: `resolve_symbol` declares `-> u64` with an empty body — a
// compile error; the -16/-8 stack offsets depend on the (unshown) PLT stub.
struct
LazyBindingState {
plt_got: HashMap<usize, SymbolResolver>,
resolver_fn: fn(*mut u8) -> u64,
}
impl LazyBindingState {
fn
new
() -> Self {
Self {
plt_got: HashMap::
new
(),
resolver_fn: default_resolver,
}
}
extern
"C"
fn lazy_resolver_stub(args: *mut u8) -> u64 {
let resolver = unsafe {
&mut *(args.offset(-16) as *mut LazyBindingState)
};
let index = unsafe { *(args.offset(-8) as *
const
u32) };
resolver.resolve_symbol(index)
}
fn resolve_symbol(&mut self, index: u32) -> u64 {
}
}
/// Drop-in replacement for glibc's internal `__libc_dlopen_mode`, exported
/// unmangled so it shadows the original at link/preload time.
///
/// NOTE(review): `my_loader` is a free variable in the article's snippet —
/// presumably a process-wide loader instance; confirm before use.
#[no_mangle]
unsafe extern "C" fn __libc_dlopen_mode(
    filename: *const c_char,
    mode: c_int,
) -> *mut c_void {
    let _ = mode; // RTLD_* mode flags are currently ignored
    let custom_handle = my_loader.load(filename);
    hijack_original_symbols();
    custom_handle as *mut c_void
}
/// Launches `ls` with LD_PRELOAD pointing at our loader shim, replacing
/// the current process image via exec.
///
/// Bug fixed: `CommandExt::exec` only ever returns on failure (yielding
/// the io::Error), and the original discarded that error silently.
fn inject_via_preload() {
    use std::os::unix::process::CommandExt;
    let err = Command::new("ls")
        .env("LD_PRELOAD", "./my_loader.so")
        .exec();
    // Reached only if exec failed — the process image was not replaced.
    panic!("exec failed: {}", err);
}
// NOTE(review): duplicate of the __libc_dlopen_mode / inject_via_preload
// blocks above (scrape artifact). `my_loader` is a free variable here, and
// the io::Error returned by exec() (exec only returns on failure) is
// silently dropped.
#[no_mangle]
unsafe
extern
"C"
fn __libc_dlopen_mode(
filename: *
const
c_char,
mode: c_int
) -> *mut c_void {
let custom_handle = my_loader.load(filename);
hijack_original_symbols();
custom_handle as *mut c_void
}
fn inject_via_preload() {
use std::os::unix::process::CommandExt;
Command::
new
(
"ls"
)
.env(
"LD_PRELOAD"
,
"./my_loader.so"
)
.exec();
}
fn generate_loadmap(&self) -> String {
let mut map = String::
new
();
for
phdr in &self.elf.program_headers {
if
phdr.p_type == PT_LOAD {
writeln!(&mut map,
"{:x}-{:x} {}{}{}"
,
self.base_addr + phdr.p_vaddr as usize,
self.base_addr + phdr.p_vaddr as usize + phdr.p_memsz as usize,
if
phdr.p_flags & PF_R != 0 {
'r'
}
else
{
'-'
},
if
phdr.p_flags & PF_W != 0 {
'w'
}
else
{
'-'
},
if
phdr.p_flags & PF_X != 0 {
'x'
}
else
{
'-'
},
).unwrap();
}
}
map
}
fn register_gdb_helpers() {
println!(
"add-symbol-file {} 0x{:x}"
,
self.elf_path,
self.base_addr + self.elf.header.e_entry as usize
);
}
// NOTE(review): duplicate of the generate_loadmap block above (scrape
// artifact). `writeln!` on a String needs `std::fmt::Write` in scope,
// which is never imported in this article.
fn generate_loadmap(&self) -> String {
let mut map = String::
new
();
for
phdr in &self.elf.program_headers {
if
phdr.p_type == PT_LOAD {
writeln!(&mut map,
"{:x}-{:x} {}{}{}"
,
self.base_addr + phdr.p_vaddr as usize,
self.base_addr + phdr.p_vaddr as usize + phdr.p_memsz as usize,
if
phdr.p_flags & PF_R != 0 {
'r'
}
else
{
'-'
},
if
phdr.p_flags & PF_W != 0 {
'w'
}
else
{
'-'
},
if
phdr.p_flags & PF_X != 0 {
'x'
}
else
{
'-'
},
).unwrap();
}
}
map
}
// NOTE(review): duplicate of the register_gdb_helpers block above (scrape
// artifact). References `self` with no receiver parameter — cannot compile
// as written.
fn register_gdb_helpers() {
println!(
"add-symbol-file {} 0x{:x}"
,
self.elf_path,
self.base_addr + self.elf.header.e_entry as usize
);
}
/// Policy limits applied before an ELF object is accepted for loading.
struct LoadPolicy {
    /// Dependency paths that may be loaded; anything else is rejected.
    allowed_paths: Vec<PathBuf>,
    /// Upper bound on program-header count (defends against absurd inputs).
    max_segments: usize,
}

impl LoadPolicy {
    /// Rejects the object if it has too many segments or names a
    /// dependency outside the allow-list.
    fn validate(&self, elf: &Elf) -> Result<()> {
        if elf.program_headers.len() > self.max_segments {
            bail!("Too many segments");
        }
        let deps = parse_dynamic(elf)?;
        for dep in &deps {
            let allowed = self.allowed_paths
                .iter()
                .any(|p| p.as_path() == Path::new(dep));
            if !allowed {
                bail!("Forbidden dependency: {}", dep);
            }
        }
        Ok(())
    }
}
/// Installs a minimal seccomp allow-list (exit/read/write) for the current
/// process; the kernel denies every other syscall from then on.
fn apply_syscall_filters() {
    let ctx = seccomp::Context::default()
        .allow(syscall::Sysno::exit)
        .allow(syscall::Sysno::read)
        .allow(syscall::Sysno::write);
    // Filters cannot be rolled back once loaded; a failure here is fatal.
    ctx.load().unwrap();
}
// NOTE(review): duplicate of the LoadPolicy block above (scrape artifact).
// Segment-count cap plus dependency allow-list check.
struct
LoadPolicy {
allowed_paths: Vec<PathBuf>,
max_segments: usize,
}
impl LoadPolicy {
fn validate(&self, elf: &Elf) -> Result<()> {
if
elf.program_headers.len() > self.max_segments {
bail!(
"Too many segments"
);
}
for
dep in &parse_dynamic(elf)? {
if
!self.allowed_paths.iter()
.any(|p| p.as_path() == Path::
new
(dep))
{
bail!(
"Forbidden dependency: {}"
, dep);
}
}
Ok(())
}
}
// NOTE(review): duplicate of the apply_syscall_filters block above (scrape
// artifact). Minimal seccomp allow-list: exit/read/write only.
fn apply_syscall_filters() {
let mut ctx = seccomp::Context::
default
()
.allow(syscall::Sysno::
exit
)
.allow(syscall::Sysno::read)
.allow(syscall::Sysno::write);
ctx.load().unwrap();
}
/// Runs the module's constructors: every non-null function pointer in
/// `.init_array` (or legacy `.ctors` as a fallback). No-op when neither
/// section exists.
/// (Written with `&self` but outside an `impl` in the article.)
fn execute_init_array(&self) -> Result<()> {
    let section = self.elf.sections
        .get_by_name(".init_array")
        .or_else(|| self.elf.sections.get_by_name(".ctors"));
    let Some(section) = section else { return Ok(()) };
    let ptr = self.base_addr + section.sh_addr as usize;
    let count = section.sh_size as usize / mem::size_of::<usize>();
    // SAFETY: the section lies inside our mapped image; each entry is a
    // pointer-sized constructor address. Zero entries are skipped.
    unsafe {
        let funcs = slice::from_raw_parts(ptr as *const usize, count);
        for &func_ptr in funcs {
            if func_ptr == 0 {
                continue;
            }
            let ctor: extern "C" fn() = mem::transmute(func_ptr);
            ctor();
        }
    }
    Ok(())
}
// NOTE(review): duplicate of the execute_init_array block above (scrape
// artifact). Invokes each non-null pointer in .init_array/.ctors as an
// extern "C" fn().
fn execute_init_array(&self) -> Result<()> {
let init_array = self.elf.sections
.get_by_name(
".init_array"
)
.or_else(|| self.elf.sections.get_by_name(
".ctors"
));
if
let Some(section) = init_array {
let ptr = self.base_addr + section.sh_addr as usize;
let count = section.sh_size as usize / mem::size_of::<usize>();
unsafe {
let funcs = slice::from_raw_parts(
ptr as *
const
usize,
count
);
for
&func_ptr in funcs {
if
func_ptr != 0 {
let func:
extern
"C"
fn() = mem::transmute(func_ptr);
func();
}
}
}
}
Ok(())
}
/// Registered (callback, argument) pairs, run in reverse order at exit.
///
/// NOTE(review): a bare `static mut` is not thread-safe — a
/// `Mutex<Vec<..>>` would be the sound choice; kept as a `static mut` to
/// preserve the article's minimal-runtime flavor.
///
/// Bug fixed: the original declared `Vec<extern "C" fn()>` but pushed a
/// closure capturing `func`/`arg` — capturing closures cannot coerce to
/// function pointers, so it did not compile. Store the pair instead.
static mut ATEXIT_FUNCS: Vec<(extern "C" fn(*mut c_void), *mut c_void)> = Vec::new();

/// Itanium C++ ABI hook: records `func(arg)` to run at process exit.
/// The owning-DSO handle is ignored. Returns 0 on success, per the ABI.
#[no_mangle]
extern "C" fn __cxa_atexit(
    func: extern "C" fn(*mut c_void),
    arg: *mut c_void,
    _dso: *mut c_void,
) -> i32 {
    // SAFETY: single-threaded registration assumed (see note above).
    unsafe {
        ATEXIT_FUNCS.push((func, arg));
    }
    0
}

/// Runs all registered destructors in reverse registration order
/// (last registered runs first), draining the list.
fn run_dtors() {
    // SAFETY: single-threaded teardown assumed.
    unsafe {
        for (func, arg) in ATEXIT_FUNCS.drain(..).rev() {
            func(arg);
        }
    }
}
// NOTE(review): duplicate of the atexit block above (scrape artifact).
// WARNING: pushes a capturing closure into a Vec of plain fn pointers —
// capturing closures cannot coerce to `extern "C" fn()`, so this does not
// compile; `static mut` access is also not thread-safe.
static
mut ATEXIT_FUNCS: Vec<
extern
"C"
fn()> = Vec::
new
();
#[no_mangle]
extern
"C"
fn __cxa_atexit(
func:
extern
"C"
fn(*mut c_void),
arg: *mut c_void,
_dso: *mut c_void
) -> i32 {
unsafe {
ATEXIT_FUNCS.push(|| func(arg));
}
0
}
fn run_dtors() {
unsafe {
for
func in ATEXIT_FUNCS.drain(..).rev() {
func();
}
}
}
/// Software-maintained Global Offset Table: resolved addresses keyed by
/// slot index, plus the GOT's base address inside the mapped image.
struct GlobalOffsetTable {
    entries: HashMap<usize, usize>,
    base: usize,
}

impl GlobalOffsetTable {
    /// Empty table rooted at `base`.
    fn new(base: usize) -> Self {
        Self { entries: HashMap::new(), base }
    }

    /// Resolves slot `index` through `loader`, caches the address, and
    /// returns it.
    fn resolve(&mut self, index: usize, loader: &DynamicLoader) -> Result<usize> {
        let sym = loader.get_symbol_by_index(index)?;
        let addr = loader.resolve_symbol(sym)?;
        self.entries.insert(index, addr);
        Ok(addr)
    }

    /// Writes every cached address into its in-memory GOT slot.
    fn apply_relocations(&self) {
        // SAFETY: assumes base + index * word-size addresses lie inside a
        // mapped, writable GOT — TODO confirm callers guarantee this.
        unsafe {
            for (&index, &addr) in &self.entries {
                let slot = self.base + index * mem::size_of::<usize>();
                *(slot as *mut usize) = addr;
            }
        }
    }
}
// NOTE(review): duplicate of the GlobalOffsetTable block above (scrape
// artifact). Cache of resolved addresses plus raw writes into GOT slots.
struct
GlobalOffsetTable {
entries: HashMap<usize, usize>,
base: usize,
}
impl GlobalOffsetTable {
fn
new
(base: usize) -> Self {
Self { entries: HashMap::
new
(), base }
}
fn resolve(&mut self, index: usize, loader: &DynamicLoader) -> Result<usize> {
let sym = loader.get_symbol_by_index(index)?;
let addr = loader.resolve_symbol(sym)?;
self.entries.insert(index, addr);
Ok(addr)
}
fn apply_relocations(&self) {
unsafe {
for
(&index, &addr) in &self.entries {
let got_entry = self.base + index * mem::size_of::<usize>();
*(got_entry as *mut usize) = addr;
}
}
}
}
/// Chooses the image base: a fixed default (plus a page-aligned ASLR slide
/// for PIE objects), rebased so the lowest PT_LOAD vaddr lands there.
fn calculate_image_base(elf: &Elf) -> usize {
    // Lowest PT_LOAD virtual address, or 0 when there is none.
    // Bug fixed: the original left min_vaddr at usize::MAX when no PT_LOAD
    // segment existed, underflowing the subtraction below.
    let min_vaddr = elf.program_headers
        .iter()
        .filter(|ph| ph.p_type == PT_LOAD)
        .map(|ph| ph.p_vaddr as usize)
        .min()
        .unwrap_or(0);
    let default_base = 0x400000;
    // ET_DYN (PIE / shared object) gets a random page-aligned slide;
    // ET_EXEC is linked for a fixed address, so no slide.
    let aslr_offset = if elf.header.e_type == ET_DYN {
        (os_allocator::get_random() % 0x100000) * 0x1000
    } else {
        0
    };
    default_base + aslr_offset - min_vaddr
}
// NOTE(review): duplicate of the calculate_image_base block above (scrape
// artifact).
// WARNING: when no PT_LOAD exists, min_vaddr stays usize::MAX and the final
// subtraction underflows (panic in debug, wraps in release); `min` also
// needs a std::cmp::min import that the article never shows.
fn calculate_image_base(elf: &Elf) -> usize {
let mut min_vaddr = usize::MAX;
for
phdr in &elf.program_headers {
if
phdr.p_type == PT_LOAD {
min_vaddr = min(min_vaddr, phdr.p_vaddr as usize);
}
}
let default_base = 0x400000;
let aslr_offset =
if
elf.header.e_type == ET_DYN {
(os_allocator::get_random() % 0x100000) * 0x1000
}
else
{
0
};
default_base + aslr_offset - min_vaddr
}
use dwarf::{
    DebuggingInformationEntry,
    Dwarf,
    EndianSlice,
    LittleEndian
};

/// Loads DWARF debug info for a module by feeding its debug sections to
/// the `dwarf` crate's section loader.
///
/// Bug fixed: the original built an `EndianSlice` over `.debug_info` and
/// never used it (the loader closure fetches sections itself); the dead
/// binding is removed, keeping only the "does .debug_info exist" check.
fn load_debug_info(loader: &DynamicLoader) -> Result<Dwarf<EndianSlice<LittleEndian>>> {
    // Bail out early if the binary was built without debug info.
    loader.elf.sections
        .get_by_name(".debug_info")
        .context("No debug info")?;
    // NOTE(review): returning None for sections other than .debug_abbrev /
    // .debug_str assumes the crate treats a missing section as empty —
    // TODO confirm against the crate's loader contract.
    Dwarf::load(|section| {
        Ok(match section.name() {
            ".debug_abbrev" => loader.get_section_data(".debug_abbrev"),
            ".debug_str" => loader.get_section_data(".debug_str"),
            _ => None,
        })
    })
}
// NOTE(review): duplicate of the load_debug_info block above (scrape
// artifact). The `data` binding is constructed and never used.
use dwarf::{
DebuggingInformationEntry,
Dwarf,
EndianSlice,
LittleEndian
};
fn load_debug_info(loader: &DynamicLoader) -> Result<Dwarf<EndianSlice<LittleEndian>>> {
let debug_info = loader.elf.sections
.get_by_name(
".debug_info"
)
.context(
"No debug info"
)?;
let data = EndianSlice::
new
(
&debug_info.data()[..],
LittleEndian
);
Dwarf::load(|section| {
Ok(match section.name() {
".debug_abbrev"
=> loader.get_section_data(
".debug_abbrev"
),
".debug_str"
=> loader.get_section_data(
".debug_str"
),
_ => None,
})
})
}
fn generate_coredump(pid: u32) -> Result<()> {
use nix::sys::ptrace;
use nix::unistd::Pid;
let pid = Pid::from_raw(pid as i32);
let regs = ptrace::getregs(pid)?;
let mut core = File::create(
"core.dump"
)?;
core.write_all(build_elf_header())?;
let maps = procfs::Process::
new
(pid)?.maps()?;
for
map in maps {
let mut buf = vec![0; map.size()];
ptrace::read(pid, map.start as *mut _)
.and_then(|_| Ok(core.write_all(&buf)))?;
}
core.seek(SeekFrom::End(0))?;
core.write_all(&unsafe {
slice::from_raw_parts(
®s as *
const
_ as *
const
u8,
mem::size_of_val(®s)
)
})?;
Ok(())
}
// NOTE(review): duplicate of the generate_coredump block above (scrape
// artifact).
// WARNING: `®s` is encoding mojibake for `&regs` (won't compile), and the
// zero-filled `buf` is written out without ever receiving the data that
// ptrace::read fetched.
fn generate_coredump(pid: u32) -> Result<()> {
use nix::sys::ptrace;
use nix::unistd::Pid;
let pid = Pid::from_raw(pid as i32);
let regs = ptrace::getregs(pid)?;
let mut core = File::create(
"core.dump"
)?;
core.write_all(build_elf_header())?;
let maps = procfs::Process::
new
(pid)?.maps()?;
for
map in maps {
let mut buf = vec![0; map.size()];
ptrace::read(pid, map.start as *mut _)
.and_then(|_| Ok(core.write_all(&buf)))?;
}
core.seek(SeekFrom::End(0))?;
core.write_all(&unsafe {
slice::from_raw_parts(
®s as *
const
_ as *
const
u8,
mem::size_of_val(®s)
)
})?;
Ok(())
}
/// Applies R_X86_64_RELATIVE entries: each target slot receives
/// base + addend (no symbol lookup involved).
///
/// NOTE(review): RELATIVE relocations normally live in `.rela.dyn`;
/// `.rela.plt` holds JUMP_SLOT entries — TODO confirm the section choice.
/// (Written with `&self` but outside an `impl` in the article.)
fn apply_relative_relocs(&self) -> Result<()> {
    let rela_plt = self.elf.sections
        .get_by_name(".rela.plt")
        .context("No relocation section")?;
    let relocs = parse_rela_entries(rela_plt.data())?;
    for reloc in relocs {
        if reloc.r_type != R_X86_64_RELATIVE {
            continue;
        }
        let target_addr = self.base_addr + reloc.r_addend as usize;
        let reloc_addr = self.base_addr + reloc.r_offset as usize;
        // SAFETY: r_offset comes from the object's own relocation table
        // and is assumed to lie inside the mapped, writable image.
        unsafe {
            *(reloc_addr as *mut usize) = target_addr;
        }
    }
    Ok(())
}
/// Decodes raw Elf64_Rela records (24 bytes each: offset, info, addend).
/// Any trailing partial record is silently ignored by `chunks_exact`.
fn parse_rela_entries(data: &[u8]) -> Result<Vec<Reloc>> {
    let mut out = Vec::with_capacity(data.len() / 24);
    for rec in data.chunks_exact(24) {
        let offset = u64::from_le_bytes(rec[0..8].try_into()?);
        let info = u64::from_le_bytes(rec[8..16].try_into()?);
        let addend = i64::from_le_bytes(rec[16..24].try_into()?);
        out.push(Reloc {
            r_offset: offset,
            // Low 32 bits of r_info hold the relocation type; the high
            // 32 bits (symbol index) are dropped here.
            r_type: (info & 0xFFFFFFFF) as u32,
            r_addend: addend,
        });
    }
    Ok(out)
}
// NOTE(review): duplicate of the apply_relative_relocs block above (scrape
// artifact). Applies base+addend to each R_X86_64_RELATIVE slot; note that
// RELATIVE entries usually live in .rela.dyn, not .rela.plt — verify.
fn apply_relative_relocs(&self) -> Result<()> {
let rela_plt = self.elf.sections
.get_by_name(
".rela.plt"
)
.context(
"No relocation section"
)?;
for
reloc in parse_rela_entries(rela_plt.data())? {
if
reloc.r_type != R_X86_64_RELATIVE {
continue
;
}
let target_addr = self.base_addr + reloc.r_addend as usize;
let reloc_addr = self.base_addr + reloc.r_offset as usize;
unsafe {
*(reloc_addr as *mut usize) = target_addr;
}
}
Ok(())
}
fn parse_rela_entries(data: &[u8]) -> Result<Vec<Reloc>> {
data.chunks_exact(24)
.map(|chunk| {
let offset = u64::from_le_bytes(chunk[0..8].try_into()?);
let info = u64::from_le_bytes(chunk[8..16].try_into()?);
let addend = i64::from_le_bytes(chunk[16..24].try_into()?);
Ok(Reloc {
r_offset: offset,
r_type: (info & 0xFFFFFFFF) as u32,
r_addend: addend,
})
[培训]科锐逆向工程师培训第53期2025年7月8日开班!
最后于 2025-5-18 13:56
被Hrlies编辑
,原因: