mirror of
https://github.com/ziglang/zig.git
synced 2024-11-26 23:22:44 +00:00
Merge pull request #15823 from kcbanner/dwarf_unwind
Add DWARF unwinding, and an external debug info loader for ELF
This commit is contained in:
commit
61d5b7c957
@ -413,6 +413,13 @@ pub extern "c" fn timer_delete(timerid: c.timer_t) c_int;
|
||||
// POSIX per-process timer accessors; the id comes from the C `timer_t` type.
pub extern "c" fn timer_settime(timerid: c.timer_t, flags: c_int, new_value: *const c.itimerspec, old_value: *c.itimerspec) c_int;
pub extern "c" fn timer_gettime(timerid: c.timer_t, flags: c_int, curr_value: *c.itimerspec) c_int;

// Select the getcontext implementation at comptime: musl ships no getcontext,
// so on musl-based Linux targets the Zig implementation from std.os.linux is
// re-exported under the same name; everywhere else the libc symbol is declared.
pub usingnamespace if (builtin.os.tag == .linux and builtin.target.isMusl()) struct {
    // musl does not implement getcontext
    pub const getcontext = std.os.linux.getcontext;
} else struct {
    pub extern "c" fn getcontext(ucp: *std.os.ucontext_t) c_int;
};
|
||||
|
||||
pub const max_align_t = if (builtin.abi == .msvc)
|
||||
f64
|
||||
else if (builtin.target.isDarwin())
|
||||
|
@ -148,12 +148,10 @@ pub const ucontext_t = extern struct {
|
||||
link: ?*ucontext_t,
|
||||
mcsize: u64,
|
||||
mcontext: *mcontext_t,
|
||||
__mcontext_data: mcontext_t,
|
||||
};
|
||||
|
||||
pub const mcontext_t = extern struct {
|
||||
es: arch_bits.exception_state,
|
||||
ss: arch_bits.thread_state,
|
||||
};
|
||||
pub const mcontext_t = arch_bits.mcontext_t;
|
||||
|
||||
extern "c" fn __error() *c_int;
|
||||
pub extern "c" fn NSVersionOfRunTimeLibrary(library_name: [*:0]const u8) u32;
|
||||
|
@ -1,5 +1,12 @@
|
||||
// See C headers in
|
||||
// lib/libc/include/aarch64-macos.12-gnu/mach/arm/_structs.h
|
||||
// lib/libc/include/aarch64-macos.13-none/arm/_mcontext.h
|
||||
|
||||
/// Machine context for aarch64 Darwin; field order mirrors the C struct from
/// the headers referenced above (presumably `__darwin_mcontext64` — confirm
/// against mach/arm/_structs.h).
pub const mcontext_t = extern struct {
    es: exception_state, // exception/fault information
    ss: thread_state, // general-purpose register state
    ns: neon_state, // SIMD / floating-point register state
};
|
||||
|
||||
pub const exception_state = extern struct {
|
||||
far: u64, // Virtual Fault Address
|
||||
@ -17,6 +24,12 @@ pub const thread_state = extern struct {
|
||||
__pad: u32,
|
||||
};
|
||||
|
||||
/// aarch64 SIMD / floating-point register state.
pub const neon_state = extern struct {
    q: [32]u128, // vector registers V0-V31, 128 bits each
    fpsr: u32, // floating-point status register
    fpcr: u32, // floating-point control register
};
|
||||
|
||||
// Mach exception constants (presumably from <mach/exception_types.h> — confirm
// against the Darwin headers referenced at the top of this file).
pub const EXC_TYPES_COUNT = 14;
pub const EXC_MASK_MACHINE = 0;
|
||||
|
||||
|
@ -1,5 +1,11 @@
|
||||
const c = @import("../darwin.zig");
|
||||
|
||||
/// Machine context for x86_64 Darwin; field order mirrors the corresponding
/// Darwin C struct (presumably `_STRUCT_MCONTEXT64` — confirm against the
/// headers imported via ../darwin.zig).
pub const mcontext_t = extern struct {
    es: exception_state, // exception/fault information
    ss: thread_state, // general-purpose register state
    fs: float_state, // x87 / SSE register state
};
|
||||
|
||||
pub const exception_state = extern struct {
|
||||
trapno: u16,
|
||||
cpu: u16,
|
||||
@ -31,6 +37,29 @@ pub const thread_state = extern struct {
|
||||
gs: u64,
|
||||
};
|
||||
|
||||
// Raw byte layouts for individual FP registers; kept as byte arrays because
// only the in-memory size matters for context capture.
const stmm_reg = [16]u8; // one x87 stack register slot (80-bit value padded to 16 bytes)
const xmm_reg = [16]u8; // one 128-bit SSE register

/// x87 / SSE floating-point state for x86_64 Darwin (presumably mirrors
/// `_STRUCT_X86_FLOAT_STATE64` — confirm against the Darwin headers).
pub const float_state = extern struct {
    reserved: [2]c_int,
    fcw: u16, // x87 control word
    fsw: u16, // x87 status word
    ftw: u8, // x87 tag word
    rsrv1: u8,
    fop: u16, // last x87 opcode
    ip: u32, // instruction pointer of the last x87 instruction
    cs: u16, // code segment of the last x87 instruction
    rsrv2: u16,
    dp: u32, // data pointer of the last x87 memory operand
    ds: u16, // data segment of the last x87 memory operand
    rsrv3: u16,
    mxcsr: u32, // SSE control/status register
    mxcsrmask: u32, // valid bit mask for mxcsr
    stmm: [8]stmm_reg, // x87 stack registers ST0-ST7
    xmm: [16]xmm_reg, // SSE registers XMM0-XMM15
    rsrv4: [96]u8,
    reserved1: c_int,
};
|
||||
|
||||
// Mach thread-state flavor selecting the general-purpose register state
// (presumably x86_THREAD_STATE64 — confirm against <mach/thread_status.h>).
pub const THREAD_STATE = 4;
// Size of `thread_state` expressed in 32-bit words, the unit the Mach
// thread_get_state/thread_set_state APIs expect for state counts.
pub const THREAD_STATE_COUNT: c.mach_msg_type_number_t = @sizeOf(thread_state) / @sizeOf(c_int);
|
||||
|
||||
|
@ -1214,6 +1214,11 @@ pub const Coff = struct {
|
||||
return Strtab{ .buffer = self.data[offset..][0..size] };
|
||||
}
|
||||
|
||||
pub fn strtabRequired(self: *const Coff) bool {
|
||||
for (self.getSectionHeaders()) |*sect_hdr| if (sect_hdr.getName() == null) return true;
|
||||
return false;
|
||||
}
|
||||
|
||||
pub fn getSectionHeaders(self: *const Coff) []align(1) const SectionHeader {
|
||||
const coff_header = self.getCoffHeader();
|
||||
const offset = self.coff_header_offset + @sizeOf(CoffHeader) + coff_header.size_of_optional_header;
|
||||
@ -1248,14 +1253,12 @@ pub const Coff = struct {
|
||||
return null;
|
||||
}
|
||||
|
||||
pub fn getSectionData(self: *const Coff, comptime name: []const u8) ![]const u8 {
|
||||
const sec = self.getSectionByName(name) orelse return error.MissingCoffSection;
|
||||
pub fn getSectionData(self: *const Coff, sec: *align(1) const SectionHeader) []const u8 {
|
||||
return self.data[sec.pointer_to_raw_data..][0..sec.virtual_size];
|
||||
}
|
||||
|
||||
// Return an owned slice full of the section data
|
||||
pub fn getSectionDataAlloc(self: *const Coff, comptime name: []const u8, allocator: mem.Allocator) ![]u8 {
|
||||
const section_data = try self.getSectionData(name);
|
||||
pub fn getSectionDataAlloc(self: *const Coff, sec: *align(1) const SectionHeader, allocator: mem.Allocator) ![]u8 {
|
||||
const section_data = self.getSectionData(sec);
|
||||
return allocator.dupe(u8, section_data);
|
||||
}
|
||||
};
|
||||
|
1095
lib/std/debug.zig
1095
lib/std/debug.zig
File diff suppressed because it is too large
Load Diff
1578
lib/std/dwarf.zig
1578
lib/std/dwarf.zig
File diff suppressed because it is too large
Load Diff
27
lib/std/dwarf/EH.zig
Normal file
27
lib/std/dwarf/EH.zig
Normal file
@ -0,0 +1,27 @@
|
||||
/// DWARF exception-handling pointer encodings (DW_EH_PE_*), used to describe
/// how pointers are stored in .eh_frame / .eh_frame_hdr. An encoding byte is
/// a value encoding (low nibble) OR'd with an application/relative-to modifier
/// (high nibble), optionally OR'd with `indirect`.
pub const PE = struct {
    pub const absptr = 0x00; // absolute, native-sized pointer

    // Masks for the value-encoding nibble.
    pub const size_mask = 0x7;
    pub const sign_mask = 0x8;
    pub const type_mask = size_mask | sign_mask;

    // Value encodings: how the datum itself is stored.
    pub const uleb128 = 0x01;
    pub const udata2 = 0x02;
    pub const udata4 = 0x03;
    pub const udata8 = 0x04;
    pub const sleb128 = 0x09;
    pub const sdata2 = 0x0A;
    pub const sdata4 = 0x0B;
    pub const sdata8 = 0x0C;

    // Application modifiers: what the stored value is relative to.
    pub const rel_mask = 0x70;
    pub const pcrel = 0x10; // relative to the address of the encoded value
    pub const textrel = 0x20; // relative to the text section
    pub const datarel = 0x30; // relative to the data section
    pub const funcrel = 0x40; // relative to the start of the function
    pub const aligned = 0x50; // aligned to the native pointer size

    // The encoded value is the address of the real value.
    pub const indirect = 0x80;

    // No value is present.
    pub const omit = 0xff;
};
|
387
lib/std/dwarf/abi.zig
Normal file
387
lib/std/dwarf/abi.zig
Normal file
@ -0,0 +1,387 @@
|
||||
const builtin = @import("builtin");
|
||||
const std = @import("../std.zig");
|
||||
const os = std.os;
|
||||
const mem = std.mem;
|
||||
|
||||
/// Whether DWARF-based unwinding has a register mapping for `arch`
/// (see `regBytes` below for the per-architecture mappings).
pub fn isSupportedArch(arch: std.Target.Cpu.Arch) bool {
    switch (arch) {
        .x86, .x86_64, .arm, .aarch64 => return true,
        else => return false,
    }
}
|
||||
|
||||
/// DWARF register number of the instruction pointer on the native
/// architecture. Asserts (unreachable) on unsupported architectures —
/// callers gate on `isSupportedArch`.
pub fn ipRegNum() u8 {
    switch (builtin.cpu.arch) {
        .x86 => return 8, // EIP
        .x86_64 => return 16, // RIP
        .arm => return 15, // PC
        .aarch64 => return 32, // PC
        else => unreachable,
    }
}
||||
|
||||
/// DWARF register number of the frame pointer on the native architecture.
/// `reg_context` is needed on x86 because MachO .eh_frame swaps the ESP/EBP
/// numbers relative to ELF.
pub fn fpRegNum(reg_context: RegisterContext) u8 {
    return switch (builtin.cpu.arch) {
        // GCC on OS X historicaly did the opposite of ELF for these registers (only in .eh_frame), and that is now the convention for MachO
        .x86 => if (reg_context.eh_frame and reg_context.is_macho) 4 else 5,
        .x86_64 => 6, // RBP
        .arm => 11, // FP (r11)
        .aarch64 => 29, // FP (x29)
        else => unreachable,
    };
}
|
||||
|
||||
/// DWARF register number of the stack pointer on the native architecture.
/// See `fpRegNum` for why `reg_context` matters on x86/MachO.
pub fn spRegNum(reg_context: RegisterContext) u8 {
    return switch (builtin.cpu.arch) {
        // Mirror image of the fpRegNum x86 special case.
        .x86 => if (reg_context.eh_frame and reg_context.is_macho) 5 else 4,
        .x86_64 => 7, // RSP
        .arm => 13, // SP (r13)
        .aarch64 => 31, // SP
        else => unreachable,
    };
}
|
||||
|
||||
/// Some platforms use pointer authentication - the upper bits of instruction pointers contain a signature.
/// This function clears these signature bits to make the pointer usable.
pub inline fn stripInstructionPtrAuthCode(ptr: usize) usize {
    if (builtin.cpu.arch == .aarch64) {
        // `hint 0x07` maps to `xpaclri` (or `nop` if the hardware doesn't support it)
        // The save / restore is because `xpaclri` operates on x30 (LR)
        return asm (
            \\mov x16, x30
            \\mov x30, x15
            \\hint 0x07
            \\mov x15, x30
            \\mov x30, x16
            : [ret] "={x15}" (-> usize),
            : [ptr] "{x15}" (ptr),
            : "x16"
        );
    }

    // Other supported architectures do not sign instruction pointers.
    return ptr;
}
|
||||
|
||||
/// Describes the DWARF container a register number was read from. The same
/// number can map to different physical registers depending on container:
/// e.g. on x86, MachO .eh_frame swaps the ESP/EBP numbers relative to ELF
/// (see `fpRegNum` / `spRegNum`).
pub const RegisterContext = struct {
    eh_frame: bool,
    is_macho: bool,
};

/// Error set shared by the register-access helpers in this file.
pub const AbiError = error{
    InvalidRegister,
    UnimplementedArch,
    UnimplementedOs,
    RegisterContextRequired,
    ThreadContextNotSupported,
};
|
||||
|
||||
/// Builds the pointer type `*T` while copying every pointer attribute
/// (constness, volatility, allowzero, alignment, address space) from the
/// byte-slice type that `regBytes` would return for `ContextPtrType`.
fn RegValueReturnType(comptime ContextPtrType: type, comptime T: type) type {
    const reg_bytes_type = comptime RegBytesReturnType(ContextPtrType);
    const info = @typeInfo(reg_bytes_type).Pointer;
    return @Type(.{
        .Pointer = .{
            .size = .One, // single-item pointer to the typed register value
            .is_const = info.is_const,
            .is_volatile = info.is_volatile,
            .is_allowzero = info.is_allowzero,
            .alignment = info.alignment,
            .address_space = info.address_space,
            .child = T,
            .sentinel = null,
        },
    });
}
|
||||
|
||||
/// Returns a pointer to a register stored in a ThreadContext, preserving the pointer attributes of the context.
///
/// Returns error.IncompatibleRegisterSize when @sizeOf(T) does not exactly
/// match the register's backing storage size.
pub fn regValueNative(
    comptime T: type,
    thread_context_ptr: anytype,
    reg_number: u8,
    reg_context: ?RegisterContext,
) !RegValueReturnType(@TypeOf(thread_context_ptr), T) {
    const reg_bytes = try regBytes(thread_context_ptr, reg_number, reg_context);
    if (@sizeOf(T) != reg_bytes.len) return error.IncompatibleRegisterSize;
    return mem.bytesAsValue(T, reg_bytes[0..@sizeOf(T)]);
}
||||
|
||||
/// Maps the thread-context pointer type to the byte-slice type `regBytes`
/// returns: `[]const u8` for a const context pointer, `[]u8` otherwise.
/// Emits a compile error for anything that is not a pointer to
/// std.debug.ThreadContext.
fn RegBytesReturnType(comptime ContextPtrType: type) type {
    const info = @typeInfo(ContextPtrType);
    if (info != .Pointer or info.Pointer.child != std.debug.ThreadContext) {
        // Bug fix: `ContextPtrType` is already a type, so the previous
        // `@typeName(@TypeOf(ContextPtrType))` always printed "type" instead
        // of the offending type's name.
        @compileError("Expected a pointer to std.debug.ThreadContext, got " ++ @typeName(ContextPtrType));
    }

    // Preserve constness of the context in the returned slice type.
    return if (info.Pointer.is_const) []const u8 else []u8;
}
|
||||
|
||||
/// Returns a slice containing the backing storage for `reg_number`.
///
/// `reg_context` describes in what context the register number is used, as it can have different
/// meanings depending on the DWARF container. It is only required when getting the stack or
/// frame pointer register on some architectures.
pub fn regBytes(
    thread_context_ptr: anytype,
    reg_number: u8,
    reg_context: ?RegisterContext,
) AbiError!RegBytesReturnType(@TypeOf(thread_context_ptr)) {
    // Windows thread contexts are CONTEXT structures (field names differ per arch).
    if (builtin.os.tag == .windows) {
        return switch (builtin.cpu.arch) {
            .x86 => switch (reg_number) {
                0 => mem.asBytes(&thread_context_ptr.Eax),
                1 => mem.asBytes(&thread_context_ptr.Ecx),
                2 => mem.asBytes(&thread_context_ptr.Edx),
                3 => mem.asBytes(&thread_context_ptr.Ebx),
                4 => mem.asBytes(&thread_context_ptr.Esp),
                5 => mem.asBytes(&thread_context_ptr.Ebp),
                6 => mem.asBytes(&thread_context_ptr.Esi),
                7 => mem.asBytes(&thread_context_ptr.Edi),
                8 => mem.asBytes(&thread_context_ptr.Eip),
                9 => mem.asBytes(&thread_context_ptr.EFlags),
                10 => mem.asBytes(&thread_context_ptr.SegCs),
                11 => mem.asBytes(&thread_context_ptr.SegSs),
                12 => mem.asBytes(&thread_context_ptr.SegDs),
                13 => mem.asBytes(&thread_context_ptr.SegEs),
                14 => mem.asBytes(&thread_context_ptr.SegFs),
                15 => mem.asBytes(&thread_context_ptr.SegGs),
                else => error.InvalidRegister,
            },
            .x86_64 => switch (reg_number) {
                // Note: DWARF numbers 1-5 do not follow machine-encoding
                // order on x86_64 (rdx/rcx swapped, rsi/rdi before rbp).
                0 => mem.asBytes(&thread_context_ptr.Rax),
                1 => mem.asBytes(&thread_context_ptr.Rdx),
                2 => mem.asBytes(&thread_context_ptr.Rcx),
                3 => mem.asBytes(&thread_context_ptr.Rbx),
                4 => mem.asBytes(&thread_context_ptr.Rsi),
                5 => mem.asBytes(&thread_context_ptr.Rdi),
                6 => mem.asBytes(&thread_context_ptr.Rbp),
                7 => mem.asBytes(&thread_context_ptr.Rsp),
                8 => mem.asBytes(&thread_context_ptr.R8),
                9 => mem.asBytes(&thread_context_ptr.R9),
                10 => mem.asBytes(&thread_context_ptr.R10),
                11 => mem.asBytes(&thread_context_ptr.R11),
                12 => mem.asBytes(&thread_context_ptr.R12),
                13 => mem.asBytes(&thread_context_ptr.R13),
                14 => mem.asBytes(&thread_context_ptr.R14),
                15 => mem.asBytes(&thread_context_ptr.R15),
                16 => mem.asBytes(&thread_context_ptr.Rip),
                else => error.InvalidRegister,
            },
            .aarch64 => switch (reg_number) {
                0...30 => mem.asBytes(&thread_context_ptr.DUMMYUNIONNAME.X[reg_number]),
                31 => mem.asBytes(&thread_context_ptr.Sp),
                32 => mem.asBytes(&thread_context_ptr.Pc),
                else => error.InvalidRegister,
            },
            else => error.UnimplementedArch,
        };
    }

    // All remaining targets use a ucontext_t-shaped context.
    if (!std.debug.have_ucontext) return error.ThreadContextNotSupported;

    const ucontext_ptr = thread_context_ptr;
    return switch (builtin.cpu.arch) {
        .x86 => switch (builtin.os.tag) {
            .linux, .netbsd, .solaris => switch (reg_number) {
                0 => mem.asBytes(&ucontext_ptr.mcontext.gregs[os.REG.EAX]),
                1 => mem.asBytes(&ucontext_ptr.mcontext.gregs[os.REG.ECX]),
                2 => mem.asBytes(&ucontext_ptr.mcontext.gregs[os.REG.EDX]),
                3 => mem.asBytes(&ucontext_ptr.mcontext.gregs[os.REG.EBX]),
                // 4/5 are ESP/EBP on ELF but swapped in MachO .eh_frame,
                // so the caller must supply a RegisterContext here.
                4...5 => if (reg_context) |r| bytes: {
                    if (reg_number == 4) {
                        break :bytes if (r.eh_frame and r.is_macho)
                            mem.asBytes(&ucontext_ptr.mcontext.gregs[os.REG.EBP])
                        else
                            mem.asBytes(&ucontext_ptr.mcontext.gregs[os.REG.ESP]);
                    } else {
                        break :bytes if (r.eh_frame and r.is_macho)
                            mem.asBytes(&ucontext_ptr.mcontext.gregs[os.REG.ESP])
                        else
                            mem.asBytes(&ucontext_ptr.mcontext.gregs[os.REG.EBP]);
                    }
                } else error.RegisterContextRequired,
                6 => mem.asBytes(&ucontext_ptr.mcontext.gregs[os.REG.ESI]),
                7 => mem.asBytes(&ucontext_ptr.mcontext.gregs[os.REG.EDI]),
                8 => mem.asBytes(&ucontext_ptr.mcontext.gregs[os.REG.EIP]),
                9 => mem.asBytes(&ucontext_ptr.mcontext.gregs[os.REG.EFL]),
                10 => mem.asBytes(&ucontext_ptr.mcontext.gregs[os.REG.CS]),
                11 => mem.asBytes(&ucontext_ptr.mcontext.gregs[os.REG.SS]),
                12 => mem.asBytes(&ucontext_ptr.mcontext.gregs[os.REG.DS]),
                13 => mem.asBytes(&ucontext_ptr.mcontext.gregs[os.REG.ES]),
                14 => mem.asBytes(&ucontext_ptr.mcontext.gregs[os.REG.FS]),
                15 => mem.asBytes(&ucontext_ptr.mcontext.gregs[os.REG.GS]),
                16...23 => error.InvalidRegister, // TODO: Support loading ST0-ST7 from mcontext.fpregs
                32...39 => error.InvalidRegister, // TODO: Support loading XMM0-XMM7 from mcontext.fpregs
                else => error.InvalidRegister,
            },
            else => error.UnimplementedOs,
        },
        .x86_64 => switch (builtin.os.tag) {
            .linux, .netbsd, .solaris => switch (reg_number) {
                0 => mem.asBytes(&ucontext_ptr.mcontext.gregs[os.REG.RAX]),
                1 => mem.asBytes(&ucontext_ptr.mcontext.gregs[os.REG.RDX]),
                2 => mem.asBytes(&ucontext_ptr.mcontext.gregs[os.REG.RCX]),
                3 => mem.asBytes(&ucontext_ptr.mcontext.gregs[os.REG.RBX]),
                4 => mem.asBytes(&ucontext_ptr.mcontext.gregs[os.REG.RSI]),
                5 => mem.asBytes(&ucontext_ptr.mcontext.gregs[os.REG.RDI]),
                6 => mem.asBytes(&ucontext_ptr.mcontext.gregs[os.REG.RBP]),
                7 => mem.asBytes(&ucontext_ptr.mcontext.gregs[os.REG.RSP]),
                8 => mem.asBytes(&ucontext_ptr.mcontext.gregs[os.REG.R8]),
                9 => mem.asBytes(&ucontext_ptr.mcontext.gregs[os.REG.R9]),
                10 => mem.asBytes(&ucontext_ptr.mcontext.gregs[os.REG.R10]),
                11 => mem.asBytes(&ucontext_ptr.mcontext.gregs[os.REG.R11]),
                12 => mem.asBytes(&ucontext_ptr.mcontext.gregs[os.REG.R12]),
                13 => mem.asBytes(&ucontext_ptr.mcontext.gregs[os.REG.R13]),
                14 => mem.asBytes(&ucontext_ptr.mcontext.gregs[os.REG.R14]),
                15 => mem.asBytes(&ucontext_ptr.mcontext.gregs[os.REG.R15]),
                16 => mem.asBytes(&ucontext_ptr.mcontext.gregs[os.REG.RIP]),
                // DWARF 17-32 are XMM0-XMM15.
                17...32 => |i| mem.asBytes(&ucontext_ptr.mcontext.fpregs.xmm[i - 17]),
                else => error.InvalidRegister,
            },
            .freebsd => switch (reg_number) {
                0 => mem.asBytes(&ucontext_ptr.mcontext.rax),
                1 => mem.asBytes(&ucontext_ptr.mcontext.rdx),
                2 => mem.asBytes(&ucontext_ptr.mcontext.rcx),
                3 => mem.asBytes(&ucontext_ptr.mcontext.rbx),
                4 => mem.asBytes(&ucontext_ptr.mcontext.rsi),
                5 => mem.asBytes(&ucontext_ptr.mcontext.rdi),
                6 => mem.asBytes(&ucontext_ptr.mcontext.rbp),
                7 => mem.asBytes(&ucontext_ptr.mcontext.rsp),
                8 => mem.asBytes(&ucontext_ptr.mcontext.r8),
                9 => mem.asBytes(&ucontext_ptr.mcontext.r9),
                10 => mem.asBytes(&ucontext_ptr.mcontext.r10),
                11 => mem.asBytes(&ucontext_ptr.mcontext.r11),
                12 => mem.asBytes(&ucontext_ptr.mcontext.r12),
                13 => mem.asBytes(&ucontext_ptr.mcontext.r13),
                14 => mem.asBytes(&ucontext_ptr.mcontext.r14),
                15 => mem.asBytes(&ucontext_ptr.mcontext.r15),
                16 => mem.asBytes(&ucontext_ptr.mcontext.rip),
                // TODO: Extract xmm state from mcontext.fpstate?
                else => error.InvalidRegister,
            },
            .openbsd => switch (reg_number) {
                0 => mem.asBytes(&ucontext_ptr.sc_rax),
                1 => mem.asBytes(&ucontext_ptr.sc_rdx),
                2 => mem.asBytes(&ucontext_ptr.sc_rcx),
                3 => mem.asBytes(&ucontext_ptr.sc_rbx),
                4 => mem.asBytes(&ucontext_ptr.sc_rsi),
                5 => mem.asBytes(&ucontext_ptr.sc_rdi),
                6 => mem.asBytes(&ucontext_ptr.sc_rbp),
                7 => mem.asBytes(&ucontext_ptr.sc_rsp),
                8 => mem.asBytes(&ucontext_ptr.sc_r8),
                9 => mem.asBytes(&ucontext_ptr.sc_r9),
                10 => mem.asBytes(&ucontext_ptr.sc_r10),
                11 => mem.asBytes(&ucontext_ptr.sc_r11),
                12 => mem.asBytes(&ucontext_ptr.sc_r12),
                13 => mem.asBytes(&ucontext_ptr.sc_r13),
                14 => mem.asBytes(&ucontext_ptr.sc_r14),
                15 => mem.asBytes(&ucontext_ptr.sc_r15),
                16 => mem.asBytes(&ucontext_ptr.sc_rip),
                // TODO: Extract xmm state from sc_fpstate?
                else => error.InvalidRegister,
            },
            .macos => switch (reg_number) {
                0 => mem.asBytes(&ucontext_ptr.mcontext.ss.rax),
                1 => mem.asBytes(&ucontext_ptr.mcontext.ss.rdx),
                2 => mem.asBytes(&ucontext_ptr.mcontext.ss.rcx),
                3 => mem.asBytes(&ucontext_ptr.mcontext.ss.rbx),
                4 => mem.asBytes(&ucontext_ptr.mcontext.ss.rsi),
                5 => mem.asBytes(&ucontext_ptr.mcontext.ss.rdi),
                6 => mem.asBytes(&ucontext_ptr.mcontext.ss.rbp),
                7 => mem.asBytes(&ucontext_ptr.mcontext.ss.rsp),
                8 => mem.asBytes(&ucontext_ptr.mcontext.ss.r8),
                9 => mem.asBytes(&ucontext_ptr.mcontext.ss.r9),
                10 => mem.asBytes(&ucontext_ptr.mcontext.ss.r10),
                11 => mem.asBytes(&ucontext_ptr.mcontext.ss.r11),
                12 => mem.asBytes(&ucontext_ptr.mcontext.ss.r12),
                13 => mem.asBytes(&ucontext_ptr.mcontext.ss.r13),
                14 => mem.asBytes(&ucontext_ptr.mcontext.ss.r14),
                15 => mem.asBytes(&ucontext_ptr.mcontext.ss.r15),
                16 => mem.asBytes(&ucontext_ptr.mcontext.ss.rip),
                else => error.InvalidRegister,
            },
            else => error.UnimplementedOs,
        },
        .arm => switch (builtin.os.tag) {
            .linux => switch (reg_number) {
                0 => mem.asBytes(&ucontext_ptr.mcontext.arm_r0),
                1 => mem.asBytes(&ucontext_ptr.mcontext.arm_r1),
                2 => mem.asBytes(&ucontext_ptr.mcontext.arm_r2),
                3 => mem.asBytes(&ucontext_ptr.mcontext.arm_r3),
                4 => mem.asBytes(&ucontext_ptr.mcontext.arm_r4),
                5 => mem.asBytes(&ucontext_ptr.mcontext.arm_r5),
                6 => mem.asBytes(&ucontext_ptr.mcontext.arm_r6),
                7 => mem.asBytes(&ucontext_ptr.mcontext.arm_r7),
                8 => mem.asBytes(&ucontext_ptr.mcontext.arm_r8),
                9 => mem.asBytes(&ucontext_ptr.mcontext.arm_r9),
                10 => mem.asBytes(&ucontext_ptr.mcontext.arm_r10),
                11 => mem.asBytes(&ucontext_ptr.mcontext.arm_fp),
                12 => mem.asBytes(&ucontext_ptr.mcontext.arm_ip),
                13 => mem.asBytes(&ucontext_ptr.mcontext.arm_sp),
                14 => mem.asBytes(&ucontext_ptr.mcontext.arm_lr),
                15 => mem.asBytes(&ucontext_ptr.mcontext.arm_pc),
                // CPSR is not allocated a register number (See: https://github.com/ARM-software/abi-aa/blob/main/aadwarf32/aadwarf32.rst, Section 4.1)
                else => error.InvalidRegister,
            },
            else => error.UnimplementedOs,
        },
        .aarch64 => switch (builtin.os.tag) {
            .macos => switch (reg_number) {
                0...28 => mem.asBytes(&ucontext_ptr.mcontext.ss.regs[reg_number]),
                29 => mem.asBytes(&ucontext_ptr.mcontext.ss.fp),
                30 => mem.asBytes(&ucontext_ptr.mcontext.ss.lr),
                31 => mem.asBytes(&ucontext_ptr.mcontext.ss.sp),
                32 => mem.asBytes(&ucontext_ptr.mcontext.ss.pc),

                // TODO: Find storage for this state
                //34 => mem.asBytes(&ucontext_ptr.ra_sign_state),

                // V0-V31
                64...95 => mem.asBytes(&ucontext_ptr.mcontext.ns.q[reg_number - 64]),
                else => error.InvalidRegister,
            },
            .netbsd => switch (reg_number) {
                0...34 => mem.asBytes(&ucontext_ptr.mcontext.gregs[reg_number]),
                else => error.InvalidRegister,
            },
            .freebsd => switch (reg_number) {
                0...29 => mem.asBytes(&ucontext_ptr.mcontext.gpregs.x[reg_number]),
                30 => mem.asBytes(&ucontext_ptr.mcontext.gpregs.lr),
                31 => mem.asBytes(&ucontext_ptr.mcontext.gpregs.sp),

                // TODO: This seems wrong, but it was in the previous debug.zig code for mapping PC, check this
                32 => mem.asBytes(&ucontext_ptr.mcontext.gpregs.elr),

                else => error.InvalidRegister,
            },
            else => switch (reg_number) {
                0...30 => mem.asBytes(&ucontext_ptr.mcontext.regs[reg_number]),
                31 => mem.asBytes(&ucontext_ptr.mcontext.sp),
                32 => mem.asBytes(&ucontext_ptr.mcontext.pc),
                else => error.InvalidRegister,
            },
        },
        else => error.UnimplementedArch,
    };
}
|
||||
|
||||
/// Returns the ABI-defined default value this register has in the unwinding table
/// before running any of the CIE instructions. The DWARF spec defines these as having
/// the .undefined rule by default, but allows ABI authors to override that.
///
/// The result is written into `out`; `out.len` must match the register's
/// backing-storage size (error.RegisterSizeMismatch otherwise).
pub fn getRegDefaultValue(reg_number: u8, context: *std.dwarf.UnwindContext, out: []u8) !void {
    switch (builtin.cpu.arch) {
        .aarch64 => {
            // Callee-saved registers are initialized as if they had the .same_value rule
            if (reg_number >= 19 and reg_number <= 28) {
                const src = try regBytes(context.thread_context, reg_number, context.reg_context);
                if (src.len != out.len) return error.RegisterSizeMismatch;
                @memcpy(out, src);
                return;
            }
        },
        else => {},
    }

    // .undefined rule: the register's value is unspecified.
    @memset(out, undefined);
}
|
610
lib/std/dwarf/call_frame.zig
Normal file
610
lib/std/dwarf/call_frame.zig
Normal file
@ -0,0 +1,610 @@
|
||||
const builtin = @import("builtin");
|
||||
const std = @import("../std.zig");
|
||||
const mem = std.mem;
|
||||
const debug = std.debug;
|
||||
const leb = std.leb;
|
||||
const dwarf = std.dwarf;
|
||||
const abi = dwarf.abi;
|
||||
const expressions = dwarf.expressions;
|
||||
const assert = std.debug.assert;
|
||||
|
||||
/// DWARF call-frame instruction opcodes (DW_CFA_*). The three "primary"
/// opcodes occupy the high two bits and embed an operand in the low six bits;
/// all others use the full byte and read trailing operands from the stream.
const Opcode = enum(u8) {
    // Primary opcodes (operand in the low 6 bits).
    advance_loc = 0x1 << 6,
    offset = 0x2 << 6,
    restore = 0x3 << 6,

    // Extended opcodes (operands, if any, follow in the stream).
    nop = 0x00,
    set_loc = 0x01,
    advance_loc1 = 0x02,
    advance_loc2 = 0x03,
    advance_loc4 = 0x04,
    offset_extended = 0x05,
    restore_extended = 0x06,
    undefined = 0x07,
    same_value = 0x08,
    register = 0x09,
    remember_state = 0x0a,
    restore_state = 0x0b,
    def_cfa = 0x0c,
    def_cfa_register = 0x0d,
    def_cfa_offset = 0x0e,
    def_cfa_expression = 0x0f,
    expression = 0x10,
    offset_extended_sf = 0x11,
    def_cfa_sf = 0x12,
    def_cfa_offset_sf = 0x13,
    val_offset = 0x14,
    val_offset_sf = 0x15,
    val_expression = 0x16,

    // These opcodes encode an operand in the lower 6 bits of the opcode itself
    pub const lo_inline = @intFromEnum(Opcode.advance_loc);
    pub const hi_inline = @intFromEnum(Opcode.restore) | 0b111111;

    // These opcodes are trailed by zero or more operands
    pub const lo_reserved = @intFromEnum(Opcode.nop);
    pub const hi_reserved = @intFromEnum(Opcode.val_expression);

    // Vendor-specific opcodes
    pub const lo_user = 0x1c;
    pub const hi_user = 0x3f;
};
|
||||
|
||||
/// The operand encodings a call-frame instruction can carry, with comptime
/// mappings to their storage type and a reader for each encoding.
const Operand = enum {
    opcode_delta, // delta embedded in a primary opcode's low 6 bits
    opcode_register, // register number embedded in a primary opcode's low 6 bits
    uleb128_register,
    uleb128_offset,
    sleb128_offset,
    address, // target-address-sized integer
    u8_delta,
    u16_delta,
    u32_delta,
    block, // ULEB128 length followed by that many bytes (DWARF expression)

    /// The in-memory type used to hold a decoded operand of this kind.
    fn Storage(comptime self: Operand) type {
        return switch (self) {
            .opcode_delta, .opcode_register => u8,
            .uleb128_register => u8,
            .uleb128_offset => u64,
            .sleb128_offset => i64,
            .address => u64,
            .u8_delta => u8,
            .u16_delta => u16,
            .u32_delta => u32,
            .block => []const u8,
        };
    }

    /// Decodes one operand of this kind from `stream`.
    /// `opcode_value` supplies the low 6 bits for the inline encodings and
    /// must be non-null for them (error.InvalidOperand otherwise).
    fn read(
        comptime self: Operand,
        stream: *std.io.FixedBufferStream([]const u8),
        opcode_value: ?u6,
        addr_size_bytes: u8,
        endian: std.builtin.Endian,
    ) !Storage(self) {
        const reader = stream.reader();
        return switch (self) {
            .opcode_delta, .opcode_register => opcode_value orelse return error.InvalidOperand,
            .uleb128_register => try leb.readULEB128(u8, reader),
            .uleb128_offset => try leb.readULEB128(u64, reader),
            .sleb128_offset => try leb.readILEB128(i64, reader),
            .address => switch (addr_size_bytes) {
                2 => try reader.readInt(u16, endian),
                4 => try reader.readInt(u32, endian),
                8 => try reader.readInt(u64, endian),
                else => return error.InvalidAddrSize,
            },
            .u8_delta => try reader.readByte(),
            .u16_delta => try reader.readInt(u16, endian),
            .u32_delta => try reader.readInt(u32, endian),
            .block => {
                const block_len = try leb.readULEB128(usize, reader);
                if (stream.pos + block_len > stream.buffer.len) return error.InvalidOperand;

                // Return a view into the stream's buffer (no copy), then
                // manually advance past the block.
                const block = stream.buffer[stream.pos..][0..block_len];
                reader.context.pos += block_len;

                return block;
            },
        };
    }
};
|
||||
|
||||
/// Builds a struct type for one call-frame instruction from a comptime
/// definition like `.{ .register = .uleb128_register, .offset = .uleb128_offset }`:
/// each definition field names an operand and maps it to an `Operand` encoding.
/// The resulting type has an `operands` field (one member per operand, typed
/// via `Operand.Storage`) and a `read` function that decodes them in order.
fn InstructionType(comptime definition: anytype) type {
    const definition_type = @typeInfo(@TypeOf(definition));
    assert(definition_type == .Struct);

    // Mirror the definition's fields, replacing each Operand tag with its
    // storage type.
    const definition_len = definition_type.Struct.fields.len;
    comptime var fields: [definition_len]std.builtin.Type.StructField = undefined;
    inline for (definition_type.Struct.fields, &fields) |definition_field, *operands_field| {
        const opcode = std.enums.nameCast(Operand, @field(definition, definition_field.name));
        const storage_type = opcode.Storage();
        operands_field.* = .{
            .name = definition_field.name,
            .type = storage_type,
            .default_value = null,
            .is_comptime = false,
            .alignment = @alignOf(storage_type),
        };
    }

    const InstructionOperands = @Type(.{
        .Struct = .{
            .layout = .Auto,
            .fields = &fields,
            .decls = &.{},
            .is_tuple = false,
        },
    });

    return struct {
        const Self = @This();
        operands: InstructionOperands,

        /// Decodes this instruction's operands from `stream`, in definition
        /// order. `opcode_value` carries the low 6 bits of a primary opcode
        /// (null for extended opcodes).
        pub fn read(
            stream: *std.io.FixedBufferStream([]const u8),
            opcode_value: ?u6,
            addr_size_bytes: u8,
            endian: std.builtin.Endian,
        ) !Self {
            var operands: InstructionOperands = undefined;
            inline for (definition_type.Struct.fields) |definition_field| {
                const operand = comptime std.enums.nameCast(Operand, @field(definition, definition_field.name));
                @field(operands, definition_field.name) = try operand.read(stream, opcode_value, addr_size_bytes, endian);
            }

            return .{ .operands = operands };
        }
    };
}
|
||||
|
||||
/// A decoded DWARF call-frame instruction: one variant per `Opcode`, each
/// payload describing that opcode's operands (see DWARF5 section 6.4.2).
pub const Instruction = union(Opcode) {
    advance_loc: InstructionType(.{ .delta = .opcode_delta }),
    offset: InstructionType(.{ .register = .opcode_register, .offset = .uleb128_offset }),
    offset_extended: InstructionType(.{ .register = .uleb128_register, .offset = .uleb128_offset }),
    restore: InstructionType(.{ .register = .opcode_register }),
    restore_extended: InstructionType(.{ .register = .uleb128_register }),
    nop: InstructionType(.{}),
    set_loc: InstructionType(.{ .address = .address }),
    advance_loc1: InstructionType(.{ .delta = .u8_delta }),
    advance_loc2: InstructionType(.{ .delta = .u16_delta }),
    advance_loc4: InstructionType(.{ .delta = .u32_delta }),
    undefined: InstructionType(.{ .register = .uleb128_register }),
    same_value: InstructionType(.{ .register = .uleb128_register }),
    register: InstructionType(.{ .register = .uleb128_register, .target_register = .uleb128_register }),
    remember_state: InstructionType(.{}),
    restore_state: InstructionType(.{}),
    def_cfa: InstructionType(.{ .register = .uleb128_register, .offset = .uleb128_offset }),
    def_cfa_register: InstructionType(.{ .register = .uleb128_register }),
    def_cfa_offset: InstructionType(.{ .offset = .uleb128_offset }),
    def_cfa_expression: InstructionType(.{ .block = .block }),
    expression: InstructionType(.{ .register = .uleb128_register, .block = .block }),
    offset_extended_sf: InstructionType(.{ .register = .uleb128_register, .offset = .sleb128_offset }),
    def_cfa_sf: InstructionType(.{ .register = .uleb128_register, .offset = .sleb128_offset }),
    def_cfa_offset_sf: InstructionType(.{ .offset = .sleb128_offset }),
    val_offset: InstructionType(.{ .register = .uleb128_register, .offset = .uleb128_offset }),
    val_offset_sf: InstructionType(.{ .register = .uleb128_register, .offset = .sleb128_offset }),
    val_expression: InstructionType(.{ .register = .uleb128_register, .block = .block }),

    // Dispatches to the active variant's generated `read` to fill in its
    // operands in place.
    fn readOperands(
        self: *Instruction,
        stream: *std.io.FixedBufferStream([]const u8),
        opcode_value: ?u6,
        addr_size_bytes: u8,
        endian: std.builtin.Endian,
    ) !void {
        switch (self.*) {
            inline else => |*inst| inst.* = try @TypeOf(inst.*).read(stream, opcode_value, addr_size_bytes, endian),
        }
    }

    /// Reads one complete instruction (opcode byte + operands) from `stream`.
    pub fn read(
        stream: *std.io.FixedBufferStream([]const u8),
        addr_size_bytes: u8,
        endian: std.builtin.Endian,
    ) !Instruction {
        return switch (try stream.reader().readByte()) {
            // Primary opcodes: high 2 bits select the opcode, low 6 bits are an operand.
            inline Opcode.lo_inline...Opcode.hi_inline => |opcode| blk: {
                const e: Opcode = @enumFromInt(opcode & 0b11000000);
                var result = @unionInit(Instruction, @tagName(e), undefined);
                try result.readOperands(stream, @as(u6, @intCast(opcode & 0b111111)), addr_size_bytes, endian);
                break :blk result;
            },
            // Extended opcodes: the whole byte is the opcode; operands follow.
            inline Opcode.lo_reserved...Opcode.hi_reserved => |opcode| blk: {
                const e: Opcode = @enumFromInt(opcode);
                var result = @unionInit(Instruction, @tagName(e), undefined);
                try result.readOperands(stream, null, addr_size_bytes, endian);
                break :blk result;
            },
            Opcode.lo_user...Opcode.hi_user => error.UnimplementedUserOpcode,
            else => error.InvalidOpcode,
        };
    }
};
|
||||
|
||||
/// Since register rules are applied (usually) during a panic,
/// checked addition / subtraction is used so that we can return
/// an error and fall back to FP-based unwinding.
///
/// Returns error.Overflow (rather than trapping) when the result would
/// wrap, when a positive `offset` does not fit in usize, or when
/// `offset == minInt(i64)` (whose magnitude cannot be negated in i64).
pub fn applyOffset(base: usize, offset: i64) !usize {
    if (offset >= 0) {
        // Check the i64 -> usize conversion instead of using an unchecked
        // @intCast, which would trap on 32-bit targets for large offsets.
        const off = std.math.cast(usize, offset) orelse return error.Overflow;
        return std.math.add(usize, base, off);
    } else {
        // Compute |offset| without negating in i64, which would overflow
        // (checked illegal behavior) for offset == minInt(i64).
        const magnitude = @as(u64, @intCast(-(offset + 1))) + 1;
        const off = std.math.cast(usize, magnitude) orelse return error.Overflow;
        return std.math.sub(usize, base, off);
    }
}
|
||||
|
||||
/// This is a virtual machine that runs DWARF call frame instructions.
|
||||
pub const VirtualMachine = struct {
|
||||
/// See section 6.4.1 of the DWARF5 specification for details on each
|
||||
const RegisterRule = union(enum) {
|
||||
// The spec says that the default rule for each column is the undefined rule.
|
||||
// However, it also allows ABI / compiler authors to specify alternate defaults, so
|
||||
// there is a distinction made here.
|
||||
default: void,
|
||||
|
||||
undefined: void,
|
||||
same_value: void,
|
||||
|
||||
// offset(N)
|
||||
offset: i64,
|
||||
|
||||
// val_offset(N)
|
||||
val_offset: i64,
|
||||
|
||||
// register(R)
|
||||
register: u8,
|
||||
|
||||
// expression(E)
|
||||
expression: []const u8,
|
||||
|
||||
// val_expression(E)
|
||||
val_expression: []const u8,
|
||||
|
||||
// Augmenter-defined rule
|
||||
architectural: void,
|
||||
};
|
||||
|
||||
/// Each row contains unwinding rules for a set of registers.
|
||||
pub const Row = struct {
|
||||
/// Offset from `FrameDescriptionEntry.pc_begin`
|
||||
offset: u64 = 0,
|
||||
|
||||
/// Special-case column that defines the CFA (Canonical Frame Address) rule.
|
||||
/// The register field of this column defines the register that CFA is derived from.
|
||||
cfa: Column = .{},
|
||||
|
||||
/// The register fields in these columns define the register the rule applies to.
|
||||
columns: ColumnRange = .{},
|
||||
|
||||
/// Indicates that the next write to any column in this row needs to copy
|
||||
/// the backing column storage first, as it may be referenced by previous rows.
|
||||
copy_on_write: bool = false,
|
||||
};
|
||||
|
||||
pub const Column = struct {
|
||||
register: ?u8 = null,
|
||||
rule: RegisterRule = .{ .default = {} },
|
||||
|
||||
/// Resolves the register rule and places the result into `out` (see dwarf.abi.regBytes)
|
||||
pub fn resolveValue(
|
||||
self: Column,
|
||||
context: *dwarf.UnwindContext,
|
||||
expression_context: dwarf.expressions.ExpressionContext,
|
||||
out: []u8,
|
||||
) !void {
|
||||
switch (self.rule) {
|
||||
.default => {
|
||||
const register = self.register orelse return error.InvalidRegister;
|
||||
try abi.getRegDefaultValue(register, context, out);
|
||||
},
|
||||
.undefined => {
|
||||
@memset(out, undefined);
|
||||
},
|
||||
.same_value => {
|
||||
// TODO: This copy could be eliminated if callers always copy the state then call this function to update it
|
||||
const register = self.register orelse return error.InvalidRegister;
|
||||
const src = try abi.regBytes(context.thread_context, register, context.reg_context);
|
||||
if (src.len != out.len) return error.RegisterSizeMismatch;
|
||||
@memcpy(out, src);
|
||||
},
|
||||
.offset => |offset| {
|
||||
if (context.cfa) |cfa| {
|
||||
const addr = try applyOffset(cfa, offset);
|
||||
if (expression_context.isValidMemory) |isValidMemory| if (!isValidMemory(addr)) return error.InvalidAddress;
|
||||
const ptr: *const usize = @ptrFromInt(addr);
|
||||
mem.writeIntSliceNative(usize, out, ptr.*);
|
||||
} else return error.InvalidCFA;
|
||||
},
|
||||
.val_offset => |offset| {
|
||||
if (context.cfa) |cfa| {
|
||||
mem.writeIntSliceNative(usize, out, try applyOffset(cfa, offset));
|
||||
} else return error.InvalidCFA;
|
||||
},
|
||||
.register => |register| {
|
||||
const src = try abi.regBytes(context.thread_context, register, context.reg_context);
|
||||
if (src.len != out.len) return error.RegisterSizeMismatch;
|
||||
@memcpy(out, try abi.regBytes(context.thread_context, register, context.reg_context));
|
||||
},
|
||||
.expression => |expression| {
|
||||
context.stack_machine.reset();
|
||||
const value = try context.stack_machine.run(expression, context.allocator, expression_context, context.cfa.?);
|
||||
const addr = if (value) |v| blk: {
|
||||
if (v != .generic) return error.InvalidExpressionValue;
|
||||
break :blk v.generic;
|
||||
} else return error.NoExpressionValue;
|
||||
|
||||
if (!context.isValidMemory(addr)) return error.InvalidExpressionAddress;
|
||||
const ptr: *usize = @ptrFromInt(addr);
|
||||
mem.writeIntSliceNative(usize, out, ptr.*);
|
||||
},
|
||||
.val_expression => |expression| {
|
||||
context.stack_machine.reset();
|
||||
const value = try context.stack_machine.run(expression, context.allocator, expression_context, context.cfa.?);
|
||||
if (value) |v| {
|
||||
if (v != .generic) return error.InvalidExpressionValue;
|
||||
mem.writeIntSliceNative(usize, out, v.generic);
|
||||
} else return error.NoExpressionValue;
|
||||
},
|
||||
.architectural => return error.UnimplementedRegisterRule,
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
const ColumnRange = struct {
|
||||
/// Index into `columns` of the first column in this row.
|
||||
start: usize = undefined,
|
||||
len: u8 = 0,
|
||||
};
|
||||
|
||||
columns: std.ArrayListUnmanaged(Column) = .{},
|
||||
stack: std.ArrayListUnmanaged(ColumnRange) = .{},
|
||||
current_row: Row = .{},
|
||||
|
||||
/// The result of executing the CIE's initial_instructions
|
||||
cie_row: ?Row = null,
|
||||
|
||||
pub fn deinit(self: *VirtualMachine, allocator: std.mem.Allocator) void {
|
||||
self.stack.deinit(allocator);
|
||||
self.columns.deinit(allocator);
|
||||
self.* = undefined;
|
||||
}
|
||||
|
||||
pub fn reset(self: *VirtualMachine) void {
|
||||
self.stack.clearRetainingCapacity();
|
||||
self.columns.clearRetainingCapacity();
|
||||
self.current_row = .{};
|
||||
self.cie_row = null;
|
||||
}
|
||||
|
||||
/// Return a slice backed by the row's non-CFA columns
|
||||
pub fn rowColumns(self: VirtualMachine, row: Row) []Column {
|
||||
return self.columns.items[row.columns.start..][0..row.columns.len];
|
||||
}
|
||||
|
||||
/// Either retrieves or adds a column for `register` (non-CFA) in the current row.
|
||||
fn getOrAddColumn(self: *VirtualMachine, allocator: std.mem.Allocator, register: u8) !*Column {
|
||||
for (self.rowColumns(self.current_row)) |*c| {
|
||||
if (c.register == register) return c;
|
||||
}
|
||||
|
||||
if (self.current_row.columns.len == 0) {
|
||||
self.current_row.columns.start = self.columns.items.len;
|
||||
}
|
||||
self.current_row.columns.len += 1;
|
||||
|
||||
const column = try self.columns.addOne(allocator);
|
||||
column.* = .{
|
||||
.register = register,
|
||||
};
|
||||
|
||||
return column;
|
||||
}
|
||||
|
||||
/// Runs the CIE instructions, then the FDE instructions. Execution halts
|
||||
/// once the row that corresponds to `pc` is known, and the row is returned.
|
||||
pub fn runTo(
|
||||
self: *VirtualMachine,
|
||||
allocator: std.mem.Allocator,
|
||||
pc: u64,
|
||||
cie: dwarf.CommonInformationEntry,
|
||||
fde: dwarf.FrameDescriptionEntry,
|
||||
addr_size_bytes: u8,
|
||||
endian: std.builtin.Endian,
|
||||
) !Row {
|
||||
assert(self.cie_row == null);
|
||||
if (pc < fde.pc_begin or pc >= fde.pc_begin + fde.pc_range) return error.AddressOutOfRange;
|
||||
|
||||
var prev_row: Row = self.current_row;
|
||||
|
||||
var cie_stream = std.io.fixedBufferStream(cie.initial_instructions);
|
||||
var fde_stream = std.io.fixedBufferStream(fde.instructions);
|
||||
var streams = [_]*std.io.FixedBufferStream([]const u8){
|
||||
&cie_stream,
|
||||
&fde_stream,
|
||||
};
|
||||
|
||||
for (&streams, 0..) |stream, i| {
|
||||
while (stream.pos < stream.buffer.len) {
|
||||
const instruction = try dwarf.call_frame.Instruction.read(stream, addr_size_bytes, endian);
|
||||
prev_row = try self.step(allocator, cie, i == 0, instruction);
|
||||
if (pc < fde.pc_begin + self.current_row.offset) return prev_row;
|
||||
}
|
||||
}
|
||||
|
||||
return self.current_row;
|
||||
}
|
||||
|
||||
pub fn runToNative(
|
||||
self: *VirtualMachine,
|
||||
allocator: std.mem.Allocator,
|
||||
pc: u64,
|
||||
cie: dwarf.CommonInformationEntry,
|
||||
fde: dwarf.FrameDescriptionEntry,
|
||||
) !Row {
|
||||
return self.runTo(allocator, pc, cie, fde, @sizeOf(usize), builtin.target.cpu.arch.endian());
|
||||
}
|
||||
|
||||
fn resolveCopyOnWrite(self: *VirtualMachine, allocator: std.mem.Allocator) !void {
|
||||
if (!self.current_row.copy_on_write) return;
|
||||
|
||||
const new_start = self.columns.items.len;
|
||||
if (self.current_row.columns.len > 0) {
|
||||
try self.columns.ensureUnusedCapacity(allocator, self.current_row.columns.len);
|
||||
self.columns.appendSliceAssumeCapacity(self.rowColumns(self.current_row));
|
||||
self.current_row.columns.start = new_start;
|
||||
}
|
||||
}
|
||||
|
||||
/// Executes a single instruction.
|
||||
/// If this instruction is from the CIE, `is_initial` should be set.
|
||||
/// Returns the value of `current_row` before executing this instruction.
|
||||
pub fn step(
|
||||
self: *VirtualMachine,
|
||||
allocator: std.mem.Allocator,
|
||||
cie: dwarf.CommonInformationEntry,
|
||||
is_initial: bool,
|
||||
instruction: Instruction,
|
||||
) !Row {
|
||||
// CIE instructions must be run before FDE instructions
|
||||
assert(!is_initial or self.cie_row == null);
|
||||
if (!is_initial and self.cie_row == null) {
|
||||
self.cie_row = self.current_row;
|
||||
self.current_row.copy_on_write = true;
|
||||
}
|
||||
|
||||
const prev_row = self.current_row;
|
||||
switch (instruction) {
|
||||
.set_loc => |i| {
|
||||
if (i.operands.address <= self.current_row.offset) return error.InvalidOperation;
|
||||
// TODO: Check cie.segment_selector_size != 0 for DWARFV4
|
||||
self.current_row.offset = i.operands.address;
|
||||
},
|
||||
inline .advance_loc,
|
||||
.advance_loc1,
|
||||
.advance_loc2,
|
||||
.advance_loc4,
|
||||
=> |i| {
|
||||
self.current_row.offset += i.operands.delta * cie.code_alignment_factor;
|
||||
self.current_row.copy_on_write = true;
|
||||
},
|
||||
inline .offset,
|
||||
.offset_extended,
|
||||
.offset_extended_sf,
|
||||
=> |i| {
|
||||
try self.resolveCopyOnWrite(allocator);
|
||||
const column = try self.getOrAddColumn(allocator, i.operands.register);
|
||||
column.rule = .{ .offset = @as(i64, @intCast(i.operands.offset)) * cie.data_alignment_factor };
|
||||
},
|
||||
inline .restore,
|
||||
.restore_extended,
|
||||
=> |i| {
|
||||
try self.resolveCopyOnWrite(allocator);
|
||||
if (self.cie_row) |cie_row| {
|
||||
const column = try self.getOrAddColumn(allocator, i.operands.register);
|
||||
column.rule = for (self.rowColumns(cie_row)) |cie_column| {
|
||||
if (cie_column.register == i.operands.register) break cie_column.rule;
|
||||
} else .{ .default = {} };
|
||||
} else return error.InvalidOperation;
|
||||
},
|
||||
.nop => {},
|
||||
.undefined => |i| {
|
||||
try self.resolveCopyOnWrite(allocator);
|
||||
const column = try self.getOrAddColumn(allocator, i.operands.register);
|
||||
column.rule = .{ .undefined = {} };
|
||||
},
|
||||
.same_value => |i| {
|
||||
try self.resolveCopyOnWrite(allocator);
|
||||
const column = try self.getOrAddColumn(allocator, i.operands.register);
|
||||
column.rule = .{ .same_value = {} };
|
||||
},
|
||||
.register => |i| {
|
||||
try self.resolveCopyOnWrite(allocator);
|
||||
const column = try self.getOrAddColumn(allocator, i.operands.register);
|
||||
column.rule = .{ .register = i.operands.target_register };
|
||||
},
|
||||
.remember_state => {
|
||||
try self.stack.append(allocator, self.current_row.columns);
|
||||
self.current_row.copy_on_write = true;
|
||||
},
|
||||
.restore_state => {
|
||||
const restored_columns = self.stack.popOrNull() orelse return error.InvalidOperation;
|
||||
self.columns.shrinkRetainingCapacity(self.columns.items.len - self.current_row.columns.len);
|
||||
try self.columns.ensureUnusedCapacity(allocator, restored_columns.len);
|
||||
|
||||
self.current_row.columns.start = self.columns.items.len;
|
||||
self.current_row.columns.len = restored_columns.len;
|
||||
self.columns.appendSliceAssumeCapacity(self.columns.items[restored_columns.start..][0..restored_columns.len]);
|
||||
},
|
||||
.def_cfa => |i| {
|
||||
try self.resolveCopyOnWrite(allocator);
|
||||
self.current_row.cfa = .{
|
||||
.register = i.operands.register,
|
||||
.rule = .{ .val_offset = @intCast(i.operands.offset) },
|
||||
};
|
||||
},
|
||||
.def_cfa_sf => |i| {
|
||||
try self.resolveCopyOnWrite(allocator);
|
||||
self.current_row.cfa = .{
|
||||
.register = i.operands.register,
|
||||
.rule = .{ .val_offset = i.operands.offset * cie.data_alignment_factor },
|
||||
};
|
||||
},
|
||||
.def_cfa_register => |i| {
|
||||
try self.resolveCopyOnWrite(allocator);
|
||||
if (self.current_row.cfa.register == null or self.current_row.cfa.rule != .val_offset) return error.InvalidOperation;
|
||||
self.current_row.cfa.register = i.operands.register;
|
||||
},
|
||||
.def_cfa_offset => |i| {
|
||||
try self.resolveCopyOnWrite(allocator);
|
||||
if (self.current_row.cfa.register == null or self.current_row.cfa.rule != .val_offset) return error.InvalidOperation;
|
||||
self.current_row.cfa.rule = .{
|
||||
.val_offset = @intCast(i.operands.offset),
|
||||
};
|
||||
},
|
||||
.def_cfa_offset_sf => |i| {
|
||||
try self.resolveCopyOnWrite(allocator);
|
||||
if (self.current_row.cfa.register == null or self.current_row.cfa.rule != .val_offset) return error.InvalidOperation;
|
||||
self.current_row.cfa.rule = .{
|
||||
.val_offset = i.operands.offset * cie.data_alignment_factor,
|
||||
};
|
||||
},
|
||||
.def_cfa_expression => |i| {
|
||||
try self.resolveCopyOnWrite(allocator);
|
||||
self.current_row.cfa.register = undefined;
|
||||
self.current_row.cfa.rule = .{
|
||||
.expression = i.operands.block,
|
||||
};
|
||||
},
|
||||
.expression => |i| {
|
||||
try self.resolveCopyOnWrite(allocator);
|
||||
const column = try self.getOrAddColumn(allocator, i.operands.register);
|
||||
column.rule = .{
|
||||
.expression = i.operands.block,
|
||||
};
|
||||
},
|
||||
.val_offset => |i| {
|
||||
try self.resolveCopyOnWrite(allocator);
|
||||
const column = try self.getOrAddColumn(allocator, i.operands.register);
|
||||
column.rule = .{
|
||||
.val_offset = @as(i64, @intCast(i.operands.offset)) * cie.data_alignment_factor,
|
||||
};
|
||||
},
|
||||
.val_offset_sf => |i| {
|
||||
try self.resolveCopyOnWrite(allocator);
|
||||
const column = try self.getOrAddColumn(allocator, i.operands.register);
|
||||
column.rule = .{
|
||||
.val_offset = i.operands.offset * cie.data_alignment_factor,
|
||||
};
|
||||
},
|
||||
.val_expression => |i| {
|
||||
try self.resolveCopyOnWrite(allocator);
|
||||
const column = try self.getOrAddColumn(allocator, i.operands.register);
|
||||
column.rule = .{
|
||||
.val_expression = i.operands.block,
|
||||
};
|
||||
},
|
||||
}
|
||||
|
||||
return prev_row;
|
||||
}
|
||||
};
|
1639
lib/std/dwarf/expressions.zig
Normal file
1639
lib/std/dwarf/expressions.zig
Normal file
File diff suppressed because it is too large
Load Diff
@ -371,6 +371,9 @@ pub const SHT_LOUSER = 0x80000000;
|
||||
/// End of application-specific
|
||||
pub const SHT_HIUSER = 0xffffffff;
|
||||
|
||||
// Note type for .note.gnu.build_id
|
||||
pub const NT_GNU_BUILD_ID = 3;
|
||||
|
||||
/// Local symbol
|
||||
pub const STB_LOCAL = 0;
|
||||
/// Global symbol
|
||||
@ -1055,6 +1058,11 @@ pub const Shdr = switch (@sizeOf(usize)) {
|
||||
8 => Elf64_Shdr,
|
||||
else => @compileError("expected pointer size of 32 or 64"),
|
||||
};
|
||||
pub const Chdr = switch (@sizeOf(usize)) {
|
||||
4 => Elf32_Chdr,
|
||||
8 => Elf64_Chdr,
|
||||
else => @compileError("expected pointer size of 32 or 64"),
|
||||
};
|
||||
pub const Sym = switch (@sizeOf(usize)) {
|
||||
4 => Elf32_Sym,
|
||||
8 => Elf64_Sym,
|
||||
|
@ -2064,3 +2064,64 @@ pub const UNWIND_ARM64_FRAME_D14_D15_PAIR: u32 = 0x00000800;
|
||||
|
||||
pub const UNWIND_ARM64_FRAMELESS_STACK_SIZE_MASK: u32 = 0x00FFF000;
|
||||
pub const UNWIND_ARM64_DWARF_SECTION_OFFSET: u32 = 0x00FFFFFF;
|
||||
|
||||
pub const CompactUnwindEncoding = packed struct(u32) {
|
||||
value: packed union {
|
||||
x86_64: packed union {
|
||||
frame: packed struct(u24) {
|
||||
reg4: u3,
|
||||
reg3: u3,
|
||||
reg2: u3,
|
||||
reg1: u3,
|
||||
reg0: u3,
|
||||
unused: u1 = 0,
|
||||
frame_offset: u8,
|
||||
},
|
||||
frameless: packed struct(u24) {
|
||||
stack_reg_permutation: u10,
|
||||
stack_reg_count: u3,
|
||||
stack: packed union {
|
||||
direct: packed struct(u11) {
|
||||
_: u3,
|
||||
stack_size: u8,
|
||||
},
|
||||
indirect: packed struct(u11) {
|
||||
stack_adjust: u3,
|
||||
sub_offset: u8,
|
||||
},
|
||||
},
|
||||
},
|
||||
dwarf: u24,
|
||||
},
|
||||
arm64: packed union {
|
||||
frame: packed struct(u24) {
|
||||
x_reg_pairs: packed struct(u5) {
|
||||
x19_x20: u1,
|
||||
x21_x22: u1,
|
||||
x23_x24: u1,
|
||||
x25_x26: u1,
|
||||
x27_x28: u1,
|
||||
},
|
||||
d_reg_pairs: packed struct(u4) {
|
||||
d8_d9: u1,
|
||||
d10_d11: u1,
|
||||
d12_d13: u1,
|
||||
d14_d15: u1,
|
||||
},
|
||||
_: u15,
|
||||
},
|
||||
frameless: packed struct(u24) {
|
||||
_: u12 = 0,
|
||||
stack_size: u12,
|
||||
},
|
||||
dwarf: u24,
|
||||
},
|
||||
},
|
||||
mode: packed union {
|
||||
x86_64: UNWIND_X86_64_MODE,
|
||||
arm64: UNWIND_ARM64_MODE,
|
||||
},
|
||||
personality_index: u2,
|
||||
has_lsda: u1,
|
||||
start: u1,
|
||||
};
|
||||
|
@ -86,6 +86,7 @@ pub const timeval = arch_bits.timeval;
|
||||
pub const timezone = arch_bits.timezone;
|
||||
pub const ucontext_t = arch_bits.ucontext_t;
|
||||
pub const user_desc = arch_bits.user_desc;
|
||||
pub const getcontext = arch_bits.getcontext;
|
||||
|
||||
pub const tls = @import("linux/tls.zig");
|
||||
pub const pie = @import("linux/start_pie.zig");
|
||||
|
@ -389,3 +389,86 @@ pub const SC = struct {
|
||||
pub const recvmmsg = 19;
|
||||
pub const sendmmsg = 20;
|
||||
};
|
||||
|
||||
fn gpRegisterOffset(comptime reg_index: comptime_int) usize {
|
||||
return @offsetOf(ucontext_t, "mcontext") + @offsetOf(mcontext_t, "gregs") + @sizeOf(usize) * reg_index;
|
||||
}
|
||||
|
||||
noinline fn getContextReturnAddress() usize {
|
||||
return @returnAddress();
|
||||
}
|
||||
|
||||
pub fn getContextInternal() callconv(.Naked) void {
|
||||
asm volatile (
|
||||
\\ movl $0, (%[flags_offset])(%%edx)
|
||||
\\ movl $0, (%[link_offset])(%%edx)
|
||||
\\ movl %%edi, (%[edi_offset])(%%edx)
|
||||
\\ movl %%esi, (%[esi_offset])(%%edx)
|
||||
\\ movl %%ebp, (%[ebp_offset])(%%edx)
|
||||
\\ movl %%ebx, (%[ebx_offset])(%%edx)
|
||||
\\ movl %%edx, (%[edx_offset])(%%edx)
|
||||
\\ movl %%ecx, (%[ecx_offset])(%%edx)
|
||||
\\ movl %%eax, (%[eax_offset])(%%edx)
|
||||
\\ movl (%%esp), %%ecx
|
||||
\\ movl %%ecx, (%[eip_offset])(%%edx)
|
||||
\\ leal 4(%%esp), %%ecx
|
||||
\\ movl %%ecx, (%[esp_offset])(%%edx)
|
||||
\\ xorl %%ecx, %%ecx
|
||||
\\ movw %%fs, %%cx
|
||||
\\ movl %%ecx, (%[fs_offset])(%%edx)
|
||||
\\ leal (%[regspace_offset])(%%edx), %%ecx
|
||||
\\ movl %%ecx, (%[fpregs_offset])(%%edx)
|
||||
\\ fnstenv (%%ecx)
|
||||
\\ fldenv (%%ecx)
|
||||
\\ pushl %%ebx
|
||||
\\ pushl %%esi
|
||||
\\ xorl %%ebx, %%ebx
|
||||
\\ movl %[sigaltstack], %%eax
|
||||
\\ leal (%[stack_offset])(%%edx), %%ecx
|
||||
\\ int $0x80
|
||||
\\ cmpl $0, %%eax
|
||||
\\ jne return
|
||||
\\ movl %[sigprocmask], %%eax
|
||||
\\ xorl %%ecx, %%ecx
|
||||
\\ leal (%[sigmask_offset])(%%edx), %%edx
|
||||
\\ movl %[sigset_size], %%esi
|
||||
\\ int $0x80
|
||||
\\ return:
|
||||
\\ popl %%esi
|
||||
\\ popl %%ebx
|
||||
:
|
||||
: [flags_offset] "p" (@offsetOf(ucontext_t, "flags")),
|
||||
[link_offset] "p" (@offsetOf(ucontext_t, "link")),
|
||||
[edi_offset] "p" (comptime gpRegisterOffset(REG.EDI)),
|
||||
[esi_offset] "p" (comptime gpRegisterOffset(REG.ESI)),
|
||||
[ebp_offset] "p" (comptime gpRegisterOffset(REG.EBP)),
|
||||
[esp_offset] "p" (comptime gpRegisterOffset(REG.ESP)),
|
||||
[ebx_offset] "p" (comptime gpRegisterOffset(REG.EBX)),
|
||||
[edx_offset] "p" (comptime gpRegisterOffset(REG.EDX)),
|
||||
[ecx_offset] "p" (comptime gpRegisterOffset(REG.ECX)),
|
||||
[eax_offset] "p" (comptime gpRegisterOffset(REG.EAX)),
|
||||
[eip_offset] "p" (comptime gpRegisterOffset(REG.EIP)),
|
||||
[fs_offset] "p" (comptime gpRegisterOffset(REG.FS)),
|
||||
[fpregs_offset] "p" (@offsetOf(ucontext_t, "mcontext") + @offsetOf(mcontext_t, "fpregs")),
|
||||
[regspace_offset] "p" (@offsetOf(ucontext_t, "regspace")),
|
||||
[sigaltstack] "i" (@intFromEnum(linux.SYS.sigaltstack)),
|
||||
[stack_offset] "p" (@offsetOf(ucontext_t, "stack")),
|
||||
[sigprocmask] "i" (@intFromEnum(linux.SYS.rt_sigprocmask)),
|
||||
[sigmask_offset] "p" (@offsetOf(ucontext_t, "sigmask")),
|
||||
[sigset_size] "i" (linux.NSIG / 8),
|
||||
: "memory", "eax", "ecx", "edx"
|
||||
);
|
||||
}
|
||||
|
||||
pub inline fn getcontext(context: *ucontext_t) usize {
|
||||
// This method is used so that getContextInternal can control
|
||||
// its prologue in order to read ESP from a constant offset.
|
||||
// The unused &getContextInternal input is required so the function is included in the binary.
|
||||
return asm volatile (
|
||||
\\ call os.linux.x86.getContextInternal
|
||||
: [ret] "={eax}" (-> usize),
|
||||
: [context] "{edx}" (context),
|
||||
[getContextInternal] "X" (&getContextInternal),
|
||||
: "memory", "ecx"
|
||||
);
|
||||
}
|
||||
|
@ -395,3 +395,97 @@ pub const ucontext_t = extern struct {
|
||||
sigmask: sigset_t,
|
||||
fpregs_mem: [64]usize,
|
||||
};
|
||||
|
||||
fn gpRegisterOffset(comptime reg_index: comptime_int) usize {
|
||||
return @offsetOf(ucontext_t, "mcontext") + @offsetOf(mcontext_t, "gregs") + @sizeOf(usize) * reg_index;
|
||||
}
|
||||
|
||||
fn getContextInternal() callconv(.Naked) void {
|
||||
// TODO: Read GS/FS registers?
|
||||
asm volatile (
|
||||
\\ movq $0, (%[flags_offset])(%%rdi)
|
||||
\\ movq $0, (%[link_offset])(%%rdi)
|
||||
\\ movq %%r8, (%[r8_offset])(%%rdi)
|
||||
\\ movq %%r9, (%[r9_offset])(%%rdi)
|
||||
\\ movq %%r10, (%[r10_offset])(%%rdi)
|
||||
\\ movq %%r11, (%[r11_offset])(%%rdi)
|
||||
\\ movq %%r12, (%[r12_offset])(%%rdi)
|
||||
\\ movq %%r13, (%[r13_offset])(%%rdi)
|
||||
\\ movq %%r14, (%[r14_offset])(%%rdi)
|
||||
\\ movq %%r15, (%[r15_offset])(%%rdi)
|
||||
\\ movq %%rdi, (%[rdi_offset])(%%rdi)
|
||||
\\ movq %%rsi, (%[rsi_offset])(%%rdi)
|
||||
\\ movq %%rbp, (%[rbp_offset])(%%rdi)
|
||||
\\ movq %%rbx, (%[rbx_offset])(%%rdi)
|
||||
\\ movq %%rdx, (%[rdx_offset])(%%rdi)
|
||||
\\ movq %%rax, (%[rax_offset])(%%rdi)
|
||||
\\ movq %%rcx, (%[rcx_offset])(%%rdi)
|
||||
\\ movq (%%rsp), %%rcx
|
||||
\\ movq %%rcx, (%[rip_offset])(%%rdi)
|
||||
\\ leaq 8(%%rsp), %%rcx
|
||||
\\ movq %%rcx, (%[rsp_offset])(%%rdi)
|
||||
\\ pushfq
|
||||
\\ popq (%[efl_offset])(%%rdi)
|
||||
\\ leaq (%[fpmem_offset])(%%rdi), %%rcx
|
||||
\\ movq %%rcx, (%[fpstate_offset])(%%rdi)
|
||||
\\ fnstenv (%%rcx)
|
||||
\\ fldenv (%%rcx)
|
||||
\\ stmxcsr (%[mxcsr_offset])(%%rdi)
|
||||
\\ leaq (%[stack_offset])(%%rdi), %%rsi
|
||||
\\ movq %%rdi, %%r8
|
||||
\\ xorq %%rdi, %%rdi
|
||||
\\ movq %[sigaltstack], %%rax
|
||||
\\ syscall
|
||||
\\ cmpq $0, %%rax
|
||||
\\ jne return
|
||||
\\ movq %[sigprocmask], %%rax
|
||||
\\ xorq %%rsi, %%rsi
|
||||
\\ leaq (%[sigmask_offset])(%%r8), %%rdx
|
||||
\\ movq %[sigset_size], %%r10
|
||||
\\ syscall
|
||||
\\ return:
|
||||
:
|
||||
: [flags_offset] "p" (@offsetOf(ucontext_t, "flags")),
|
||||
[link_offset] "p" (@offsetOf(ucontext_t, "link")),
|
||||
[r8_offset] "p" (comptime gpRegisterOffset(REG.R8)),
|
||||
[r9_offset] "p" (comptime gpRegisterOffset(REG.R9)),
|
||||
[r10_offset] "p" (comptime gpRegisterOffset(REG.R10)),
|
||||
[r11_offset] "p" (comptime gpRegisterOffset(REG.R11)),
|
||||
[r12_offset] "p" (comptime gpRegisterOffset(REG.R12)),
|
||||
[r13_offset] "p" (comptime gpRegisterOffset(REG.R13)),
|
||||
[r14_offset] "p" (comptime gpRegisterOffset(REG.R14)),
|
||||
[r15_offset] "p" (comptime gpRegisterOffset(REG.R15)),
|
||||
[rdi_offset] "p" (comptime gpRegisterOffset(REG.RDI)),
|
||||
[rsi_offset] "p" (comptime gpRegisterOffset(REG.RSI)),
|
||||
[rbp_offset] "p" (comptime gpRegisterOffset(REG.RBP)),
|
||||
[rbx_offset] "p" (comptime gpRegisterOffset(REG.RBX)),
|
||||
[rdx_offset] "p" (comptime gpRegisterOffset(REG.RDX)),
|
||||
[rax_offset] "p" (comptime gpRegisterOffset(REG.RAX)),
|
||||
[rcx_offset] "p" (comptime gpRegisterOffset(REG.RCX)),
|
||||
[rsp_offset] "p" (comptime gpRegisterOffset(REG.RSP)),
|
||||
[rip_offset] "p" (comptime gpRegisterOffset(REG.RIP)),
|
||||
[efl_offset] "p" (comptime gpRegisterOffset(REG.EFL)),
|
||||
[fpstate_offset] "p" (@offsetOf(ucontext_t, "mcontext") + @offsetOf(mcontext_t, "fpregs")),
|
||||
[fpmem_offset] "p" (@offsetOf(ucontext_t, "fpregs_mem")),
|
||||
[mxcsr_offset] "p" (@offsetOf(ucontext_t, "fpregs_mem") + @offsetOf(fpstate, "mxcsr")),
|
||||
[sigaltstack] "i" (@intFromEnum(linux.SYS.sigaltstack)),
|
||||
[stack_offset] "p" (@offsetOf(ucontext_t, "stack")),
|
||||
[sigprocmask] "i" (@intFromEnum(linux.SYS.rt_sigprocmask)),
|
||||
[sigmask_offset] "p" (@offsetOf(ucontext_t, "sigmask")),
|
||||
[sigset_size] "i" (linux.NSIG / 8),
|
||||
: "memory", "rcx", "rdx", "rdi", "rsi", "r8", "r10", "r11"
|
||||
);
|
||||
}
|
||||
|
||||
pub inline fn getcontext(context: *ucontext_t) usize {
|
||||
// This method is used so that getContextInternal can control
|
||||
// its prologue in order to read RSP from a constant offset
|
||||
// The unused &getContextInternal input is required so the function is included in the binary.
|
||||
return asm volatile (
|
||||
\\ call os.linux.x86_64.getContextInternal
|
||||
: [ret] "={rax}" (-> usize),
|
||||
: [context] "{rdi}" (context),
|
||||
[getContextInternal] "X" (&getContextInternal),
|
||||
: "memory", "rcx", "rdx", "rdi", "rsi", "r8", "r10", "r11"
|
||||
);
|
||||
}
|
||||
|
@ -3301,6 +3301,35 @@ pub const REGSAM = ACCESS_MASK;
|
||||
pub const ACCESS_MASK = DWORD;
|
||||
pub const LSTATUS = LONG;
|
||||
|
||||
pub const SECTION_INHERIT = enum(c_int) {
|
||||
ViewShare = 0,
|
||||
ViewUnmap = 1,
|
||||
};
|
||||
|
||||
pub const SECTION_QUERY = 0x0001;
|
||||
pub const SECTION_MAP_WRITE = 0x0002;
|
||||
pub const SECTION_MAP_READ = 0x0004;
|
||||
pub const SECTION_MAP_EXECUTE = 0x0008;
|
||||
pub const SECTION_EXTEND_SIZE = 0x0010;
|
||||
pub const SECTION_ALL_ACCESS =
|
||||
STANDARD_RIGHTS_REQUIRED |
|
||||
SECTION_QUERY |
|
||||
SECTION_MAP_WRITE |
|
||||
SECTION_MAP_READ |
|
||||
SECTION_MAP_EXECUTE |
|
||||
SECTION_EXTEND_SIZE;
|
||||
|
||||
pub const SEC_64K_PAGES = 0x80000;
|
||||
pub const SEC_FILE = 0x800000;
|
||||
pub const SEC_IMAGE = 0x1000000;
|
||||
pub const SEC_PROTECTED_IMAGE = 0x2000000;
|
||||
pub const SEC_RESERVE = 0x4000000;
|
||||
pub const SEC_COMMIT = 0x8000000;
|
||||
pub const SEC_IMAGE_NO_EXECUTE = SEC_IMAGE | SEC_NOCACHE;
|
||||
pub const SEC_NOCACHE = 0x10000000;
|
||||
pub const SEC_WRITECOMBINE = 0x40000000;
|
||||
pub const SEC_LARGE_PAGES = 0x80000000;
|
||||
|
||||
pub const HKEY = *opaque {};
|
||||
|
||||
pub const HKEY_LOCAL_MACHINE: HKEY = @as(HKEY, @ptrFromInt(0x80000002));
|
||||
|
@ -36,6 +36,7 @@ const THREADINFOCLASS = windows.THREADINFOCLASS;
|
||||
const PROCESSINFOCLASS = windows.PROCESSINFOCLASS;
|
||||
const LPVOID = windows.LPVOID;
|
||||
const LPCVOID = windows.LPCVOID;
|
||||
const SECTION_INHERIT = windows.SECTION_INHERIT;
|
||||
|
||||
pub extern "ntdll" fn NtQueryInformationProcess(
|
||||
ProcessHandle: HANDLE,
|
||||
@ -125,6 +126,31 @@ pub extern "ntdll" fn NtCreateFile(
|
||||
EaBuffer: ?*anyopaque,
|
||||
EaLength: ULONG,
|
||||
) callconv(WINAPI) NTSTATUS;
|
||||
pub extern "ntdll" fn NtCreateSection(
|
||||
SectionHandle: *HANDLE,
|
||||
DesiredAccess: ACCESS_MASK,
|
||||
ObjectAttributes: ?*OBJECT_ATTRIBUTES,
|
||||
MaximumSize: ?*LARGE_INTEGER,
|
||||
SectionPageProtection: ULONG,
|
||||
AllocationAttributes: ULONG,
|
||||
FileHandle: ?HANDLE,
|
||||
) callconv(WINAPI) NTSTATUS;
|
||||
pub extern "ntdll" fn NtMapViewOfSection(
|
||||
SectionHandle: HANDLE,
|
||||
ProcessHandle: HANDLE,
|
||||
BaseAddress: *PVOID,
|
||||
ZeroBits: ?*ULONG,
|
||||
CommitSize: SIZE_T,
|
||||
SectionOffset: ?*LARGE_INTEGER,
|
||||
ViewSize: *SIZE_T,
|
||||
InheritDispostion: SECTION_INHERIT,
|
||||
AllocationType: ULONG,
|
||||
Win32Protect: ULONG,
|
||||
) callconv(WINAPI) NTSTATUS;
|
||||
pub extern "ntdll" fn NtUnmapViewOfSection(
|
||||
ProcessHandle: HANDLE,
|
||||
BaseAddress: PVOID,
|
||||
) callconv(WINAPI) NTSTATUS;
|
||||
pub extern "ntdll" fn NtDeviceIoControlFile(
|
||||
FileHandle: HANDLE,
|
||||
Event: ?HANDLE,
|
||||
|
@ -5288,6 +5288,7 @@ pub fn generateBuiltinZigSource(comp: *Compilation, allocator: Allocator) Alloca
|
||||
\\pub const position_independent_executable = {};
|
||||
\\pub const strip_debug_info = {};
|
||||
\\pub const code_model = std.builtin.CodeModel.{};
|
||||
\\pub const omit_frame_pointer = {};
|
||||
\\
|
||||
, .{
|
||||
std.zig.fmtId(@tagName(target.ofmt)),
|
||||
@ -5301,6 +5302,7 @@ pub fn generateBuiltinZigSource(comp: *Compilation, allocator: Allocator) Alloca
|
||||
comp.bin_file.options.pie,
|
||||
comp.bin_file.options.strip,
|
||||
std.zig.fmtId(@tagName(comp.bin_file.options.machine_code_model)),
|
||||
comp.bin_file.options.omit_frame_pointer,
|
||||
});
|
||||
|
||||
if (target.os.tag == .wasi) {
|
||||
|
@ -203,53 +203,11 @@ fn handleSegfaultPosix(sig: i32, info: *const os.siginfo_t, ctx_ptr: ?*const any
|
||||
};
|
||||
|
||||
const stack_ctx: StackContext = switch (builtin.cpu.arch) {
|
||||
.x86 => ctx: {
|
||||
const ctx: *const os.ucontext_t = @ptrCast(@alignCast(ctx_ptr));
|
||||
const ip = @as(usize, @intCast(ctx.mcontext.gregs[os.REG.EIP]));
|
||||
const bp = @as(usize, @intCast(ctx.mcontext.gregs[os.REG.EBP]));
|
||||
break :ctx StackContext{ .exception = .{ .bp = bp, .ip = ip } };
|
||||
},
|
||||
.x86_64 => ctx: {
|
||||
const ctx: *const os.ucontext_t = @ptrCast(@alignCast(ctx_ptr));
|
||||
const ip = switch (builtin.os.tag) {
|
||||
.linux, .netbsd, .solaris => @as(usize, @intCast(ctx.mcontext.gregs[os.REG.RIP])),
|
||||
.freebsd => @as(usize, @intCast(ctx.mcontext.rip)),
|
||||
.openbsd => @as(usize, @intCast(ctx.sc_rip)),
|
||||
.macos => @as(usize, @intCast(ctx.mcontext.ss.rip)),
|
||||
else => unreachable,
|
||||
};
|
||||
const bp = switch (builtin.os.tag) {
|
||||
.linux, .netbsd, .solaris => @as(usize, @intCast(ctx.mcontext.gregs[os.REG.RBP])),
|
||||
.openbsd => @as(usize, @intCast(ctx.sc_rbp)),
|
||||
.freebsd => @as(usize, @intCast(ctx.mcontext.rbp)),
|
||||
.macos => @as(usize, @intCast(ctx.mcontext.ss.rbp)),
|
||||
else => unreachable,
|
||||
};
|
||||
break :ctx StackContext{ .exception = .{ .bp = bp, .ip = ip } };
|
||||
},
|
||||
.arm => ctx: {
|
||||
const ctx: *const os.ucontext_t = @ptrCast(@alignCast(ctx_ptr));
|
||||
const ip = @as(usize, @intCast(ctx.mcontext.arm_pc));
|
||||
const bp = @as(usize, @intCast(ctx.mcontext.arm_fp));
|
||||
break :ctx StackContext{ .exception = .{ .bp = bp, .ip = ip } };
|
||||
},
|
||||
.aarch64 => ctx: {
|
||||
const ctx: *const os.ucontext_t = @ptrCast(@alignCast(ctx_ptr));
|
||||
const ip = switch (native_os) {
|
||||
.macos => @as(usize, @intCast(ctx.mcontext.ss.pc)),
|
||||
.netbsd => @as(usize, @intCast(ctx.mcontext.gregs[os.REG.PC])),
|
||||
.freebsd => @as(usize, @intCast(ctx.mcontext.gpregs.elr)),
|
||||
else => @as(usize, @intCast(ctx.mcontext.pc)),
|
||||
};
|
||||
// x29 is the ABI-designated frame pointer
|
||||
const bp = switch (native_os) {
|
||||
.macos => @as(usize, @intCast(ctx.mcontext.ss.fp)),
|
||||
.netbsd => @as(usize, @intCast(ctx.mcontext.gregs[os.REG.FP])),
|
||||
.freebsd => @as(usize, @intCast(ctx.mcontext.gpregs.x[os.REG.FP])),
|
||||
else => @as(usize, @intCast(ctx.mcontext.regs[29])),
|
||||
};
|
||||
break :ctx StackContext{ .exception = .{ .bp = bp, .ip = ip } };
|
||||
},
|
||||
.x86,
|
||||
.x86_64,
|
||||
.arm,
|
||||
.aarch64,
|
||||
=> StackContext{ .exception = @ptrCast(@alignCast(ctx_ptr)) },
|
||||
else => .not_supported,
|
||||
};
|
||||
|
||||
@ -275,10 +233,9 @@ fn handleSegfaultWindows(info: *os.windows.EXCEPTION_POINTERS) callconv(os.windo
|
||||
fn handleSegfaultWindowsExtra(info: *os.windows.EXCEPTION_POINTERS, comptime msg: WindowsSegfaultMessage) noreturn {
|
||||
PanicSwitch.preDispatch();
|
||||
|
||||
const stack_ctx = if (@hasDecl(os.windows, "CONTEXT")) ctx: {
|
||||
const regs = info.ContextRecord.getRegs();
|
||||
break :ctx StackContext{ .exception = .{ .bp = regs.bp, .ip = regs.ip } };
|
||||
} else ctx: {
|
||||
const stack_ctx = if (@hasDecl(os.windows, "CONTEXT"))
|
||||
StackContext{ .exception = info.ContextRecord }
|
||||
else ctx: {
|
||||
const addr = @intFromPtr(info.ExceptionRecord.ExceptionAddress);
|
||||
break :ctx StackContext{ .current = .{ .ret_addr = addr } };
|
||||
};
|
||||
@ -293,7 +250,7 @@ fn handleSegfaultWindowsExtra(info: *os.windows.EXCEPTION_POINTERS, comptime msg
|
||||
},
|
||||
.illegal_instruction => {
|
||||
const ip: ?usize = switch (stack_ctx) {
|
||||
.exception => |ex| ex.ip,
|
||||
.exception => |ex| ex.getRegs().ip,
|
||||
.current => |cur| cur.ret_addr,
|
||||
.not_supported => null,
|
||||
};
|
||||
@ -314,10 +271,7 @@ const StackContext = union(enum) {
|
||||
current: struct {
|
||||
ret_addr: ?usize,
|
||||
},
|
||||
exception: struct {
|
||||
bp: usize,
|
||||
ip: usize,
|
||||
},
|
||||
exception: *const debug.ThreadContext,
|
||||
not_supported: void,
|
||||
|
||||
pub fn dumpStackTrace(ctx: @This()) void {
|
||||
@ -325,8 +279,8 @@ const StackContext = union(enum) {
|
||||
.current => |ct| {
|
||||
debug.dumpCurrentStackTrace(ct.ret_addr);
|
||||
},
|
||||
.exception => |ex| {
|
||||
debug.dumpStackTraceFromBase(ex.bp, ex.ip);
|
||||
.exception => |context| {
|
||||
debug.dumpStackTraceFromBase(context);
|
||||
},
|
||||
.not_supported => {
|
||||
const stderr = io.getStdErr().writer();
|
||||
|
@ -510,7 +510,7 @@ pub fn clangAssemblerSupportsMcpuArg(target: std.Target) bool {
|
||||
}
|
||||
|
||||
pub fn needUnwindTables(target: std.Target) bool {
|
||||
return target.os.tag == .windows;
|
||||
return target.os.tag == .windows or target.isDarwin();
|
||||
}
|
||||
|
||||
pub fn defaultAddressSpace(
|
||||
|
@ -230,6 +230,14 @@ pub const build_cases = [_]BuildCase{
|
||||
.build_root = "test/standalone/zerolength_check",
|
||||
.import = @import("standalone/zerolength_check/build.zig"),
|
||||
},
|
||||
.{
|
||||
.build_root = "test/standalone/stack_iterator",
|
||||
.import = @import("standalone/stack_iterator/build.zig"),
|
||||
},
|
||||
.{
|
||||
.build_root = "test/standalone/coff_dwarf",
|
||||
.import = @import("standalone/coff_dwarf/build.zig"),
|
||||
},
|
||||
};
|
||||
|
||||
const std = @import("std");
|
||||
|
35
test/standalone/coff_dwarf/build.zig
Normal file
35
test/standalone/coff_dwarf/build.zig
Normal file
@ -0,0 +1,35 @@
|
||||
const std = @import("std");
|
||||
const builtin = @import("builtin");
|
||||
|
||||
/// This tests the path where DWARF information is embedded in a COFF binary
|
||||
pub fn build(b: *std.Build) void {
|
||||
const test_step = b.step("test", "Test it");
|
||||
b.default_step = test_step;
|
||||
|
||||
const optimize: std.builtin.OptimizeMode = .Debug;
|
||||
const target = b.standardTargetOptions(.{});
|
||||
|
||||
if (builtin.os.tag != .windows) return;
|
||||
|
||||
const exe = b.addExecutable(.{
|
||||
.name = "main",
|
||||
.root_source_file = .{ .path = "main.zig" },
|
||||
.optimize = optimize,
|
||||
.target = target,
|
||||
});
|
||||
|
||||
const lib = b.addSharedLibrary(.{
|
||||
.name = "shared_lib",
|
||||
.optimize = optimize,
|
||||
.target = target,
|
||||
});
|
||||
lib.addCSourceFile("shared_lib.c", &.{"-gdwarf"});
|
||||
lib.linkLibC();
|
||||
exe.linkLibrary(lib);
|
||||
|
||||
const run = b.addRunArtifact(exe);
|
||||
run.expectExitCode(0);
|
||||
run.skip_foreign_checks = true;
|
||||
|
||||
test_step.dependOn(&run.step);
|
||||
}
|
27
test/standalone/coff_dwarf/main.zig
Normal file
27
test/standalone/coff_dwarf/main.zig
Normal file
@ -0,0 +1,27 @@
|
||||
const std = @import("std");
|
||||
const assert = std.debug.assert;
|
||||
const testing = std.testing;
|
||||
|
||||
extern fn add(a: u32, b: u32, addr: *usize) u32;
|
||||
|
||||
pub fn main() !void {
|
||||
var gpa = std.heap.GeneralPurposeAllocator(.{}){};
|
||||
defer assert(gpa.deinit() == .ok);
|
||||
const allocator = gpa.allocator();
|
||||
|
||||
var debug_info = try std.debug.openSelfDebugInfo(allocator);
|
||||
defer debug_info.deinit();
|
||||
|
||||
var add_addr: usize = undefined;
|
||||
_ = add(1, 2, &add_addr);
|
||||
|
||||
const module = try debug_info.getModuleForAddress(add_addr);
|
||||
const symbol = try module.getSymbolAtAddress(allocator, add_addr);
|
||||
defer symbol.deinit(allocator);
|
||||
|
||||
try testing.expectEqualStrings("add", symbol.symbol_name);
|
||||
try testing.expect(symbol.line_info != null);
|
||||
try testing.expectEqualStrings("shared_lib.c", std.fs.path.basename(symbol.line_info.?.file_name));
|
||||
try testing.expectEqual(@as(u64, 3), symbol.line_info.?.line);
|
||||
try testing.expectEqual(@as(u64, 0), symbol.line_info.?.column);
|
||||
}
|
6
test/standalone/coff_dwarf/shared_lib.c
Normal file
6
test/standalone/coff_dwarf/shared_lib.c
Normal file
@ -0,0 +1,6 @@
|
||||
#include <stdint.h>
|
||||
|
||||
__declspec(dllexport) uint32_t add(uint32_t a, uint32_t b, uintptr_t* addr) {
|
||||
*addr = (uintptr_t)&add;
|
||||
return a + b;
|
||||
}
|
94
test/standalone/stack_iterator/build.zig
Normal file
94
test/standalone/stack_iterator/build.zig
Normal file
@ -0,0 +1,94 @@
|
||||
const std = @import("std");
|
||||
|
||||
pub fn build(b: *std.Build) void {
|
||||
const test_step = b.step("test", "Test it");
|
||||
b.default_step = test_step;
|
||||
|
||||
const target = b.standardTargetOptions(.{});
|
||||
const optimize = b.standardOptimizeOption(.{});
|
||||
|
||||
// Unwinding with a frame pointer
|
||||
//
|
||||
// getcontext version: zig std
|
||||
//
|
||||
// Unwind info type:
|
||||
// - ELF: DWARF .debug_frame
|
||||
// - MachO: __unwind_info encodings:
|
||||
// - x86_64: RBP_FRAME
|
||||
// - aarch64: FRAME, DWARF
|
||||
{
|
||||
const exe = b.addExecutable(.{
|
||||
.name = "unwind_fp",
|
||||
.root_source_file = .{ .path = "unwind.zig" },
|
||||
.target = target,
|
||||
.optimize = optimize,
|
||||
});
|
||||
|
||||
if (target.isDarwin()) exe.unwind_tables = true;
|
||||
exe.omit_frame_pointer = false;
|
||||
|
||||
const run_cmd = b.addRunArtifact(exe);
|
||||
test_step.dependOn(&run_cmd.step);
|
||||
}
|
||||
|
||||
// Unwinding without a frame pointer
|
||||
//
|
||||
// getcontext version: zig std
|
||||
//
|
||||
// Unwind info type:
|
||||
// - ELF: DWARF .eh_frame_hdr + .eh_frame
|
||||
// - MachO: __unwind_info encodings:
|
||||
// - x86_64: STACK_IMMD, STACK_IND
|
||||
// - aarch64: FRAMELESS, DWARF
|
||||
{
|
||||
const exe = b.addExecutable(.{
|
||||
.name = "unwind_nofp",
|
||||
.root_source_file = .{ .path = "unwind.zig" },
|
||||
.target = target,
|
||||
.optimize = optimize,
|
||||
});
|
||||
|
||||
exe.omit_frame_pointer = true;
|
||||
exe.unwind_tables = true;
|
||||
|
||||
const run_cmd = b.addRunArtifact(exe);
|
||||
test_step.dependOn(&run_cmd.step);
|
||||
}
|
||||
|
||||
// Unwinding through a C shared library without a frame pointer (libc)
|
||||
//
|
||||
// getcontext version: libc
|
||||
//
|
||||
// Unwind info type:
|
||||
// - ELF: DWARF .eh_frame + .debug_frame
|
||||
// - MachO: __unwind_info encodings:
|
||||
// - x86_64: STACK_IMMD, STACK_IND
|
||||
// - aarch64: FRAMELESS, DWARF
|
||||
{
|
||||
const c_shared_lib = b.addSharedLibrary(.{
|
||||
.name = "c_shared_lib",
|
||||
.target = target,
|
||||
.optimize = optimize,
|
||||
});
|
||||
|
||||
if (target.isWindows()) c_shared_lib.defineCMacro("LIB_API", "__declspec(dllexport)");
|
||||
|
||||
c_shared_lib.strip = false;
|
||||
c_shared_lib.addCSourceFile("shared_lib.c", &.{"-fomit-frame-pointer"});
|
||||
c_shared_lib.linkLibC();
|
||||
|
||||
const exe = b.addExecutable(.{
|
||||
.name = "shared_lib_unwind",
|
||||
.root_source_file = .{ .path = "shared_lib_unwind.zig" },
|
||||
.target = target,
|
||||
.optimize = optimize,
|
||||
});
|
||||
|
||||
if (target.isDarwin()) exe.unwind_tables = true;
|
||||
exe.omit_frame_pointer = true;
|
||||
exe.linkLibrary(c_shared_lib);
|
||||
|
||||
const run_cmd = b.addRunArtifact(exe);
|
||||
test_step.dependOn(&run_cmd.step);
|
||||
}
|
||||
}
|
22
test/standalone/stack_iterator/shared_lib.c
Normal file
22
test/standalone/stack_iterator/shared_lib.c
Normal file
@ -0,0 +1,22 @@
|
||||
#include <stdint.h>
|
||||
|
||||
#ifndef LIB_API
|
||||
#define LIB_API
|
||||
#endif
|
||||
|
||||
__attribute__((noinline)) void frame1(
|
||||
void** expected,
|
||||
void** unwound,
|
||||
void (*frame2)(void** expected, void** unwound)) {
|
||||
expected[3] = __builtin_extract_return_addr(__builtin_return_address(0));
|
||||
frame2(expected, unwound);
|
||||
}
|
||||
|
||||
LIB_API void frame0(
|
||||
void** expected,
|
||||
void** unwound,
|
||||
void (*frame2)(void** expected, void** unwound)) {
|
||||
expected[4] = __builtin_extract_return_addr(__builtin_return_address(0));
|
||||
frame1(expected, unwound, frame2);
|
||||
}
|
||||
|
47
test/standalone/stack_iterator/shared_lib_unwind.zig
Normal file
47
test/standalone/stack_iterator/shared_lib_unwind.zig
Normal file
@ -0,0 +1,47 @@
|
||||
const std = @import("std");
|
||||
const builtin = @import("builtin");
|
||||
const debug = std.debug;
|
||||
const testing = std.testing;
|
||||
|
||||
noinline fn frame4(expected: *[5]usize, unwound: *[5]usize) void {
|
||||
expected[0] = @returnAddress();
|
||||
|
||||
var context: debug.ThreadContext = undefined;
|
||||
testing.expect(debug.getContext(&context)) catch @panic("failed to getContext");
|
||||
|
||||
var debug_info = debug.getSelfDebugInfo() catch @panic("failed to openSelfDebugInfo");
|
||||
var it = debug.StackIterator.initWithContext(expected[0], debug_info, &context) catch @panic("failed to initWithContext");
|
||||
defer it.deinit();
|
||||
|
||||
for (unwound) |*addr| {
|
||||
if (it.next()) |return_address| addr.* = return_address;
|
||||
}
|
||||
}
|
||||
|
||||
noinline fn frame3(expected: *[5]usize, unwound: *[5]usize) void {
|
||||
expected[1] = @returnAddress();
|
||||
frame4(expected, unwound);
|
||||
}
|
||||
|
||||
fn frame2(expected: *[5]usize, unwound: *[5]usize) callconv(.C) void {
|
||||
expected[2] = @returnAddress();
|
||||
frame3(expected, unwound);
|
||||
}
|
||||
|
||||
extern fn frame0(
|
||||
expected: *[5]usize,
|
||||
unwound: *[5]usize,
|
||||
frame_2: *const fn (expected: *[5]usize, unwound: *[5]usize) callconv(.C) void,
|
||||
) void;
|
||||
|
||||
pub fn main() !void {
|
||||
// Disabled until the DWARF unwinder bugs on .aarch64 are solved
|
||||
if (builtin.omit_frame_pointer and comptime builtin.target.isDarwin() and builtin.cpu.arch == .aarch64) return;
|
||||
|
||||
if (!std.debug.have_ucontext or !std.debug.have_getcontext) return;
|
||||
|
||||
var expected: [5]usize = undefined;
|
||||
var unwound: [5]usize = undefined;
|
||||
frame0(&expected, &unwound, &frame2);
|
||||
try testing.expectEqual(expected, unwound);
|
||||
}
|
99
test/standalone/stack_iterator/unwind.zig
Normal file
99
test/standalone/stack_iterator/unwind.zig
Normal file
@ -0,0 +1,99 @@
|
||||
const std = @import("std");
|
||||
const builtin = @import("builtin");
|
||||
const debug = std.debug;
|
||||
const testing = std.testing;
|
||||
|
||||
noinline fn frame3(expected: *[4]usize, unwound: *[4]usize) void {
|
||||
expected[0] = @returnAddress();
|
||||
|
||||
var context: debug.ThreadContext = undefined;
|
||||
testing.expect(debug.getContext(&context)) catch @panic("failed to getContext");
|
||||
|
||||
var debug_info = debug.getSelfDebugInfo() catch @panic("failed to openSelfDebugInfo");
|
||||
var it = debug.StackIterator.initWithContext(expected[0], debug_info, &context) catch @panic("failed to initWithContext");
|
||||
defer it.deinit();
|
||||
|
||||
for (unwound) |*addr| {
|
||||
if (it.next()) |return_address| addr.* = return_address;
|
||||
}
|
||||
}
|
||||
|
||||
noinline fn frame2(expected: *[4]usize, unwound: *[4]usize) void {
|
||||
// Excercise different __unwind_info / DWARF CFI encodings by forcing some registers to be restored
|
||||
if (builtin.target.ofmt != .c) {
|
||||
switch (builtin.cpu.arch) {
|
||||
.x86 => {
|
||||
if (builtin.omit_frame_pointer) {
|
||||
asm volatile (
|
||||
\\movl $3, %%ebx
|
||||
\\movl $1, %%ecx
|
||||
\\movl $2, %%edx
|
||||
\\movl $7, %%edi
|
||||
\\movl $6, %%esi
|
||||
\\movl $5, %%ebp
|
||||
::: "ebx", "ecx", "edx", "edi", "esi", "ebp");
|
||||
} else {
|
||||
asm volatile (
|
||||
\\movl $3, %%ebx
|
||||
\\movl $1, %%ecx
|
||||
\\movl $2, %%edx
|
||||
\\movl $7, %%edi
|
||||
\\movl $6, %%esi
|
||||
::: "ebx", "ecx", "edx", "edi", "esi");
|
||||
}
|
||||
},
|
||||
.x86_64 => {
|
||||
if (builtin.omit_frame_pointer) {
|
||||
asm volatile (
|
||||
\\movq $3, %%rbx
|
||||
\\movq $12, %%r12
|
||||
\\movq $13, %%r13
|
||||
\\movq $14, %%r14
|
||||
\\movq $15, %%r15
|
||||
\\movq $6, %%rbp
|
||||
::: "rbx", "r12", "r13", "r14", "r15", "rbp");
|
||||
} else {
|
||||
asm volatile (
|
||||
\\movq $3, %%rbx
|
||||
\\movq $12, %%r12
|
||||
\\movq $13, %%r13
|
||||
\\movq $14, %%r14
|
||||
\\movq $15, %%r15
|
||||
::: "rbx", "r12", "r13", "r14", "r15");
|
||||
}
|
||||
},
|
||||
else => {},
|
||||
}
|
||||
}
|
||||
|
||||
expected[1] = @returnAddress();
|
||||
frame3(expected, unwound);
|
||||
}
|
||||
|
||||
noinline fn frame1(expected: *[4]usize, unwound: *[4]usize) void {
|
||||
expected[2] = @returnAddress();
|
||||
|
||||
// Use a stack frame that is too big to encode in __unwind_info's stack-immediate encoding
|
||||
// to exercise the stack-indirect encoding path
|
||||
var pad: [std.math.maxInt(u8) * @sizeOf(usize) + 1]u8 = undefined;
|
||||
_ = pad;
|
||||
|
||||
frame2(expected, unwound);
|
||||
}
|
||||
|
||||
noinline fn frame0(expected: *[4]usize, unwound: *[4]usize) void {
|
||||
expected[3] = @returnAddress();
|
||||
frame1(expected, unwound);
|
||||
}
|
||||
|
||||
pub fn main() !void {
|
||||
// Disabled until the DWARF unwinder bugs on .aarch64 are solved
|
||||
if (builtin.omit_frame_pointer and comptime builtin.target.isDarwin() and builtin.cpu.arch == .aarch64) return;
|
||||
|
||||
if (!std.debug.have_ucontext or !std.debug.have_getcontext) return;
|
||||
|
||||
var expected: [4]usize = undefined;
|
||||
var unwound: [4]usize = undefined;
|
||||
frame0(&expected, &unwound);
|
||||
try testing.expectEqual(expected, unwound);
|
||||
}
|
Loading…
Reference in New Issue
Block a user