wasm-linker: Do not merge data segments for obj

When creating a relocatable object file, we no longer perform the following actions:
- Merge data segments
- Calculate stack size
- Resolve relocations

We now also make the stack pointer symbol `undefined` for this use case, and add the symbol
as an import.
This commit is contained in:
Luuk de Gram 2022-02-18 21:47:57 +01:00
parent daf741318e
commit 2b0431a8d3
3 changed files with 137 additions and 101 deletions

View File

@ -163,6 +163,26 @@ pub fn openPath(allocator: Allocator, sub_path: []const u8, options: link.Option
try file.writeAll(&(wasm.magic ++ wasm.version)); try file.writeAll(&(wasm.magic ++ wasm.version));
// As sym_index '0' is reserved, we use it for our stack pointer symbol // As sym_index '0' is reserved, we use it for our stack pointer symbol
const symbol = try wasm_bin.symbols.addOne(allocator);
symbol.* = .{
.name = "__stack_pointer",
.tag = .global,
.flags = 0,
.index = 0,
};
// For object files we will import the stack pointer symbol
if (options.output_mode == .Obj) {
symbol.setUndefined(true);
try wasm_bin.imports.putNoClobber(
allocator,
.{ .file = null, .index = 0 },
.{
.module_name = wasm_bin.host_name,
.name = "__stack_pointer",
.kind = .{ .global = .{ .valtype = .i32, .mutable = true } },
},
);
} else {
const global = try wasm_bin.wasm_globals.addOne(allocator); const global = try wasm_bin.wasm_globals.addOne(allocator);
global.* = .{ global.* = .{
.global_type = .{ .global_type = .{
@ -171,13 +191,7 @@ pub fn openPath(allocator: Allocator, sub_path: []const u8, options: link.Option
}, },
.init = .{ .i32_const = 0 }, .init = .{ .i32_const = 0 },
}; };
const symbol = try wasm_bin.symbols.addOne(allocator); }
symbol.* = .{
.name = "__stack_pointer",
.tag = .global,
.flags = 0,
.index = 0,
};
return wasm_bin; return wasm_bin;
} }
@ -651,11 +665,37 @@ fn parseAtom(self: *Wasm, atom: *Atom, kind: Kind) !void {
break :result self.code_section_index.?; break :result self.code_section_index.?;
}, },
.data => result: { .data => result: {
const gop = try self.data_segments.getOrPut(self.base.allocator, ".rodata"); // TODO: Add mutables global decls to .bss section instead
const atom_index = if (gop.found_existing) blk: { const segment_name = try std.mem.concat(self.base.allocator, u8, &.{
self.segments.items[gop.value_ptr.*].size += atom.size; ".rodata.",
break :blk gop.value_ptr.*; std.mem.span(symbol.name),
} else blk: { });
errdefer self.base.allocator.free(segment_name);
const segment_info: types.Segment = .{
.name = segment_name,
.alignment = atom.alignment,
.flags = 0,
};
symbol.tag = .data;
const should_merge = self.base.options.output_mode != .Obj;
const gop = try self.data_segments.getOrPut(self.base.allocator, segment_info.outputName(should_merge));
if (gop.found_existing) {
const index = gop.value_ptr.*;
self.segments.items[index].size += atom.size;
// segment indexes can be off by 1 due to also containing a segment
// for the code section, so we must check if the existing segment
// is larger than that of the code section, and subtract 1 from the index in that case.
const info_add = if (self.code_section_index) |idx| blk: {
if (idx < index) break :blk @as(u32, 1);
break :blk 0;
} else @as(u32, 0);
symbol.index = index - info_add;
// segment info already exists, so free its memory
self.base.allocator.free(segment_name);
break :result index;
} else {
const index = @intCast(u32, self.segments.items.len); const index = @intCast(u32, self.segments.items.len);
try self.segments.append(self.base.allocator, .{ try self.segments.append(self.base.allocator, .{
.alignment = atom.alignment, .alignment = atom.alignment,
@ -663,24 +703,12 @@ fn parseAtom(self: *Wasm, atom: *Atom, kind: Kind) !void {
.offset = 0, .offset = 0,
}); });
gop.value_ptr.* = index; gop.value_ptr.* = index;
break :blk index;
};
const info_index = @intCast(u32, self.segment_info.items.len);
// TODO: Add mutables global decls to .bss section instead
const segment_name = try std.mem.concat(self.base.allocator, u8, &.{
".rodata.",
std.mem.span(symbol.name),
});
errdefer self.base.allocator.free(segment_name);
try self.segment_info.append(self.base.allocator, .{
.name = segment_name,
.alignment = atom.alignment,
.flags = 0,
});
symbol.tag = .data;
symbol.index = info_index;
break :result atom_index; const info_index = @intCast(u32, self.segment_info.items.len);
try self.segment_info.append(self.base.allocator, segment_info);
symbol.index = info_index;
break :result index;
}
}, },
}; };
@ -932,7 +960,9 @@ fn setupMemory(self: *Wasm) !void {
break :blk base; break :blk base;
} else 0; } else 0;
if (place_stack_first) { const is_obj = self.base.options.output_mode == .Obj;
if (place_stack_first and !is_obj) {
memory_ptr = std.mem.alignForwardGeneric(u64, memory_ptr, stack_alignment); memory_ptr = std.mem.alignForwardGeneric(u64, memory_ptr, stack_alignment);
memory_ptr += stack_size; memory_ptr += stack_size;
// We always put the stack pointer global at index 0 // We always put the stack pointer global at index 0
@ -951,7 +981,7 @@ fn setupMemory(self: *Wasm) !void {
offset += segment.size; offset += segment.size;
} }
if (!place_stack_first) { if (!place_stack_first and !is_obj) {
memory_ptr = std.mem.alignForwardGeneric(u64, memory_ptr, stack_alignment); memory_ptr = std.mem.alignForwardGeneric(u64, memory_ptr, stack_alignment);
memory_ptr += stack_size; memory_ptr += stack_size;
self.wasm_globals.items[0].init.i32_const = @bitCast(i32, @intCast(u32, memory_ptr)); self.wasm_globals.items[0].init.i32_const = @bitCast(i32, @intCast(u32, memory_ptr));
@ -1011,7 +1041,8 @@ pub fn getMatchingSegment(self: *Wasm, object_index: u16, relocatable_index: u32
switch (relocatable_data.type) { switch (relocatable_data.type) {
.data => { .data => {
const segment_info = object.segment_info[relocatable_data.index]; const segment_info = object.segment_info[relocatable_data.index];
const result = try self.data_segments.getOrPut(self.base.allocator, segment_info.outputName()); const merge_segment = self.base.options.output_mode != .Obj;
const result = try self.data_segments.getOrPut(self.base.allocator, segment_info.outputName(merge_segment));
if (!result.found_existing) { if (!result.found_existing) {
result.value_ptr.* = index; result.value_ptr.* = index;
try self.segments.append(self.base.allocator, .{ try self.segments.append(self.base.allocator, .{
@ -1368,7 +1399,9 @@ pub fn flushModule(self: *Wasm, comp: *Compilation) !void {
const writer = file.writer(); const writer = file.writer();
var atom: *Atom = self.atoms.get(code_index).?.getFirst(); var atom: *Atom = self.atoms.get(code_index).?.getFirst();
while (true) { while (true) {
if (!is_obj) {
try atom.resolveRelocs(self); try atom.resolveRelocs(self);
}
try leb.writeULEB128(writer, atom.size); try leb.writeULEB128(writer, atom.size);
try writer.writeAll(atom.code.items); try writer.writeAll(atom.code.items);
atom = atom.next orelse break; atom = atom.next orelse break;
@ -1390,8 +1423,9 @@ pub fn flushModule(self: *Wasm, comp: *Compilation) !void {
var it = self.data_segments.iterator(); var it = self.data_segments.iterator();
var segment_count: u32 = 0; var segment_count: u32 = 0;
while (it.next()) |entry| { while (it.next()) |entry| {
// do not output 'bss' section // do not output 'bss' section unless we import memory and therefore
if (std.mem.eql(u8, entry.key_ptr.*, ".bss")) continue; // want to guarantee the data is zero initialized
if (std.mem.eql(u8, entry.key_ptr.*, ".bss") and !import_memory) continue;
segment_count += 1; segment_count += 1;
const atom_index = entry.value_ptr.*; const atom_index = entry.value_ptr.*;
var atom: *Atom = self.atoms.getPtr(atom_index).?.*.getFirst(); var atom: *Atom = self.atoms.getPtr(atom_index).?.*.getFirst();
@ -1406,7 +1440,9 @@ pub fn flushModule(self: *Wasm, comp: *Compilation) !void {
// fill in the offset table and the data segments // fill in the offset table and the data segments
var current_offset: u32 = 0; var current_offset: u32 = 0;
while (true) { while (true) {
if (!is_obj) {
try atom.resolveRelocs(self); try atom.resolveRelocs(self);
}
// Pad with zeroes to ensure all segments are aligned // Pad with zeroes to ensure all segments are aligned
if (current_offset != atom.offset) { if (current_offset != atom.offset) {
@ -1443,8 +1479,14 @@ pub fn flushModule(self: *Wasm, comp: *Compilation) !void {
); );
} }
// Custom section "name" which contains symbol names if (is_obj) {
if (!is_obj) { try self.emitLinkSection(file, arena);
} else {
try self.emitNameSection(file, arena);
}
}
fn emitNameSection(self: *Wasm, file: fs.File, arena: Allocator) !void {
const Name = struct { const Name = struct {
index: u32, index: u32,
name: []const u8, name: []const u8,
@ -1455,12 +1497,9 @@ pub fn flushModule(self: *Wasm, comp: *Compilation) !void {
} }
}; };
var funcs = try std.ArrayList(Name).initCapacity(self.base.allocator, self.functions.items.len + self.imported_functions_count); var funcs = try std.ArrayList(Name).initCapacity(arena, self.functions.items.len + self.imported_functions_count);
defer funcs.deinit(); var globals = try std.ArrayList(Name).initCapacity(arena, self.wasm_globals.items.len);
var globals = try std.ArrayList(Name).initCapacity(self.base.allocator, self.wasm_globals.items.len); var segments = try std.ArrayList(Name).initCapacity(arena, self.data_segments.count());
defer globals.deinit();
var segments = try std.ArrayList(Name).initCapacity(self.base.allocator, self.data_segments.count());
defer segments.deinit();
for (self.resolved_symbols.keys()) |sym_loc| { for (self.resolved_symbols.keys()) |sym_loc| {
const symbol = sym_loc.getSymbol(self).*; const symbol = sym_loc.getSymbol(self).*;
@ -1490,15 +1529,10 @@ pub fn flushModule(self: *Wasm, comp: *Compilation) !void {
try writeCustomSectionHeader( try writeCustomSectionHeader(
file, file,
header_offset, header_offset,
@intCast(u32, (try file.getPos()) - header_offset - header_size), @intCast(u32, (try file.getPos()) - header_offset - 6),
); );
} }
if (is_obj) {
try self.emitLinkSection(file, arena);
}
}
fn emitNameSubsection(self: *Wasm, section_id: std.wasm.NameSubsection, names: anytype, writer: anytype) !void { fn emitNameSubsection(self: *Wasm, section_id: std.wasm.NameSubsection, names: anytype, writer: anytype) !void {
// We must emit subsection size, so first write to a temporary list // We must emit subsection size, so first write to a temporary list
var section_list = std.ArrayList(u8).init(self.base.allocator); var section_list = std.ArrayList(u8).init(self.base.allocator);

View File

@ -147,33 +147,34 @@ pub fn resolveRelocs(self: *Atom, wasm_bin: *const Wasm) !void {
fn relocationValue(self: Atom, relocation: types.Relocation, wasm_bin: *const Wasm) !u64 { fn relocationValue(self: Atom, relocation: types.Relocation, wasm_bin: *const Wasm) !u64 {
const target_loc: Wasm.SymbolLoc = .{ .file = self.file, .index = relocation.index }; const target_loc: Wasm.SymbolLoc = .{ .file = self.file, .index = relocation.index };
const symbol = target_loc.getSymbol(wasm_bin).*; const symbol = target_loc.getSymbol(wasm_bin).*;
return switch (relocation.relocation_type) { switch (relocation.relocation_type) {
.R_WASM_FUNCTION_INDEX_LEB => symbol.index, .R_WASM_FUNCTION_INDEX_LEB => return symbol.index,
.R_WASM_TABLE_NUMBER_LEB => symbol.index, .R_WASM_TABLE_NUMBER_LEB => return symbol.index,
.R_WASM_TABLE_INDEX_I32, .R_WASM_TABLE_INDEX_I32,
.R_WASM_TABLE_INDEX_I64, .R_WASM_TABLE_INDEX_I64,
.R_WASM_TABLE_INDEX_SLEB, .R_WASM_TABLE_INDEX_SLEB,
.R_WASM_TABLE_INDEX_SLEB64, .R_WASM_TABLE_INDEX_SLEB64,
=> return wasm_bin.function_table.get(relocation.index) orelse 0, => return wasm_bin.function_table.get(relocation.index) orelse 0,
.R_WASM_TYPE_INDEX_LEB => wasm_bin.functions.items[symbol.index].type_index, .R_WASM_TYPE_INDEX_LEB => return wasm_bin.functions.items[symbol.index].type_index,
.R_WASM_GLOBAL_INDEX_I32, .R_WASM_GLOBAL_INDEX_I32,
.R_WASM_GLOBAL_INDEX_LEB, .R_WASM_GLOBAL_INDEX_LEB,
=> symbol.index, => return symbol.index,
.R_WASM_MEMORY_ADDR_I32, .R_WASM_MEMORY_ADDR_I32,
.R_WASM_MEMORY_ADDR_I64, .R_WASM_MEMORY_ADDR_I64,
.R_WASM_MEMORY_ADDR_LEB, .R_WASM_MEMORY_ADDR_LEB,
.R_WASM_MEMORY_ADDR_LEB64, .R_WASM_MEMORY_ADDR_LEB64,
.R_WASM_MEMORY_ADDR_SLEB, .R_WASM_MEMORY_ADDR_SLEB,
.R_WASM_MEMORY_ADDR_SLEB64, .R_WASM_MEMORY_ADDR_SLEB64,
=> blk: { => {
if (symbol.isUndefined() and (symbol.tag == .data or symbol.isWeak())) { if (symbol.isUndefined() and (symbol.tag == .data or symbol.isWeak())) {
return 0; return 0;
} }
const segment_name = wasm_bin.segment_info.items[symbol.index].outputName(); const merge_segment = wasm_bin.base.options.output_mode != .Obj;
const segment_name = wasm_bin.segment_info.items[symbol.index].outputName(merge_segment);
const atom_index = wasm_bin.data_segments.get(segment_name).?; const atom_index = wasm_bin.data_segments.get(segment_name).?;
var target_atom = wasm_bin.atoms.getPtr(atom_index).?.*.getFirst(); var target_atom = wasm_bin.atoms.getPtr(atom_index).?.*.getFirst();
while (true) { while (true) {
// TODO: Can we simplify this by providing the ability to find and atom // TODO: Can we simplify this by providing the ability to find an atom
// based on a symbol location. // based on a symbol location.
if (target_atom.sym_index == relocation.index) { if (target_atom.sym_index == relocation.index) {
if (target_atom.file) |file| { if (target_atom.file) |file| {
@ -183,11 +184,11 @@ fn relocationValue(self: Atom, relocation: types.Relocation, wasm_bin: *const Wa
target_atom = target_atom.next orelse break; target_atom = target_atom.next orelse break;
} }
const segment = wasm_bin.segments.items[atom_index]; const segment = wasm_bin.segments.items[atom_index];
break :blk target_atom.offset + segment.offset + (relocation.addend orelse 0); return target_atom.offset + segment.offset + (relocation.addend orelse 0);
}, },
.R_WASM_EVENT_INDEX_LEB => symbol.index, .R_WASM_EVENT_INDEX_LEB => return symbol.index,
.R_WASM_SECTION_OFFSET_I32, .R_WASM_SECTION_OFFSET_I32,
.R_WASM_FUNCTION_OFFSET_I32, .R_WASM_FUNCTION_OFFSET_I32,
=> relocation.offset, => return relocation.offset,
}; }
} }

View File

@ -93,7 +93,8 @@ pub const Segment = struct {
/// Bitfield containing flags for a segment /// Bitfield containing flags for a segment
flags: u32, flags: u32,
pub fn outputName(self: Segment) []const u8 { pub fn outputName(self: Segment, merge_segments: bool) []const u8 {
if (!merge_segments) return self.name;
if (std.mem.startsWith(u8, self.name, ".rodata.")) { if (std.mem.startsWith(u8, self.name, ".rodata.")) {
return ".rodata"; return ".rodata";
} else if (std.mem.startsWith(u8, self.name, ".text.")) { } else if (std.mem.startsWith(u8, self.name, ".text.")) {