Merge pull request #10188 from Luukdegram/stage2-wasm-stack

stage2: wasm - implement the stack
This commit is contained in:
Andrew Kelley 2021-11-21 22:09:47 -05:00 committed by GitHub
commit 722c6b9567
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
6 changed files with 671 additions and 215 deletions

View File

@ -28,16 +28,15 @@ const WValue = union(enum) {
local: u32,
/// Holds a memoized typed value
constant: TypedValue,
/// Offset position in the list of MIR instructions
mir_offset: usize,
/// Used for variables that create multiple locals on the stack when allocated
/// such as structs and optionals.
multi_value: struct {
/// The index of the first local variable
index: u32,
/// The count of local variables this `WValue` consists of.
/// i.e. an ErrorUnion has a 'count' of 2.
count: u32,
/// Used for types that consist of multiple areas within
/// a memory region in the stack.
/// The local represents the position in the stack,
/// whereas the offset represents the offset from that position.
local_with_offset: struct {
/// Index of the local variable
local: u32,
/// The offset from the local's stack position
offset: u32,
},
};
@ -187,7 +186,14 @@ fn buildOpcode(args: OpcodeBuildArguments) wasm.Opcode {
},
32 => switch (args.valtype1.?) {
.i64 => if (args.signedness.? == .signed) return .i64_load32_s else return .i64_load32_u,
.i32, .f32, .f64 => unreachable,
.i32 => return .i32_load,
.f32 => return .f32_load,
.f64 => unreachable,
},
64 => switch (args.valtype1.?) {
.i64 => return .i64_load,
.f64 => return .f64_load,
else => unreachable,
},
else => unreachable,
} else switch (args.valtype1.?) {
@ -210,7 +216,14 @@ fn buildOpcode(args: OpcodeBuildArguments) wasm.Opcode {
},
32 => switch (args.valtype1.?) {
.i64 => return .i64_store32,
.i32, .f32, .f64 => unreachable,
.i32 => return .i32_store,
.f32 => return .f32_store,
.f64 => unreachable,
},
64 => switch (args.valtype1.?) {
.i64 => return .i64_store,
.f64 => return .f64_store,
else => unreachable,
},
else => unreachable,
}
@ -499,7 +512,10 @@ gpa: *mem.Allocator,
/// Table to save `WValue`'s generated by an `Air.Inst`
values: ValueTable,
/// Mapping from Air.Inst.Index to block ids
blocks: std.AutoArrayHashMapUnmanaged(Air.Inst.Index, u32) = .{},
blocks: std.AutoArrayHashMapUnmanaged(Air.Inst.Index, struct {
label: u32,
value: WValue,
}) = .{},
/// `bytes` contains the wasm bytecode belonging to the 'code' section.
code: ArrayList(u8),
/// Contains the generated function type bytecode for the current function
@ -509,6 +525,9 @@ func_type_data: ArrayList(u8),
/// NOTE: arguments share the index with locals therefore the first variable
/// will have the index that comes after the last argument's index
local_index: u32 = 0,
/// The index of the current argument.
/// Used to track which argument is being referenced in `airArg`.
arg_index: u32 = 0,
/// If codegen fails, an error message will be allocated and saved in `err_msg`
err_msg: *Module.ErrorMsg,
/// Current block depth. Used to calculate the relative difference between a break
@ -529,6 +548,16 @@ global_error_set: std.StringHashMapUnmanaged(Module.ErrorInt),
mir_instructions: std.MultiArrayList(Mir.Inst) = .{},
/// Contains extra data for MIR
mir_extra: std.ArrayListUnmanaged(u32) = .{},
/// When a function is executing, we store the current stack pointer's value within this local.
/// This value is then used to restore the stack pointer to the original value at the return of the function.
initial_stack_value: WValue = .none,
/// Arguments of this function declaration
/// This will be set after `resolveCallingConventionValues`
args: []WValue = undefined,
/// This will only be `.none` if the function returns void, or returns an immediate.
/// When it returns a pointer to the stack, the `.local` tag will be active and must be populated
/// before this function returns its execution to the caller.
return_value: WValue = .none,
const InnerError = error{
OutOfMemory,
@ -662,9 +691,11 @@ fn typeToValtype(self: *Self, ty: Type) InnerError!wasm.Valtype {
.Bool,
.Pointer,
.ErrorSet,
.Struct,
.ErrorUnion,
.Optional,
=> wasm.Valtype.i32,
.Struct, .ErrorUnion, .Optional => unreachable, // Multi typed, must be handled individually.
else => |tag| self.fail("TODO - Wasm valtype for type '{s}'", .{tag}),
else => self.fail("TODO - Wasm valtype for type '{}'", .{ty}),
};
}
@ -686,78 +717,21 @@ fn genBlockType(self: *Self, ty: Type) InnerError!u8 {
/// Writes the bytecode depending on the given `WValue` in `val`
fn emitWValue(self: *Self, val: WValue) InnerError!void {
switch (val) {
.multi_value => unreachable, // multi_value can never be written directly, and must be accessed individually
.none, .mir_offset => {}, // no-op
.local => |idx| {
try self.addLabel(.local_get, idx);
},
.none => {}, // no-op
.local_with_offset => |with_off| try self.addLabel(.local_get, with_off.local),
.local => |idx| try self.addLabel(.local_get, idx),
.constant => |tv| try self.emitConstant(tv.val, tv.ty), // Creates a new constant on the stack
}
}
/// Creates one or multiple locals for a given `Type`.
/// Returns a corresponding `WValue` that can either be of tag
/// local or multi_value
/// Creates one local for a given `Type`.
/// Returns a corresponding `WValue` with `local` as active tag
fn allocLocal(self: *Self, ty: Type) InnerError!WValue {
const initial_index = self.local_index;
switch (ty.zigTypeTag()) {
.Struct => {
// for each struct field, generate a local
const struct_data: *Module.Struct = ty.castTag(.@"struct").?.data;
const fields_len = @intCast(u32, struct_data.fields.count());
try self.locals.ensureUnusedCapacity(self.gpa, fields_len);
for (struct_data.fields.values()) |*value| {
const val_type = try self.genValtype(value.ty);
self.locals.appendAssumeCapacity(val_type);
self.local_index += 1;
}
return WValue{ .multi_value = .{
.index = initial_index,
.count = fields_len,
} };
},
.ErrorUnion => {
const payload_type = ty.errorUnionPayload();
const val_type = try self.genValtype(payload_type);
// we emit the error value as the first local, and the payload as the following.
// The first local is also used to find the index of the error and payload.
//
// TODO: Add support where the payload is a type that contains multiple locals such as a struct.
try self.locals.ensureUnusedCapacity(self.gpa, 2);
self.locals.appendAssumeCapacity(wasm.valtype(.i32)); // error values are always i32
self.locals.appendAssumeCapacity(val_type);
self.local_index += 2;
return WValue{ .multi_value = .{
.index = initial_index,
.count = 2,
} };
},
.Optional => {
var opt_buf: Type.Payload.ElemType = undefined;
const child_type = ty.optionalChild(&opt_buf);
if (ty.isPtrLikeOptional()) {
return self.fail("TODO: wasm optional pointer", .{});
}
try self.locals.ensureUnusedCapacity(self.gpa, 2);
self.locals.appendAssumeCapacity(wasm.valtype(.i32)); // optional 'tag' for null-checking is always i32
self.locals.appendAssumeCapacity(try self.genValtype(child_type));
self.local_index += 2;
return WValue{ .multi_value = .{
.index = initial_index,
.count = 2,
} };
},
else => {
const valtype = try self.genValtype(ty);
try self.locals.append(self.gpa, valtype);
self.local_index += 1;
return WValue{ .local = initial_index };
},
}
const valtype = try self.genValtype(ty);
try self.locals.append(self.gpa, valtype);
self.local_index += 1;
return WValue{ .local = initial_index };
}
fn genFunctype(self: *Self) InnerError!void {
@ -786,17 +760,8 @@ fn genFunctype(self: *Self) InnerError!void {
.Void, .NoReturn => try leb.writeULEB128(writer, @as(u32, 0)),
.Struct => return self.fail("TODO: Implement struct as return type for wasm", .{}),
.Optional => return self.fail("TODO: Implement optionals as return type for wasm", .{}),
.ErrorUnion => {
const val_type = try self.genValtype(return_type.errorUnionPayload());
// write down the amount of return values
try leb.writeULEB128(writer, @as(u32, 2));
try writer.writeByte(wasm.valtype(.i32)); // error code is always an i32 integer.
try writer.writeByte(val_type);
},
else => {
try leb.writeULEB128(writer, @as(u32, 1));
// Can we maybe get the source index of the return type?
const val_type = try self.genValtype(return_type);
try writer.writeByte(val_type);
},
@ -807,6 +772,12 @@ pub fn genFunc(self: *Self) InnerError!Result {
try self.genFunctype();
// TODO: check for and handle death of instructions
var cc_result = try self.resolveCallingConventionValues(self.decl.ty);
defer cc_result.deinit(self.gpa);
self.args = cc_result.args;
self.return_value = cc_result.return_value;
// Generate MIR for function body
try self.genBody(self.air.getMainBody());
// End of function body
@ -853,7 +824,7 @@ pub fn gen(self: *Self, ty: Type, val: Value) InnerError!Result {
if (ty.sentinel()) |sentinel| {
try self.code.appendSlice(payload.data);
switch (try self.gen(ty.elemType(), sentinel)) {
switch (try self.gen(ty.childType(), sentinel)) {
.appended => return Result.appended,
.externally_managed => |data| {
try self.code.appendSlice(data);
@ -887,6 +858,110 @@ pub fn gen(self: *Self, ty: Type, val: Value) InnerError!Result {
}
}
/// Result of resolving a function's calling convention:
/// one `WValue` per parameter, plus the value (if any) used to
/// return a result through the stack.
const CallWValues = struct {
/// One entry per function parameter; `.none` for parameters without codegen bits.
args: []WValue,
/// `.none` unless the return value is passed via a stack pointer local.
return_value: WValue,
/// Frees the `args` slice. The struct must not be used afterwards.
fn deinit(self: *CallWValues, gpa: *Allocator) void {
gpa.free(self.args);
self.* = undefined;
}
};
/// Maps each parameter of `fn_ty` to a `WValue` according to its calling
/// convention, and decides how the return value is passed (by value, or
/// through a pointer into the stack for multi-typed returns).
/// Caller owns the returned `CallWValues` and must call `deinit` on it.
fn resolveCallingConventionValues(self: *Self, fn_ty: Type) InnerError!CallWValues {
const cc = fn_ty.fnCallingConvention();
const param_types = try self.gpa.alloc(Type, fn_ty.fnParamLen());
defer self.gpa.free(param_types);
fn_ty.fnParamTypes(param_types);
var result: CallWValues = .{
.args = try self.gpa.alloc(WValue, param_types.len),
.return_value = .none,
};
errdefer self.gpa.free(result.args);
switch (cc) {
// Naked functions have no parameters or return value to resolve.
.Naked => return result,
.Unspecified, .C => {
// Wasm arguments share the index space with locals, so each
// parameter with codegen bits claims the next local index.
for (param_types) |ty, ty_index| {
if (!ty.hasCodeGenBits()) {
result.args[ty_index] = .{ .none = {} };
continue;
}
result.args[ty_index] = .{ .local = self.local_index };
self.local_index += 1;
}
const ret_ty = fn_ty.fnReturnType();
switch (ret_ty.zigTypeTag()) {
// Multi-typed results are returned via a pointer into the stack,
// held in an i32 local.
.ErrorUnion, .Optional => result.return_value = try self.allocLocal(Type.initTag(.i32)),
// These are returned by value (or not at all); nothing to allocate.
.Int, .Float, .Bool, .Void, .NoReturn => {},
else => return self.fail("TODO: Implement function return type {}", .{ret_ty}),
}
// Check if we store the result as a pointer to the stack rather than
// by value
if (result.return_value != .none) {
// Lazily set up the stack frame the first time it is needed.
if (self.initial_stack_value == .none) try self.initializeStack();
const offset = std.math.cast(u32, ret_ty.abiSize(self.target)) catch {
return self.fail("Return type '{}' too big for stack frame", .{ret_ty});
};
// Reserve `offset` bytes of stack space for the return value and
// point `return_value`'s local at it.
try self.moveStack(offset, result.return_value.local);
}
},
else => return self.fail("TODO implement function parameters for cc '{}' on wasm", .{cc}),
}
return result;
}
/// Retrieves the stack pointer's value from the global variable and stores
/// it in a local, so it can later be restored by `restoreStackPointer`.
/// Asserts `initial_stack_value` is `.none` (i.e. this runs at most once per function).
fn initializeStack(self: *Self) !void {
assert(self.initial_stack_value == .none);
// reserve space for immediate value
// get stack pointer global
// TODO: For now, we hardcode the stack pointer to index '0',
// once the linker is further implemented, we can replace this by inserting
// a relocation and have the linker resolve the correct index to the stack pointer global.
// NOTE: relocations of the type GLOBAL_INDEX_LEB are 5-bytes big
try self.addLabel(.global_get, 0);
// Reserve a local to store the current stack pointer
// We can later use this local to set the stack pointer back to the value
// we have stored here.
self.initial_stack_value = try self.allocLocal(Type.initTag(.i32));
// save the value to the local
try self.addLabel(.local_set, self.initial_stack_value.local);
}
/// Reads the stack pointer from `Context.initial_stack_value` and writes it
/// back to the global stack pointer variable, undoing any `moveStack` calls
/// made during this function. No-op when the stack was never initialized.
fn restoreStackPointer(self: *Self) !void {
// only restore the pointer if it was initialized
if (self.initial_stack_value == .none) return;
// Get the original stack pointer's value
try self.emitWValue(self.initial_stack_value);
// save its value in the global stack pointer
// (global index 0 is the hardcoded stack pointer; see `initializeStack`)
try self.addLabel(.global_set, 0);
}
/// Moves the stack pointer down by the given `offset`, reserving that many
/// bytes of stack space. It does this by retrieving the stack pointer,
/// subtracting `offset` and storing the result back into the stack pointer.
/// The new stack pointer value is also saved into `local`, which then points
/// at the freshly reserved region. No-op when `offset` is 0.
fn moveStack(self: *Self, offset: u32, local: u32) !void {
if (offset == 0) return;
// TODO: Rather than hardcode the stack pointer to position 0,
// have the linker resolve its relocation
try self.addLabel(.global_get, 0);
try self.addImm32(@bitCast(i32, offset));
try self.addTag(.i32_sub);
// `local_tee` stores the new value into `local` while keeping it on the
// wasm value stack for the following `global_set`.
try self.addLabel(.local_tee, local);
try self.addLabel(.global_set, 0);
}
fn genInst(self: *Self, inst: Air.Inst.Index) !WValue {
const air_tags = self.air.instructions.items(.tag);
return switch (air_tags[inst]) {
@ -965,7 +1040,15 @@ fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void {
fn airRet(self: *Self, inst: Air.Inst.Index) InnerError!WValue {
const un_op = self.air.instructions.items(.data)[inst].un_op;
const operand = self.resolveInst(un_op);
try self.emitWValue(operand);
// result must be stored in the stack and we return a pointer
// to the stack instead
if (self.return_value != .none) {
try self.store(self.return_value, operand, self.decl.ty.fnReturnType(), 0);
try self.emitWValue(self.return_value);
} else {
try self.emitWValue(operand);
}
try self.restoreStackPointer();
try self.addTag(.@"return");
return .none;
}
@ -993,12 +1076,33 @@ fn airCall(self: *Self, inst: Air.Inst.Index) InnerError!WValue {
try self.addLabel(.call, target.link.wasm.symbol_index);
return .none;
const ret_ty = target.ty.fnReturnType();
switch (ret_ty.zigTypeTag()) {
.Void, .NoReturn => return WValue.none,
else => {
const result_local = try self.allocLocal(ret_ty);
try self.addLabel(.local_set, result_local.local);
return result_local;
},
}
}
fn airAlloc(self: *Self, inst: Air.Inst.Index) InnerError!WValue {
const elem_type = self.air.typeOfIndex(inst).elemType();
return self.allocLocal(elem_type);
const child_type = self.air.typeOfIndex(inst).childType();
// Initialize the stack
if (self.initial_stack_value == .none) {
try self.initializeStack();
}
const abi_size = child_type.abiSize(self.target);
if (abi_size == 0) return WValue{ .none = {} };
// local, containing the offset to the stack position
const local = try self.allocLocal(Type.initTag(.i32)); // always pointer therefore i32
try self.moveStack(@intCast(u32, abi_size), local.local);
return local;
}
fn airStore(self: *Self, inst: Air.Inst.Index) InnerError!WValue {
@ -1006,56 +1110,149 @@ fn airStore(self: *Self, inst: Air.Inst.Index) InnerError!WValue {
const lhs = self.resolveInst(bin_op.lhs);
const rhs = self.resolveInst(bin_op.rhs);
const ty = self.air.typeOf(bin_op.lhs).childType();
switch (lhs) {
.multi_value => |multi_value| switch (rhs) {
// When assigning a value to a multi_value such as a struct,
// we simply assign the local_index to the rhs one.
// This allows us to update struct fields without having to individually
// set each local as each field's index will be calculated off the struct's base index
.multi_value => self.values.put(self.gpa, Air.refToIndex(bin_op.lhs).?, rhs) catch unreachable, // Instruction does not dominate all uses!
.constant, .none => {
// emit all values onto the stack if constant
try self.emitWValue(rhs);
const offset: u32 = switch (lhs) {
.local_with_offset => |with_off| with_off.offset,
else => 0,
};
// for each local, pop the stack value into the local
// As the last element is on top of the stack, we must populate the locals
// in reverse.
var i: u32 = multi_value.count;
while (i > 0) : (i -= 1) {
try self.addLabel(.local_set, multi_value.index + i - 1);
}
},
.local => {
// This can occur when we wrap a single value into a multi-value,
// such as wrapping a non-optional value into an optional.
// This means we must zero the null-tag, and set the payload.
assert(multi_value.count == 2);
// set payload
try self.emitWValue(rhs);
try self.addLabel(.local_set, multi_value.index + 1);
},
else => unreachable,
},
.local => |local| {
try self.emitWValue(rhs);
try self.addLabel(.local_set, local);
},
else => unreachable,
}
try self.store(lhs, rhs, ty, offset);
return .none;
}
/// Stores `rhs` into the memory location pointed at by `lhs`, at `offset`
/// bytes past it. Multi-typed values (error unions, optionals, structs) are
/// decomposed and stored piecewise via recursive calls; everything else is
/// emitted as a single wasm store instruction sized to the type's ABI size.
fn store(self: *Self, lhs: WValue, rhs: WValue, ty: Type, offset: u32) InnerError!void {
switch (ty.zigTypeTag()) {
.ErrorUnion, .Optional => {
var buf: Type.Payload.ElemType = undefined;
const payload_ty = if (ty.zigTypeTag() == .ErrorUnion) ty.errorUnionPayload() else ty.optionalChild(&buf);
// Error unions carry an error-set tag; optionals a u8 null-tag.
const tag_ty = if (ty.zigTypeTag() == .ErrorUnion) ty.errorUnionSet() else Type.initTag(.u8);
// For error unions the payload follows the tag; for optionals the
// payload's position is derived from the ABI sizes instead.
const payload_offset = if (ty.zigTypeTag() == .ErrorUnion)
@intCast(u32, tag_ty.abiSize(self.target))
else
@intCast(u32, ty.abiSize(self.target) - payload_ty.abiSize(self.target));
switch (rhs) {
.constant => {
// constant will contain both tag and payload,
// so save those in 2 temporary locals before storing them
// in memory
try self.emitWValue(rhs);
const tag_local = try self.allocLocal(tag_ty);
const payload_local = try self.allocLocal(payload_ty);
// payload is on top of the wasm value stack, so pop it first
try self.addLabel(.local_set, payload_local.local);
try self.addLabel(.local_set, tag_local.local);
try self.store(lhs, tag_local, tag_ty, 0);
return try self.store(lhs, payload_local, payload_ty, payload_offset);
},
.local => {
// Load values from `rhs` stack position and store in `lhs` instead
const tag_local = try self.load(rhs, tag_ty, 0);
const payload_local = try self.load(rhs, payload_ty, payload_offset);
try self.store(lhs, tag_local, tag_ty, 0);
return try self.store(lhs, payload_local, payload_ty, payload_offset);
},
.local_with_offset => |with_offset| {
// presumably a payload being wrapped: store a zero tag, then
// the payload from its offset position — TODO confirm against callers
const tag_local = try self.allocLocal(tag_ty);
try self.addImm32(0);
try self.store(lhs, tag_local, tag_ty, 0);
return try self.store(
lhs,
.{ .local = with_offset.local },
payload_ty,
with_offset.offset,
);
},
else => unreachable,
}
},
.Struct => {
// we are copying a struct with its fields.
// Replace this with a wasm memcpy instruction once we support that feature.
const fields_len = ty.structFieldCount();
var index: usize = 0;
while (index < fields_len) : (index += 1) {
const field_ty = ty.structFieldType(index);
// zero-bit fields occupy no memory; skip them
if (!field_ty.hasCodeGenBits()) continue;
const field_offset = std.math.cast(u32, ty.structFieldOffset(index, self.target)) catch {
return self.fail("Field type '{}' too big to fit into stack frame", .{field_ty});
};
// load each field from `rhs` and store it at the same offset in `lhs`
const field_local = try self.load(rhs, field_ty, field_offset);
try self.store(lhs, field_local, field_ty, field_offset);
}
return;
},
else => {},
}
// Single-valued type: emit `<lhs> <rhs> <store-op>` directly.
try self.emitWValue(lhs);
try self.emitWValue(rhs);
const valtype = try self.typeToValtype(ty);
const opcode = buildOpcode(.{
.valtype1 = valtype,
.width = @intCast(u8, Type.abiSize(ty, self.target) * 8), // use bitsize instead of byte size
.op = .store,
});
// store rhs value at stack pointer's location in memory
const mem_arg_index = try self.addExtra(Mir.MemArg{
.offset = offset,
.alignment = ty.abiAlignment(self.target),
});
try self.addInst(.{
.tag = Mir.Inst.Tag.fromOpcode(opcode),
.data = .{ .payload = mem_arg_index },
});
}
fn airLoad(self: *Self, inst: Air.Inst.Index) InnerError!WValue {
const ty_op = self.air.instructions.items(.data)[inst].ty_op;
return self.resolveInst(ty_op.operand);
const operand = self.resolveInst(ty_op.operand);
const ty = self.air.getRefType(ty_op.ty);
return switch (ty.zigTypeTag()) {
.Struct, .ErrorUnion, .Optional => operand, // pass as pointer
else => switch (operand) {
.local_with_offset => |with_offset| try self.load(operand, ty, with_offset.offset),
else => try self.load(operand, ty, 0),
},
};
}
/// Emits a wasm load of type `ty` from the memory location `operand` points
/// at, `offset` bytes past it, and stores the result in a freshly allocated
/// local. Returns that local as a `WValue`.
fn load(self: *Self, operand: WValue, ty: Type, offset: u32) InnerError!WValue {
// load local's value from memory by its stack position
try self.emitWValue(operand);
// Build the opcode with the right bitsize
// (signed loads sign-extend; unsigned loads zero-extend)
const signedness: std.builtin.Signedness = if (ty.isUnsignedInt()) .unsigned else .signed;
const opcode = buildOpcode(.{
.valtype1 = try self.typeToValtype(ty),
.width = @intCast(u8, Type.abiSize(ty, self.target) * 8), // use bitsize instead of byte size
.op = .load,
.signedness = signedness,
});
// memarg immediate: static offset and alignment for the load instruction
const mem_arg_index = try self.addExtra(Mir.MemArg{
.offset = offset,
.alignment = ty.abiAlignment(self.target),
});
try self.addInst(.{
.tag = Mir.Inst.Tag.fromOpcode(opcode),
.data = .{ .payload = mem_arg_index },
});
// store the result in a local
const result = try self.allocLocal(ty);
try self.addLabel(.local_set, result.local);
return result;
}
fn airArg(self: *Self, inst: Air.Inst.Index) InnerError!WValue {
_ = inst;
// arguments share the index with locals
defer self.local_index += 1;
return WValue{ .local = self.local_index };
defer self.arg_index += 1;
return self.args[self.arg_index];
}
fn airBinOp(self: *Self, inst: Air.Inst.Index, op: Op) InnerError!WValue {
@ -1063,14 +1260,6 @@ fn airBinOp(self: *Self, inst: Air.Inst.Index, op: Op) InnerError!WValue {
const lhs = self.resolveInst(bin_op.lhs);
const rhs = self.resolveInst(bin_op.rhs);
// it's possible for both lhs and/or rhs to return an offset as well,
// in which case we return the first offset occurrence we find.
const offset = blk: {
if (lhs == .mir_offset) break :blk lhs.mir_offset;
if (rhs == .mir_offset) break :blk rhs.mir_offset;
break :blk self.mir_instructions.len;
};
try self.emitWValue(lhs);
try self.emitWValue(rhs);
@ -1081,7 +1270,11 @@ fn airBinOp(self: *Self, inst: Air.Inst.Index, op: Op) InnerError!WValue {
.signedness = if (bin_ty.isSignedInt()) .signed else .unsigned,
});
try self.addTag(Mir.Inst.Tag.fromOpcode(opcode));
return WValue{ .mir_offset = offset };
// save the result in a temporary
const bin_local = try self.allocLocal(bin_ty);
try self.addLabel(.local_set, bin_local.local);
return bin_local;
}
fn airWrapBinOp(self: *Self, inst: Air.Inst.Index, op: Op) InnerError!WValue {
@ -1089,14 +1282,6 @@ fn airWrapBinOp(self: *Self, inst: Air.Inst.Index, op: Op) InnerError!WValue {
const lhs = self.resolveInst(bin_op.lhs);
const rhs = self.resolveInst(bin_op.rhs);
// it's possible for both lhs and/or rhs to return an offset as well,
// in which case we return the first offset occurrence we find.
const offset = blk: {
if (lhs == .mir_offset) break :blk lhs.mir_offset;
if (rhs == .mir_offset) break :blk rhs.mir_offset;
break :blk self.mir_instructions.len;
};
try self.emitWValue(lhs);
try self.emitWValue(rhs);
@ -1135,7 +1320,10 @@ fn airWrapBinOp(self: *Self, inst: Air.Inst.Index, op: Op) InnerError!WValue {
return self.fail("TODO wasm: Integer wrapping for bitsizes larger than 64", .{});
}
return WValue{ .mir_offset = offset };
// save the result in a temporary
const bin_local = try self.allocLocal(bin_ty);
try self.addLabel(.local_set, bin_local.local);
return bin_local;
}
fn emitConstant(self: *Self, val: Value, ty: Type) InnerError!void {
@ -1176,7 +1364,7 @@ fn emitConstant(self: *Self, val: Value, ty: Type) InnerError!void {
// memory instruction followed by their memarg immediate
// memarg ::== x:u32, y:u32 => {align x, offset y}
const extra_index = try self.addExtra(Mir.MemArg{ .offset = 0, .alignment = 0 });
const extra_index = try self.addExtra(Mir.MemArg{ .offset = 0, .alignment = 4 });
try self.addInst(.{ .tag = .i32_load, .data = .{ .payload = extra_index } });
} else return self.fail("Wasm TODO: emitConstant for other const pointer tag {s}", .{val.tag()});
},
@ -1289,21 +1477,29 @@ fn airBlock(self: *Self, inst: Air.Inst.Index) InnerError!WValue {
const extra = self.air.extraData(Air.Block, ty_pl.payload);
const body = self.air.extra[extra.end..][0..extra.data.body_len];
try self.startBlock(.block, block_ty, null);
// if block_ty is non-empty, we create a register to store the temporary value
const block_result: WValue = if (block_ty != wasm.block_empty)
try self.allocLocal(self.air.getRefType(ty_pl.ty))
else
WValue.none;
try self.startBlock(.block, wasm.block_empty);
// Here we set the current block idx, so breaks know the depth to jump
// to when breaking out.
try self.blocks.putNoClobber(self.gpa, inst, self.block_depth);
try self.blocks.putNoClobber(self.gpa, inst, .{
.label = self.block_depth,
.value = block_result,
});
try self.genBody(body);
try self.endBlock();
return .none;
return block_result;
}
/// appends a new wasm block to the code section and increases the `block_depth` by 1
fn startBlock(self: *Self, block_tag: wasm.Opcode, valtype: u8, with_offset: ?usize) !void {
fn startBlock(self: *Self, block_tag: wasm.Opcode, valtype: u8) !void {
self.block_depth += 1;
const offset = with_offset orelse self.mir_instructions.len;
try self.addInstAt(offset, .{
try self.addInst(.{
.tag = Mir.Inst.Tag.fromOpcode(block_tag),
.data = .{ .block_type = valtype },
});
@ -1322,7 +1518,7 @@ fn airLoop(self: *Self, inst: Air.Inst.Index) InnerError!WValue {
// result type of loop is always 'noreturn', meaning we can always
// emit the wasm type 'block_empty'.
try self.startBlock(.loop, wasm.block_empty, null);
try self.startBlock(.loop, wasm.block_empty);
try self.genBody(body);
// breaking to the index of a loop block will continue the loop instead
@ -1340,19 +1536,10 @@ fn airCondBr(self: *Self, inst: Air.Inst.Index) InnerError!WValue {
const else_body = self.air.extra[extra.end + then_body.len ..][0..extra.data.else_body_len];
// TODO: Handle death instructions for then and else body
// insert blocks at the position of `offset` so
// the condition can jump to it
const offset = switch (condition) {
.mir_offset => |offset| offset,
else => blk: {
const offset = self.mir_instructions.len;
try self.emitWValue(condition);
break :blk offset;
},
};
// result type is always noreturn, so use `block_empty` as type.
try self.startBlock(.block, wasm.block_empty, offset);
try self.startBlock(.block, wasm.block_empty);
// emit the conditional value
try self.emitWValue(condition);
// we inserted the block in front of the condition
// so now check if condition matches. If not, break outside this block
@ -1369,10 +1556,6 @@ fn airCondBr(self: *Self, inst: Air.Inst.Index) InnerError!WValue {
}
fn airCmp(self: *Self, inst: Air.Inst.Index, op: std.math.CompareOperator) InnerError!WValue {
// save offset, so potential conditions can insert blocks in front of
// the comparison that we can later jump back to
const offset = self.mir_instructions.len;
const data: Air.Inst.Data = self.air.instructions.items(.data)[inst];
const lhs = self.resolveInst(data.bin_op.lhs);
const rhs = self.resolveInst(data.bin_op.rhs);
@ -1401,20 +1584,28 @@ fn airCmp(self: *Self, inst: Air.Inst.Index, op: std.math.CompareOperator) Inner
.signedness = signedness,
});
try self.addTag(Mir.Inst.Tag.fromOpcode(opcode));
return WValue{ .mir_offset = offset };
const cmp_tmp = try self.allocLocal(lhs_ty);
try self.addLabel(.local_set, cmp_tmp.local);
return cmp_tmp;
}
fn airBr(self: *Self, inst: Air.Inst.Index) InnerError!WValue {
const br = self.air.instructions.items(.data)[inst].br;
const block = self.blocks.get(br.block_inst).?;
// if operand has codegen bits we should break with a value
if (self.air.typeOf(br.operand).hasCodeGenBits()) {
try self.emitWValue(self.resolveInst(br.operand));
if (block.value != .none) {
try self.addLabel(.local_set, block.value.local);
}
}
// We map every block to its block index.
// We then determine how far we have to jump to it by subtracting it from current block depth
const idx: u32 = self.block_depth - self.blocks.get(br.block_inst).?;
const idx: u32 = self.block_depth - block.label;
try self.addLabel(.br, idx);
return .none;
@ -1422,7 +1613,6 @@ fn airBr(self: *Self, inst: Air.Inst.Index) InnerError!WValue {
fn airNot(self: *Self, inst: Air.Inst.Index) InnerError!WValue {
const ty_op = self.air.instructions.items(.data)[inst].ty_op;
const offset = self.mir_instructions.len;
const operand = self.resolveInst(ty_op.operand);
try self.emitWValue(operand);
@ -1432,7 +1622,10 @@ fn airNot(self: *Self, inst: Air.Inst.Index) InnerError!WValue {
try self.addImm32(0);
try self.addTag(.i32_eq);
return WValue{ .mir_offset = offset };
// save the result in the local
const not_tmp = try self.allocLocal(self.air.getRefType(ty_op.ty));
try self.addLabel(.local_set, not_tmp.local);
return not_tmp;
}
fn airBreakpoint(self: *Self, inst: Air.Inst.Index) InnerError!WValue {
@ -1458,24 +1651,45 @@ fn airStructFieldPtr(self: *Self, inst: Air.Inst.Index) InnerError!WValue {
const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
const extra = self.air.extraData(Air.StructField, ty_pl.payload);
const struct_ptr = self.resolveInst(extra.data.struct_operand);
return structFieldPtr(struct_ptr, extra.data.field_index);
const struct_ty = self.air.typeOf(extra.data.struct_operand).childType();
const offset = std.math.cast(u32, struct_ty.structFieldOffset(extra.data.field_index, self.target)) catch {
return self.fail("Field type '{}' too big to fit into stack frame", .{
struct_ty.structFieldType(extra.data.field_index),
});
};
return structFieldPtr(struct_ptr, offset);
}
fn airStructFieldPtrIndex(self: *Self, inst: Air.Inst.Index, index: u32) InnerError!WValue {
const ty_op = self.air.instructions.items(.data)[inst].ty_op;
const struct_ptr = self.resolveInst(ty_op.operand);
return structFieldPtr(struct_ptr, index);
const struct_ty = self.air.typeOf(ty_op.operand).childType();
const offset = std.math.cast(u32, struct_ty.structFieldOffset(index, self.target)) catch {
return self.fail("Field type '{}' too big to fit into stack frame", .{
struct_ty.structFieldType(index),
});
};
return structFieldPtr(struct_ptr, offset);
}
fn structFieldPtr(struct_ptr: WValue, index: u32) InnerError!WValue {
return WValue{ .local = struct_ptr.multi_value.index + index };
fn structFieldPtr(struct_ptr: WValue, offset: u32) InnerError!WValue {
return WValue{ .local_with_offset = .{ .local = struct_ptr.local, .offset = offset } };
}
fn airStructFieldVal(self: *Self, inst: Air.Inst.Index) InnerError!WValue {
if (self.liveness.isUnused(inst)) return WValue.none;
const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
const extra = self.air.extraData(Air.StructField, ty_pl.payload).data;
const struct_multivalue = self.resolveInst(extra.struct_operand).multi_value;
return WValue{ .local = struct_multivalue.index + extra.field_index };
const struct_field = self.air.extraData(Air.StructField, ty_pl.payload).data;
const struct_ty = self.air.typeOf(struct_field.struct_operand);
const operand = self.resolveInst(struct_field.struct_operand);
const field_index = struct_field.field_index;
const field_ty = struct_ty.structFieldType(field_index);
if (!field_ty.hasCodeGenBits()) return WValue.none;
const offset = std.math.cast(u32, struct_ty.structFieldOffset(field_index, self.target)) catch {
return self.fail("Field type '{}' too big to fit into stack frame", .{field_ty});
};
return try self.load(operand, field_ty, offset);
}
fn airSwitchBr(self: *Self, inst: Air.Inst.Index) InnerError!WValue {
@ -1521,7 +1735,7 @@ fn airSwitchBr(self: *Self, inst: Air.Inst.Index) InnerError!WValue {
}
case_list.appendAssumeCapacity(.{ .values = values, .body = case_body });
try self.startBlock(.block, blocktype, null);
try self.startBlock(.block, blocktype);
}
// When the highest and lowest values are separated by '50',
@ -1534,7 +1748,7 @@ fn airSwitchBr(self: *Self, inst: Air.Inst.Index) InnerError!WValue {
const else_body = self.air.extra[extra_index..][0..switch_br.data.else_body_len];
const has_else_body = else_body.len != 0;
if (has_else_body) {
try self.startBlock(.block, blocktype, null);
try self.startBlock(.block, blocktype);
}
if (!is_sparse) {
@ -1542,7 +1756,7 @@ fn airSwitchBr(self: *Self, inst: Air.Inst.Index) InnerError!WValue {
// The value 'target' represents the index into the table.
// Each index in the table represents a label to the branch
// to jump to.
try self.startBlock(.block, blocktype, null);
try self.startBlock(.block, blocktype);
try self.emitWValue(target);
if (lowest < 0) {
// since br_table works using indexes, starting from '0', we must ensure all values
@ -1598,7 +1812,7 @@ fn airSwitchBr(self: *Self, inst: Air.Inst.Index) InnerError!WValue {
try self.addLabel(.br_if, 0);
} else {
// in multi-value prongs we must check if any prongs match the target value.
try self.startBlock(.block, blocktype, null);
try self.startBlock(.block, blocktype);
for (case.values) |value| {
try self.emitWValue(target);
try self.emitConstant(value.value, target_ty);
@ -1629,30 +1843,40 @@ fn airSwitchBr(self: *Self, inst: Air.Inst.Index) InnerError!WValue {
/// Lowers an AIR `is_err`-style check on an error union operand.
/// `opcode` is the wasm comparison opcode applied against the constant 0
/// (the "no error" tag value), and the truthiness result is returned as a `WValue`.
/// NOTE(review): this span interleaves two versions of the body (a diff
/// rendering without +/- markers): everything after the first `return` is
/// unreachable as written — confirm which implementation is intended.
fn airIsErr(self: *Self, inst: Air.Inst.Index, opcode: wasm.Opcode) InnerError!WValue {
const un_op = self.air.instructions.items(.data)[inst].un_op;
const operand = self.resolveInst(un_op);
// NOTE(review): `offset` only feeds the `.mir_offset` return below — appears to be a remnant of the old implementation.
const offset = self.mir_instructions.len;
const err_ty = self.air.typeOf(un_op).errorUnionSet();
// load the error tag value
try self.emitWValue(operand);
// Memory argument for the 16-bit tag load: offset 0, aligned to the error set's ABI alignment.
const mem_arg_index = try self.addExtra(Mir.MemArg{
.offset = 0,
.alignment = err_ty.abiAlignment(self.target),
});
// Emit an unsigned 16-bit load of the error tag onto the wasm stack.
try self.addInst(.{
.tag = .i32_load16_u,
.data = .{ .payload = mem_arg_index },
});
// load the error value which is positioned at multi_value's index
try self.emitWValue(.{ .local = operand.multi_value.index });
// Compare the error value with '0'
try self.addImm32(0);
try self.addTag(Mir.Inst.Tag.fromOpcode(opcode));
return WValue{ .mir_offset = offset };
// NOTE(review): unreachable after the `return` above — presumably the
// replacement body, which stores the comparison result into a fresh local.
const is_err_tmp = try self.allocLocal(err_ty);
try self.addLabel(.local_set, is_err_tmp.local);
return is_err_tmp;
}
/// Lowers unwrapping the payload out of an error union value.
/// NOTE(review): this span interleaves two versions of the body (a diff
/// rendering without +/- markers): the statements after the first `return`
/// are unreachable as written — confirm which implementation is intended.
fn airUnwrapErrUnionPayload(self: *Self, inst: Air.Inst.Index) InnerError!WValue {
const ty_op = self.air.instructions.items(.data)[inst].ty_op;
const operand = self.resolveInst(ty_op.operand);
// The index of multi_value contains the error code. To get the initial index of the payload we get
// the following index. Next, convert it to a `WValue.local`
//
// TODO: Check if payload is a type that requires a multi_value as well and emit that instead. i.e. a struct.
return WValue{ .local = operand.multi_value.index + 1 };
// NOTE(review): unreachable replacement body — loads the payload from the
// stack position at the offset just past the error set's ABI size.
const err_ty = self.air.typeOf(ty_op.operand);
const offset = @intCast(u32, err_ty.errorUnionSet().abiSize(self.target));
return self.load(operand, err_ty.errorUnionPayload(), offset);
}
/// Lowers wrapping a payload value into an error union.
/// NOTE(review): two bodies appear here (a diff rendering without +/- markers);
/// the lines after the first `return` are unreachable as written — one version
/// forwards the operand unchanged, the other is a TODO stub — confirm intent.
fn airWrapErrUnionPayload(self: *Self, inst: Air.Inst.Index) InnerError!WValue {
const ty_op = self.air.instructions.items(.data)[inst].ty_op;
return self.resolveInst(ty_op.operand);
_ = ty_op;
return self.fail("TODO: wasm airWrapErrUnionPayload", .{});
}
fn airIntcast(self: *Self, inst: Air.Inst.Index) InnerError!WValue {
@ -1682,22 +1906,41 @@ fn airIsNull(self: *Self, inst: Air.Inst.Index, opcode: wasm.Opcode) InnerError!
const un_op = self.air.instructions.items(.data)[inst].un_op;
const operand = self.resolveInst(un_op);
// load the null value which is positioned at multi_value's index
try self.emitWValue(.{ .local = operand.multi_value.index });
// load the null tag value
try self.emitWValue(operand);
const mem_arg_index = try self.addExtra(Mir.MemArg{ .offset = 0, .alignment = 1 });
try self.addInst(.{
.tag = .i32_load8_u,
.data = .{ .payload = mem_arg_index },
});
// Compare the null tag value with '0'
try self.addImm32(0);
try self.addTag(Mir.Inst.Tag.fromOpcode(opcode));
// we save the result in a new local
const local = try self.allocLocal(Type.initTag(.i32));
try self.addLabel(.local_set, local.local);
return local;
const is_null_tmp = try self.allocLocal(Type.initTag(.u8));
try self.addLabel(.local_set, is_null_tmp.local);
return is_null_tmp;
}
/// Lowers extracting the payload from an optional value.
/// NOTE(review): this span interleaves two versions of the body (a diff
/// rendering without +/- markers): everything after the first `return` is
/// unreachable as written — confirm which implementation is intended.
fn airOptionalPayload(self: *Self, inst: Air.Inst.Index) InnerError!WValue {
const ty_op = self.air.instructions.items(.data)[inst].ty_op;
const operand = self.resolveInst(ty_op.operand);
return WValue{ .local = operand.multi_value.index + 1 };
// NOTE(review): unreachable replacement body below.
const opt_ty = self.air.typeOf(ty_op.operand);
// For pointers we simply return its stack address, rather than
// loading its value
if (opt_ty.zigTypeTag() == .Pointer) {
return WValue{ .local_with_offset = .{ .local = operand.local, .offset = 1 } };
}
// Pointer-like optionals carry no separate null tag, so the operand already is the payload.
if (opt_ty.isPtrLikeOptional()) return operand;
var buf: Type.Payload.ElemType = undefined;
const child_ty = opt_ty.optionalChild(&buf);
// Payload sits after the null tag: offset = optional's ABI size - payload's ABI size.
const offset = opt_ty.abiSize(self.target) - child_ty.abiSize(self.target);
return self.load(operand, child_ty, @intCast(u32, offset));
}
fn airOptionalPayloadPtrSet(self: *Self, inst: Air.Inst.Index) InnerError!WValue {
@ -1709,5 +1952,14 @@ fn airOptionalPayloadPtrSet(self: *Self, inst: Air.Inst.Index) InnerError!WValue
/// Lowers wrapping a value into an optional.
/// NOTE(review): this span interleaves two versions of the body (a diff
/// rendering without +/- markers): everything after the first `return` is
/// unreachable as written — confirm which implementation is intended.
fn airWrapOptional(self: *Self, inst: Air.Inst.Index) InnerError!WValue {
const ty_op = self.air.instructions.items(.data)[inst].ty_op;
return self.resolveInst(ty_op.operand);
// NOTE(review): unreachable replacement body — points at the operand's stack
// slot offset past the null tag (optional size minus payload size).
const operand = self.resolveInst(ty_op.operand);
const op_ty = self.air.typeOf(ty_op.operand);
const optional_ty = self.air.getRefType(ty_op.ty);
const offset = optional_ty.abiSize(self.target) - op_ty.abiSize(self.target);
return WValue{ .local_with_offset = .{
.local = operand.local,
.offset = @intCast(u32, offset),
} };
}

View File

@ -60,8 +60,30 @@ pub fn emitMir(emit: *Emit) InnerError!void {
// memory instructions
.i32_load => try emit.emitMemArg(tag, inst),
.i64_load => try emit.emitMemArg(tag, inst),
.f32_load => try emit.emitMemArg(tag, inst),
.f64_load => try emit.emitMemArg(tag, inst),
.i32_load8_s => try emit.emitMemArg(tag, inst),
.i32_load8_u => try emit.emitMemArg(tag, inst),
.i32_load16_s => try emit.emitMemArg(tag, inst),
.i32_load16_u => try emit.emitMemArg(tag, inst),
.i64_load8_s => try emit.emitMemArg(tag, inst),
.i64_load8_u => try emit.emitMemArg(tag, inst),
.i64_load16_s => try emit.emitMemArg(tag, inst),
.i64_load16_u => try emit.emitMemArg(tag, inst),
.i64_load32_s => try emit.emitMemArg(tag, inst),
.i64_load32_u => try emit.emitMemArg(tag, inst),
.i32_store => try emit.emitMemArg(tag, inst),
.i64_store => try emit.emitMemArg(tag, inst),
.f32_store => try emit.emitMemArg(tag, inst),
.f64_store => try emit.emitMemArg(tag, inst),
.i32_store8 => try emit.emitMemArg(tag, inst),
.i32_store16 => try emit.emitMemArg(tag, inst),
.i64_store8 => try emit.emitMemArg(tag, inst),
.i64_store16 => try emit.emitMemArg(tag, inst),
.i64_store32 => try emit.emitMemArg(tag, inst),
// Instructions with an index that do not require relocations
.local_get => try emit.emitLabel(tag, inst),
.local_set => try emit.emitLabel(tag, inst),
.local_tee => try emit.emitLabel(tag, inst),
@ -229,7 +251,10 @@ fn emitMemArg(emit: *Emit, tag: Mir.Inst.Tag, inst: Mir.Inst.Index) !void {
const extra_index = emit.mir.instructions.items(.data)[inst].payload;
const mem_arg = emit.mir.extraData(Mir.MemArg, extra_index).data;
try emit.code.append(@enumToInt(tag));
try leb128.writeULEB128(emit.code.writer(), mem_arg.alignment);
// wasm encodes alignment as power of 2, rather than natural alignment
const encoded_alignment = mem_arg.alignment >> 1;
try leb128.writeULEB128(emit.code.writer(), encoded_alignment);
try leb128.writeULEB128(emit.code.writer(), mem_arg.offset);
}

View File

@ -97,11 +97,125 @@ pub const Inst = struct {
///
/// Uses `payload` of type `MemArg`.
i32_load = 0x28,
/// Loads a value from memory onto the stack, based on the signedness
/// and bitsize of the type.
///
/// Uses `payload` with type `MemArg`
i64_load = 0x29,
/// Loads a value from memory onto the stack, based on the signedness
/// and bitsize of the type.
///
/// Uses `payload` with type `MemArg`
f32_load = 0x2A,
/// Loads a value from memory onto the stack, based on the signedness
/// and bitsize of the type.
///
/// Uses `payload` with type `MemArg`
f64_load = 0x2B,
/// Loads a value from memory onto the stack, based on the signedness
/// and bitsize of the type.
///
/// Uses `payload` with type `MemArg`
i32_load8_s = 0x2C,
/// Loads a value from memory onto the stack, based on the signedness
/// and bitsize of the type.
///
/// Uses `payload` with type `MemArg`
i32_load8_u = 0x2D,
/// Loads a value from memory onto the stack, based on the signedness
/// and bitsize of the type.
///
/// Uses `payload` with type `MemArg`
i32_load16_s = 0x2E,
/// Loads a value from memory onto the stack, based on the signedness
/// and bitsize of the type.
///
/// Uses `payload` with type `MemArg`
i32_load16_u = 0x2F,
/// Loads a value from memory onto the stack, based on the signedness
/// and bitsize of the type.
///
/// Uses `payload` with type `MemArg`
i64_load8_s = 0x30,
/// Loads a value from memory onto the stack, based on the signedness
/// and bitsize of the type.
///
/// Uses `payload` with type `MemArg`
i64_load8_u = 0x31,
/// Loads a value from memory onto the stack, based on the signedness
/// and bitsize of the type.
///
/// Uses `payload` with type `MemArg`
i64_load16_s = 0x32,
/// Loads a value from memory onto the stack, based on the signedness
/// and bitsize of the type.
///
/// Uses `payload` with type `MemArg`
i64_load16_u = 0x33,
/// Loads a value from memory onto the stack, based on the signedness
/// and bitsize of the type.
///
/// Uses `payload` with type `MemArg`
i64_load32_s = 0x34,
/// Loads a value from memory onto the stack, based on the signedness
/// and bitsize of the type.
///
/// Uses `payload` with type `MemArg`
i64_load32_u = 0x35,
/// Pops 2 values from the stack, where the first value represents the value to write into memory
/// and the second value represents the offset into memory where the value must be written to.
/// This opcode is typed and expects the stack value's type to be equal to this opcode's type.
///
/// Uses `payload` of type `MemArg`.
i32_store = 0x36,
/// Pops 2 values from the stack, where the first value represents the value to write into memory
/// and the second value represents the offset into memory where the value must be written to.
/// This opcode is typed and expects the stack value's type to be equal to this opcode's type.
///
/// Uses `Payload` with type `MemArg`
i64_store = 0x37,
/// Pops 2 values from the stack, where the first value represents the value to write into memory
/// and the second value represents the offset into memory where the value must be written to.
/// This opcode is typed and expects the stack value's type to be equal to this opcode's type.
///
/// Uses `Payload` with type `MemArg`
f32_store = 0x38,
/// Pops 2 values from the stack, where the first value represents the value to write into memory
/// and the second value represents the offset into memory where the value must be written to.
/// This opcode is typed and expects the stack value's type to be equal to this opcode's type.
///
/// Uses `Payload` with type `MemArg`
f64_store = 0x39,
/// Pops 2 values from the stack, where the first value represents the value to write into memory
/// and the second value represents the offset into memory where the value must be written to.
/// This opcode is typed and expects the stack value's type to be equal to this opcode's type.
///
/// Uses `Payload` with type `MemArg`
i32_store8 = 0x3A,
/// Pops 2 values from the stack, where the first value represents the value to write into memory
/// and the second value represents the offset into memory where the value must be written to.
/// This opcode is typed and expects the stack value's type to be equal to this opcode's type.
///
/// Uses `Payload` with type `MemArg`
i32_store16 = 0x3B,
/// Pops 2 values from the stack, where the first value represents the value to write into memory
/// and the second value represents the offset into memory where the value must be written to.
/// This opcode is typed and expects the stack value's type to be equal to this opcode's type.
///
/// Uses `Payload` with type `MemArg`
i64_store8 = 0x3C,
/// Pops 2 values from the stack, where the first value represents the value to write into memory
/// and the second value represents the offset into memory where the value must be written to.
/// This opcode is typed and expects the stack value's type to be equal to this opcode's type.
///
/// Uses `Payload` with type `MemArg`
i64_store16 = 0x3D,
/// Pops 2 values from the stack, where the first value represents the value to write into memory
/// and the second value represents the offset into memory where the value must be written to.
/// This opcode is typed and expects the stack value's type to be equal to this opcode's type.
///
/// Uses `Payload` with type `MemArg`
i64_store32 = 0x3E,
/// Returns the memory size in amount of pages.
///
/// Uses `nop`
@ -247,7 +361,7 @@ pub const Inst = struct {
/// From a given wasm opcode, returns a MIR tag.
/// NOTE(review): duplicated return below (the second line is unreachable) —
/// appears to be a diff remnant; both map the opcode's numeric value directly
/// onto the `Tag` enum, which assumes every opcode used has a matching tag value.
pub fn fromOpcode(opcode: std.wasm.Opcode) Tag {
return @intToEnum(Tag, @enumToInt(opcode));
return @intToEnum(Tag, @enumToInt(opcode)); // Given `Opcode` is not present as a tag for MIR yet
}
/// Returns a wasm opcode from a given MIR tag.

View File

@ -404,6 +404,8 @@ pub fn flushModule(self: *Wasm, comp: *Compilation) !void {
// The table contains all decl's with its corresponding offset into
// the 'data' section
const offset_table_size = @intCast(u32, self.offset_table.items.len * ptr_width);
// The size of the emulated stack
const stack_size = @intCast(u32, self.base.options.stack_size_override orelse std.wasm.page_size);
// The size of the data, this together with `offset_table_size` amounts to the
// total size of the 'data' section
@ -487,7 +489,7 @@ pub fn flushModule(self: *Wasm, comp: *Compilation) !void {
}
// Memory section
if (data_size != 0) {
{
const header_offset = try reserveVecSectionHeader(file);
const writer = file.writer();
@ -498,7 +500,7 @@ pub fn flushModule(self: *Wasm, comp: *Compilation) !void {
writer,
try std.math.divCeil(
u32,
offset_table_size + data_size,
offset_table_size + data_size + stack_size,
std.wasm.page_size,
),
);
@ -511,6 +513,34 @@ pub fn flushModule(self: *Wasm, comp: *Compilation) !void {
);
}
// Global section (used to emit stack pointer)
{
// We emit the emulated stack at the end of the data section,
// 'growing' downwards towards the program memory.
// TODO: Have linker resolve the offset table, so we can emit the stack
// at the start so we can't overwrite program memory with the stack.
const sp_value = offset_table_size + data_size + std.wasm.page_size;
const mutable = true; // stack pointer MUST be mutable
const header_offset = try reserveVecSectionHeader(file);
const writer = file.writer();
try writer.writeByte(wasm.valtype(.i32));
try writer.writeByte(@boolToInt(mutable));
// set the initial value of the stack pointer to the data size + stack size
try writer.writeByte(wasm.opcode(.i32_const));
try leb.writeILEB128(writer, @bitCast(i32, sp_value));
try writer.writeByte(wasm.opcode(.end));
try writeVecSectionHeader(
file,
header_offset,
.global,
@intCast(u32, (try file.getPos()) - header_offset - header_size),
@as(u32, 1),
);
}
// Export section
if (self.base.options.module) |module| {
const header_offset = try reserveVecSectionHeader(file);

View File

@ -234,7 +234,7 @@ const Writer = struct {
/// Pretty-prints a `ty_str` AIR instruction: the escaped name string followed by its type.
/// NOTE(review): duplicated print below (the second line is unreachable) —
/// appears to be a diff remnant; the two versions differ only in printing the
/// raw `ty_str.ty` ref versus the resolved type via `getRefType` — confirm intent.
fn writeTyStr(w: *Writer, s: anytype, inst: Air.Inst.Index) @TypeOf(s).Error!void {
const ty_str = w.air.instructions.items(.data)[inst].ty_str;
const name = w.zir.nullTerminatedString(ty_str.str);
try s.print("\"{}\", {}", .{ std.zig.fmtEscapes(name), ty_str.ty });
try s.print("\"{}\", {}", .{ std.zig.fmtEscapes(name), w.air.getRefType(ty_str.ty) });
}
fn writeBinOp(w: *Writer, s: anytype, inst: Air.Inst.Index) @TypeOf(s).Error!void {

View File

@ -740,4 +740,39 @@ pub fn addCases(ctx: *TestContext) !void {
\\}
, "0\n");
}
{
var case = ctx.exe("wasm pointers", wasi);
case.addCompareOutput(
\\pub export fn _start() u32 {
\\ var x: u32 = 0;
\\
\\ foo(&x);
\\ return x;
\\}
\\
\\fn foo(x: *u32)void {
\\ x.* = 2;
\\}
, "2\n");
case.addCompareOutput(
\\pub export fn _start() u32 {
\\ var x: u32 = 0;
\\
\\ foo(&x);
\\ bar(&x);
\\ return x;
\\}
\\
\\fn foo(x: *u32)void {
\\ x.* = 2;
\\}
\\
\\fn bar(x: *u32) void {
\\ x.* += 2;
\\}
, "4\n");
}
}