Merge pull request #10780 from Luukdegram/wasm-behavior-tests

stage2: Wasm - Account for stack alignment
Andrew Kelley 2022-02-03 20:23:46 -05:00 committed by GitHub
commit 71e0cca7a7
10 changed files with 338 additions and 97 deletions


@ -39,6 +39,14 @@ const WValue = union(enum) {
/// Note: The value contains the symbol index, rather than the actual address
/// as we use this to perform the relocation.
memory: u32,
/// A value that represents a parent pointer and an offset
/// from that pointer, e.g. when slicing with constant values.
memory_offset: struct {
/// The symbol of the parent pointer
pointer: u32,
/// Offset will be set as addend when relocating
offset: u32,
},
/// Represents a function pointer
/// In wasm, function pointers are indexes into a function table
/// rather than addresses in the data section.
@ -552,6 +560,9 @@ mir_extra: std.ArrayListUnmanaged(u32) = .{},
/// When a function is executing, we store the current stack pointer's value within this local.
/// This value is then used to restore the stack pointer to the original value at the return of the function.
initial_stack_value: WValue = .none,
/// The current stack pointer minus the stack size. From this value we calculate
/// the offsets of all stack values.
bottom_stack_value: WValue = .none,
/// Arguments of this function declaration
/// This will be set after `resolveCallingConventionValues`
args: []WValue = &.{},
@ -559,6 +570,14 @@ args: []WValue = &.{},
/// When it returns a pointer to the stack, the `.local` tag will be active and must be populated
/// before this function returns its execution to the caller.
return_value: WValue = .none,
/// The size of the stack this function occupies. In the function prologue
/// we move the stack pointer down by this amount, aligned forward to `stack_alignment`.
stack_size: u32 = 0,
/// The stack alignment, which is 16 bytes by default. This is specified by the
/// tool-conventions: https://github.com/WebAssembly/tool-conventions/blob/main/BasicCABI.md
/// and matches what the LLVM backend emits.
/// However, local variables or the use of `@setAlignStack` can override this default.
stack_alignment: u32 = 16,
const InnerError = error{
OutOfMemory,
@ -598,7 +617,10 @@ fn resolveInst(self: *Self, ref: Air.Inst.Ref) InnerError!WValue {
// means we must generate it from a constant.
const val = self.air.value(ref).?;
const ty = self.air.typeOf(ref);
if (!ty.hasRuntimeBits() and !ty.isInt()) return WValue{ .none = {} };
if (!ty.hasRuntimeBits() and !ty.isInt()) {
gop.value_ptr.* = WValue{ .none = {} };
return gop.value_ptr.*;
}
// When we need to pass the value by reference (such as a struct), we will
// leverage `genTypedValue` to lower the constant to bytes and emit it
@ -643,13 +665,6 @@ fn addInst(self: *Self, inst: Mir.Inst) error{OutOfMemory}!void {
try self.mir_instructions.append(self.gpa, inst);
}
/// Inserts a Mir instruction at the given `offset`.
/// Asserts offset is within bounds.
fn addInstAt(self: *Self, offset: usize, inst: Mir.Inst) error{OutOfMemory}!void {
try self.mir_instructions.ensureUnusedCapacity(self.gpa, 1);
self.mir_instructions.insertAssumeCapacity(offset, inst);
}
fn addTag(self: *Self, tag: Mir.Inst.Tag) error{OutOfMemory}!void {
try self.addInst(.{ .tag = tag, .data = .{ .tag = {} } });
}
@ -754,7 +769,14 @@ fn emitWValue(self: *Self, value: WValue) InnerError!void {
.imm64 => |val| try self.addImm64(val),
.float32 => |val| try self.addInst(.{ .tag = .f32_const, .data = .{ .float32 = val } }),
.float64 => |val| try self.addFloat64(val),
.memory => |ptr| try self.addLabel(.memory_address, ptr), // write symbol address and generate relocation
.memory => |ptr| {
const extra_index = try self.addExtra(Mir.Memory{ .pointer = ptr, .offset = 0 });
try self.addInst(.{ .tag = .memory_address, .data = .{ .payload = extra_index } });
},
.memory_offset => |mem_off| {
const extra_index = try self.addExtra(Mir.Memory{ .pointer = mem_off.pointer, .offset = mem_off.offset });
try self.addInst(.{ .tag = .memory_address, .data = .{ .payload = extra_index } });
},
.function_index => |index| try self.addLabel(.function_index, index), // write function index and generate relocation
}
}
@ -827,10 +849,43 @@ pub fn genFunc(self: *Self) InnerError!void {
try self.addTag(.@"unreachable");
}
}
// End of function body
try self.addTag(.end);
// Check whether we need to allocate anything in the stack frame.
// If so, create enough stack space and insert the prologue instructions at the front of the list.
if (self.stack_size > 0) {
var prologue = std.ArrayList(Mir.Inst).init(self.gpa);
defer prologue.deinit();
// load stack pointer
try prologue.append(.{ .tag = .global_get, .data = .{ .label = 0 } });
// store stack pointer so we can restore it when we return from the function
try prologue.append(.{ .tag = .local_tee, .data = .{ .label = self.initial_stack_value.local } });
// get the total stack size
const aligned_stack = std.mem.alignForwardGeneric(u32, self.stack_size, self.stack_alignment);
try prologue.append(.{ .tag = .i32_const, .data = .{ .imm32 = @intCast(i32, aligned_stack) } });
// subtract it from the current stack pointer
try prologue.append(.{ .tag = .i32_sub, .data = .{ .tag = {} } });
// Get the negative stack alignment
try prologue.append(.{ .tag = .i32_const, .data = .{ .imm32 = @intCast(i32, self.stack_alignment) * -1 } });
// AND the value so the new stack pointer is aligned to the ABI alignment
try prologue.append(.{ .tag = .i32_and, .data = .{ .tag = {} } });
// store the current stack pointer as the bottom, which will be used to calculate all stack pointer offsets
try prologue.append(.{ .tag = .local_tee, .data = .{ .label = self.bottom_stack_value.local } });
// Store the current stack pointer value into the global stack pointer so other function calls will
// start from this value instead and not overwrite the current stack.
try prologue.append(.{ .tag = .global_set, .data = .{ .label = 0 } });
// Reserve space and insert all prologue instructions at the front of the instruction list.
// We insert them in reverse order as there is no insertSlice in MultiArrayList.
try self.mir_instructions.ensureUnusedCapacity(self.gpa, prologue.items.len);
for (prologue.items) |_, index| {
const inst = prologue.items[prologue.items.len - 1 - index];
self.mir_instructions.insertAssumeCapacity(0, inst);
}
}
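// As a sketch (local indices illustrative), the prologue built above
// corresponds to the following WebAssembly text:
//   global.get 0        ;; load the stack pointer
//   local.tee $initial  ;; remember it so the epilogue can restore it
//   i32.const <aligned_stack>
//   i32.sub             ;; reserve the stack frame
//   i32.const -<stack_alignment>
//   i32.and             ;; round the new stack pointer down to the ABI alignment
//   local.tee $bottom   ;; bottom of the frame; all stack offsets are relative to this
//   global.set 0        ;; publish the new stack pointer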
var mir: Mir = .{
.instructions = self.mir_instructions.toOwnedSlice(),
.extra = self.mir_extra.toOwnedSlice(self.gpa),
@ -927,7 +982,7 @@ pub const DeclGen = struct {
.function => val.castTag(.function).?.data.owner_decl,
else => unreachable,
};
return try self.lowerDeclRef(ty, val, fn_decl);
return try self.lowerDeclRefValue(ty, val, fn_decl, 0);
},
.Optional => {
var opt_buf: Type.Payload.ElemType = undefined;
@ -1115,11 +1170,11 @@ pub const DeclGen = struct {
.Pointer => switch (val.tag()) {
.variable => {
const decl = val.castTag(.variable).?.data.owner_decl;
return self.lowerDeclRef(ty, val, decl);
return self.lowerDeclRefValue(ty, val, decl, 0);
},
.decl_ref => {
const decl = val.castTag(.decl_ref).?.data;
return self.lowerDeclRef(ty, val, decl);
return self.lowerDeclRefValue(ty, val, decl, 0);
},
.slice => {
const slice = val.castTag(.slice).?.data;
@ -1139,6 +1194,13 @@ pub const DeclGen = struct {
try writer.writeByteNTimes(0, @divExact(self.target().cpu.arch.ptrBitWidth(), 8));
return Result{ .appended = {} };
},
.elem_ptr => {
const elem_ptr = val.castTag(.elem_ptr).?.data;
const elem_size = ty.childType().abiSize(self.target());
const offset = elem_ptr.index * elem_size;
return self.lowerParentPtr(elem_ptr.array_ptr, @intCast(usize, offset));
},
.int_u64 => return self.genTypedValue(Type.usize, val),
else => return self.fail("TODO: Implement zig decl gen for pointer type value: '{s}'", .{@tagName(val.tag())}),
},
.ErrorUnion => {
@ -1179,7 +1241,36 @@ pub const DeclGen = struct {
}
}
fn lowerDeclRef(self: *DeclGen, ty: Type, val: Value, decl: *Module.Decl) InnerError!Result {
fn lowerParentPtr(self: *DeclGen, ptr_value: Value, offset: usize) InnerError!Result {
switch (ptr_value.tag()) {
.decl_ref => {
const decl = ptr_value.castTag(.decl_ref).?.data;
return self.lowerParentPtrDecl(ptr_value, decl, offset);
},
else => |tag| return self.fail("TODO: Implement lowerParentPtr for pointer value tag: {s}", .{tag}),
}
}
fn lowerParentPtrDecl(self: *DeclGen, ptr_val: Value, decl: *Module.Decl, offset: usize) InnerError!Result {
decl.markAlive();
var ptr_ty_payload: Type.Payload.ElemType = .{
.base = .{ .tag = .single_mut_pointer },
.data = decl.ty,
};
const ptr_ty = Type.initPayload(&ptr_ty_payload.base);
return self.lowerDeclRefValue(ptr_ty, ptr_val, decl, offset);
}
fn lowerDeclRefValue(
self: *DeclGen,
ty: Type,
val: Value,
/// The target decl that is being pointed to
decl: *Module.Decl,
/// When lowering to an indexed pointer, we can specify the offset
/// which will then be used as 'addend' to the relocation.
offset: usize,
) InnerError!Result {
const writer = self.code.writer();
if (ty.isSlice()) {
var buf: Type.SlicePtrFieldTypeBuffer = undefined;
@ -1202,6 +1293,7 @@ pub const DeclGen = struct {
self.symbol_index, // source symbol index
decl.link.wasm.sym_index, // target symbol index
@intCast(u32, self.code.items.len), // offset
@intCast(u32, offset), // addend
));
return Result{ .appended = {} };
}
@ -1254,22 +1346,16 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type) InnerError!CallWValu
return result;
}
/// Retrieves the stack pointer's value from the global variable and stores
/// it in a local
/// Creates a local for the initial stack value
/// Asserts `initial_stack_value` is `.none`
fn initializeStack(self: *Self) !void {
assert(self.initial_stack_value == .none);
// reserve space for immediate value
// get stack pointer global
try self.addLabel(.global_get, 0);
// Reserve a local to store the current stack pointer
// We can later use this local to set the stack pointer back to the value
// we have stored here.
self.initial_stack_value = try self.allocLocal(Type.initTag(.i32));
// save the value to the local
try self.addLabel(.local_set, self.initial_stack_value.local);
self.initial_stack_value = try self.allocLocal(Type.usize);
// Also reserve a local to store the bottom stack value
self.bottom_stack_value = try self.allocLocal(Type.usize);
}
/// Reads the stack pointer from `Context.initial_stack_value` and writes it
@ -1284,36 +1370,75 @@ fn restoreStackPointer(self: *Self) !void {
try self.addLabel(.global_set, 0);
}
/// Moves the stack pointer by given `offset`
/// It does this by retrieving the stack pointer, subtracting `offset` and storing
/// the result back into the stack pointer.
fn moveStack(self: *Self, offset: u32, local: u32) !void {
if (offset == 0) return;
try self.addLabel(.global_get, 0);
try self.addImm32(@bitCast(i32, offset));
try self.addTag(.i32_sub);
try self.addLabel(.local_tee, local);
try self.addLabel(.global_set, 0);
/// Saves the stack position at the current stack size into a new local.
/// It does this by taking the bottom stack pointer, adding `self.stack_size`, and storing
/// the result in the local.
fn saveStack(self: *Self) !WValue {
const local = try self.allocLocal(Type.usize);
try self.addLabel(.local_get, self.bottom_stack_value.local);
try self.addImm32(@intCast(i32, self.stack_size));
try self.addTag(.i32_add);
try self.addLabel(.local_set, local.local);
return local;
}
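// As a sketch, with `stack_size` = 16 the sequence above lowers to:
//   local.get $bottom
//   i32.const 16
//   i32.add
//   local.set $new   ;; the fresh local returned as the WValue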
/// From a given type, will create space on the virtual stack to store the value of such type.
/// This returns a `WValue` with its active tag set to `local`, containing the index to the local
/// that points to the position on the virtual stack. This function should be used instead of
/// moveStack unless a local was already created to store the point.
/// moveStack unless a local was already created to store the pointer.
///
/// Asserts the given type has runtime bits.
fn allocStack(self: *Self, ty: Type) !WValue {
assert(ty.hasRuntimeBits());
if (self.initial_stack_value == .none) {
try self.initializeStack();
}
// calculate needed stack space
const abi_size = std.math.cast(u32, ty.abiSize(self.target)) catch {
return self.fail("Given type '{}' too big to fit into stack frame", .{ty});
return self.fail("Type {} with ABI size of {d} exceeds stack frame size", .{ ty, ty.abiSize(self.target) });
};
const abi_align = ty.abiAlignment(self.target);
// allocate a local using wasm's pointer size
const local = try self.allocLocal(Type.@"usize");
try self.moveStack(abi_size, local.local);
return local;
if (abi_align > self.stack_alignment) {
self.stack_alignment = abi_align;
}
const offset = std.mem.alignForwardGeneric(u32, self.stack_size, abi_align);
defer self.stack_size = offset + abi_size;
// store the stack pointer and return a local to it
return self.saveStack();
}
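// Worked example (a sketch): starting from stack_size = 0, allocating a u32
// (size 4, align 4) yields offset 0 and stack_size 4; a following u64
// (size 8, align 8) yields offset = alignForwardGeneric(4, 8) = 8 and
// stack_size 16. With the default 16-byte stack alignment, genFunc then
// reserves alignForwardGeneric(16, 16) = 16 bytes for the whole frame.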
/// From a given AIR instruction generates a pointer to the stack where
/// the value of its type will live.
/// This is different from allocStack where this will use the pointer's alignment
/// if it is set, to ensure the stack alignment will be set correctly.
fn allocStackPtr(self: *Self, inst: Air.Inst.Index) !WValue {
const ptr_ty = self.air.typeOfIndex(inst);
const pointee_ty = ptr_ty.childType();
if (self.initial_stack_value == .none) {
try self.initializeStack();
}
if (!pointee_ty.hasRuntimeBits()) {
return self.allocStack(Type.usize); // create a value containing just the stack pointer.
}
const abi_alignment = ptr_ty.ptrAlignment(self.target);
const abi_size = std.math.cast(u32, pointee_ty.abiSize(self.target)) catch {
return self.fail("Type {} with ABI size of {d} exceeds stack frame size", .{ pointee_ty, pointee_ty.abiSize(self.target) });
};
if (abi_alignment > self.stack_alignment) {
self.stack_alignment = abi_alignment;
}
const offset = std.mem.alignForwardGeneric(u32, self.stack_size, abi_alignment);
defer self.stack_size = offset + abi_size;
// store the stack pointer and return a local to it
return self.saveStack();
}
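// For example (a sketch): `var x: u8 align(32) = undefined;` produces a
// pointer type with alignment 32, so allocStackPtr raises stack_alignment
// from the default 16 to 32 and aligns the value's offset accordingly,
// whereas allocStack would only consider the u8's 1-byte ABI alignment.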
/// From given zig bitsize, returns the wasm bitsize
@ -1592,6 +1717,7 @@ fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void {
fn airRet(self: *Self, inst: Air.Inst.Index) InnerError!WValue {
const un_op = self.air.instructions.items(.data)[inst].un_op;
const operand = try self.resolveInst(un_op);
// The result must be stored on the stack and we return a pointer
// to it instead
if (self.return_value != .none) {
@ -1601,7 +1727,7 @@ fn airRet(self: *Self, inst: Air.Inst.Index) InnerError!WValue {
}
try self.restoreStackPointer();
try self.addTag(.@"return");
return .none;
return WValue{ .none = {} };
}
fn airRetPtr(self: *Self, inst: Air.Inst.Index) InnerError!WValue {
@ -1611,12 +1737,7 @@ fn airRetPtr(self: *Self, inst: Air.Inst.Index) InnerError!WValue {
if (isByRef(child_type, self.target)) {
return self.return_value;
}
// Initialize the stack
if (self.initial_stack_value == .none) {
try self.initializeStack();
}
return self.allocStack(child_type);
return self.allocStackPtr(inst);
}
fn airRetLoad(self: *Self, inst: Air.Inst.Index) InnerError!WValue {
@ -1708,20 +1829,7 @@ fn airCall(self: *Self, inst: Air.Inst.Index) InnerError!WValue {
}
fn airAlloc(self: *Self, inst: Air.Inst.Index) InnerError!WValue {
const pointee_type = self.air.typeOfIndex(inst).childType();
// Initialize the stack
if (self.initial_stack_value == .none) {
try self.initializeStack();
}
if (!pointee_type.hasRuntimeBits()) {
// when the pointee is zero-sized, we still want to create a pointer.
// but instead use a default pointer type as storage.
const zero_ptr = try self.allocStack(Type.usize);
return zero_ptr;
}
return self.allocStack(pointee_type);
return self.allocStackPtr(inst);
}
fn airStore(self: *Self, inst: Air.Inst.Index) InnerError!WValue {
@ -1741,11 +1849,10 @@ fn store(self: *Self, lhs: WValue, rhs: WValue, ty: Type, offset: u32) InnerErro
const err_ty = ty.errorUnionSet();
const pl_ty = ty.errorUnionPayload();
if (!pl_ty.hasRuntimeBits()) {
const err_val = try self.load(rhs, err_ty, 0);
return self.store(lhs, err_val, err_ty, 0);
return self.store(lhs, rhs, err_ty, 0);
}
return try self.memCopy(ty, lhs, rhs);
return self.memCopy(ty, lhs, rhs);
},
.Optional => {
if (ty.isPtrLikeOptional()) {
@ -1760,7 +1867,7 @@ fn store(self: *Self, lhs: WValue, rhs: WValue, ty: Type, offset: u32) InnerErro
return self.memCopy(ty, lhs, rhs);
},
.Struct, .Array, .Union => {
return try self.memCopy(ty, lhs, rhs);
return self.memCopy(ty, lhs, rhs);
},
.Pointer => {
if (ty.isSlice()) {
@ -1775,7 +1882,7 @@ fn store(self: *Self, lhs: WValue, rhs: WValue, ty: Type, offset: u32) InnerErro
}
},
.Int => if (ty.intInfo(self.target).bits > 64) {
return try self.memCopy(ty, lhs, rhs);
return self.memCopy(ty, lhs, rhs);
},
else => {},
}
@ -1974,6 +2081,17 @@ fn lowerConstant(self: *Self, val: Value, ty: Type) InnerError!WValue {
return WValue{ .function_index = target_sym_index };
} else return WValue{ .memory = target_sym_index };
},
.elem_ptr => {
const elem_ptr = val.castTag(.elem_ptr).?.data;
const index = elem_ptr.index;
const offset = index * ty.childType().abiSize(self.target);
const array_ptr = try self.lowerConstant(elem_ptr.array_ptr, ty);
return WValue{ .memory_offset = .{
.pointer = array_ptr.memory,
.offset = @intCast(u32, offset),
} };
},
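// For example (a sketch): lowering the constant `&array[2]`, where the
// element type has an ABI size of 4, resolves `array` to its symbol index
// and yields WValue{ .memory_offset = .{ .pointer = <array sym>, .offset = 8 } };
// the offset later becomes the relocation addend.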
.int_u64, .one => return WValue{ .imm32 = @intCast(u32, val.toUnsignedInt()) },
.zero, .null_value => return WValue{ .imm32 = 0 },
else => return self.fail("Wasm TODO: lowerConstant for other const pointer tag {s}", .{val.tag()}),
@ -2524,11 +2642,11 @@ fn airUnwrapErrUnionPayload(self: *Self, inst: Air.Inst.Index) InnerError!WValue
if (isByRef(payload_ty, self.target)) {
return self.buildPointerOffset(operand, offset, .new);
}
return try self.load(operand, payload_ty, offset);
return self.load(operand, payload_ty, offset);
}
fn airUnwrapErrUnionError(self: *Self, inst: Air.Inst.Index) InnerError!WValue {
if (self.liveness.isUnused(inst)) return WValue.none;
if (self.liveness.isUnused(inst)) return WValue{ .none = {} };
const ty_op = self.air.instructions.items(.data)[inst].ty_op;
const operand = try self.resolveInst(ty_op.operand);
@ -2538,11 +2656,12 @@ fn airUnwrapErrUnionError(self: *Self, inst: Air.Inst.Index) InnerError!WValue {
return operand;
}
return try self.load(operand, err_ty.errorUnionSet(), 0);
return self.load(operand, err_ty.errorUnionSet(), 0);
}
fn airWrapErrUnionPayload(self: *Self, inst: Air.Inst.Index) InnerError!WValue {
if (self.liveness.isUnused(inst)) return WValue.none;
if (self.liveness.isUnused(inst)) return WValue{ .none = {} };
const ty_op = self.air.instructions.items(.data)[inst].ty_op;
const operand = try self.resolveInst(ty_op.operand);
@ -2564,11 +2683,14 @@ fn airWrapErrUnionPayload(self: *Self, inst: Air.Inst.Index) InnerError!WValue {
}
fn airWrapErrUnionErr(self: *Self, inst: Air.Inst.Index) InnerError!WValue {
if (self.liveness.isUnused(inst)) return WValue.none;
if (self.liveness.isUnused(inst)) return WValue{ .none = {} };
const ty_op = self.air.instructions.items(.data)[inst].ty_op;
const operand = try self.resolveInst(ty_op.operand);
const err_ty = self.air.getRefType(ty_op.ty);
if (!err_ty.errorUnionPayload().hasRuntimeBits()) return operand;
const err_union = try self.allocStack(err_ty);
// TODO: Also write 'undefined' to the payload
try self.store(err_union, operand, err_ty.errorUnionSet(), 0);
@ -2750,16 +2872,16 @@ fn airSlice(self: *Self, inst: Air.Inst.Index) InnerError!WValue {
}
fn airSliceLen(self: *Self, inst: Air.Inst.Index) InnerError!WValue {
if (self.liveness.isUnused(inst)) return WValue.none;
if (self.liveness.isUnused(inst)) return WValue{ .none = {} };
const ty_op = self.air.instructions.items(.data)[inst].ty_op;
const operand = try self.resolveInst(ty_op.operand);
return try self.load(operand, Type.usize, self.ptrSize());
return self.load(operand, Type.usize, self.ptrSize());
}
fn airSliceElemVal(self: *Self, inst: Air.Inst.Index) InnerError!WValue {
if (self.liveness.isUnused(inst)) return WValue.none;
if (self.liveness.isUnused(inst)) return WValue{ .none = {} };
const bin_op = self.air.instructions.items(.data)[inst].bin_op;
const slice_ty = self.air.typeOf(bin_op.lhs);
@ -2784,7 +2906,7 @@ fn airSliceElemVal(self: *Self, inst: Air.Inst.Index) InnerError!WValue {
if (isByRef(elem_ty, self.target)) {
return result;
}
return try self.load(result, elem_ty, 0);
return self.load(result, elem_ty, 0);
}
fn airSliceElemPtr(self: *Self, inst: Air.Inst.Index) InnerError!WValue {
@ -2812,10 +2934,10 @@ fn airSliceElemPtr(self: *Self, inst: Air.Inst.Index) InnerError!WValue {
}
fn airSlicePtr(self: *Self, inst: Air.Inst.Index) InnerError!WValue {
if (self.liveness.isUnused(inst)) return WValue.none;
if (self.liveness.isUnused(inst)) return WValue{ .none = {} };
const ty_op = self.air.instructions.items(.data)[inst].ty_op;
const operand = try self.resolveInst(ty_op.operand);
return try self.load(operand, Type.usize, 0);
return self.load(operand, Type.usize, 0);
}
fn airTrunc(self: *Self, inst: Air.Inst.Index) InnerError!WValue {
@ -2880,7 +3002,7 @@ fn airTrunc(self: *Self, inst: Air.Inst.Index) InnerError!WValue {
fn airBoolToInt(self: *Self, inst: Air.Inst.Index) InnerError!WValue {
const un_op = self.air.instructions.items(.data)[inst].un_op;
return try self.resolveInst(un_op);
return self.resolveInst(un_op);
}
fn airArrayToSlice(self: *Self, inst: Air.Inst.Index) InnerError!WValue {
@ -2912,7 +3034,7 @@ fn airArrayToSlice(self: *Self, inst: Air.Inst.Index) InnerError!WValue {
fn airPtrToInt(self: *Self, inst: Air.Inst.Index) InnerError!WValue {
if (self.liveness.isUnused(inst)) return WValue{ .none = {} };
const un_op = self.air.instructions.items(.data)[inst].un_op;
return try self.resolveInst(un_op);
return self.resolveInst(un_op);
}
fn airPtrElemVal(self: *Self, inst: Air.Inst.Index) InnerError!WValue {
@ -2927,7 +3049,7 @@ fn airPtrElemVal(self: *Self, inst: Air.Inst.Index) InnerError!WValue {
// load pointer onto the stack
if (ptr_ty.isSlice()) {
const ptr_local = try self.load(pointer, ptr_ty, 0);
const ptr_local = try self.load(pointer, Type.usize, 0);
try self.addLabel(.local_get, ptr_local.local);
} else {
try self.emitWValue(pointer);
@ -2944,7 +3066,7 @@ fn airPtrElemVal(self: *Self, inst: Air.Inst.Index) InnerError!WValue {
if (isByRef(elem_ty, self.target)) {
return result;
}
return try self.load(result, elem_ty, 0);
return self.load(result, elem_ty, 0);
}
fn airPtrElemPtr(self: *Self, inst: Air.Inst.Index) InnerError!WValue {
@ -2960,7 +3082,7 @@ fn airPtrElemPtr(self: *Self, inst: Air.Inst.Index) InnerError!WValue {
// load pointer onto the stack
if (ptr_ty.isSlice()) {
const ptr_local = try self.load(ptr, ptr_ty, 0);
const ptr_local = try self.load(ptr, Type.usize, 0);
try self.addLabel(.local_get, ptr_local.local);
} else {
try self.emitWValue(ptr);
@ -3094,7 +3216,7 @@ fn airArrayElemVal(self: *Self, inst: Air.Inst.Index) InnerError!WValue {
if (isByRef(elem_ty, self.target)) {
return result;
}
return try self.load(result, elem_ty, 0);
return self.load(result, elem_ty, 0);
}
fn airFloatToInt(self: *Self, inst: Air.Inst.Index) InnerError!WValue {
@ -3138,8 +3260,63 @@ fn airVectorInit(self: *Self, inst: Air.Inst.Index) InnerError!WValue {
const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
const elements = @bitCast([]const Air.Inst.Ref, self.air.extra[ty_pl.payload..][0..len]);
_ = elements;
return self.fail("TODO: Wasm backend: implement airVectorInit", .{});
switch (vector_ty.zigTypeTag()) {
.Vector => return self.fail("TODO: Wasm backend: implement airVectorInit for vectors", .{}),
.Array => {
const result = try self.allocStack(vector_ty);
const elem_ty = vector_ty.childType();
const elem_size = @intCast(u32, elem_ty.abiSize(self.target));
// When the element type is by reference, we must copy the entire
// value. It is therefore safer to move the offset pointer and store
// each value individually, instead of using store offsets.
if (isByRef(elem_ty, self.target)) {
// copy stack pointer into a temporary local, which is
// moved for each element to store each value in the right position.
const offset = try self.allocLocal(Type.usize);
try self.emitWValue(result);
try self.addLabel(.local_set, offset.local);
for (elements) |elem, elem_index| {
const elem_val = try self.resolveInst(elem);
try self.store(offset, elem_val, elem_ty, 0);
if (elem_index < elements.len - 1) {
_ = try self.buildPointerOffset(offset, elem_size, .modify);
}
}
} else {
var offset: u32 = 0;
for (elements) |elem| {
const elem_val = try self.resolveInst(elem);
try self.store(result, elem_val, elem_ty, offset);
offset += elem_size;
}
}
return result;
},
.Struct => {
const tuple = vector_ty.castTag(.tuple).?.data;
const result = try self.allocStack(vector_ty);
const offset = try self.allocLocal(Type.usize); // pointer to offset
try self.emitWValue(result);
try self.addLabel(.local_set, offset.local);
for (elements) |elem, elem_index| {
if (tuple.values[elem_index].tag() != .unreachable_value) continue;
const elem_ty = tuple.types[elem_index];
const elem_size = @intCast(u32, elem_ty.abiSize(self.target));
const value = try self.resolveInst(elem);
try self.store(offset, value, elem_ty, 0);
if (elem_index < elements.len - 1) {
_ = try self.buildPointerOffset(offset, elem_size, .modify);
}
}
return result;
},
else => unreachable,
}
}
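// For example (a sketch): an array init `.{ a, b, c }` of type [3]u32 with
// runtime-known elements takes the non-by-ref branch: it allocates 12 bytes
// on the virtual stack and stores each element at offsets 0, 4 and 8.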
fn airPrefetch(self: *Self, inst: Air.Inst.Index) InnerError!WValue {


@ -326,25 +326,27 @@ fn emitFunctionIndex(emit: *Emit, inst: Mir.Inst.Index) !void {
}
fn emitMemAddress(emit: *Emit, inst: Mir.Inst.Index) !void {
const symbol_index = emit.mir.instructions.items(.data)[inst].label;
const extra_index = emit.mir.instructions.items(.data)[inst].payload;
const mem = emit.mir.extraData(Mir.Memory, extra_index).data;
const mem_offset = emit.offset() + 1;
const is_wasm32 = emit.bin_file.options.target.cpu.arch == .wasm32;
if (is_wasm32) {
try emit.code.append(std.wasm.opcode(.i32_const));
var buf: [5]u8 = undefined;
leb128.writeUnsignedFixed(5, &buf, symbol_index);
leb128.writeUnsignedFixed(5, &buf, mem.pointer);
try emit.code.appendSlice(&buf);
} else {
try emit.code.append(std.wasm.opcode(.i64_const));
var buf: [10]u8 = undefined;
leb128.writeUnsignedFixed(10, &buf, symbol_index);
leb128.writeUnsignedFixed(10, &buf, mem.pointer);
try emit.code.appendSlice(&buf);
}
try emit.decl.link.wasm.relocs.append(emit.bin_file.allocator, .{
.offset = mem_offset,
.index = symbol_index,
.index = mem.pointer,
.relocation_type = if (is_wasm32) .R_WASM_MEMORY_ADDR_LEB else .R_WASM_MEMORY_ADDR_LEB64,
.addend = mem.offset,
});
}
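// As a sketch, for wasm32 with mem.pointer = 3 and mem.offset = 8 this emits
// 0x41 (i32.const) followed by a fixed 5-byte ULEB128 encoding of symbol
// index 3, and records an R_WASM_MEMORY_ADDR_LEB relocation (addend 8) at
// the byte right after the opcode, for the linker to patch in the final address.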


@ -546,3 +546,10 @@ pub const MemArg = struct {
offset: u32,
alignment: u32,
};
/// Represents a memory address, holding the pointer
/// (or the parent pointer) and an offset from it.
pub const Memory = struct {
pointer: u32,
offset: u32,
};


@ -345,10 +345,19 @@ pub fn updateLocalSymbolCode(self: *Wasm, decl: *Module.Decl, symbol_index: u32,
/// For a given decl, find the given symbol index's atom, and create a relocation for the type.
/// Returns the given pointer address
pub fn getDeclVAddr(self: *Wasm, decl: *Module.Decl, ty: Type, symbol_index: u32, target_symbol_index: u32, offset: u32) !u32 {
pub fn getDeclVAddr(
self: *Wasm,
decl: *Module.Decl,
ty: Type,
symbol_index: u32,
target_symbol_index: u32,
offset: u32,
addend: u32,
) !u32 {
const atom = decl.link.wasm.symbolAtom(symbol_index);
const is_wasm32 = self.base.options.target.cpu.arch == .wasm32;
if (ty.zigTypeTag() == .Fn) {
std.debug.assert(addend == 0); // addend not allowed for function relocations
// We found a function pointer, so add it to our table,
// as function pointers are not allowed to be stored inside the data section.
// They are instead stored in a function table and called by index.
@ -363,6 +372,7 @@ pub fn getDeclVAddr(self: *Wasm, decl: *Module.Decl, ty: Type, symbol_index: u32
.index = target_symbol_index,
.offset = offset,
.relocation_type = if (is_wasm32) .R_WASM_MEMORY_ADDR_I32 else .R_WASM_MEMORY_ADDR_I64,
.addend = addend,
});
}
// we do not know the final address at this point,


@ -10,6 +10,7 @@ test {
_ = @import("behavior/bugs/655.zig");
_ = @import("behavior/bugs/656.zig");
_ = @import("behavior/bugs/679.zig");
_ = @import("behavior/bugs/1025.zig");
_ = @import("behavior/bugs/1111.zig");
_ = @import("behavior/bugs/1277.zig");
_ = @import("behavior/bugs/1310.zig");
@ -17,6 +18,8 @@ test {
_ = @import("behavior/bugs/1486.zig");
_ = @import("behavior/bugs/1500.zig");
_ = @import("behavior/bugs/1735.zig");
_ = @import("behavior/bugs/1741.zig");
_ = @import("behavior/bugs/1914.zig");
_ = @import("behavior/bugs/2006.zig");
_ = @import("behavior/bugs/2346.zig");
_ = @import("behavior/bugs/3112.zig");
@ -38,7 +41,8 @@ test {
_ = @import("behavior/struct.zig");
if (builtin.zig_backend != .stage2_arm and builtin.zig_backend != .stage2_x86_64) {
// Tests that pass for stage1, llvm backend, C backend, wasm backend.
// Tests that pass (partly) for stage1, llvm backend, C backend, wasm backend.
_ = @import("behavior/array_llvm.zig");
_ = @import("behavior/basic.zig");
_ = @import("behavior/bitcast.zig");
_ = @import("behavior/bugs/624.zig");
@ -69,6 +73,7 @@ test {
_ = @import("behavior/pointers.zig");
_ = @import("behavior/ptrcast.zig");
_ = @import("behavior/ref_var_in_if_after_if_2nd_switch_prong.zig");
_ = @import("behavior/slice.zig");
_ = @import("behavior/src.zig");
_ = @import("behavior/this.zig");
_ = @import("behavior/try.zig");
@ -88,11 +93,7 @@ test {
if (builtin.zig_backend != .stage2_c) {
// Tests that pass for stage1 and the llvm backend.
_ = @import("behavior/array_llvm.zig");
_ = @import("behavior/atomics.zig");
_ = @import("behavior/bugs/1025.zig");
_ = @import("behavior/bugs/1741.zig");
_ = @import("behavior/bugs/1914.zig");
_ = @import("behavior/bugs/2578.zig");
_ = @import("behavior/bugs/3007.zig");
_ = @import("behavior/bugs/9584.zig");
@ -108,7 +109,6 @@ test {
_ = @import("behavior/popcount.zig");
_ = @import("behavior/saturating_arithmetic.zig");
_ = @import("behavior/sizeof_and_typeof.zig");
_ = @import("behavior/slice.zig");
_ = @import("behavior/struct_llvm.zig");
_ = @import("behavior/switch.zig");
_ = @import("behavior/widening.zig");


@ -7,6 +7,7 @@ var s_array: [8]Sub = undefined;
const Sub = struct { b: u8 };
const Str = struct { a: []Sub };
test "set global var array via slice embedded in struct" {
if (@import("builtin").zig_backend == .stage2_c) return error.SkipZigTest; // TODO
var s = Str{ .a = s_array[0..] };
s.a[0].b = 1;
@ -19,6 +20,7 @@ test "set global var array via slice embedded in struct" {
}
test "read/write through global variable array of struct fields initialized via array mult" {
if (@import("builtin").zig_backend == .stage2_c) return error.SkipZigTest; // TODO
const S = struct {
fn doTheTest() !void {
try expect(storage[0].term == 1);
@ -36,6 +38,7 @@ test "read/write through global variable array of struct fields initialized via
}
test "implicit cast single-item pointer" {
if (@import("builtin").zig_backend == .stage2_c) return error.SkipZigTest; // TODO
try testImplicitCastSingleItemPtr();
comptime try testImplicitCastSingleItemPtr();
}
@ -52,6 +55,7 @@ fn testArrayByValAtComptime(b: [2]u8) u8 {
}
test "comptime evaluating function that takes array by value" {
if (@import("builtin").zig_backend == .stage2_c) return error.SkipZigTest; // TODO
const arr = [_]u8{ 1, 2 };
const x = comptime testArrayByValAtComptime(arr);
const y = comptime testArrayByValAtComptime(arr);
@ -60,12 +64,14 @@ test "comptime evaluating function that takes array by value" {
}
test "runtime initialize array elem and then implicit cast to slice" {
if (@import("builtin").zig_backend == .stage2_c) return error.SkipZigTest; // TODO
var two: i32 = 2;
const x: []const i32 = &[_]i32{two};
try expect(x[0] == 2);
}
test "array literal as argument to function" {
if (@import("builtin").zig_backend == .stage2_c) return error.SkipZigTest; // TODO
const S = struct {
fn entry(two: i32) !void {
try foo(&[_]i32{ 1, 2, 3 });
@ -90,6 +96,7 @@ test "array literal as argument to function" {
}
test "double nested array to const slice cast in array literal" {
if (@import("builtin").zig_backend == .stage2_c) return error.SkipZigTest; // TODO
const S = struct {
fn entry(two: i32) !void {
const cases = [_][]const []const i32{
@ -147,6 +154,7 @@ test "double nested array to const slice cast in array literal" {
}
test "anonymous literal in array" {
if (@import("builtin").zig_backend == .stage2_c) return error.SkipZigTest; // TODO
const S = struct {
const Foo = struct {
a: usize = 2,
@ -168,6 +176,7 @@ test "anonymous literal in array" {
}
test "access the null element of a null terminated array" {
if (@import("builtin").zig_backend == .stage2_c) return error.SkipZigTest; // TODO
const S = struct {
fn doTheTest() !void {
var array: [4:0]u8 = .{ 'a', 'o', 'e', 'u' };
@ -181,6 +190,7 @@ test "access the null element of a null terminated array" {
}
test "type deduction for array subscript expression" {
if (@import("builtin").zig_backend == .stage2_c) return error.SkipZigTest; // TODO
const S = struct {
fn doTheTest() !void {
var array = [_]u8{ 0x55, 0xAA };
@ -196,6 +206,8 @@ test "type deduction for array subscript expression" {
test "sentinel element count towards the ABI size calculation" {
if (@import("builtin").zig_backend == .stage2_llvm) return error.SkipZigTest; // TODO
if (@import("builtin").zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (@import("builtin").zig_backend == .stage2_c) return error.SkipZigTest; // TODO
const S = struct {
fn doTheTest() !void {
@ -218,6 +230,8 @@ test "sentinel element count towards the ABI size calculation" {
test "zero-sized array with recursive type definition" {
if (@import("builtin").zig_backend == .stage2_llvm) return error.SkipZigTest; // TODO
if (@import("builtin").zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (@import("builtin").zig_backend == .stage2_c) return error.SkipZigTest; // TODO
const U = struct {
fn foo(comptime T: type, comptime n: usize) type {
@ -237,6 +251,7 @@ test "zero-sized array with recursive type definition" {
}
test "type coercion of anon struct literal to array" {
if (@import("builtin").zig_backend == .stage2_c) return error.SkipZigTest; // TODO
const S = struct {
const U = union {
a: u32,
@ -253,6 +268,7 @@ test "type coercion of anon struct literal to array" {
try expect(arr1[2] == 54);
if (@import("builtin").zig_backend == .stage2_llvm) return error.SkipZigTest; // TODO
if (@import("builtin").zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
var x2: U = .{ .a = 42 };
const t2 = .{ x2, .{ .b = true }, .{ .c = "hello" } };
@ -268,6 +284,8 @@ test "type coercion of anon struct literal to array" {
test "type coercion of pointer to anon struct literal to pointer to array" {
if (@import("builtin").zig_backend == .stage2_llvm) return error.SkipZigTest; // TODO
if (@import("builtin").zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (@import("builtin").zig_backend == .stage2_c) return error.SkipZigTest; // TODO
const S = struct {
const U = union {


@ -1,3 +1,5 @@
const builtin = @import("builtin");
const A = struct {
B: type,
};
@ -7,6 +9,8 @@ fn getA() A {
}
test "bug 1025" {
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
const a = getA();
try @import("std").testing.expect(a.B == u8);
}


@ -1,6 +1,9 @@
const std = @import("std");
const builtin = @import("builtin");
test "fixed" {
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
const x: f32 align(128) = 12.34;
try std.testing.expect(@ptrToInt(&x) % 128 == 0);
}


@ -1,4 +1,5 @@
const std = @import("std");
const builtin = @import("builtin");
const A = struct {
b_list_pointer: *const []B,
@ -11,6 +12,9 @@ const b_list: []B = &[_]B{};
const a = A{ .b_list_pointer = &b_list };
test "segfault bug" {
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
const assert = std.debug.assert;
const obj = B{ .a_pointer = &a };
assert(obj.a_pointer == &a); // this makes zig crash
@ -27,5 +31,8 @@ pub const B2 = struct {
var b_value = B2{ .pointer_array = &[_]*A2{} };
test "basic stuff" {
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
std.debug.assert(&b_value == &b_value);
}


@ -27,6 +27,7 @@ comptime {
}
test "slicing" {
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
var array: [20]i32 = undefined;
array[5] = 1234;
@ -43,6 +44,7 @@ test "slicing" {
}
test "const slice" {
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
comptime {
const a = "1234567890";
try expect(a.len == 10);
@ -53,6 +55,7 @@ test "const slice" {
}
test "comptime slice of undefined pointer of length 0" {
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
const slice1 = @as([*]i32, undefined)[0..0];
try expect(slice1.len == 0);
const slice2 = @as([*]i32, undefined)[100..100];
@ -60,6 +63,7 @@ test "comptime slice of undefined pointer of length 0" {
}
test "implicitly cast array of size 0 to slice" {
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
var msg = [_]u8{};
try assertLenIsZero(&msg);
}
@ -69,6 +73,7 @@ fn assertLenIsZero(msg: []const u8) !void {
}
test "access len index of sentinel-terminated slice" {
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
const S = struct {
fn doTheTest() !void {
var slice: [:0]const u8 = "hello";
@ -82,6 +87,7 @@ test "access len index of sentinel-terminated slice" {
}
test "comptime slice of slice preserves comptime var" {
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
comptime {
var buff: [10]u8 = undefined;
buff[0..][0..][0] = 1;
@ -90,6 +96,7 @@ test "comptime slice of slice preserves comptime var" {
}
test "slice of type" {
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
comptime {
var types_array = [_]type{ i32, f64, type };
for (types_array) |T, i| {
@ -112,6 +119,7 @@ test "slice of type" {
}
test "generic malloc free" {
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
const a = memAlloc(u8, 10) catch unreachable;
memFree(u8, a);
}
@ -124,6 +132,7 @@ fn memFree(comptime T: type, memory: []T) void {
}
test "slice of hardcoded address to pointer" {
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
const S = struct {
fn doTheTest() !void {
const pointer = @intToPtr([*]u8, 0x04)[0..2];
@ -138,6 +147,7 @@ test "slice of hardcoded address to pointer" {
}
test "comptime slice of pointer preserves comptime var" {
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
comptime {
var buff: [10]u8 = undefined;
var a = @ptrCast([*]u8, &buff);
@ -147,6 +157,7 @@ test "comptime slice of pointer preserves comptime var" {
}
test "comptime pointer cast array and then slice" {
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
const array = [_]u8{ 1, 2, 3, 4, 5, 6, 7, 8 };
const ptrA: [*]const u8 = @ptrCast([*]const u8, &array);
@ -160,6 +171,7 @@ test "comptime pointer cast array and then slice" {
}
test "slicing zero length array" {
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
const s1 = ""[0..];
const s2 = ([_]u32{})[0..];
try expect(s1.len == 0);
@ -171,6 +183,7 @@ test "slicing zero length array" {
const x = @intToPtr([*]i32, 0x1000)[0..0x500];
const y = x[0x100..];
test "compile time slice of pointer to hard coded address" {
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage1) return error.SkipZigTest;
try expect(@ptrToInt(x) == 0x1000);