From 29013220d95f60669c4a181d157157aea9f137b5 Mon Sep 17 00:00:00 2001
From: Luuk de Gram
Date: Sun, 30 Jan 2022 15:24:03 +0100
Subject: [PATCH 1/4] wasm: Implement elem_ptr

This implements lowering elem_ptr for decls and constants. To generate
the correct pointer, we perform a relocation by using the addend that
represents the offset. The offset is calculated by taking the element's
size and multiplying that by the index. For constants this generates a
single immediate instruction, and for decls this generates a single
pointer address.
---
 src/arch/wasm/CodeGen.zig | 73 ++++++++++++++++++++++++++++++++++++---
 src/arch/wasm/Emit.zig    | 10 +++---
 src/arch/wasm/Mir.zig     |  7 ++++
 src/link/Wasm.zig         | 12 ++++++-
 4 files changed, 92 insertions(+), 10 deletions(-)

diff --git a/src/arch/wasm/CodeGen.zig b/src/arch/wasm/CodeGen.zig
index 7d2046b90b..2d0cf57fd4 100644
--- a/src/arch/wasm/CodeGen.zig
+++ b/src/arch/wasm/CodeGen.zig
@@ -39,6 +39,14 @@ const WValue = union(enum) {
     /// Note: The value contains the symbol index, rather than the actual address
     /// as we use this to perform the relocation.
     memory: u32,
+    /// A value that represents a parent pointer and an offset
+    /// from that pointer, e.g. when slicing with constant values.
+    memory_offset: struct {
+        /// The symbol of the parent pointer
+        pointer: u32,
+        /// Offset will be set as 'addend' when relocating
+        offset: u32,
+    },
     /// Represents a function pointer
     /// In wasm function pointers are indexes into a function table,
     /// rather than an address in the data section.
@@ -754,7 +762,14 @@ fn emitWValue(self: *Self, value: WValue) InnerError!void {
         .imm64 => |val| try self.addImm64(val),
         .float32 => |val| try self.addInst(.{ .tag = .f32_const, .data = .{ .float32 = val } }),
         .float64 => |val| try self.addFloat64(val),
-        .memory => |ptr| try self.addLabel(.memory_address, ptr), // write sybol address and generate relocation
+        .memory => |ptr| {
+            const extra_index = try self.addExtra(Mir.Memory{ .pointer = ptr, .offset = 0 });
+            try self.addInst(.{ .tag = .memory_address, .data = .{ .payload = extra_index } });
+        },
+        .memory_offset => |mem_off| {
+            const extra_index = try self.addExtra(Mir.Memory{ .pointer = mem_off.pointer, .offset = mem_off.offset });
+            try self.addInst(.{ .tag = .memory_address, .data = .{ .payload = extra_index } });
+        },
         .function_index => |index| try self.addLabel(.function_index, index), // write function index and generate relocation
     }
 }
@@ -927,7 +942,7 @@ pub const DeclGen = struct {
                     .function => val.castTag(.function).?.data.owner_decl,
                     else => unreachable,
                 };
-                return try self.lowerDeclRef(ty, val, fn_decl);
+                return try self.lowerDeclRefValue(ty, val, fn_decl, 0);
             },
             .Optional => {
                 var opt_buf: Type.Payload.ElemType = undefined;
@@ -1115,11 +1130,11 @@ pub const DeclGen = struct {
             .Pointer => switch (val.tag()) {
                 .variable => {
                     const decl = val.castTag(.variable).?.data.owner_decl;
-                    return self.lowerDeclRef(ty, val, decl);
+                    return self.lowerDeclRefValue(ty, val, decl, 0);
                 },
                 .decl_ref => {
                     const decl = val.castTag(.decl_ref).?.data;
-                    return self.lowerDeclRef(ty, val, decl);
+                    return self.lowerDeclRefValue(ty, val, decl, writer, 0);
                 },
                 .slice => {
                     const slice = val.castTag(.slice).?.data;
                    try writer.writeByteNTimes(0, @divExact(self.target().cpu.arch.ptrBitWidth(), 8));
                    return Result{ .appended = {} };
                },
+                .elem_ptr => {
+                    const elem_ptr = val.castTag(.elem_ptr).?.data;
+                    const elem_size = ty.childType().abiSize(self.target());
+                    const offset = 
elem_ptr.index * elem_size; + return self.lowerParentPtr(elem_ptr.array_ptr, writer, offset); + }, + .int_u64 => return self.genTypedValue(Type.usize, val, writer), else => return self.fail("TODO: Implement zig decl gen for pointer type value: '{s}'", .{@tagName(val.tag())}), }, .ErrorUnion => { @@ -1179,7 +1201,36 @@ pub const DeclGen = struct { } } - fn lowerDeclRef(self: *DeclGen, ty: Type, val: Value, decl: *Module.Decl) InnerError!Result { + fn lowerParentPtr(self: *DeclGen, ptr_value: Value, offset: usize) InnerError!Result { + switch (ptr_value.tag()) { + .decl_ref => { + const decl = ptr_value.castTag(.decl_ref).?.data; + return self.lowerParentPtrDecl(ptr_value, decl, offset); + }, + else => |tag| return self.fail("TODO: Implement lowerParentPtr for pointer value tag: {s}", .{tag}), + } + } + + fn lowerParentPtrDecl(self: *DeclGen, ptr_val: Value, decl: *Module.Decl, offset: usize) InnerError!Result { + decl.markAlive(); + var ptr_ty_payload: Type.Payload.ElemType = .{ + .base = .{ .tag = .single_mut_pointer }, + .data = decl.ty, + }; + const ptr_ty = Type.initPayload(&ptr_ty_payload.base); + return self.lowerDeclRefValue(ptr_ty, ptr_val, decl, offset); + } + + fn lowerDeclRefValue( + self: *DeclGen, + ty: Type, + val: Value, + /// The target decl that is being pointed to + decl: *Module.Decl, + /// When lowering to an indexed pointer, we can specify the offset + /// which will then be used as 'addend' to the relocation. + offset: usize, + ) InnerError!Result { const writer = self.code.writer(); if (ty.isSlice()) { var buf: Type.SlicePtrFieldTypeBuffer = undefined; @@ -1202,6 +1253,7 @@ pub const DeclGen = struct { self.symbol_index, // source symbol index decl.link.wasm.sym_index, // target symbol index @intCast(u32, self.code.items.len), // offset + @intCast(u32, offset), // addend )); return Result{ .appended = {} }; } @@ -1974,6 +2026,17 @@ fn lowerConstant(self: *Self, val: Value, ty: Type) InnerError!WValue { return WValue{ .function_index = target_sym_index }; } else return WValue{ .memory = target_sym_index }; }, + .elem_ptr => { + const elem_ptr = val.castTag(.elem_ptr).?.data; + const index = elem_ptr.index; + const offset = index * ty.childType().abiSize(self.target); + const array_ptr = try self.lowerConstant(elem_ptr.array_ptr, ty); + + return WValue{ .memory_offset = .{ + .pointer = array_ptr.memory, + .offset = @intCast(u32, offset), + } }; + }, .int_u64, .one => return WValue{ .imm32 = @intCast(u32, val.toUnsignedInt()) }, .zero, .null_value => return WValue{ .imm32 = 0 }, else => return self.fail("Wasm TODO: lowerConstant for other const pointer tag {s}", .{val.tag()}), diff --git a/src/arch/wasm/Emit.zig b/src/arch/wasm/Emit.zig index 9283a0e0b7..8cae78caf1 100644 --- a/src/arch/wasm/Emit.zig +++ b/src/arch/wasm/Emit.zig @@ -326,25 +326,27 @@ fn emitFunctionIndex(emit: *Emit, inst: Mir.Inst.Index) !void { } fn emitMemAddress(emit: *Emit, inst: Mir.Inst.Index) !void { - const symbol_index = emit.mir.instructions.items(.data)[inst].label; + const extra_index = emit.mir.instructions.items(.data)[inst].payload; + const mem = emit.mir.extraData(Mir.Memory, extra_index).data; const mem_offset = emit.offset() + 1; const is_wasm32 = emit.bin_file.options.target.cpu.arch == .wasm32; if (is_wasm32) { try emit.code.append(std.wasm.opcode(.i32_const)); var buf: [5]u8 = undefined; - leb128.writeUnsignedFixed(5, &buf, symbol_index); + leb128.writeUnsignedFixed(5, &buf, mem.pointer); try emit.code.appendSlice(&buf); } else { try emit.code.append(std.wasm.opcode(.i64_const)); 
        var buf: [10]u8 = undefined;
-        leb128.writeUnsignedFixed(10, &buf, symbol_index);
+        leb128.writeUnsignedFixed(10, &buf, mem.pointer);
         try emit.code.appendSlice(&buf);
     }
 
     try emit.decl.link.wasm.relocs.append(emit.bin_file.allocator, .{
         .offset = mem_offset,
-        .index = symbol_index,
+        .index = mem.pointer,
         .relocation_type = if (is_wasm32) .R_WASM_MEMORY_ADDR_LEB else .R_WASM_MEMORY_ADDR_LEB64,
+        .addend = mem.offset,
     });
 }
 
diff --git a/src/arch/wasm/Mir.zig b/src/arch/wasm/Mir.zig
index 07696f0dd3..ed0867e583 100644
--- a/src/arch/wasm/Mir.zig
+++ b/src/arch/wasm/Mir.zig
@@ -546,3 +546,10 @@ pub const MemArg = struct {
     offset: u32,
     alignment: u32,
 };
+
+/// Represents a memory address, which holds the pointer
+/// (or the parent pointer) and the offset into it.
+pub const Memory = struct {
+    pointer: u32,
+    offset: u32,
+};

diff --git a/src/link/Wasm.zig b/src/link/Wasm.zig
index a5d4630378..b047e4b68a 100644
--- a/src/link/Wasm.zig
+++ b/src/link/Wasm.zig
@@ -345,10 +345,19 @@ pub fn updateLocalSymbolCode(self: *Wasm, decl: *Module.Decl, symbol_index: u32,
 
 /// For a given decl, find the given symbol index's atom, and create a relocation for the type.
 /// Returns the given pointer address
-pub fn getDeclVAddr(self: *Wasm, decl: *Module.Decl, ty: Type, symbol_index: u32, target_symbol_index: u32, offset: u32) !u32 {
+pub fn getDeclVAddr(
+    self: *Wasm,
+    decl: *Module.Decl,
+    ty: Type,
+    symbol_index: u32,
+    target_symbol_index: u32,
+    offset: u32,
+    addend: u32,
+) !u32 {
     const atom = decl.link.wasm.symbolAtom(symbol_index);
     const is_wasm32 = self.base.options.target.cpu.arch == .wasm32;
     if (ty.zigTypeTag() == .Fn) {
+        std.debug.assert(addend == 0); // addend not allowed for function relocations
         // We found a function pointer, so add it to our table,
         // as function pointers are not allowed to be stored inside the data section.
         // They are instead stored in a function table which are called by index.
@@ -363,6 +372,7 @@
             .index = target_symbol_index,
             .offset = offset,
             .relocation_type = if (is_wasm32) .R_WASM_MEMORY_ADDR_I32 else .R_WASM_MEMORY_ADDR_I64,
+            .addend = addend,
         });
     }
     // we do not know the final address at this point,

From ae1e3c8f9bc86eeefb5a83233884a134f7b974f4 Mon Sep 17 00:00:00 2001
From: Luuk de Gram
Date: Mon, 31 Jan 2022 21:12:30 +0100
Subject: [PATCH 2/4] wasm: Implement vector_init for arrays & structs

Implements the instruction `vector_init` for structs and arrays.

For arrays, it checks whether the element type must be passed by
reference. When it does not, it can simply use the `offset` field of a
store instruction to copy the values into the array. When it is by-ref,
it moves the pointer by the element size and then performs a store
operation, which ensures types like structs end up in the right
position. For structs we always move the pointer, as we currently
cannot verify that all fields are not by-ref.
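As a rough illustration (this snippet is not part of the diff, and that
these literals lower through `vector_init` is an assumption), code
along these lines exercises both paths:

    test "aggregate init with runtime values" {
        var x: u32 = 5;
        var y: u32 = 10;
        // u32 elements are not by-ref: copied using store offsets
        const array = [2]u32{ x, y };
        // a tuple with runtime fields: the destination pointer is
        // moved by the element size between stores
        const pair = .{ x, y };
        _ = array;
        _ = pair;
    }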
--- src/arch/wasm/CodeGen.zig | 116 +++++++++++++++++++++++++++++--------- 1 file changed, 89 insertions(+), 27 deletions(-) diff --git a/src/arch/wasm/CodeGen.zig b/src/arch/wasm/CodeGen.zig index 2d0cf57fd4..420fbdf4ab 100644 --- a/src/arch/wasm/CodeGen.zig +++ b/src/arch/wasm/CodeGen.zig @@ -44,7 +44,7 @@ const WValue = union(enum) { memory_offset: struct { /// The symbol of the parent pointer pointer: u32, - /// Offset will be set as 'addend' when relocating + /// Offset will be set as addend when relocating offset: u32, }, /// Represents a function pointer @@ -606,7 +606,10 @@ fn resolveInst(self: *Self, ref: Air.Inst.Ref) InnerError!WValue { // means we must generate it from a constant. const val = self.air.value(ref).?; const ty = self.air.typeOf(ref); - if (!ty.hasRuntimeBits() and !ty.isInt()) return WValue{ .none = {} }; + if (!ty.hasRuntimeBits() and !ty.isInt()) { + gop.value_ptr.* = WValue{ .none = {} }; + return gop.value_ptr.*; + } // When we need to pass the value by reference (such as a struct), we will // leverage `genTypedValue` to lower the constant to bytes and emit it @@ -1644,6 +1647,7 @@ fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void { fn airRet(self: *Self, inst: Air.Inst.Index) InnerError!WValue { const un_op = self.air.instructions.items(.data)[inst].un_op; const operand = try self.resolveInst(un_op); + // result must be stored in the stack and we return a pointer // to the stack instead if (self.return_value != .none) { @@ -1653,7 +1657,7 @@ fn airRet(self: *Self, inst: Air.Inst.Index) InnerError!WValue { } try self.restoreStackPointer(); try self.addTag(.@"return"); - return .none; + return WValue{ .none = {} }; } fn airRetPtr(self: *Self, inst: Air.Inst.Index) InnerError!WValue { @@ -1793,11 +1797,10 @@ fn store(self: *Self, lhs: WValue, rhs: WValue, ty: Type, offset: u32) InnerErro const err_ty = ty.errorUnionSet(); const pl_ty = ty.errorUnionPayload(); if (!pl_ty.hasRuntimeBits()) { - const err_val = try self.load(rhs, err_ty, 0); - return self.store(lhs, err_val, err_ty, 0); + return self.store(lhs, rhs, err_ty, 0); } - return try self.memCopy(ty, lhs, rhs); + return self.memCopy(ty, lhs, rhs); }, .Optional => { if (ty.isPtrLikeOptional()) { @@ -1812,7 +1815,7 @@ fn store(self: *Self, lhs: WValue, rhs: WValue, ty: Type, offset: u32) InnerErro return self.memCopy(ty, lhs, rhs); }, .Struct, .Array, .Union => { - return try self.memCopy(ty, lhs, rhs); + return self.memCopy(ty, lhs, rhs); }, .Pointer => { if (ty.isSlice()) { @@ -1827,7 +1830,7 @@ fn store(self: *Self, lhs: WValue, rhs: WValue, ty: Type, offset: u32) InnerErro } }, .Int => if (ty.intInfo(self.target).bits > 64) { - return try self.memCopy(ty, lhs, rhs); + return self.memCopy(ty, lhs, rhs); }, else => {}, } @@ -2587,11 +2590,11 @@ fn airUnwrapErrUnionPayload(self: *Self, inst: Air.Inst.Index) InnerError!WValue if (isByRef(payload_ty, self.target)) { return self.buildPointerOffset(operand, offset, .new); } - return try self.load(operand, payload_ty, offset); + return self.load(operand, payload_ty, offset); } fn airUnwrapErrUnionError(self: *Self, inst: Air.Inst.Index) InnerError!WValue { - if (self.liveness.isUnused(inst)) return WValue.none; + if (self.liveness.isUnused(inst)) return WValue{ .none = {} }; const ty_op = self.air.instructions.items(.data)[inst].ty_op; const operand = try self.resolveInst(ty_op.operand); @@ -2601,11 +2604,12 @@ fn airUnwrapErrUnionError(self: *Self, inst: Air.Inst.Index) InnerError!WValue { return operand; } - return try self.load(operand, 
err_ty.errorUnionSet(), 0); + return self.load(operand, err_ty.errorUnionSet(), 0); } fn airWrapErrUnionPayload(self: *Self, inst: Air.Inst.Index) InnerError!WValue { - if (self.liveness.isUnused(inst)) return WValue.none; + if (self.liveness.isUnused(inst)) return WValue{ .none = {} }; + const ty_op = self.air.instructions.items(.data)[inst].ty_op; const operand = try self.resolveInst(ty_op.operand); @@ -2627,11 +2631,14 @@ fn airWrapErrUnionPayload(self: *Self, inst: Air.Inst.Index) InnerError!WValue { } fn airWrapErrUnionErr(self: *Self, inst: Air.Inst.Index) InnerError!WValue { - if (self.liveness.isUnused(inst)) return WValue.none; + if (self.liveness.isUnused(inst)) return WValue{ .none = {} }; + const ty_op = self.air.instructions.items(.data)[inst].ty_op; const operand = try self.resolveInst(ty_op.operand); const err_ty = self.air.getRefType(ty_op.ty); + if (!err_ty.errorUnionPayload().hasRuntimeBits()) return operand; + const err_union = try self.allocStack(err_ty); // TODO: Also write 'undefined' to the payload try self.store(err_union, operand, err_ty.errorUnionSet(), 0); @@ -2813,16 +2820,16 @@ fn airSlice(self: *Self, inst: Air.Inst.Index) InnerError!WValue { } fn airSliceLen(self: *Self, inst: Air.Inst.Index) InnerError!WValue { - if (self.liveness.isUnused(inst)) return WValue.none; + if (self.liveness.isUnused(inst)) return WValue{ .none = {} }; const ty_op = self.air.instructions.items(.data)[inst].ty_op; const operand = try self.resolveInst(ty_op.operand); - return try self.load(operand, Type.usize, self.ptrSize()); + return self.load(operand, Type.usize, self.ptrSize()); } fn airSliceElemVal(self: *Self, inst: Air.Inst.Index) InnerError!WValue { - if (self.liveness.isUnused(inst)) return WValue.none; + if (self.liveness.isUnused(inst)) return WValue{ .none = {} }; const bin_op = self.air.instructions.items(.data)[inst].bin_op; const slice_ty = self.air.typeOf(bin_op.lhs); @@ -2847,7 +2854,7 @@ fn airSliceElemVal(self: *Self, inst: Air.Inst.Index) InnerError!WValue { if (isByRef(elem_ty, self.target)) { return result; } - return try self.load(result, elem_ty, 0); + return self.load(result, elem_ty, 0); } fn airSliceElemPtr(self: *Self, inst: Air.Inst.Index) InnerError!WValue { @@ -2875,10 +2882,10 @@ fn airSliceElemPtr(self: *Self, inst: Air.Inst.Index) InnerError!WValue { } fn airSlicePtr(self: *Self, inst: Air.Inst.Index) InnerError!WValue { - if (self.liveness.isUnused(inst)) return WValue.none; + if (self.liveness.isUnused(inst)) return WValue{ .none = {} }; const ty_op = self.air.instructions.items(.data)[inst].ty_op; const operand = try self.resolveInst(ty_op.operand); - return try self.load(operand, Type.usize, 0); + return self.load(operand, Type.usize, 0); } fn airTrunc(self: *Self, inst: Air.Inst.Index) InnerError!WValue { @@ -2943,7 +2950,7 @@ fn airTrunc(self: *Self, inst: Air.Inst.Index) InnerError!WValue { fn airBoolToInt(self: *Self, inst: Air.Inst.Index) InnerError!WValue { const un_op = self.air.instructions.items(.data)[inst].un_op; - return try self.resolveInst(un_op); + return self.resolveInst(un_op); } fn airArrayToSlice(self: *Self, inst: Air.Inst.Index) InnerError!WValue { @@ -2975,7 +2982,7 @@ fn airArrayToSlice(self: *Self, inst: Air.Inst.Index) InnerError!WValue { fn airPtrToInt(self: *Self, inst: Air.Inst.Index) InnerError!WValue { if (self.liveness.isUnused(inst)) return WValue{ .none = {} }; const un_op = self.air.instructions.items(.data)[inst].un_op; - return try self.resolveInst(un_op); + return self.resolveInst(un_op); } fn 
airPtrElemVal(self: *Self, inst: Air.Inst.Index) InnerError!WValue { @@ -2990,7 +2997,7 @@ fn airPtrElemVal(self: *Self, inst: Air.Inst.Index) InnerError!WValue { // load pointer onto the stack if (ptr_ty.isSlice()) { - const ptr_local = try self.load(pointer, ptr_ty, 0); + const ptr_local = try self.load(pointer, Type.usize, 0); try self.addLabel(.local_get, ptr_local.local); } else { try self.emitWValue(pointer); @@ -3007,7 +3014,7 @@ fn airPtrElemVal(self: *Self, inst: Air.Inst.Index) InnerError!WValue { if (isByRef(elem_ty, self.target)) { return result; } - return try self.load(result, elem_ty, 0); + return self.load(result, elem_ty, 0); } fn airPtrElemPtr(self: *Self, inst: Air.Inst.Index) InnerError!WValue { @@ -3023,7 +3030,7 @@ fn airPtrElemPtr(self: *Self, inst: Air.Inst.Index) InnerError!WValue { // load pointer onto the stack if (ptr_ty.isSlice()) { - const ptr_local = try self.load(ptr, ptr_ty, 0); + const ptr_local = try self.load(ptr, Type.usize, 0); try self.addLabel(.local_get, ptr_local.local); } else { try self.emitWValue(ptr); @@ -3157,7 +3164,7 @@ fn airArrayElemVal(self: *Self, inst: Air.Inst.Index) InnerError!WValue { if (isByRef(elem_ty, self.target)) { return result; } - return try self.load(result, elem_ty, 0); + return self.load(result, elem_ty, 0); } fn airFloatToInt(self: *Self, inst: Air.Inst.Index) InnerError!WValue { @@ -3201,8 +3208,63 @@ fn airVectorInit(self: *Self, inst: Air.Inst.Index) InnerError!WValue { const ty_pl = self.air.instructions.items(.data)[inst].ty_pl; const elements = @bitCast([]const Air.Inst.Ref, self.air.extra[ty_pl.payload..][0..len]); - _ = elements; - return self.fail("TODO: Wasm backend: implement airVectorInit", .{}); + switch (vector_ty.zigTypeTag()) { + .Vector => return self.fail("TODO: Wasm backend: implement airVectorInit for vectors", .{}), + .Array => { + const result = try self.allocStack(vector_ty); + const elem_ty = vector_ty.childType(); + const elem_size = @intCast(u32, elem_ty.abiSize(self.target)); + + // When the element type is by reference, we must copy the entire + // value. It is therefore safer to move the offset pointer and store + // each value individually, instead of using store offsets. + if (isByRef(elem_ty, self.target)) { + // copy stack pointer into a temporary local, which is + // moved for each element to store each value in the right position. 
+                const offset = try self.allocLocal(Type.usize);
+                try self.emitWValue(result);
+                try self.addLabel(.local_set, offset.local);
+                for (elements) |elem, elem_index| {
+                    const elem_val = try self.resolveInst(elem);
+                    try self.store(offset, elem_val, elem_ty, 0);
+
+                    if (elem_index < elements.len - 1) {
+                        _ = try self.buildPointerOffset(offset, elem_size, .modify);
+                    }
+                }
+            } else {
+                var offset: u32 = 0;
+                for (elements) |elem| {
+                    const elem_val = try self.resolveInst(elem);
+                    try self.store(result, elem_val, elem_ty, offset);
+                    offset += elem_size;
+                }
+            }
+            return result;
+        },
+        .Struct => {
+            const tuple = vector_ty.castTag(.tuple).?.data;
+            const result = try self.allocStack(vector_ty);
+            const offset = try self.allocLocal(Type.usize); // pointer to offset
+            try self.emitWValue(result);
+            try self.addLabel(.local_set, offset.local);
+            for (elements) |elem, elem_index| {
+                if (tuple.values[elem_index].tag() != .unreachable_value) continue;
+
+                const elem_ty = tuple.types[elem_index];
+                const elem_size = @intCast(u32, elem_ty.abiSize(self.target));
+                const value = try self.resolveInst(elem);
+                try self.store(offset, value, elem_ty, 0);
+
+                if (elem_index < elements.len - 1) {
+                    _ = try self.buildPointerOffset(offset, elem_size, .modify);
+                }
+            }
+
+            return result;
+        },
+        else => unreachable,
+    }
 }
 
 fn airPrefetch(self: *Self, inst: Air.Inst.Index) InnerError!WValue {

From e35414bf5c356798f201be85303101f59220326c Mon Sep 17 00:00:00 2001
From: Luuk de Gram
Date: Thu, 3 Feb 2022 21:31:35 +0100
Subject: [PATCH 3/4] wasm: Refactor stack to account for alignment

We now calculate the total stack size required for the current frame.
The default alignment of the stack is 16 bytes, and it is overridden
when the alignment of a given type is larger than that. After we have
generated all instructions for the body, we calculate the total stack
size by forward aligning the stack size while accounting for the max
alignment. We then insert a prologue into the body, where we subtract
this size from the stack pointer and save it inside a bottom stackframe
local. We then use this local to calculate the stack pointer locals of
all variables we allocate on the stack.

In a future iteration we can improve this further by storing the
offsets as a new `stack_offset` `WValue`. This has the benefit of not
paying the runtime cost of storing those offsets; instead, we append
the offsets whenever we need the value that lives on the stack.
---
 src/arch/wasm/CodeGen.zig | 168 +++++++++++++++++++++++++-------------
 1 file changed, 110 insertions(+), 58 deletions(-)

diff --git a/src/arch/wasm/CodeGen.zig b/src/arch/wasm/CodeGen.zig
index 420fbdf4ab..d2db5fd92b 100644
--- a/src/arch/wasm/CodeGen.zig
+++ b/src/arch/wasm/CodeGen.zig
@@ -560,6 +560,9 @@ mir_extra: std.ArrayListUnmanaged(u32) = .{},
 /// When a function is executing, we store the the current stack pointer's value within this local.
 /// This value is then used to restore the stack pointer to the original value at the return of the function.
 initial_stack_value: WValue = .none,
+/// The current stack pointer with the stack size subtracted from it. From this value, we will calculate
+/// all offsets of the stack values.
+bottom_stack_value: WValue = .none,
 /// Arguments of this function declaration
 /// This will be set after `resolveCallingConventionValues`
 args: []WValue = &.{},
@@ -567,6 +570,14 @@ args: []WValue = &.{},
 /// When it returns a pointer to the stack, the `.local` tag will be active and must be populated
 /// before this function returns its execution to the caller.
 return_value: WValue = .none,
+/// The size of the stack this function occupies. In the function prologue
+/// we will move the stack pointer by this number, forward aligned with the `stack_alignment`.
+stack_size: u32 = 0,
+/// The stack alignment, which is 16 bytes by default. This is specified by the
+/// tool-conventions: https://github.com/WebAssembly/tool-conventions/blob/main/BasicCABI.md
+/// and is also what the llvm backend will emit.
+/// However, local variables or the usage of `@setAlignStack` can override this default.
+stack_alignment: u32 = 16,
 
 const InnerError = error{
     OutOfMemory,
@@ -654,13 +665,6 @@ fn addInst(self: *Self, inst: Mir.Inst) error{OutOfMemory}!void {
     try self.mir_instructions.append(self.gpa, inst);
 }
 
-/// Inserts a Mir instruction at the given `offset`.
-/// Asserts offset is within bound.
-fn addInstAt(self: *Self, offset: usize, inst: Mir.Inst) error{OutOfMemory}!void {
-    try self.mir_instructions.ensureUnusedCapacity(self.gpa, 1);
-    self.mir_instructions.insertAssumeCapacity(offset, inst);
-}
-
 fn addTag(self: *Self, tag: Mir.Inst.Tag) error{OutOfMemory}!void {
     try self.addInst(.{ .tag = tag, .data = .{ .tag = {} } });
 }
@@ -845,10 +849,43 @@ pub fn genFunc(self: *Self) InnerError!void {
             try self.addTag(.@"unreachable");
         }
     }
-
     // End of function body
     try self.addTag(.end);
 
+    // check if we have to initialize and allocate anything into the stack frame.
+    // If so, create enough stack space and insert the instructions at the front of the list.
+    if (self.stack_size > 0) {
+        var prologue = std.ArrayList(Mir.Inst).init(self.gpa);
+        defer prologue.deinit();
+
+        // load stack pointer
+        try prologue.append(.{ .tag = .global_get, .data = .{ .label = 0 } });
+        // store stack pointer so we can restore it when we return from the function
+        try prologue.append(.{ .tag = .local_tee, .data = .{ .label = self.initial_stack_value.local } });
+        // get the total stack size
+        const aligned_stack = std.mem.alignForwardGeneric(u32, self.stack_size, self.stack_alignment);
+        try prologue.append(.{ .tag = .i32_const, .data = .{ .imm32 = @intCast(i32, aligned_stack) } });
+        // subtract it from the current stack pointer
+        try prologue.append(.{ .tag = .i32_sub, .data = .{ .tag = {} } });
+        // Get the negative stack alignment
+        try prologue.append(.{ .tag = .i32_const, .data = .{ .imm32 = @intCast(i32, self.stack_alignment) * -1 } });
+        // Bitwise AND the value to get the new stack pointer, ensuring the pointers are aligned with the ABI alignment
+        try prologue.append(.{ .tag = .i32_and, .data = .{ .tag = {} } });
+        // store the current stack pointer as the bottom, which will be used to calculate all stack pointer offsets
+        try prologue.append(.{ .tag = .local_tee, .data = .{ .label = self.bottom_stack_value.local } });
+        // Store the current stack pointer value into the global stack pointer so other function calls will
+        // start from this value instead and not overwrite the current stack.
+        try prologue.append(.{ .tag = .global_set, .data = .{ .label = 0 } });
+
+        // reserve space and insert all prologue instructions at the front of the instruction list
+        // We insert them in reverse order as there is no insertSlice in multiArrayList.
+        try self.mir_instructions.ensureUnusedCapacity(self.gpa, prologue.items.len);
+        for (prologue.items) |_, index| {
+            const inst = prologue.items[prologue.items.len - 1 - index];
+            self.mir_instructions.insertAssumeCapacity(0, inst);
+        }
+    }
+
     var mir: Mir = .{
         .instructions = self.mir_instructions.toOwnedSlice(),
         .extra = self.mir_extra.toOwnedSlice(self.gpa),
@@ -1137,7 +1174,7 @@ pub const DeclGen = struct {
             },
             .decl_ref => {
                 const decl = val.castTag(.decl_ref).?.data;
-                return self.lowerDeclRefValue(ty, val, decl, writer, 0);
+                return self.lowerDeclRefValue(ty, val, decl, 0);
             },
             .slice => {
                 const slice = val.castTag(.slice).?.data;
@@ -1161,9 +1198,9 @@
                 const elem_ptr = val.castTag(.elem_ptr).?.data;
                 const elem_size = ty.childType().abiSize(self.target());
                 const offset = elem_ptr.index * elem_size;
-                return self.lowerParentPtr(elem_ptr.array_ptr, writer, offset);
+                return self.lowerParentPtr(elem_ptr.array_ptr, offset);
             },
-            .int_u64 => return self.genTypedValue(Type.usize, val, writer),
+            .int_u64 => return self.genTypedValue(Type.usize, val),
             else => return self.fail("TODO: Implement zig decl gen for pointer type value: '{s}'", .{@tagName(val.tag())}),
         },
         .ErrorUnion => {
@@ -1309,22 +1346,16 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type) InnerError!CallWValu
     return result;
 }
 
-/// Retrieves the stack pointer's value from the global variable and stores
-/// it in a local
+/// Creates a local for the initial stack value
 /// Asserts `initial_stack_value` is `.none`
 fn initializeStack(self: *Self) !void {
     assert(self.initial_stack_value == .none);
-    // reserve space for immediate value
-    // get stack pointer global
-    try self.addLabel(.global_get, 0);
-
     // Reserve a local to store the current stack pointer
     // We can later use this local to set the stack pointer back to the value
     // we have stored here.
-    self.initial_stack_value = try self.allocLocal(Type.initTag(.i32));
-
-    // save the value to the local
-    try self.addLabel(.local_set, self.initial_stack_value.local);
+    self.initial_stack_value = try self.allocLocal(Type.usize);
+    // Also reserve a local to store the bottom stack value
+    self.bottom_stack_value = try self.allocLocal(Type.usize);
 }
 
 /// Reads the stack pointer from `Context.initial_stack_value` and writes it
@@ -1339,36 +1370,75 @@ fn restoreStackPointer(self: *Self) !void {
     try self.addLabel(.global_set, 0);
 }
 
-/// Moves the stack pointer by given `offset`
-/// It does this by retrieving the stack pointer, subtracting `offset` and storing
-/// the result back into the stack pointer.
-fn moveStack(self: *Self, offset: u32, local: u32) !void {
-    if (offset == 0) return;
-    try self.addLabel(.global_get, 0);
-    try self.addImm32(@bitCast(i32, offset));
-    try self.addTag(.i32_sub);
-    try self.addLabel(.local_tee, local);
-    try self.addLabel(.global_set, 0);
+/// Saves the stack pointer position for the current stack size into a new local
+/// It does this by retrieving the bottom stack pointer, adding `self.stack_size` and storing
+/// the result back into the local.
+fn saveStack(self: *Self) !WValue { + const local = try self.allocLocal(Type.usize); + try self.addLabel(.local_get, self.bottom_stack_value.local); + try self.addImm32(@intCast(i32, self.stack_size)); + try self.addTag(.i32_add); + try self.addLabel(.local_set, local.local); + return local; } /// From a given type, will create space on the virtual stack to store the value of such type. /// This returns a `WValue` with its active tag set to `local`, containing the index to the local /// that points to the position on the virtual stack. This function should be used instead of -/// moveStack unless a local was already created to store the point. +/// moveStack unless a local was already created to store the pointer. /// /// Asserts Type has codegenbits fn allocStack(self: *Self, ty: Type) !WValue { assert(ty.hasRuntimeBits()); + if (self.initial_stack_value == .none) { + try self.initializeStack(); + } - // calculate needed stack space const abi_size = std.math.cast(u32, ty.abiSize(self.target)) catch { - return self.fail("Given type '{}' too big to fit into stack frame", .{ty}); + return self.fail("Type {} with ABI size of {d} exceeds stack frame size", .{ ty, ty.abiSize(self.target) }); }; + const abi_align = ty.abiAlignment(self.target); - // allocate a local using wasm's pointer size - const local = try self.allocLocal(Type.@"usize"); - try self.moveStack(abi_size, local.local); - return local; + if (abi_align > self.stack_alignment) { + self.stack_alignment = abi_align; + } + + const offset = std.mem.alignForwardGeneric(u32, self.stack_size, abi_align); + defer self.stack_size = offset + abi_size; + + // store the stack pointer and return a local to it + return self.saveStack(); +} + +/// From a given AIR instruction generates a pointer to the stack where +/// the value of its type will live. +/// This is different from allocStack where this will use the pointer's alignment +/// if it is set, to ensure the stack alignment will be set correctly. +fn allocStackPtr(self: *Self, inst: Air.Inst.Index) !WValue { + const ptr_ty = self.air.typeOfIndex(inst); + const pointee_ty = ptr_ty.childType(); + + if (self.initial_stack_value == .none) { + try self.initializeStack(); + } + + if (!pointee_ty.hasRuntimeBits()) { + return self.allocStack(Type.usize); // create a value containing just the stack pointer. 
+    }
+
+    const abi_alignment = ptr_ty.ptrAlignment(self.target);
+    const abi_size = std.math.cast(u32, pointee_ty.abiSize(self.target)) catch {
+        return self.fail("Type {} with ABI size of {d} exceeds stack frame size", .{ pointee_ty, pointee_ty.abiSize(self.target) });
+    };
+    if (abi_alignment > self.stack_alignment) {
+        self.stack_alignment = abi_alignment;
+    }
+
+    const offset = std.mem.alignForwardGeneric(u32, self.stack_size, abi_alignment);
+    defer self.stack_size = offset + abi_size;
+
+    // store the stack pointer and return a local to it
+    return self.saveStack();
 }
 
 /// From given zig bitsize, returns the wasm bitsize
@@ -1667,12 +1737,7 @@ fn airRetPtr(self: *Self, inst: Air.Inst.Index) InnerError!WValue {
     if (isByRef(child_type, self.target)) {
         return self.return_value;
     }
-
-    // Initialize the stack
-    if (self.initial_stack_value == .none) {
-        try self.initializeStack();
-    }
-    return self.allocStack(child_type);
+    return self.allocStackPtr(inst);
 }
 
 fn airRetLoad(self: *Self, inst: Air.Inst.Index) InnerError!WValue {
@@ -1764,20 +1829,7 @@ fn airCall(self: *Self, inst: Air.Inst.Index) InnerError!WValue {
 }
 
 fn airAlloc(self: *Self, inst: Air.Inst.Index) InnerError!WValue {
-    const pointee_type = self.air.typeOfIndex(inst).childType();
-
-    // Initialize the stack
-    if (self.initial_stack_value == .none) {
-        try self.initializeStack();
-    }
-
-    if (!pointee_type.hasRuntimeBits()) {
-        // when the pointee is zero-sized, we still want to create a pointer.
-        // but instead use a default pointer type as storage.
-        const zero_ptr = try self.allocStack(Type.usize);
-        return zero_ptr;
-    }
-    return self.allocStack(pointee_type);
+    return self.allocStackPtr(inst);
 }
 
 fn airStore(self: *Self, inst: Air.Inst.Index) InnerError!WValue {

From 588b88b98753f02061e562a9c15c2396bcd95dee Mon Sep 17 00:00:00 2001
From: Luuk de Gram
Date: Thu, 3 Feb 2022 22:25:46 +0100
Subject: [PATCH 4/4] Move passing behavior tests

Singular tests (such as the bug ones) are moved to the top level with
exclusions for non-passing backends. The big behavior tests such as
array_llvm and slice are moved to the inner scope with the C backend
disabled.
They all pass for the wasm backend now --- src/arch/wasm/CodeGen.zig | 2 +- test/behavior.zig | 12 ++++++------ test/behavior/array_llvm.zig | 18 ++++++++++++++++++ test/behavior/bugs/1025.zig | 4 ++++ test/behavior/bugs/1741.zig | 3 +++ test/behavior/bugs/1914.zig | 7 +++++++ test/behavior/slice.zig | 13 +++++++++++++ 7 files changed, 52 insertions(+), 7 deletions(-) diff --git a/src/arch/wasm/CodeGen.zig b/src/arch/wasm/CodeGen.zig index d2db5fd92b..67aa9a6c88 100644 --- a/src/arch/wasm/CodeGen.zig +++ b/src/arch/wasm/CodeGen.zig @@ -1198,7 +1198,7 @@ pub const DeclGen = struct { const elem_ptr = val.castTag(.elem_ptr).?.data; const elem_size = ty.childType().abiSize(self.target()); const offset = elem_ptr.index * elem_size; - return self.lowerParentPtr(elem_ptr.array_ptr, offset); + return self.lowerParentPtr(elem_ptr.array_ptr, @intCast(usize, offset)); }, .int_u64 => return self.genTypedValue(Type.usize, val), else => return self.fail("TODO: Implement zig decl gen for pointer type value: '{s}'", .{@tagName(val.tag())}), diff --git a/test/behavior.zig b/test/behavior.zig index a0db9b9f57..0f74ed7d59 100644 --- a/test/behavior.zig +++ b/test/behavior.zig @@ -10,6 +10,7 @@ test { _ = @import("behavior/bugs/655.zig"); _ = @import("behavior/bugs/656.zig"); _ = @import("behavior/bugs/679.zig"); + _ = @import("behavior/bugs/1025.zig"); _ = @import("behavior/bugs/1111.zig"); _ = @import("behavior/bugs/1277.zig"); _ = @import("behavior/bugs/1310.zig"); @@ -17,6 +18,8 @@ test { _ = @import("behavior/bugs/1486.zig"); _ = @import("behavior/bugs/1500.zig"); _ = @import("behavior/bugs/1735.zig"); + _ = @import("behavior/bugs/1741.zig"); + _ = @import("behavior/bugs/1914.zig"); _ = @import("behavior/bugs/2006.zig"); _ = @import("behavior/bugs/2346.zig"); _ = @import("behavior/bugs/3112.zig"); @@ -38,7 +41,8 @@ test { _ = @import("behavior/struct.zig"); if (builtin.zig_backend != .stage2_arm and builtin.zig_backend != .stage2_x86_64) { - // Tests that pass for stage1, llvm backend, C backend, wasm backend. + // Tests that pass (partly) for stage1, llvm backend, C backend, wasm backend. + _ = @import("behavior/array_llvm.zig"); _ = @import("behavior/basic.zig"); _ = @import("behavior/bitcast.zig"); _ = @import("behavior/bugs/624.zig"); @@ -69,6 +73,7 @@ test { _ = @import("behavior/pointers.zig"); _ = @import("behavior/ptrcast.zig"); _ = @import("behavior/ref_var_in_if_after_if_2nd_switch_prong.zig"); + _ = @import("behavior/slice.zig"); _ = @import("behavior/src.zig"); _ = @import("behavior/this.zig"); _ = @import("behavior/try.zig"); @@ -88,11 +93,7 @@ test { if (builtin.zig_backend != .stage2_c) { // Tests that pass for stage1 and the llvm backend. 
- _ = @import("behavior/array_llvm.zig"); _ = @import("behavior/atomics.zig"); - _ = @import("behavior/bugs/1025.zig"); - _ = @import("behavior/bugs/1741.zig"); - _ = @import("behavior/bugs/1914.zig"); _ = @import("behavior/bugs/2578.zig"); _ = @import("behavior/bugs/3007.zig"); _ = @import("behavior/bugs/9584.zig"); @@ -108,7 +109,6 @@ test { _ = @import("behavior/popcount.zig"); _ = @import("behavior/saturating_arithmetic.zig"); _ = @import("behavior/sizeof_and_typeof.zig"); - _ = @import("behavior/slice.zig"); _ = @import("behavior/struct_llvm.zig"); _ = @import("behavior/switch.zig"); _ = @import("behavior/widening.zig"); diff --git a/test/behavior/array_llvm.zig b/test/behavior/array_llvm.zig index 5be5974fff..c3df5ba837 100644 --- a/test/behavior/array_llvm.zig +++ b/test/behavior/array_llvm.zig @@ -7,6 +7,7 @@ var s_array: [8]Sub = undefined; const Sub = struct { b: u8 }; const Str = struct { a: []Sub }; test "set global var array via slice embedded in struct" { + if (@import("builtin").zig_backend == .stage2_c) return error.SkipZigTest; // TODO var s = Str{ .a = s_array[0..] }; s.a[0].b = 1; @@ -19,6 +20,7 @@ test "set global var array via slice embedded in struct" { } test "read/write through global variable array of struct fields initialized via array mult" { + if (@import("builtin").zig_backend == .stage2_c) return error.SkipZigTest; // TODO const S = struct { fn doTheTest() !void { try expect(storage[0].term == 1); @@ -36,6 +38,7 @@ test "read/write through global variable array of struct fields initialized via } test "implicit cast single-item pointer" { + if (@import("builtin").zig_backend == .stage2_c) return error.SkipZigTest; // TODO try testImplicitCastSingleItemPtr(); comptime try testImplicitCastSingleItemPtr(); } @@ -52,6 +55,7 @@ fn testArrayByValAtComptime(b: [2]u8) u8 { } test "comptime evaluating function that takes array by value" { + if (@import("builtin").zig_backend == .stage2_c) return error.SkipZigTest; // TODO const arr = [_]u8{ 1, 2 }; const x = comptime testArrayByValAtComptime(arr); const y = comptime testArrayByValAtComptime(arr); @@ -60,12 +64,14 @@ test "comptime evaluating function that takes array by value" { } test "runtime initialize array elem and then implicit cast to slice" { + if (@import("builtin").zig_backend == .stage2_c) return error.SkipZigTest; // TODO var two: i32 = 2; const x: []const i32 = &[_]i32{two}; try expect(x[0] == 2); } test "array literal as argument to function" { + if (@import("builtin").zig_backend == .stage2_c) return error.SkipZigTest; // TODO const S = struct { fn entry(two: i32) !void { try foo(&[_]i32{ 1, 2, 3 }); @@ -90,6 +96,7 @@ test "array literal as argument to function" { } test "double nested array to const slice cast in array literal" { + if (@import("builtin").zig_backend == .stage2_c) return error.SkipZigTest; // TODO const S = struct { fn entry(two: i32) !void { const cases = [_][]const []const i32{ @@ -147,6 +154,7 @@ test "double nested array to const slice cast in array literal" { } test "anonymous literal in array" { + if (@import("builtin").zig_backend == .stage2_c) return error.SkipZigTest; // TODO const S = struct { const Foo = struct { a: usize = 2, @@ -168,6 +176,7 @@ test "anonymous literal in array" { } test "access the null element of a null terminated array" { + if (@import("builtin").zig_backend == .stage2_c) return error.SkipZigTest; // TODO const S = struct { fn doTheTest() !void { var array: [4:0]u8 = .{ 'a', 'o', 'e', 'u' }; @@ -181,6 +190,7 @@ test "access the null element of a null 
terminated array" { } test "type deduction for array subscript expression" { + if (@import("builtin").zig_backend == .stage2_c) return error.SkipZigTest; // TODO const S = struct { fn doTheTest() !void { var array = [_]u8{ 0x55, 0xAA }; @@ -196,6 +206,8 @@ test "type deduction for array subscript expression" { test "sentinel element count towards the ABI size calculation" { if (@import("builtin").zig_backend == .stage2_llvm) return error.SkipZigTest; // TODO + if (@import("builtin").zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO + if (@import("builtin").zig_backend == .stage2_c) return error.SkipZigTest; // TODO const S = struct { fn doTheTest() !void { @@ -218,6 +230,8 @@ test "sentinel element count towards the ABI size calculation" { test "zero-sized array with recursive type definition" { if (@import("builtin").zig_backend == .stage2_llvm) return error.SkipZigTest; // TODO + if (@import("builtin").zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO + if (@import("builtin").zig_backend == .stage2_c) return error.SkipZigTest; // TODO const U = struct { fn foo(comptime T: type, comptime n: usize) type { @@ -237,6 +251,7 @@ test "zero-sized array with recursive type definition" { } test "type coercion of anon struct literal to array" { + if (@import("builtin").zig_backend == .stage2_c) return error.SkipZigTest; // TODO const S = struct { const U = union { a: u32, @@ -253,6 +268,7 @@ test "type coercion of anon struct literal to array" { try expect(arr1[2] == 54); if (@import("builtin").zig_backend == .stage2_llvm) return error.SkipZigTest; // TODO + if (@import("builtin").zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO var x2: U = .{ .a = 42 }; const t2 = .{ x2, .{ .b = true }, .{ .c = "hello" } }; @@ -268,6 +284,8 @@ test "type coercion of anon struct literal to array" { test "type coercion of pointer to anon struct literal to pointer to array" { if (@import("builtin").zig_backend == .stage2_llvm) return error.SkipZigTest; // TODO + if (@import("builtin").zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO + if (@import("builtin").zig_backend == .stage2_c) return error.SkipZigTest; // TODO const S = struct { const U = union { diff --git a/test/behavior/bugs/1025.zig b/test/behavior/bugs/1025.zig index 69ee77eea1..fa72e522de 100644 --- a/test/behavior/bugs/1025.zig +++ b/test/behavior/bugs/1025.zig @@ -1,3 +1,5 @@ +const builtin = @import("builtin"); + const A = struct { B: type, }; @@ -7,6 +9,8 @@ fn getA() A { } test "bug 1025" { + if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; + if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; const a = getA(); try @import("std").testing.expect(a.B == u8); } diff --git a/test/behavior/bugs/1741.zig b/test/behavior/bugs/1741.zig index 8873de9b49..280aafc52e 100644 --- a/test/behavior/bugs/1741.zig +++ b/test/behavior/bugs/1741.zig @@ -1,6 +1,9 @@ const std = @import("std"); +const builtin = @import("builtin"); test "fixed" { + if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; + if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; const x: f32 align(128) = 12.34; try std.testing.expect(@ptrToInt(&x) % 128 == 0); } diff --git a/test/behavior/bugs/1914.zig b/test/behavior/bugs/1914.zig index 2c9e836e6a..6462937351 100644 --- a/test/behavior/bugs/1914.zig +++ b/test/behavior/bugs/1914.zig @@ -1,4 +1,5 @@ const std = @import("std"); +const builtin = @import("builtin"); const A = struct { b_list_pointer: *const []B, @@ -11,6 +12,9 @@ 
const b_list: []B = &[_]B{}; const a = A{ .b_list_pointer = &b_list }; test "segfault bug" { + if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; + if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; const assert = std.debug.assert; const obj = B{ .a_pointer = &a }; assert(obj.a_pointer == &a); // this makes zig crash @@ -27,5 +31,8 @@ pub const B2 = struct { var b_value = B2{ .pointer_array = &[_]*A2{} }; test "basic stuff" { + if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; + if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; + if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO std.debug.assert(&b_value == &b_value); } diff --git a/test/behavior/slice.zig b/test/behavior/slice.zig index 01ae10ee4e..0b01139800 100644 --- a/test/behavior/slice.zig +++ b/test/behavior/slice.zig @@ -27,6 +27,7 @@ comptime { } test "slicing" { + if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO var array: [20]i32 = undefined; array[5] = 1234; @@ -43,6 +44,7 @@ test "slicing" { } test "const slice" { + if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO comptime { const a = "1234567890"; try expect(a.len == 10); @@ -53,6 +55,7 @@ test "const slice" { } test "comptime slice of undefined pointer of length 0" { + if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO const slice1 = @as([*]i32, undefined)[0..0]; try expect(slice1.len == 0); const slice2 = @as([*]i32, undefined)[100..100]; @@ -60,6 +63,7 @@ test "comptime slice of undefined pointer of length 0" { } test "implicitly cast array of size 0 to slice" { + if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO var msg = [_]u8{}; try assertLenIsZero(&msg); } @@ -69,6 +73,7 @@ fn assertLenIsZero(msg: []const u8) !void { } test "access len index of sentinel-terminated slice" { + if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO const S = struct { fn doTheTest() !void { var slice: [:0]const u8 = "hello"; @@ -82,6 +87,7 @@ test "access len index of sentinel-terminated slice" { } test "comptime slice of slice preserves comptime var" { + if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO comptime { var buff: [10]u8 = undefined; buff[0..][0..][0] = 1; @@ -90,6 +96,7 @@ test "comptime slice of slice preserves comptime var" { } test "slice of type" { + if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO comptime { var types_array = [_]type{ i32, f64, type }; for (types_array) |T, i| { @@ -112,6 +119,7 @@ test "slice of type" { } test "generic malloc free" { + if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO const a = memAlloc(u8, 10) catch unreachable; memFree(u8, a); } @@ -124,6 +132,7 @@ fn memFree(comptime T: type, memory: []T) void { } test "slice of hardcoded address to pointer" { + if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO const S = struct { fn doTheTest() !void { const pointer = @intToPtr([*]u8, 0x04)[0..2]; @@ -138,6 +147,7 @@ test "slice of hardcoded address to pointer" { } test "comptime slice of pointer preserves comptime var" { + if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO comptime { var buff: [10]u8 = undefined; var a = @ptrCast([*]u8, &buff); @@ -147,6 +157,7 @@ test "comptime slice of pointer preserves comptime var" { } test "comptime pointer cast array and then slice" { + 
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO const array = [_]u8{ 1, 2, 3, 4, 5, 6, 7, 8 }; const ptrA: [*]const u8 = @ptrCast([*]const u8, &array); @@ -160,6 +171,7 @@ test "comptime pointer cast array and then slice" { } test "slicing zero length array" { + if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO const s1 = ""[0..]; const s2 = ([_]u32{})[0..]; try expect(s1.len == 0); @@ -171,6 +183,7 @@ test "slicing zero length array" { const x = @intToPtr([*]i32, 0x1000)[0..0x500]; const y = x[0x100..]; test "compile time slice of pointer to hard coded address" { + if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage1) return error.SkipZigTest; try expect(@ptrToInt(x) == 0x1000);