From ca2bd6d6ef55f02100a99b1012529c3804db75d7 Mon Sep 17 00:00:00 2001 From: Pavel Verigo Date: Thu, 27 Feb 2025 02:06:20 +0100 Subject: [PATCH 1/9] stage2-wasm: fix comparing and storing optionals --- src/arch/wasm/CodeGen.zig | 52 +++++++++++++++++++++++--------------- test/behavior/array.zig | 2 -- test/behavior/optional.zig | 3 --- 3 files changed, 32 insertions(+), 25 deletions(-) diff --git a/src/arch/wasm/CodeGen.zig b/src/arch/wasm/CodeGen.zig index 641347bee1..a0de387dfd 100644 --- a/src/arch/wasm/CodeGen.zig +++ b/src/arch/wasm/CodeGen.zig @@ -2361,29 +2361,32 @@ fn store(cg: *CodeGen, lhs: WValue, rhs: WValue, ty: Type, offset: u32) InnerErr .error_union => { const pl_ty = ty.errorUnionPayload(zcu); if (!pl_ty.hasRuntimeBitsIgnoreComptime(zcu)) { - return cg.store(lhs, rhs, Type.anyerror, 0); + return cg.store(lhs, rhs, Type.anyerror, offset); } const len = @as(u32, @intCast(abi_size)); + assert(offset == 0); return cg.memcpy(lhs, rhs, .{ .imm32 = len }); }, .optional => { if (ty.isPtrLikeOptional(zcu)) { - return cg.store(lhs, rhs, Type.usize, 0); + return cg.store(lhs, rhs, Type.usize, offset); } const pl_ty = ty.optionalChild(zcu); if (!pl_ty.hasRuntimeBitsIgnoreComptime(zcu)) { - return cg.store(lhs, rhs, Type.u8, 0); + return cg.store(lhs, rhs, Type.u8, offset); } if (pl_ty.zigTypeTag(zcu) == .error_set) { - return cg.store(lhs, rhs, Type.anyerror, 0); + return cg.store(lhs, rhs, Type.anyerror, offset); } const len = @as(u32, @intCast(abi_size)); + assert(offset == 0); return cg.memcpy(lhs, rhs, .{ .imm32 = len }); }, .@"struct", .array, .@"union" => if (isByRef(ty, zcu, cg.target)) { const len = @as(u32, @intCast(abi_size)); + assert(offset == 0); return cg.memcpy(lhs, rhs, .{ .imm32 = len }); }, .vector => switch (determineSimdStoreStrategy(ty, zcu, cg.target)) { @@ -2407,6 +2410,7 @@ fn store(cg: *CodeGen, lhs: WValue, rhs: WValue, ty: Type, offset: u32) InnerErr }, .pointer => { if (ty.isSlice(zcu)) { + assert(offset == 0); // store pointer first // lower it to the stack so we do not have to store rhs into a local first try cg.emitWValue(lhs); @@ -2421,6 +2425,7 @@ fn store(cg: *CodeGen, lhs: WValue, rhs: WValue, ty: Type, offset: u32) InnerErr } }, .int, .@"enum", .float => if (abi_size > 8 and abi_size <= 16) { + assert(offset == 0); try cg.emitWValue(lhs); const lsb = try cg.load(rhs, Type.u64, 0); try cg.store(.stack, lsb, Type.u64, 0 + lhs.offset()); @@ -2430,6 +2435,7 @@ fn store(cg: *CodeGen, lhs: WValue, rhs: WValue, ty: Type, offset: u32) InnerErr try cg.store(.stack, msb, Type.u64, 8 + lhs.offset()); return; } else if (abi_size > 16) { + assert(offset == 0); try cg.memcpy(lhs, rhs, .{ .imm32 = @as(u32, @intCast(ty.abiSize(zcu))) }); }, else => if (abi_size > 8) { @@ -4438,9 +4444,6 @@ fn airOptionalPayloadPtrSet(cg: *CodeGen, inst: Air.Inst.Index) InnerError!void const operand = try cg.resolveInst(ty_op.operand); const opt_ty = cg.typeOf(ty_op.operand).childType(zcu); const payload_ty = opt_ty.optionalChild(zcu); - if (!payload_ty.hasRuntimeBitsIgnoreComptime(zcu)) { - return cg.fail("TODO: Implement OptionalPayloadPtrSet for optional with zero-sized type {}", .{payload_ty.fmtDebug()}); - } if (opt_ty.optionalReprIsPayload(zcu)) { return cg.finishAir(inst, operand, &.{ty_op.operand}); @@ -5407,31 +5410,40 @@ fn cmpOptionals(cg: *CodeGen, lhs: WValue, rhs: WValue, operand_ty: Type, op: st assert(operand_ty.hasRuntimeBitsIgnoreComptime(zcu)); assert(op == .eq or op == .neq); const payload_ty = operand_ty.optionalChild(zcu); + assert(!isByRef(payload_ty, zcu, 
cg.target)); - // We store the final result in here that will be validated - // if the optional is truly equal. - var result = try cg.ensureAllocLocal(Type.i32); + var result = try cg.allocLocal(Type.i32); defer result.free(cg); + var lhs_null = try cg.allocLocal(Type.i32); + defer lhs_null.free(cg); + try cg.startBlock(.block, .empty); + + try cg.addImm32(if (op == .eq) 0 else 1); + try cg.addLocal(.local_set, result.local.value); + _ = try cg.isNull(lhs, operand_ty, .i32_eq); + try cg.addLocal(.local_tee, lhs_null.local.value); _ = try cg.isNull(rhs, operand_ty, .i32_eq); - try cg.addTag(.i32_ne); // inverse so we can exit early - try cg.addLabel(.br_if, 0); + try cg.addTag(.i32_ne); + try cg.addLabel(.br_if, 0); // only one is null + + try cg.addImm32(if (op == .eq) 1 else 0); + try cg.addLocal(.local_set, result.local.value); + + try cg.addLocal(.local_get, lhs_null.local.value); + try cg.addLabel(.br_if, 0); // both are null _ = try cg.load(lhs, payload_ty, 0); _ = try cg.load(rhs, payload_ty, 0); - const opcode = buildOpcode(.{ .op = .ne, .valtype1 = typeToValtype(payload_ty, zcu, cg.target) }); - try cg.addTag(Mir.Inst.Tag.fromOpcode(opcode)); - try cg.addLabel(.br_if, 0); - - try cg.addImm32(1); + _ = try cg.cmp(.stack, .stack, payload_ty, op); try cg.addLocal(.local_set, result.local.value); + try cg.endBlock(); - try cg.emitWValue(result); - try cg.addImm32(0); - try cg.addTag(if (op == .eq) .i32_ne else .i32_eq); + try cg.addLocal(.local_get, result.local.value); + return .stack; } diff --git a/test/behavior/array.zig b/test/behavior/array.zig index 14b2a9694b..a258c49d89 100644 --- a/test/behavior/array.zig +++ b/test/behavior/array.zig @@ -1049,7 +1049,6 @@ test "@splat array with sentinel" { if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; - if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO const S = struct { @@ -1074,7 +1073,6 @@ test "@splat zero-length array" { if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; - if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO const S = struct { diff --git a/test/behavior/optional.zig b/test/behavior/optional.zig index ca011ac079..0b1e6a9d8a 100644 --- a/test/behavior/optional.zig +++ b/test/behavior/optional.zig @@ -59,7 +59,6 @@ fn testNullPtrsEql() !void { } test "optional with zero-bit type" { - if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest; if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; @@ -212,7 +211,6 @@ test "equality compare optionals and non-optionals" { } test "compare optionals with modified payloads" { - if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; var lhs: ?bool = false; @@ -643,7 +641,6 @@ test "result location initialization of optional with OPV payload" { if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == 
.stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; const S = struct { From a429d04ba9cb1205d88211b7b39cbd68113b7178 Mon Sep 17 00:00:00 2001 From: Pavel Verigo Date: Thu, 27 Feb 2025 14:37:33 +0100 Subject: [PATCH 2/9] stage2-wasm: enable already working tests --- test/behavior/call.zig | 2 -- test/behavior/cast.zig | 2 -- test/behavior/cast_int.zig | 1 - test/behavior/defer.zig | 1 - test/behavior/error.zig | 1 - test/behavior/floatop.zig | 3 --- test/behavior/int128.zig | 1 - test/behavior/packed_struct_explicit_backing_int.zig | 1 - 8 files changed, 12 deletions(-) diff --git a/test/behavior/call.zig b/test/behavior/call.zig index b3928b880a..cc5962114d 100644 --- a/test/behavior/call.zig +++ b/test/behavior/call.zig @@ -439,7 +439,6 @@ test "method call as parameter type" { } test "non-anytype generic parameters provide result type" { - if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO @@ -469,7 +468,6 @@ test "non-anytype generic parameters provide result type" { } test "argument to generic function has correct result type" { - if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO diff --git a/test/behavior/cast.zig b/test/behavior/cast.zig index 3093800377..122715bea1 100644 --- a/test/behavior/cast.zig +++ b/test/behavior/cast.zig @@ -159,7 +159,6 @@ test "@floatFromInt(f80)" { test "@intFromFloat" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; @@ -2639,7 +2638,6 @@ test "numeric coercions with undefined" { } test "15-bit int to float" { - if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; var a: u15 = 42; diff --git a/test/behavior/cast_int.zig b/test/behavior/cast_int.zig index 9cbcddc191..34bd690d49 100644 --- a/test/behavior/cast_int.zig +++ b/test/behavior/cast_int.zig @@ -214,7 +214,6 @@ test "load non byte-sized value in union" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; - if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; // note: this bug is triggered by the == operator, expectEqual will hide it diff --git a/test/behavior/defer.zig b/test/behavior/defer.zig index a495bbb4ad..3a22459092 100644 --- a/test/behavior/defer.zig +++ b/test/behavior/defer.zig @@ -134,7 +134,6 @@ test "errdefer with payload" { 
test "reference to errdefer payload" { if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; diff --git a/test/behavior/error.zig b/test/behavior/error.zig index 216e69a1b7..6969b80e78 100644 --- a/test/behavior/error.zig +++ b/test/behavior/error.zig @@ -1107,7 +1107,6 @@ test "result location initialization of error union with OPV payload" { } test "return error union with i65" { - if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; diff --git a/test/behavior/floatop.zig b/test/behavior/floatop.zig index 77fa4c593f..d80ef18b6d 100644 --- a/test/behavior/floatop.zig +++ b/test/behavior/floatop.zig @@ -123,7 +123,6 @@ fn testMul(comptime T: type) !void { test "cmp f16" { if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest; if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; if (builtin.cpu.arch.isArm() and builtin.target.abi.float() == .soft) return error.SkipZigTest; // https://github.com/ziglang/zig/issues/21234 @@ -1536,7 +1535,6 @@ test "neg f16" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest; if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; @@ -1553,7 +1551,6 @@ test "neg f32/f64" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; try testNeg(f32); diff --git a/test/behavior/int128.zig b/test/behavior/int128.zig index 544b38fca6..a88b6572b0 100644 --- a/test/behavior/int128.zig +++ b/test/behavior/int128.zig @@ -68,7 +68,6 @@ test "int128" { } test "truncate int128" { - if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO diff --git a/test/behavior/packed_struct_explicit_backing_int.zig b/test/behavior/packed_struct_explicit_backing_int.zig index c1bc2426d8..215c3efd38 100644 --- a/test/behavior/packed_struct_explicit_backing_int.zig +++ b/test/behavior/packed_struct_explicit_backing_int.zig @@ -5,7 +5,6 @@ const expectEqual = 
std.testing.expectEqual; const native_endian = builtin.cpu.arch.endian(); test "packed struct explicit backing integer" { - if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO From 15bc2ab0a868fed31780d73451db4d1c69e7b489 Mon Sep 17 00:00:00 2001 From: Pavel Verigo Date: Thu, 27 Feb 2025 15:12:18 +0100 Subject: [PATCH 3/9] stage2-wasm: clz fix --- src/arch/wasm/CodeGen.zig | 14 ++++++++++++-- test/behavior/math.zig | 1 - 2 files changed, 12 insertions(+), 3 deletions(-) diff --git a/src/arch/wasm/CodeGen.zig b/src/arch/wasm/CodeGen.zig index a0de387dfd..dbd650ab01 100644 --- a/src/arch/wasm/CodeGen.zig +++ b/src/arch/wasm/CodeGen.zig @@ -6272,11 +6272,21 @@ fn airClz(cg: *CodeGen, inst: Air.Inst.Index) InnerError!void { switch (wasm_bits) { 32 => { - try cg.emitWValue(operand); + if (int_info.signedness == .signed) { + const mask = ~@as(u32, 0) >> @intCast(32 - int_info.bits); + _ = try cg.binOp(operand, .{ .imm32 = mask }, ty, .@"and"); + } else { + try cg.emitWValue(operand); + } try cg.addTag(.i32_clz); }, 64 => { - try cg.emitWValue(operand); + if (int_info.signedness == .signed) { + const mask = ~@as(u64, 0) >> @intCast(64 - int_info.bits); + _ = try cg.binOp(operand, .{ .imm64 = mask }, ty, .@"and"); + } else { + try cg.emitWValue(operand); + } try cg.addTag(.i64_clz); try cg.addTag(.i32_wrap_i64); }, diff --git a/test/behavior/math.zig b/test/behavior/math.zig index 76c55c2549..9c9ee012a8 100644 --- a/test/behavior/math.zig +++ b/test/behavior/math.zig @@ -65,7 +65,6 @@ test "@clz" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest; if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; From 0e109add37e304bf97b2f5f5d91be0870d18506c Mon Sep 17 00:00:00 2001 From: Pavel Verigo Date: Thu, 27 Feb 2025 18:25:38 +0100 Subject: [PATCH 4/9] stage2-wasm: clean memcpy + fix another bug in aggr_init for optionals arr --- src/arch/wasm/CodeGen.zig | 26 ++++++++++++++++---------- test/behavior/tuple.zig | 1 - 2 files changed, 16 insertions(+), 11 deletions(-) diff --git a/src/arch/wasm/CodeGen.zig b/src/arch/wasm/CodeGen.zig index dbd650ab01..aaa891f423 100644 --- a/src/arch/wasm/CodeGen.zig +++ b/src/arch/wasm/CodeGen.zig @@ -1588,12 +1588,18 @@ fn toWasmBits(bits: u16) ?u16 { /// Performs a copy of bytes for a given type. Copying all bytes /// from rhs to lhs. fn memcpy(cg: *CodeGen, dst: WValue, src: WValue, len: WValue) !void { + const len_known_neq_0 = switch (len) { + .imm32 => |val| if (val != 0) true else return, + .imm64 => |val| if (val != 0) true else return, + else => false, + }; // When bulk_memory is enabled, we lower it to wasm's memcpy instruction. 
// If not, we lower it ourselves manually if (std.Target.wasm.featureSetHas(cg.target.cpu.features, .bulk_memory)) { const len0_ok = std.Target.wasm.featureSetHas(cg.target.cpu.features, .nontrapping_bulk_memory_len0); + const emit_check = !(len0_ok or len_known_neq_0); - if (!len0_ok) { + if (emit_check) { try cg.startBlock(.block, .empty); // Even if `len` is zero, the spec requires an implementation to trap if `src + len` or @@ -1616,7 +1622,7 @@ fn memcpy(cg: *CodeGen, dst: WValue, src: WValue, len: WValue) !void { try cg.emitWValue(len); try cg.addExtended(.memory_copy); - if (!len0_ok) { + if (emit_check) { try cg.endBlock(); } @@ -5196,9 +5202,7 @@ fn airAggregateInit(cg: *CodeGen, inst: Air.Inst.Index) InnerError!void { const result = try cg.allocStack(result_ty); const elem_ty = result_ty.childType(zcu); const elem_size = @as(u32, @intCast(elem_ty.abiSize(zcu))); - const sentinel = if (result_ty.sentinel(zcu)) |sent| blk: { - break :blk try cg.lowerConstant(sent, elem_ty); - } else null; + const sentinel = result_ty.sentinel(zcu); // When the element type is by reference, we must copy the entire // value. It is therefore safer to move the offset pointer and store @@ -5211,12 +5215,13 @@ fn airAggregateInit(cg: *CodeGen, inst: Air.Inst.Index) InnerError!void { const elem_val = try cg.resolveInst(elem); try cg.store(offset, elem_val, elem_ty, 0); - if (elem_index < elements.len - 1 and sentinel == null) { + if (elem_index < elements.len - 1 or sentinel != null) { _ = try cg.buildPointerOffset(offset, elem_size, .modify); } } - if (sentinel) |sent| { - try cg.store(offset, sent, elem_ty, 0); + if (sentinel) |s| { + const val = try cg.resolveInst(Air.internedToRef(s.toIntern())); + try cg.store(offset, val, elem_ty, 0); } } else { var offset: u32 = 0; @@ -5225,8 +5230,9 @@ fn airAggregateInit(cg: *CodeGen, inst: Air.Inst.Index) InnerError!void { try cg.store(result, elem_val, elem_ty, offset); offset += elem_size; } - if (sentinel) |sent| { - try cg.store(result, sent, elem_ty, offset); + if (sentinel) |s| { + const val = try cg.resolveInst(Air.internedToRef(s.toIntern())); + try cg.store(result, val, elem_ty, offset); } } break :result_value result; diff --git a/test/behavior/tuple.zig b/test/behavior/tuple.zig index 0a0ed1d620..492730df61 100644 --- a/test/behavior/tuple.zig +++ b/test/behavior/tuple.zig @@ -507,7 +507,6 @@ test "tuple with runtime value coerced into a slice with a sentinel" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; From 07f14bd43b983de4467095b37613325b49022835 Mon Sep 17 00:00:00 2001 From: Pavel Verigo Date: Fri, 28 Feb 2025 00:47:18 +0100 Subject: [PATCH 5/9] stage2-wasm: fix error union handling --- src/arch/wasm/CodeGen.zig | 40 +++++++++++++++------- test/behavior/switch_on_captured_error.zig | 3 -- 2 files changed, 28 insertions(+), 15 deletions(-) diff --git a/src/arch/wasm/CodeGen.zig b/src/arch/wasm/CodeGen.zig index aaa891f423..d198cd7eb2 100644 --- a/src/arch/wasm/CodeGen.zig +++ b/src/arch/wasm/CodeGen.zig @@ -3130,7 +3130,13 @@ fn lowerPtr(cg: *CodeGen, ptr_val: InternPool.Index, prev_offset: u64) InnerErro .nav => |nav| return .{ .nav_ref = .{ 
.nav_index = nav, .offset = @intCast(offset) } }, .uav => |uav| return .{ .uav_ref = .{ .ip_index = uav.val, .offset = @intCast(offset), .orig_ptr_ty = uav.orig_ty } }, .int => return cg.lowerConstant(try pt.intValue(Type.usize, offset), Type.usize), - .eu_payload => return cg.fail("Wasm TODO: lower error union payload pointer", .{}), + .eu_payload => |eu_ptr| try cg.lowerPtr( + eu_ptr, + offset + codegen.errUnionPayloadOffset( + Value.fromInterned(eu_ptr).typeOf(zcu).childType(zcu), + zcu, + ), + ), .opt_payload => |opt_ptr| return cg.lowerPtr(opt_ptr, offset), .field => |field| { const base_ptr = Value.fromInterned(field.base); @@ -4179,52 +4185,62 @@ fn airIsErr(cg: *CodeGen, inst: Air.Inst.Index, opcode: std.wasm.Opcode) InnerEr return cg.finishAir(inst, result, &.{un_op}); } +/// E!T -> T op_is_ptr == false +/// *(E!T) -> *T op_is_prt == true fn airUnwrapErrUnionPayload(cg: *CodeGen, inst: Air.Inst.Index, op_is_ptr: bool) InnerError!void { const zcu = cg.pt.zcu; const ty_op = cg.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; const operand = try cg.resolveInst(ty_op.operand); const op_ty = cg.typeOf(ty_op.operand); - const err_ty = if (op_is_ptr) op_ty.childType(zcu) else op_ty; - const payload_ty = err_ty.errorUnionPayload(zcu); + const eu_ty = if (op_is_ptr) op_ty.childType(zcu) else op_ty; + const payload_ty = eu_ty.errorUnionPayload(zcu); const result: WValue = result: { if (!payload_ty.hasRuntimeBitsIgnoreComptime(zcu)) { if (op_is_ptr) { break :result cg.reuseOperand(ty_op.operand, operand); + } else { + break :result .none; } - break :result .none; } - const pl_offset = @as(u32, @intCast(errUnionPayloadOffset(payload_ty, zcu))); + const pl_offset: u32 = @intCast(errUnionPayloadOffset(payload_ty, zcu)); if (op_is_ptr or isByRef(payload_ty, zcu, cg.target)) { break :result try cg.buildPointerOffset(operand, pl_offset, .new); + } else { + assert(isByRef(eu_ty, zcu, cg.target)); + break :result try cg.load(operand, payload_ty, pl_offset); } - break :result try cg.load(operand, payload_ty, pl_offset); }; return cg.finishAir(inst, result, &.{ty_op.operand}); } +/// E!T -> E op_is_ptr == false +/// *(E!T) -> E op_is_prt == true +/// NOTE: op_is_ptr will not change return type fn airUnwrapErrUnionError(cg: *CodeGen, inst: Air.Inst.Index, op_is_ptr: bool) InnerError!void { const zcu = cg.pt.zcu; const ty_op = cg.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; const operand = try cg.resolveInst(ty_op.operand); const op_ty = cg.typeOf(ty_op.operand); - const err_ty = if (op_is_ptr) op_ty.childType(zcu) else op_ty; - const payload_ty = err_ty.errorUnionPayload(zcu); + const eu_ty = if (op_is_ptr) op_ty.childType(zcu) else op_ty; + const payload_ty = eu_ty.errorUnionPayload(zcu); const result: WValue = result: { - if (err_ty.errorUnionSet(zcu).errorSetIsEmpty(zcu)) { + if (eu_ty.errorUnionSet(zcu).errorSetIsEmpty(zcu)) { break :result .{ .imm32 = 0 }; } - if (op_is_ptr or !payload_ty.hasRuntimeBitsIgnoreComptime(zcu)) { + const err_offset: u32 = @intCast(errUnionErrorOffset(payload_ty, zcu)); + if (op_is_ptr or isByRef(eu_ty, zcu, cg.target)) { + break :result try cg.load(operand, Type.anyerror, err_offset); + } else { + assert(!payload_ty.hasRuntimeBitsIgnoreComptime(zcu)); break :result cg.reuseOperand(ty_op.operand, operand); } - - break :result try cg.load(operand, Type.anyerror, @intCast(errUnionErrorOffset(payload_ty, zcu))); }; return cg.finishAir(inst, result, &.{ty_op.operand}); } diff --git a/test/behavior/switch_on_captured_error.zig 
b/test/behavior/switch_on_captured_error.zig index a4bdc8755f..ead4411d21 100644 --- a/test/behavior/switch_on_captured_error.zig +++ b/test/behavior/switch_on_captured_error.zig @@ -7,7 +7,6 @@ const builtin = @import("builtin"); test "switch on error union catch capture" { if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; - if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const S = struct { @@ -302,7 +301,6 @@ test "switch on error union catch capture" { test "switch on error union if else capture" { if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; - if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const S = struct { @@ -794,7 +792,6 @@ test "switch on error union if else capture" { } fn testAddressOf() !void { - if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; { const a: anyerror!usize = 0; const ptr = &(if (a) |*v| v.* else |e| switch (e) { From 58b38238f51b6a268559144e8bbe7c0646170488 Mon Sep 17 00:00:00 2001 From: Pavel Verigo Date: Fri, 28 Feb 2025 01:14:40 +0100 Subject: [PATCH 6/9] stage2-wasm: enable undef test + ignore undef store/memset with safety off --- src/arch/wasm/CodeGen.zig | 18 ++++++++---------- test/behavior/undefined.zig | 1 - 2 files changed, 8 insertions(+), 11 deletions(-) diff --git a/src/arch/wasm/CodeGen.zig b/src/arch/wasm/CodeGen.zig index d198cd7eb2..29a264f973 100644 --- a/src/arch/wasm/CodeGen.zig +++ b/src/arch/wasm/CodeGen.zig @@ -2302,11 +2302,6 @@ fn airAlloc(cg: *CodeGen, inst: Air.Inst.Index) InnerError!void { fn airStore(cg: *CodeGen, inst: Air.Inst.Index, safety: bool) InnerError!void { const pt = cg.pt; const zcu = pt.zcu; - if (safety) { - // TODO if the value is undef, write 0xaa bytes to dest - } else { - // TODO if the value is undef, don't lower this instruction - } const bin_op = cg.air.instructions.items(.data)[@intFromEnum(inst)].bin_op; const lhs = try cg.resolveInst(bin_op.lhs); @@ -2315,6 +2310,10 @@ fn airStore(cg: *CodeGen, inst: Air.Inst.Index, safety: bool) InnerError!void { const ptr_info = ptr_ty.ptrInfo(zcu); const ty = ptr_ty.childType(zcu); + if (!safety and bin_op.rhs == .undef) { + return cg.finishAir(inst, .none, &.{ bin_op.lhs, bin_op.rhs }); + } + if (ptr_info.packed_offset.host_size == 0) { try cg.store(lhs, rhs, ty, 0); } else { @@ -4756,11 +4755,6 @@ fn airPtrBinOp(cg: *CodeGen, inst: Air.Inst.Index, op: Op) InnerError!void { fn airMemset(cg: *CodeGen, inst: Air.Inst.Index, safety: bool) InnerError!void { const zcu = cg.pt.zcu; - if (safety) { - // TODO if the value is undef, write 0xaa bytes to dest - } else { - // TODO if the value is undef, don't lower this instruction - } const bin_op = cg.air.instructions.items(.data)[@intFromEnum(inst)].bin_op; const ptr = try cg.resolveInst(bin_op.lhs); @@ -4777,6 +4771,10 @@ fn airMemset(cg: *CodeGen, inst: Air.Inst.Index, safety: bool) InnerError!void { else ptr_ty.childType(zcu); + if (!safety and bin_op.rhs == .undef) { + return cg.finishAir(inst, .none, &.{ bin_op.lhs, bin_op.rhs }); + } + const dst_ptr = try cg.sliceOrArrayPtr(ptr, ptr_ty); try cg.memset(elem_ty, dst_ptr, len, value); diff --git a/test/behavior/undefined.zig b/test/behavior/undefined.zig index d4f74fb78c..307deb84bf 100644 --- a/test/behavior/undefined.zig +++ b/test/behavior/undefined.zig @@ -103,7 +103,6 @@ test "reslice of undefined global var slice" { test "returned undef 
is 0xaa bytes when runtime safety is enabled" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; From 47cd0961cb938248a2bc65d2a9d49ba1d37b0764 Mon Sep 17 00:00:00 2001 From: Pavel Verigo Date: Sun, 2 Mar 2025 20:10:25 +0100 Subject: [PATCH 7/9] stage2-wasm: pass field_parent_ptr tests Handle packed containers, also fixes packed union lowering for non int type + union field pointer logic fix --- src/arch/wasm/CodeGen.zig | 61 +++++++++++++++++------------- test/behavior/field_parent_ptr.zig | 8 ---- 2 files changed, 34 insertions(+), 35 deletions(-) diff --git a/src/arch/wasm/CodeGen.zig b/src/arch/wasm/CodeGen.zig index 29a264f973..ac8e0a68e4 100644 --- a/src/arch/wasm/CodeGen.zig +++ b/src/arch/wasm/CodeGen.zig @@ -3154,19 +3154,7 @@ fn lowerPtr(cg: *CodeGen, ptr_val: InternPool.Index, prev_offset: u64) InnerErro .@"extern", .@"packed" => unreachable, }, .@"union" => switch (base_ty.containerLayout(zcu)) { - .auto => off: { - // Keep in sync with the `un` case of `generateSymbol`. - const layout = base_ty.unionGetLayout(zcu); - if (layout.payload_size == 0) break :off 0; - if (layout.tag_size == 0) break :off 0; - if (layout.tag_align.compare(.gte, layout.payload_align)) { - // Tag first. - break :off layout.tag_size; - } else { - // Payload first. - break :off 0; - } - }, + .auto => base_ty.structFieldOffset(@intCast(field.index), zcu), .@"extern", .@"packed" => unreachable, }, else => unreachable, @@ -3312,16 +3300,16 @@ fn lowerConstant(cg: *CodeGen, val: Value, ty: Type) InnerError!WValue { }, else => unreachable, }, - .un => |un| { - // in this case we have a packed union which will not be passed by reference. 
- const constant_ty = if (un.tag == .none) - try ty.unionBackingType(pt) - else field_ty: { - const union_obj = zcu.typeToUnion(ty).?; - const field_index = zcu.unionTagFieldIndex(union_obj, Value.fromInterned(un.tag)).?; - break :field_ty Type.fromInterned(union_obj.field_types.get(ip)[field_index]); - }; - return cg.lowerConstant(Value.fromInterned(un.val), constant_ty); + .un => { + const int_type = try pt.intType(.unsigned, @intCast(ty.bitSize(zcu))); + + var buf: [8]u8 = .{0} ** 8; // zero the buffer so we do not read 0xaa as integer + val.writeToPackedMemory(ty, pt, &buf, 0) catch unreachable; + const int_val = try pt.intValue( + int_type, + mem.readInt(u64, &buf, .little), + ); + return cg.lowerConstant(int_val, int_type); }, .memoized_call => unreachable, } @@ -3369,6 +3357,14 @@ fn emitUndefined(cg: *CodeGen, ty: Type) InnerError!WValue { const packed_struct = zcu.typeToPackedStruct(ty).?; return cg.emitUndefined(Type.fromInterned(packed_struct.backingIntTypeUnordered(ip))); }, + .@"union" => switch (ty.containerLayout(zcu)) { + .@"packed" => switch (ty.bitSize(zcu)) { + 0...32 => return .{ .imm32 = 0xaaaaaaaa }, + 33...64 => return .{ .imm64 = 0xaaaaaaaaaaaaaaaa }, + else => unreachable, + }, + else => unreachable, + }, else => return cg.fail("Wasm TODO: emitUndefined for type: {}\n", .{ty.zigTypeTag(zcu)}), } } @@ -4211,7 +4207,6 @@ fn airUnwrapErrUnionPayload(cg: *CodeGen, inst: Air.Inst.Index, op_is_ptr: bool) assert(isByRef(eu_ty, zcu, cg.target)); break :result try cg.load(operand, payload_ty, pl_offset); } - }; return cg.finishAir(inst, result, &.{ty_op.operand}); } @@ -5691,13 +5686,25 @@ fn airErrUnionPayloadPtrSet(cg: *CodeGen, inst: Air.Inst.Index) InnerError!void } fn airFieldParentPtr(cg: *CodeGen, inst: Air.Inst.Index) InnerError!void { - const zcu = cg.pt.zcu; + const pt = cg.pt; + const zcu = pt.zcu; const ty_pl = cg.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl; const extra = cg.air.extraData(Air.FieldParentPtr, ty_pl.payload).data; const field_ptr = try cg.resolveInst(extra.field_ptr); - const parent_ty = ty_pl.ty.toType().childType(zcu); - const field_offset = parent_ty.structFieldOffset(extra.field_index, zcu); + const parent_ptr_ty = cg.typeOfIndex(inst); + const parent_ty = parent_ptr_ty.childType(zcu); + const field_ptr_ty = cg.typeOf(extra.field_ptr); + const field_index = extra.field_index; + const field_offset = switch (parent_ty.containerLayout(zcu)) { + .auto, .@"extern" => parent_ty.structFieldOffset(field_index, zcu), + .@"packed" => offset: { + const parent_ptr_offset = parent_ptr_ty.ptrInfo(zcu).packed_offset.bit_offset; + const field_offset = if (zcu.typeToStruct(parent_ty)) |loaded_struct| pt.structPackedFieldBitOffset(loaded_struct, field_index) else 0; + const field_ptr_offset = field_ptr_ty.ptrInfo(zcu).packed_offset.bit_offset; + break :offset @divExact(parent_ptr_offset + field_offset - field_ptr_offset, 8); + }, + }; const result = if (field_offset != 0) result: { const base = try cg.buildPointerOffset(field_ptr, 0, .new); diff --git a/test/behavior/field_parent_ptr.zig b/test/behavior/field_parent_ptr.zig index 9d915815fd..c04e5f36ca 100644 --- a/test/behavior/field_parent_ptr.zig +++ b/test/behavior/field_parent_ptr.zig @@ -588,7 +588,6 @@ test "@fieldParentPtr extern struct last zero-bit field" { test "@fieldParentPtr unaligned packed struct" { if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; if (builtin.zig_backend == .stage2_llvm) return error.SkipZigTest; - if (builtin.zig_backend == .stage2_wasm) return 
error.SkipZigTest; if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; @@ -727,7 +726,6 @@ test "@fieldParentPtr unaligned packed struct" { test "@fieldParentPtr aligned packed struct" { if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; if (builtin.zig_backend == .stage2_llvm) return error.SkipZigTest; - if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; @@ -865,7 +863,6 @@ test "@fieldParentPtr aligned packed struct" { test "@fieldParentPtr nested packed struct" { if (builtin.zig_backend == .stage2_llvm) return error.SkipZigTest; - if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; @@ -1028,7 +1025,6 @@ test "@fieldParentPtr nested packed struct" { test "@fieldParentPtr packed struct first zero-bit field" { if (builtin.zig_backend == .stage2_llvm) return error.SkipZigTest; - if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; @@ -1134,7 +1130,6 @@ test "@fieldParentPtr packed struct first zero-bit field" { test "@fieldParentPtr packed struct middle zero-bit field" { if (builtin.zig_backend == .stage2_llvm) return error.SkipZigTest; - if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; @@ -1240,7 +1235,6 @@ test "@fieldParentPtr packed struct middle zero-bit field" { test "@fieldParentPtr packed struct last zero-bit field" { if (builtin.zig_backend == .stage2_llvm) return error.SkipZigTest; - if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; @@ -1753,7 +1747,6 @@ test "@fieldParentPtr extern union" { } test "@fieldParentPtr packed union" { - if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; if (builtin.target.cpu.arch.endian() == .big) return error.SkipZigTest; // TODO @@ -1892,7 +1885,6 @@ test "@fieldParentPtr packed union" { test "@fieldParentPtr tagged union all zero-bit fields" { if (builtin.zig_backend == .stage2_llvm) return error.SkipZigTest; - if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; From 911f4527f0ad166d84d77887602a784b1e801421 Mon Sep 17 00:00:00 2001 From: Pavel Verigo Date: Mon, 3 Mar 2025 23:21:15 +0100 Subject: [PATCH 8/9] stage2-wasm: behavior tests pass with ReleaseFast/Small --- src/arch/wasm/CodeGen.zig | 3 +++ 1 file changed, 3 insertions(+) diff --git a/src/arch/wasm/CodeGen.zig b/src/arch/wasm/CodeGen.zig index ac8e0a68e4..7b056a95cc 100644 --- a/src/arch/wasm/CodeGen.zig +++ b/src/arch/wasm/CodeGen.zig @@ -2362,6 +2362,9 @@ fn store(cg: *CodeGen, lhs: WValue, rhs: WValue, ty: Type, 
offset: u32) InnerErr const pt = cg.pt; const zcu = pt.zcu; const abi_size = ty.abiSize(zcu); + + if (!ty.hasRuntimeBitsIgnoreComptime(zcu)) return; + switch (ty.zigTypeTag(zcu)) { .error_union => { const pl_ty = ty.errorUnionPayload(zcu); From 33ad2c949e0355b680d93c2e7c3ba58a9b2c506c Mon Sep 17 00:00:00 2001 From: Pavel Verigo Date: Mon, 24 Mar 2025 14:54:31 +0100 Subject: [PATCH 9/9] stage2-wasm: packed store/load 128 bits --- src/arch/wasm/CodeGen.zig | 140 ++++++++++++++++++++------------ test/behavior/bitcast.zig | 2 - test/behavior/packed-struct.zig | 1 - test/behavior/struct.zig | 3 - 4 files changed, 86 insertions(+), 60 deletions(-) diff --git a/src/arch/wasm/CodeGen.zig b/src/arch/wasm/CodeGen.zig index 7b056a95cc..597e1ec75b 100644 --- a/src/arch/wasm/CodeGen.zig +++ b/src/arch/wasm/CodeGen.zig @@ -759,6 +759,16 @@ fn resolveInst(cg: *CodeGen, ref: Air.Inst.Ref) InnerError!WValue { return result; } +fn resolveValue(cg: *CodeGen, val: Value) InnerError!WValue { + const zcu = cg.pt.zcu; + const ty = val.typeOf(zcu); + + return if (isByRef(ty, zcu, cg.target)) + .{ .uav_ref = .{ .ip_index = val.toIntern() } } + else + try cg.lowerConstant(val, ty); +} + /// NOTE: if result == .stack, it will be stored in .local fn finishAir(cg: *CodeGen, inst: Air.Inst.Index, result: WValue, operands: []const Air.Inst.Ref) InnerError!void { assert(operands.len <= Liveness.bpi - 1); @@ -2319,39 +2329,56 @@ fn airStore(cg: *CodeGen, inst: Air.Inst.Index, safety: bool) InnerError!void { } else { // at this point we have a non-natural alignment, we must // load the value, and then shift+or the rhs into the result location. - const int_elem_ty = try pt.intType(.unsigned, ptr_info.packed_offset.host_size * 8); + const host_size = ptr_info.packed_offset.host_size * 8; + const host_ty = try pt.intType(.unsigned, host_size); + const bit_size: u16 = @intCast(ty.bitSize(zcu)); + const bit_offset = ptr_info.packed_offset.bit_offset; - if (isByRef(int_elem_ty, zcu, cg.target)) { - return cg.fail("TODO: airStore for pointers to bitfields with backing type larger than 64bits", .{}); + const mask_val = try cg.resolveValue(val: { + const limbs = try cg.gpa.alloc( + std.math.big.Limb, + std.math.big.int.calcTwosCompLimbCount(host_size) + 1, + ); + defer cg.gpa.free(limbs); + + var mask_bigint: std.math.big.int.Mutable = .{ .limbs = limbs, .positive = undefined, .len = undefined }; + mask_bigint.setTwosCompIntLimit(.max, .unsigned, host_size); + + if (bit_size != host_size) { + mask_bigint.shiftRight(mask_bigint.toConst(), host_size - bit_size); + } + if (bit_offset != 0) { + mask_bigint.shiftLeft(mask_bigint.toConst(), bit_offset); + } + mask_bigint.bitNotWrap(mask_bigint.toConst(), .unsigned, host_size); + + break :val try pt.intValue_big(host_ty, mask_bigint.toConst()); + }); + + const shift_val: WValue = if (33 <= host_size and host_size <= 64) + .{ .imm64 = bit_offset } + else + .{ .imm32 = bit_offset }; + + if (host_size <= 64) { + try cg.emitWValue(lhs); } - - var mask = @as(u64, @intCast((@as(u65, 1) << @as(u7, @intCast(ty.bitSize(zcu)))) - 1)); - mask <<= @as(u6, @intCast(ptr_info.packed_offset.bit_offset)); - mask ^= ~@as(u64, 0); - const shift_val: WValue = if (ptr_info.packed_offset.host_size <= 4) - .{ .imm32 = ptr_info.packed_offset.bit_offset } + const loaded = if (host_size <= 64) + try cg.load(lhs, host_ty, 0) else - .{ .imm64 = ptr_info.packed_offset.bit_offset }; - const mask_val: WValue = if (ptr_info.packed_offset.host_size <= 4) - .{ .imm32 = @as(u32, @truncate(mask)) } + lhs; + const anded = 
try cg.binOp(loaded, mask_val, host_ty, .@"and"); + const extended_value = try cg.intcast(rhs, ty, host_ty); + const shifted_value = if (bit_offset > 0) + try cg.binOp(extended_value, shift_val, host_ty, .shl) else - .{ .imm64 = mask }; - const wrap_mask_val: WValue = if (ptr_info.packed_offset.host_size <= 4) - .{ .imm32 = @truncate(~@as(u64, 0) >> @intCast(64 - ty.bitSize(zcu))) } - else - .{ .imm64 = ~@as(u64, 0) >> @intCast(64 - ty.bitSize(zcu)) }; - - try cg.emitWValue(lhs); - const loaded = try cg.load(lhs, int_elem_ty, 0); - const anded = try cg.binOp(loaded, mask_val, int_elem_ty, .@"and"); - const extended_value = try cg.intcast(rhs, ty, int_elem_ty); - const masked_value = try cg.binOp(extended_value, wrap_mask_val, int_elem_ty, .@"and"); - const shifted_value = if (ptr_info.packed_offset.bit_offset > 0) shifted: { - break :shifted try cg.binOp(masked_value, shift_val, int_elem_ty, .shl); - } else masked_value; - const result = try cg.binOp(anded, shifted_value, int_elem_ty, .@"or"); - // lhs is still on the stack - try cg.store(.stack, result, int_elem_ty, lhs.offset()); + extended_value; + const result = try cg.binOp(anded, shifted_value, host_ty, .@"or"); + if (host_size <= 64) { + try cg.store(.stack, result, host_ty, lhs.offset()); + } else { + try cg.store(lhs, result, host_ty, lhs.offset()); + } } return cg.finishAir(inst, .none, &.{ bin_op.lhs, bin_op.rhs }); @@ -2494,22 +2521,30 @@ fn airLoad(cg: *CodeGen, inst: Air.Inst.Index) InnerError!void { } if (ptr_info.packed_offset.host_size == 0) { - break :result try cg.load(operand, ty, 0); + const loaded = try cg.load(operand, ty, 0); + const ty_size = ty.abiSize(zcu); + if (ty.isAbiInt(zcu) and ty_size * 8 > ty.bitSize(zcu)) { + const int_elem_ty = try pt.intType(.unsigned, @intCast(ty_size * 8)); + break :result try cg.trunc(loaded, ty, int_elem_ty); + } else { + break :result loaded; + } + } else { + const int_elem_ty = try pt.intType(.unsigned, ptr_info.packed_offset.host_size * 8); + const shift_val: WValue = if (ptr_info.packed_offset.host_size <= 4) + .{ .imm32 = ptr_info.packed_offset.bit_offset } + else if (ptr_info.packed_offset.host_size <= 8) + .{ .imm64 = ptr_info.packed_offset.bit_offset } + else + .{ .imm32 = ptr_info.packed_offset.bit_offset }; + + const stack_loaded = if (ptr_info.packed_offset.host_size <= 8) + try cg.load(operand, int_elem_ty, 0) + else + operand; + const shifted = try cg.binOp(stack_loaded, shift_val, int_elem_ty, .shr); + break :result try cg.trunc(shifted, ty, int_elem_ty); } - - // at this point we have a non-natural alignment, we must - // shift the value to obtain the correct bit. 
- const int_elem_ty = try pt.intType(.unsigned, ptr_info.packed_offset.host_size * 8); - const shift_val: WValue = if (ptr_info.packed_offset.host_size <= 4) - .{ .imm32 = ptr_info.packed_offset.bit_offset } - else if (ptr_info.packed_offset.host_size <= 8) - .{ .imm64 = ptr_info.packed_offset.bit_offset } - else - return cg.fail("TODO: airLoad where ptr to bitfield exceeds 64 bits", .{}); - - const stack_loaded = try cg.load(operand, int_elem_ty, 0); - const shifted = try cg.binOp(stack_loaded, shift_val, int_elem_ty, .shr); - break :result try cg.trunc(shifted, ty, int_elem_ty); }; return cg.finishAir(inst, result, &.{ty_op.operand}); } @@ -3857,15 +3892,12 @@ fn airStructFieldVal(cg: *CodeGen, inst: Air.Inst.Index) InnerError!void { const packed_struct = zcu.typeToPackedStruct(struct_ty).?; const offset = pt.structPackedFieldBitOffset(packed_struct, field_index); const backing_ty = Type.fromInterned(packed_struct.backingIntTypeUnordered(ip)); - const wasm_bits = toWasmBits(backing_ty.intInfo(zcu).bits) orelse { - return cg.fail("TODO: airStructFieldVal for packed structs larger than 128 bits", .{}); - }; - const const_wvalue: WValue = if (wasm_bits == 32) - .{ .imm32 = offset } - else if (wasm_bits == 64) + const host_bits = backing_ty.intInfo(zcu).bits; + + const const_wvalue: WValue = if (33 <= host_bits and host_bits <= 64) .{ .imm64 = offset } else - return cg.fail("TODO: airStructFieldVal for packed structs larger than 64 bits", .{}); + .{ .imm32 = offset }; // for first field we don't require any shifting const shifted_value = if (offset == 0) @@ -4043,7 +4075,7 @@ fn airSwitchBr(cg: *CodeGen, inst: Air.Inst.Index, is_dispatch_loop: bool) Inner if (use_br_table) { const width = width_maybe.?; - const br_value_original = try cg.binOp(target, try cg.resolveInst(Air.internedToRef(min.?.toIntern())), target_ty, .sub); + const br_value_original = try cg.binOp(target, try cg.resolveValue(min.?), target_ty, .sub); _ = try cg.intcast(br_value_original, target_ty, Type.u32); const jump_table: Mir.JumpTable = .{ .length = width + 1 }; @@ -5232,7 +5264,7 @@ fn airAggregateInit(cg: *CodeGen, inst: Air.Inst.Index) InnerError!void { } } if (sentinel) |s| { - const val = try cg.resolveInst(Air.internedToRef(s.toIntern())); + const val = try cg.resolveValue(s); try cg.store(offset, val, elem_ty, 0); } } else { @@ -5243,7 +5275,7 @@ fn airAggregateInit(cg: *CodeGen, inst: Air.Inst.Index) InnerError!void { offset += elem_size; } if (sentinel) |s| { - const val = try cg.resolveInst(Air.internedToRef(s.toIntern())); + const val = try cg.resolveValue(s); try cg.store(result, val, elem_ty, offset); } } diff --git a/test/behavior/bitcast.zig b/test/behavior/bitcast.zig index 36e6b264af..1ffe301dd3 100644 --- a/test/behavior/bitcast.zig +++ b/test/behavior/bitcast.zig @@ -480,7 +480,6 @@ test "@bitCast of packed struct of bools all true" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; // TODO const P = packed struct { @@ -501,7 +500,6 @@ test "@bitCast of packed struct of bools all false" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_c) return 
error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; // TODO const P = packed struct { diff --git a/test/behavior/packed-struct.zig b/test/behavior/packed-struct.zig index c3707c2fa3..c735b338d0 100644 --- a/test/behavior/packed-struct.zig +++ b/test/behavior/packed-struct.zig @@ -1321,7 +1321,6 @@ test "packed struct with signed field" { test "assign packed struct initialized with RLS to packed struct literal field" { if (builtin.zig_backend == .stage2_llvm and builtin.cpu.arch.isWasm()) return error.SkipZigTest; if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; - if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; const Inner = packed struct { x: u17 }; diff --git a/test/behavior/struct.zig b/test/behavior/struct.zig index 27f56c7cba..35d12cc162 100644 --- a/test/behavior/struct.zig +++ b/test/behavior/struct.zig @@ -421,9 +421,7 @@ const Foo96Bits = packed struct { test "packed struct 24bits" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; - if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO - if (builtin.cpu.arch.isWasm()) return error.SkipZigTest; // TODO if (builtin.cpu.arch.isArm()) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO @@ -763,7 +761,6 @@ const S0 = struct { var g_foo: S0 = S0.init(); test "packed struct with fp fields" { - if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
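
For context on the packed-field store path reworked in PATCH 9/9: the backend now emits a read-modify-write on the backing ("host") integer — load the host word, clear the field's bit range with a precomputed mask, shift the new value into position, OR the two results, and store the word back. The standalone sketch below illustrates that pattern in plain Zig under simplifying assumptions: the function name storePackedField and its parameters are illustrative only and do not appear in the compiler, and the host width is fixed at 64 bits purely to keep the example short (the actual patch builds the mask with std.math.big.int so hosts wider than 64 bits are handled as well).

const std = @import("std");

/// Write `value` (the low `bit_size` bits) into `host` at `bit_offset`,
/// leaving every other bit of the host integer untouched.
fn storePackedField(host: u64, value: u64, bit_offset: u6, bit_size: u7) u64 {
    // Mask covering the field's bit range within the host word.
    const field_mask: u64 = if (bit_size == 64)
        ~@as(u64, 0)
    else
        ((@as(u64, 1) << @intCast(bit_size)) - 1) << bit_offset;
    const cleared = host & ~field_mask; // drop the old field bits
    const placed = (value << bit_offset) & field_mask; // position the new bits
    return cleared | placed;
}

test "packed field read-modify-write" {
    // Store 0b101 into a 3-bit field at bit offset 4 of an all-ones word.
    try std.testing.expectEqual(
        @as(u64, 0xFFFF_FFDF),
        storePackedField(0xFFFF_FFFF, 0b101, 4, 3),
    );
}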