diff --git a/src/arch/wasm/CodeGen.zig b/src/arch/wasm/CodeGen.zig index bfadbf0a79..b1ebf9126d 100644 --- a/src/arch/wasm/CodeGen.zig +++ b/src/arch/wasm/CodeGen.zig @@ -11,10 +11,8 @@ const log = std.log.scoped(.codegen); const codegen = @import("../../codegen.zig"); const Zcu = @import("../../Zcu.zig"); -/// Deprecated. -const Module = Zcu; const InternPool = @import("../../InternPool.zig"); -const Decl = Module.Decl; +const Decl = Zcu.Decl; const Type = @import("../../type.zig").Type; const Value = @import("../../Value.zig"); const Compilation = @import("../../Compilation.zig"); @@ -674,7 +672,7 @@ local_index: u32 = 0, /// Used to track which argument is being referenced in `airArg`. arg_index: u32 = 0, /// If codegen fails, an error messages will be allocated and saved in `err_msg` -err_msg: *Module.ErrorMsg, +err_msg: *Zcu.ErrorMsg, /// List of all locals' types generated throughout this declaration /// used to emit locals count at start of 'code' section. locals: std.ArrayListUnmanaged(u8), @@ -768,7 +766,7 @@ pub fn deinit(func: *CodeGen) void { fn fail(func: *CodeGen, comptime fmt: []const u8, args: anytype) InnerError { const mod = func.bin_file.base.comp.module.?; const src_loc = func.decl.navSrcLoc(mod).upgrade(mod); - func.err_msg = try Module.ErrorMsg.create(func.gpa, src_loc, fmt, args); + func.err_msg = try Zcu.ErrorMsg.create(func.gpa, src_loc, fmt, args); return error.CodegenFail; } @@ -992,7 +990,7 @@ fn addExtraAssumeCapacity(func: *CodeGen, extra: anytype) error{OutOfMemory}!u32 } /// Using a given `Type`, returns the corresponding type -fn typeToValtype(ty: Type, mod: *Module) wasm.Valtype { +fn typeToValtype(ty: Type, mod: *Zcu) wasm.Valtype { const target = mod.getTarget(); const ip = &mod.intern_pool; return switch (ty.zigTypeTag(mod)) { @@ -1032,14 +1030,14 @@ fn typeToValtype(ty: Type, mod: *Module) wasm.Valtype { } /// Using a given `Type`, returns the byte representation of its wasm value type -fn genValtype(ty: Type, mod: *Module) u8 { +fn genValtype(ty: Type, mod: *Zcu) u8 { return wasm.valtype(typeToValtype(ty, mod)); } /// Using a given `Type`, returns the corresponding wasm value type /// Differently from `genValtype` this also allows `void` to create a block /// with no return type -fn genBlockType(ty: Type, mod: *Module) u8 { +fn genBlockType(ty: Type, mod: *Zcu) u8 { return switch (ty.ip_index) { .void_type, .noreturn_type => wasm.block_empty, else => genValtype(ty, mod), @@ -1149,7 +1147,7 @@ fn genFunctype( cc: std.builtin.CallingConvention, params: []const InternPool.Index, return_type: Type, - mod: *Module, + mod: *Zcu, ) !wasm.Type { var temp_params = std.ArrayList(wasm.Valtype).init(gpa); defer temp_params.deinit(); @@ -1204,7 +1202,7 @@ fn genFunctype( pub fn generate( bin_file: *link.File, - src_loc: Module.SrcLoc, + src_loc: Zcu.SrcLoc, func_index: InternPool.Index, air: Air, liveness: Liveness, @@ -1405,7 +1403,7 @@ fn resolveCallingConventionValues(func: *CodeGen, fn_ty: Type) InnerError!CallWV return result; } -fn firstParamSRet(cc: std.builtin.CallingConvention, return_type: Type, mod: *Module) bool { +fn firstParamSRet(cc: std.builtin.CallingConvention, return_type: Type, mod: *Zcu) bool { switch (cc) { .Unspecified, .Inline => return isByRef(return_type, mod), .C => { @@ -1713,7 +1711,7 @@ fn arch(func: *const CodeGen) std.Target.Cpu.Arch { /// For a given `Type`, will return true when the type will be passed /// by reference, rather than by value -fn isByRef(ty: Type, mod: *Module) bool { +fn isByRef(ty: Type, mod: *Zcu) bool { const ip 
= &mod.intern_pool; const target = mod.getTarget(); switch (ty.zigTypeTag(mod)) { @@ -1785,7 +1783,7 @@ const SimdStoreStrategy = enum { /// This means when a given type is 128 bits and either the simd128 or relaxed-simd /// features are enabled, the function will return `.direct`. This would allow to store /// it using a instruction, rather than an unrolled version. -fn determineSimdStoreStrategy(ty: Type, mod: *Module) SimdStoreStrategy { +fn determineSimdStoreStrategy(ty: Type, mod: *Zcu) SimdStoreStrategy { std.debug.assert(ty.zigTypeTag(mod) == .Vector); if (ty.bitSize(mod) != 128) return .unrolled; const hasFeature = std.Target.wasm.featureSetHas; @@ -2325,14 +2323,19 @@ fn airStore(func: *CodeGen, inst: Air.Inst.Index, safety: bool) InnerError!void WValue{ .imm32 = @as(u32, @truncate(mask)) } else WValue{ .imm64 = mask }; + const wrap_mask_val = if (ptr_info.packed_offset.host_size <= 4) + WValue{ .imm32 = @truncate(~@as(u64, 0) >> @intCast(64 - ty.bitSize(mod))) } + else + WValue{ .imm64 = ~@as(u64, 0) >> @intCast(64 - ty.bitSize(mod)) }; try func.emitWValue(lhs); const loaded = try func.load(lhs, int_elem_ty, 0); const anded = try func.binOp(loaded, mask_val, int_elem_ty, .@"and"); const extended_value = try func.intcast(rhs, ty, int_elem_ty); + const masked_value = try func.binOp(extended_value, wrap_mask_val, int_elem_ty, .@"and"); const shifted_value = if (ptr_info.packed_offset.bit_offset > 0) shifted: { - break :shifted try func.binOp(extended_value, shift_val, int_elem_ty, .shl); - } else extended_value; + break :shifted try func.binOp(masked_value, shift_val, int_elem_ty, .shl); + } else masked_value; const result = try func.binOp(anded, shifted_value, int_elem_ty, .@"or"); // lhs is still on the stack try func.store(.stack, result, int_elem_ty, lhs.offset()); @@ -2515,7 +2518,7 @@ fn load(func: *CodeGen, operand: WValue, ty: Type, offset: u32) InnerError!WValu .valtype1 = typeToValtype(ty, mod), .width = abi_size * 8, .op = .load, - .signedness = .unsigned, + .signedness = if (ty.isSignedInt(mod)) .signed else .unsigned, }); try func.addMemArg( @@ -2800,10 +2803,7 @@ fn airAbs(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { switch (wasm_bits) { 32 => { try func.emitWValue(operand); - if (wasm_bits != int_bits) { - try func.addImm32(wasm_bits - int_bits); - try func.addTag(.i32_shl); - } + try func.addImm32(31); try func.addTag(.i32_shr_s); @@ -2815,15 +2815,10 @@ fn airAbs(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { try func.addTag(.i32_xor); try func.emitWValue(tmp); try func.addTag(.i32_sub); - - _ = try func.wrapOperand(.stack, ty); }, 64 => { try func.emitWValue(operand); - if (wasm_bits != int_bits) { - try func.addImm64(wasm_bits - int_bits); - try func.addTag(.i64_shl); - } + try func.addImm64(63); try func.addTag(.i64_shr_s); @@ -2835,8 +2830,6 @@ fn airAbs(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { try func.addTag(.i64_xor); try func.emitWValue(tmp); try func.addTag(.i64_sub); - - _ = try func.wrapOperand(.stack, ty); }, 128 => { const mask = try func.allocStack(Type.u128); @@ -2844,10 +2837,6 @@ fn airAbs(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { try func.emitWValue(mask); _ = try func.load(operand, Type.u64, 8); - if (int_bits != 128) { - try func.addImm64(128 - int_bits); - try func.addTag(.i64_shl); - } try func.addImm64(63); try func.addTag(.i64_shr_s); @@ -2860,9 +2849,8 @@ fn airAbs(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { const a = try func.binOpBigInt(operand, mask, Type.u128, .xor); const b = 
try func.binOpBigInt(a, mask, Type.u128, .sub); - const result = try func.wrapOperand(b, ty); - func.finishAir(inst, result, &.{ty_op.operand}); + func.finishAir(inst, b, &.{ty_op.operand}); return; }, else => unreachable, @@ -3058,14 +3046,28 @@ fn wrapOperand(func: *CodeGen, operand: WValue, ty: Type) InnerError!WValue { switch (wasm_bits) { 32 => { try func.emitWValue(operand); - try func.addImm32((@as(u32, 1) << @intCast(int_bits)) - 1); - try func.addTag(.i32_and); + if (ty.isSignedInt(mod)) { + try func.addImm32(32 - int_bits); + try func.addTag(.i32_shl); + try func.addImm32(32 - int_bits); + try func.addTag(.i32_shr_s); + } else { + try func.addImm32(~@as(u32, 0) >> @intCast(32 - int_bits)); + try func.addTag(.i32_and); + } return .stack; }, 64 => { try func.emitWValue(operand); - try func.addImm64((@as(u64, 1) << @intCast(int_bits)) - 1); - try func.addTag(.i64_and); + if (ty.isSignedInt(mod)) { + try func.addImm64(64 - int_bits); + try func.addTag(.i64_shl); + try func.addImm64(64 - int_bits); + try func.addTag(.i64_shr_s); + } else { + try func.addImm64(~@as(u64, 0) >> @intCast(64 - int_bits)); + try func.addTag(.i64_and); + } return .stack; }, 128 => { @@ -3078,8 +3080,15 @@ fn wrapOperand(func: *CodeGen, operand: WValue, ty: Type) InnerError!WValue { try func.emitWValue(result); _ = try func.load(operand, Type.u64, 8); - try func.addImm64((@as(u64, 1) << @intCast(int_bits - 64)) - 1); - try func.addTag(.i64_and); + if (ty.isSignedInt(mod)) { + try func.addImm64(128 - int_bits); + try func.addTag(.i64_shl); + try func.addImm64(128 - int_bits); + try func.addTag(.i64_shr_s); + } else { + try func.addImm64(~@as(u64, 0) >> @intCast(128 - int_bits)); + try func.addTag(.i64_and); + } try func.store(.stack, .stack, Type.u64, result.offset() + 8); return result; @@ -3201,22 +3210,6 @@ fn lowerDeclRefValue(func: *CodeGen, decl_index: InternPool.DeclIndex, offset: u } else return WValue{ .memory_offset = .{ .pointer = target_sym_index, .offset = offset } }; } -/// Converts a signed integer to its 2's complement form and returns -/// an unsigned integer instead. -/// Asserts bitsize <= 64 -fn toTwosComplement(value: anytype, bits: u7) std.meta.Int(.unsigned, @typeInfo(@TypeOf(value)).Int.bits) { - const T = @TypeOf(value); - comptime assert(@typeInfo(T) == .Int); - comptime assert(@typeInfo(T).Int.signedness == .signed); - assert(bits <= 64); - const WantedT = std.meta.Int(.unsigned, @typeInfo(T).Int.bits); - if (value >= 0) return @as(WantedT, @bitCast(value)); - const max_value = @as(u64, @intCast((@as(u65, 1) << bits) - 1)); - const flipped = @as(T, @intCast((~-@as(i65, value)) + 1)); - const result = @as(WantedT, @bitCast(flipped)) & max_value; - return @as(WantedT, @intCast(result)); -} - /// Asserts that `isByRef` returns `false` for `ty`. 
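// Illustrative sketch, not part of the patch: scalar model of the rewritten
// `wrapOperand`. Narrow signed integers are now kept sign-extended inside
// their 32/64-bit wasm local (the backend emits shl followed by shr_s, which
// behaves like the truncate-and-widen below), while unsigned integers are
// still masked down to their low N bits.
const std = @import("std");

test "wrapOperand scalar model (sketch)" {
    const bits = 5;
    // Signed path: re-sign-extend the low 5 bits of the container.
    const raw: i32 = 0x7B; // low 5 bits are 0b11011, i.e. -5 as an i5
    const narrow: i5 = @truncate(raw);
    const wrapped_signed: i32 = narrow; // widening sign-extends, like shl + shr_s
    try std.testing.expectEqual(@as(i32, -5), wrapped_signed);
    // Unsigned path: mask with ~0 >> (32 - bits).
    const uraw: u32 = 0xFFFF_FFFB;
    const wrapped_unsigned = uraw & (~@as(u32, 0) >> (32 - bits));
    try std.testing.expectEqual(@as(u32, 0b11011), wrapped_unsigned);
}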
fn lowerConstant(func: *CodeGen, val: Value, ty: Type) InnerError!WValue { const mod = func.bin_file.base.comp.module.?; @@ -3268,18 +3261,12 @@ fn lowerConstant(func: *CodeGen, val: Value, ty: Type) InnerError!WValue { const int_info = ty.intInfo(mod); switch (int_info.signedness) { .signed => switch (int_info.bits) { - 0...32 => return WValue{ .imm32 = @as(u32, @intCast(toTwosComplement( - val.toSignedInt(mod), - @as(u6, @intCast(int_info.bits)), - ))) }, - 33...64 => return WValue{ .imm64 = toTwosComplement( - val.toSignedInt(mod), - @as(u7, @intCast(int_info.bits)), - ) }, + 0...32 => return WValue{ .imm32 = @bitCast(@as(i32, @intCast(val.toSignedInt(mod)))) }, + 33...64 => return WValue{ .imm64 = @bitCast(val.toSignedInt(mod)) }, else => unreachable, }, .unsigned => switch (int_info.bits) { - 0...32 => return WValue{ .imm32 = @as(u32, @intCast(val.toUnsignedInt(mod))) }, + 0...32 => return WValue{ .imm32 = @intCast(val.toUnsignedInt(mod)) }, 33...64 => return WValue{ .imm64 = val.toUnsignedInt(mod) }, else => unreachable, }, @@ -3447,7 +3434,7 @@ fn valueAsI32(func: *const CodeGen, val: Value, ty: Type) i32 { assert(ptr.base_addr == .int); return @intCast(ptr.byte_offset); }, - .err => |err| @as(i32, @bitCast(@as(Module.ErrorInt, @intCast(mod.global_error_set.getIndex(err.name).?)))), + .err => |err| @as(i32, @bitCast(@as(Zcu.ErrorInt, @intCast(mod.global_error_set.getIndex(err.name).?)))), else => unreachable, }, } @@ -3458,11 +3445,11 @@ fn valueAsI32(func: *const CodeGen, val: Value, ty: Type) i32 { }; } -fn intIndexAsI32(ip: *const InternPool, int: InternPool.Index, mod: *Module) i32 { +fn intIndexAsI32(ip: *const InternPool, int: InternPool.Index, mod: *Zcu) i32 { return intStorageAsI32(ip.indexToKey(int).int.storage, mod); } -fn intStorageAsI32(storage: InternPool.Key.Int.Storage, mod: *Module) i32 { +fn intStorageAsI32(storage: InternPool.Key.Int.Storage, mod: *Zcu) i32 { return switch (storage) { .i64 => |x| @as(i32, @intCast(x)), .u64 => |x| @as(i32, @bitCast(@as(u32, @intCast(x)))), @@ -3618,29 +3605,11 @@ fn cmp(func: *CodeGen, lhs: WValue, rhs: WValue, ty: Type, op: std.math.CompareO // incase of an actual integer, we emit the correct signedness break :blk ty.intInfo(mod).signedness; }; - const extend_sign = blk: { - // do we need to extend the sign bit? - if (signedness != .signed) break :blk false; - if (op == .eq or op == .neq) break :blk false; - const int_bits = ty.intInfo(mod).bits; - const wasm_bits = toWasmBits(int_bits) orelse unreachable; - break :blk (wasm_bits != int_bits); - }; - - const lhs_wasm = if (extend_sign) - try func.signExtendInt(lhs, ty) - else - lhs; - - const rhs_wasm = if (extend_sign) - try func.signExtendInt(rhs, ty) - else - rhs; // ensure that when we compare pointers, we emit // the true pointer of a stack value, rather than the stack pointer. 
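// Sketch only (not code from the patch): with `toTwosComplement` gone, a
// narrow signed constant is emitted as an already sign-extended immediate,
// which is what lets `cmp` drop its sign-extension step. For example @as(i7, -5):
const std = @import("std");

test "narrow signed immediates are sign-extended (sketch)" {
    const value: i7 = -5;
    // New encoding, mirroring `@bitCast(@as(i32, @intCast(val.toSignedInt(mod))))`.
    const imm32: u32 = @bitCast(@as(i32, value));
    try std.testing.expectEqual(@as(u32, 0xFFFF_FFFB), imm32);
    // Old encoding: two's complement masked to the low 7 bits.
    const old_imm32: u32 = @as(u32, @bitCast(@as(i32, value))) & 0x7F;
    try std.testing.expectEqual(@as(u32, 0x7B), old_imm32);
}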
- try func.lowerToStack(lhs_wasm); - try func.lowerToStack(rhs_wasm); + try func.lowerToStack(lhs); + try func.lowerToStack(rhs); const opcode: wasm.Opcode = buildOpcode(.{ .valtype1 = typeToValtype(ty, mod), @@ -3760,32 +3729,49 @@ fn airNot(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { try func.addLabel(.local_set, not_tmp.local.value); break :result not_tmp; } else { - const operand_bits = operand_ty.intInfo(mod).bits; - const wasm_bits = toWasmBits(operand_bits) orelse { - return func.fail("TODO: Implement binary NOT for integer with bitsize '{d}'", .{operand_bits}); + const int_info = operand_ty.intInfo(mod); + const wasm_bits = toWasmBits(int_info.bits) orelse { + return func.fail("TODO: Implement binary NOT for {}", .{operand_ty.fmt(mod)}); }; switch (wasm_bits) { 32 => { - const bin_op = try func.binOp(operand, .{ .imm32 = ~@as(u32, 0) }, operand_ty, .xor); - break :result try (try func.wrapOperand(bin_op, operand_ty)).toLocal(func, operand_ty); + try func.emitWValue(operand); + try func.addImm32(switch (int_info.signedness) { + .unsigned => ~@as(u32, 0) >> @intCast(32 - int_info.bits), + .signed => ~@as(u32, 0), + }); + try func.addTag(.i32_xor); + break :result try @as(WValue, .stack).toLocal(func, operand_ty); }, 64 => { - const bin_op = try func.binOp(operand, .{ .imm64 = ~@as(u64, 0) }, operand_ty, .xor); - break :result try (try func.wrapOperand(bin_op, operand_ty)).toLocal(func, operand_ty); + try func.emitWValue(operand); + try func.addImm64(switch (int_info.signedness) { + .unsigned => ~@as(u64, 0) >> @intCast(64 - int_info.bits), + .signed => ~@as(u64, 0), + }); + try func.addTag(.i64_xor); + break :result try @as(WValue, .stack).toLocal(func, operand_ty); }, 128 => { - const result_ptr = try func.allocStack(operand_ty); - try func.emitWValue(result_ptr); - const msb = try func.load(operand, Type.u64, 0); - const msb_xor = try func.binOp(msb, .{ .imm64 = ~@as(u64, 0) }, Type.u64, .xor); - try func.store(.{ .stack = {} }, msb_xor, Type.u64, 0 + result_ptr.offset()); + const ptr = try func.allocStack(operand_ty); - try func.emitWValue(result_ptr); - const lsb = try func.load(operand, Type.u64, 8); - const lsb_xor = try func.binOp(lsb, .{ .imm64 = ~@as(u64, 0) }, Type.u64, .xor); - try func.store(result_ptr, lsb_xor, Type.u64, 8 + result_ptr.offset()); - break :result result_ptr; + try func.emitWValue(ptr); + _ = try func.load(operand, Type.u64, 0); + try func.addImm64(~@as(u64, 0)); + try func.addTag(.i64_xor); + try func.store(.stack, .stack, Type.u64, ptr.offset()); + + try func.emitWValue(ptr); + _ = try func.load(operand, Type.u64, 8); + try func.addImm64(switch (int_info.signedness) { + .unsigned => ~@as(u64, 0) >> @intCast(128 - int_info.bits), + .signed => ~@as(u64, 0), + }); + try func.addTag(.i64_xor); + try func.store(.stack, .stack, Type.u64, ptr.offset() + 8); + + break :result ptr; }, else => unreachable, } @@ -3812,25 +3798,44 @@ fn airUnreachable(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { } fn airBitcast(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { + const mod = func.bin_file.base.comp.module.?; const ty_op = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; + const operand = try func.resolveInst(ty_op.operand); + const wanted_ty = func.typeOfIndex(inst); + const given_ty = func.typeOf(ty_op.operand); + + const bit_size = given_ty.bitSize(mod); + const needs_wrapping = (given_ty.isSignedInt(mod) != wanted_ty.isSignedInt(mod)) and + bit_size != 32 and bit_size != 64 and bit_size != 128; + const result = result: { - 
const operand = try func.resolveInst(ty_op.operand); - const wanted_ty = func.typeOfIndex(inst); - const given_ty = func.typeOf(ty_op.operand); if (given_ty.isAnyFloat() or wanted_ty.isAnyFloat()) { const bitcast_result = try func.bitcast(wanted_ty, given_ty, operand); break :result try bitcast_result.toLocal(func, wanted_ty); } - const mod = func.bin_file.base.comp.module.?; + if (isByRef(given_ty, mod) and !isByRef(wanted_ty, mod)) { const loaded_memory = try func.load(operand, wanted_ty, 0); - break :result try loaded_memory.toLocal(func, wanted_ty); + if (needs_wrapping) { + break :result try (try func.wrapOperand(loaded_memory, wanted_ty)).toLocal(func, wanted_ty); + } else { + break :result try loaded_memory.toLocal(func, wanted_ty); + } } if (!isByRef(given_ty, mod) and isByRef(wanted_ty, mod)) { const stack_memory = try func.allocStack(wanted_ty); try func.store(stack_memory, operand, given_ty, 0); - break :result stack_memory; + if (needs_wrapping) { + break :result try (try func.wrapOperand(stack_memory, wanted_ty)).toLocal(func, wanted_ty); + } else { + break :result stack_memory; + } } + + if (needs_wrapping) { + break :result try (try func.wrapOperand(operand, wanted_ty)).toLocal(func, wanted_ty); + } + break :result func.reuseOperand(ty_op.operand, operand); }; func.finishAir(inst, result, &.{ty_op.operand}); @@ -4355,7 +4360,7 @@ fn airIntcast(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { const op_bits = toWasmBits(@as(u16, @intCast(operand_ty.bitSize(mod)))).?; const wanted_bits = toWasmBits(@as(u16, @intCast(ty.bitSize(mod)))).?; - const result = if (op_bits == wanted_bits and !ty.isSignedInt(mod)) + const result = if (op_bits == wanted_bits) func.reuseOperand(ty_op.operand, operand) else try (try func.intcast(operand, operand_ty, ty)).toLocal(func, ty); @@ -4377,37 +4382,17 @@ fn intcast(func: *CodeGen, operand: WValue, given: Type, wanted: Type) InnerErro const op_bits = toWasmBits(given_bitsize).?; const wanted_bits = toWasmBits(wanted_bitsize).?; if (op_bits == wanted_bits) { - if (given.isSignedInt(mod)) { - if (given_bitsize < wanted_bitsize) { - // signed integers are stored as two's complement, - // when we upcast from a smaller integer to larger - // integers, we must get its absolute value similar to - // i64_extend_i32_s instruction. 
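// Sketch, not part of the patch: scalar equivalent of the rewritten `airNot`.
// Because narrow signed operands already sit sign-extended in their local, a
// plain xor with all-ones produces the correct result, while an unsigned uN
// must only flip its low N bits.
const std = @import("std");

test "binary not masks (sketch)" {
    const bits = 5;
    // u5 held in a u32 local: flip only the low 5 bits.
    const u_local: u32 = 0b01001;
    const u_not = u_local ^ (~@as(u32, 0) >> (32 - bits));
    try std.testing.expectEqual(@as(u32, 0b10110), u_not);
    // i5 held sign-extended in an i32 local: flip every bit.
    const s_local: i32 = -2;
    const s_not = s_local ^ ~@as(i32, 0);
    try std.testing.expectEqual(@as(i32, 1), s_not); // ~(-2) == 1
}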
- return func.signExtendInt(operand, given); - } - return func.wrapOperand(operand, wanted); - } return operand; } - if (op_bits > 32 and op_bits <= 64 and wanted_bits == 32) { + if (op_bits == 64 and wanted_bits == 32) { try func.emitWValue(operand); try func.addTag(.i32_wrap_i64); - if (given.isSignedInt(mod) and wanted_bitsize < 32) - return func.wrapOperand(.{ .stack = {} }, wanted) - else - return WValue{ .stack = {} }; - } else if (op_bits == 32 and wanted_bits > 32 and wanted_bits <= 64) { - const operand32 = if (given_bitsize < 32 and wanted.isSignedInt(mod)) - try func.signExtendInt(operand, given) - else - operand; - try func.emitWValue(operand32); + return .stack; + } else if (op_bits == 32 and wanted_bits == 64) { + try func.emitWValue(operand); try func.addTag(if (wanted.isSignedInt(mod)) .i64_extend_i32_s else .i64_extend_i32_u); - if (given.isSignedInt(mod) and wanted_bitsize < 64) - return func.wrapOperand(.{ .stack = {} }, wanted) - else - return WValue{ .stack = {} }; + return .stack; } else if (wanted_bits == 128) { // for 128bit integers we store the integer in the virtual stack, rather than a local const stack_ptr = try func.allocStack(wanted); @@ -4416,17 +4401,18 @@ fn intcast(func: *CodeGen, operand: WValue, given: Type, wanted: Type) InnerErro // for 32 bit integers, we first coerce the value into a 64 bit integer before storing it // meaning less store operations are required. const lhs = if (op_bits == 32) blk: { - break :blk try func.intcast(operand, given, if (wanted.isSignedInt(mod)) Type.i64 else Type.u64); + const sign_ty = if (wanted.isSignedInt(mod)) Type.i64 else Type.u64; + break :blk try (try func.intcast(operand, given, sign_ty)).toLocal(func, sign_ty); } else operand; // store msb first - try func.store(.{ .stack = {} }, lhs, Type.u64, 0 + stack_ptr.offset()); + try func.store(.stack, lhs, Type.u64, 0 + stack_ptr.offset()); // For signed integers we shift msb by 63 (64bit integer - 1 sign bit) and store remaining value if (wanted.isSignedInt(mod)) { try func.emitWValue(stack_ptr); const shr = try func.binOp(lhs, .{ .imm64 = 63 }, Type.i64, .shr); - try func.store(.{ .stack = {} }, shr, Type.u64, 8 + stack_ptr.offset()); + try func.store(.stack, shr, Type.u64, 8 + stack_ptr.offset()); } else { // Ensure memory of lsb is zero'd try func.store(stack_ptr, .{ .imm64 = 0 }, Type.u64, 8); @@ -5823,25 +5809,34 @@ fn airPopcount(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { }; switch (wasm_bits) { + 32 => { + try func.emitWValue(operand); + if (op_ty.isSignedInt(mod) and bits != wasm_bits) { + _ = try func.wrapOperand(.stack, try mod.intType(.unsigned, bits)); + } + try func.addTag(.i32_popcnt); + }, + 64 => { + try func.emitWValue(operand); + if (op_ty.isSignedInt(mod) and bits != wasm_bits) { + _ = try func.wrapOperand(.stack, try mod.intType(.unsigned, bits)); + } + try func.addTag(.i64_popcnt); + try func.addTag(.i32_wrap_i64); + try func.emitWValue(operand); + }, 128 => { _ = try func.load(operand, Type.u64, 0); try func.addTag(.i64_popcnt); _ = try func.load(operand, Type.u64, 8); + if (op_ty.isSignedInt(mod) and bits != wasm_bits) { + _ = try func.wrapOperand(.stack, try mod.intType(.unsigned, bits - 64)); + } try func.addTag(.i64_popcnt); try func.addTag(.i64_add); try func.addTag(.i32_wrap_i64); }, - else => { - try func.emitWValue(operand); - switch (wasm_bits) { - 32 => try func.addTag(.i32_popcnt), - 64 => { - try func.addTag(.i64_popcnt); - try func.addTag(.i32_wrap_i64); - }, - else => unreachable, - } - }, + else => unreachable, } 
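// Sketch (not from the patch): why `airPopcount` now masks a narrow signed
// operand before counting. A sign-extended i5 holding -1 sets all 32 bits of
// its local, so counting without the mask would yield 32 rather than 5.
const std = @import("std");

test "popcount needs the sign-extension bits masked off (sketch)" {
    const bits = 5;
    const local: u32 = @bitCast(@as(i32, -1)); // i5 value -1, sign-extended
    const masked = local & (~@as(u32, 0) >> (32 - bits));
    try std.testing.expectEqual(@as(u32, 32), @popCount(local));
    try std.testing.expectEqual(@as(u32, 5), @popCount(masked));
}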
const result = try func.allocLocal(result_ty); @@ -5877,7 +5872,7 @@ fn airBitReverse(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { const reversed = if (bits == 32) intrin_ret else - try func.binOp(intrin_ret, .{ .imm32 = 32 - bits }, Type.u32, .shr); + try func.binOp(intrin_ret, .{ .imm32 = 32 - bits }, ty, .shr); const result = try reversed.toLocal(func, ty); func.finishAir(inst, result, &.{ty_op.operand}); }, @@ -5891,7 +5886,7 @@ fn airBitReverse(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { const reversed = if (bits == 64) intrin_ret else - try func.binOp(intrin_ret, .{ .imm64 = 64 - bits }, Type.u64, .shr); + try func.binOp(intrin_ret, .{ .imm64 = 64 - bits }, ty, .shr); const result = try reversed.toLocal(func, ty); func.finishAir(inst, result, &.{ty_op.operand}); }, @@ -5928,7 +5923,11 @@ fn airBitReverse(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { defer tmp.free(func); try func.addLabel(.local_tee, tmp.local.value); try func.emitWValue(.{ .imm64 = 128 - bits }); - try func.addTag(.i64_shr_u); + if (ty.isSignedInt(mod)) { + try func.addTag(.i64_shr_s); + } else { + try func.addTag(.i64_shr_u); + } try func.store(.stack, .stack, Type.u64, result.offset() + 8); try func.addLabel(.local_get, tmp.local.value); try func.emitWValue(.{ .imm64 = bits - 64 }); @@ -5996,8 +5995,8 @@ fn airAddSubWithOverflow(func: *CodeGen, inst: Air.Inst.Index, op: Op) InnerErro const ty_pl = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl; const extra = func.air.extraData(Air.Bin, ty_pl.payload).data; - const lhs_op = try func.resolveInst(extra.lhs); - const rhs_op = try func.resolveInst(extra.rhs); + const lhs = try func.resolveInst(extra.lhs); + const rhs = try func.resolveInst(extra.rhs); const lhs_ty = func.typeOf(extra.lhs); const mod = func.bin_file.base.comp.module.?; @@ -6012,7 +6011,7 @@ fn airAddSubWithOverflow(func: *CodeGen, inst: Air.Inst.Index, op: Op) InnerErro }; if (wasm_bits == 128) { - const result = try func.addSubWithOverflowBigInt(lhs_op, rhs_op, lhs_ty, func.typeOfIndex(inst), op); + const result = try func.addSubWithOverflowBigInt(lhs, rhs, lhs_ty, func.typeOfIndex(inst), op); return func.finishAir(inst, result, &.{ extra.lhs, extra.rhs }); } @@ -6022,24 +6021,6 @@ fn airAddSubWithOverflow(func: *CodeGen, inst: Air.Inst.Index, op: Op) InnerErro else => unreachable, }; - // for signed integers, we first apply signed shifts by the difference in bits - // to get the signed value, as we store it internally as 2's complement. - var lhs = if (wasm_bits != int_info.bits and is_signed) blk: { - break :blk try (try func.signExtendInt(lhs_op, lhs_ty)).toLocal(func, lhs_ty); - } else lhs_op; - var rhs = if (wasm_bits != int_info.bits and is_signed) blk: { - break :blk try (try func.signExtendInt(rhs_op, lhs_ty)).toLocal(func, lhs_ty); - } else rhs_op; - - // in this case, we performed a signExtendInt which created a temporary local - // so let's free this so it can be re-used instead. - // In the other case we do not want to free it, because that would free the - // resolved instructions which may be referenced by other instructions. 
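// Sketch of the overflow check this hunk relies on (an illustration, not code
// from the patch): with sign-extended storage the operands can be added at
// container width directly, and overflow shows up as the wrapped result
// differing from the raw result.
const std = @import("std");

test "i8 add overflow via wrap-and-compare (sketch)" {
    const a: i32 = 100; // two i8 values held in i32 locals
    const b: i32 = 100;
    const raw = a + b; // 200 still fits in the container
    const wrapped: i32 = @as(i8, @truncate(raw)); // re-sign-extend the low 8 bits
    try std.testing.expectEqual(@as(i32, -56), wrapped);
    try std.testing.expect(wrapped != raw); // wrapping changed the value: overflow
}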
- defer if (wasm_bits != int_info.bits and is_signed) { - lhs.free(func); - rhs.free(func); - }; - const bin_op = try (try func.binOp(lhs, rhs, lhs_ty, op)).toLocal(func, lhs_ty); var result = if (wasm_bits != int_info.bits) blk: { break :blk try (try func.wrapOperand(bin_op, lhs_ty)).toLocal(func, lhs_ty); @@ -6053,8 +6034,7 @@ fn airAddSubWithOverflow(func: *CodeGen, inst: Air.Inst.Index, op: Op) InnerErro const lt = try func.cmp(bin_op, lhs, lhs_ty, .lt); break :blk try func.binOp(cmp_zero, lt, Type.u32, .xor); } - const abs = try func.signExtendInt(bin_op, lhs_ty); - break :blk try func.cmp(abs, bin_op, lhs_ty, .neq); + break :blk try func.cmp(bin_op, bin_op, lhs_ty, .neq); } else if (wasm_bits == int_info.bits) try func.cmp(bin_op, lhs, lhs_ty, cmp_op) else @@ -6150,7 +6130,6 @@ fn airShlWithOverflow(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { } const int_info = lhs_ty.intInfo(mod); - const is_signed = int_info.signedness == .signed; const wasm_bits = toWasmBits(int_info.bits) orelse { return func.fail("TODO: Implement shl_with_overflow for integer bitsize: {d}", .{int_info.bits}); }; @@ -6170,13 +6149,7 @@ fn airShlWithOverflow(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { } else shl; defer result.free(func); // it's a no-op to free the same local twice (when wasm_bits == int_info.bits) - const overflow_bit = if (wasm_bits != int_info.bits and is_signed) blk: { - // emit lhs to stack to we can keep 'wrapped' on the stack also - try func.emitWValue(lhs); - const abs = try func.signExtendInt(shl, lhs_ty); - const wrapped = try func.wrapBinOp(abs, rhs_final, lhs_ty, .shr); - break :blk try func.cmp(.{ .stack = {} }, wrapped, lhs_ty, .neq); - } else blk: { + const overflow_bit = blk: { try func.emitWValue(lhs); const shr = try func.binOp(result, rhs_final, lhs_ty, .shr); break :blk try func.cmp(.{ .stack = {} }, shr, lhs_ty, .neq); @@ -6245,10 +6218,8 @@ fn airMulWithOverflow(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { break :blk down_cast; } } else if (int_info.signedness == .signed and wasm_bits == 32) blk: { - const lhs_abs = try func.signExtendInt(lhs, lhs_ty); - const rhs_abs = try func.signExtendInt(rhs, lhs_ty); - const bin_op = try (try func.binOp(lhs_abs, rhs_abs, lhs_ty, .mul)).toLocal(func, lhs_ty); - const mul_abs = try func.signExtendInt(bin_op, lhs_ty); + const bin_op = try (try func.binOp(lhs, rhs, lhs_ty, .mul)).toLocal(func, lhs_ty); + const mul_abs = try func.wrapOperand(bin_op, lhs_ty); _ = try func.cmp(mul_abs, bin_op, lhs_ty, .neq); try func.addLabel(.local_set, overflow_bit.local.value); break :blk try func.wrapOperand(bin_op, lhs_ty); @@ -6697,6 +6668,9 @@ fn airByteSwap(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { return func.fail("TODO: @byteSwap for vectors", .{}); } const int_info = ty.intInfo(mod); + const wasm_bits = toWasmBits(int_info.bits) orelse { + return func.fail("TODO: @byteSwap for integers with bitsize {d}", .{int_info.bits}); + }; // bytes are no-op if (int_info.bits == 8) { @@ -6704,73 +6678,34 @@ fn airByteSwap(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { } const result = result: { - switch (int_info.bits) { - 16 => { - const shl_res = try func.binOp(operand, .{ .imm32 = 8 }, ty, .shl); - const lhs = try func.binOp(shl_res, .{ .imm32 = 0xFF00 }, ty, .@"and"); - const shr_res = try func.binOp(operand, .{ .imm32 = 8 }, ty, .shr); - const res = if (int_info.signedness == .signed) blk: { - break :blk try func.wrapOperand(shr_res, Type.u8); - } else shr_res; - break :result try (try 
func.binOp(lhs, res, ty, .@"or")).toLocal(func, ty); - }, - 24 => { - var msb = try (try func.wrapOperand(operand, Type.u16)).toLocal(func, Type.u16); - defer msb.free(func); - - const shl_res = try func.binOp(msb, .{ .imm32 = 8 }, Type.u16, .shl); - const lhs = try func.binOp(shl_res, .{ .imm32 = 0xFF0000 }, Type.u16, .@"and"); - const shr_res = try func.binOp(msb, .{ .imm32 = 8 }, ty, .shr); - - const res = if (int_info.signedness == .signed) blk: { - break :blk try func.wrapOperand(shr_res, Type.u8); - } else shr_res; - const lhs_tmp = try func.binOp(lhs, res, ty, .@"or"); - const lhs_result = try func.binOp(lhs_tmp, .{ .imm32 = 8 }, ty, .shr); - const rhs_wrap = try func.wrapOperand(msb, Type.u8); - const rhs_result = try func.binOp(rhs_wrap, .{ .imm32 = 16 }, ty, .shl); - - const lsb = try func.wrapBinOp(operand, .{ .imm32 = 16 }, Type.u8, .shr); - const tmp = try func.binOp(lhs_result, rhs_result, ty, .@"or"); - break :result try (try func.binOp(tmp, lsb, ty, .@"or")).toLocal(func, ty); - }, + switch (wasm_bits) { 32 => { - const shl_tmp = try func.binOp(operand, .{ .imm32 = 8 }, Type.u32, .shl); - const lhs = try func.binOp(shl_tmp, .{ .imm32 = 0xFF00FF00 }, Type.u32, .@"and"); - const shr_tmp = try func.binOp(operand, .{ .imm32 = 8 }, Type.u32, .shr); - const rhs = try func.binOp(shr_tmp, .{ .imm32 = 0x00FF00FF }, Type.u32, .@"and"); - var tmp_or = try (try func.binOp(lhs, rhs, Type.u32, .@"or")).toLocal(func, Type.u32); + const intrin_ret = try func.callIntrinsic( + "__bswapsi2", + &.{.u32_type}, + Type.u32, + &.{operand}, + ); + const swapped = if (int_info.bits == 32) + intrin_ret + else + try func.binOp(intrin_ret, .{ .imm32 = 32 - int_info.bits }, ty, .shr); - const shl = try func.binOp(tmp_or, .{ .imm32 = 16 }, Type.u32, .shl); - const shr = try func.binOp(tmp_or, .{ .imm32 = 16 }, Type.u32, .shr); - - tmp_or.free(func); - - break :result try (try func.binOp(shl, shr, Type.u32, .@"or")).toLocal(func, Type.u32); + break :result try swapped.toLocal(func, ty); }, 64 => { - const shl_tmp_1 = try func.binOp(operand, .{ .imm64 = 8 }, Type.u64, .shl); - const lhs_1 = try func.binOp(shl_tmp_1, .{ .imm64 = 0xFF00FF00FF00FF00 }, Type.u64, .@"and"); + const intrin_ret = try func.callIntrinsic( + "__bswapdi2", + &.{.u64_type}, + Type.u64, + &.{operand}, + ); + const swapped = if (int_info.bits == 64) + intrin_ret + else + try func.binOp(intrin_ret, .{ .imm64 = 64 - int_info.bits }, ty, .shr); - const shr_tmp_1 = try func.binOp(operand, .{ .imm64 = 8 }, Type.u64, .shr); - const rhs_1 = try func.binOp(shr_tmp_1, .{ .imm64 = 0x00FF00FF00FF00FF }, Type.u64, .@"and"); - - var tmp_or_1 = try (try func.binOp(lhs_1, rhs_1, Type.u64, .@"or")).toLocal(func, Type.u64); - - const shl_tmp_2 = try func.binOp(tmp_or_1, .{ .imm64 = 16 }, Type.u64, .shl); - const lhs_2 = try func.binOp(shl_tmp_2, .{ .imm64 = 0xFFFF0000FFFF0000 }, Type.u64, .@"and"); - - const shr_tmp_2 = try func.binOp(tmp_or_1, .{ .imm64 = 16 }, Type.u64, .shr); - tmp_or_1.free(func); - const rhs_2 = try func.binOp(shr_tmp_2, .{ .imm64 = 0x0000FFFF0000FFFF }, Type.u64, .@"and"); - - var tmp_or_2 = try (try func.binOp(lhs_2, rhs_2, Type.u64, .@"or")).toLocal(func, Type.u64); - - const shl = try func.binOp(tmp_or_2, .{ .imm64 = 32 }, Type.u64, .shl); - const shr = try func.binOp(tmp_or_2, .{ .imm64 = 32 }, Type.u64, .shr); - tmp_or_2.free(func); - - break :result try (try func.binOp(shl, shr, Type.u64, .@"or")).toLocal(func, Type.u64); + break :result try swapped.toLocal(func, ty); }, else => return func.fail("TODO: @byteSwap for integers 
with bitsize {d}", .{int_info.bits}), } @@ -6779,32 +6714,24 @@ fn airByteSwap(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { } fn airDiv(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { - const mod = func.bin_file.base.comp.module.?; const bin_op = func.air.instructions.items(.data)[@intFromEnum(inst)].bin_op; const ty = func.typeOfIndex(inst); const lhs = try func.resolveInst(bin_op.lhs); const rhs = try func.resolveInst(bin_op.rhs); - const result = if (ty.isSignedInt(mod)) - try func.divSigned(lhs, rhs, ty) - else - try (try func.binOp(lhs, rhs, ty, .div)).toLocal(func, ty); + const result = try (try func.binOp(lhs, rhs, ty, .div)).toLocal(func, ty); func.finishAir(inst, result, &.{ bin_op.lhs, bin_op.rhs }); } fn airDivTrunc(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { - const mod = func.bin_file.base.comp.module.?; const bin_op = func.air.instructions.items(.data)[@intFromEnum(inst)].bin_op; const ty = func.typeOfIndex(inst); const lhs = try func.resolveInst(bin_op.lhs); const rhs = try func.resolveInst(bin_op.rhs); - const div_result = if (ty.isSignedInt(mod)) - try func.divSigned(lhs, rhs, ty) - else - try (try func.binOp(lhs, rhs, ty, .div)).toLocal(func, ty); + const div_result = try (try func.binOp(lhs, rhs, ty, .div)).toLocal(func, ty); if (ty.isAnyFloat()) { const trunc_result = try (try func.floatOp(.trunc, ty, &.{div_result})).toLocal(func, ty); @@ -6834,16 +6761,6 @@ fn airDivFloor(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { return func.fail("TODO: `@divFloor` for signed integers larger than 64 bits ({d} bits requested)", .{int_bits}); } - const lhs_wasm = if (wasm_bits != int_bits) - try (try func.signExtendInt(lhs, ty)).toLocal(func, ty) - else - lhs; - - const rhs_wasm = if (wasm_bits != int_bits) - try (try func.signExtendInt(rhs, ty)).toLocal(func, ty) - else - rhs; - const zero = switch (wasm_bits) { 32 => WValue{ .imm32 = 0 }, 64 => WValue{ .imm64 = 0 }, @@ -6852,7 +6769,7 @@ fn airDivFloor(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { // tee leaves the value on the stack and stores it in a local. const quotient = try func.allocLocal(ty); - _ = try func.binOp(lhs_wasm, rhs_wasm, ty, .div); + _ = try func.binOp(lhs, rhs, ty, .div); try func.addLabel(.local_tee, quotient.local.value); // select takes a 32 bit value as the condition, so in the 64 bit case we use eqz to narrow @@ -6864,7 +6781,7 @@ fn airDivFloor(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { } // 0 if the signs of rhs_wasm and lhs_wasm are the same, 1 otherwise. 
- _ = try func.binOp(lhs_wasm, rhs_wasm, ty, .xor); + _ = try func.binOp(lhs, rhs, ty, .xor); _ = try func.cmp(.stack, zero, ty, .lt); switch (wasm_bits) { @@ -6879,7 +6796,7 @@ fn airDivFloor(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { else => unreachable, } - _ = try func.binOp(lhs_wasm, rhs_wasm, ty, .rem); + _ = try func.binOp(lhs, rhs, ty, .rem); if (wasm_bits == 64) { try func.addTag(.i64_eqz); @@ -6929,68 +6846,14 @@ fn airDivFloor(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { func.finishAir(inst, result, &.{ bin_op.lhs, bin_op.rhs }); } -fn divSigned(func: *CodeGen, lhs: WValue, rhs: WValue, ty: Type) InnerError!WValue { - const mod = func.bin_file.base.comp.module.?; - const int_bits = ty.intInfo(mod).bits; - const wasm_bits = toWasmBits(int_bits) orelse { - return func.fail("TODO: Implement signed division for integers with bitsize '{d}'", .{int_bits}); - }; - - if (wasm_bits == 128) { - return func.fail("TODO: Implement signed division for 128-bit integerrs", .{}); - } - - if (wasm_bits != int_bits) { - // Leave both values on the stack - _ = try func.signExtendInt(lhs, ty); - _ = try func.signExtendInt(rhs, ty); - } else { - try func.emitWValue(lhs); - try func.emitWValue(rhs); - } - switch (wasm_bits) { - 32 => try func.addTag(.i32_div_s), - 64 => try func.addTag(.i64_div_s), - else => unreachable, - } - _ = try func.wrapOperand(.stack, ty); - - const result = try func.allocLocal(ty); - try func.addLabel(.local_set, result.local.value); - return result; -} - fn airRem(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { const bin_op = func.air.instructions.items(.data)[@intFromEnum(inst)].bin_op; - const mod = func.bin_file.base.comp.module.?; const ty = func.typeOfIndex(inst); const lhs = try func.resolveInst(bin_op.lhs); const rhs = try func.resolveInst(bin_op.rhs); - const result = if (ty.isSignedInt(mod)) result: { - const int_bits = ty.intInfo(mod).bits; - const wasm_bits = toWasmBits(int_bits) orelse { - return func.fail("TODO: `@rem` for signed integers larger than 128 bits ({d} bits requested)", .{int_bits}); - }; - - if (wasm_bits > 64) { - return func.fail("TODO: `@rem` for signed integers larger than 64 bits ({d} bits requested)", .{int_bits}); - } - - const lhs_wasm = if (wasm_bits != int_bits) - try (try func.signExtendInt(lhs, ty)).toLocal(func, ty) - else - lhs; - - const rhs_wasm = if (wasm_bits != int_bits) - try (try func.signExtendInt(rhs, ty)).toLocal(func, ty) - else - rhs; - - _ = try func.binOp(lhs_wasm, rhs_wasm, ty, .rem); - break :result try func.wrapOperand(.stack, ty); - } else try func.binOp(lhs, rhs, ty, .rem); + const result = try func.binOp(lhs, rhs, ty, .rem); const return_local = try result.toLocal(func, ty); func.finishAir(inst, return_local, &.{ bin_op.lhs, bin_op.rhs }); @@ -7022,19 +6885,9 @@ fn airMod(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { return func.fail("TODO: `@mod` for signed integers larger than 64 bits ({d} bits requested)", .{int_bits}); } - const lhs_wasm = if (wasm_bits != int_bits) - try (try func.signExtendInt(lhs, ty)).toLocal(func, ty) - else - lhs; - - const rhs_wasm = if (wasm_bits != int_bits) - try (try func.signExtendInt(rhs, ty)).toLocal(func, ty) - else - rhs; - - _ = try func.binOp(lhs_wasm, rhs_wasm, ty, .rem); - _ = try func.binOp(.stack, rhs_wasm, ty, .add); - _ = try func.binOp(.stack, rhs_wasm, ty, .rem); + _ = try func.binOp(lhs, rhs, ty, .rem); + _ = try func.binOp(.stack, rhs, ty, .add); + _ = try func.binOp(.stack, rhs, ty, .rem); } else { return func.fail("TODO: 
implement `@mod` on floating point types for {}", .{func.target.cpu.arch}); } @@ -7044,42 +6897,6 @@ fn airMod(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { func.finishAir(inst, result, &.{ bin_op.lhs, bin_op.rhs }); } -/// Sign extends an N bit signed integer and pushes the result to the stack. -/// The result will be sign extended to 32 bits if N <= 32 or 64 bits if N <= 64. -/// Support for integers wider than 64 bits has not yet been implemented. -fn signExtendInt(func: *CodeGen, operand: WValue, ty: Type) InnerError!WValue { - const mod = func.bin_file.base.comp.module.?; - const int_bits = ty.intInfo(mod).bits; - const wasm_bits = toWasmBits(int_bits) orelse { - return func.fail("TODO: signExtendInt for signed integers larger than '{d}' bits", .{int_bits}); - }; - - const shift_val = switch (wasm_bits) { - 32 => WValue{ .imm32 = wasm_bits - int_bits }, - 64 => WValue{ .imm64 = wasm_bits - int_bits }, - else => return func.fail("TODO: signExtendInt for i128", .{}), - }; - - try func.emitWValue(operand); - switch (wasm_bits) { - 32 => { - try func.emitWValue(shift_val); - try func.addTag(.i32_shl); - try func.emitWValue(shift_val); - try func.addTag(.i32_shr_s); - }, - 64 => { - try func.emitWValue(shift_val); - try func.addTag(.i64_shl); - try func.emitWValue(shift_val); - try func.addTag(.i64_shr_s); - }, - else => unreachable, - } - - return WValue{ .stack = {} }; -} - fn airSatBinOp(func: *CodeGen, inst: Air.Inst.Index, op: Op) InnerError!void { assert(op == .add or op == .sub); const bin_op = func.air.instructions.items(.data)[@intFromEnum(inst)].bin_op; @@ -7131,20 +6948,13 @@ fn airSatBinOp(func: *CodeGen, inst: Air.Inst.Index, op: Op) InnerError!void { return func.finishAir(inst, result, &.{ bin_op.lhs, bin_op.rhs }); } -fn signedSat(func: *CodeGen, lhs_operand: WValue, rhs_operand: WValue, ty: Type, op: Op) InnerError!WValue { +fn signedSat(func: *CodeGen, lhs: WValue, rhs: WValue, ty: Type, op: Op) InnerError!WValue { const mod = func.bin_file.base.comp.module.?; const int_info = ty.intInfo(mod); const wasm_bits = toWasmBits(int_info.bits).?; const is_wasm_bits = wasm_bits == int_info.bits; const ext_ty = if (!is_wasm_bits) try mod.intType(int_info.signedness, wasm_bits) else ty; - var lhs = if (!is_wasm_bits) lhs: { - break :lhs try (try func.signExtendInt(lhs_operand, ty)).toLocal(func, ext_ty); - } else lhs_operand; - var rhs = if (!is_wasm_bits) rhs: { - break :rhs try (try func.signExtendInt(rhs_operand, ty)).toLocal(func, ext_ty); - } else rhs_operand; - const max_val: u64 = @as(u64, @intCast((@as(u65, 1) << @as(u7, @intCast(int_info.bits - 1))) - 1)); const min_val: i64 = (-@as(i64, @intCast(@as(u63, @intCast(max_val))))) - 1; const max_wvalue = switch (wasm_bits) { @@ -7161,8 +6971,6 @@ fn signedSat(func: *CodeGen, lhs_operand: WValue, rhs_operand: WValue, ty: Type, var bin_result = try (try func.binOp(lhs, rhs, ext_ty, op)).toLocal(func, ext_ty); if (!is_wasm_bits) { defer bin_result.free(func); // not returned in this branch - defer lhs.free(func); // uses temporary local for absvalue - defer rhs.free(func); // uses temporary local for absvalue try func.emitWValue(bin_result); try func.emitWValue(max_wvalue); _ = try func.cmp(bin_result, max_wvalue, ext_ty, .lt); @@ -7547,7 +7355,7 @@ fn airErrorSetHasValue(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { var lowest: ?u32 = null; var highest: ?u32 = null; for (0..names.len) |name_index| { - const err_int: Module.ErrorInt = @intCast(mod.global_error_set.getIndex(names.get(ip)[name_index]).?); + const 
err_int: Zcu.ErrorInt = @intCast(mod.global_error_set.getIndex(names.get(ip)[name_index]).?); if (lowest) |*l| { if (err_int < l.*) { l.* = err_int; diff --git a/src/arch/wasm/Emit.zig b/src/arch/wasm/Emit.zig index 716e6e3b94..c41ea9ec55 100644 --- a/src/arch/wasm/Emit.zig +++ b/src/arch/wasm/Emit.zig @@ -6,8 +6,6 @@ const std = @import("std"); const Mir = @import("Mir.zig"); const link = @import("../../link.zig"); const Zcu = @import("../../Zcu.zig"); -/// Deprecated. -const Module = Zcu; const InternPool = @import("../../InternPool.zig"); const codegen = @import("../../codegen.zig"); const leb128 = std.leb; @@ -18,7 +16,7 @@ mir: Mir, bin_file: *link.File.Wasm, /// Possible error message. When set, the value is allocated and /// must be freed manually. -error_msg: ?*Module.ErrorMsg = null, +error_msg: ?*Zcu.ErrorMsg = null, /// The binary representation that will be emit by this module. code: *std.ArrayList(u8), /// List of allocated locals. @@ -259,7 +257,7 @@ fn fail(emit: *Emit, comptime format: []const u8, args: anytype) InnerError { const comp = emit.bin_file.base.comp; const zcu = comp.module.?; const gpa = comp.gpa; - emit.error_msg = try Module.ErrorMsg.create(gpa, zcu.declPtr(emit.decl_index).navSrcLoc(zcu).upgrade(zcu), format, args); + emit.error_msg = try Zcu.ErrorMsg.create(gpa, zcu.declPtr(emit.decl_index).navSrcLoc(zcu).upgrade(zcu), format, args); return error.EmitFail; } diff --git a/src/arch/wasm/abi.zig b/src/arch/wasm/abi.zig index c4d49d51b5..03c68daa85 100644 --- a/src/arch/wasm/abi.zig +++ b/src/arch/wasm/abi.zig @@ -10,8 +10,6 @@ const assert = std.debug.assert; const Type = @import("../../type.zig").Type; const Zcu = @import("../../Zcu.zig"); -/// Deprecated. -const Module = Zcu; /// Defines how to pass a type as part of a function signature, /// both for parameters as well as return values. @@ -24,7 +22,7 @@ const direct: [2]Class = .{ .direct, .none }; /// Classifies a given Zig type to determine how they must be passed /// or returned as value within a wasm function. /// When all elements result in `.none`, no value must be passed in or returned. -pub fn classifyType(ty: Type, mod: *Module) [2]Class { +pub fn classifyType(ty: Type, mod: *Zcu) [2]Class { const ip = &mod.intern_pool; const target = mod.getTarget(); if (!ty.hasRuntimeBitsIgnoreComptime(mod)) return none; @@ -102,7 +100,7 @@ pub fn classifyType(ty: Type, mod: *Module) [2]Class { /// Returns the scalar type a given type can represent. /// Asserts given type can be represented as scalar, such as /// a struct with a single scalar field. 
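// Sketch (not from the patch): the `@mod` lowering that the simplified
// `airMod` above still emits, just without the removed sign-extension
// round-trips: fold a truncated remainder back into the positive range with
// ((lhs rem rhs) + rhs) rem rhs.
const std = @import("std");

test "@mod from @rem (sketch)" {
    const lhs: i32 = -7;
    const rhs: i32 = 3;
    const modulo = @rem(@rem(lhs, rhs) + rhs, rhs);
    try std.testing.expectEqual(@as(i32, 2), modulo);
    try std.testing.expectEqual(@mod(lhs, rhs), modulo);
}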
-pub fn scalarType(ty: Type, mod: *Module) Type { +pub fn scalarType(ty: Type, mod: *Zcu) Type { const ip = &mod.intern_pool; switch (ty.zigTypeTag(mod)) { .Struct => { diff --git a/test/behavior/math.zig b/test/behavior/math.zig index 6ff4380a44..44daec9ed5 100644 --- a/test/behavior/math.zig +++ b/test/behavior/math.zig @@ -393,9 +393,39 @@ fn comptimeAdd(comptime a: comptime_int, comptime b: comptime_int) comptime_int return a + b; } +fn not(comptime T: type, a: T) T { + return ~a; +} + test "binary not" { if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; + try expect(not(u0, 0) == 0); + try expect(not(u1, 0) == 1); + try expect(not(u1, 1) == 0); + try expect(not(u5, 0b01001) == 0b10110); + try expect(not(u5, 0b10110) == 0b01001); + try expect(not(u16, 0b10101010_10101010) == 0b01010101_01010101); + try expect(not(u16, 0b01010101_01010101) == 0b10101010_10101010); + try expect(not(u32, 0xAAAA_3333) == 0x5555_CCCC); + try expect(not(u32, 0x5555_CCCC) == 0xAAAA_3333); + try expect(not(u35, 0x4_1111_FFFF) == 0x3_EEEE_0000); + try expect(not(u35, 0x3_EEEE_0000) == 0x4_1111_FFFF); + try expect(not(u48, 0x4567_89AB_CDEF) == 0xBA98_7654_3210); + try expect(not(u48, 0xBA98_7654_3210) == 0x4567_89AB_CDEF); + try expect(not(u64, 0x0123_4567_89AB_CDEF) == 0xFEDC_BA98_7654_3210); + try expect(not(u64, 0xFEDC_BA98_7654_3210) == 0x0123_4567_89AB_CDEF); + + try expect(not(i0, 0) == 0); + try expect(not(i1, 0) == -1); + try expect(not(i1, -1) == 0); + try expect(not(i5, -2) == 1); + try expect(not(i5, 3) == -4); + try expect(not(i32, 0) == -1); + try expect(not(i32, -2147483648) == 2147483647); + try expect(not(i64, -1) == 0); + try expect(not(i64, 0) == -1); + try expect(comptime x: { break :x ~@as(u16, 0b1010101010101010) == 0b0101010101010101; }); @@ -405,34 +435,40 @@ test "binary not" { try expect(comptime x: { break :x ~@as(u0, 0) == 0; }); - try testBinaryNot(0b1010101010101010); } -fn testBinaryNot(x: u16) !void { - try expect(~x == 0b0101010101010101); -} - -test "binary not 128-bit" { - if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO +test "binary not big int <= 128 bits" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; + try expect(not(u65, 1) == 0x1_FFFFFFFF_FFFFFFFE); + try expect(not(u65, 0x1_FFFFFFFF_FFFFFFFE) == 1); + + try expect(not(u96, 0x01234567_89ABCDEF_00000001) == 0xFEDCBA98_76543210_FFFFFFFE); + try expect(not(u96, 0xFEDCBA98_76543210_FFFFFFFE) == 0x01234567_89ABCDEF_00000001); + + try expect(not(u128, 0xAAAAAAAA_AAAAAAAA_AAAAAAAA_AAAAAAAA) == 0x55555555_55555555_55555555_55555555); + try expect(not(u128, 0x55555555_55555555_55555555_55555555) == 0xAAAAAAAA_AAAAAAAA_AAAAAAAA_AAAAAAAA); + + try expect(not(i65, -1) == 0); + try expect(not(i65, 0) == -1); + try expect(not(i65, -18446744073709551616) == 18446744073709551615); + try expect(not(i65, 18446744073709551615) == -18446744073709551616); + + try expect(not(i128, -1) == 0); + try expect(not(i128, 0) == -1); + try expect(not(i128, -200) == 199); + try expect(not(i128, 199) == -200); + try expect(comptime x: { break :x ~@as(u128, 0x55555555_55555555_55555555_55555555) == 0xaaaaaaaa_aaaaaaaa_aaaaaaaa_aaaaaaaa; }); try expect(comptime x: { break :x ~@as(i128, 
0x55555555_55555555_55555555_55555555) == @as(i128, @bitCast(@as(u128, 0xaaaaaaaa_aaaaaaaa_aaaaaaaa_aaaaaaaa))); }); - - try testBinaryNot128(u128, 0xaaaaaaaa_aaaaaaaa_aaaaaaaa_aaaaaaaa); - try testBinaryNot128(i128, @as(i128, @bitCast(@as(u128, 0xaaaaaaaa_aaaaaaaa_aaaaaaaa_aaaaaaaa)))); -} - -fn testBinaryNot128(comptime Type: type, x: Type) !void { - try expect(~x == @as(Type, 0x55555555_55555555_55555555_55555555)); } test "division" {
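// Follow-up sketch, not part of the patch: the signed cases added above all
// satisfy the two's-complement identity ~x == -x - 1, and the unsigned cases
// satisfy ~x == maxInt(T) - x, which makes it easy to extend the table to
// other odd bit widths if more coverage is wanted.
const std = @import("std");

fn checkNot(comptime T: type, x: T) !void {
    if (@typeInfo(T).Int.signedness == .signed) {
        try std.testing.expectEqual(-%x -% 1, ~x);
    } else {
        try std.testing.expectEqual(std.math.maxInt(T) - x, ~x);
    }
}

test "binary not identities (sketch)" {
    try checkNot(i5, -2);
    try checkNot(i65, -18446744073709551616);
    try checkNot(u35, 0x4_1111_FFFF);
    try checkNot(u128, 0x55555555_55555555_55555555_55555555);
}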