diff --git a/lib/std/fmt.zig b/lib/std/fmt.zig
index 9ab111496c..5cbdc57832 100644
--- a/lib/std/fmt.zig
+++ b/lib/std/fmt.zig
@@ -544,6 +544,13 @@ pub fn formatType(
                     return formatText(value, actual_fmt, options, writer);
                 }
             }
+            if (comptime std.meta.trait.isZigString(info.child)) {
+                for (value) |item, i| {
+                    if (i != 0) try formatText(", ", actual_fmt, options, writer);
+                    try formatText(item, actual_fmt, options, writer);
+                }
+                return;
+            }
             @compileError("Unknown format string: '" ++ actual_fmt ++ "' for type '" ++ @typeName(T) ++ "'");
         },
         .Enum, .Union, .Struct => {
diff --git a/lib/std/rand.zig b/lib/std/rand.zig
index 66f758cb48..7d967d3715 100644
--- a/lib/std/rand.zig
+++ b/lib/std/rand.zig
@@ -47,6 +47,19 @@ pub const Random = struct {
         return r.int(u1) != 0;
     }
 
+    /// Returns a random value from an enum, evenly distributed.
+    pub fn enumValue(r: *Random, comptime EnumType: type) EnumType {
+        if (comptime !std.meta.trait.is(.Enum)(EnumType)) {
+            @compileError("Random.enumValue requires an enum type, not a " ++ @typeName(EnumType));
+        }
+
+        // We won't use int -> enum casting because enum elements can have
+        // arbitrary values. Instead we'll randomly pick one of the type's values.
+        const values = std.enums.values(EnumType);
+        const index = r.uintLessThan(usize, values.len);
+        return values[index];
+    }
+
     /// Returns a random int `i` such that `0 <= i <= maxInt(T)`.
     /// `i` is evenly distributed.
     pub fn int(r: *Random, comptime T: type) T {
@@ -377,6 +390,23 @@ fn testRandomBoolean() !void {
     try expect(r.random.boolean() == true);
 }
 
+test "Random enum" {
+    try testRandomEnumValue();
+    comptime try testRandomEnumValue();
+}
+fn testRandomEnumValue() !void {
+    const TestEnum = enum {
+        First,
+        Second,
+        Third,
+    };
+    var r = SequentialPrng.init();
+    r.next_value = 0;
+    try expect(r.random.enumValue(TestEnum) == TestEnum.First);
+    try expect(r.random.enumValue(TestEnum) == TestEnum.First);
+    try expect(r.random.enumValue(TestEnum) == TestEnum.First);
+}
+
 test "Random intLessThan" {
     @setEvalBranchQuota(10000);
     try testRandomIntLessThan();
diff --git a/src/Sema.zig b/src/Sema.zig
index 0813f749e2..8b6f6d4a9f 100644
--- a/src/Sema.zig
+++ b/src/Sema.zig
@@ -8323,7 +8323,7 @@ fn coerceNum(
         return sema.mod.fail(&block.base, inst_src, "TODO float to int", .{});
     } else if (src_zig_tag == .Int or src_zig_tag == .ComptimeInt) {
         if (!val.intFitsInType(dest_type, target)) {
-            return sema.mod.fail(&block.base, inst_src, "type {} cannot represent integer value {}", .{ inst_ty, val });
+            return sema.mod.fail(&block.base, inst_src, "type {} cannot represent integer value {}", .{ dest_type, val });
         }
         return try sema.addConstant(dest_type, val);
     }
diff --git a/src/codegen.zig b/src/codegen.zig
index d5b106dbe3..11c007dbed 100644
--- a/src/codegen.zig
+++ b/src/codegen.zig
@@ -1247,6 +1247,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
             const bin_op = self.air.instructions.items(.data)[inst].bin_op;
             const result: MCValue = if (self.liveness.isUnused(inst)) .dead else switch (arch) {
                 .arm, .armeb => try self.genArmBinOp(inst, bin_op.lhs, bin_op.rhs, .bit_and),
+                .x86_64 => try self.genX8664BinMath(inst, bin_op.lhs, bin_op.rhs),
                 else => return self.fail("TODO implement bitwise and for {}", .{self.target.cpu.arch}),
             };
             return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none });
@@ -1256,6 +1257,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
             const bin_op = self.air.instructions.items(.data)[inst].bin_op;
             const result: MCValue = if (self.liveness.isUnused(inst)) .dead else switch (arch) {
                 .arm, .armeb => try self.genArmBinOp(inst, bin_op.lhs, bin_op.rhs, .bit_or),
+                .x86_64 => try self.genX8664BinMath(inst, bin_op.lhs, bin_op.rhs),
                 else => return self.fail("TODO implement bitwise or for {}", .{self.target.cpu.arch}),
             };
             return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none });
diff --git a/src/link/MachO.zig b/src/link/MachO.zig
index 10e079d4f1..3b6eccbc3a 100644
--- a/src/link/MachO.zig
+++ b/src/link/MachO.zig
@@ -54,6 +54,11 @@ d_sym: ?DebugSymbols = null,
 /// For x86_64 that's 4KB, whereas for aarch64, that's 16KB.
 page_size: u16,
 
+/// TODO Should we figure out embedding code signatures for other Apple platforms as part of the linker?
+/// Or should this be a separate tool?
+/// https://github.com/ziglang/zig/issues/9567
+requires_adhoc_codesig: bool,
+
 /// We commit 0x1000 = 4096 bytes of space to the header and
 /// the table of load commands. This should be plenty for any
 /// potential future extensions.
@@ -391,6 +396,13 @@ pub fn openPath(allocator: *Allocator, sub_path: []const u8, options: link.Optio
 
 pub fn createEmpty(gpa: *Allocator, options: link.Options) !*MachO {
     const self = try gpa.create(MachO);
+    const cpu_arch = options.target.cpu.arch;
+    const os_tag = options.target.os.tag;
+    const abi = options.target.abi;
+    const page_size: u16 = if (cpu_arch == .aarch64) 0x4000 else 0x1000;
+    // Adhoc code signature is required when targeting aarch64-macos either directly or indirectly via the simulator
+    // ABI such as aarch64-ios-simulator, etc.
+    const requires_adhoc_codesig = cpu_arch == .aarch64 and (os_tag == .macos or abi == .simulator);
 
     self.* = .{
         .base = .{
@@ -399,7 +411,8 @@ pub fn createEmpty(gpa: *Allocator, options: link.Options) !*MachO {
             .allocator = gpa,
             .file = null,
         },
-        .page_size = if (options.target.cpu.arch == .aarch64) 0x4000 else 0x1000,
+        .page_size = page_size,
+        .requires_adhoc_codesig = requires_adhoc_codesig,
     };
     return self;
 }
@@ -433,7 +446,6 @@ pub fn flushModule(self: *MachO, comp: *Compilation) !void {
     defer tracy.end();
 
     const output_mode = self.base.options.output_mode;
-    const target = self.base.options.target;
 
     switch (output_mode) {
         .Exe => {
@@ -459,7 +471,7 @@ pub fn flushModule(self: *MachO, comp: *Compilation) !void {
         try ds.flushModule(self.base.allocator, self.base.options);
     }
 
-    if (target.cpu.arch == .aarch64) {
+    if (self.requires_adhoc_codesig) {
         // Preallocate space for the code signature.
         // We need to do this at this stage so that we have the load commands with proper values
         // written out to the file.
@@ -492,11 +504,8 @@ pub fn flushModule(self: *MachO, comp: *Compilation) !void {
 
     assert(!self.strtab_dirty);
     assert(!self.strtab_needs_relocation);
 
-    if (target.cpu.arch == .aarch64) {
-        switch (output_mode) {
-            .Exe, .Lib => try self.writeCodeSignature(), // code signing always comes last
-            else => {},
-        }
+    if (self.requires_adhoc_codesig) {
+        try self.writeCodeSignature(); // code signing always comes last
     }
 }
@@ -2841,7 +2850,7 @@ fn addDataInCodeLC(self: *MachO) !void {
 }
 
 fn addCodeSignatureLC(self: *MachO) !void {
-    if (self.code_signature_cmd_index == null and self.base.options.target.cpu.arch == .aarch64) {
+    if (self.code_signature_cmd_index == null and self.requires_adhoc_codesig) {
         self.code_signature_cmd_index = @intCast(u16, self.load_commands.items.len);
         try self.load_commands.append(self.base.allocator, .{
             .LinkeditData = .{
@@ -2935,14 +2944,14 @@ fn flushZld(self: *MachO) !void {
         seg.inner.vmsize = mem.alignForwardGeneric(u64, seg.inner.filesize, self.page_size);
     }
 
-    if (self.base.options.target.cpu.arch == .aarch64) {
+    if (self.requires_adhoc_codesig) {
         try self.writeCodeSignaturePadding();
     }
 
     try self.writeLoadCommands();
     try self.writeHeader();
 
-    if (self.base.options.target.cpu.arch == .aarch64) {
+    if (self.requires_adhoc_codesig) {
         try self.writeCodeSignature();
     }
 }
@@ -4454,7 +4463,7 @@ pub fn populateMissingMetadata(self: *MachO) !void {
         try self.load_commands.append(self.base.allocator, .{ .Uuid = uuid_cmd });
         self.load_commands_dirty = true;
     }
-    if (self.code_signature_cmd_index == null) {
+    if (self.code_signature_cmd_index == null and self.requires_adhoc_codesig) {
         self.code_signature_cmd_index = @intCast(u16, self.load_commands.items.len);
         try self.load_commands.append(self.base.allocator, .{
             .LinkeditData = .{
@@ -5719,8 +5728,8 @@ fn writeStringTableZld(self: *MachO) !void {
 
     try self.base.file.?.pwriteAll(self.strtab.items, symtab.stroff);
 
-    if (symtab.strsize > self.strtab.items.len and self.base.options.target.cpu.arch == .x86_64) {
-        // This is the last section, so we need to pad it out.
+    if (symtab.strsize > self.strtab.items.len) {
+        // This is potentially the last section, so we need to pad it out.
         try self.base.file.?.pwriteAll(&[_]u8{0}, seg.inner.fileoff + seg.inner.filesize - 1);
     }
 }
diff --git a/src/stage1/codegen.cpp b/src/stage1/codegen.cpp
index 063671b1bf..37f1cdef8d 100644
--- a/src/stage1/codegen.cpp
+++ b/src/stage1/codegen.cpp
@@ -3831,10 +3831,14 @@ static LLVMValueRef ir_render_load_ptr(CodeGen *g, Stage1Air *executable,
     LLVMValueRef shift_amt_val = LLVMConstInt(LLVMTypeOf(containing_int), shift_amt, false);
     LLVMValueRef shifted_value = LLVMBuildLShr(g->builder, containing_int, shift_amt_val, "");
 
+    LLVMTypeRef same_size_int = LLVMIntType(size_in_bits);
+    LLVMValueRef mask = LLVMConstAllOnes(LLVMIntType(size_in_bits));
+    mask = LLVMConstZExt(mask, LLVMTypeOf(containing_int));
+    LLVMValueRef masked_value = LLVMBuildAnd(g->builder, shifted_value, mask, "");
+
     if (handle_is_ptr(g, child_type)) {
         LLVMValueRef result_loc = ir_llvm_value(g, instruction->result_loc);
-        LLVMTypeRef same_size_int = LLVMIntType(size_in_bits);
-        LLVMValueRef truncated_int = LLVMBuildTrunc(g->builder, shifted_value, same_size_int, "");
+        LLVMValueRef truncated_int = LLVMBuildTrunc(g->builder, masked_value, same_size_int, "");
         LLVMValueRef bitcasted_ptr = LLVMBuildBitCast(g->builder, result_loc,
                 LLVMPointerType(same_size_int, 0), "");
         LLVMBuildStore(g->builder, truncated_int, bitcasted_ptr);
@@ -3842,12 +3846,11 @@ static LLVMValueRef ir_render_load_ptr(CodeGen *g, Stage1Air *executable,
     }
 
     if (child_type->id == ZigTypeIdFloat) {
-        LLVMTypeRef same_size_int = LLVMIntType(size_in_bits);
-        LLVMValueRef truncated_int = LLVMBuildTrunc(g->builder, shifted_value, same_size_int, "");
+        LLVMValueRef truncated_int = LLVMBuildTrunc(g->builder, masked_value, same_size_int, "");
         return LLVMBuildBitCast(g->builder, truncated_int, get_llvm_type(g, child_type), "");
     }
 
-    return LLVMBuildTrunc(g->builder, shifted_value, get_llvm_type(g, child_type), "");
+    return LLVMBuildTrunc(g->builder, masked_value, get_llvm_type(g, child_type), "");
 }
 
 static bool value_is_all_undef_array(CodeGen *g, ZigValue *const_val, size_t len) {
diff --git a/src/type.zig b/src/type.zig
index 28b87a8afe..41f392c04a 100644
--- a/src/type.zig
+++ b/src/type.zig
@@ -534,15 +534,24 @@ pub const Type = extern union {
                 return a_data.error_set.eql(b_data.error_set) and a_data.payload.eql(b_data.payload);
             },
             .ErrorSet => {
-                const a_is_anyerror = a.tag() == .anyerror;
-                const b_is_anyerror = b.tag() == .anyerror;
+                if (a.tag() == .anyerror and b.tag() == .anyerror) {
+                    return true;
+                }
 
-                if (a_is_anyerror and b_is_anyerror) return true;
-                if (a_is_anyerror or b_is_anyerror) return false;
+                if (a.tag() == .error_set and b.tag() == .error_set) {
+                    return a.castTag(.error_set).?.data.owner_decl == b.castTag(.error_set).?.data.owner_decl;
+                }
 
-                std.debug.panic("TODO implement Type equality comparison of {} and {}", .{
-                    a.tag(), b.tag(),
-                });
+                if (a.tag() == .error_set_inferred and b.tag() == .error_set_inferred) {
+                    return a.castTag(.error_set_inferred).?.data.func == b.castTag(.error_set_inferred).?.data.func;
+                }
+
+                if (a.tag() == .error_set_single and b.tag() == .error_set_single) {
+                    const a_data = a.castTag(.error_set_single).?.data;
+                    const b_data = b.castTag(.error_set_single).?.data;
+                    return std.mem.eql(u8, a_data, b_data);
+                }
+                return false;
             },
             .Opaque,
             .Float,
diff --git a/test/behavior.zig b/test/behavior.zig
index 7c6e98daa9..bb55ec83f1 100644
--- a/test/behavior.zig
+++ b/test/behavior.zig
@@ -71,6 +71,7 @@ test {
     _ = @import("behavior/bugs/7047.zig");
     _ = @import("behavior/bugs/7003.zig");
     _ = @import("behavior/bugs/7250.zig");
+    _ = @import("behavior/bugs/9584.zig");
     _ = @import("behavior/bugs/394.zig");
     _ = @import("behavior/bugs/421.zig");
     _ = @import("behavior/bugs/529.zig");
diff --git a/test/behavior/bugs/9584.zig b/test/behavior/bugs/9584.zig
new file mode 100644
index 0000000000..63a607981e
--- /dev/null
+++ b/test/behavior/bugs/9584.zig
@@ -0,0 +1,60 @@
+const std = @import("std");
+
+const A = packed struct {
+    a: bool,
+    b: bool,
+    c: bool,
+    d: bool,
+
+    e: bool,
+    f: bool,
+    g: bool,
+    h: bool,
+};
+
+const X = union {
+    x: A,
+    y: u64,
+};
+
+pub fn a(
+    x0: i32,
+    x1: i32,
+    x2: i32,
+    x3: i32,
+    x4: i32,
+    flag_a: bool,
+    flag_b: bool,
+) !void {
+    _ = x0;
+    _ = x1;
+    _ = x2;
+    _ = x3;
+    _ = x4;
+    _ = flag_a;
+    // With this bug present, `flag_b` would actually contain the value 17.
+    // Note: this bug only presents itself on debug mode.
+    try std.testing.expect(@ptrCast(*const u8, &flag_b).* == 1);
+}
+
+pub fn b(x: *X) !void {
+    try a(0, 1, 2, 3, 4, x.x.a, x.x.b);
+}
+
+test "bug 9584" {
+    var flags = A{
+        .a = false,
+        .b = true,
+        .c = false,
+        .d = false,
+
+        .e = false,
+        .f = true,
+        .g = false,
+        .h = false,
+    };
+    var x = X{
+        .x = flags,
+    };
+    try b(&x);
+}
diff --git a/test/cases.zig b/test/cases.zig
index ffff88f7d8..e8479b792e 100644
--- a/test/cases.zig
+++ b/test/cases.zig
@@ -1535,6 +1535,48 @@ pub fn addCases(ctx: *TestContext) !void {
             \\}
         , "");
     }
+    {
+        var case = ctx.exe("runtime bitwise and", linux_x64);
+
+        case.addCompareOutput(
+            \\pub fn main() void {
+            \\    var i: u32 = 10;
+            \\    var j: u32 = 11;
+            \\    assert(i & 1 == 0);
+            \\    assert(j & 1 == 1);
+            \\    var m1: u32 = 0b1111;
+            \\    var m2: u32 = 0b0000;
+            \\    assert(m1 & 0b1010 == 0b1010);
+            \\    assert(m2 & 0b1010 == 0b0000);
+            \\}
+            \\fn assert(b: bool) void {
+            \\    if (!b) unreachable;
+            \\}
+        ,
+            "",
+        );
+    }
+    {
+        var case = ctx.exe("runtime bitwise or", linux_x64);
+
+        case.addCompareOutput(
+            \\pub fn main() void {
+            \\    var i: u32 = 10;
+            \\    var j: u32 = 11;
+            \\    assert(i | 1 == 11);
+            \\    assert(j | 1 == 11);
+            \\    var m1: u32 = 0b1111;
+            \\    var m2: u32 = 0b0000;
+            \\    assert(m1 | 0b1010 == 0b1111);
+            \\    assert(m2 | 0b1010 == 0b1010);
+            \\}
+            \\fn assert(b: bool) void {
+            \\    if (!b) unreachable;
+            \\}
+        ,
+            "",
+        );
+    }
     {
         var case = ctx.exe("merge error sets", linux_x64);
 
@@ -1567,6 +1609,24 @@ pub fn addCases(ctx: *TestContext) !void {
             ":2:20: note: '||' merges error sets; 'or' performs boolean OR",
         });
     }
+    {
+        var case = ctx.exe("error set equality", linux_x64);
+
+        case.addCompareOutput(
+            \\pub fn main() void {
+            \\    assert(@TypeOf(error.Foo) == @TypeOf(error.Foo));
+            \\    assert(@TypeOf(error.Bar) != @TypeOf(error.Foo));
+            \\    assert(anyerror == anyerror);
+            \\    assert(error{Foo} != error{Foo});
+            \\    // TODO put inferred error sets here when @typeInfo works
+            \\}
+            \\fn assert(b: bool) void {
+            \\    if (!b) unreachable;
+            \\}
+        ,
+            "",
+        );
+    }
     {
         var case = ctx.exe("inline assembly", linux_x64);